refactors directory and file structure

Signed-off-by: neelanjan00 <neelanjan.manna@harness.io>
This commit is contained in:
neelanjan00
2023-06-05 13:15:56 +05:30
parent 9bf371a651
commit ae8467237a
840 changed files with 6787 additions and 43442 deletions

View File

@@ -0,0 +1,33 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: container-kill
  version: 0.1.0
  annotations:
    categories: "Kubernetes"
    vendor: "Harness"
spec:
  displayName: Container Kill
  categoryDescription: |
    Container kill fault disrupts state of kubernetes resources. This fault injects random container delete failures against specified application.
    - Executes SIGKILL on containers of random replicas of an application deployment.
    - Tests deployment sanity (replica availability & uninterrupted service) and recovery workflows of the application pod.
  keywords:
    - Kubernetes
  platforms:
    - GKE
    - Minikube
    - Packet(Kubeadm)
    - EKS
    - AKS
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  icon:
    # no icon bundled for this fault; explicit empty strings avoid a null value
    - url: ""
      mediatype: ""

View File

@@ -0,0 +1,41 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: default
spec:
  # It can be active/stop
  engineState: 'active'
  appinfo:
    appns: ''
    applabel: ''
    appkind: ''
  chaosServiceAccount: container-kill-sa
  experiments:
    - name: container-kill
      spec:
        components:
          env:
            # provide the total chaos duration
            - name: TOTAL_CHAOS_DURATION
              value: '20'
            # provide the chaos interval
            - name: CHAOS_INTERVAL
              value: '10'
            # provide the name of container runtime
            # it supports docker, containerd, crio
            - name: CONTAINER_RUNTIME
              value: 'containerd'
            # provide the socket file path
            - name: SOCKET_PATH
              value: '/run/containerd/containerd.sock'
            - name: PODS_AFFECTED_PERC
              value: ''
            - name: TARGET_CONTAINER
              value: ''

View File

@@ -0,0 +1,123 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
  message: "Kills a container belonging to an application pod \n"
kind: ChaosExperiment
metadata:
  name: container-kill
  labels:
    name: container-kill
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Namespaced
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
      - apiGroups: ["apps"]
        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: ["apps.openshift.io"]
        resources: ["deploymentconfigs"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: [""]
        resources: ["replicationcontrollers"]
        verbs: ["get", "list"]
      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
      - apiGroups: ["argoproj.io"]
        resources: ["rollouts"]
        verbs: ["list", "get"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name container-kill
    command:
      - /bin/bash
    env:
      - name: TARGET_CONTAINER
        value: ''
      # Period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ''
      - name: TARGET_PODS
        value: ''
      # provide the chaos interval
      - name: CHAOS_INTERVAL
        value: '10'
      - name: SIGNAL
        value: 'SIGKILL'
      # provide the socket file path
      - name: SOCKET_PATH
        value: '/run/containerd/containerd.sock'
      # provide the name of container runtime
      # it supports docker, containerd, crio
      - name: CONTAINER_RUNTIME
        value: 'containerd'
      # provide the total chaos duration
      - name: TOTAL_CHAOS_DURATION
        value: '20'
      ## percentage of total pods to target
      - name: PODS_AFFECTED_PERC
        value: ''
      # To select pods on specific node(s)
      - name: NODE_LABEL
        value: ''
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
      - name: LIB_IMAGE
        value: 'chaosnative/go-runner:ci'
      ## it defines the sequence of chaos execution for multiple target pods
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: 'parallel'
    labels:
      name: container-kill
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/runtime-api-usage: "true"
      app.kubernetes.io/version: ci

View File

@@ -0,0 +1,37 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: disk-fill
  version: 0.1.0
  annotations:
    categories: Kubernetes
    vendor: Harness
    support: https://slack.kubernetes.io/
spec:
  displayName: Disk Fill
  categoryDescription: |
    Disk fill fault disrupts state of kubernetes resources.
    - Causes (forced/graceful) Disk Stress by filling up the Ephemeral Storage of the Pod using one of it containers.
    - Causes Pod to get Evicted if the Pod exceeds it Ephemeral Storage Limit.
    - Tests the Ephemeral Storage Limits, to ensure those parameters are sufficient.
  keywords:
    - Kubernetes
  platforms:
    - GKE
    - EKS
    - AKS
  experiments:
    - sock-shop
    - sock-shop-promProbe
  chaosType: infra
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  icon:
    # no icon bundled for this fault; explicit empty strings avoid a null value
    - url: ""
      mediatype: ""

View File

@@ -0,0 +1,36 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: default
spec:
  # It can be active/stop
  engineState: 'active'
  #ex. values: ns1:name=percona,ns2:run=nginx
  auxiliaryAppInfo: ''
  appinfo:
    appns: ''
    applabel: ''
    appkind: ''
  chaosServiceAccount: disk-fill-sa
  experiments:
    - name: disk-fill
      spec:
        components:
          env:
            - name: TOTAL_CHAOS_DURATION
              value: '60'
            # specify the fill percentage according to the disk pressure required
            - name: FILL_PERCENTAGE
              value: '80'
            - name: PODS_AFFECTED_PERC
              value: ''
            # Provide the container runtime path
            # Default set to docker container path
            - name: CONTAINER_PATH
              value: '/var/lib/docker/containers'

View File

@@ -0,0 +1,123 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    Fillup Ephemeral Storage of a Resource
kind: ChaosExperiment
metadata:
  name: disk-fill
  labels:
    name: disk-fill
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Namespaced
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
      - apiGroups: ["apps"]
        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: ["apps.openshift.io"]
        resources: ["deploymentconfigs"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: [""]
        resources: ["replicationcontrollers"]
        verbs: ["get", "list"]
      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
      - apiGroups: ["argoproj.io"]
        resources: ["rollouts"]
        verbs: ["list", "get"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name disk-fill
    command:
      - /bin/bash
    env:
      - name: TARGET_CONTAINER
        value: ''
      - name: FILL_PERCENTAGE
        value: '80'
      - name: TOTAL_CHAOS_DURATION
        value: '60'
      # Period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ''
      # provide the data block size
      # supported unit is KB
      - name: DATA_BLOCK_SIZE
        value: '256'
      - name: TARGET_PODS
        value: ''
      - name: EPHEMERAL_STORAGE_MEBIBYTES
        value: ''
      # To select pods on specific node(s)
      - name: NODE_LABEL
        value: ''
      ## percentage of total pods to target
      - name: PODS_AFFECTED_PERC
        value: ''
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
      - name: LIB_IMAGE
        value: 'chaosnative/go-runner:ci'
      # Provide the container runtime path
      # Default set to docker container path
      - name: CONTAINER_PATH
        value: '/var/lib/docker/containers'
      ## it defines the sequence of chaos execution for multiple target pods
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: 'parallel'
    labels:
      name: disk-fill
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/host-path-usage: "true"
      app.kubernetes.io/version: ci

View File

@@ -0,0 +1,30 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: docker-service-kill
  version: 0.1.0
  annotations:
    categories: Kubernetes
    vendor: Harness
spec:
  displayName: Docker Service Kill
  categoryDescription: |
    docker-service-kill fault kills the Docker service gracefully for a certain chaos duration.
    - Causes replicas to be evicted or become unreachable on account of nodes turning unschedulable (Not Ready) due to Docker service kill.
    - The application node should be healthy once the chaos is stopped and the services are re-accessable.
  keywords:
    - Kubernetes
  platforms:
    - GKE
    - AKS
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  icon:
    - base64data: ""
      mediatype: ""

View File

@@ -0,0 +1,27 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: default
spec:
  # It can be active/stop
  engineState: 'active'
  auxiliaryAppInfo: ''
  chaosServiceAccount: docker-service-kill-sa
  experiments:
    - name: docker-service-kill
      spec:
        components:
          # nodeSelector:
          #   # provide the node labels
          #   kubernetes.io/hostname: 'node02'
          env:
            - name: TOTAL_CHAOS_DURATION
              value: '90'  # in seconds
            - name: TARGET_NODE
              value: ''
            - name: NODE_LABEL
              value: ""

View File

@@ -0,0 +1,84 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    Kills the Docker service on the application node to check the resiliency.
kind: ChaosExperiment
metadata:
  name: docker-service-kill
  labels:
    name: docker-service-kill
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Cluster
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
      # for experiment to perform node status checks
      - apiGroups: [""]
        resources: ["nodes"]
        verbs: ["get", "list"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name docker-service-kill
    command:
      - /bin/bash
    env:
      - name: TOTAL_CHAOS_DURATION
        value: '90'  # in seconds
      # Period to wait before injection of chaos in sec
      - name: RAMP_TIME
        value: ''
      - name: NODE_LABEL
        value: ''
      # provide lib image
      - name: LIB_IMAGE
        value: 'ubuntu:16.04'
      # provide the target node name
      - name: TARGET_NODE
        value: ''
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
    labels:
      name: docker-service-kill
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/service-kill: "true"
      app.kubernetes.io/version: ci

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.7 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.5 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.0 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.0 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 959 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.5 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.5 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.7 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

View File

@@ -0,0 +1,26 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: default
spec:
  # It can be active/stop
  engineState: 'active'
  #ex. values: ns1:name=percona,ns2:run=nginx
  auxiliaryAppInfo: ''
  chaosServiceAccount: kubelet-service-kill-sa
  experiments:
    - name: kubelet-service-kill
      spec:
        components:
          # nodeSelector:
          #   # provide the node labels
          #   kubernetes.io/hostname: 'node02'
          env:
            - name: TOTAL_CHAOS_DURATION
              value: '60'  # in seconds
            # provide the target node name
            - name: TARGET_NODE
              value: ''

View File

@@ -0,0 +1,84 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    Kills the Kubelet service on the application node to check the resiliency.
kind: ChaosExperiment
metadata:
  name: kubelet-service-kill
  labels:
    name: kubelet-service-kill
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Cluster
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
      # for experiment to perform node status checks
      - apiGroups: [""]
        resources: ["nodes"]
        verbs: ["get", "list"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name kubelet-service-kill
    command:
      - /bin/bash
    env:
      - name: TOTAL_CHAOS_DURATION
        value: '60'  # in seconds
      # Period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ''
      - name: NODE_LABEL
        value: ''
      # provide lib image
      - name: LIB_IMAGE
        value: 'ubuntu:16.04'
      # provide the target node name
      - name: TARGET_NODE
        value: ''
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
    labels:
      name: kubelet-service-kill
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/service-kill: "true"
      app.kubernetes.io/version: ci

View File

@@ -0,0 +1,33 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: kubelet-service-kill
  version: 0.1.0
  annotations:
    categories: Kubernetes
    vendor: Harness
spec:
  displayName: Kubelet Service Kill
  categoryDescription: |
    kubelet-service-kill fault kills the Kubelet service gracefully for a certain chaos duration.
    - Replicas may be evicted or become unreachable on account on nodes turning unschedulable (Not Ready) due to Kubelet service kill.
    - The application node should be healthy once chaos is stopped and the services are reaccessable.
  keywords:
    - Kubernetes
  platforms:
    - GKE
    - Packet(Kubeadm)
    - Minikube
    - EKS
    - AKS
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  icon:
    - base64data: ""
      mediatype: ""

View File

@@ -0,0 +1,120 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: kubernetes
  version: 0.1.0
  annotations:
    categories: Kubernetes
    chartDescription: Injects kubernetes chaos experiment
    executionPlane: "kubernetes"
spec:
  displayName: Kubernetes
  categoryDescription: >
    Kubernetes is an open-source system for automating deployment, scaling, and management of containerized applications. It groups containers that make up an application into logical units for easier management and discovery. It will install all the experiments which can be used to inject chaos into containerized applications.
  faults:
    - name: pod-delete
      description: It injects pod-delete chaos in a Kubernetes pod.
      displayName: "Pod Delete"
    - name: container-kill
      description: It injects container-kill chaos in a Kubernetes pod .
      displayName: "Container Kill"
    - name: pod-cpu-hog
      description: It injects cpu-hog chaos in a Kubernetes pod.
      displayName: "Pod CPU Hog"
    - name: pod-network-loss
      description: It injects network-loss chaos in a Kubernetes pod.
      displayName: "Pod Network Loss"
    - name: pod-network-latency
      description: It injects network-latency chaos in a Kubernetes pod.
      displayName: "Pod Network Latency"
    - name: pod-network-corruption
      description: It injects network-corruption chaos in a Kubernetes pod.
      displayName: "Pod Network Corruption"
    - name: node-drain
      description: It injects node-drain chaos in a Kubernetes node.
      displayName: "Node Drain"
    - name: node-cpu-hog
      description: It injects node-cpu-hog chaos in a Kubernetes node.
      displayName: "Node CPU Hog"
    - name: node-restart
      description: It injects node-restart chaos in a Kubernetes node.
      displayName: "Node Restart"
    - name: disk-fill
      description: It injects disk-fill chaos in a Kubernetes node.
      displayName: "Disk Fill"
    - name: node-memory-hog
      description: It injects node-memory-hog chaos in a Kubernetes node.
      displayName: "Node Memory Hog"
    - name: node-poweroff
      description: Node poweroff contains chaos experiment to poweroff a node via SSH.
      displayName: "Node Poweroff"
    - name: pod-memory-hog
      description: It injects pod-memory-hog chaos in a Kubernetes pod.
      displayName: "Pod Memory Hog"
    - name: kubelet-service-kill
      description: It injects kubelet-service-kill chaos in a Kubernetes node.
      displayName: "Kubelet Service Kill"
    - name: pod-network-duplication
      description: It injects pod-network-duplication chaos in a Kubernetes pod.
      displayName: "Pod Network Duplication"
    - name: node-taint
      description: It injects node-taint chaos in a Kubernetes node.
      displayName: "Node Taint"
    - name: docker-service-kill
      description: It injects docker-service-kill chaos in a Kubernetes pod.
      displayName: "Docker Service Kill"
    - name: pod-autoscaler
      description: It injects pod-autoscaler chaos in a Kubernetes pod.
      displayName: "Pod Autoscaler"
    - name: node-io-stress
      description: It injects node-io-stress chaos in a Kubernetes node.
      displayName: "Node IO Stress"
    - name: pod-io-stress
      description: It injects pod-io-stress chaos in a Kubernetes pod.
      displayName: "Pod IO Stress"
    - name: pod-dns-error
      description: It injects pod-dns-error chaos in a Kubernetes pod.
      displayName: "Pod DNS Error"
    - name: pod-dns-spoof
      description: It injects pod-dns-spoof chaos in a Kubernetes pod.
      displayName: "Pod DNS Spoof"
    - name: pod-cpu-hog-exec
      description: It injects pod-cpu-hog-exec chaos in a Kubernetes pod.
      displayName: "Pod CPU Hog Exec"
    - name: pod-memory-hog-exec
      description: It injects pod-memory-hog-exec chaos in a Kubernetes pod.
      displayName: "Pod Memory Hog Exec"
    - name: pod-network-partition
      description: It injects pod-network-partition chaos in a Kubernetes pod.
      displayName: "Pod Network Partition"
    - name: pod-http-latency
      description: It injects pod-http-latency chaos in a Kubernetes pod.
      displayName: "Pod HTTP Latency"
    - name: pod-http-status-code
      description: It injects pod-http-status-code chaos in a Kubernetes pod.
      displayName: "Pod HTTP Status Code"
    - name: pod-http-modify-header
      description: It injects pod-http-modify-header chaos in a Kubernetes pod.
      displayName: "Pod HTTP Modify Header"
    - name: pod-http-modify-body
      description: It injects pod-http-modify-body chaos in a Kubernetes pod.
      displayName: "Pod HTTP Modify Body"
    - name: pod-http-reset-peer
      description: It injects pod-http-reset-peer chaos in a Kubernetes pod.
      displayName: "Pod HTTP Reset Peer"
  keywords:
    - Kubernetes
  minKubeVersion: 1.12.0
  links:
    - name: Kubernetes Website
      url: https://kubernetes.io
    - name: Source Code
      url: https://github.com/kubernetes/kubernetes
    - name: Kubernetes Slack
      url: https://slack.kubernetes.io/
    - name: Documentation
      url: https://litmuschaos.github.io/litmus/experiments/categories/contents/#kubernetes
  icon:
    - url: https://raw.githubusercontent.com/litmuschaos/charthub.litmuschaos.io/master/public/litmus.ico
      mediatype: image/png

View File

@@ -0,0 +1,102 @@
---
# Index of all fault CSV files bundled in the "kubernetes" chart package.
packageName: kubernetes
faults:
  - name: pod-delete
    CSV: pod-delete.chartserviceversion.yaml
    desc: "pod-delete"
  - name: container-kill
    CSV: container-kill.chartserviceversion.yaml
    desc: "container-kill"
  - name: pod-network-loss
    CSV: pod-network-loss.chartserviceversion.yaml
    desc: "pod-network-loss"
  - name: pod-network-latency
    CSV: pod-network-latency.chartserviceversion.yaml
    desc: "pod-network-latency"
  - name: pod-cpu-hog
    CSV: pod-cpu-hog.chartserviceversion.yaml
    desc: "pod-cpu-hog"
  - name: node-cpu-hog
    CSV: node-cpu-hog.chartserviceversion.yaml
    desc: "node-cpu-hog"
  - name: disk-fill
    CSV: disk-fill.chartserviceversion.yaml
    desc: "disk-fill"
  - name: node-drain
    CSV: node-drain.chartserviceversion.yaml
    desc: "node-drain"
  - name: pod-network-corruption
    CSV: pod-network-corruption.chartserviceversion.yaml
    desc: "pod-network-corruption"
  - name: node-memory-hog
    CSV: node-memory-hog.chartserviceversion.yaml
    desc: "node-memory-hog"
  - name: node-poweroff
    CSV: node-poweroff.chartserviceversion.yaml
    desc: "node-poweroff"
  - name: node-restart
    CSV: node-restart.chartserviceversion.yaml
    desc: "node-restart"
  - name: pod-memory-hog
    CSV: pod-memory-hog.chartserviceversion.yaml
    desc: "pod-memory-hog"
  - name: kubelet-service-kill
    CSV: kubelet-service-kill.chartserviceversion.yaml
    desc: "kubelet-service-kill"
  - name: pod-network-duplication
    CSV: pod-network-duplication.chartserviceversion.yaml
    desc: "pod-network-duplication"
  - name: node-taint
    CSV: node-taint.chartserviceversion.yaml
    desc: "node-taint"
  - name: docker-service-kill
    CSV: docker-service-kill.chartserviceversion.yaml
    desc: "docker-service-kill"
  - name: pod-autoscaler
    CSV: pod-autoscaler.chartserviceversion.yaml
    desc: "pod-autoscaler"
  - name: node-io-stress
    CSV: node-io-stress.chartserviceversion.yaml
    desc: "node-io-stress"
  - name: pod-io-stress
    CSV: pod-io-stress.chartserviceversion.yaml
    desc: "pod-io-stress"
  - name: pod-dns-error
    CSV: pod-dns-error.chartserviceversion.yaml
    desc: "pod-dns-error"
  - name: pod-dns-spoof
    CSV: pod-dns-spoof.chartserviceversion.yaml
    desc: "pod-dns-spoof"
  - name: pod-cpu-hog-exec
    CSV: pod-cpu-hog-exec.chartserviceversion.yaml
    desc: "pod-cpu-hog-exec"
  - name: pod-memory-hog-exec
    CSV: pod-memory-hog-exec.chartserviceversion.yaml
    desc: "pod-memory-hog-exec"
  - name: pod-network-partition
    CSV: pod-network-partition.chartserviceversion.yaml
    desc: "pod-network-partition"
  - name: pod-http-latency
    CSV: pod-http-latency.chartserviceversion.yaml
    desc: "pod-http-latency"
  - name: pod-http-status-code
    CSV: pod-http-status-code.chartserviceversion.yaml
    desc: "pod-http-status-code"
  - name: pod-http-modify-header
    CSV: pod-http-modify-header.chartserviceversion.yaml
    desc: "pod-http-modify-header"
  - name: pod-http-modify-body
    CSV: pod-http-modify-body.chartserviceversion.yaml
    desc: "pod-http-modify-body"
  - name: pod-http-reset-peer
    CSV: pod-http-reset-peer.chartserviceversion.yaml
    desc: "pod-http-reset-peer"
  - name: node-network-latency
    CSV: node-network-latency.chartserviceversion.yaml
    desc: "node-network-latency"
  - name: node-network-loss
    CSV: node-network-loss.chartserviceversion.yaml
    desc: "node-network-loss"
  - name: time-chaos
    CSV: time-chaos.chartserviceversion.yaml
    desc: "time-chaos"

View File

@@ -0,0 +1,38 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: default
spec:
  # It can be active/stop
  engineState: 'active'
  #ex. values: ns1:name=percona,ns2:run=nginx
  auxiliaryAppInfo: ''
  chaosServiceAccount: node-cpu-hog-sa
  experiments:
    - name: node-cpu-hog
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '60'
            ## ENTER THE NUMBER OF CORES OF CPU FOR CPU HOGGING
            ## OPTIONAL VALUE IN CASE OF EMPTY VALUE IT WILL TAKE NODE CPU CAPACITY
            - name: NODE_CPU_CORE
              value: '1'
            ## LOAD CPU WITH GIVEN PERCENT LOADING FOR THE CPU STRESS WORKERS.
            ## 0 IS EFFECTIVELY A SLEEP (NO LOAD) AND 100 IS FULL LOADING
            - name: CPU_LOAD
              value: '0'
            ## percentage of total nodes to target
            - name: NODES_AFFECTED_PERC
              value: ''
            # provide the comma separated target node names
            - name: TARGET_NODES
              value: ''

View File

@@ -0,0 +1,102 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    Give a CPU spike on a node belonging to a deployment
kind: ChaosExperiment
metadata:
  name: node-cpu-hog
  labels:
    name: node-cpu-hog
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Cluster
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
      # for experiment to perform node status checks
      - apiGroups: [""]
        resources: ["nodes"]
        verbs: ["get", "list"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name node-cpu-hog
    command:
      - /bin/bash
    env:
      - name: TOTAL_CHAOS_DURATION
        value: '60'
      # Period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ''
      ## ENTER THE NUMBER OF CORES OF CPU FOR CPU HOGGING
      ## OPTIONAL VALUE IN CASE OF EMPTY VALUE IT WILL TAKE NODE CPU CAPACITY
      - name: NODE_CPU_CORE
        value: ''
      ## LOAD CPU WITH GIVEN PERCENT LOADING FOR THE CPU STRESS WORKERS.
      ## 0 IS EFFECTIVELY A SLEEP (NO LOAD) AND 100 IS FULL LOADING
      - name: CPU_LOAD
        value: '100'
      # ENTER THE COMMA SEPARATED TARGET NODES NAME
      - name: TARGET_NODES
        value: ''
      - name: NODE_LABEL
        value: ''
      # provide lib image
      - name: LIB_IMAGE
        value: 'chaosnative/go-runner:ci'
      ## percentage of total nodes to target
      - name: NODES_AFFECTED_PERC
        value: ''
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
      ## it defines the sequence of chaos execution for multiple target nodes
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: 'parallel'
    labels:
      name: node-cpu-hog
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/version: ci

View File

@@ -0,0 +1,37 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: node-cpu-hog
version: 0.1.0
annotations:
categories: Kubernetes
vendor: Harness
spec:
displayName: Node CPU Hog
categoryDescription: |
Node CPU hog fault disrupts the state of Kubernetes resources. This fault injects a CPU spike on a node where the application pod is scheduled.
- CPU hog on a particular node where the application deployment is available.
- After test, the recovery should be manual for the application pod and node in case they are not in an appropriate state.
keywords:
- Kubernetes
platforms:
- GKE
- EKS
- AKS
- Kind
- Rancher
- OpenShift(OKD)
experiments:
- node-cpu-hog
chaosType: infra
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""

View File

@@ -0,0 +1,26 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
chaosServiceAccount: node-drain-sa
experiments:
- name: node-drain
spec:
components:
# nodeSelector:
# # provide the node labels
# kubernetes.io/hostname: 'node02'
env:
- name: TOTAL_CHAOS_DURATION
value: '60'
# enter the target node name
- name: TARGET_NODE
value: ''

View File

@@ -0,0 +1,82 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Drain the node where application pod is scheduled
kind: ChaosExperiment
metadata:
name: node-drain
labels:
name: node-drain
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec", "pods/eviction"]
verbs: ["get", "list", "create"]
# ignore daemonsets while draining the node
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["list", "get", "delete"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "patch"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name node-drain
command:
- /bin/bash
env:
- name: TARGET_NODE
value: ''
- name: NODE_LABEL
value: ''
- name: TOTAL_CHAOS_DURATION
value: '60'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
- name: DEFAULT_HEALTH_CHECK
value: 'false'
labels:
name: node-drain
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci

View File

@@ -0,0 +1,33 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: node-drain
version: 0.1.0
annotations:
categories: Kubernetes
vendor: Harness
spec:
displayName: Node Drain
categoryDescription: >
Drain the node where application pod is scheduled.
keywords:
- Kubernetes
platforms:
- GKE
- AWS(KOPS)
- Packet(Kubeadm)
- Konvoy
- EKS
- AKS
chaosType: infra
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url: ""
mediatype: ""

View File

@@ -0,0 +1,40 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
chaosServiceAccount: node-io-stress-sa
experiments:
- name: node-io-stress
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '120'
## specify the size as percentage of free space on the file system
- name: FILESYSTEM_UTILIZATION_PERCENTAGE
value: '10'
## Number of core of CPU
- name: CPU
value: '1'
## Total number of workers default value is 4
- name: NUMBER_OF_WORKERS
value: '4'
## percentage of total nodes to target
- name: NODES_AFFECTED_PERC
value: ''
# provide the comma separated target node names
- name: TARGET_NODES
value: ''

View File

@@ -0,0 +1,114 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Give IO disk stress on a node belonging to a deployment
kind: ChaosExperiment
metadata:
name: node-io-stress
labels:
name: node-io-stress
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name node-io-stress
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '120'
## specify the size as percentage of free space on the file system
## default value 90 (in percentage)
- name: FILESYSTEM_UTILIZATION_PERCENTAGE
value: '10'
## we can specify the size in Gigabyte (Gb) also in place of percentage of free space
## NOTE: for selecting this option FILESYSTEM_UTILIZATION_PERCENTAGE should be empty
- name: FILESYSTEM_UTILIZATION_BYTES
value: ''
## Number of core of CPU
- name: CPU
value: '1'
## Total number of workers default value is 4
- name: NUMBER_OF_WORKERS
value: '4'
## Total number of VM workers
- name: VM_WORKERS
value: '1'
## enter the comma separated target nodes name
- name: TARGET_NODES
value: ''
- name: NODE_LABEL
value: ''
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# provide lib image
- name: LIB_IMAGE
value: 'chaosnative/go-runner:ci'
## percentage of total nodes to target
- name: NODES_AFFECTED_PERC
value: ''
- name: DEFAULT_HEALTH_CHECK
value: 'false'
## it defines the sequence of chaos execution for multiple target nodes
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: node-io-stress
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci

View File

@@ -0,0 +1,32 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: node-io-stress
version: 0.1.0
annotations:
categories: Kubernetes
vendor: Harness
spec:
displayName: Node IO Stress
categoryDescription: |
    This fault causes disk stress on the Kubernetes node. It aims to verify the resiliency of applications that share this disk resource for ephemeral or persistent storage purposes.
- Disk stress on a particular node filesystem where the application deployment is available.
    - The amount of disk stress can be either specified as a percentage of the total free space on the file system or simply in Gigabytes (GB).
keywords:
- Kubernetes
platforms:
- GKE
- EKS
- AKS
chaosType: infra
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url: ""
mediatype: ""

View File

@@ -0,0 +1,38 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
chaosServiceAccount: node-memory-hog-sa
experiments:
- name: node-memory-hog
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '120'
## percentage of total nodes to target
- name: NODES_AFFECTED_PERC
value: ''
# provide the comma separated target node names
- name: TARGET_NODES
value: ''
## Specify the size as percent of total node capacity Ex: '30'
## NOTE: for selecting this option keep MEMORY_CONSUMPTION_MEBIBYTES empty
- name: MEMORY_CONSUMPTION_PERCENTAGE
value: '0'
## Specify the amount of memory to be consumed in mebibytes
## NOTE: for selecting this option keep MEMORY_CONSUMPTION_PERCENTAGE empty
- name: MEMORY_CONSUMPTION_MEBIBYTES
value: '0'

View File

@@ -0,0 +1,105 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Give a memory hog on a node belonging to a deployment
kind: ChaosExperiment
metadata:
name: node-memory-hog
labels:
name: node-memory-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name node-memory-hog
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '120'
## Specify the size as percent of total node capacity Ex: '30'
## NOTE: for selecting this option keep MEMORY_CONSUMPTION_MEBIBYTES empty
- name: MEMORY_CONSUMPTION_PERCENTAGE
value: ''
## Specify the amount of memory to be consumed in mebibytes
## NOTE: for selecting this option keep MEMORY_CONSUMPTION_PERCENTAGE empty
- name: MEMORY_CONSUMPTION_MEBIBYTES
value: ''
- name: NUMBER_OF_WORKERS
value: '1'
# ENTER THE COMMA SEPARATED TARGET NODES NAME
- name: TARGET_NODES
value: ''
- name: NODE_LABEL
value: ''
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# provide lib image
- name: LIB_IMAGE
value: 'chaosnative/go-runner:ci'
- name: DEFAULT_HEALTH_CHECK
value: 'false'
## percentage of total nodes to target
- name: NODES_AFFECTED_PERC
value: ''
## it defines the sequence of chaos execution for multiple target nodes
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: node-memory-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci

View File

@@ -0,0 +1,37 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: node-memory-hog
version: 0.1.0
annotations:
categories: Kubernetes
vendor: Harness
spec:
displayName: Node Memory Hog
categoryDescription: |
Kubernetes Node memory hog fault disrupts the state of Kubernetes resources. This fault injects a memory spike on a node where the application pod is scheduled.
- Memory hog on a particular node where the application deployment is available.
- After the test, the recovery should be manual for the application pod and node in case they are not in an appropriate state.
keywords:
- Kubernetes
platforms:
- GKE
- EKS
- AKS
- Kind
- Rancher
- OpenShift(OKD)
experiments:
- node-memory-hog
chaosType: infra
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url: ""
mediatype: ""

View File

@@ -0,0 +1,33 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
chaosServiceAccount: node-poweroff-sa
experiments:
- name: node-poweroff
spec:
components:
# nodeSelector:
# # provide the node labels
# kubernetes.io/hostname: 'node02'
env:
- name: TOTAL_CHAOS_DURATION
value: '60'
# ENTER THE TARGET NODE NAME
- name: TARGET_NODE
value: ''
# ENTER THE TARGET NODE IP
- name: TARGET_NODE_IP
value: ''
# ENTER THE USER TO BE USED FOR SSH AUTH
- name: SSH_USER
value: 'root'

View File

@@ -0,0 +1,93 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Poweroff a node in the cluster
kind: ChaosExperiment
metadata:
name: node-poweroff
labels:
name: node-poweroff
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps","secrets"]
      verbs: ["get","list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
    # for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get","list"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name node-restart
command:
- /bin/bash
env:
- name: SSH_USER
value: 'root'
- name: TOTAL_CHAOS_DURATION
value: '60'
- name: REBOOT_COMMAND
value: '-o ServerAliveInterval=1 -o ServerAliveCountMax=1 "sudo systemctl poweroff --force --force" ; true'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# provide lib image
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
# ENTER THE TARGET NODE NAME
- name: TARGET_NODE
value: ''
- name: NODE_LABEL
value: ''
# ENTER THE TARGET NODE IP
- name: TARGET_NODE_IP
value: ''
labels:
name: node-poweroff
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: id-rsa
mountPath: /mnt/

View File

@@ -0,0 +1,29 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: node-poweroff
version: 0.1.0
annotations:
categories: Kubernetes
vendor: Harness
spec:
displayName: Node Poweroff
categoryDescription: |
    Node poweroff fault powers off a target node via SSH.
keywords:
- Kubernetes
platforms:
- KVM/LibVirt based K8s
- EKS
chaosType: infra
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url: ""
mediatype: ""

View File

@@ -0,0 +1,37 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
chaosServiceAccount: litmus-admin
experiments:
- name: node-restart
spec:
components:
# nodeSelector:
# # provide the node labels
# kubernetes.io/hostname: 'node02'
env:
- name: TOTAL_CHAOS_DURATION
value: '60'
# ENTER THE TARGET NODE NAME
- name: TARGET_NODE
value: ''
# ENTER THE TARGET NODE IP
- name: TARGET_NODE_IP
value: ''
# ENTER THE USER TO BE USED FOR SSH AUTH
- name: SSH_USER
value: 'root'
- name: REBOOT_COMMAND
value: 'sudo systemctl reboot; true'

View File

@@ -0,0 +1,95 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Restart node
kind: ChaosExperiment
metadata:
name: node-restart
labels:
name: node-restart
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps", "secrets"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name node-restart
command:
- /bin/bash
env:
- name: SSH_USER
value: 'root'
- name: REBOOT_COMMAND
value: 'sudo systemctl reboot; true'
- name: TOTAL_CHAOS_DURATION
value: '60'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# provide lib image
- name: LIB_IMAGE
value: "chaosnative/go-runner:ci"
# ENTER THE TARGET NODE NAME
- name: TARGET_NODE
value: ''
- name: NODE_LABEL
value: ''
# ENTER THE TARGET NODE IP
- name: TARGET_NODE_IP
value: ''
- name: DEFAULT_HEALTH_CHECK
value: 'false'
labels:
name: node-restart
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: id-rsa
mountPath: /mnt/

View File

@@ -0,0 +1,25 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: node-restart
version: 0.1.0
annotations:
    categories: Kubernetes
    vendor: Harness
spec:
displayName: Node Restart
categoryDescription: |
Node restart fault restarts a target node via SSH.
keywords:
- Kubernetes
platforms:
- KVM/LibVirt based K8s
- EKS
chaosType: infra
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
    - url: ""
mediatype: ""

View File

@@ -0,0 +1,31 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
chaosServiceAccount: node-taint-sa
experiments:
- name: node-taint
spec:
components:
# nodeSelector:
# # provide the node labels
# kubernetes.io/hostname: 'node02'
env:
- name: TOTAL_CHAOS_DURATION
value: '60'
# set target node name
- name: TARGET_NODE
value: ''
# set taint label & effect
# key=value:effect or key:effect
- name: TAINTS
value: 'app.hce.io/chaos=true:NoSchedule'

View File

@@ -0,0 +1,87 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Taint the node where application pod is scheduled
kind: ChaosExperiment
metadata:
name: node-taint
labels:
name: node-taint
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec", "pods/eviction"]
verbs: ["get", "list", "create"]
# ignore daemonsets while draining the node
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["list", "get", "delete"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "patch", "update"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name node-taint
command:
- /bin/bash
env:
- name: TARGET_NODE
value: ''
- name: NODE_LABEL
value: ''
- name: TOTAL_CHAOS_DURATION
value: '60'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# set taint label & effect
# key=value:effect or key:effect
- name: TAINTS
value: ''
- name: DEFAULT_HEALTH_CHECK
value: 'false'
labels:
name: node-taint
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci

View File

@@ -0,0 +1,33 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: node-taint
version: 0.1.0
annotations:
categories: Kubernetes
vendor: Harness
spec:
displayName: Node Taint
categoryDescription: >
Taint the node where application pod is scheduled.
keywords:
- Kubernetes
platforms:
- GKE
- AWS(KOPS)
- Packet(Kubeadm)
- Konvoy
- EKS
- AKS
chaosType: infra
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url: ""
mediatype: ""

View File

@@ -0,0 +1,29 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
appinfo:
appns: ''
applabel: ''
appkind: ''
chaosServiceAccount: pod-autoscaler-sa
experiments:
- name: pod-autoscaler
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '60'
# number of replicas to scale
- name: REPLICA_COUNT
value: '5'

View File

@@ -0,0 +1,76 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Scale the application replicas and test the node autoscaling on cluster
kind: ChaosExperiment
metadata:
name: pod-autoscaler
labels:
name: pod-autoscaler
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# performs CRUD operations on the deployments and statefulsets
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets"]
verbs: ["list", "get", "patch", "update"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-autoscaler
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '60'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# Number of replicas to scale
- name: REPLICA_COUNT
value: '5'
- name: DEFAULT_HEALTH_CHECK
value: 'false'
labels:
name: pod-autoscaler
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci

View File

@@ -0,0 +1,32 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: pod-autoscaler
version: 0.1.0
annotations:
categories: Kubernetes
vendor: Harness
spec:
displayName: Pod Autoscaler
categoryDescription: |
    This fault aims to check the ability of nodes to accommodate the increased number of replicas of a given application pod.
It can be used for other cases as well, such as for checking the Node auto-scaling feature. For example, check if the pods are successfully rescheduled within a specified period in cases where the existing nodes are already running at the specified limits.
keywords:
- Kubernetes
platforms:
- GKE
- EKS
- Minikube
- AKS
chaosType: infra
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
    - url: ""
mediatype: ""

View File

@@ -0,0 +1,31 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
appinfo:
appns: ''
applabel: ''
appkind: ''
chaosServiceAccount: pod-cpu-hog-exec-sa
experiments:
- name: pod-cpu-hog-exec
spec:
components:
env:
- name: TOTAL_CHAOS_DURATION
value: '60' # in seconds
#number of CPU cores to be consumed
#verify the resources the app has been launched with
- name: CPU_CORES
value: '1'
## Percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''

View File

@@ -0,0 +1,107 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects CPU consumption on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-cpu-hog-exec
labels:
name: pod-cpu-hog-exec
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-cpu-hog-exec
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '60'
## Number of CPU cores to stress
- name: CPU_CORES
value: '1'
## Percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''
## Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# The command to kill the chaos process
- name: CHAOS_KILL_COMMAND
value: "kill $(find /proc -name exe -lname '*/md5sum' 2>&1 | grep -v 'Permission denied' | awk -F/ '{print $(NF-1)}')"
- name: TARGET_CONTAINER
value: ''
- name: TARGET_PODS
value: ''
- name: DEFAULT_HEALTH_CHECK
value: 'false'
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: pod-cpu-hog-exec
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci

View File

@@ -0,0 +1,34 @@
---
# ChartServiceVersion: catalog metadata for the pod-cpu-hog-exec fault
# (display name, description, supported platforms, and documentation links).
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: pod-cpu-hog-exec
  version: 0.1.0
  annotations:
    categories: Kubernetes
    vendor: Harness
spec:
  displayName: Pod CPU Hog Exec
  categoryDescription: |
    pod-cpu-hog-exec fault consumes CPU resources of specified containers in Kubernetes pods.
    - Causes high CPU resource consumption utilizing one or more cores by triggering md5sum commands.
    - The application pod should be healthy once chaos is stopped. Expectation is that service-requests should be served despite chaos.
  keywords:
    - Kubernetes
  platforms:
    - GKE
    - Packet(Kubeadm)
    - Minikube
    - EKS
    - AKS
    - Kind
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  # No icon is bundled for this chart yet; both fields are intentionally empty.
  icon:
    - base64data: ""
      mediatype: ""

View File

@@ -0,0 +1,36 @@
---
# ChaosEngine: sample runtime configuration that triggers the pod-cpu-hog
# fault. See the pod-cpu-hog ChaosExperiment for the full list of tunables.
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: default
spec:
  # It can be active/stop
  engineState: 'active'
  # target application selector; empty values must be filled in by the user
  appinfo:
    appns: ''
    applabel: ''
    appkind: ''
  chaosServiceAccount: pod-cpu-hog-sa
  experiments:
    - name: pod-cpu-hog
      spec:
        components:
          env:
            - name: TOTAL_CHAOS_DURATION
              value: '60' # in seconds
            # number of CPU cores to stress in each target pod
            - name: CPU_CORES
              value: '1'
            ## Percentage of total pods to target
            - name: PODS_AFFECTED_PERC
              value: ''
            ## provide the cluster runtime
            - name: CONTAINER_RUNTIME
              value: 'containerd'
            # provide the socket file path
            - name: SOCKET_PATH
              value: '/run/containerd/containerd.sock'

View File

@@ -0,0 +1,129 @@
---
# ChaosExperiment: pod-cpu-hog — stresses CPU of target pod containers.
# `permissions` is the RBAC this experiment's service account needs;
# `env` holds the user-overridable defaults.
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    Injects CPU consumption on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
  name: pod-cpu-hog
  labels:
    name: pod-cpu-hog
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Namespaced
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
      - apiGroups: ["apps"]
        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: ["apps.openshift.io"]
        resources: ["deploymentconfigs"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: [""]
        resources: ["replicationcontrollers"]
        verbs: ["get", "list"]
      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
      - apiGroups: ["argoproj.io"]
        resources: ["rollouts"]
        verbs: ["list", "get"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name pod-cpu-hog
    command:
      - /bin/bash
    env:
      # total chaos injection window, in seconds
      - name: TOTAL_CHAOS_DURATION
        value: '60'
      ## Number of CPU cores to stress
      - name: CPU_CORES
        value: '1'
      ## LOAD CPU WITH GIVEN PERCENT LOADING FOR THE CPU STRESS WORKERS.
      ## 0 IS EFFECTIVELY A SLEEP (NO LOAD) AND 100 IS FULL LOADING
      - name: CPU_LOAD
        value: '100'
      ## Percentage of total pods to target
      - name: PODS_AFFECTED_PERC
        value: ''
      ## Period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ''
      ## It is used in pumba lib only
      - name: LIB_IMAGE
        value: 'chaosnative/go-runner:ci'
      ## It is used in pumba lib only
      - name: STRESS_IMAGE
        value: 'alexeiled/stress-ng:latest-ubuntu'
      ## provide the cluster runtime
      - name: CONTAINER_RUNTIME
        value: 'containerd'
      # provide the socket file path
      - name: SOCKET_PATH
        value: '/run/containerd/containerd.sock'
      # name of the container to target; empty lets the experiment pick
      - name: TARGET_CONTAINER
        value: ''
      # comma-separated list of specific pods to target; empty selects by label
      - name: TARGET_PODS
        value: ''
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
      # To select pods on specific node(s)
      - name: NODE_LABEL
        value: ''
      ## it defines the sequence of chaos execution for multiple target pods
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: 'parallel'
    labels:
      name: pod-cpu-hog
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/runtime-api-usage: "true"
      app.kubernetes.io/version: ci

View File

@@ -0,0 +1,39 @@
---
# ChartServiceVersion: catalog metadata for the pod-cpu-hog fault.
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: pod-cpu-hog
  version: 0.1.0
  annotations:
    categories: Kubernetes
    vendor: Harness
spec:
  displayName: Pod CPU Hog
  categoryDescription: |
    Pod-CPU-Hog fault consumes the CPU resources of specified containers in Kubernetes pods.
    - Causes CPU resource consumption on specified application containers using cgroups and litmus nsutil which consume CPU resources of the given target containers.
    - It can test the application's resilience to potential slowness/unavailability of some replicas due to high CPU load.
    - The application pod should be healthy once chaos is stopped. Expectation is that service-requests should be served despite chaos.
  keywords:
    - Kubernetes
  platforms:
    - GKE
    - Packet(Kubeadm)
    - Minikube
    - EKS
    - AKS
    - Kind
  # pre-defined experiments/workflows that use this fault
  experiments:
    - pod-cpu-hog
    - sock-shop
    - sock-shop-promProbe
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  icon:
    - base64data: ""
      mediatype: ""

View File

@@ -0,0 +1,35 @@
---
# ChaosEngine: sample runtime configuration that triggers the pod-delete
# fault. See the pod-delete ChaosExperiment for the full list of tunables.
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: default
spec:
  # target application selector; empty values must be filled in by the user
  appinfo:
    appns: ''
    applabel: ''
    appkind: ''
  # It can be active/stop
  engineState: 'active'
  chaosServiceAccount: pod-delete-sa
  experiments:
    - name: pod-delete
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '30'
            # set chaos interval (in sec) as desired
            - name: CHAOS_INTERVAL
              value: '10'
            # pod failures without '--force' & default terminationGracePeriodSeconds
            - name: FORCE
              value: 'false'
            ## percentage of total pods to target
            - name: PODS_AFFECTED_PERC
              value: ''

View File

@@ -0,0 +1,109 @@
---
# ChaosExperiment: pod-delete — deletes replicas of the target workload.
# `permissions` is the RBAC this experiment's service account needs;
# `env` holds the user-overridable defaults.
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    Deletes a pod belonging to a deployment/statefulset/daemonset
kind: ChaosExperiment
metadata:
  name: pod-delete
  labels:
    name: pod-delete
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Namespaced
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
      - apiGroups: ["apps"]
        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: ["apps.openshift.io"]
        resources: ["deploymentconfigs"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: [""]
        resources: ["replicationcontrollers"]
        verbs: ["get", "list"]
      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
      - apiGroups: ["argoproj.io"]
        resources: ["rollouts"]
        verbs: ["list", "get"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name pod-delete
    command:
      - /bin/bash
    env:
      - name: TOTAL_CHAOS_DURATION
        value: '15'
      # Period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ''
      # 'true' performs forced (immediate) deletion; the sample engine
      # overrides this to 'false' for graceful termination
      - name: FORCE
        value: 'true'
      - name: CHAOS_INTERVAL
        value: '5'
      ## percentage of total pods to target
      - name: PODS_AFFECTED_PERC
        value: ''
      - name: TARGET_CONTAINER
        value: ''
      - name: TARGET_PODS
        value: ''
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
      # To select pods on specific node(s)
      - name: NODE_LABEL
        value: ''
      ## it defines the sequence of chaos execution for multiple target pods
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: 'parallel'
    labels:
      name: pod-delete
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/version: ci

View File

@@ -0,0 +1,42 @@
---
# ChartServiceVersion: catalog metadata for the pod-delete fault.
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: pod-delete
  version: 0.1.0
  annotations:
    categories: Kubernetes
    vendor: Harness
spec:
  displayName: Pod Delete
  categoryDescription: |
    Pod delete fault disrupts the state of Kubernetes resources. This fault injects random pod delete failures against specified application.
    - Causes (forced/graceful) pod failure of random replicas of an application deployment.
    - Tests deployment sanity (replica availability & uninterrupted service) and recovery workflows of the application pod.
  keywords:
    - Kubernetes
  platforms:
    - GKE
    - Konvoy
    - Packet(Kubeadm)
    - Minikube
    - EKS
    - AKS
    - Kind
    - Rancher
    - OpenShift(OKD)
  # pre-defined experiments/workflows that use this fault
  experiments:
    - pod-delete
    - podtato-head
    - sock-shop
    - sock-shop-promProbe
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  icon:
    # explicit empty string (a bare `url:` parses as null; sibling charts use "")
    - url: ""
      mediatype: ""

View File

@@ -0,0 +1,43 @@
---
# ChaosEngine: sample runtime configuration that triggers the pod-dns-error
# fault. See the pod-dns-error ChaosExperiment for the full list of tunables.
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
spec:
  # target application selector; empty values must be filled in by the user
  appinfo:
    appns: ""
    applabel: ""
    appkind: ""
  # It can be active/stop
  engineState: "active"
  #ex. values: ns1:name=percona,ns2:run=nginx
  auxiliaryAppInfo: ""
  chaosServiceAccount: pod-dns-error-sa
  experiments:
    - name: pod-dns-error
      spec:
        components:
          env:
            - name: TOTAL_CHAOS_DURATION
              value: "60" # in seconds
            # list of the target hostnames or keywords eg. '["litmuschaos","chaosnative.io"]' . If empty all hostnames are targets
            - name: TARGET_HOSTNAMES
              value: ""
            # can be either exact or substring, determines whether the DNS query has to match exactly with one of the targets or can have any of the targets as substring
            - name: MATCH_SCHEME
              value: "exact"
            # provide the name of container runtime, it supports docker, containerd, crio
            - name: CONTAINER_RUNTIME
              value: 'containerd'
            # provide the socket file path
            - name: SOCKET_PATH
              value: '/run/containerd/containerd.sock'
            ## percentage of total pods to target
            - name: PODS_AFFECTED_PERC
              value: ""

View File

@@ -0,0 +1,119 @@
---
# ChaosExperiment: pod-dns-error — injects DNS failures into target pod
# containers. `permissions` is the RBAC the experiment needs; `env` holds
# the user-overridable defaults.
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    Pod DNS Error injects DNS failure/error in target pod containers
kind: ChaosExperiment
metadata:
  name: pod-dns-error
  labels:
    name: pod-dns-error
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Namespaced
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
      - apiGroups: ["apps"]
        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: ["apps.openshift.io"]
        resources: ["deploymentconfigs"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: [""]
        resources: ["replicationcontrollers"]
        verbs: ["get", "list"]
      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
      - apiGroups: ["argoproj.io"]
        resources: ["rollouts"]
        verbs: ["list", "get"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
    image: "chaosnative/go-runner:ci"
    # added for consistency with the sibling experiments; the mutable `ci`
    # tag would otherwise default to IfNotPresent and serve stale images
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name pod-dns-error
    command:
      - /bin/bash
    env:
      - name: TARGET_CONTAINER
        value: ""
      # provide lib image
      - name: LIB_IMAGE
        value: "chaosnative/go-runner:ci"
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
      - name: TOTAL_CHAOS_DURATION
        value: "60" # in seconds
      # Time period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ""
      ## percentage of total pods to target
      - name: PODS_AFFECTED_PERC
        value: ""
      - name: TARGET_PODS
        value: ""
      # provide the name of container runtime, it supports docker, containerd, crio
      - name: CONTAINER_RUNTIME
        value: 'containerd'
      # provide the socket file path
      - name: SOCKET_PATH
        value: '/run/containerd/containerd.sock'
      ## it defines the sequence of chaos execution for multiple target pods
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: "parallel"
      # list of the target hostnames or keywords eg. '["litmuschaos","chaosnative.io"]' . If empty all hostnames are targets
      - name: TARGET_HOSTNAMES
        value: ""
      # can be either exact or substring, determines whether the DNS query has to match exactly with one of the targets or can have any of the targets as substring
      - name: MATCH_SCHEME
        value: "exact"
    labels:
      # NOTE(review): sibling experiments use the `name:` label key here;
      # confirm whether downstream selectors expect `experiment:` before unifying
      experiment: pod-dns-error
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/runtime-api-usage: "true"
      app.kubernetes.io/version: ci

View File

@@ -0,0 +1,24 @@
---
# ChartServiceVersion: catalog metadata for the pod-dns-error fault.
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: pod-dns-error
  version: 0.1.0
  annotations:
    # capitalized to match the sibling chart metadata (e.g. pod-delete)
    categories: Kubernetes
spec:
  displayName: Pod DNS Error
  categoryDescription: >
    Pod DNS Error fault injects DNS failure/error in target pod containers.
  keywords:
    - Kubernetes
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  icon:
    - base64data: ""
      mediatype: ""

View File

@@ -0,0 +1,39 @@
---
# ChaosEngine: sample runtime configuration that triggers the pod-dns-spoof
# fault. See the pod-dns-spoof ChaosExperiment for the full list of tunables.
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
spec:
  # target application selector; empty values must be filled in by the user
  appinfo:
    appns: ""
    applabel: ""
    appkind: ""
  # It can be active/stop
  engineState: "active"
  #ex. values: ns1:name=percona,ns2:run=nginx
  auxiliaryAppInfo: ""
  chaosServiceAccount: pod-dns-spoof-sa
  experiments:
    - name: pod-dns-spoof
      spec:
        components:
          env:
            - name: TOTAL_CHAOS_DURATION
              value: "60" # in seconds
            # map of the target hostnames eg. '{"abc.com":"spoofabc.com"}' . If empty no queries will be spoofed
            - name: SPOOF_MAP
              value: '{"google.com":"fakegoogle.com"}'
            # provide the name of container runtime, it supports docker, containerd, crio
            - name: CONTAINER_RUNTIME
              value: 'containerd'
            # provide the socket file path
            - name: SOCKET_PATH
              value: '/run/containerd/containerd.sock'
            ## percentage of total pods to target
            - name: PODS_AFFECTED_PERC
              value: ""

View File

@@ -0,0 +1,115 @@
---
# ChaosExperiment: pod-dns-spoof — redirects selected DNS queries in target
# pod containers to spoofed hostnames. `permissions` is the RBAC the
# experiment needs; `env` holds the user-overridable defaults.
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    Pod DNS Spoof can spoof particular DNS requests in target pod container to desired target hostnames
kind: ChaosExperiment
metadata:
  name: pod-dns-spoof
  labels:
    name: pod-dns-spoof
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Namespaced
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
      - apiGroups: ["apps"]
        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: ["apps.openshift.io"]
        resources: ["deploymentconfigs"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: [""]
        resources: ["replicationcontrollers"]
        verbs: ["get", "list"]
      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
      - apiGroups: ["argoproj.io"]
        resources: ["rollouts"]
        verbs: ["list", "get"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
    image: "chaosnative/go-runner:ci"
    # added for consistency with the sibling experiments; the mutable `ci`
    # tag would otherwise default to IfNotPresent and serve stale images
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name pod-dns-spoof
    command:
      - /bin/bash
    env:
      - name: TARGET_CONTAINER
        value: ""
      # provide lib image
      - name: LIB_IMAGE
        value: "chaosnative/go-runner:ci"
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
      - name: TOTAL_CHAOS_DURATION
        value: "60" # in seconds
      # Time period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ""
      ## percentage of total pods to target
      - name: PODS_AFFECTED_PERC
        value: ""
      - name: TARGET_PODS
        value: ""
      # provide the name of container runtime, it supports docker, containerd, crio
      - name: CONTAINER_RUNTIME
        value: 'containerd'
      # provide the socket file path
      - name: SOCKET_PATH
        value: '/run/containerd/containerd.sock'
      ## it defines the sequence of chaos execution for multiple target pods
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: "parallel"
      # map of the target hostnames eg. '{"abc.com":"spoofabc.com"}' . If empty no queries will be spoofed
      - name: SPOOF_MAP
        value: ""
    labels:
      # NOTE(review): sibling experiments use the `name:` label key here;
      # confirm whether downstream selectors expect `experiment:` before unifying
      experiment: pod-dns-spoof
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/runtime-api-usage: "true"
      app.kubernetes.io/version: ci

View File

@@ -0,0 +1,24 @@
---
# ChartServiceVersion: catalog metadata for the pod-dns-spoof fault.
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: pod-dns-spoof
  version: 0.1.0
  annotations:
    # capitalized to match the sibling chart metadata (e.g. pod-delete)
    categories: Kubernetes
spec:
  displayName: Pod DNS Spoof
  categoryDescription: >
    Pod DNS Spoof fault spoofs particular DNS requests in a target pod container to desired target hostnames.
  keywords:
    - Kubernetes
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  icon:
    - base64data: ""
      mediatype: ""

View File

@@ -0,0 +1,43 @@
---
# ChaosEngine: sample runtime configuration that triggers the
# pod-http-latency fault. See the pod-http-latency ChaosExperiment for
# the full list of tunables.
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-http-chaos
  namespace: default
spec:
  # It can be active/stop
  engineState: 'active'
  appinfo:
    appns: ''
    # FYI, To see app label, apply kubectl get pods --show-labels
    applabel: ''
    appkind: ''
  chaosServiceAccount: pod-http-latency-sa
  experiments:
    - name: pod-http-latency
      spec:
        components:
          env:
            - name: TOTAL_CHAOS_DURATION
              value: '60' # in seconds
            # delay added to each HTTP response
            - name: LATENCY
              value: '2000' #in ms
            # port of the target service
            - name: TARGET_SERVICE_PORT
              value: "80"
            # provide the name of container runtime
            # it supports docker, containerd, crio
            - name: CONTAINER_RUNTIME
              value: 'containerd'
            # provide the socket file path
            - name: SOCKET_PATH
              value: '/run/containerd/containerd.sock'
            ## percentage of total pods to target
            - name: PODS_AFFECTED_PERC
              value: ''

View File

@@ -0,0 +1,139 @@
---
# ChaosExperiment: pod-http-latency — delays HTTP responses of the target
# service via a proxy. `permissions` is the RBAC the experiment needs;
# `env` holds the user-overridable defaults.
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    Injects HTTP request latency on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
  name: pod-http-latency
  labels:
    name: pod-http-latency
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Namespaced
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
      - apiGroups: ["apps"]
        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: ["apps.openshift.io"]
        resources: ["deploymentconfigs"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: [""]
        resources: ["replicationcontrollers"]
        verbs: ["get", "list"]
      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
      - apiGroups: ["argoproj.io"]
        resources: ["rollouts"]
        verbs: ["list", "get"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name pod-http-latency
    command:
      - /bin/bash
    env:
      - name: TARGET_CONTAINER
        value: ''
      # provide lib image
      - name: LIB_IMAGE
        value: 'chaosnative/go-runner:ci'
      # delay added to each HTTP response
      - name: LATENCY
        value: '2000' #in ms
      # port of the target service
      - name: TARGET_SERVICE_PORT
        value: "80"
      # toxicity is the probability of the request to be affected
      # provide the percentage value in the range of 0-100
      # 0 means no request will be affected and 100 means all request will be affected
      - name: TOXICITY
        value: "100"
      # port on which the proxy will listen
      - name: PROXY_PORT
        value: "20000"
      # network interface on which the proxy will listen
      - name: NETWORK_INTERFACE
        value: "eth0"
      - name: TOTAL_CHAOS_DURATION
        value: '60' # in seconds
      # Time period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ''
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
      # percentage of total pods to target
      - name: PODS_AFFECTED_PERC
        value: ''
      - name: TARGET_PODS
        value: ''
      # provide the name of container runtime
      # for litmus LIB, it supports docker, containerd, crio
      # for pumba LIB, it supports docker only
      - name: CONTAINER_RUNTIME
        value: 'containerd'
      # provide the socket file path
      - name: SOCKET_PATH
        value: '/run/containerd/containerd.sock'
      # To select pods on specific node(s)
      - name: NODE_LABEL
        value: ''
      ## it defines the sequence of chaos execution for multiple target pods
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: 'parallel'
    labels:
      name: pod-http-latency
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/runtime-api-usage: "true"
      app.kubernetes.io/version: ci

View File

@@ -0,0 +1,31 @@
---
# ChartServiceVersion: catalog metadata for the pod-http-latency fault.
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: pod-http-latency
  version: 0.1.0
  annotations:
    categories: Kubernetes
    vendor: Harness
spec:
  displayName: Pod HTTP Latency
  categoryDescription: |
    Pod-http-latency fault disrupts HTTP requests of kubernetes pods. This fault can inject random HTTP response delays on the app replica pods.
    - Causes flaky access to application replica by injecting HTTP response delay using toxiproxy.
    - The application pod should be healthy once chaos is stopped. Service-requests should be served despite chaos.
  keywords:
    - Kubernetes
  platforms:
    - GKE
    - Minikube
    - EKS
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  icon:
    - base64data: ""
      mediatype: ""

View File

@@ -0,0 +1,43 @@
---
# ChaosEngine: sample runtime configuration that triggers the
# pod-http-modify-body fault. See the pod-http-modify-body ChaosExperiment
# for the full list of tunables.
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
spec:
  # It can be active/stop
  engineState: 'active'
  appinfo:
    appns: ''
    applabel: ''
    appkind: ''
  chaosServiceAccount: pod-http-modify-body-sa
  experiments:
    - name: pod-http-modify-body
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '60' # in seconds
            # provide the body string to overwrite the response body
            - name: RESPONSE_BODY
              value: ''
            # port of the target service
            - name: TARGET_SERVICE_PORT
              value: "80"
            # provide the name of container runtime
            # it supports docker, containerd, crio
            - name: CONTAINER_RUNTIME
              value: 'containerd'
            # provide the socket file path
            - name: SOCKET_PATH
              value: '/run/containerd/containerd.sock'
            ## percentage of total pods to target
            - name: PODS_AFFECTED_PERC
              value: ''

View File

@@ -0,0 +1,150 @@
---
# ChaosExperiment: pod-http-modify-body — rewrites HTTP response bodies of
# the target service via a proxy. `permissions` is the RBAC the experiment
# needs; `env` holds the user-overridable defaults.
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    It injects the chaos inside the pod which modifies the body of the response from the provided application server to the body string provided by the user and reverts after a specified duration
kind: ChaosExperiment
metadata:
  name: pod-http-modify-body
  labels:
    name: pod-http-modify-body
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Namespaced
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
      - apiGroups: ["apps"]
        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: ["apps.openshift.io"]
        resources: ["deploymentconfigs"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: [""]
        resources: ["replicationcontrollers"]
        verbs: ["get", "list"]
      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
      - apiGroups: ["argoproj.io"]
        resources: ["rollouts"]
        verbs: ["list", "get"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name pod-http-modify-body
    command:
      - /bin/bash
    env:
      - name: TARGET_CONTAINER
        value: ''
      # provide lib image
      - name: LIB_IMAGE
        value: 'chaosnative/go-runner:ci'
      # provide the body string to overwrite the response body
      # if no value is provided, response will be an empty body.
      - name: RESPONSE_BODY
        value: ''
      # provide the encoding type for the response body
      # currently supported value are gzip, deflate
      # if empty no encoding will be applied
      - name: CONTENT_ENCODING
        value: ''
      # provide the content type for the response body
      - name: CONTENT_TYPE
        value: 'text/plain'
      # port of the target service
      - name: TARGET_SERVICE_PORT
        value: "80"
      # toxicity is the probability of the request to be affected
      # provide the percentage value in the range of 0-100
      # 0 means no request will be affected and 100 means all request will be affected
      - name: TOXICITY
        value: "100"
      # port on which the proxy will listen
      - name: PROXY_PORT
        value: "20000"
      # network interface on which the proxy will listen
      - name: NETWORK_INTERFACE
        value: "eth0"
      - name: TOTAL_CHAOS_DURATION
        value: '60' # in seconds
      # Time period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ''
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
      # percentage of total pods to target
      - name: PODS_AFFECTED_PERC
        value: ''
      - name: TARGET_PODS
        value: ''
      # provide the name of container runtime
      # for litmus LIB, it supports docker, containerd, crio
      # for pumba LIB, it supports docker only
      - name: CONTAINER_RUNTIME
        value: 'containerd'
      # provide the socket file path
      - name: SOCKET_PATH
        value: '/run/containerd/containerd.sock'
      # To select pods on specific node(s)
      - name: NODE_LABEL
        value: ''
      ## it defines the sequence of chaos execution for multiple target pods
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: 'parallel'
    labels:
      name: pod-http-modify-body
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/version: ci

View File

@@ -0,0 +1,28 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: pod-http-modify-body
  version: 0.1.0
  annotations:
    # capitalized to match the sibling chartserviceversions in this chart
    categories: Kubernetes
spec:
  displayName: Pod HTTP Modify Body
  categoryDescription: >
    Pod-http-modify-body fault disrupts HTTP requests of kubernetes pods. This fault can modify the body of the response from the service targeted.
    - Causes modification of response body of the HTTP request.
    - The application pod should be healthy once chaos is stopped. Service-requests should be served despite chaos.
  keywords:
    - Kubernetes
  platforms:
    - "Minikube"
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  icon:
    - base64data: ""
      mediatype: ""

View File

@@ -0,0 +1,49 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-http-chaos
  namespace: default
spec:
  # It can be active/stop
  engineState: 'active'
  appinfo:
    appns: ''
    # FYI, To see app label, apply kubectl get pods --show-labels
    applabel: ''
    appkind: ''
  chaosServiceAccount: pod-http-modify-header-sa
  experiments:
    - name: pod-http-modify-header
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '60' # in seconds
            # port of the target service
            - name: TARGET_SERVICE_PORT
              value: "80"
            # map of headers to modify/add; Eg: {"X-Litmus-Test-Header": "X-Litmus-Test-Value"}
            # to remove a header, just set the value to ""; Eg: {"X-Litmus-Test-Header": ""}
            - name: HEADERS_MAP
              value: '{}'
            # whether to modify response headers or request headers. Accepted values: request, response
            - name: HEADER_MODE
              value: 'response'
            # provide the name of container runtime
            # it supports docker, containerd, crio
            - name: CONTAINER_RUNTIME
              value: 'containerd'
            # provide the socket file path
            - name: SOCKET_PATH
              value: '/run/containerd/containerd.sock'
            ## percentage of total pods to target
            - name: PODS_AFFECTED_PERC
              value: ''

View File

@@ -0,0 +1,145 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    It injects the chaos inside the pod which modifies the header of the request/response from the provided application server to the headers provided by the user and reverts after a specified duration
kind: ChaosExperiment
metadata:
  name: pod-http-modify-header
  labels:
    name: pod-http-modify-header
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Namespaced
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
      - apiGroups: ["apps"]
        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: ["apps.openshift.io"]
        resources: ["deploymentconfigs"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is replicationController)
      - apiGroups: [""]
        resources: ["replicationcontrollers"]
        verbs: ["get", "list"]
      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
      - apiGroups: ["argoproj.io"]
        resources: ["rollouts"]
        verbs: ["list", "get"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name pod-http-modify-header
    command:
      - /bin/bash
    env:
      - name: TARGET_CONTAINER
        value: ''
      # provide lib image
      - name: LIB_IMAGE
        value: 'chaosnative/go-runner:ci'
      # map of headers to modify/add; Eg: {"X-Litmus-Test-Header": "X-Litmus-Test-Value"}
      # to remove a header, just set the value to ""; Eg: {"X-Litmus-Test-Header": ""}
      - name: HEADERS_MAP
        value: '{}'
      # whether to modify response headers or request headers. Accepted values: request, response
      - name: HEADER_MODE
        value: 'response'
      # port of the target service
      - name: TARGET_SERVICE_PORT
        value: "80"
      # toxicity is the probability of the request to be affected
      # provide the percentage value in the range of 0-100
      # 0 means no request will be affected and 100 means all request will be affected
      - name: TOXICITY
        value: "100"
      # port on which the proxy will listen
      - name: PROXY_PORT
        value: "20000"
      # network interface on which the proxy will listen
      - name: NETWORK_INTERFACE
        value: "eth0"
      - name: TOTAL_CHAOS_DURATION
        value: '60' # in seconds
      # Time period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ''
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
      # percentage of total pods to target
      - name: PODS_AFFECTED_PERC
        value: ''
      - name: TARGET_PODS
        value: ''
      # provide the name of container runtime
      # for litmus LIB, it supports docker, containerd, crio
      # for pumba LIB, it supports docker only
      - name: CONTAINER_RUNTIME
        value: 'containerd'
      # provide the socket file path
      - name: SOCKET_PATH
        value: '/run/containerd/containerd.sock'
      # To select pods on specific node(s)
      - name: NODE_LABEL
        value: ''
      ## it defines the sequence of chaos execution for multiple target pods
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: 'parallel'
    labels:
      name: pod-http-modify-header
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/runtime-api-usage: "true"
      app.kubernetes.io/version: ci

View File

@@ -0,0 +1,31 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: pod-http-modify-header
  version: 0.1.0
  annotations:
    categories: Kubernetes
    vendor: CNCF
spec:
  displayName: Pod HTTP Modify Header
  categoryDescription: |
    Pod-http-modify-header fault disrupts HTTP requests of kubernetes pods. This fault can modify headers of incoming requests or the response from the service targeted.
    - Causes modification of request/response headers of the HTTP request.
    - The application pod should be healthy once chaos is stopped. Service-requests should be served despite chaos.
  keywords:
    - Kubernetes
  platforms:
    - GKE
    - Minikube
    - EKS
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  icon:
    - base64data: ""
      mediatype: ""
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category

View File

@@ -0,0 +1,49 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-http-chaos
  namespace: default
spec:
  # It can be active/stop
  engineState: 'active'
  appinfo:
    appns: ''
    # FYI, To see app label, apply kubectl get pods --show-labels
    applabel: ''
    appkind: ''
  chaosServiceAccount: pod-http-reset-peer-sa
  experiments:
    - name: pod-http-reset-peer
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '60' # in seconds
            # reset timeout specifies after how much duration to reset the connection
            - name: RESET_TIMEOUT
              value: '0' #in ms
            # port of the target service
            - name: TARGET_SERVICE_PORT
              value: "80"
            ## specify the comma separated target pods
            - name: TARGET_PODS
              value: ''
            # provide the name of container runtime
            # for litmus LIB, it supports docker, containerd, crio
            # for pumba LIB, it supports docker only
            - name: CONTAINER_RUNTIME
              value: 'containerd'
            # provide the socket file path
            - name: SOCKET_PATH
              value: '/run/containerd/containerd.sock'
            ## percentage of total pods to target
            - name: PODS_AFFECTED_PERC
              value: ''

View File

@@ -0,0 +1,139 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    It injects chaos into the pod which stops outgoing HTTP requests by resetting the TCP connection and then reverts back to the original state after a specified duration
kind: ChaosExperiment
metadata:
  name: pod-http-reset-peer
  labels:
    name: pod-http-reset-peer
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Namespaced
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
      - apiGroups: ["apps"]
        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: ["apps.openshift.io"]
        resources: ["deploymentconfigs"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is replicationController)
      - apiGroups: [""]
        resources: ["replicationcontrollers"]
        verbs: ["get", "list"]
      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
      - apiGroups: ["argoproj.io"]
        resources: ["rollouts"]
        verbs: ["list", "get"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name pod-http-reset-peer
    command:
      - /bin/bash
    env:
      - name: TARGET_CONTAINER
        value: ''
      # provide lib image
      - name: LIB_IMAGE
        value: 'chaosnative/go-runner:ci'
      # reset timeout specifies after how much duration to reset the connection
      - name: RESET_TIMEOUT
        value: '0' #in ms
      # port of the target service
      - name: TARGET_SERVICE_PORT
        value: "80"
      # toxicity is the probability of the request to be affected
      # provide the percentage value in the range of 0-100
      # 0 means no request will be affected and 100 means all request will be affected
      - name: TOXICITY
        value: "100"
      # port on which the proxy will listen
      - name: PROXY_PORT
        value: "20000"
      # network interface on which the proxy will listen
      - name: NETWORK_INTERFACE
        value: "eth0"
      - name: TOTAL_CHAOS_DURATION
        value: '60' # in seconds
      # Time period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ''
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
      # percentage of total pods to target
      - name: PODS_AFFECTED_PERC
        value: ''
      - name: TARGET_PODS
        value: ''
      # provide the name of container runtime
      # for litmus LIB, it supports docker, containerd, crio
      # for pumba LIB, it supports docker only
      - name: CONTAINER_RUNTIME
        value: 'containerd'
      # provide the socket file path
      - name: SOCKET_PATH
        value: '/run/containerd/containerd.sock'
      # To select pods on specific node(s)
      - name: NODE_LABEL
        value: ''
      ## it defines the sequence of chaos execution for multiple target pods
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: 'parallel'
    labels:
      name: pod-http-reset-peer
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      # experiment uses the container runtime API (CONTAINER_RUNTIME/SOCKET_PATH),
      # so it carries the runtime-api-usage label like its sibling experiments
      app.kubernetes.io/runtime-api-usage: "true"
      app.kubernetes.io/version: ci

View File

@@ -0,0 +1,28 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: pod-http-reset-peer
  version: 0.1.0
  annotations:
    # capitalized to match the sibling chartserviceversions in this chart
    categories: Kubernetes
spec:
  displayName: Pod HTTP Reset Peer
  categoryDescription: >
    pod-http-reset-peer fault disrupts HTTP requests of kubernetes pods. This fault can stop outgoing HTTP requests by resetting the TCP connection on the service targeted.
    - Causes connection failure (connection reset by peer) of the HTTP request.
    - The application pod should be healthy once chaos is stopped. Service-requests should be served despite chaos.
  keywords:
    - Kubernetes
  platforms:
    - "Minikube"
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  icon:
    - base64data: ""
      mediatype: ""

View File

@@ -0,0 +1,54 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
spec:
  # It can be active/stop
  engineState: 'active'
  appinfo:
    appns: ''
    applabel: ''
    appkind: ''
  chaosServiceAccount: pod-http-status-code-sa
  experiments:
    - name: pod-http-status-code
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '30'
            # set chaos interval (in sec) as desired
            # NOTE(review): CHAOS_INTERVAL is not declared in the pod-http-status-code
            # ChaosExperiment env list — verify it is actually consumed by this fault
            - name: CHAOS_INTERVAL
              value: '10'
            # modified status code for the HTTP response
            # if no value is provided, a random status code from the supported code list will be selected
            # if an invalid status code is provided, the experiment will fail
            # supported status code list: [200, 201, 202, 204, 300, 301, 302, 304, 307, 400, 401, 403, 404, 500, 501, 502, 503, 504]
            - name: STATUS_CODE
              value: ''
            # whether to modify the body as per the status code provided
            - name: "MODIFY_RESPONSE_BODY"
              value: "true"
            # port of the target service
            - name: TARGET_SERVICE_PORT
              value: "80"
            # provide the name of container runtime
            # it supports docker, containerd, crio
            - name: CONTAINER_RUNTIME
              value: 'containerd'
            # provide the socket file path
            - name: SOCKET_PATH
              value: '/run/containerd/containerd.sock'
            ## percentage of total pods to target
            - name: PODS_AFFECTED_PERC
              value: ''

View File

@@ -0,0 +1,160 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    It injects chaos inside the pod which modifies the status code of the response from the provided application server to desired status code provided by the user and reverts after a specified duration
kind: ChaosExperiment
metadata:
  name: pod-http-status-code
  labels:
    name: pod-http-status-code
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Namespaced
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
      - apiGroups: ["apps"]
        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: ["apps.openshift.io"]
        resources: ["deploymentconfigs"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is replicationController)
      - apiGroups: [""]
        resources: ["replicationcontrollers"]
        verbs: ["get", "list"]
      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
      - apiGroups: ["argoproj.io"]
        resources: ["rollouts"]
        verbs: ["list", "get"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name pod-http-status-code
    command:
      - /bin/bash
    env:
      - name: TARGET_CONTAINER
        value: ''
      # provide lib image
      - name: LIB_IMAGE
        value: 'chaosnative/go-runner:ci'
      # modified status code for the HTTP response
      # if no value is provided, a random status code from the supported code list will be selected
      # if an invalid status code is provided, the experiment will fail
      # supported status code list: [200, 201, 202, 204, 300, 301, 302, 304, 307, 400, 401, 403, 404, 500, 501, 502, 503, 504]
      - name: STATUS_CODE
        value: ''
      # whether to modify the body as per the status code provided
      - name: "MODIFY_RESPONSE_BODY"
        value: "true"
      # provide the body string to overwrite the response body. This will be used only if MODIFY_RESPONSE_BODY is set to true
      - name: RESPONSE_BODY
        value: ''
      # provide the encoding type for the response body
      # currently supported value are gzip, deflate
      # if empty no encoding will be applied
      - name: CONTENT_ENCODING
        value: ''
      # provide the content type for the response body
      - name: CONTENT_TYPE
        value: 'text/plain'
      # port of the target service
      - name: TARGET_SERVICE_PORT
        value: "80"
      # toxicity is the probability of the request to be affected
      # provide the percentage value in the range of 0-100
      # 0 means no request will be affected and 100 means all request will be affected
      - name: TOXICITY
        value: "100"
      # port on which the proxy will listen
      - name: PROXY_PORT
        value: "20000"
      # network interface on which the proxy will listen
      - name: NETWORK_INTERFACE
        value: "eth0"
      - name: TOTAL_CHAOS_DURATION
        value: '60' # in seconds
      # Time period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ''
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
      # percentage of total pods to target
      - name: PODS_AFFECTED_PERC
        value: ''
      - name: TARGET_PODS
        value: ''
      # provide the name of container runtime
      # for litmus LIB, it supports docker, containerd, crio
      # for pumba LIB, it supports docker only
      - name: CONTAINER_RUNTIME
        value: 'containerd'
      # provide the socket file path
      - name: SOCKET_PATH
        value: '/run/containerd/containerd.sock'
      # To select pods on specific node(s)
      - name: NODE_LABEL
        value: ''
      ## it defines the sequence of chaos execution for multiple target pods
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: 'parallel'
    labels:
      name: pod-http-status-code
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      # experiment uses the container runtime API (CONTAINER_RUNTIME/SOCKET_PATH),
      # so it carries the runtime-api-usage label like its sibling experiments
      app.kubernetes.io/runtime-api-usage: "true"
      app.kubernetes.io/version: ci

View File

@@ -0,0 +1,28 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: pod-http-status-code
  version: 0.1.0
  annotations:
    # capitalized to match the sibling chartserviceversions in this chart
    categories: Kubernetes
spec:
  displayName: Pod HTTP Status Code
  categoryDescription: >
    pod-http-status-code fault disrupts HTTP requests of kubernetes pods. This fault can modify the status code of the response on the service targeted.
    - Causes modification of status code of the HTTP request.
    - The application pod should be healthy once chaos is stopped. Service-requests should be served despite chaos.
  keywords:
    - Kubernetes
  platforms:
    - "Minikube"
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  icon:
    - base64data: ""
      mediatype: ""

View File

@@ -0,0 +1,39 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: default
spec:
  # It can be active/stop
  engineState: 'active'
  appinfo:
    appns: ''
    applabel: ''
    appkind: ''
  chaosServiceAccount: pod-io-stress-sa
  experiments:
    - name: pod-io-stress
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '120'
            ## specify the size as percentage of free space on the file system
            - name: FILESYSTEM_UTILIZATION_PERCENTAGE
              value: '10'
            ## Percentage of total pods to target
            - name: PODS_AFFECTED_PERC
              value: ''
            ## provide the cluster runtime
            # it supports docker, containerd, crio
            - name: CONTAINER_RUNTIME
              value: 'containerd'
            # provide the socket file path
            - name: SOCKET_PATH
              value: '/run/containerd/containerd.sock'

View File

@@ -0,0 +1,135 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    IO stress on the app pods belonging to an app deployment
kind: ChaosExperiment
metadata:
  name: pod-io-stress
  labels:
    name: pod-io-stress
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Namespaced
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmaps details and mount it to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pods log
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing to execute commands inside target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
      - apiGroups: ["apps"]
        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
      - apiGroups: ["apps.openshift.io"]
        resources: ["deploymentconfigs"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod(if parent is replicationController)
      - apiGroups: [""]
        resources: ["replicationcontrollers"]
        verbs: ["get", "list"]
      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
      - apiGroups: ["argoproj.io"]
        resources: ["rollouts"]
        verbs: ["list", "get"]
      # for configuring and monitor the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name pod-io-stress
    command:
      - /bin/bash
    env:
      - name: TOTAL_CHAOS_DURATION
        value: '120'
      ## specify the size as percentage of free space on the file system
      ## default value 90 (in percentage)
      - name: FILESYSTEM_UTILIZATION_PERCENTAGE
        value: '10'
      ## we can specify the size in Gigabyte (Gb) also in place of percentage of free space
      ## NOTE: for selecting this option FILESYSTEM_UTILIZATION_PERCENTAGE should be empty
      - name: FILESYSTEM_UTILIZATION_BYTES
        value: ''
      ## Total number of workers default value is 4
      - name: NUMBER_OF_WORKERS
        value: '4'
      ## Percentage of total pods to target
      - name: PODS_AFFECTED_PERC
        value: ''
      # provide volume mount path
      - name: VOLUME_MOUNT_PATH
        value: ''
      - name: TARGET_CONTAINER
        value: ''
      ## specify the comma separated target pods
      - name: TARGET_PODS
        value: ''
      # To select pods on specific node(s)
      - name: NODE_LABEL
        value: ''
      # Period to wait before and after injection of chaos in sec
      - name: RAMP_TIME
        value: ''
      # provide lib image
      - name: LIB_IMAGE
        value: 'chaosnative/go-runner:ci'
      - name: DEFAULT_HEALTH_CHECK
        value: 'false'
      ## provide the cluster runtime
      - name: CONTAINER_RUNTIME
        value: 'containerd'
      # provide the socket file path
      - name: SOCKET_PATH
        value: '/run/containerd/containerd.sock'
      ## it defines the sequence of chaos execution for multiple target pods
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: 'parallel'
    labels:
      name: pod-io-stress
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/runtime-api-usage: "true"
      app.kubernetes.io/version: ci

Some files were not shown because too many files have changed in this diff Show More