refactors directory and file structure

Signed-off-by: neelanjan00 <neelanjan.manna@harness.io>
neelanjan00
2023-06-05 13:15:56 +05:30
parent 9bf371a651
commit ae8467237a
840 changed files with 6787 additions and 43442 deletions


@@ -0,0 +1,32 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2022-07-26T10:28:08Z
name: aws-az-chaos
version: 0.1.0
annotations:
categories: Kube AWS
vendor: Harness
spec:
displayName: AWS AZ Chaos
categoryDescription: |
Execute AWS AZ chaos to detach the target zones from the load balancer for a certain chaos duration.
keywords:
- AWS
platforms:
- GKE
- Packet(Kubeadm)
- Minikube
- EKS
- AKS
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""


@@ -0,0 +1,28 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: az-chaos-engine
namespace: default
spec:
engineState: 'active'
chaosServiceAccount: litmus-admin
experiments:
- name: aws-az-chaos
spec:
components:
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# LOAD_BALANCER_NAME: name of the load balancer
- name: LOAD_BALANCER_NAME
value: ""
# LOAD_BALANCER_ZONES: can be multiple, provided as comma-separated values
- name: LOAD_BALANCER_ZONES
value: ""
- name: LOAD_BALANCERNAME_ARN
value: "na"
- name: AWS_SHARED_CREDENTIALS_FILE
value: "/tmp/cloud_config.yml"


@@ -0,0 +1,77 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It injects AZ chaos to detach the target zones from the load balancer.
kind: ChaosExperiment
metadata:
name: aws-az-chaos
labels:
name: aws-az-chaos
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for executing commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/py-runner:latest"
imagePullPolicy: Always
args:
- -c
- python3 -u experiment -name aws-az-chaos
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: LIB
value: 'litmus'
- name: LOAD_BALANCER_NAME
value: ''
- name: LOAD_BALANCER_ZONES
value: ''
- name: LOAD_BALANCERNAME_ARN
value: 'na'
- name: AWS_SHARED_CREDENTIALS_FILE
value: "/tmp/cloud_config.yml"
- name: RAMP_TIME
value: ''
labels:
name: aws-az-chaos
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
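
The env entries in this ChaosExperiment are defaults; a ChaosEngine that references it (such as az-chaos-engine above) overrides them per run via spec.experiments[].spec.components.env. A minimal sketch with an illustrative duration override:

---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: az-chaos-engine
  namespace: default
spec:
  engineState: 'active'
  chaosServiceAccount: litmus-admin
  experiments:
    - name: aws-az-chaos
      spec:
        components:
          env:
            # overrides the experiment's default of '30'
            - name: TOTAL_CHAOS_DURATION
              value: '60'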


@@ -0,0 +1,30 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2022-07-27T10:28:08Z
name: aws-ssm-chaos-by-id
version: 0.1.0
annotations:
categories: AWS
vendor: Harness
spec:
displayName: AWS SSM Chaos By ID
categoryDescription: |
AWS SSM Chaos By ID fault disrupts the state of infra resources. The fault induces chaos on AWS resources using the Amazon SSM Run Command, carried out via SSM Docs that define the actions Systems Manager performs on your managed instances (with the SSM agent installed), letting us run chaos faults on the resources.
- Causes chaos on AWS EC2 instances with the given instance ID(s) using SSM docs for the total chaos duration with the specified chaos interval.
- Tests deployment sanity (replica availability & uninterrupted service) and recovery workflows of the target application pod (if provided).
keywords:
- AWS
platforms:
- AWS
chaosType: infra
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""


@@ -0,0 +1,39 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: aws-chaos
namespace: default
spec:
engineState: 'active'
chaosServiceAccount: litmus-admin
experiments:
- name: aws-ssm-chaos-by-id
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '60'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '60'
# Instance ID of the target EC2 instance
# Multiple IDs can also be provided as comma separated values ex: id1,id2
- name: EC2_INSTANCE_ID
value: ''
# provide the region name of the target instances
- name: REGION
value: ''
# provide the percentage of available memory to stress
- name: MEMORY_PERCENTAGE
value: '80'
# provide the CPU cores to be consumed
# 0 will consume all the available CPU cores
- name: CPU_CORE
value: '0'


@@ -0,0 +1,128 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Execute AWS SSM Chaos on given EC2 instance IDs
kind: ChaosExperiment
metadata:
name: aws-ssm-chaos-by-id
labels:
name: aws-ssm-chaos-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for executing commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name aws-ssm-chaos-by-id
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '60'
- name: CHAOS_INTERVAL
value: '60'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# Instance ID of the target EC2 instance
# Multiple IDs can also be provided as comma separated values ex: id1,id2
- name: EC2_INSTANCE_ID
value: ''
- name: REGION
value: ''
# it defines the sequence of chaos execution for multiple target instances
# supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
# Provide the path of AWS credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
# provide the number of workers for memory stress
- name: NUMBER_OF_WORKERS
value: '1'
# provide the percentage of available memory to stress
- name: MEMORY_PERCENTAGE
value: '80'
# provide the CPU cores to be consumed
# 0 will consume all the available CPU cores
- name: CPU_CORE
value: '0'
- name: DEFAULT_HEALTH_CHECK
value: 'false'
# Provide the name of SSM doc
# if not using the default stress docs
- name: DOCUMENT_NAME
value: 'LitmusChaos-AWS-SSM-Doc'
# Provide the type of SSM doc
# if not using the default stress docs
- name: DOCUMENT_TYPE
value: 'Command'
# Provide the format of SSM doc
# if not using the default stress docs
- name: DOCUMENT_FORMAT
value: 'YAML'
# Provide the path of SSM doc
# if not using the default stress docs
- name: DOCUMENT_PATH
value: 'Litmus-AWS-SSM-Docs-For-EC2-CPU-Hog.yml'
# if you want to install dependencies to run default SSM docs
- name: INSTALL_DEPENDENCIES
value: 'True'
labels:
name: aws-ssm-chaos-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
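
The DOCUMENT_NAME, DOCUMENT_TYPE, DOCUMENT_FORMAT, and DOCUMENT_PATH values above let the fault run a custom SSM document in place of the default stress docs. A minimal sketch of such a Command document in YAML format (the document content here is illustrative and not part of this chart):

---
schemaVersion: '2.2'
description: Illustrative command document for in-VM chaos
mainSteps:
  - action: aws:runShellScript
    name: runChaosCommand
    inputs:
      runCommand:
        # placeholder; replace with the intended chaos logic
        - echo "inject chaos here"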


@@ -0,0 +1,30 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2022-07-27T10:28:08Z
name: aws-ssm-chaos-by-tag
version: 0.1.0
annotations:
categories: AWS
vendor: Harness
spec:
displayName: AWS SSM Chaos By Tag
categoryDescription: |
AWS SSM Chaos By Tag fault disrupts the state of infra resources. The fault induces chaos on AWS resources using the Amazon SSM Run Command, carried out via SSM Docs that define the actions Systems Manager performs on your managed instances (with the SSM agent installed), letting us run chaos faults on the resources.
- Causes chaos on AWS EC2 instances with the given instance tag using SSM docs for the total chaos duration with the specified chaos interval.
- Tests deployment sanity (replica availability & uninterrupted service) and recovery workflows of the target application pod (if provided).
keywords:
- AWS
platforms:
- AWS
chaosType: infra
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""


@@ -0,0 +1,39 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
engineState: 'active'
chaosServiceAccount: litmus-admin
experiments:
- name: aws-ssm-chaos-by-tag
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '60'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '60'
# provide tag of the target EC2 instances
# ex: team:devops (key:value)
- name: EC2_INSTANCE_TAG
value: ''
# provide the region name of the target instances
- name: REGION
value: ''
# provide the percentage of available memory to stress
- name: MEMORY_PERCENTAGE
value: '80'
# provide the CPU cores to be consumed
# 0 will consume all the available CPU cores
- name: CPU_CORE
value: '0'


@@ -0,0 +1,132 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Execute AWS SSM Chaos on given EC2 instance Tag
kind: ChaosExperiment
metadata:
name: aws-ssm-chaos-by-tag
labels:
name: aws-ssm-chaos-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for executing commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name aws-ssm-chaos-by-tag
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '60'
- name: CHAOS_INTERVAL
value: '60'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# provide tag of the target EC2 instances
# ex: team:devops (key:value)
- name: EC2_INSTANCE_TAG
value: ''
- name: REGION
value: ''
# it defines the sequence of chaos execution for multiple target instances
# supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
# Provide the path of AWS credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
# percentage of total instance to target
- name: INSTANCE_AFFECTED_PERC
value: ''
# provide the number of workers for memory stress
- name: NUMBER_OF_WORKERS
value: '1'
# provide the percentage of available memory to stress
- name: MEMORY_PERCENTAGE
value: '80'
# provide the CPU cores to be consumed
# 0 will consume all the available CPU cores
- name: CPU_CORE
value: '0'
- name: DEFAULT_HEALTH_CHECK
value: 'false'
# Provide the name of SSM doc
# if not using the default stress docs
- name: DOCUMENT_NAME
value: 'LitmusChaos-AWS-SSM-Doc'
# Provide the type of SSM doc
# if not using the default stress docs
- name: DOCUMENT_TYPE
value: 'Command'
# Provide the format of SSM doc
# if not using the default stress docs
- name: DOCUMENT_FORMAT
value: 'YAML'
# Provide the path of SSM doc
# if not using the default stress docs
- name: DOCUMENT_PATH
value: 'Litmus-AWS-SSM-Docs-For-EC2-CPU-Hog.yml'
# if you want to install dependencies to run default SSM docs
- name: INSTALL_DEPENDENCIES
value: 'True'
labels:
name: aws-ssm-chaos-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/


@@ -0,0 +1,51 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: aws
version: 0.1.0
annotations:
categories: AWS
chartDescription: Injects faults on AWS services
executionPlane: "kubernetes"
spec:
displayName: AWS
categoryDescription: >
AWS chaos contains different faults for the AWS cloud provider to test app/platform service availability. It uses AWS commands to carry out the fault logic in the instance. It installs all the faults that can be used to inject chaos into AWS services such as EC2, ECS, S3, and so on.
faults:
- name: alb-az-down
description: It injects AZ down chaos on ALB for a certain chaos duration.
displayName: "ALB AZ Down"
- name: aws-ssm-chaos-by-id
description: It allows performing in-VM chaos using the AWS SSM service and an EC2 instance ID.
displayName: "AWS SSM Chaos By ID"
- name: aws-ssm-chaos-by-tag
description: It allows performing in-VM chaos using the AWS SSM service and an EC2 instance tag.
displayName: "AWS SSM Chaos By Tag"
- name: ec2-terminate-by-id
description: It stops the AWS EC2 instance identified by ID for a certain chaos duration.
displayName: "EC2 Stop By ID"
- name: ec2-terminate-by-tag
description: It stops the AWS EC2 instance identified by Tag for a certain chaos duration.
displayName: "EC2 Stop By Tag"
- name: ebs-loss-by-id
description: It detaches the EBS volume from the AWS EC2 instance identified by ID for a certain chaos duration.
displayName: "EBS Loss By ID"
- name: ebs-loss-by-tag
description: It detaches the EBS volume from the AWS EC2 instance identified by Tag for a certain chaos duration.
displayName: "EBS Loss By Tag"
keywords:
- AWS
minKubeVersion: 1.12.0
links:
- name: Kubernetes Website
url: https://kubernetes.io
- name: Source Code
url: https://github.com/kubernetes/kubernetes
- name: Kubernetes Slack
url: https://slack.kubernetes.io/
- name: Documentation
url: ""
icon:
- url: https://raw.githubusercontent.com/litmuschaos/charthub.litmuschaos.io/master/public/litmus.ico
mediatype: image/png


@@ -0,0 +1,24 @@
---
packageName: aws
faults:
- name: alb-az-down
CSV: alb-az-down.chartserviceversion.yaml
desc: "alb-az-down"
- name: aws-ssm-chaos-by-id
CSV: aws-ssm-chaos-by-id.chartserviceversion.yaml
desc: "aws-ssm-chaos-by-id"
- name: aws-ssm-chaos-by-tag
CSV: aws-ssm-chaos-by-tag.chartserviceversion.yaml
desc: "aws-ssm-chaos-by-tag"
- name: ec2-terminate-by-id
CSV: ec2-terminate-by-id.chartserviceversion.yaml
desc: "ec2-terminate-by-id"
- name: ec2-terminate-by-tag
CSV: ec2-terminate-by-tag.chartserviceversion.yaml
desc: "ec2-terminate-by-tag"
- name: ebs-loss-by-id
CSV: ebs-loss-by-id.chartserviceversion.yaml
desc: "ebs-loss-by-id"
- name: ebs-loss-by-tag
CSV: ebs-loss-by-tag.chartserviceversion.yaml
desc: "ebs-loss-by-tag"


@@ -0,0 +1,30 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2022-07-28T10:28:08Z
name: ebs-loss-by-id
version: 0.1.0
annotations:
categories: AWS
vendor: Harness
spec:
displayName: EBS Loss By ID
categoryDescription: |
EBS Loss By ID fault disrupts the state of infra resources. The fault induces EBS volume loss against the specified application for the given EBS volume(s).
- Causes EBS volume loss from the node or EC2 instance for a certain chaos interval within the total chaos duration.
- Tests deployment sanity (replica availability & uninterrupted service) and recovery workflows of the application pod.
keywords:
- AWS
platforms:
- AWS
chaosType: infra
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""


@@ -0,0 +1,29 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: aws-chaos
namespace: default
spec:
engineState: 'active'
chaosServiceAccount: litmus-admin
experiments:
- name: ebs-loss-by-id
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
# set target EBS volume ID
- name: EBS_VOLUME_ID
value: ''
# provide the region name of the instance
- name: REGION
value: ''


@@ -0,0 +1,87 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching an EBS volume from EC2 instance for a certain chaos duration.
kind: ChaosExperiment
metadata:
name: ebs-loss-by-id
labels:
name: ebs-loss-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for executing commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ebs-loss-by-id
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
- name: EBS_VOLUME_ID
value: ''
- name: REGION
value: ''
- name: SEQUENCE
value: 'parallel'
- name: DEFAULT_HEALTH_CHECK
value: 'false'
# Provide the path of AWS credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
labels:
name: ebs-loss-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/


@@ -0,0 +1,30 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2022-07-28T10:28:08Z
name: ebs-loss-by-tag
version: 0.1.0
annotations:
categories: AWS
vendor: Harness
spec:
displayName: EBS Loss By Tag
categoryDescription: |
EBS Loss By Tag fault disrupts the state of infra resources. The fault induces EBS volume loss against the specified application for the given volume tag.
- Causes EBS volume loss by tag from the node or EC2 instance for a certain chaos interval within the total chaos duration.
- Tests deployment sanity (replica availability & uninterrupted service) and recovery workflows of the application pod.
keywords:
- AWS
platforms:
- AWS
chaosType: infra
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""


@@ -0,0 +1,32 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: aws-chaos
namespace: default
spec:
engineState: 'active'
chaosServiceAccount: litmus-admin
experiments:
- name: ebs-loss-by-tag
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# provide EBS volume tag attached to the given instance
# it'll be in form of key:value (ex: 'team:devops')
- name: EBS_VOLUME_TAG
value: ''
# provide the region name of the instance
- name: REGION
value: ''
- name: VOLUME_AFFECTED_PERC
value: ''


@@ -0,0 +1,89 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching an EBS volume from EC2 instance.
kind: ChaosExperiment
metadata:
name: ebs-loss-by-tag
labels:
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for executing commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ebs-loss-by-tag
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: RAMP_TIME
value: ''
- name: EBS_VOLUME_TAG
value: ''
- name: REGION
value: ''
- name: SEQUENCE
value: 'parallel'
- name: VOLUME_AFFECTED_PERC
value: ''
- name: DEFAULT_HEALTH_CHECK
value: 'false'
# Provide the path of AWS credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
labels:
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/


@@ -0,0 +1,30 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2022-07-27T10:28:08Z
name: ec2-stop-by-tag
version: 0.1.0
annotations:
categories: AWS
vendor: Harness
spec:
displayName: EC2 Stop By Tag
categoryDescription: |
This fault stops an EC2 instance for a certain chaos duration.
- Stops an EC2 instance matched by the given instance tags and brings it back to the running state after the specified chaos duration.
- It helps to check the performance of the application on the EC2 instance.
keywords:
- AWS
platforms:
- AWS
chaosType: infra
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""


@@ -0,0 +1,38 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: aws-chaos
namespace: default
spec:
engineState: 'active'
chaosServiceAccount: litmus-admin
experiments:
- name: ec2-stop-by-tag
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set interval duration (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
# Instance Tag of the target EC2 instances
# ex: team:devops (key:value)
- name: EC2_INSTANCE_TAG
value: ''
# provide the region name of the instance
- name: REGION
value: ''
# enable it if the target instance is a part of a self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: 'disable'
# percentage of instances filtered by the tag to be targeted
- name: INSTANCE_AFFECTED_PERC
value: ''


@@ -0,0 +1,103 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stopping an EC2 instance identified by tag.
kind: ChaosExperiment
metadata:
name: ec2-stop-by-tag
labels:
name: ec2-stop-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for executing commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ec2-stop-by-tag
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# Provide a common tag to target ec2 instances
- name: EC2_INSTANCE_TAG
value: ''
# enable it if the target instance is a part of a self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: 'disable'
- name: REGION
value: ''
# percentage of instances filtered by the tag to be targeted
- name: INSTANCE_AFFECTED_PERC
value: ''
- name: SEQUENCE
value: 'parallel'
- name: DEFAULT_HEALTH_CHECK
value: 'false'
# Provide the path of AWS credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
labels:
name: ec2-stop-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/


@@ -0,0 +1,30 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2022-07-27T10:28:08Z
name: ec2-terminate-by-id
version: 0.1.0
annotations:
categories: AWS
vendor: Harness
spec:
displayName: EC2 Terminate By ID
categoryDescription: |
This fault causes termination of an EC2 instance for a certain chaos duration.
- Causes termination of an EC2 instance identified by the given instance ID before bringing it back to the running state after the specified chaos duration.
- It helps to check the performance of the application on the EC2 instance.
keywords:
- AWS
platforms:
- AWS
chaosType: infra
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""


@@ -0,0 +1,34 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: aws-chaos
namespace: default
spec:
engineState: 'active'
chaosServiceAccount: litmus-admin
experiments:
- name: ec2-terminate-by-id
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set interval duration (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
# Instance ID of the target EC2 instance
# Multiple IDs can also be provided as comma separated values ex: id1,id2
- name: EC2_INSTANCE_ID
value: ''
# provide the region name of the instance
- name: REGION
value: ''
# enable it if the target instance is a part of a self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: 'disable'


@@ -0,0 +1,97 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stopping an EC2 instance identified by ID.
kind: ChaosExperiment
metadata:
name: ec2-terminate-by-id
labels:
name: ec2-terminate-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for executing commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ec2-terminate-by-id
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
- name: DEFAULT_HEALTH_CHECK
value: 'false'
# enable it if the target instance is a part of a self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: 'disable'
# Instance ID of the target EC2 instance
# Multiple IDs can also be provided as comma separated values ex: id1,id2
- name: EC2_INSTANCE_ID
value: ''
- name: REGION
value: ''
- name: SEQUENCE
value: 'parallel'
# Provide the path of AWS credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
labels:
name: ec2-terminate-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
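
Once an engine executes this fault, Litmus records the outcome in a ChaosResult typically named <engine-name>-<experiment-name>. A sketch of the fields usually inspected, with illustrative values (aws-chaos is the engine name used in the sample engine above):

---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosResult
metadata:
  name: aws-chaos-ec2-terminate-by-id
  namespace: default
status:
  experimentStatus:
    # illustrative values for a successful run
    phase: Completed
    verdict: Pass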

Binary files not shown (7 icon images added, 61 KiB each).


@@ -0,0 +1,24 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2022-07-18T10:28:08Z
name: azure-disk-loss
version: 0.1.0
annotations:
categories: Azure
vendor: Harness
spec:
displayName: Azure Disk Loss
categoryDescription: |
This fault causes the detachment of the disk from the VM for a certain chaos duration.
- Causes detachment of the disk from the VM and then re-attachment of the disk to the VM.
- It helps to check the performance of the application on the instance.
keywords:
- Azure
platforms:
- Azure
minKubeVersion: 1.12.0
icon:
- url:
mediatype: ""


@@ -0,0 +1,33 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: azure-chaos
spec:
# It can be active/stop
engineState: 'active'
chaosServiceAccount: litmus-admin
experiments:
- name: azure-disk-loss
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
# provide the resource group of the instance
- name: RESOURCE_GROUP
value: ''
# accepts enable/disable, default is disable
- name: SCALE_SET
value: ''
# provide the virtual disk names (comma separated if multiple)
- name: VIRTUAL_DISK_NAMES
value: ''
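
The corresponding azure-disk-loss experiment (below) reads AZURE_AUTH_LOCATION from /tmp/azure.auth, mounted from the same cloud-secret pattern as the other clouds. A minimal sketch, assuming the Azure SDK auth-file format with placeholder values:

---
apiVersion: v1
kind: Secret
metadata:
  name: cloud-secret
  namespace: default
type: Opaque
stringData:
  # rendered at /tmp/azure.auth by the experiment's secret mount
  azure.auth: |-
    {
      "clientId": "<client-id>",
      "clientSecret": "<client-secret>",
      "subscriptionId": "<subscription-id>",
      "tenantId": "<tenant-id>"
    }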


@@ -0,0 +1,94 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaches the disk from the VM and then re-attaches it to the VM
kind: ChaosExperiment
metadata:
name: azure-disk-loss
labels:
name: azure-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for executing commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name azure-disk-loss
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# provide the resource group of the instance
- name: RESOURCE_GROUP
value: ''
# accepts enable/disable, default is disable
- name: SCALE_SET
value: ''
# provide the virtual disk names (comma separated if multiple)
- name: VIRTUAL_DISK_NAMES
value: ''
# provide the sequence type for the run. Options: serial/parallel
- name: SEQUENCE
value: 'parallel'
# provide the path to aks credentials mounted from secret
- name: AZURE_AUTH_LOCATION
value: '/tmp/azure.auth'
- name: DEFAULT_HEALTH_CHECK
value: 'false'
labels:
name: azure-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/


@@ -0,0 +1,26 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2022-07-18T10:28:08Z
name: azure-instance-stop
version: 0.1.0
annotations:
categories: Azure
vendor: Harness
spec:
displayName: Azure Instance Stop
categoryDescription: |
Azure instance stop fault disrupts the state of an Azure instance. This fault stops an Azure instance for a certain chaos duration.
- It helps to check the performance of the application/processes running on the Azure instance when subjected to an instance stop.
keywords:
- Azure
platforms:
- Azure
minKubeVersion: 1.12.0
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""


@@ -0,0 +1,33 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
engineState: 'active'
chaosServiceAccount: litmus-admin
experiments:
- name: azure-instance-stop
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
# provide the target instance name(s) (comma separated if multiple)
- name: AZURE_INSTANCE_NAMES
value: ''
# provide the resource group of the instance
- name: RESOURCE_GROUP
value: ''
# accepts enable/disable, default is disable
- name: SCALE_SET
value: ''


@@ -0,0 +1,93 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stops an Azure instance for a certain chaos duration
kind: ChaosExperiment
metadata:
name: azure-instance-stop
labels:
name: azure-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for executing commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name azure-instance-stop
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# provide the target instance name(s) (comma separated if multiple)
- name: AZURE_INSTANCE_NAMES
value: ''
# provide the resource group of the instance
- name: RESOURCE_GROUP
value: ''
# accepts enable/disable, default is disable
- name: SCALE_SET
value: ''
# Provide the path of aks credentials mounted from secret
- name: AZURE_AUTH_LOCATION
value: '/tmp/azure.auth'
- name: SEQUENCE
value: 'parallel'
- name: DEFAULT_HEALTH_CHECK
value: 'false'
labels:
name: azure-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/


@@ -0,0 +1,27 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: azure
version: 0.1.0
annotations:
categories: Azure
chartDescription: Injects faults on Azure services
executionPlane: "kubernetes"
spec:
displayName: Azure
categoryDescription: >
Azure chaos contains different chaos experiments for the Azure cloud provider to test app/platform service availability. It uses Azure Run Command to carry out the fault logic in the instance. It installs all the experiments that can be used to inject chaos into Azure services such as VMs.
faults:
- name: azure-disk-loss
description: It injects Azure disk loss fault from an Azure VM instance for a certain chaos duration.
displayName: "Azure Disk Loss"
- name: azure-instance-stop
description: It injects the Azure VM instance stop fault for a certain chaos duration.
displayName: "Azure Instance Stop"
keywords:
- Azure
minKubeVersion: 1.12.0
icon:
- url: https://raw.githubusercontent.com/litmuschaos/charthub.litmuschaos.io/master/public/litmus.ico
mediatype: image/png


@@ -0,0 +1,9 @@
---
packageName: azure
faults:
- name: azure-disk-loss
CSV: azure-disk-loss.chartserviceversion.yaml
desc: "azure-disk-loss"
- name: azure-instance-stop
CSV: azure-instance-stop.chartserviceversion.yaml
desc: "azure-instance-stop"

Binary files not shown (2 icon images added, 22 KiB each).


@@ -0,0 +1,42 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: gcp-chaos
spec:
engineState: 'active'
chaosServiceAccount: litmus-admin
experiments:
- name: gcp-vm-disk-loss-by-label
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
# set the GCP project id
- name: GCP_PROJECT_ID
value: ''
# set the zone in which all the disks are created
# all the disks must exist in the same zone
- name: ZONES
value: ''
# set the label of the target disk volumes
- name: DISK_VOLUME_LABEL
value: ''
# set the percentage value of the disks with the given label
# which should be targeted as part of the chaos injection
- name: DISK_AFFECTED_PERC
value: ''
# parallel or serial; determines how chaos is injected
- name: SEQUENCE
value: 'parallel'
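
The GCP faults authenticate with a service-account key delivered through the same cloud-secret mount used by the other clouds. A sketch, assuming the key is supplied as individual fields of a GCP service-account JSON (a layout used in LitmusChaos GCP docs; all values are placeholders):

---
apiVersion: v1
kind: Secret
metadata:
  name: cloud-secret
  namespace: default
type: Opaque
stringData:
  # fields from the downloaded GCP service-account key file
  type: service_account
  project_id: <project-id>
  private_key_id: <private-key-id>
  private_key: <private-key>
  client_email: <service-account-email>
  client_id: <client-id>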


@@ -0,0 +1,89 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Causes loss of a non-boot storage persistent disk from a GCP VM instance filtered by a label for a specified duration before attaching it back
kind: ChaosExperiment
metadata:
name: gcp-vm-disk-loss-by-label
labels:
name: gcp-vm-disk-loss-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-disk-loss-by-label
command:
- /bin/bash
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
# set the GCP project id
- name: GCP_PROJECT_ID
value: ''
# set the zone in which all the disks are created
# all the disks must exist in the same zone
- name: ZONES
value: ''
# set the label of the target disk volumes
- name: DISK_VOLUME_LABEL
value: ''
# set the percentage value of the disks with the given label
# which should be targeted as part of the chaos injection
- name: DISK_AFFECTED_PERC
value: ''
- name: DEFAULT_HEALTH_CHECK
value: 'false'
- name: SEQUENCE
value: 'parallel'
labels:
name: gcp-vm-disk-loss-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/


@@ -0,0 +1,28 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: gcp-vm-disk-loss-by-label
version: 0.1.0
annotations:
categories: GCP
vendor: Harness
spec:
displayName: GCP VM Disk Loss By Label
categoryDescription: >
Causes loss of a non-boot storage persistent disk from a GCP VM instance filtered by a label for a specified duration before attaching it back.
keywords:
- GCP
platforms:
- "Minikube"
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""


@@ -0,0 +1,41 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: gcp-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
chaosServiceAccount: litmus-admin
experiments:
- name: gcp-vm-disk-loss
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
# set the GCP project id
- name: GCP_PROJECT_ID
value: ''
# set the disk volume name(s) as comma-separated values
# eg. volume1,volume2,...
- name: DISK_VOLUME_NAMES
value: ''
# set the disk zone(s) as comma-separated values in the corresponding
# order of DISK_VOLUME_NAMES
# eg. zone1,zone2,...
- name: ZONES
value: ''
# parallel or serial; determines how chaos is injected
- name: SEQUENCE
value: 'parallel'


@@ -0,0 +1,93 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Causes loss of a non-boot storage persistent disk from a GCP VM instance for a specified duration before attaching it back
kind: ChaosExperiment
metadata:
name: gcp-vm-disk-loss
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-disk-loss
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# parallel or serial; determines how chaos is injected
- name: SEQUENCE
value: 'parallel'
# set the GCP project id
- name: GCP_PROJECT_ID
value: ''
# set the disk volume name(s) as comma-separated values
# eg. volume1,volume2,...
- name: DISK_VOLUME_NAMES
value: ''
# set the disk zone(s) as comma-separated values in the corresponding
# order of DISK_VOLUME_NAMES
# eg. zone1,zone2,...
- name: ZONES
value: ''
- name: DEFAULT_HEALTH_CHECK
value: 'false'
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/


@@ -0,0 +1,27 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: gcp-vm-disk-loss
version: 0.1.0
annotations:
categories: GCP
vendor: Harness
spec:
displayName: GCP VM Disk Loss
categoryDescription: >
Causes loss of a non-boot storage persistent disk from a GCP VM instance for a specified duration before attaching it back.
keywords:
- GCP
platforms:
- GCP
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""


@@ -0,0 +1,47 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: gcp-chaos
spec:
# It can be active/stop
engineState: 'active'
chaosServiceAccount: litmus-admin
experiments:
- name: gcp-vm-instance-stop-by-label
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
- name: SEQUENCE
value: 'parallel'
# GCP project ID to which the VM instances belong
- name: GCP_PROJECT_ID
value: ''
# Label of the target VM instance(s)
- name: INSTANCE_LABEL
value: ''
# Zone in which the target VM instance(s) filtered by the label exist
# all the instances should lie in a single zone
- name: ZONES
value: ''
# enable it if the target instances are a part of a managed instance group
- name: MANAGED_INSTANCE_GROUP
value: 'disable'
# set the percentage value of the instances with the given label
# which should be targeted as part of the chaos injection
- name: INSTANCE_AFFECTED_PERC
value: ''


@@ -0,0 +1,97 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stops GCP VM instances and GKE nodes filtered by a label for a specified duration and later restarts them
kind: ChaosExperiment
metadata:
name: gcp-vm-instance-stop-by-label
labels:
name: gcp-vm-instance-stop-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-instance-stop-by-label
command:
- /bin/bash
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
- name: SEQUENCE
value: 'parallel'
# GCP project ID to which the VM instances belong
- name: GCP_PROJECT_ID
value: ''
# Label of the target VM instance(s)
- name: INSTANCE_LABEL
value: ''
# Zone in which the target VM instance(s) filtered by the label exist
# all the instances should lie in a single zone
- name: ZONES
value: ''
# enable it if the target instance is a part of a managed instance group
- name: MANAGED_INSTANCE_GROUP
value: 'disable'
# set the percentage value of the instances with the given label
# which should be targeted as part of the chaos injection
- name: INSTANCE_AFFECTED_PERC
value: ''
- name: DEFAULT_HEALTH_CHECK
value: 'false'
labels:
name: gcp-vm-instance-stop-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/


@@ -0,0 +1,27 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: gcp-vm-instance-stop-by-label
version: 0.1.0
annotations:
categories: GCP
vendor: Harness
spec:
displayName: GCP VM Instance Stop By Label
categoryDescription: >
Stops GCP VM instances and GKE nodes filtered by a label for a specified duration and later restarts them.
keywords:
- GCP
platforms:
- "Minikube"
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""


@@ -0,0 +1,43 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: gcp-vm-chaos
spec:
engineState: 'active'
chaosServiceAccount: gcp-vm-instance-stop-sa
experiments:
- name: gcp-vm-instance-stop
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
# Instance name of the target VM instance(s)
# Multiple instance names can be provided as comma separated values ex: instance1,instance2
- name: VM_INSTANCE_NAMES
value: ''
# GCP project ID to which the VM instances belong
- name: GCP_PROJECT_ID
value: ''
# Instance zone(s) of the target VM instance(s)
        # If more than one instance is targeted, provide a zone for each in the order of its
        # respective instance name in VM_INSTANCE_NAMES, as comma-separated values, e.g. zone1,zone2
- name: ZONES
value: ''
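        # Illustrative pairing (hypothetical names): each zone maps positionally
        # to the instance at the same position above, e.g.
        #   VM_INSTANCE_NAMES: instance1,instance2
        #   ZONES: us-central1-a,us-central1-b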
        # set to 'enable' if the target instances are part of a managed instance group
- name: MANAGED_INSTANCE_GROUP
value: 'disable'
# parallel or serial; determines how chaos is injected
- name: SEQUENCE
value: 'parallel'

View File

@@ -0,0 +1,101 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stops GCP VM instances and GKE nodes for a specified duration and later restarts them
kind: ChaosExperiment
metadata:
name: gcp-vm-instance-stop
labels:
name: gcp-vm-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
      # Track and get the logs of the runner, experiment, and helper pods
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
      # for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
      # for the experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-instance-stop
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# parallel or serial; determines how the VM instances are terminated, all at once or one at a time
- name: SEQUENCE
value: 'parallel'
# period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
    # enable or disable; set to 'enable' if the target instances are part of a managed instance group
- name: MANAGED_INSTANCE_GROUP
value: 'disable'
# Instance name of the target VM instance(s)
    # Multiple instance names can be provided as comma-separated values, e.g. instance1,instance2
- name: VM_INSTANCE_NAMES
value: ''
# GCP project ID to which the VM instances belong
- name: GCP_PROJECT_ID
value: ''
# Instance zone(s) of the target VM instance(s)
    # If more than one instance is targeted, provide a zone for each in the order of its
    # respective instance name in VM_INSTANCE_NAMES, as comma-separated values, e.g. zone1,zone2
- name: ZONES
value: ''
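    # Illustrative single-instance setup (hypothetical names):
    #   VM_INSTANCE_NAMES: my-vm-1
    #   ZONES: us-central1-a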
- name: DEFAULT_HEALTH_CHECK
value: 'false'
labels:
name: gcp-vm-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/

View File

@@ -0,0 +1,26 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: gcp-vm-instance-stop
version: 0.1.0
annotations:
    categories: GCP
spec:
displayName: GCP VM Instance Stop
categoryDescription: >
Stops GCP VM instances and GKE nodes for a specified duration and later restarts them.
keywords:
- GCP
platforms:
- GCP
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""

View File

@@ -0,0 +1,42 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: gcp
version: 0.1.0
annotations:
categories: GCP
chartDescription: Injects faults on GCP services
executionPlane: "kubernetes"
spec:
displayName: GCP
categoryDescription: >
    GCP faults disrupt the state of GCP resources running as part of GCP services
faults:
- name: gcp-vm-instance-stop
description: It injects gcp-vm-instance-stop fault into GCP VMs.
displayName: "GCP VM Instance Stop"
- name: gcp-vm-instance-stop-by-label
description: It injects gcp-vm-instance-stop-by-label fault into GCP VMs.
displayName: "GCP VM Instance Stop By Label"
- name: gcp-vm-disk-loss-by-label
description: It injects gcp-vm-disk-loss-by-label fault into GCP VMs.
displayName: "GCP VM Disk Loss By Label"
- name: gcp-vm-disk-loss
description: It injects gcp-vm-disk-loss fault into GCP VMs.
displayName: "GCP VM Disk Loss"
keywords:
- GCP
minKubeVersion: 1.12.0
links:
- name: Kubernetes Website
url: https://kubernetes.io
- name: Source Code
url: https://github.com/kubernetes/kubernetes
- name: Kubernetes Slack
url: https://slack.kubernetes.io/
- name: Documentation
url: ""
icon:
- url: https://raw.githubusercontent.com/litmuschaos/charthub.litmuschaos.io/master/public/litmus.ico
mediatype: image/png

View File

@@ -0,0 +1,15 @@
---
packageName: gcp
faults:
- name: gcp-vm-instance-stop
CSV: gcp-vm-instance-stop.chartserviceversion.yaml
desc: "gcp-vm-instance-stop"
- name: gcp-vm-instance-stop-by-label
CSV: gcp-vm-instance-stop-by-label.chartserviceversion.yaml
desc: "gcp-vm-instance-stop-by-label"
- name: gcp-vm-disk-loss-by-label
CSV: gcp-vm-disk-loss-by-label.chartserviceversion.yaml
desc: "gcp-vm-disk-loss-by-label"
- name: gcp-vm-disk-loss
CSV: gcp-vm-disk-loss.chartserviceversion.yaml
desc: "gcp-vm-disk-loss"

View File

@@ -0,0 +1,33 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: container-kill
version: 0.1.0
annotations:
categories: "Kubernetes"
vendor: "Harness"
spec:
displayName: Container Kill
categoryDescription: |
    Container kill fault disrupts the state of Kubernetes resources. It injects random container kill failures against the specified application.
- Executes SIGKILL on containers of random replicas of an application deployment.
- Tests deployment sanity (replica availability & uninterrupted service) and recovery workflows of the application pod.
keywords:
- Kubernetes
platforms:
- GKE
- Minikube
- Packet(Kubeadm)
- EKS
- AKS
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""

View File

@@ -0,0 +1,41 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
appinfo:
appns: ''
applabel: ''
appkind: ''
chaosServiceAccount: container-kill-sa
experiments:
- name: container-kill
spec:
components:
env:
# provide the total chaos duration
- name: TOTAL_CHAOS_DURATION
value: '20'
# provide the chaos interval
- name: CHAOS_INTERVAL
value: '10'
# provide the name of container runtime
# it supports docker, containerd, crio
- name: CONTAINER_RUNTIME
value: 'containerd'
# provide the socket file path
- name: SOCKET_PATH
value: '/run/containerd/containerd.sock'
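        # Typical runtime/socket pairings (verify the path on your nodes):
        #   docker     -> /var/run/docker.sock
        #   containerd -> /run/containerd/containerd.sock
        #   crio       -> /var/run/crio/crio.sock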
- name: PODS_AFFECTED_PERC
value: ''
- name: TARGET_CONTAINER
value: ''

View File

@@ -0,0 +1,123 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: "Kills a container belonging to an application pod \n"
kind: ChaosExperiment
metadata:
name: container-kill
labels:
name: container-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmap details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
      # Track and get the logs of the runner, experiment, and helper pods
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
      # for creating pods/exec subresources to execute commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod (if the parent is any of {deployment, statefulset, replicaset, daemonset})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
      # deriving the parent/owner details of the pod (if the parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
      # deriving the parent/owner details of the pod (if the parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
      # for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name container-kill
command:
- /bin/bash
env:
- name: TARGET_CONTAINER
value: ''
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
- name: TARGET_PODS
value: ''
# provide the chaos interval
- name: CHAOS_INTERVAL
value: '10'
- name: SIGNAL
value: 'SIGKILL'
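    # termination signal sent to the target container, e.g. 'SIGKILL' for an
    # abrupt kill or 'SIGTERM' for a graceful stop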
# provide the socket file path
- name: SOCKET_PATH
value: '/run/containerd/containerd.sock'
# provide the name of container runtime
# it supports docker, containerd, crio
- name: CONTAINER_RUNTIME
value: 'containerd'
# provide the total chaos duration
- name: TOTAL_CHAOS_DURATION
value: '20'
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''
# To select pods on specific node(s)
- name: NODE_LABEL
value: ''
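    # e.g. 'kubernetes.io/hostname=worker-1' (hypothetical node name)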
- name: DEFAULT_HEALTH_CHECK
value: 'false'
- name: LIB_IMAGE
value: 'chaosnative/go-runner:ci'
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: container-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: ci

View File

@@ -0,0 +1,37 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: disk-fill
version: 0.1.0
annotations:
categories: Kubernetes
vendor: Harness
support: https://slack.kubernetes.io/
spec:
displayName: Disk Fill
categoryDescription: |
    Disk fill fault disrupts the state of Kubernetes resources.
    - Causes (forced/graceful) disk stress by filling up the ephemeral storage of the pod using one of its containers.
    - Causes the pod to get evicted if it exceeds its ephemeral storage limit.
    - Tests the ephemeral storage limits, to ensure those parameters are sufficient.
keywords:
- Kubernetes
platforms:
- GKE
- EKS
- AKS
experiments:
- sock-shop
- sock-shop-promProbe
chaosType: infra
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url:
mediatype: ""

View File

@@ -0,0 +1,36 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
appinfo:
appns: ''
applabel: ''
appkind: ''
chaosServiceAccount: disk-fill-sa
experiments:
- name: disk-fill
spec:
components:
env:
- name: TOTAL_CHAOS_DURATION
value: '60'
# specify the fill percentage according to the disk pressure required
- name: FILL_PERCENTAGE
value: '80'
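        # Illustrative sizing: with a container ephemeral-storage limit of 1Gi,
        # a value of '80' fills roughly 800Mi of the pod's ephemeral storage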
- name: PODS_AFFECTED_PERC
value: ''
# Provide the container runtime path
        # Defaults to the docker container path
- name: CONTAINER_PATH
value: '/var/lib/docker/containers'

View File

@@ -0,0 +1,123 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
    Fills up the ephemeral storage of a resource
kind: ChaosExperiment
metadata:
name: disk-fill
labels:
name: disk-fill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmap details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
      # Track and get the logs of the runner, experiment, and helper pods
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
      # for creating pods/exec subresources to execute commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod (if the parent is any of {deployment, statefulset, replicaset, daemonset})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
      # deriving the parent/owner details of the pod (if the parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
      # deriving the parent/owner details of the pod (if the parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
      # for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name disk-fill
command:
- /bin/bash
env:
- name: TARGET_CONTAINER
value: ''
- name: FILL_PERCENTAGE
value: '80'
- name: TOTAL_CHAOS_DURATION
value: '60'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# provide the data block size
# supported unit is KB
- name: DATA_BLOCK_SIZE
value: '256'
- name: TARGET_PODS
value: ''
- name: EPHEMERAL_STORAGE_MEBIBYTES
value: ''
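    # absolute fill size in MiB, e.g. '800'; when set, it is assumed to take
    # precedence over FILL_PERCENTAGE (verify for your Litmus release)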
# To select pods on specific node(s)
- name: NODE_LABEL
value: ''
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''
- name: DEFAULT_HEALTH_CHECK
value: 'false'
- name: LIB_IMAGE
value: 'chaosnative/go-runner:ci'
# Provide the container runtime path
    # Defaults to the docker container path
- name: CONTAINER_PATH
value: '/var/lib/docker/containers'
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: disk-fill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/host-path-usage: "true"
app.kubernetes.io/version: ci

View File

@@ -0,0 +1,30 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: docker-service-kill
version: 0.1.0
annotations:
categories: Kubernetes
vendor: Harness
spec:
displayName: Docker Service Kill
categoryDescription: |
docker-service-kill fault kills the Docker service gracefully for a certain chaos duration.
- Causes replicas to be evicted or become unreachable on account of nodes turning unschedulable (Not Ready) due to Docker service kill.
    - The application node should be healthy once the chaos is stopped and the services are re-accessible.
keywords:
- Kubernetes
platforms:
- GKE
- AKS
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- base64data: ""
mediatype: ""

View File

@@ -0,0 +1,27 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
auxiliaryAppInfo: ''
chaosServiceAccount: docker-service-kill-sa
experiments:
- name: docker-service-kill
spec:
components:
# nodeSelector:
# # provide the node labels
# kubernetes.io/hostname: 'node02'
env:
- name: TOTAL_CHAOS_DURATION
value: '90' # in seconds
- name: TARGET_NODE
value: ''
- name: NODE_LABEL
value: ""

View File

@@ -0,0 +1,84 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Kills the Docker service on the application node to check the resiliency.
kind: ChaosExperiment
metadata:
name: docker-service-kill
labels:
name: docker-service-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmap details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
      # Track and get the logs of the runner, experiment, and helper pods
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
      # for creating pods/exec subresources to execute commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
      # for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
      # for the experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name docker-service-kill
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '90' # in seconds
    # Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
- name: NODE_LABEL
value: ''
# provide lib image
- name: LIB_IMAGE
value: 'ubuntu:16.04'
# provide the target node name
- name: TARGET_NODE
value: ''
- name: DEFAULT_HEALTH_CHECK
value: 'false'
labels:
name: docker-service-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/service-kill: "true"
app.kubernetes.io/version: ci

View File

@@ -0,0 +1,26 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
chaosServiceAccount: kubelet-service-kill-sa
experiments:
- name: kubelet-service-kill
spec:
components:
# nodeSelector:
# # provide the node labels
# kubernetes.io/hostname: 'node02'
env:
- name: TOTAL_CHAOS_DURATION
value: '60' # in seconds
# provide the target node name
- name: TARGET_NODE
value: ''
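        # e.g. 'node02' (hypothetical), matching the nodeSelector hint above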

View File

@@ -0,0 +1,84 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Kills the Kubelet service on the application node to check the resiliency.
kind: ChaosExperiment
metadata:
name: kubelet-service-kill
labels:
name: kubelet-service-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmap details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
      # Track and get the logs of the runner, experiment, and helper pods
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
      # for creating pods/exec subresources to execute commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
      # for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
      # for the experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name kubelet-service-kill
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '60' # in seconds
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
- name: NODE_LABEL
value: ''
# provide lib image
- name: LIB_IMAGE
value: 'ubuntu:16.04'
# provide the target node name
- name: TARGET_NODE
value: ''
- name: DEFAULT_HEALTH_CHECK
value: 'false'
labels:
name: kubelet-service-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/service-kill: "true"
app.kubernetes.io/version: ci

View File

@@ -0,0 +1,33 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: kubelet-service-kill
version: 0.1.0
annotations:
categories: Kubernetes
vendor: Harness
spec:
displayName: Kubelet Service Kill
categoryDescription: |
kubelet-service-kill fault kills the Kubelet service gracefully for a certain chaos duration.
    - Replicas may be evicted or become unreachable on account of nodes turning unschedulable (Not Ready) due to Kubelet service kill.
    - The application node should be healthy once the chaos is stopped and the services are re-accessible.
keywords:
- Kubernetes
platforms:
- GKE
- Packet(Kubeadm)
- Minikube
- EKS
- AKS
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- base64data: ""
mediatype: ""

Some files were not shown because too many files have changed in this diff.