2165995170: version upgraded for chaos-charts

litmusbot
2022-04-14 08:10:28 +00:00
parent 7b07fa66dc
commit 238784bb69
6 changed files with 3246 additions and 3246 deletions

View File

@@ -1,4 +1,135 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Execute AWS SSM Chaos on given ec2 instance IDs
kind: ChaosExperiment
metadata:
name: aws-ssm-chaos-by-id
labels:
name: aws-ssm-chaos-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for executing commands inside the target container via pods/exec
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name aws-ssm-chaos-by-id
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '60'
- name: CHAOS_INTERVAL
value: '60'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# Instance ID of the target ec2 instance
# Multiple IDs can also be provided as comma separated values ex: id1,id2
- name: EC2_INSTANCE_ID
value: ''
- name: REGION
value: ''
# it defines the sequence of chaos execution for multiple target instances
# supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
# Provide the path of aws credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
# Provide the name of ssm doc
# if not using the default stress docs
- name: DOCUMENT_NAME
value: ''
# Provide the type of ssm doc
# if not using the default stress docs
- name: DOCUMENT_TYPE
value: ''
# Provide the format of ssm doc
# if not using the default stress docs
- name: DOCUMENT_FORMAT
value: ''
# Provide the path of ssm doc
# if not using the default stress docs
- name: DOCUMENT_PATH
value: ''
# if you want to install dependencies to run default ssm docs
- name: INSTALL_DEPENDENCIES
value: 'True'
# provide the number of workers for memory stress
- name: NUMBER_OF_WORKERS
value: '1'
# provide the percentage of available memory to stress
- name: MEMORY_PERCENTAGE
value: '80'
# provide the CPU cores to be consumed
# 0 will consume all the available cpu cores
- name: CPU_CORE
value: '0'
# provide the LIB
# only litmus supported
- name: LIB
value: 'litmus'
labels:
name: aws-ssm-chaos-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Execute AWS SSM Chaos on given ec2 instance Tag
@@ -133,134 +264,3 @@ spec:
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Execute AWS SSM Chaos on given ec2 instance IDs
kind: ChaosExperiment
metadata:
name: aws-ssm-chaos-by-id
labels:
name: aws-ssm-chaos-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for executing commands inside the target container via pods/exec
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name aws-ssm-chaos-by-id
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '60'
- name: CHAOS_INTERVAL
value: '60'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# Instance ID of the target ec2 instance
# Multiple IDs can also be provided as comma separated values ex: id1,id2
- name: EC2_INSTANCE_ID
value: ''
- name: REGION
value: ''
# it defines the sequence of chaos execution for multiple target instances
# supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
# Provide the path of aws credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
# Provide the name of ssm doc
# if not using the default stress docs
- name: DOCUMENT_NAME
value: ''
# Provide the type of ssm doc
# if not using the default stress docs
- name: DOCUMENT_TYPE
value: ''
# Provide the format of ssm doc
# if not using the default stress docs
- name: DOCUMENT_FORMAT
value: ''
# Provide the path of ssm doc
# if not using the default stress docs
- name: DOCUMENT_PATH
value: ''
# if you want to install dependencies to run default ssm docs
- name: INSTALL_DEPENDENCIES
value: 'True'
# provide the number of workers for memory stress
- name: NUMBER_OF_WORKERS
value: '1'
# provide the percentage of available memory to stress
- name: MEMORY_PERCENTAGE
value: '80'
# provide the CPU cores to be consumed
# 0 will consume all the available cpu cores
- name: CPU_CORE
value: '0'
# provide the LIB
# only litmus supported
- name: LIB
value: 'litmus'
labels:
name: aws-ssm-chaos-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
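For reference, a ChaosExperiment like aws-ssm-chaos-by-id above is executed through a ChaosEngine that names it and overrides its env values. Below is a minimal sketch; the engine name, namespace, service account, instance ID, and region are placeholders rather than values defined in this chart:

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: aws-ssm-chaos                           # hypothetical engine name
  namespace: default
spec:
  engineState: 'active'
  chaosServiceAccount: aws-ssm-chaos-by-id-sa   # placeholder SA bound to the permissions listed above
  experiments:
    - name: aws-ssm-chaos-by-id
      spec:
        components:
          env:
            - name: EC2_INSTANCE_ID
              value: 'i-0123456789abcdef0'      # placeholder instance ID
            - name: REGION
              value: 'us-east-1'                # placeholder region
            - name: TOTAL_CHAOS_DURATION
              value: '60'

The experiment mounts the cloud-secret Secret at /tmp/ and points AWS_SHARED_CREDENTIALS_FILE at /tmp/cloud_config.yml, so that Secret is expected to carry a cloud_config.yml key holding a standard AWS shared-credentials file.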

View File

@@ -1,4 +1,101 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaches disk from the VM and then re-attaches disk to the VM
kind: ChaosExperiment
metadata:
name: azure-disk-loss
labels:
name: azure-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for executing commands inside the target container via pods/exec
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name azure-disk-loss
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: LIB
value: 'litmus'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# provide the resource group of the instance
- name: RESOURCE_GROUP
value: ''
# accepts enable/disable, default is disable
- name: SCALE_SET
value: ''
# provide the virtual disk names (comma separated if multiple)
- name: VIRTUAL_DISK_NAMES
value: ''
# provide the sequence type for the run. Options: serial/parallel
- name: SEQUENCE
value: 'parallel'
# provide the path to aks credentials mounted from secret
- name: AZURE_AUTH_LOCATION
value: '/tmp/azure.auth'
labels:
name: azure-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Terminating azure VM instance
@@ -96,100 +193,3 @@ spec:
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaches disk from the VM and then re-attaches disk to the VM
kind: ChaosExperiment
metadata:
name: azure-disk-loss
labels:
name: azure-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for executing commands inside the target container via pods/exec
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name azure-disk-loss
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: LIB
value: 'litmus'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# provide the resource group of the instance
- name: RESOURCE_GROUP
value: ''
# accepts enable/disable, default is disable
- name: SCALE_SET
value: ''
# provide the virtual disk names (comma separated if multiple)
- name: VIRTUAL_DISK_NAMES
value: ''
# provide the sequence type for the run. Options: serial/parallel
- name: SEQUENCE
value: 'parallel'
# provide the path to aks credentials mounted from secret
- name: AZURE_AUTH_LOCATION
value: '/tmp/azure.auth'
labels:
name: azure-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
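Similarly, the azure-disk-loss experiment above would be driven by a ChaosEngine that supplies the resource group and disk names. This is a minimal sketch with placeholder values only:

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: azure-disk-chaos                        # hypothetical engine name
  namespace: default
spec:
  engineState: 'active'
  chaosServiceAccount: azure-disk-loss-sa       # placeholder service account
  experiments:
    - name: azure-disk-loss
      spec:
        components:
          env:
            - name: RESOURCE_GROUP
              value: 'my-resource-group'        # placeholder resource group
            - name: VIRTUAL_DISK_NAMES
              value: 'disk-01,disk-02'          # placeholder disk names (comma separated)
            - name: TOTAL_CHAOS_DURATION
              value: '30'

Here the Azure credentials come from the same cloud-secret mount: AZURE_AUTH_LOCATION is /tmp/azure.auth, so the Secret is expected to contain an azure.auth key.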

View File

@@ -1,4 +1,105 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Causes loss of a non-boot storage persistent disk from a GCP VM instance for a specified duration of time
kind: ChaosExperiment
metadata:
name: gcp-vm-disk-loss
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for executing commands inside the target container via pods/exec
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-disk-loss
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: LIB
value: 'litmus'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# parallel or serial; determines how chaos is injected
- name: SEQUENCE
value: 'parallel'
# set the GCP project id
- name: GCP_PROJECT_ID
value: ''
# set the disk volume name(s) as comma separated values
# eg. volume1,volume2,...
- name: DISK_VOLUME_NAMES
value: ''
# set the disk zone(s) as comma separated values in the corresponding
# order of DISK_VOLUME_NAMES
# eg. zone1,zone2,...
- name: DISK_ZONES
value: ''
# set the device name(s) as comma separated values in the corresponding
# order of DISK_VOLUME_NAMES
# eg. device1,device2,...
- name: DEVICE_NAMES
value: ''
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stops GCP VM instances and GKE nodes for a specified duration of time and later restarts them
@@ -103,104 +204,3 @@ spec:
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Causes loss of a non-boot storage persistent disk from a GCP VM instance for a specified duration of time
kind: ChaosExperiment
metadata:
name: gcp-vm-disk-loss
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap & secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for executing commands inside the target container via pods/exec
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-disk-loss
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: LIB
value: 'litmus'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# parallel or serial; determines how chaos is injected
- name: SEQUENCE
value: 'parallel'
# set the GCP project id
- name: GCP_PROJECT_ID
value: ''
# set the disk volume name(s) as comma separated values
# eg. volume1,volume2,...
- name: DISK_VOLUME_NAMES
value: ''
# set the disk zone(s) as comma separated values in the corresponding
# order of DISK_VOLUME_NAMES
# eg. zone1,zone2,...
- name: DISK_ZONES
value: ''
# set the device name(s) as comma separated values in the corresponding
# order of DISK_VOLUME_NAMES
# eg. device1,device2,...
- name: DEVICE_NAMES
value: ''
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
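A corresponding ChaosEngine for gcp-vm-disk-loss has to supply the project ID plus aligned, comma-separated lists of disk names, zones, and device names. The values below are placeholders:

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: gcp-disk-chaos                          # hypothetical engine name
  namespace: default
spec:
  engineState: 'active'
  chaosServiceAccount: gcp-vm-disk-loss-sa      # placeholder service account
  experiments:
    - name: gcp-vm-disk-loss
      spec:
        components:
          env:
            - name: GCP_PROJECT_ID
              value: 'my-gcp-project'           # placeholder project ID
            - name: DISK_VOLUME_NAMES
              value: 'disk-1,disk-2'            # placeholder disks
            - name: DISK_ZONES
              value: 'us-central1-a,us-central1-b'   # zones in the same order as the disks
            - name: DEVICE_NAMES
              value: 'device-1,device-2'        # device names in the same order as the disks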

File diff suppressed because it is too large

View File

@@ -1,12 +1,12 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching an ebs volume from ec2 instance.
Stopping an EC2 instance identified by tag.
kind: ChaosExperiment
metadata:
name: ebs-loss-by-id
name: ec2-terminate-by-tag
labels:
name: ebs-loss-by-id
name: ec2-terminate-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
@@ -42,11 +42,15 @@ spec:
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get","list"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ebs-loss-by-id
- ./experiments -name ec2-terminate-by-tag
command:
- /bin/bash
env:
@@ -60,12 +64,20 @@ spec:
- name: RAMP_TIME
value: ''
- name: EBS_VOLUME_ID
- name: INSTANCE_TAG
value: ''
# enable it if the target instance is a part of self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: 'disable'
- name: REGION
value: ''
# Target the percentage of instances filtered from the tag
- name: INSTANCE_AFFECTED_PERC
value: ''
- name: SEQUENCE
value: 'parallel'
@@ -79,7 +91,7 @@ spec:
value: 'litmus'
labels:
name: ebs-loss-by-id
name: ec2-terminate-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
@@ -191,12 +203,12 @@ spec:
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stopping an EC2 instance identified by tag.
Detaching an ebs volume from ec2 instance.
kind: ChaosExperiment
metadata:
name: ec2-terminate-by-tag
name: ebs-loss-by-id
labels:
name: ec2-terminate-by-tag
name: ebs-loss-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
@@ -232,15 +244,11 @@ spec:
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get","list"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ec2-terminate-by-tag
- ./experiments -name ebs-loss-by-id
command:
- /bin/bash
env:
@@ -254,20 +262,12 @@ spec:
- name: RAMP_TIME
value: ''
- name: INSTANCE_TAG
- name: EBS_VOLUME_ID
value: ''
# enable it if the target instance is a part of self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: 'disable'
- name: REGION
value: ''
# Target the percentage of instances filtered from the tag
- name: INSTANCE_AFFECTED_PERC
value: ''
- name: SEQUENCE
value: 'parallel'
@@ -281,7 +281,7 @@ spec:
value: 'litmus'
labels:
name: ec2-terminate-by-tag
name: ebs-loss-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
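This hunk swaps the order of ebs-loss-by-id and ec2-terminate-by-tag within the bundled manifest. The tag-based experiment would be invoked roughly as follows, a sketch with placeholder values; the key:value tag form is an assumption, not defined in this diff:

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: ec2-terminate-chaos                     # hypothetical engine name
  namespace: default
spec:
  engineState: 'active'
  chaosServiceAccount: ec2-terminate-by-tag-sa  # placeholder service account
  experiments:
    - name: ec2-terminate-by-tag
      spec:
        components:
          env:
            - name: INSTANCE_TAG
              value: 'team:chaos'               # placeholder tag, assumed key:value form
            - name: REGION
              value: 'us-east-1'                # placeholder region
            - name: INSTANCE_AFFECTED_PERC
              value: '100'                      # act on every matching instance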

File diff suppressed because it is too large