merges master

Signed-off-by: neelanjan00 <neelanjan.manna@harness.io>
neelanjan00
2023-06-20 12:31:59 +05:30
14 changed files with 5180 additions and 137 deletions

charts/gcp/experiments.yaml

@@ -0,0 +1,366 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stops GCP VM instances and GKE nodes filtered by a label for a specified duration and later restarts them
kind: ChaosExperiment
metadata:
name: gcp-vm-instance-stop-by-label
labels:
name: gcp-vm-instance-stop-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap and secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get","list"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-instance-stop-by-label
command:
- /bin/bash
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
- name: SEQUENCE
value: 'parallel'
# GCP project ID to which the vm instances belong
- name: GCP_PROJECT_ID
value: ''
# Label of the target vm instance(s)
- name: INSTANCE_LABEL
value: ''
# Zone in which the target vm instance(s) filtered by the label exist
# all the instances should lie in a single zone
- name: ZONES
value: ''
# enable it if the target instance is a part of a managed instance group
- name: MANAGED_INSTANCE_GROUP
value: 'disable'
# set the percentage value of the instances with the given label
# which should be targeted as part of the chaos injection
- name: INSTANCE_AFFECTED_PERC
value: ''
labels:
name: gcp-vm-instance-stop-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stops GCP VM instances and GKE nodes for a specified duration and later restarts them
kind: ChaosExperiment
metadata:
name: gcp-vm-instance-stop
labels:
name: gcp-vm-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap and secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get","list"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-instance-stop
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# parallel or serial; determines how the VM instances are terminated, all at once or one at a time
- name: SEQUENCE
value: 'parallel'
# period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# enable or disable; shall be set to enable if the target instances are a part of a managed instance group
- name: MANAGED_INSTANCE_GROUP
value: 'disable'
# Instance name of the target vm instance(s)
# Multiple instance names can be provided as comma separated values ex: instance1,instance2
- name: VM_INSTANCE_NAMES
value: ''
# GCP project ID to which the vm instances belong
- name: GCP_PROJECT_ID
value: ''
# Instance zone(s) of the target vm instance(s)
# If more than one instance is targeted, provide the zone for each instance in the
# order of its respective instance name in VM_INSTANCE_NAMES as comma separated values ex: zone1,zone2
- name: ZONES
value: ''
labels:
name: gcp-vm-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Causes loss of non-boot storage persistent disks, filtered by a label, from a GCP VM instance for a specified duration before attaching them back
kind: ChaosExperiment
metadata:
name: gcp-vm-disk-loss-by-label
labels:
name: gcp-vm-disk-loss-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap and secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-disk-loss-by-label
command:
- /bin/bash
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
# set the GCP project id
- name: GCP_PROJECT_ID
value: ''
# set the zone in which all the disks are created
# all the disks must exist in the same zone
- name: ZONES
value: ''
# set the label of the target disk volumes
- name: DISK_VOLUME_LABEL
value: ''
# set the percentage value of the disks with the given label
# which should be targeted as part of the chaos injection
- name: DISK_AFFECTED_PERC
value: ''
labels:
name: gcp-vm-disk-loss-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Causes loss of a non-boot storage persistent disk from a GCP VM instance for a specified duration before attaching it back
kind: ChaosExperiment
metadata:
name: gcp-vm-disk-loss
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap and secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-disk-loss
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# parallel or serial; determines how chaos is injected
- name: SEQUENCE
value: 'parallel'
# set the GCP project id
- name: GCP_PROJECT_ID
value: ''
# set the disk volume name(s) as comma separated values
# eg. volume1,volume2,...
- name: DISK_VOLUME_NAMES
value: ''
# set the disk zone(s) as comma separated values in the corresponding
# order of DISK_VOLUME_NAMES
# eg. zone1,zone2,...
- name: ZONES
value: ''
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
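For context, a ChaosEngine is what actually triggers these faults. A minimal sketch for the by-label instance-stop fault above might look like the following; the engine name, namespace, service account, project ID, label, and zone are illustrative assumptions, and the cloud-secret referenced by the experiment must already hold valid GCP service-account credentials.

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: gcp-vm-instance-stop-engine     # illustrative name
  namespace: litmus                     # assumed install namespace
spec:
  engineState: 'active'
  chaosServiceAccount: litmus-admin     # assumed service account holding the permissions above
  experiments:
    - name: gcp-vm-instance-stop-by-label
      spec:
        components:
          env:
            - name: GCP_PROJECT_ID
              value: 'my-gcp-project'      # hypothetical project ID
            - name: INSTANCE_LABEL
              value: 'role:chaos-target'   # hypothetical label
            - name: ZONES
              value: 'us-central1-a'       # all targeted instances must lie in this zone
            - name: TOTAL_CHAOS_DURATION
              value: '60'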

File diff suppressed because it is too large


@@ -0,0 +1,442 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stopping an EC2 instance identified by tag.
kind: ChaosExperiment
metadata:
name: ec2-terminate-by-tag
labels:
name: ec2-terminate-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap and secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for creating and managing the pods/exec subresource to execute commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get","list"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ec2-terminate-by-tag
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
- name: INSTANCE_TAG
value: ''
# enable it if the target instance is a part of a self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: 'disable'
- name: REGION
value: ''
# percentage of the instances filtered by the tag to be targeted
- name: INSTANCE_AFFECTED_PERC
value: ''
- name: SEQUENCE
value: 'parallel'
# Provide the path of aws credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
labels:
name: ec2-terminate-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stopping an EC2 instance identified by ID.
kind: ChaosExperiment
metadata:
name: ec2-terminate-by-id
labels:
name: ec2-terminate-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap and secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for creating and managing the pods/exec subresource to execute commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get","list"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ec2-terminate-by-id
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# enable it if the target instance is a part of a self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: 'disable'
# Instance ID of the target ec2 instance
# Multiple IDs can also be provided as comma separated values ex: id1,id2
- name: EC2_INSTANCE_ID
value: ''
- name: REGION
value: ''
- name: SEQUENCE
value: 'parallel'
# Provide the path of aws credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
labels:
name: ec2-terminate-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Execute aws-az chaos to detach the target zones from the load balancer
kind: ChaosExperiment
metadata:
name: aws-az-chaos
labels:
name: aws-az-chaos
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap and secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for creating and managing the pods/exec subresource to execute commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
image: "litmuschaos/py-runner:latest"
imagePullPolicy: Always
args:
- -c
- python3 -u experiment -name aws-az-chaos
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: LIB
value: 'litmus'
- name: LOAD_BALANCER_NAME
value: ''
- name: LOAD_BALANCER_ZONES
value: ''
- name: LOAD_BALANCERNAME_ARN
value: 'na'
- name: AWS_SHARED_CREDENTIALS_FILE
value: "/tmp/cloud_config.yml"
- name: RAMP_TIME
value: ''
labels:
name: aws-az-chaos
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching an EBS volume from an EC2 instance.
kind: ChaosExperiment
metadata:
name: ebs-loss-by-tag
labels:
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap and secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for creating and managing the pods/exec subresource to execute commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ebs-loss-by-tag
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: RAMP_TIME
value: ''
- name: EBS_VOLUME_TAG
value: ''
- name: REGION
value: ''
- name: SEQUENCE
value: 'parallel'
- name: VOLUME_AFFECTED_PERC
value: ''
# Provide the path of aws credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
labels:
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching an EBS volume from an EC2 instance.
kind: ChaosExperiment
metadata:
name: ebs-loss-by-id
labels:
name: ebs-loss-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmap and secret details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for creating and managing the pods/exec subresource to execute commands inside the target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ebs-loss-by-id
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
- name: EBS_VOLUME_ID
value: ''
- name: REGION
value: ''
- name: SEQUENCE
value: 'parallel'
# Provide the path of aws credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
labels:
name: ebs-loss-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
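All of the AWS faults above mount cloud-secret at /tmp/ and point AWS_SHARED_CREDENTIALS_FILE at /tmp/cloud_config.yml. A minimal sketch of that secret follows; the namespace is an assumption and the credential values are placeholders, with the key name simply matching the path the experiments expect.

apiVersion: v1
kind: Secret
metadata:
  name: cloud-secret
  namespace: litmus                 # assumed install namespace
type: Opaque
stringData:
  cloud_config.yml: |               # mounted by the experiment pod at /tmp/cloud_config.yml
    [default]
    aws_access_key_id = <ACCESS_KEY_ID>            # placeholder
    aws_secret_access_key = <SECRET_ACCESS_KEY>    # placeholder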


@@ -0,0 +1,605 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It can target random pods with a Spring Boot application and allows configuring the assaults to inject exceptions
kind: ChaosExperiment
metadata:
name: spring-boot-exceptions
labels:
name: spring-boot-exceptions
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [ "" ]
resources: [ "pods" ]
verbs: [ "create","delete","get","list","patch","update", "deletecollection" ]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create","get","list","patch","update" ]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [ "" ]
resources: [ "pods/log" ]
verbs: [ "get","list","watch" ]
# for creating and managing the pods/exec subresource to execute commands inside the target container
- apiGroups: [ "" ]
resources: [ "pods/exec" ]
verbs: [ "get","list","create" ]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: [ "batch" ]
resources: [ "jobs" ]
verbs: [ "create","list","get","delete","deletecollection" ]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: [ "litmuschaos.io" ]
resources: [ "chaosengines","chaosexperiments","chaosresults" ]
verbs: [ "create","list","get","patch","update","delete" ]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name spring-boot-exceptions
command:
- /bin/bash
env:
# Type of raised exception
- name: CM_EXCEPTIONS_TYPE
value: 'java.lang.IllegalArgumentException'
# Argument of raised exception
- name: CM_EXCEPTIONS_ARGUMENTS
value: 'java.lang.String:custom illegal argument exception'
# port of the spring boot application
- name: CM_PORT
value: ''
# number of requests to be attacked
# a value of n means every nth request will be affected
- name: CM_LEVEL
value: '1'
# it limits watched packages/classes/methods
- name: CM_WATCHED_CUSTOM_SERVICES
value: ''
# provide name of watcher
# it supports controller, restController, service, repository, component, webClient
- name: CM_WATCHERS
value: 'restController'
- name: TOTAL_CHAOS_DURATION
value: '30'
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''
## Period to wait before and after injection of chaos
- name: RAMP_TIME
value: ''
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: spring-boot-exceptions
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It can target random pods with a Spring Boot application and allows configuring the assaults to inject cpu-stress
kind: ChaosExperiment
metadata:
name: spring-boot-cpu-stress
labels:
name: spring-boot-cpu-stress
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [ "" ]
resources: [ "pods" ]
verbs: [ "create","delete","get","list","patch","update", "deletecollection" ]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create","get","list","patch","update" ]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [ "" ]
resources: [ "pods/log" ]
verbs: [ "get","list","watch" ]
# for creating and managing the pods/exec subresource to execute commands inside the target container
- apiGroups: [ "" ]
resources: [ "pods/exec" ]
verbs: [ "get","list","create" ]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: [ "batch" ]
resources: [ "jobs" ]
verbs: [ "create","list","get","delete","deletecollection" ]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: [ "litmuschaos.io" ]
resources: [ "chaosengines","chaosexperiments","chaosresults" ]
verbs: [ "create","list","get","patch","update","delete" ]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name spring-boot-cpu-stress
command:
- /bin/bash
env:
# fraction of cpu to be stressed (0.95 equals 95%)
# it supports values in the range [0.1,1.0]
- name: CPU_LOAD_FRACTION
value: '0.9'
# port of the spring boot application
- name: CM_PORT
value: ''
# number of requests to be attacked
# a value of n means every nth request will be affected
- name: CM_LEVEL
value: '1'
# it limits watched packages/classes/methods
- name: CM_WATCHED_CUSTOM_SERVICES
value: ''
# provide name of watcher
# it supports controller, restController, service, repository, component, webClient
- name: CM_WATCHERS
value: 'restController'
- name: TOTAL_CHAOS_DURATION
value: '30'
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''
## Period to wait before and after injection of chaos
- name: RAMP_TIME
value: ''
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: spring-boot-cpu-stress
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It can target random pods with a Spring Boot application and allows configuring the assaults to inject memory-stress
kind: ChaosExperiment
metadata:
name: spring-boot-memory-stress
labels:
name: spring-boot-memory-stress
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [ "" ]
resources: [ "pods" ]
verbs: [ "create","delete","get","list","patch","update", "deletecollection" ]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create","get","list","patch","update" ]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [ "" ]
resources: [ "pods/log" ]
verbs: [ "get","list","watch" ]
# for creating and managing the pods/exec subresource to execute commands inside the target container
- apiGroups: [ "" ]
resources: [ "pods/exec" ]
verbs: [ "get","list","create" ]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: [ "batch" ]
resources: [ "jobs" ]
verbs: [ "create","list","get","delete","deletecollection" ]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: [ "litmuschaos.io" ]
resources: [ "chaosengines","chaosexperiments","chaosresults" ]
verbs: [ "create","list","get","patch","update","delete" ]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name spring-boot-memory-stress
command:
- /bin/bash
env:
# fraction of memory to be stressed (0.70 equals 70%)
# it supports values in the range [0.01,0.95]
- name: MEMORY_FILL_FRACTION
value: '0.70'
# port of the spring boot application
- name: CM_PORT
value: ''
# number of requests to be attacked
# a value of n means every nth request will be affected
- name: CM_LEVEL
value: '1'
# it limits watched packages/classes/methods
- name: CM_WATCHED_CUSTOM_SERVICES
value: ''
# provide name of watcher
# it supports controller, restController, service, repository, component, webClient
- name: CM_WATCHERS
value: 'restController'
- name: TOTAL_CHAOS_DURATION
value: '30'
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''
## Period to wait before and after injection of chaos
- name: RAMP_TIME
value: ''
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: spring-boot-memory-stress
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It can target random pods with a Spring Boot application and allows configuring the assaults to inject network latency
kind: ChaosExperiment
metadata:
name: spring-boot-latency
labels:
name: spring-boot-latency
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [ "" ]
resources: [ "pods" ]
verbs: [ "create","delete","get","list","patch","update", "deletecollection" ]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create","get","list","patch","update" ]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [ "" ]
resources: [ "pods/log" ]
verbs: [ "get","list","watch" ]
# for creating and managing the pods/exec subresource to execute commands inside the target container
- apiGroups: [ "" ]
resources: [ "pods/exec" ]
verbs: [ "get","list","create" ]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: [ "batch" ]
resources: [ "jobs" ]
verbs: [ "create","list","get","delete","deletecollection" ]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: [ "litmuschaos.io" ]
resources: [ "chaosengines","chaosexperiments","chaosresults" ]
verbs: [ "create","list","get","patch","update","delete" ]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name spring-boot-latency
command:
- /bin/bash
env:
# provide the latency (ms)
- name: LATENCY
value: '2000'
# port of the spring boot application
- name: CM_PORT
value: ''
# number of requests to be attacked
# a value of n means every nth request will be affected
- name: CM_LEVEL
value: '1'
# it limits watched packages/classes/methods
- name: CM_WATCHED_CUSTOM_SERVICES
value: ''
# provide name of watcher
# it supports controller, restController, service, repository, component, webClient
- name: CM_WATCHERS
value: 'restController'
- name: TOTAL_CHAOS_DURATION
value: '30'
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''
## Period to wait before and after injection of chaos
- name: RAMP_TIME
value: ''
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: spring-boot-latency
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It can target random pods with a Spring Boot application and allows configuring the assaults to inject app-kill
kind: ChaosExperiment
metadata:
name: spring-boot-app-kill
labels:
name: spring-boot-app-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [ "" ]
resources: [ "pods" ]
verbs: [ "create","delete","get","list","patch","update", "deletecollection" ]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create","get","list","patch","update" ]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [ "" ]
resources: [ "pods/log" ]
verbs: [ "get","list","watch" ]
# for creating and managing to execute commands inside target container
- apiGroups: [ "" ]
resources: [ "pods/exec" ]
verbs: [ "get","list","create" ]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: [ "batch" ]
resources: [ "jobs" ]
verbs: [ "create","list","get","delete","deletecollection" ]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: [ "litmuschaos.io" ]
resources: [ "chaosengines","chaosexperiments","chaosresults" ]
verbs: [ "create","list","get","patch","update","delete" ]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name spring-boot-app-kill
command:
- /bin/bash
env:
# port of the spring boot application
- name: CM_PORT
value: ''
# number of requests to be attacked
# a value of n means every nth request will be affected
- name: CM_LEVEL
value: '1'
# it limits watched packages/classes/methods
- name: CM_WATCHED_CUSTOM_SERVICES
value: ''
# provide name of watcher
# it supports controller, restController, service, repository, component, webClient
- name: CM_WATCHERS
value: 'restController'
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''
## Period to wait before and after injection of chaos
- name: RAMP_TIME
value: ''
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: spring-boot-app-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It can target random pods with a Spring Boot application and allows configuring the assaults to inject multiple faults (app-kill, latency, memory-stress, cpu-stress, and exceptions)
kind: ChaosExperiment
metadata:
name: spring-boot-faults
labels:
name: spring-boot-faults
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [ "" ]
resources: [ "pods" ]
verbs: [ "create","delete","get","list","patch","update", "deletecollection" ]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create","get","list","patch","update" ]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [ "" ]
resources: [ "pods/log" ]
verbs: [ "get","list","watch" ]
# for creating and managing the pods/exec subresource to execute commands inside the target container
- apiGroups: [ "" ]
resources: [ "pods/exec" ]
verbs: [ "get","list","create" ]
# for configuring and monitoring the experiment job by the chaos-runner pod
- apiGroups: [ "batch" ]
resources: [ "jobs" ]
verbs: [ "create","list","get","delete","deletecollection" ]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: [ "litmuschaos.io" ]
resources: [ "chaosengines","chaosexperiments","chaosresults" ]
verbs: [ "create","list","get","patch","update","delete" ]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name spring-boot-faults
command:
- /bin/bash
env:
# it enables spring app-kill fault
- name: CM_KILL_APPLICATION_ACTIVE
value: ''
# it enables spring-boot latency fault
- name: CM_LATENCY_ACTIVE
value: ''
# provide the latency (ms)
# it is applicable when latency is active
- name: LATENCY
value: '2000'
# it enables spring-boot memory stress fault
- name: CM_MEMORY_ACTIVE
value: ''
# fraction of memory to be stressed (0.70 equals 70%)
# it supports values in the range [0.01,0.95]
# it is applicable when memory is active
- name: MEMORY_FILL_FRACTION
value: '0.70'
# it enables spring-boot cpu stress fault
- name: CM_CPU_ACTIVE
value: ''
# fraction of cpu to be stressed (0.95 equals 95%)
# it supports values in the range [0.1,1.0]
# it is applicable when cpu is active
- name: CPU_LOAD_FRACTION
value: '0.9'
# it enables spring-boot exceptions fault
- name: CM_EXCEPTIONS_ACTIVE
value: ''
# Type of raised exception
# it is applicable when exceptions is active
- name: CM_EXCEPTIONS_TYPE
value: 'java.lang.IllegalArgumentException'
# Argument of raised exception
# it is applicable when exceptions is active
- name: CM_EXCEPTIONS_ARGUMENTS
value: 'java.lang.String:custom illegal argument exception'
# port of the spring boot application
- name: CM_PORT
value: ''
# number of requests to be attacked
# a value of n means every nth request will be affected
- name: CM_LEVEL
value: '1'
# it limits watched packages/classes/methods
- name: CM_WATCHED_CUSTOM_SERVICES
value: ''
# provide name of watcher
# it supports controller, restController, service, repository, component, webClient
- name: CM_WATCHERS
value: 'restController'
- name: TOTAL_CHAOS_DURATION
value: '30'
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ''
## Period to wait before and after injection of chaos
- name: RAMP_TIME
value: ''
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: spring-boot-faults
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
---
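The CM_-prefixed variables in these manifests map to Chaos Monkey for Spring Boot assault settings, so the faults assume the target application ships with that instrumentation enabled and reachable on CM_PORT. A minimal ChaosEngine sketch for the latency fault follows, with the engine name, namespaces, app label, and port all being illustrative assumptions:

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: spring-boot-latency-engine    # illustrative name
  namespace: litmus                   # assumed install namespace
spec:
  engineState: 'active'
  chaosServiceAccount: litmus-admin   # assumed service account
  appinfo:
    appns: 'spring-apps'              # hypothetical app namespace
    applabel: 'app=shopping-cart'     # hypothetical pod label
    appkind: 'deployment'
  experiments:
    - name: spring-boot-latency
      spec:
        components:
          env:
            - name: CM_PORT
              value: '8080'           # hypothetical port the Chaos Monkey endpoints listen on
            - name: LATENCY
              value: '2000'
            - name: TOTAL_CHAOS_DURATION
              value: '30'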


@@ -42,8 +42,8 @@ spec:
command: [sh, -c]
args:
- "kubectl apply -f https://hub.litmuschaos.io/api/chaos/master?file=charts/generic/experiments.yaml -n
{{workflow.parameters.adminModeNamespace}} ; sleep 30"
- name: pod-network-loss
inputs:
artifacts:
@@ -84,35 +84,35 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
components:
env:
- name: TOTAL_CHAOS_DURATION
value: '90'
- name: NETWORK_INTERFACE
value: 'eth0'
- name: NETWORK_PACKET_LOSS_PERCENTAGE
value: '100'
- name: CONTAINER_RUNTIME
value: 'containerd'
- name: SOCKET_PATH
value: '/run/containerd/containerd.sock'
container:
image: litmuschaos/litmus-checker:latest
args: ["-file=/tmp/chaosengine.yaml","-saveName=/tmp/engine-name"]
- name: delete-application
container:
image: litmuschaos/litmus-app-deployer:latest
args: ["-namespace=bank","-typeName=resilient","-operation=delete", "-app=bank-of-anthos"]
- name: revert-chaos
container:
image: litmuschaos/k8s:latest
command: [sh, -c]
args:
[
"kubectl delete chaosengine pod-network-loss-chaos -n {{workflow.parameters.adminModeNamespace}}",
]


@@ -88,8 +88,8 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
components:
env:
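Taken together, the probe edits in these workflow files migrate runProperties from bare integers with implicit units to explicit duration strings (and, in the later hunks, rename initialDelaySeconds to initialDelay). A representative before/after sketch, using only values that appear in the hunks:

# before: bare integers, units implicit
runProperties:
  probeTimeout: 1000
  interval: 1
  attempt: 2
  probePollingInterval: 1
  initialDelaySeconds: 10

# after: explicit duration strings
runProperties:
  probeTimeout: 1s
  interval: 100ms
  attempt: 2
  probePollingInterval: 1s
  initialDelay: 10s

Note that attempt stays a plain count; only the time-valued fields take the duration format.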


@@ -82,8 +82,8 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 1
components:
env:


@@ -86,8 +86,8 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 1
components:
env:


@@ -94,10 +94,10 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-probe-success"
type: "promProbe"
promProbe/inputs:
@@ -108,8 +108,8 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
components:
env:
@@ -164,10 +164,10 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-probe-success"
type: "promProbe"
promProbe/inputs:
@@ -178,8 +178,8 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
components:
env:
@@ -235,10 +235,10 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 12
+interval: 100ms
attempt: 3
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-probe-success"
type: "promProbe"
promProbe/inputs:
@@ -249,8 +249,8 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
components:
env:
@@ -308,10 +308,10 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 12
+interval: 100ms
attempt: 3
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-probe-success"
type: "promProbe"
promProbe/inputs:
@@ -322,8 +322,8 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
components:
env:
@@ -382,10 +382,10 @@ spec:
operation: "present"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 1
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-probe-success"
type: "promProbe"
promProbe/inputs:
@@ -396,8 +396,8 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
components:
env:


@@ -98,10 +98,10 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-probe-success"
type: "promProbe"
promProbe/inputs:
@@ -112,8 +112,8 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
components:
env:
@@ -168,10 +168,10 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-probe-success"
type: "promProbe"
promProbe/inputs:
@@ -182,8 +182,8 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
components:
env:
@@ -239,10 +239,10 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 12
+interval: 100ms
attempt: 3
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-probe-success"
type: "promProbe"
promProbe/inputs:
@@ -253,8 +253,8 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
components:
env:
@@ -312,10 +312,10 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 12
+interval: 100ms
attempt: 3
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-probe-success"
type: "promProbe"
promProbe/inputs:
@@ -326,8 +326,8 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
components:
env:
@@ -386,10 +386,10 @@ spec:
operation: "present"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 1
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-probe-success"
type: "promProbe"
promProbe/inputs:
@@ -400,8 +400,8 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
components:
env:


@@ -94,10 +94,10 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-benchmark"
type: "cmdProbe"
cmdProbe/inputs:
@@ -108,10 +108,10 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
-initialDelaySeconds: 10
+initialDelay: 10s
components:
env:
#number of cpu cores to be consumed
@@ -165,10 +165,10 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-benchmark"
type: "cmdProbe"
cmdProbe/inputs:
@@ -179,10 +179,10 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
-initialDelaySeconds: 10
+initialDelay: 10s
components:
env:
- name: MEMORY_CONSUMPTION
@@ -237,10 +237,10 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 12
+interval: 100ms
attempt: 3
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-benchmark"
type: "cmdProbe"
cmdProbe/inputs:
@@ -251,10 +251,10 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
-initialDelaySeconds: 2
+initialDelay: 2s
components:
env:
- name: TOTAL_CHAOS_DURATION
@@ -310,10 +310,10 @@ spec:
responseCode: "200"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 12
+interval: 100ms
attempt: 3
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-benchmark"
type: "cmdProbe"
cmdProbe/inputs:
@@ -324,25 +324,25 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
-initialDelaySeconds: 2
+initialDelay: 2s
components:
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: NETWORK_INTERFACE
value: 'eth0'
- name: NETWORK_PACKET_LOSS_PERCENTAGE
value: '100'
- name: CONTAINER_RUNTIME
value: 'containerd'
- name: SOCKET_PATH
value: '/run/containerd/containerd.sock'
container:
image: litmuschaos/litmus-checker:latest
args: ["-file=/tmp/chaosengine.yaml","-saveName=/tmp/engine-name"]
- name: disk-fill
inputs:
@@ -385,10 +385,10 @@ spec:
operation: "present"
mode: "Continuous"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 1
-probePollingInterval: 1
+probePollingInterval: 1s
- name: "check-benchmark"
type: "cmdProbe"
cmdProbe/inputs:
@@ -399,10 +399,10 @@ spec:
value: "100"
mode: "Edge"
runProperties:
-probeTimeout: 1000
+probeTimeout: 1s
-interval: 1
+interval: 100ms
attempt: 2
-initialDelaySeconds: 1
+initialDelay: 1s
components:
env:
- name: FILL_PERCENTAGE
@@ -410,7 +410,7 @@ spec:
- name: TARGET_CONTAINER
value: ''
- name: TOTAL_CHAOS_DURATION
value: '30'
container:
image: litmuschaos/litmus-checker:latest
args: ["-file=/tmp/chaosengine.yaml","-saveName=/tmp/engine-name"]
@@ -428,13 +428,13 @@ spec:
- name: delete-loadtest
container:
image: litmuschaos/litmus-app-deployer:latest
args: ["-namespace=loadtest","-operation=delete", "-app=loadtest"]
- name: revert-chaos
container:
image: litmuschaos/k8s:latest
command: [sh, -c]
args:
[
"kubectl delete chaosengine pod-memory-hog-chaos pod-cpu-hog-chaos catalogue-pod-delete-chaos pod-network-loss-chaos -n {{workflow.parameters.adminModeNamespace}}",
]


@@ -98,10 +98,10 @@ spec:
                   responseCode: "200"
               mode: "Continuous"
               runProperties:
-                probeTimeout: 1000
-                interval: 1
+                probeTimeout: 1s
+                interval: 100ms
                 attempt: 2
-                probePollingInterval: 1
+                probePollingInterval: 1s
             - name: "check-benchmark"
               type: "cmdProbe"
               cmdProbe/inputs:
@@ -112,10 +112,10 @@ spec:
                   value: "100"
               mode: "Edge"
               runProperties:
-                probeTimeout: 1000
-                interval: 1
+                probeTimeout: 1s
+                interval: 100ms
                 attempt: 2
-                initialDelaySeconds: 10
+                initialDelay: 10s
           components:
             env:
               # number of cpu cores to be consumed
@@ -169,10 +169,10 @@ spec:
                   responseCode: "200"
               mode: "Continuous"
               runProperties:
-                probeTimeout: 1000
-                interval: 1
+                probeTimeout: 1s
+                interval: 100ms
                 attempt: 2
-                probePollingInterval: 1
+                probePollingInterval: 1s
             - name: "check-benchmark"
               type: "cmdProbe"
               cmdProbe/inputs:
@@ -183,10 +183,10 @@ spec:
                   value: "100"
               mode: "Edge"
               runProperties:
-                probeTimeout: 1000
-                interval: 1
+                probeTimeout: 1s
+                interval: 100ms
                 attempt: 2
-                initialDelaySeconds: 10
+                initialDelay: 10s
           components:
             env:
               - name: MEMORY_CONSUMPTION
@@ -241,10 +241,10 @@ spec:
                   responseCode: "200"
               mode: "Continuous"
               runProperties:
-                probeTimeout: 1000
-                interval: 12
+                probeTimeout: 1s
+                interval: 100ms
                 attempt: 3
-                probePollingInterval: 1
+                probePollingInterval: 1s
             - name: "check-benchmark"
               type: "cmdProbe"
               cmdProbe/inputs:
@@ -255,10 +255,10 @@ spec:
                   value: "100"
               mode: "Edge"
               runProperties:
-                probeTimeout: 1000
-                interval: 1
+                probeTimeout: 1s
+                interval: 100ms
                 attempt: 2
-                initialDelaySeconds: 2
+                initialDelay: 2s
           components:
             env:
               - name: TOTAL_CHAOS_DURATION
@@ -314,10 +314,10 @@ spec:
                   responseCode: "200"
               mode: "Continuous"
               runProperties:
-                probeTimeout: 1000
-                interval: 12
+                probeTimeout: 1s
+                interval: 100ms
                 attempt: 3
-                probePollingInterval: 1
+                probePollingInterval: 1s
             - name: "check-benchmark"
               type: "cmdProbe"
               cmdProbe/inputs:
@@ -328,10 +328,10 @@ spec:
                   value: "100"
               mode: "Edge"
               runProperties:
-                probeTimeout: 1000
-                interval: 1
+                probeTimeout: 1s
+                interval: 100ms
                 attempt: 2
-                initialDelaySeconds: 2
+                initialDelay: 2s
           components:
             env:
               - name: TOTAL_CHAOS_DURATION
@@ -389,10 +389,10 @@ spec:
                   operation: "present"
               mode: "Continuous"
               runProperties:
-                probeTimeout: 1000
-                interval: 1
+                probeTimeout: 1s
+                interval: 100ms
                 attempt: 1
-                probePollingInterval: 1
+                probePollingInterval: 1s
             - name: "check-benchmark"
               type: "cmdProbe"
               cmdProbe/inputs:
@@ -403,10 +403,10 @@ spec:
                   value: "100"
               mode: "Edge"
               runProperties:
-                probeTimeout: 1000
-                interval: 1
+                probeTimeout: 1s
+                interval: 100ms
                 attempt: 2
-                initialDelaySeconds: 1
+                initialDelay: 1s
           components:
             env:
               - name: FILL_PERCENTAGE
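Every hunk in this file applies the same probe-schema migration: runProperties values move from bare integers (probeTimeout in milliseconds, interval and probePollingInterval in seconds, plus the initialDelaySeconds key) to explicit duration strings and the renamed initialDelay key. Collected from the hunks above, a probe block in the new format reads:

    runProperties:
      probeTimeout: 1s           # was: probeTimeout: 1000 (milliseconds)
      interval: 100ms            # was: interval: 1 (or 12)
      attempt: 2
      probePollingInterval: 1s   # was: probePollingInterval: 1
      initialDelay: 10s          # was: initialDelaySeconds: 10

Note that probePollingInterval and initialDelay never appear together in these hunks: the former accompanies Continuous-mode probes, the latter Edge-mode probes.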


@@ -0,0 +1,114 @@
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: argowf-podtato-head-chaos-
namespace: litmus
labels:
    subject: "{{workflow.parameters.adminModeNamespace}}_podtato-main"
spec:
entrypoint: argowf-chaos
serviceAccountName: argo-chaos
securityContext:
runAsUser: 1000
runAsNonRoot: true
arguments:
parameters:
- name: adminModeNamespace
value: "litmus"
templates:
- name: argowf-chaos
steps:
- - name: install-application
template: install-application
- - name: install-chaos-experiments
template: install-chaos-experiments
- - name: pod-delete
template: pod-delete
- - name: revert-chaos
template: revert-chaos
- name: delete-application
template: delete-application
- name: install-application
container:
image: litmuschaos/litmus-app-deployer:latest
args: ["-namespace={{workflow.parameters.adminModeNamespace}}","-typeName=resilient","-operation=apply","-timeout=400", "-app=podtato-head","-scope=namespace"]
- name: install-chaos-experiments
container:
image: litmuschaos/k8s:latest
command: [sh, -c]
args:
- "kubectl apply -f https://hub.litmuschaos.io/api/chaos/master?file=charts/generic/experiments.yaml -n
{{workflow.parameters.adminModeNamespace}} ; sleep 30"
- name: pod-delete
inputs:
artifacts:
- name: pod-delete
path: /tmp/chaosengine.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: podtato-main-pod-delete-chaos
namespace: {{workflow.parameters.adminModeNamespace}}
labels:
context: "{{workflow.parameters.adminModeNamespace}}_podtato-main"
spec:
appinfo:
appns: {{workflow.parameters.adminModeNamespace}}
applabel: 'name=podtato-main'
appkind: 'deployment'
engineState: 'active'
chaosServiceAccount: litmus-admin
jobCleanUpPolicy: 'retain'
components:
runner:
imagePullPolicy: Always
experiments:
- name: pod-delete
spec:
probe:
- name: "check-podtato-main-access-url"
type: "httpProbe"
httpProbe/inputs:
url: "http://podtato-main.{{workflow.parameters.adminModeNamespace}}.svc.cluster.local:9000"
insecureSkipVerify: false
method:
get:
criteria: "=="
responseCode: "200"
mode: "Continuous"
runProperties:
probeTimeout: 1s
interval: 100ms
attempt: 1
components:
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '10'
# pod failures without '--force' & default terminationGracePeriodSeconds
- name: FORCE
value: 'false'
container:
image: litmuschaos/litmus-checker:latest
args: ["-file=/tmp/chaosengine.yaml","-saveName=/tmp/engine-name"]
- name: delete-application
container:
image: litmuschaos/litmus-app-deployer:latest
args: ["-namespace={{workflow.parameters.adminModeNamespace}}","-typeName=resilient","-operation=delete","-app=podtato-head"]
- name: revert-chaos
container:
image: litmuschaos/k8s:latest
command: [sh, -c]
args:
[
"kubectl delete chaosengine podtato-main-pod-delete-chaos -n {{workflow.parameters.adminModeNamespace}}",
]
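Everything in this workflow is parameterized on adminModeNamespace, so the same manifest can target a different namespace without touching the templates. A minimal sketch of the override, assuming the target namespace already has the litmus-admin service account and RBAC in place; the chaos-testing value is illustrative:

    arguments:
      parameters:
        - name: adminModeNamespace
          value: "chaos-testing"   # illustrative; needs litmus-admin RBAC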


@@ -0,0 +1,118 @@
apiVersion: argoproj.io/v1alpha1
kind: CronWorkflow
metadata:
name: argowf-podtato-head-chaos-cron-wf
namespace: litmus
labels:
    subject: "{{workflow.parameters.adminModeNamespace}}_podtato-head"
spec:
schedule: "0 * * * *"
concurrencyPolicy: "Forbid"
startingDeadlineSeconds: 0
workflowSpec:
entrypoint: argowf-chaos
serviceAccountName: argo-chaos
securityContext:
runAsUser: 1000
runAsNonRoot: true
arguments:
parameters:
- name: adminModeNamespace
value: "litmus"
templates:
- name: argowf-chaos
steps:
- - name: install-application
template: install-application
- - name: install-chaos-experiments
template: install-chaos-experiments
- - name: pod-delete
template: pod-delete
- - name: revert-chaos
template: revert-chaos
- name: delete-application
template: delete-application
- name: install-application
container:
image: litmuschaos/litmus-app-deployer:latest
args: ["-namespace={{workflow.parameters.adminModeNamespace}}","-typeName=resilient","-operation=apply","-timeout=400", "-app=podtato-head","-scope=namespace"]
- name: install-chaos-experiments
container:
image: litmuschaos/k8s:latest
command: [sh, -c]
args:
- "kubectl apply -f https://hub.litmuschaos.io/api/chaos/master?file=charts/generic/experiments.yaml -n
{{workflow.parameters.adminModeNamespace}} ; sleep 30"
- name: pod-delete
inputs:
artifacts:
- name: pod-delete
path: /tmp/chaosengine.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: podtato-main-pod-delete-chaos
namespace: {{workflow.parameters.adminModeNamespace}}
labels:
context: "{{workflow.parameters.adminModeNamespace}}_podtato-main"
spec:
appinfo:
appns: {{workflow.parameters.adminModeNamespace}}
applabel: 'name=podtato-main'
appkind: 'deployment'
engineState: 'active'
chaosServiceAccount: litmus-admin
jobCleanUpPolicy: 'retain'
components:
runner:
imagePullPolicy: Always
experiments:
- name: pod-delete
spec:
probe:
- name: "check-podtato-main-access-url"
type: "httpProbe"
httpProbe/inputs:
url: "http://podtato-main.{{workflow.parameters.adminModeNamespace}}.svc.cluster.local:9000"
insecureSkipVerify: false
method:
get:
criteria: "=="
responseCode: "200"
mode: "Continuous"
runProperties:
probeTimeout: 1s
interval: 100ms
attempt: 1
components:
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '10'
# pod failures without '--force' & default terminationGracePeriodSeconds
- name: FORCE
value: 'false'
container:
image: litmuschaos/litmus-checker:latest
args: ["-file=/tmp/chaosengine.yaml","-saveName=/tmp/engine-name"]
- name: delete-application
container:
image: litmuschaos/litmus-app-deployer:latest
args: ["-namespace={{workflow.parameters.adminModeNamespace}}","-typeName=resilient","-operation=delete","-app=podtato-head"]
- name: revert-chaos
container:
image: litmuschaos/k8s:latest
command: [sh, -c]
args:
[
"kubectl delete chaosengine podtato-main-pod-delete-chaos -n {{workflow.parameters.adminModeNamespace}}",
]
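The cron variant wraps the identical workflowSpec in a schedule: "0 * * * *" fires at the top of every hour, concurrencyPolicy: "Forbid" skips a tick while a previous run is still active, and startingDeadlineSeconds: 0 drops any run that misses its start time. A sketch of the fields to adjust for a pausable nightly run, assuming standard Argo CronWorkflow semantics:

    spec:
      schedule: "0 2 * * *"        # illustrative: daily at 02:00 instead of hourly
      concurrencyPolicy: "Forbid"  # never overlap chaos runs
      suspend: true                # pauses scheduling without deleting the object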