14635851373: version upgraded for chaos-charts

litmusbot
2025-04-24 07:25:06 +00:00
parent 44ddb75e96
commit 55299cbc7a
5 changed files with 3473 additions and 3471 deletions


@@ -1,5 +1,412 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stopping an EC2 instance identified by tag.
kind: ChaosExperiment
metadata:
name: ec2-stop-by-tag
labels:
name: ec2-stop-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ec2-terminate-by-tag
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "30"
- name: CHAOS_INTERVAL
value: "30"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
# Provide a common tag to target ec2 instances
- name: EC2_INSTANCE_TAG
value: ""
# enable it if the target instance is a part of self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: "disable"
- name: REGION
value: ""
# Target the percentage of instance filtered from tag
- name: INSTANCE_AFFECTED_PERC
value: ""
- name: SEQUENCE
value: "parallel"
- name: DEFAULT_HEALTH_CHECK
value: "false"
# Provide the path of AWS credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: "/tmp/cloud_config.yml"
labels:
name: ec2-stop-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stopping an EC2 instance identified by ID.
kind: ChaosExperiment
metadata:
name: ec2-stop-by-id
labels:
name: ec2-stop-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ec2-terminate-by-id
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "30"
- name: CHAOS_INTERVAL
value: "30"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
# enable it if the target instance is a part of self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: "disable"
# Instance ID of the target EC2 instance
# Multiple IDs can also be provided as comma separated values ex: id1,id2
- name: EC2_INSTANCE_ID
value: ""
- name: REGION
value: ""
- name: SEQUENCE
value: "parallel"
# Provide the path of AWS credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: "/tmp/cloud_config.yml"
labels:
name: ec2-stop-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It injects AZ chaos to detach the target zones from the load balancer.
kind: ChaosExperiment
metadata:
name: aws-az-chaos
labels:
name: aws-az-chaos
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list",]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/py-runner:latest"
imagePullPolicy: Always
args:
- -c
- python3 -u experiment -name aws-az-chaos
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: LIB
value: 'litmus'
- name: LOAD_BALANCER_NAME
value: ''
- name: LOAD_BALANCER_ZONES
value: ''
- name: LOAD_BALANCERNAME_ARN
value: 'na'
- name: AWS_SHARED_CREDENTIALS_FILE
value: "/tmp/cloud_config.yml"
- name: RAMP_TIME
value: ''
labels:
name: aws-az-chaos
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stopping an RDS instance identified by instance identifier.
kind: ChaosExperiment
metadata:
name: rds-instance-stop
labels:
name: rds-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name rds-instance-stop
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "30"
- name: CHAOS_INTERVAL
value: "30"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
# Provide the RDS instance identifier
- name: RDS_INSTANCE_IDENTIFIER
value: ""
- name: REGION
value: ""
- name: INSTANCE_AFFECTED_PERC
value: ""
- name: SEQUENCE
value: "parallel"
- name: DEFAULT_HEALTH_CHECK
value: "false"
# Provide the path of AWS credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: "/tmp/cloud_config.yml"
labels:
name: rds-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching an EBS volume from EC2 instance for a certain chaos duration.
@@ -243,12 +650,12 @@ spec:
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stopping an EC2 instance identified by tag.
Detaching an EBS volume from EC2 instance.
kind: ChaosExperiment
metadata:
name: ec2-stop-by-tag
name: ebs-loss-by-tag
labels:
name: ec2-stop-by-tag
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
@@ -293,15 +700,11 @@ spec:
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ec2-terminate-by-tag
- ./experiments -name ebs-loss-by-tag
command:
- /bin/bash
env:
@@ -311,28 +714,21 @@ spec:
- name: CHAOS_INTERVAL
value: "30"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
# Provide a common tag to target ec2 instances
- name: EC2_INSTANCE_TAG
- name: EBS_VOLUME_TAG
value: ""
# enable it if the target instance is a part of self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: "disable"
- name: REGION
value: ""
# Target the percentage of instance filtered from tag
- name: INSTANCE_AFFECTED_PERC
value: ""
- name: SEQUENCE
value: "parallel"
- name: VOLUME_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
@@ -340,11 +736,8 @@ spec:
- name: AWS_SHARED_CREDENTIALS_FILE
value: "/tmp/cloud_config.yml"
- name: SEQUENCE
value: "parallel"
labels:
name: ec2-stop-by-tag
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
@@ -492,394 +885,3 @@ spec:
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It injects AZ chaos to detach the target zones from the load balancer.
kind: ChaosExperiment
metadata:
name: aws-az-chaos
labels:
name: aws-az-chaos
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create","delete","get","list","patch","update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create","get","list","patch","update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets","configmaps"]
verbs: ["get","list",]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get","list","watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get","list","create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update","delete"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/py-runner:latest"
imagePullPolicy: Always
args:
- -c
- python3 -u experiment -name aws-az-chaos
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: LIB
value: 'litmus'
- name: LOAD_BALANCER_NAME
value: ''
- name: LOAD_BALANCER_ZONES
value: ''
- name: LOAD_BALANCERNAME_ARN
value: 'na'
- name: AWS_SHARED_CREDENTIALS_FILE
value: "/tmp/cloud_config.yml"
- name: RAMP_TIME
value: ''
labels:
name: aws-az-chaos
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching an EBS volume from EC2 instance.
kind: ChaosExperiment
metadata:
name: ebs-loss-by-tag
labels:
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ebs-loss-by-tag
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "30"
- name: CHAOS_INTERVAL
value: "30"
- name: RAMP_TIME
value: ""
- name: EBS_VOLUME_TAG
value: ""
- name: REGION
value: ""
- name: SEQUENCE
value: "parallel"
- name: VOLUME_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
# Provide the path of AWS credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: "/tmp/cloud_config.yml"
labels:
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stopping an EC2 instance identified by ID.
kind: ChaosExperiment
metadata:
name: ec2-stop-by-id
labels:
name: ec2-stop-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ec2-terminate-by-id
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "30"
- name: CHAOS_INTERVAL
value: "30"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
# enable it if the target instance is a part of self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: "disable"
# Instance ID of the target EC2 instance
# Multiple IDs can also be provided as comma separated values ex: id1,id2
- name: EC2_INSTANCE_ID
value: ""
- name: REGION
value: ""
- name: SEQUENCE
value: "parallel"
# Provide the path of AWS credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: "/tmp/cloud_config.yml"
labels:
name: ec2-stop-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stopping an RDS instance identified by instance identifier.
kind: ChaosExperiment
metadata:
name: rds-instance-stop
labels:
name: rds-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name rds-instance-stop
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "30"
- name: CHAOS_INTERVAL
value: "30"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
# Provide the RDS instance identifier
- name: RDS_INSTANCE_IDENTIFIER
value: ""
- name: REGION
value: ""
- name: INSTANCE_AFFECTED_PERC
value: ""
- name: SEQUENCE
value: "parallel"
- name: DEFAULT_HEALTH_CHECK
value: "false"
# Provide the path of AWS credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: "/tmp/cloud_config.yml"
labels:
name: rds-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
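
Every AWS experiment in this file mounts a Secret named cloud-secret at /tmp/ and reads the credentials file pointed to by AWS_SHARED_CREDENTIALS_FILE (/tmp/cloud_config.yml). Below is a minimal sketch of that Secret together with a ChaosEngine that runs ec2-stop-by-tag; the namespace, engine and service-account names, tag, region, and percentage are illustrative placeholders, not values defined by this chart.

apiVersion: v1
kind: Secret
metadata:
  name: cloud-secret
  namespace: default          # assumed namespace
type: Opaque
stringData:
  cloud_config.yml: |-
    [default]
    aws_access_key_id = <access-key-id>
    aws_secret_access_key = <secret-access-key>
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: ec2-stop-by-tag-chaos
  namespace: default
spec:
  engineState: "active"
  chaosServiceAccount: ec2-stop-by-tag-sa   # assumed service account bound to the permissions above
  experiments:
    - name: ec2-stop-by-tag
      spec:
        components:
          env:
            # common tag of the target instances (key:value)
            - name: EC2_INSTANCE_TAG
              value: "chaos:target"
            - name: REGION
              value: "us-east-1"
            # stop all instances matching the tag
            - name: INSTANCE_AFFECTED_PERC
              value: "100"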


@@ -1,5 +1,109 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stops an Azure instance for a certain chaos duration
kind: ChaosExperiment
metadata:
name: azure-instance-stop
labels:
name: azure-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name azure-instance-stop
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "30"
- name: CHAOS_INTERVAL
value: "30"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
# provide the target instance name(s) (comma separated if multiple)
- name: AZURE_INSTANCE_NAMES
value: ""
# provide the resource group of the instance
- name: RESOURCE_GROUP
value: ""
# accepts enable/disable, default is disable
- name: SCALE_SET
value: ""
# Provide the path of aks credentials mounted from secret
- name: AZURE_AUTH_LOCATION
value: "/tmp/azure.auth"
- name: SEQUENCE
value: "parallel"
- name: DEFAULT_HEALTH_CHECK
value: "false"
labels:
name: azure-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaches disk from the VM and then re-attaches disk to the VM
@@ -103,107 +207,3 @@ spec:
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stops an Azure instance for a certain chaos duration
kind: ChaosExperiment
metadata:
name: azure-instance-stop
labels:
name: azure-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name azure-instance-stop
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "30"
- name: CHAOS_INTERVAL
value: "30"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
# provide the target instance name(s) (comma separated if multiple)
- name: AZURE_INSTANCE_NAMES
value: ""
# provide the resource group of the instance
- name: RESOURCE_GROUP
value: ""
# accepts enable/disable, default is disable
- name: SCALE_SET
value: ""
# Provide the path of aks credentials mounted from secret
- name: AZURE_AUTH_LOCATION
value: "/tmp/azure.auth"
- name: SEQUENCE
value: "parallel"
- name: DEFAULT_HEALTH_CHECK
value: "false"
labels:
name: azure-instance-stop
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
---
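
A minimal ChaosEngine sketch for the azure-instance-stop experiment above; the engine name, namespace, service account, and the instance/resource-group values are placeholders. The mounted cloud-secret is expected to carry the Azure auth file referenced by AZURE_AUTH_LOCATION.

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: azure-instance-chaos
  namespace: default
spec:
  engineState: "active"
  chaosServiceAccount: azure-instance-stop-sa   # assumed service account bound to the permissions above
  experiments:
    - name: azure-instance-stop
      spec:
        components:
          env:
            # comma separated list of target instance names
            - name: AZURE_INSTANCE_NAMES
              value: "instance-01,instance-02"
            - name: RESOURCE_GROUP
              value: "my-resource-group"
            - name: TOTAL_CHAOS_DURATION
              value: "60"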


@@ -1,317 +1,5 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stops GCP VM instances and GKE nodes filtered by a label for a specified duration and later restarts them
kind: ChaosExperiment
metadata:
name: gcp-vm-instance-stop-by-label
labels:
name: gcp-vm-instance-stop-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-instance-stop-by-label
command:
- /bin/bash
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: "30"
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: "30"
- name: SEQUENCE
value: "parallel"
# GCP project ID to which the VM instances belong
- name: GCP_PROJECT_ID
value: ""
# Label of the target VM instance(s)
- name: INSTANCE_LABEL
value: ""
# Zone in which the target VM instance(s) filtered by the label exist
# all the instances should lie in a single zone
- name: ZONES
value: ""
# enable it if the target instance is a part of a managed instance group
- name: MANAGED_INSTANCE_GROUP
value: "disable"
# set the percentage value of the instances with the given label
# which should be targeted as part of the chaos injection
- name: INSTANCE_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
labels:
name: gcp-vm-instance-stop-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Causes loss of a non-boot storage persistent disk from a GCP VM instance filtered by a label for a specified duration before attaching them back
kind: ChaosExperiment
metadata:
name: gcp-vm-disk-loss-by-label
labels:
name: gcp-vm-disk-loss-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-disk-loss-by-label
command:
- /bin/bash
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: "30"
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: "30"
# set the GCP project id
- name: GCP_PROJECT_ID
value: ""
# set the zone in which all the disks are created
# all the disks must exist in the same zone
- name: ZONES
value: ""
# set the label of the target disk volumes
- name: DISK_VOLUME_LABEL
value: ""
# set the percentage value of the disks with the given label
# which should be targeted as part of the chaos injection
- name: DISK_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: SEQUENCE
value: "parallel"
labels:
name: gcp-vm-disk-loss-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Causes loss of a non-boot storage persistent disk from a GCP VM instance for a specified duration before attaching them back
kind: ChaosExperiment
metadata:
name: gcp-vm-disk-loss
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-disk-loss
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "30"
- name: CHAOS_INTERVAL
value: "30"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
# parallel or serial; determines how chaos is injected
- name: SEQUENCE
value: "parallel"
# set the GCP project id
- name: GCP_PROJECT_ID
value: ""
# set the disk volume name(s) as comma separated values
# eg. volume1,volume2,...
- name: DISK_VOLUME_NAMES
value: ""
# set the disk zone(s) as comma separated values in the corresponding
# order of DISK_VOLUME_NAME
# eg. zone1,zone2,...
- name: ZONES
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stops GCP VM instances and GKE nodes for a specified duration and later restarts them
@@ -422,3 +110,315 @@ spec:
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Causes loss of a non-boot storage persistent disk from a GCP VM instance filtered by a label for a specified duration before attaching them back
kind: ChaosExperiment
metadata:
name: gcp-vm-disk-loss-by-label
labels:
name: gcp-vm-disk-loss-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-disk-loss-by-label
command:
- /bin/bash
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: "30"
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: "30"
# set the GCP project id
- name: GCP_PROJECT_ID
value: ""
# set the zone in which all the disks are created
# all the disks must exist in the same zone
- name: ZONES
value: ""
# set the label of the target disk volumes
- name: DISK_VOLUME_LABEL
value: ""
# set the percentage value of the disks with the given label
# which should be targeted as part of the chaos injection
- name: DISK_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: SEQUENCE
value: "parallel"
labels:
name: gcp-vm-disk-loss-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Stops GCP VM instances and GKE nodes filtered by a label for a specified duration and later restarts them
kind: ChaosExperiment
metadata:
name: gcp-vm-instance-stop-by-label
labels:
name: gcp-vm-instance-stop-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-instance-stop-by-label
command:
- /bin/bash
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: "30"
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: "30"
- name: SEQUENCE
value: "parallel"
# GCP project ID to which the VM instances belong
- name: GCP_PROJECT_ID
value: ""
# Label of the target VM instance(s)
- name: INSTANCE_LABEL
value: ""
# Zone in which the target VM instance(s) filtered by the label exist
# all the instances should lie in a single zone
- name: ZONES
value: ""
# enable it if the target instance is a part of a managed instance group
- name: MANAGED_INSTANCE_GROUP
value: "disable"
# set the percentage value of the instances with the given label
# which should be targeted as part of the chaos injection
- name: INSTANCE_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
labels:
name: gcp-vm-instance-stop-by-label
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Causes loss of a non-boot storage persistent disk from a GCP VM instance for a specified duration before attaching them back
kind: ChaosExperiment
metadata:
name: gcp-vm-disk-loss
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name gcp-vm-disk-loss
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "30"
- name: CHAOS_INTERVAL
value: "30"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
# parallel or serial; determines how chaos is injected
- name: SEQUENCE
value: "parallel"
# set the GCP project id
- name: GCP_PROJECT_ID
value: ""
# set the disk volume name(s) as comma separated values
# eg. volume1,volume2,...
- name: DISK_VOLUME_NAMES
value: ""
# set the disk zone(s) as comma separated values in the corresponding
# order of DISK_VOLUME_NAME
# eg. zone1,zone2,...
- name: ZONES
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
labels:
name: gcp-vm-disk-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
secrets:
- name: cloud-secret
mountPath: /tmp/
---
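
A minimal ChaosEngine sketch for gcp-vm-instance-stop-by-label from this file; the project, zone, label, namespace, and service-account names are placeholders, and the mounted cloud-secret is expected to hold the GCP credentials the experiment uses.

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: gcp-vm-chaos
  namespace: default
spec:
  engineState: "active"
  chaosServiceAccount: gcp-vm-instance-stop-by-label-sa   # assumed service account
  experiments:
    - name: gcp-vm-instance-stop-by-label
      spec:
        components:
          env:
            - name: GCP_PROJECT_ID
              value: "my-gcp-project"
            # label of the target VM instances (key:value)
            - name: INSTANCE_LABEL
              value: "role:chaos-target"
            # single zone holding all labelled instances
            - name: ZONES
              value: "us-central1-a"
            # stop half of the matching instances
            - name: INSTANCE_AFFECTED_PERC
              value: "50"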

File diff suppressed because it is too large.


@@ -2,12 +2,12 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It can target random pods with a Spring Boot application and allows configuring the assaults to inject network latency
It can target random pods with a Spring Boot application and allows configuring the assaults to inject app-kill
kind: ChaosExperiment
metadata:
name: spring-boot-latency
name: spring-boot-app-kill
labels:
name: spring-boot-latency
name: spring-boot-app-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
@@ -52,13 +52,217 @@ spec:
imagePullPolicy: Always
args:
- -c
- ./experiments -name spring-boot-latency
- ./experiments -name spring-boot-app-kill
command:
- /bin/bash
env:
# provide the latency (ms)
- name: LATENCY
value: "2000"
# port of the spring boot application
- name: CM_PORT
value: ""
# it contains the number of requests to be attacked
# n value means every nth request will be affected
- name: CM_LEVEL
value: "1"
# it limits watched packages/classes/methods
- name: CM_WATCHED_CUSTOM_SERVICES
value: ""
# provide name of watcher
# it supports controller, restController, service, repository, component, webClient
- name: CM_WATCHERS
value: "restController"
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
## Period to wait before and after injection of chaos
- name: RAMP_TIME
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: spring-boot-app-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It can target random pods with a Spring Boot application and allows configuring the assaults to inject network latency
kind: ChaosExperiment
metadata:
name: spring-boot-exceptions
labels:
name: spring-boot-exceptions
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name spring-boot-exceptions
command:
- /bin/bash
env:
# Type of raised exception
- name: CM_EXCEPTIONS_TYPE
value: "java.lang.IllegalArgumentException"
# Argument of raised exception
- name: CM_EXCEPTIONS_ARGUMENTS
value: "java.lang.String:custom illegal argument exception"
# port of the spring boot application
- name: CM_PORT
value: ""
# it contains the number of requests to be attacked
# n value means every nth request will be affected
- name: CM_LEVEL
value: "1"
# it limits watched packages/classes/methods
- name: CM_WATCHED_CUSTOM_SERVICES
value: ""
# provide name of watcher
# it supports controller, restController, service, repository, component, webClient
- name: CM_WATCHERS
value: "restController"
- name: TOTAL_CHAOS_DURATION
value: "30"
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
## Period to wait before and after injection of chaos
- name: RAMP_TIME
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: spring-boot-exceptions
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It can target random pods with a Spring Boot application and allows configuring the assaults to inject memory-stress
kind: ChaosExperiment
metadata:
name: spring-boot-memory-stress
labels:
name: spring-boot-memory-stress
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name spring-boot-memory-stress
command:
- /bin/bash
env:
# it contains the fraction of memory to be stressed (0.70 equals 70%)
# it supports value in range [0.01,0.95]
- name: MEMORY_FILL_FRACTION
value: "0.70"
# port of the spring boot application
- name: CM_PORT
@@ -95,7 +299,7 @@ spec:
value: "parallel"
labels:
name: spring-boot-latency
name: spring-boot-memory-stress
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
@@ -358,9 +562,9 @@ description:
It can target random pods with a Spring Boot application and allows configuring the assaults to inject network latency
kind: ChaosExperiment
metadata:
name: spring-boot-exceptions
name: spring-boot-latency
labels:
name: spring-boot-exceptions
name: spring-boot-latency
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
@@ -405,217 +609,13 @@ spec:
imagePullPolicy: Always
args:
- -c
- ./experiments -name spring-boot-exceptions
- ./experiments -name spring-boot-latency
command:
- /bin/bash
env:
# Type of raised exception
- name: CM_EXCEPTIONS_TYPE
value: "java.lang.IllegalArgumentException"
# Argument of raised exception
- name: CM_EXCEPTIONS_ARGUMENTS
value: "java.lang.String:custom illegal argument exception"
# port of the spring boot application
- name: CM_PORT
value: ""
# it contains the number of requests to be attacked
# n value means every nth request will be affected
- name: CM_LEVEL
value: "1"
# it limits watched packages/classes/methods
- name: CM_WATCHED_CUSTOM_SERVICES
value: ""
# provide name of watcher
# it supports controller, restController, service, repository, component, webClient
- name: CM_WATCHERS
value: "restController"
- name: TOTAL_CHAOS_DURATION
value: "30"
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
## Period to wait before and after injection of chaos
- name: RAMP_TIME
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: spring-boot-exceptions
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It can target random pods with a Spring Boot application and allows configuring the assaults to inject app-kill
kind: ChaosExperiment
metadata:
name: spring-boot-app-kill
labels:
name: spring-boot-app-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name spring-boot-app-kill
command:
- /bin/bash
env:
# port of the spring boot application
- name: CM_PORT
value: ""
# it contains the number of requests to be attacked
# n value means every nth request will be affected
- name: CM_LEVEL
value: "1"
# it limits watched packages/classes/methods
- name: CM_WATCHED_CUSTOM_SERVICES
value: ""
# provide name of watcher
# it supports controller, restController, service, repository, component, webClient
- name: CM_WATCHERS
value: "restController"
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
## Period to wait before and after injection of chaos
- name: RAMP_TIME
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: spring-boot-app-kill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
---
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
It can target random pods with a Spring Boot application and allows configuring the assaults to inject memory-stress
kind: ChaosExperiment
metadata:
name: spring-boot-memory-stress
labels:
name: spring-boot-memory-stress
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name spring-boot-memory-stress
command:
- /bin/bash
env:
# it contains the fraction of memory to be stressed (0.70 equals 70%)
# it supports value in range [0.01,0.95]
- name: MEMORY_FILL_FRACTION
value: "0.70"
# provide the latency (ms)
- name: LATENCY
value: "2000"
# port of the spring boot application
- name: CM_PORT
@@ -652,7 +652,7 @@ spec:
value: "parallel"
labels:
name: spring-boot-memory-stress
name: spring-boot-latency
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
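
A minimal ChaosEngine sketch for the spring-boot-app-kill experiment defined above; the namespace, application label, service account, and port are placeholder assumptions. The CM_* variables drive Chaos Monkey for Spring Boot inside the target pods, which is expected to be enabled and listening on CM_PORT.

apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: spring-boot-chaos
  namespace: default
spec:
  engineState: "active"
  appinfo:
    appns: "default"
    applabel: "app=spring-boot-demo"   # assumed label of the target deployment
    appkind: "deployment"
  chaosServiceAccount: spring-boot-app-kill-sa   # assumed service account bound to the permissions above
  experiments:
    - name: spring-boot-app-kill
      spec:
        components:
          env:
            # port on which Chaos Monkey for Spring Boot is listening
            - name: CM_PORT
              value: "8080"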