Chore(ebs-loss): Add EBS Loss By Tag Experiment (#459)

* Chore(ebs-loss): Add EBS Loss By Tag Experiment

Signed-off-by: uditgaurav <udit@chaosnative.com>
Udit Gaurav
2021-05-15 19:57:05 +05:30
committed by GitHub
parent f024ca968a
commit a9c07c7fd6
16 changed files with 257 additions and 47 deletions

View File

@@ -9,6 +9,7 @@ spec:
appkind: "deployment"
# It can be active/stop
engineState: "active"
annotationCheck: 'false'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ""
chaosServiceAccount: pod-dns-error-sa

View File

@@ -9,6 +9,7 @@ spec:
appkind: "deployment"
# It can be active/stop
engineState: "active"
annotationCheck: 'false'
#ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ""
chaosServiceAccount: pod-dns-spoof-sa

View File

@@ -2,23 +2,23 @@ apiVersion: litmuchaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2020-10-28T10:28:08Z
name: ebs-loss
name: ebs-loss-by-id
version: 0.1.0
annotations:
categories: Kubernetes
vendor: CNCF
support: https://slack.kubernetes.io/
spec:
displayName: ebs-loss
displayName: ebs-loss-by-id
categoryDescription: |
EBS Loss contains chaos to disrupt state of infra resources. The experiment can induce ebs volume loss against specified application.
- Causes ebs volume loss from node or ec2 instance for a certain chaos duration
- Causes Pod to get Evicted if the Pod exceeds its Ephemeral Storage Limit.
EBS Loss By ID contains chaos to disrupt state of infra resources. The experiment can induce ebs volume loss against the specified application for the given EBS Volume(s).
- Causes ebs volume loss from node or ec2 instance for a certain chaos interval within the total chaos duration.
- Tests deployment sanity (replica availability & uninterrupted service) and recovery workflows of the application pod
keywords:
- Kubernetes
- EBS
- Volume
- ID
- State
platforms:
- AWS
@@ -34,12 +34,12 @@ spec:
app.kubernetes.io/version: latest
links:
- name: Source Code
url: https://github.com/litmuschaos/litmus-go/tree/master/experiments/kube-aws/ebs-loss
url: https://github.com/litmuschaos/litmus-go/tree/master/experiments/kube-aws/ebs-loss-by-id
- name: Documentation
url: https://docs.litmuschaos.io/docs/ebs-loss/
url: https://docs.litmuschaos.io/docs/ebs-loss-by-id/
- name: Video
url:
icon:
- url:
mediatype: ""
chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/kube-aws/ebs-loss/experiment.yaml
chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/kube-aws/ebs-loss-by-id/experiment.yaml

View File

@@ -0,0 +1,31 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
engineState: 'active'
annotationCheck: 'false'
chaosServiceAccount: ebs-loss-by-id-sa
# It can be retain/delete
jobCleanUpPolicy: 'delete'
experiments:
- name: ebs-loss-by-id
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '30'
# set chaos interval (in sec) as desired
- name: CHAOS_INTERVAL
value: '30'
# set target ebs volume ID
- name: EBS_VOLUME_ID
value: ''
# provide the region name of the instance
- name: REGION
value: ''
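
For reference, a sketch of the same env block with the two required values filled in; the volume ID and region here are hypothetical placeholders, not values from this commit:

      env:
        # hypothetical target volume attached to the instance under test
        - name: EBS_VOLUME_ID
          value: 'vol-0a1b2c3d4e5f67890'
        # hypothetical AWS region of that volume
        - name: REGION
          value: 'us-east-1'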

View File

@@ -4,9 +4,9 @@ description:
Detaching an ebs volume from an ec2 instance.
kind: ChaosExperiment
metadata:
name: ebs-loss
name: ebs-loss-by-id
labels:
name: ebs-loss
name: ebs-loss-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
@@ -39,26 +39,29 @@ spec:
imagePullPolicy: Always
args:
- -c
- ./experiments -name ebs-loss
- ./experiments -name ebs-loss-by-id
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '60'
value: '30'
- name: CHAOS_INTERVAL
value: '30'
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
- name: EC2_INSTANCE_ID
value: ''
- name: EBS_VOL_ID
- name: EBS_VOLUME_ID
value: ''
- name: REGION
value: ''
- name: SEQUENCE
value: 'parallel'
# Provide the path of aws credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
@@ -68,11 +71,8 @@ spec:
- name: LIB
value: 'litmus'
- name: DEVICE_NAME
value: ''
labels:
name: ebs-loss
name: ebs-loss-by-id
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
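
The renamed experiment also gains a SEQUENCE variable defaulting to 'parallel'. Assuming it accepts the usual serial/parallel values and that EBS_VOLUME_ID can take a comma-separated list of volume IDs (both assumptions, not stated in this diff), a ChaosEngine override might look like:

      components:
        env:
          # two hypothetical volumes, detached one after the other
          - name: EBS_VOLUME_ID
            value: 'vol-0aaa1111bbb22222c,vol-0ddd3333eee44444f'
          - name: SEQUENCE
            value: 'serial'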

View File

@@ -2,18 +2,18 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: ebs-loss-sa
name: ebs-loss-by-id-sa
namespace: default
labels:
name: ebs-loss-sa
name: ebs-loss-by-id-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ebs-loss-sa
name: ebs-loss-by-id-sa
labels:
name: ebs-loss-sa
name: ebs-loss-by-id-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: [""]
@@ -32,15 +32,15 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ebs-loss-sa
name: ebs-loss-by-id-sa
labels:
name: ebs-loss-sa
name: ebs-loss-by-id-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-loss-sa
name: ebs-loss-by-id-sa
subjects:
- kind: ServiceAccount
name: ebs-loss-sa
name: ebs-loss-by-id-sa
namespace: default

View File

@@ -0,0 +1,45 @@
apiVersion: litmuchaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
createdAt: 2021-05-15T10:28:08Z
name: ebs-loss-by-tag
version: 0.1.0
annotations:
categories: Kubernetes
vendor: CNCF
support: https://slack.kubernetes.io/
spec:
displayName: ebs-loss-by-tag
categoryDescription: |
EBS Loss By Tag contains chaos to disrupt state of infra resources. The experiment can induce ebs volume loss against the specified application for the given volume tag.
- Causes ebs volume loss by tag from node or ec2 instance for a certain chaos interval within the total chaos duration.
- Tests deployment sanity (replica availability & uninterrupted service) and recovery workflows of the application pod
keywords:
- Kubernetes
- EBS
- Volume
- Tag
- State
platforms:
- AWS
maturity: alpha
chaosType: infra
maintainers:
- name: Udit Gaurav
email: uditgaurav@mayadata.io
provider:
name: Mayadata
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: latest
links:
- name: Source Code
url: https://github.com/litmuschaos/litmus-go/tree/master/experiments/kube-aws/ebs-loss-by-tag
- name: Documentation
url: https://docs.litmuschaos.io/docs/ebs-loss-by-tag/
- name: Video
url:
icon:
- url:
mediatype: ""
chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/kube-aws/ebs-loss-by-tag/experiment.yaml

View File

@@ -5,30 +5,27 @@ metadata:
namespace: default
spec:
engineState: 'active'
chaosServiceAccount: ebs-loss-sa
annotationCheck: 'false'
chaosServiceAccount: ebs-loss-by-tag-sa
# It can be retain/delete
jobCleanUpPolicy: 'delete'
experiments:
- name: ebs-loss
- name: ebs-loss-by-tag
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '60'
value: '30'
# Instance ID of the target ec2 instance
- name: EC2_INSTANCE_ID
value: ''
- name: CHAOS_INTERVAL
value: '30'
# provide EBS volume id attached to the given instance
- name: EBS_VOL_ID
# provide EBS volume tag attached to the given instance
# it'll be in the form of key:value (ex: 'team:devops')
- name: EBS_VOLUME_TAG
value: ''
# Enter the device name which you wanted to mount only for AWS.
- name: DEVICE_NAME
value: '/dev/sdb'
# provide the region name of the instance
- name: REGION
value: ''
value: ''
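
A sketch of the tag and region filled in, reusing the 'team:devops' example from the comment above; the region is a hypothetical placeholder. When the tag matches several volumes, the VOLUME_AFFECTED_PERC variable in the experiment CR below presumably decides what share of them gets detached.

        env:
          # detach EBS volumes carrying the AWS tag team=devops
          - name: EBS_VOLUME_TAG
            value: 'team:devops'
          # hypothetical region of the tagged volumes
          - name: REGION
            value: 'us-east-1'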

View File

@@ -0,0 +1,83 @@
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Detaching an ebs volume from an ec2 instance.
kind: ChaosExperiment
metadata:
name: ebs-loss-by-tag
labels:
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Cluster
permissions:
- apiGroups:
- ""
- "batch"
- "litmuschaos.io"
resources:
- "jobs"
- "pods"
- "events"
- "pods/log"
- "pods/exec"
- "secrets"
- "chaosengines"
- "chaosexperiments"
- "chaosresults"
verbs:
- "create"
- "list"
- "get"
- "patch"
- "update"
- "delete"
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name ebs-loss-by-tag
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '30'
- name: CHAOS_INTERVAL
value: '30'
- name: RAMP_TIME
value: ''
- name: EBS_VOLUME_TAG
value: ''
- name: REGION
value: ''
- name: SEQUENCE
value: 'parallel'
- name: VOLUME_AFFECTED_PERC
value: ''
# Provide the path of aws credentials mounted from secret
- name: AWS_SHARED_CREDENTIALS_FILE
value: '/tmp/cloud_config.yml'
# provide the LIB
# only litmus supported
- name: LIB
value: 'litmus'
labels:
name: ebs-loss-by-tag
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
secrets:
- name: cloud-secret
mountPath: /tmp/
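
The experiment reads AWS credentials from /tmp/cloud_config.yml through the cloud-secret mount above. A minimal sketch of that Secret, assuming the standard AWS shared-credentials INI format, placeholder keys, and the default namespace used by the sample engines:

    apiVersion: v1
    kind: Secret
    metadata:
      name: cloud-secret
      namespace: default
    type: Opaque
    stringData:
      # key name must match the file expected at /tmp/cloud_config.yml
      cloud_config.yml: |-
        [default]
        aws_access_key_id = AKIAXXXXXXXXXXXXXXXX
        aws_secret_access_key = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx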

View File

@@ -0,0 +1,46 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ebs-loss-by-tag-sa
namespace: default
labels:
name: ebs-loss-by-tag-sa
app.kubernetes.io/part-of: litmus
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ebs-loss-by-tag-sa
labels:
name: ebs-loss-by-tag-sa
app.kubernetes.io/part-of: litmus
rules:
- apiGroups: [""]
resources: ["pods","events","secrets"]
verbs: ["create","list","get","patch","update","delete","deletecollection"]
- apiGroups: [""]
resources: ["pods/exec","pods/log"]
verbs: ["create","list","get"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create","list","get","delete","deletecollection"]
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines","chaosexperiments","chaosresults"]
verbs: ["create","list","get","patch","update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ebs-loss-by-tag-sa
labels:
name: ebs-loss-by-tag-sa
app.kubernetes.io/part-of: litmus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ebs-loss-by-tag-sa
subjects:
- kind: ServiceAccount
name: ebs-loss-by-tag-sa
namespace: default

View File

@@ -32,4 +32,4 @@ spec:
# enable it if the target instance is a part of self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: 'disable'
value: 'disable'

View File

@@ -53,6 +53,9 @@ spec:
- name: RAMP_TIME
value: ''
- name: INSTANCE_TAG
value: ''
# enable it if the target instance is a part of self-managed nodegroup.
- name: MANAGED_NODEGROUP
value: 'disable'
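
The new INSTANCE_TAG presumably follows the same key:value convention as EBS_VOLUME_TAG above (an assumption; the diff leaves it blank). A sketch of a ChaosEngine override:

      env:
        # hypothetical tag carried by the target EC2 instances
        - name: INSTANCE_TAG
          value: 'team:devops'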

View File

Binary image files changed (3.1 KiB each); content not shown.

View File

@@ -15,7 +15,8 @@ spec:
- k8-aws-ec2-terminate
- ec2-terminate-by-id
- ec2-terminate-by-tag
- ebs-loss
- ebs-loss-by-id
- ebs-loss-by-tag
keywords:
- AWS
- EC2

View File

@@ -9,7 +9,9 @@ experiments:
- name: ec2-terminate-by-tag
CSV: ec2-terminate-by-tag.chartserviceversion.yaml
desc: "ec2-terminate-by-tag"
- name: ebs-loss
CSV: ebs-loss.chartserviceversion.yaml
desc: "ebs-loss"
- name: ebs-loss-by-id
CSV: ebs-loss-by-id.chartserviceversion.yaml
desc: "ebs-loss-by-id"
- name: ebs-loss-by-tag
CSV: ebs-loss-by-tag.chartserviceversion.yaml
desc: "ebs-loss-by-tag"