Removed unused workflows (#536)

* Removed unused workflows

Signed-off-by: Amit Kumar Das <amit@chaosnative.com>

* Moved byoc-pod-delete to byoc directory and updated generic CSV

Signed-off-by: Amit Kumar Das <amit@chaosnative.com>

Co-authored-by: Udit Gaurav <35391335+uditgaurav@users.noreply.github.com>
This commit is contained in:
Amit Kumar Das, 2022-04-28 17:53:50 +05:30 (committed by GitHub)
parent 238784bb69 · commit 1f873641ef
37 changed files with 0 additions and 1547 deletions

View File

@@ -1,10 +0,0 @@
### This explains how you can execute the Argo workflow.
### Assumption: you have installed Litmus - https://docs.litmuschaos.io/docs/getstarted/
### Assumption: you have installed Argo Workflows on this cluster - https://github.com/litmuschaos/chaos-workflows#getting-started
### Execution happens against the target namespace and assumes you have set up - https://hub.litmuschaos.io/generic/k8-calico-node
### Please ensure you have enough pods in this namespace
- Apply RBAC for Argo - `kubectl apply -f rbac-argo-service.yaml`
- Execute the experiments - `argo submit workflow.yaml` (see the submission sketch below)
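
A minimal submission sketch (parameter names are taken from the workflow.yaml below; the override values are examples, not requirements):

```sh
# Apply the RBAC manifest, then submit the workflow with parameter overrides.
kubectl apply -f rbac-argo-service.yaml
# -p overrides a workflow parameter; --watch streams step status until completion.
argo submit workflow.yaml \
  -p appNamespace=kube-system \
  -p appLabel="k8s-app=calico-node" \
  --watch
```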

View File

@@ -1,45 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: argowf-role
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "watch", "patch", "list"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get", "watch"]
  - apiGroups: ["argoproj.io"]
    resources: ["workflow", "workflows"]
    verbs: ["get", "create", "update", "patch", "delete", "list", "watch"]
  - apiGroups: ["policy"]
    resources: ["poddisruptionbudgets"]
    verbs: ["get", "create", "delete", "list"]
  - apiGroups: ["", "litmuschaos.io"]
    resources: ["chaosengines", "chaosexperiments", "chaosresults"]
    verbs: ["get", "create", "update", "patch", "delete", "list", "watch", "deletecollection"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: argowf-svcacc
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: argowf-rolebinding
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: argowf-role
subjects:
  - kind: ServiceAccount
    name: argowf-svcacc
    namespace: default
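
After applying this manifest, the grants can be sanity-checked with kubectl's impersonation support; a sketch, assuming the default namespace used above:

```sh
# Each command prints "yes" if the Role grants that verb to the service account.
kubectl auth can-i create workflows.argoproj.io \
  --as=system:serviceaccount:default:argowf-svcacc -n default
kubectl auth can-i delete chaosengines.litmuschaos.io \
  --as=system:serviceaccount:default:argowf-svcacc -n default
```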

View File

@@ -1,152 +0,0 @@
# This test can be executed only in the chaos namespace
# It launches Argo and the chaos resources in the chaos namespace
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: argowf-chaos-
  labels:
    subject: "{{workflow.parameters.appNamespace}}_calico-node"
spec:
  entrypoint: argowf-chaos
  podDisruptionBudget:
    minAvailable: "100%"
  # must complete within 24h (86400 seconds)
  activeDeadlineSeconds: 86400
  # keep completed workflows for 1h (3600 seconds)
  ttlStrategy:
    secondsAfterCompletion: 3600
  # delete all pods as soon as they complete
  podGC:
    strategy: OnPodCompletion
  serviceAccountName: argowf-svcacc
  arguments:
    parameters:
      - name: appNamespace
        value: "kube-system"
      - name: appCurrentNamespace
        value: "default"
      - name: appLabel
        value: "k8s-app=calico-node"
      - name: appEndpoint
        value: "localhost"
      - name: fileName
        value: "pod-custom-kill-health.json"
      - name: chaosServiceAccount
        value: chaos-admin
      - name: reportEndpoint
        value: none
  templates:
    - name: argowf-chaos
      steps:
        - - name: pdbcreate
            template: pdbcreate
        - - name: run-chaos
            template: run-chaos
        - - name: revert-chaos
            template: revert-chaos
    - name: pdbcreate
      container:
        image: alpine:latest
        command: [sh, -c]
        args: ["sleep 10"]
    - name: run-chaos
      inputs:
        artifacts:
          - name: run-chaos
            path: /tmp/createChaosEngine.yaml
            raw:
              data: |
                # chaosengine.yaml
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: k8-pod-delete
                  namespace: {{workflow.parameters.appCurrentNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_calico-node"
                spec:
                  # ex. values: ns1:name=percona,ns2:run=nginx
                  appinfo:
                    appns: {{workflow.parameters.appNamespace}}
                    # FYI, to see app labels, run: kubectl get pods --show-labels
                    # applabel: "app=nginx"
                    applabel: "app={{workflow.parameters.appLabel}}"
                    appkind: deployment
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
                  experiments:
                    - name: k8-pod-delete
                      spec:
                        components:
                          env:
                            - name: NAME_SPACE
                              value: {{workflow.parameters.appNamespace}}
                            - name: LABEL_NAME
                              value: {{workflow.parameters.appLabel}}
                            - name: APP_ENDPOINT
                              value: {{workflow.parameters.appEndpoint}}
                            - name: FILE
                              value: {{workflow.parameters.fileName}}
                            - name: REPORT
                              value: 'false'
                            - name: REPORT_ENDPOINT
                              value: '{{workflow.parameters.reportEndpoint}}'
                            - name: TEST_NAMESPACE
                              value: {{workflow.parameters.appCurrentNamespace}}
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args: ['kubectl apply -f /tmp/createChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}}; echo "sleeping for 60s"; sleep 60']
    - name: revert-chaos
      inputs:
        artifacts:
          - name: revert-chaos
            path: /tmp/deleteChaosEngine.yaml
            raw:
              data: |
                # chaosengine.yaml
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: k8-pod-delete
                  namespace: {{workflow.parameters.appCurrentNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_calico-node"
                spec:
                  # ex. values: ns1:name=percona,ns2:run=nginx
                  appinfo:
                    appns: {{workflow.parameters.appNamespace}}
                    # FYI, to see app labels, run: kubectl get pods --show-labels
                    # applabel: "app=nginx"
                    applabel: "app={{workflow.parameters.appLabel}}"
                    appkind: deployment
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
                  experiments:
                    - name: k8-pod-delete
                      spec:
                        components:
                          env:
                            - name: NAME_SPACE
                              value: {{workflow.parameters.appNamespace}}
                            - name: LABEL_NAME
                              value: {{workflow.parameters.appLabel}}
                            - name: APP_ENDPOINT
                              value: {{workflow.parameters.appEndpoint}}
                            - name: FILE
                              value: {{workflow.parameters.fileName}}
                            - name: REPORT
                              value: 'false'
                            - name: REPORT_ENDPOINT
                              value: '{{workflow.parameters.reportEndpoint}}'
                            - name: TEST_NAMESPACE
                              value: {{workflow.parameters.appCurrentNamespace}}
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args: ['sleep 20; kubectl delete -f /tmp/deleteChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}}; echo "sleeping for 60s"; sleep 60']
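
While a run is active, the engine created by run-chaos can be followed from outside the workflow. A sketch, assuming the defaults above and Litmus's usual &lt;engine&gt;-&lt;experiment&gt; naming convention for the result object:

```sh
# The ChaosEngine lives in appCurrentNamespace until revert-chaos deletes it.
kubectl get chaosengine k8-pod-delete -n default
# The verdict (Pass/Fail/Awaited) is recorded on the ChaosResult.
kubectl get chaosresult k8-pod-delete-k8-pod-delete -n default \
  -o jsonpath='{.status.experimentStatus.verdict}'
```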

View File

@@ -1,10 +0,0 @@
### This explains how you can execute the Argo workflow.
### Assumption: you have installed Litmus - https://docs.litmuschaos.io/docs/getstarted/
### Assumption: you have installed Argo Workflows on this cluster - https://github.com/litmuschaos/chaos-workflows#getting-started
### Execution happens against the target namespace and assumes you have set up - https://hub.litmuschaos.io/generic/k8-kiam
### Please ensure you have enough pods in this namespace
- Apply RBAC for Argo - `kubectl apply -f rbac-argo-service.yaml`
- Execute the experiments - `argo submit workflow.yaml`

View File

@@ -1,45 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: argowf-role
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "watch", "patch", "list"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get", "watch"]
  - apiGroups: ["argoproj.io"]
    resources: ["workflow", "workflows"]
    verbs: ["get", "create", "update", "patch", "delete", "list", "watch"]
  - apiGroups: ["policy"]
    resources: ["poddisruptionbudgets"]
    verbs: ["get", "create", "delete", "list"]
  - apiGroups: ["", "litmuschaos.io"]
    resources: ["chaosengines", "chaosexperiments", "chaosresults"]
    verbs: ["get", "create", "update", "patch", "delete", "list", "watch", "deletecollection"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: argowf-svcacc
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: argowf-rolebinding
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: argowf-role
subjects:
  - kind: ServiceAccount
    name: argowf-svcacc
    namespace: default

View File

@@ -1,152 +0,0 @@
# This test can be executed only in the chaos namespace
# It launches Argo and the chaos resources in the chaos namespace
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: argowf-chaos-
  labels:
    subject: "{{workflow.parameters.appNamespace}}_kiam"
spec:
  entrypoint: argowf-chaos
  podDisruptionBudget:
    minAvailable: "100%"
  # must complete within 24h (86400 seconds)
  activeDeadlineSeconds: 86400
  # keep completed workflows for 1h (3600 seconds)
  ttlStrategy:
    secondsAfterCompletion: 3600
  # delete all pods as soon as they complete
  podGC:
    strategy: OnPodCompletion
  serviceAccountName: argowf-svcacc
  arguments:
    parameters:
      - name: appNamespace
        value: "kube-system"
      - name: appCurrentNamespace
        value: "default"
      - name: appLabel
        value: "kiam"
      - name: appEndpoint
        value: "localhost"
      - name: fileName
        value: "pod-app-kill-health.json"
      - name: chaosServiceAccount
        value: chaos-admin
      - name: reportEndpoint
        value: none
  templates:
    - name: argowf-chaos
      steps:
        - - name: pdbcreate
            template: pdbcreate
        - - name: run-chaos
            template: run-chaos
        - - name: revert-chaos
            template: revert-chaos
    - name: pdbcreate
      container:
        image: alpine:latest
        command: [sh, -c]
        args: ["sleep 10"]
    - name: run-chaos
      inputs:
        artifacts:
          - name: run-chaos
            path: /tmp/createChaosEngine.yaml
            raw:
              data: |
                # chaosengine.yaml
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: k8-pod-delete
                  namespace: {{workflow.parameters.appCurrentNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_kiam"
                spec:
                  # ex. values: ns1:name=percona,ns2:run=nginx
                  appinfo:
                    appns: {{workflow.parameters.appNamespace}}
                    # FYI, to see app labels, run: kubectl get pods --show-labels
                    # applabel: "app=nginx"
                    applabel: "app={{workflow.parameters.appLabel}}"
                    appkind: deployment
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
                  experiments:
                    - name: k8-pod-delete
                      spec:
                        components:
                          env:
                            - name: NAME_SPACE
                              value: {{workflow.parameters.appNamespace}}
                            - name: LABEL_NAME
                              value: {{workflow.parameters.appLabel}}
                            - name: APP_ENDPOINT
                              value: {{workflow.parameters.appEndpoint}}
                            - name: FILE
                              value: {{workflow.parameters.fileName}}
                            - name: REPORT
                              value: 'false'
                            - name: REPORT_ENDPOINT
                              value: '{{workflow.parameters.reportEndpoint}}'
                            - name: TEST_NAMESPACE
                              value: {{workflow.parameters.appCurrentNamespace}}
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args: ['kubectl apply -f /tmp/createChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}}; echo "sleeping for 60s"; sleep 60']
    - name: revert-chaos
      inputs:
        artifacts:
          - name: revert-chaos
            path: /tmp/deleteChaosEngine.yaml
            raw:
              data: |
                # chaosengine.yaml
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: k8-pod-delete
                  namespace: {{workflow.parameters.appCurrentNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_kiam"
                spec:
                  # ex. values: ns1:name=percona,ns2:run=nginx
                  appinfo:
                    appns: {{workflow.parameters.appNamespace}}
                    # FYI, to see app labels, run: kubectl get pods --show-labels
                    # applabel: "app=nginx"
                    applabel: "app={{workflow.parameters.appLabel}}"
                    appkind: deployment
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
                  experiments:
                    - name: k8-pod-delete
                      spec:
                        components:
                          env:
                            - name: NAME_SPACE
                              value: {{workflow.parameters.appNamespace}}
                            - name: LABEL_NAME
                              value: {{workflow.parameters.appLabel}}
                            - name: APP_ENDPOINT
                              value: {{workflow.parameters.appEndpoint}}
                            - name: FILE
                              value: {{workflow.parameters.fileName}}
                            - name: REPORT
                              value: 'false'
                            - name: REPORT_ENDPOINT
                              value: '{{workflow.parameters.reportEndpoint}}'
                            - name: TEST_NAMESPACE
                              value: {{workflow.parameters.appCurrentNamespace}}
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args: ['sleep 20; kubectl delete -f /tmp/deleteChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}}; echo "sleeping for 60s"; sleep 60']

View File

@@ -1,9 +0,0 @@
### This explains how you can execute the Argo workflow.
### Assumption: you have installed Litmus - https://docs.litmuschaos.io/docs/getstarted/
### Assumption: you have installed Argo Workflows on this cluster - https://github.com/litmuschaos/chaos-workflows#getting-started
### Execution happens in your own namespace and won't impact anything outside it
### Please ensure you have enough pods in this namespace
- Apply RBAC for Argo - `kubectl apply -f rbac-argo-service.yaml`
- Execute the experiments - `argo submit workflow.yaml` (a sketch for creating a matching target app follows below)
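
Because this variant targets your own namespace, it needs a deployment matching the workflow's default appLabel (nginx-demo-app). A throwaway target can be created like this; the image choice is arbitrary:

```sh
# kubectl create deployment labels the pods app=nginx-demo-app,
# which matches the workflow's applabel template "app={{appLabel}}".
kubectl create deployment nginx-demo-app --image=nginx --replicas=2
kubectl get pods -l app=nginx-demo-app --show-labels
```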

View File

@@ -1,45 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: argowf-role
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "watch", "patch", "list"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get", "watch"]
  - apiGroups: ["argoproj.io"]
    resources: ["workflow", "workflows"]
    verbs: ["get", "create", "update", "patch", "delete", "list", "watch"]
  - apiGroups: ["policy"]
    resources: ["poddisruptionbudgets"]
    verbs: ["get", "create", "delete", "list"]
  - apiGroups: ["", "litmuschaos.io"]
    resources: ["chaosengines", "chaosexperiments", "chaosresults"]
    verbs: ["get", "create", "update", "patch", "delete", "list", "watch", "deletecollection"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: argowf-svcacc
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: argowf-rolebinding
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: argowf-role
subjects:
  - kind: ServiceAccount
    name: argowf-svcacc
    namespace: default

View File

@@ -1,191 +0,0 @@
# This test can be executed only in the application namespace
# It launches Argo and the chaos pod in the same namespace
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: argowf-chaos-
  labels:
    subject: "{{workflow.parameters.appNamespace}}_nginx-demo-app"
spec:
  entrypoint: argowf-chaos
  podDisruptionBudget:
    minAvailable: "100%"
  # must complete within 24h (86400 seconds)
  activeDeadlineSeconds: 86400
  # keep completed workflows for 1h (3600 seconds)
  ttlStrategy:
    secondsAfterCompletion: 3600
  # delete all pods as soon as they complete
  podGC:
    strategy: OnPodCompletion
  serviceAccountName: argowf-svcacc
  arguments:
    parameters:
      - name: appNamespace
        value: "default"
      - name: appCurrentNamespace
        value: "default"
      - name: appLabel
        value: "nginx-demo-app"
      - name: appEndpoint
        value: "localhost"
      - name: fileName
        value: "pod-app-kill-health.json"
      - name: chaosServiceAccount
        value: k8-pod-delete-sa
      - name: reportEndpoint
        value: none
  templates:
    - name: argowf-chaos
      steps:
        - - name: pdbcreate
            template: pdbcreate
        - - name: install-chaos-experiments
            template: install-chaos-experiments
        - - name: install-chaos-rbac
            template: install-chaos-rbac
        - - name: run-chaos
            template: run-chaos
        - - name: revert-chaos
            template: revert-chaos
        - - name: revert-chaos-rbac
            template: revert-chaos-rbac
        - - name: revert-chaos-experiments
            template: revert-chaos-experiments
    - name: pdbcreate
      container:
        image: alpine:latest
        command: [sh, -c]
        args: ["sleep 10"]
    - name: install-chaos-experiments
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args:
          - "kubectl apply -f https://hub.litmuschaos.io/api/chaos/1.9.0?file=charts/generic/byoc-pod-delete/experiment.yaml -n
            {{workflow.parameters.appNamespace}}; sleep 30"
    - name: install-chaos-rbac
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args:
          - "kubectl apply -f https://hub.litmuschaos.io/api/chaos/1.9.0?file=charts/generic/byoc-pod-delete/rbac.yaml -n
            {{workflow.parameters.appNamespace}}; sleep 30"
    - name: run-chaos
      inputs:
        artifacts:
          - name: run-chaos
            path: /tmp/createChaosEngine.yaml
            raw:
              data: |
                # chaosengine.yaml
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: k8-pod-delete
                  namespace: {{workflow.parameters.appNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_nginx-demo-app"
                spec:
                  # ex. values: ns1:name=percona,ns2:run=nginx
                  appinfo:
                    appns: {{workflow.parameters.appNamespace}}
                    # FYI, to see app labels, run: kubectl get pods --show-labels
                    # applabel: "app=nginx"
                    applabel: "app={{workflow.parameters.appLabel}}"
                    appkind: deployment
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
                  experiments:
                    - name: k8-pod-delete
                      spec:
                        components:
                          env:
                            - name: NAME_SPACE
                              value: {{workflow.parameters.appNamespace}}
                            - name: LABEL_NAME
                              value: {{workflow.parameters.appLabel}}
                            - name: APP_ENDPOINT
                              value: {{workflow.parameters.appEndpoint}}
                            - name: FILE
                              value: {{workflow.parameters.fileName}}
                            - name: REPORT
                              value: 'false'
                            - name: REPORT_ENDPOINT
                              value: '{{workflow.parameters.reportEndpoint}}'
                            - name: TEST_NAMESPACE
                              value: {{workflow.parameters.appCurrentNamespace}}
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args: ['kubectl apply -f /tmp/createChaosEngine.yaml -n {{workflow.parameters.appNamespace}}; echo "sleeping for 60s"; sleep 60']
    - name: revert-chaos
      inputs:
        artifacts:
          - name: revert-chaos
            path: /tmp/deleteChaosEngine.yaml
            raw:
              data: |
                # chaosengine.yaml
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: k8-pod-delete
                  namespace: {{workflow.parameters.appNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_nginx-demo-app"
                spec:
                  # ex. values: ns1:name=percona,ns2:run=nginx
                  appinfo:
                    appns: {{workflow.parameters.appNamespace}}
                    # FYI, to see app labels, run: kubectl get pods --show-labels
                    # applabel: "app=nginx"
                    applabel: "app={{workflow.parameters.appLabel}}"
                    appkind: deployment
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
                  experiments:
                    - name: k8-pod-delete
                      spec:
                        components:
                          env:
                            - name: NAME_SPACE
                              value: {{workflow.parameters.appNamespace}}
                            - name: LABEL_NAME
                              value: {{workflow.parameters.appLabel}}
                            - name: APP_ENDPOINT
                              value: {{workflow.parameters.appEndpoint}}
                            - name: FILE
                              value: {{workflow.parameters.fileName}}
                            - name: REPORT
                              value: 'false'
                            - name: REPORT_ENDPOINT
                              value: '{{workflow.parameters.reportEndpoint}}'
                            - name: TEST_NAMESPACE
                              value: {{workflow.parameters.appCurrentNamespace}}
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args: ['sleep 20; kubectl delete -f /tmp/deleteChaosEngine.yaml -n {{workflow.parameters.appNamespace}}; echo "sleeping for 60s"; sleep 60']
    - name: revert-chaos-experiments
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args:
          - "kubectl delete -f https://hub.litmuschaos.io/api/chaos/1.9.0?file=charts/generic/byoc-pod-delete/experiment.yaml -n
            {{workflow.parameters.appNamespace}}; sleep 30"
    - name: revert-chaos-rbac
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args:
          - "kubectl delete -f https://hub.litmuschaos.io/api/chaos/1.9.0?file=charts/generic/byoc-pod-delete/rbac.yaml -n
            {{workflow.parameters.appNamespace}}; sleep 30"
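
The install steps above pull manifests straight from the hub API. To review what will be applied before running the workflow, the same URL can be fetched and server-side dry-run; a sketch with the URL copied from the install step:

```sh
# Inspect the experiment CR first.
curl -s "https://hub.litmuschaos.io/api/chaos/1.9.0?file=charts/generic/byoc-pod-delete/experiment.yaml"
# Validate it against the cluster without creating anything.
curl -s "https://hub.litmuschaos.io/api/chaos/1.9.0?file=charts/generic/byoc-pod-delete/experiment.yaml" \
  | kubectl apply --dry-run=server -n default -f -
```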

View File

@@ -1,10 +0,0 @@
### This explains how you can execute the Argo workflow.
### Assumption: you have installed Litmus - https://docs.litmuschaos.io/docs/getstarted/
### Assumption: you have installed Argo Workflows on this cluster - https://github.com/litmuschaos/chaos-workflows#getting-started
### Execution happens against the target namespace and assumes you have set up - https://hub.litmuschaos.io/generic/k8-service-kill
### Please ensure you have enough pods in this namespace
- Apply RBAC for Argo - `kubectl apply -f rbac-argo-service.yaml`
- Execute the experiments - `argo submit workflow.yaml`

View File

@@ -1,45 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: argowf-role
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "watch", "patch", "list"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get", "watch"]
  - apiGroups: ["argoproj.io"]
    resources: ["workflow", "workflows"]
    verbs: ["get", "create", "update", "patch", "delete", "list", "watch"]
  - apiGroups: ["policy"]
    resources: ["poddisruptionbudgets"]
    verbs: ["get", "create", "delete", "list"]
  - apiGroups: ["", "litmuschaos.io"]
    resources: ["chaosengines", "chaosexperiments", "chaosresults"]
    verbs: ["get", "create", "update", "patch", "delete", "list", "watch", "deletecollection"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: argowf-svcacc
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: argowf-rolebinding
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: argowf-role
subjects:
  - kind: ServiceAccount
    name: argowf-svcacc
    namespace: default

View File

@@ -1,152 +0,0 @@
# This test can be executed only in the chaos namespace
# It launches Argo and the chaos resources in the chaos namespace
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: argowf-chaos-
  labels:
    subject: "{{workflow.parameters.appNamespace}}_calico-node"
spec:
  entrypoint: argowf-chaos
  podDisruptionBudget:
    minAvailable: "100%"
  # must complete within 24h (86400 seconds)
  activeDeadlineSeconds: 86400
  # keep completed workflows for 1h (3600 seconds)
  ttlStrategy:
    secondsAfterCompletion: 3600
  # delete all pods as soon as they complete
  podGC:
    strategy: OnPodCompletion
  serviceAccountName: argowf-svcacc
  arguments:
    parameters:
      - name: appNamespace
        value: "default"
      - name: appCurrentNamespace
        value: "default"
      - name: appLabel
        value: "k8s-app=calico-node"
      - name: appEndpoint
        value: "localhost"
      - name: fileName
        value: "service-app-kill-health.json"
      - name: chaosServiceAccount
        value: chaos-admin
      - name: reportEndpoint
        value: none
  templates:
    - name: argowf-chaos
      steps:
        - - name: pdbcreate
            template: pdbcreate
        - - name: run-chaos
            template: run-chaos
        - - name: revert-chaos
            template: revert-chaos
    - name: pdbcreate
      container:
        image: alpine:latest
        command: [sh, -c]
        args: ["sleep 10"]
    - name: run-chaos
      inputs:
        artifacts:
          - name: run-chaos
            path: /tmp/createChaosEngine.yaml
            raw:
              data: |
                # chaosengine.yaml
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: k8-pod-delete
                  namespace: {{workflow.parameters.appCurrentNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_calico-node"
                spec:
                  # ex. values: ns1:name=percona,ns2:run=nginx
                  appinfo:
                    appns: {{workflow.parameters.appNamespace}}
                    # FYI, to see app labels, run: kubectl get pods --show-labels
                    # applabel: "app=nginx"
                    applabel: "app={{workflow.parameters.appLabel}}"
                    appkind: deployment
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
                  experiments:
                    - name: k8-pod-delete
                      spec:
                        components:
                          env:
                            - name: NAME_SPACE
                              value: {{workflow.parameters.appNamespace}}
                            - name: LABEL_NAME
                              value: {{workflow.parameters.appLabel}}
                            - name: APP_ENDPOINT
                              value: {{workflow.parameters.appEndpoint}}
                            - name: FILE
                              value: {{workflow.parameters.fileName}}
                            - name: REPORT
                              value: 'false'
                            - name: REPORT_ENDPOINT
                              value: '{{workflow.parameters.reportEndpoint}}'
                            - name: TEST_NAMESPACE
                              value: {{workflow.parameters.appCurrentNamespace}}
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args: ['kubectl apply -f /tmp/createChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}}; echo "sleeping for 60s"; sleep 60']
    - name: revert-chaos
      inputs:
        artifacts:
          - name: revert-chaos
            path: /tmp/deleteChaosEngine.yaml
            raw:
              data: |
                # chaosengine.yaml
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: k8-pod-delete
                  namespace: {{workflow.parameters.appCurrentNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_calico-node"
                spec:
                  # ex. values: ns1:name=percona,ns2:run=nginx
                  appinfo:
                    appns: {{workflow.parameters.appNamespace}}
                    # FYI, to see app labels, run: kubectl get pods --show-labels
                    # applabel: "app=nginx"
                    applabel: "app={{workflow.parameters.appLabel}}"
                    appkind: deployment
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
                  experiments:
                    - name: k8-pod-delete
                      spec:
                        components:
                          env:
                            - name: NAME_SPACE
                              value: {{workflow.parameters.appNamespace}}
                            - name: LABEL_NAME
                              value: {{workflow.parameters.appLabel}}
                            - name: APP_ENDPOINT
                              value: {{workflow.parameters.appEndpoint}}
                            - name: FILE
                              value: {{workflow.parameters.fileName}}
                            - name: REPORT
                              value: 'false'
                            - name: REPORT_ENDPOINT
                              value: '{{workflow.parameters.reportEndpoint}}'
                            - name: TEST_NAMESPACE
                              value: {{workflow.parameters.appCurrentNamespace}}
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args: ['sleep 20; kubectl delete -f /tmp/deleteChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}}; echo "sleeping for 60s"; sleep 60']

View File

@@ -1,10 +0,0 @@
### This explains how you can execute the Argo workflow.
### Assumption: you have installed Litmus - https://docs.litmuschaos.io/docs/getstarted/
### Assumption: you have installed Argo Workflows on this cluster - https://github.com/litmuschaos/chaos-workflows#getting-started
### Execution happens against the target namespace and assumes you have set up - https://hub.litmuschaos.io/generic/k8-wavefront-collector
### Please ensure you have enough pods in this namespace
- Apply RBAC for Argo - `kubectl apply -f rbac-argo-service.yaml`
- Execute the experiments - `argo submit workflow.yaml`

View File

@@ -1,45 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: argowf-role
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "watch", "patch", "list"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get", "watch"]
  - apiGroups: ["argoproj.io"]
    resources: ["workflow", "workflows"]
    verbs: ["get", "create", "update", "patch", "delete", "list", "watch"]
  - apiGroups: ["policy"]
    resources: ["poddisruptionbudgets"]
    verbs: ["get", "create", "delete", "list"]
  - apiGroups: ["", "litmuschaos.io"]
    resources: ["chaosengines", "chaosexperiments", "chaosresults"]
    verbs: ["get", "create", "update", "patch", "delete", "list", "watch", "deletecollection"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: argowf-svcacc
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: argowf-rolebinding
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: argowf-role
subjects:
  - kind: ServiceAccount
    name: argowf-svcacc
    namespace: default

View File

@@ -1,152 +0,0 @@
# This test can be executed only in the chaos namespace
# It launches Argo and the chaos resources in the chaos namespace
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: argowf-chaos-
  labels:
    subject: "{{workflow.parameters.appNamespace}}_wavefront-collector"
spec:
  entrypoint: argowf-chaos
  podDisruptionBudget:
    minAvailable: "100%"
  # must complete within 24h (86400 seconds)
  activeDeadlineSeconds: 86400
  # keep completed workflows for 1h (3600 seconds)
  ttlStrategy:
    secondsAfterCompletion: 3600
  # delete all pods as soon as they complete
  podGC:
    strategy: OnPodCompletion
  serviceAccountName: argowf-svcacc
  arguments:
    parameters:
      - name: appNamespace
        value: "kube-system"
      - name: appCurrentNamespace
        value: "default"
      - name: appLabel
        value: "k8s-app=wavefront-collector"
      - name: appEndpoint
        value: "localhost"
      - name: fileName
        value: "pod-custom-kill-health.json"
      - name: chaosServiceAccount
        value: chaos-admin
      - name: reportEndpoint
        value: none
  templates:
    - name: argowf-chaos
      steps:
        - - name: pdbcreate
            template: pdbcreate
        - - name: run-chaos
            template: run-chaos
        - - name: revert-chaos
            template: revert-chaos
    - name: pdbcreate
      container:
        image: alpine:latest
        command: [sh, -c]
        args: ["sleep 10"]
    - name: run-chaos
      inputs:
        artifacts:
          - name: run-chaos
            path: /tmp/createChaosEngine.yaml
            raw:
              data: |
                # chaosengine.yaml
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: k8-pod-delete
                  namespace: {{workflow.parameters.appCurrentNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_wavefront-collector"
                spec:
                  # ex. values: ns1:name=percona,ns2:run=nginx
                  appinfo:
                    appns: {{workflow.parameters.appNamespace}}
                    # FYI, to see app labels, run: kubectl get pods --show-labels
                    # applabel: "app=nginx"
                    applabel: "app={{workflow.parameters.appLabel}}"
                    appkind: deployment
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
                  experiments:
                    - name: k8-pod-delete
                      spec:
                        components:
                          env:
                            - name: NAME_SPACE
                              value: {{workflow.parameters.appNamespace}}
                            - name: LABEL_NAME
                              value: {{workflow.parameters.appLabel}}
                            - name: APP_ENDPOINT
                              value: {{workflow.parameters.appEndpoint}}
                            - name: FILE
                              value: {{workflow.parameters.fileName}}
                            - name: REPORT
                              value: 'false'
                            - name: REPORT_ENDPOINT
                              value: '{{workflow.parameters.reportEndpoint}}'
                            - name: TEST_NAMESPACE
                              value: {{workflow.parameters.appCurrentNamespace}}
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args: ['kubectl apply -f /tmp/createChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}}; echo "sleeping for 60s"; sleep 60']
    - name: revert-chaos
      inputs:
        artifacts:
          - name: revert-chaos
            path: /tmp/deleteChaosEngine.yaml
            raw:
              data: |
                # chaosengine.yaml
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: k8-pod-delete
                  namespace: {{workflow.parameters.appCurrentNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_wavefront-collector"
                spec:
                  # ex. values: ns1:name=percona,ns2:run=nginx
                  appinfo:
                    appns: {{workflow.parameters.appNamespace}}
                    # FYI, to see app labels, run: kubectl get pods --show-labels
                    # applabel: "app=nginx"
                    applabel: "app={{workflow.parameters.appLabel}}"
                    appkind: deployment
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
                  experiments:
                    - name: k8-pod-delete
                      spec:
                        components:
                          env:
                            - name: NAME_SPACE
                              value: {{workflow.parameters.appNamespace}}
                            - name: LABEL_NAME
                              value: {{workflow.parameters.appLabel}}
                            - name: APP_ENDPOINT
                              value: {{workflow.parameters.appEndpoint}}
                            - name: FILE
                              value: {{workflow.parameters.fileName}}
                            - name: REPORT
                              value: 'false'
                            - name: REPORT_ENDPOINT
                              value: '{{workflow.parameters.reportEndpoint}}'
                            - name: TEST_NAMESPACE
                              value: {{workflow.parameters.appCurrentNamespace}}
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args: ['sleep 20; kubectl delete -f /tmp/deleteChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}}; echo "sleeping for 60s"; sleep 60']

View File

@@ -1,235 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: argowf-chaos-kube-proxy-all-
  namespace: litmus
  labels:
    subject: "{{workflow.parameters.appNamespace}}_kube-proxy"
spec:
  entrypoint: argowf-chaos
  serviceAccountName: argo-chaos
  securityContext:
    runAsUser: 1000
    runAsNonRoot: true
  arguments:
    parameters:
      - name: adminModeNamespace
        value: litmus
      - name: appNamespace
        value: "kube-system"
      - name: contextCPUHog
        value: "node-cpu-hog_infra"
      - name: contextMemoryHog
        value: "node-memory-hog_infra"
  templates:
    - name: argowf-chaos
      steps:
        - - name: install-chaos-experiments
            template: install-chaos-experiments
        - - name: node-cpu-hog
            template: node-cpu-hog
          - name: pod-memory-hog
            template: pod-memory-hog
        - - name: pod-cpu-hog
            template: pod-cpu-hog
          - name: node-memory-hog
            template: node-memory-hog
        - - name: pod-delete
            template: pod-delete
        - - name: revert-kube-proxy-chaos
            template: revert-kube-proxy-chaos
    - name: install-chaos-experiments
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args:
          - "kubectl apply -f https://hub.litmuschaos.io/api/chaos/master?file=charts/generic/experiments.yaml -n
            {{workflow.parameters.adminModeNamespace}}; sleep 30"
    - name: node-cpu-hog
      inputs:
        artifacts:
          - name: node-cpu-hog
            path: /tmp/chaosengine-node-cpu-hog.yaml
            raw:
              data: |
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: kube-proxy-node-cpu-hog
                  namespace: {{workflow.parameters.adminModeNamespace}}
                  labels:
                    context: {{workflow.parameters.contextCPUHog}}
                spec:
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: litmus-admin
                  experiments:
                    - name: node-cpu-hog
                      spec:
                        components:
                          env:
                            - name: NODE_CPU_CORE
                              value: '1'
                            - name: TOTAL_CHAOS_DURATION
                              value: '60' # in seconds
      container:
        image: litmuschaos/litmus-checker:latest
        args: ["-file=/tmp/chaosengine-node-cpu-hog.yaml", "-saveName=/tmp/engine-name"]
    - name: pod-memory-hog
      inputs:
        artifacts:
          - name: pod-memory-hog
            path: /tmp/chaosengine-pod-memory-hog.yaml
            raw:
              data: |
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: kube-proxy-pod-memory-hog-chaos
                  namespace: {{workflow.parameters.adminModeNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_kube-proxy"
                spec:
                  appinfo:
                    appns: kube-system
                    applabel: "k8s-app=kube-proxy"
                    appkind: daemonset
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: litmus-admin
                  experiments:
                    - name: pod-memory-hog
                      spec:
                        components:
                          env:
                            - name: TARGET_CONTAINER
                              value: 'kube-proxy'
                            - name: MEMORY_CONSUMPTION
                              value: '500' # in MB
                            - name: TOTAL_CHAOS_DURATION
                              value: '60' # in seconds
                            - name: CHAOS_KILL_COMMAND
                              value: "kill $(find /proc -name exe -lname '*/dd' 2>&1 | grep -v 'Permission denied' | awk -F/ '{print $(NF-1)}' | head -n 1)"
      container:
        image: litmuschaos/litmus-checker:latest
        args: ["-file=/tmp/chaosengine-pod-memory-hog.yaml", "-saveName=/tmp/engine-name"]
    - name: pod-cpu-hog
      inputs:
        artifacts:
          - name: pod-cpu-hog
            path: /tmp/chaosengine-pod-cpu-hog.yaml
            raw:
              data: |
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: kube-proxy-pod-cpu-hog-chaos
                  namespace: {{workflow.parameters.adminModeNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_kube-proxy"
                spec:
                  appinfo:
                    appns: kube-system
                    applabel: "k8s-app=kube-proxy"
                    appkind: daemonset
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: litmus-admin
                  experiments:
                    - name: pod-cpu-hog
                      spec:
                        components:
                          env:
                            - name: TARGET_CONTAINER
                              value: 'kube-proxy'
                            - name: CPU_CORES
                              value: '1'
                            - name: TOTAL_CHAOS_DURATION
                              value: '60' # in seconds
                            - name: CHAOS_KILL_COMMAND
                              value: "kill $(find /proc -name exe -lname '*/md5sum' 2>&1 | grep -v 'Permission denied' | awk -F/ '{print $(NF-1)}' | head -n 1)"
      container:
        image: litmuschaos/litmus-checker:latest
        args: ["-file=/tmp/chaosengine-pod-cpu-hog.yaml", "-saveName=/tmp/engine-name"]
    - name: node-memory-hog
      inputs:
        artifacts:
          - name: node-memory-hog
            path: /tmp/chaosengine-node-memory-hog.yaml
            raw:
              data: |
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: kube-proxy-node-memory-hog-chaos
                  namespace: {{workflow.parameters.adminModeNamespace}}
                  labels:
                    context: {{workflow.parameters.contextMemoryHog}}
                spec:
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: litmus-admin
                  experiments:
                    - name: node-memory-hog
                      spec:
                        components:
                          env:
                            - name: MEMORY_PERCENTAGE
                              value: '50'
                            - name: TOTAL_CHAOS_DURATION
                              value: '60' # in seconds
      container:
        image: litmuschaos/litmus-checker:latest
        args: ["-file=/tmp/chaosengine-node-memory-hog.yaml", "-saveName=/tmp/engine-name"]
    - name: pod-delete
      inputs:
        artifacts:
          - name: pod-delete
            path: /tmp/chaosengine-pod-delete.yaml
            raw:
              data: |
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: kube-proxy-pod-delete-chaos
                  namespace: {{workflow.parameters.adminModeNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_kube-proxy"
                spec:
                  appinfo:
                    appns: kube-system
                    applabel: "k8s-app=kube-proxy"
                    appkind: daemonset
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: litmus-admin
                  experiments:
                    - name: pod-delete
                      spec:
                        components:
                          env:
                            - name: TOTAL_CHAOS_DURATION
                              value: "60"
                            - name: CHAOS_INTERVAL
                              value: "10"
                            - name: FORCE
                              value: "false"
      container:
        image: litmuschaos/litmus-checker:latest
        args: ["-file=/tmp/chaosengine-pod-delete.yaml", "-saveName=/tmp/engine-name"]
    - name: revert-kube-proxy-chaos
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args:
          - "kubectl delete chaosengines kube-proxy-node-cpu-hog kube-proxy-pod-memory-hog-chaos kube-proxy-pod-cpu-hog-chaos kube-proxy-node-memory-hog-chaos kube-proxy-pod-delete-chaos -n
            {{workflow.parameters.adminModeNamespace}}"
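
The CHAOS_KILL_COMMAND values above locate the stress process through its /proc entry; the same pipeline, unchanged except for layout and comments:

```sh
# Find /proc/<pid>/exe symlinks pointing at the dd stress binary,
# drop unreadable entries, extract the <pid> path component,
# keep the first match, and kill that process.
kill $(find /proc -name exe -lname '*/dd' 2>&1 \
  | grep -v 'Permission denied' \
  | awk -F/ '{print $(NF-1)}' \
  | head -n 1)
```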

View File

@@ -1,238 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: CronWorkflow
metadata:
  name: argo-chaos-kube-proxy-cron-wf
  namespace: litmus
  labels:
    subject: "{{workflow.parameters.appNamespace}}_kube-proxy"
spec:
  schedule: "0 * * * *"
  concurrencyPolicy: "Forbid"
  startingDeadlineSeconds: 0
  workflowSpec:
    entrypoint: argowf-chaos
    serviceAccountName: argo-chaos
    securityContext:
      runAsUser: 1000
      runAsNonRoot: true
    arguments:
      parameters:
        - name: adminModeNamespace
          value: "litmus"
        - name: appNamespace
          value: "kube-system"
        - name: contextCPUHog
          value: "node-cpu-hog_infra"
        - name: contextMemoryHog
          value: "node-memory-hog_infra"
    templates:
      - name: argowf-chaos
        steps:
          - - name: install-chaos-experiments
              template: install-chaos-experiments
          - - name: node-cpu-hog
              template: node-cpu-hog
            - name: pod-memory-hog
              template: pod-memory-hog
          - - name: pod-cpu-hog
              template: pod-cpu-hog
            - name: node-memory-hog
              template: node-memory-hog
          - - name: pod-delete
              template: pod-delete
          - - name: revert-kube-proxy-chaos
              template: revert-kube-proxy-chaos
      - name: install-chaos-experiments
        container:
          image: litmuschaos/k8s:latest
          command: [sh, -c]
          args:
            - "kubectl apply -f https://hub.litmuschaos.io/api/chaos/master?file=charts/generic/experiments.yaml -n
              {{workflow.parameters.adminModeNamespace}}; sleep 30"
      - name: node-cpu-hog
        inputs:
          artifacts:
            - name: node-cpu-hog
              path: /tmp/chaosengine-node-cpu-hog.yaml
              raw:
                data: |
                  apiVersion: litmuschaos.io/v1alpha1
                  kind: ChaosEngine
                  metadata:
                    name: kube-proxy-node-cpu-hog
                    namespace: {{workflow.parameters.adminModeNamespace}}
                    labels:
                      context: {{workflow.parameters.contextCPUHog}}
                  spec:
                    jobCleanUpPolicy: retain
                    engineState: 'active'
                    chaosServiceAccount: litmus-admin
                    experiments:
                      - name: node-cpu-hog
                        spec:
                          components:
                            env:
                              - name: NODE_CPU_CORE
                                value: '1'
                              - name: TOTAL_CHAOS_DURATION
                                value: '60' # in seconds
        container:
          image: litmuschaos/litmus-checker:latest
          args: ["-file=/tmp/chaosengine-node-cpu-hog.yaml", "-saveName=/tmp/engine-name"]
      - name: pod-memory-hog
        inputs:
          artifacts:
            - name: pod-memory-hog
              path: /tmp/chaosengine-pod-memory-hog.yaml
              raw:
                data: |
                  apiVersion: litmuschaos.io/v1alpha1
                  kind: ChaosEngine
                  metadata:
                    name: kube-proxy-pod-memory-hog-chaos
                    namespace: {{workflow.parameters.adminModeNamespace}}
                    labels:
                      context: "{{workflow.parameters.appNamespace}}_kube-proxy"
                  spec:
                    appinfo:
                      appns: kube-system
                      applabel: "k8s-app=kube-proxy"
                      appkind: daemonset
                    jobCleanUpPolicy: retain
                    engineState: 'active'
                    chaosServiceAccount: litmus-admin
                    experiments:
                      - name: pod-memory-hog
                        spec:
                          components:
                            env:
                              - name: TARGET_CONTAINER
                                value: 'kube-proxy'
                              - name: MEMORY_CONSUMPTION
                                value: '500' # in MB
                              - name: TOTAL_CHAOS_DURATION
                                value: '60' # in seconds
                              - name: CHAOS_KILL_COMMAND
                                value: "kill $(find /proc -name exe -lname '*/dd' 2>&1 | grep -v 'Permission denied' | awk -F/ '{print $(NF-1)}' | head -n 1)"
        container:
          image: litmuschaos/litmus-checker:latest
          args: ["-file=/tmp/chaosengine-pod-memory-hog.yaml", "-saveName=/tmp/engine-name"]
      - name: pod-cpu-hog
        inputs:
          artifacts:
            - name: pod-cpu-hog
              path: /tmp/chaosengine-pod-cpu-hog.yaml
              raw:
                data: |
                  apiVersion: litmuschaos.io/v1alpha1
                  kind: ChaosEngine
                  metadata:
                    name: kube-proxy-pod-cpu-hog-chaos
                    namespace: {{workflow.parameters.adminModeNamespace}}
                    labels:
                      context: "{{workflow.parameters.appNamespace}}_kube-proxy"
                  spec:
                    appinfo:
                      appns: kube-system
                      applabel: "k8s-app=kube-proxy"
                      appkind: daemonset
                    jobCleanUpPolicy: retain
                    engineState: 'active'
                    chaosServiceAccount: litmus-admin
                    experiments:
                      - name: pod-cpu-hog
                        spec:
                          components:
                            env:
                              - name: TARGET_CONTAINER
                                value: 'kube-proxy'
                              - name: CPU_CORES
                                value: '1'
                              - name: TOTAL_CHAOS_DURATION
                                value: '60' # in seconds
                              - name: CHAOS_KILL_COMMAND
                                value: "kill $(find /proc -name exe -lname '*/md5sum' 2>&1 | grep -v 'Permission denied' | awk -F/ '{print $(NF-1)}' | head -n 1)"
        container:
          image: litmuschaos/litmus-checker:latest
          args: ["-file=/tmp/chaosengine-pod-cpu-hog.yaml", "-saveName=/tmp/engine-name"]
      - name: node-memory-hog
        inputs:
          artifacts:
            - name: node-memory-hog
              path: /tmp/chaosengine-node-memory-hog.yaml
              raw:
                data: |
                  apiVersion: litmuschaos.io/v1alpha1
                  kind: ChaosEngine
                  metadata:
                    name: kube-proxy-node-memory-hog-chaos
                    namespace: {{workflow.parameters.adminModeNamespace}}
                    labels:
                      context: {{workflow.parameters.contextMemoryHog}}
                  spec:
                    jobCleanUpPolicy: retain
                    engineState: 'active'
                    chaosServiceAccount: litmus-admin
                    experiments:
                      - name: node-memory-hog
                        spec:
                          components:
                            env:
                              - name: MEMORY_PERCENTAGE
                                value: '50'
                              - name: TOTAL_CHAOS_DURATION
                                value: '60' # in seconds
        container:
          image: litmuschaos/litmus-checker:latest
          args: ["-file=/tmp/chaosengine-node-memory-hog.yaml", "-saveName=/tmp/engine-name"]
      - name: pod-delete
        inputs:
          artifacts:
            - name: pod-delete
              path: /tmp/chaosengine-pod-delete.yaml
              raw:
                data: |
                  apiVersion: litmuschaos.io/v1alpha1
                  kind: ChaosEngine
                  metadata:
                    name: kube-proxy-pod-delete-chaos
                    namespace: {{workflow.parameters.adminModeNamespace}}
                    labels:
                      context: "{{workflow.parameters.appNamespace}}_kube-proxy"
                  spec:
                    appinfo:
                      appns: kube-system
                      applabel: "k8s-app=kube-proxy"
                      appkind: daemonset
                    jobCleanUpPolicy: retain
                    engineState: 'active'
                    chaosServiceAccount: litmus-admin
                    experiments:
                      - name: pod-delete
                        spec:
                          components:
                            env:
                              - name: TOTAL_CHAOS_DURATION
                                value: "60"
                              - name: CHAOS_INTERVAL
                                value: "10"
                              - name: FORCE
                                value: "false"
        container:
          image: litmuschaos/litmus-checker:latest
          args: ["-file=/tmp/chaosengine-pod-delete.yaml", "-saveName=/tmp/engine-name"]
      - name: revert-kube-proxy-chaos
        container:
          image: litmuschaos/k8s:latest
          command: [sh, -c]
          args:
            - "kubectl delete chaosengines kube-proxy-node-cpu-hog kube-proxy-pod-memory-hog-chaos kube-proxy-pod-cpu-hog-chaos kube-proxy-node-memory-hog-chaos kube-proxy-pod-delete-chaos -n
              {{workflow.parameters.adminModeNamespace}}"
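
Once created, this CronWorkflow fires hourly ("0 * * * *") until it is suspended or deleted. A control sketch using the Argo CLI (subcommands as in recent Argo releases):

```sh
# Pause the hourly schedule without deleting the object.
argo cron suspend argo-chaos-kube-proxy-cron-wf -n litmus
# Resume it later, or list schedules to see the next run time.
argo cron resume argo-chaos-kube-proxy-cron-wf -n litmus
argo cron list -n litmus
```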