Files
litmus-hub/workflows/pod-memory-hog/workflow.yaml
Ishan Gupta 82c8194444 Added sample workflows for litmus-portal. (#308)
* Added sample workflows for litmus-portal.

Signed-off-by: ishangupta-ds <ishan.gupta@mayadata.io>
2020-09-08 10:59:01 +05:30

159 lines
5.3 KiB
YAML

---
# Argo Workflow: runs the LitmusChaos "pod-memory-hog" experiment against
# kube-proxy. Three sequential steps: install the ChaosExperiment CR,
# apply a ChaosEngine to trigger the chaos, then delete the engine.
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: argowf-chaos-pod-memory-hog-
  namespace: litmus
spec:
  entrypoint: argowf-chaos
  serviceAccountName: argo-chaos
  arguments:
    parameters:
      # Namespace where Litmus runs in admin mode; all chaos resources
      # are created here.
      - name: adminModeNamespace
        value: "litmus"
  templates:
    # Orchestration entry point: each `- -` opens a new step group, so the
    # three steps run strictly one after another.
    - name: argowf-chaos
      steps:
        - - name: install-experiment
            template: install-experiment
        - - name: run-chaos
            template: run-chaos
        - - name: revert-chaos
            template: revert-chaos

    # Renders the pod-memory-hog ChaosExperiment manifest into a file via a
    # raw artifact, then kubectl-applies it into the admin-mode namespace.
    - name: install-experiment
      inputs:
        artifacts:
          - name: install-experiment
            path: /tmp/pod-memory-hog.yaml
            raw:
              data: |
                apiVersion: litmuschaos.io/v1alpha1
                description:
                  message: |
                    Injects memory consumption on pods belonging to an app deployment
                kind: ChaosExperiment
                metadata:
                  name: pod-memory-hog
                  version: 0.1.3
                spec:
                  definition:
                    scope: Namespaced
                    permissions:
                      - apiGroups:
                          - ""
                          - "batch"
                          - "litmuschaos.io"
                        resources:
                          - "jobs"
                          - "pods"
                          - "pods/log"
                          - "events"
                          - "chaosengines"
                          - "chaosexperiments"
                          - "chaosresults"
                        verbs:
                          - "create"
                          - "list"
                          - "get"
                          - "patch"
                          - "update"
                          - "delete"
                    image: "litmuschaos/go-runner:ci"
                    args:
                      - -c
                      - ./experiments/pod-memory-hog
                    command:
                      - /bin/bash
                    env:
                      - name: TOTAL_CHAOS_DURATION
                        value: '30'
                      - name: CHAOS_INTERVAL
                        value: '10'
                      # amount of memory in megabytes to be consumed by the application pod
                      - name: MEMORY_CONSUMPTION
                        value: '500'
                      # percentage of total pods to target
                      - name: PODS_AFFECTED_PERC
                        value: ''
                      # period to wait before and after injection of chaos in sec
                      - name: RAMP_TIME
                        value: ''
                      # library used to execute the chaos
                      # default: litmus. Supported values: litmus, powerfulseal, chaoskube
                      - name: LIB
                        value: 'litmus'
                      - name: TARGET_POD
                        value: ''
                    labels:
                      name: pod-memory-hog
      container:
        image: lachlanevenson/k8s-kubectl
        command: [sh, -c]
        args:
          - "kubectl apply -f /tmp/pod-memory-hog.yaml -n {{workflow.parameters.adminModeNamespace}}"

    # Applies the ChaosEngine (targeting the kube-proxy daemonset in
    # kube-system), then waits 120 s before the workflow moves on.
    - name: run-chaos
      inputs:
        artifacts:
          - name: run-chaos
            path: /tmp/chaosengine.yaml
            raw:
              data: |
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: kube-proxy-pod-memory-hog-chaos
                  # quoted so an empty parameter expansion cannot yield a
                  # null/invalid namespace field
                  namespace: "{{workflow.parameters.adminModeNamespace}}"
                spec:
                  appinfo:
                    appns: kube-system
                    applabel: "k8s-app=kube-proxy"
                    appkind: daemonset
                  jobCleanUpPolicy: retain
                  monitoring: false
                  annotationCheck: 'false'
                  engineState: 'active'
                  chaosServiceAccount: litmus-admin
                  experiments:
                    - name: pod-memory-hog
                      spec:
                        components:
                          experimentImage: "litmuschaos/go-runner:ci"
                          env:
                            - name: TARGET_CONTAINER
                              value: 'kube-proxy'
                            - name: MEMORY_CONSUMPTION
                              value: '500'
                            - name: TOTAL_CHAOS_DURATION
                              value: '240'  # in seconds
                            - name: CHAOS_KILL_COMMAND
                              value: "kill -9 $(ps afx | grep \"[dd] if /dev/zero\" | awk '{print $1}' | tr '\n' ' ')"
      container:
        image: lachlanevenson/k8s-kubectl
        command: [sh, -c]
        args:
          # '&&' replaces the original '|' chain: pipeline stages start
          # concurrently and report only the last stage's exit status,
          # which would mask a kubectl failure.
          - "kubectl apply -f /tmp/chaosengine.yaml -n {{workflow.parameters.adminModeNamespace}} && echo 'sleeping for 120s' && sleep 120"

    # Removes the ChaosEngine after a short grace period.
    - name: revert-chaos
      container:
        image: lachlanevenson/k8s-kubectl
        command: [sh, -c]
        args:
          # The original 'sleep 20 | kubectl delete ...' ran both commands
          # simultaneously (pipeline stages start together), so the delay
          # never took effect; '&&' sequences them as intended.
          - "sleep 20 && kubectl delete chaosengine kube-proxy-pod-memory-hog-chaos -n {{workflow.parameters.adminModeNamespace}}"