chore: Fix experiments charts for 3.0.0 (#613)

* adds metadata.annotation in chaosengine and replaces install-chaos-faults step to use artifacts.data.raw manifests

Signed-off-by: neelanjan00 <neelanjan.manna@harness.io>

* fixes annotation -> annotations

Signed-off-by: neelanjan00 <neelanjan.manna@harness.io>

* updates labels

Signed-off-by: neelanjan00 <neelanjan.manna@harness.io>

---------

Signed-off-by: neelanjan00 <neelanjan.manna@harness.io>
This commit is contained in:
Neelanjan Manna
2023-10-04 10:37:55 +05:30
committed by GitHub
parent c7f3d2683c
commit d482aa76af
20 changed files with 3481 additions and 105 deletions

View File

@@ -37,12 +37,168 @@ spec:
args: ["-namespace=bank","-typeName=resilient","-operation=apply","-timeout=400", "-app=bank-of-anthos","-scope=cluster"] #for weak provide type flagName as resilient(-typeName=weak) args: ["-namespace=bank","-typeName=resilient","-operation=apply","-timeout=400", "-app=bank-of-anthos","-scope=cluster"] #for weak provide type flagName as resilient(-typeName=weak)
- name: install-chaos-faults - name: install-chaos-faults
inputs:
artifacts:
- name: pod-network-loss-x1w
path: /tmp/pod-network-loss-x1w.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects network packet loss on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-network-loss
labels:
name: pod-network-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-network-loss
command:
- /bin/bash
env:
- name: TARGET_CONTAINER
value: ""
# provide lib image
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
- name: NETWORK_INTERFACE
value: "eth0"
- name: TC_IMAGE
value: "gaiadocker/iproute2"
- name: NETWORK_PACKET_LOSS_PERCENTAGE
value: "100" #in PERCENTAGE
- name: TOTAL_CHAOS_DURATION
value: "60" # in seconds
# time period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: TARGET_PODS
value: ""
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
# provide the name of container runtime
# for litmus LIB, it supports docker, containerd, crio
# for pumba LIB, it supports docker only
- name: CONTAINER_RUNTIME
value: "containerd"
# provide the destination ips
# chaos injection will be triggered for these destination ips
- name: DESTINATION_IPS
value: ""
# provide the destination hosts
# chaos injection will be triggered for these destination hosts
- name: DESTINATION_HOSTS
value: ""
# provide the socket file path
- name: SOCKET_PATH
value: "/run/containerd/containerd.sock"
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-network-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: latest
outputs: {}
metadata: {}
container: container:
name: ""
image: litmuschaos/k8s:latest image: litmuschaos/k8s:latest
command: [sh, -c] command:
- sh
- "-c"
args: args:
- "kubectl apply -f https://hub.litmuschaos.io/api/chaos/master?file=faults/kubernetes/experiments.yaml -n - kubectl apply -f /tmp/pod-network-loss-x1w.yaml -n {{workflow.parameters.adminModeNamespace}} && sleep 30
{{workflow.parameters.adminModeNamespace}} ; sleep 30" resources: {}
- name: pod-network-loss - name: pod-network-loss
inputs: inputs:
@@ -58,6 +214,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_bank-of-anthos" context: "{{workflow.parameters.appNamespace}}_bank-of-anthos"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'bank' appns: 'bank'

View File

@@ -41,12 +41,168 @@ spec:
args: ["-namespace=bank","-typeName=resilient","-operation=apply","-timeout=400", "-app=bank-of-anthos","-scope=cluster"] #for weak provide type flagName as resilient(-typeName=weak) args: ["-namespace=bank","-typeName=resilient","-operation=apply","-timeout=400", "-app=bank-of-anthos","-scope=cluster"] #for weak provide type flagName as resilient(-typeName=weak)
- name: install-chaos-faults - name: install-chaos-faults
inputs:
artifacts:
- name: pod-network-loss-x1w
path: /tmp/pod-network-loss-x1w.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects network packet loss on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-network-loss
labels:
name: pod-network-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-network-loss
command:
- /bin/bash
env:
- name: TARGET_CONTAINER
value: ""
# provide lib image
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
- name: NETWORK_INTERFACE
value: "eth0"
- name: TC_IMAGE
value: "gaiadocker/iproute2"
- name: NETWORK_PACKET_LOSS_PERCENTAGE
value: "100" #in PERCENTAGE
- name: TOTAL_CHAOS_DURATION
value: "60" # in seconds
# time period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: TARGET_PODS
value: ""
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
# provide the name of container runtime
# for litmus LIB, it supports docker, containerd, crio
# for pumba LIB, it supports docker only
- name: CONTAINER_RUNTIME
value: "containerd"
# provide the destination ips
# chaos injection will be triggered for these destination ips
- name: DESTINATION_IPS
value: ""
# provide the destination hosts
# chaos injection will be triggered for these destination hosts
- name: DESTINATION_HOSTS
value: ""
# provide the socket file path
- name: SOCKET_PATH
value: "/run/containerd/containerd.sock"
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-network-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: latest
outputs: {}
metadata: {}
container: container:
name: ""
image: litmuschaos/k8s:latest image: litmuschaos/k8s:latest
command: [sh, -c] command:
- sh
- "-c"
args: args:
- "kubectl apply -f https://hub.litmuschaos.io/api/chaos/master?file=faults/kubernetes/experiments.yaml -n - kubectl apply -f /tmp/pod-network-loss-x1w.yaml -n {{workflow.parameters.adminModeNamespace}} && sleep 30
{{workflow.parameters.adminModeNamespace}} ; sleep 30" resources: {}
- name: pod-network-loss - name: pod-network-loss
inputs: inputs:
@@ -62,6 +218,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_bank-of-anthos" context: "{{workflow.parameters.appNamespace}}_bank-of-anthos"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'bank' appns: 'bank'

View File

@@ -125,6 +125,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.adminModeNamespace}}_hello-world" context: "{{workflow.parameters.adminModeNamespace}}_hello-world"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: {{workflow.parameters.adminModeNamespace}} appns: {{workflow.parameters.adminModeNamespace}}

View File

@@ -129,6 +129,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.adminModeNamespace}}_hello-world" context: "{{workflow.parameters.adminModeNamespace}}_hello-world"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: {{workflow.parameters.adminModeNamespace}} appns: {{workflow.parameters.adminModeNamespace}}

View File

@@ -121,6 +121,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.context}}" context: "{{workflow.parameters.context}}"
annotations: {}
spec: spec:
jobCleanUpPolicy: retain jobCleanUpPolicy: retain
engineState: 'active' engineState: 'active'

View File

@@ -125,6 +125,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.context}}" context: "{{workflow.parameters.context}}"
annotations: {}
spec: spec:
jobCleanUpPolicy: retain jobCleanUpPolicy: retain
engineState: 'active' engineState: 'active'

View File

@@ -121,6 +121,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.context}}" context: "{{workflow.parameters.context}}"
annotations: {}
spec: spec:
jobCleanUpPolicy: retain jobCleanUpPolicy: retain
engineState: 'active' engineState: 'active'

View File

@@ -118,6 +118,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.context}}" context: "{{workflow.parameters.context}}"
annotations: {}
spec: spec:
jobCleanUpPolicy: retain jobCleanUpPolicy: retain
engineState: 'active' engineState: 'active'

View File

@@ -117,6 +117,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_kube-proxy" context: "{{workflow.parameters.appNamespace}}_kube-proxy"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: kube-system appns: kube-system

View File

@@ -121,6 +121,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_kube-proxy" context: "{{workflow.parameters.appNamespace}}_kube-proxy"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: kube-system appns: kube-system

View File

@@ -124,6 +124,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_kube-proxy" context: "{{workflow.parameters.appNamespace}}_kube-proxy"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: kube-system appns: kube-system

View File

@@ -128,6 +128,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_kube-proxy" context: "{{workflow.parameters.appNamespace}}_kube-proxy"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: kube-system appns: kube-system

View File

@@ -116,6 +116,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_kube-proxy" context: "{{workflow.parameters.appNamespace}}_kube-proxy"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: kube-system appns: kube-system

View File

@@ -120,6 +120,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_kube-proxy" context: "{{workflow.parameters.appNamespace}}_kube-proxy"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: kube-system appns: kube-system

View File

@@ -35,12 +35,140 @@ spec:
args: ["-namespace={{workflow.parameters.adminModeNamespace}}","-typeName=resilient","-operation=apply","-timeout=400", "-app=podtato-head","-scope=namespace"] args: ["-namespace={{workflow.parameters.adminModeNamespace}}","-typeName=resilient","-operation=apply","-timeout=400", "-app=podtato-head","-scope=namespace"]
- name: install-chaos-faults - name: install-chaos-faults
inputs:
artifacts:
- name: pod-delete-rm5
path: /tmp/pod-delete-rm5.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Deletes a pod belonging to a deployment/statefulset/daemonset
kind: ChaosExperiment
metadata:
name: pod-delete
labels:
name: pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-delete
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "15"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
- name: FORCE
value: "true"
- name: CHAOS_INTERVAL
value: "5"
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: TARGET_CONTAINER
value: ""
- name: TARGET_PODS
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
outputs: {}
metadata: {}
container: container:
name: ""
image: litmuschaos/k8s:latest image: litmuschaos/k8s:latest
command: [sh, -c] command:
- sh
- "-c"
args: args:
- "kubectl apply -f https://hub.litmuschaos.io/api/chaos/master?file=faults/kubernetes/experiments.yaml -n - kubectl apply -f /tmp/pod-delete-rm5.yaml -n {{workflow.parameters.adminModeNamespace}} && sleep 30
{{workflow.parameters.adminModeNamespace}} ; sleep 30" resources: {}
- name: pod-delete - name: pod-delete
inputs: inputs:
@@ -56,6 +184,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.adminModeNamespace}}_podtato-main" context: "{{workflow.parameters.adminModeNamespace}}_podtato-main"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: {{workflow.parameters.adminModeNamespace}} appns: {{workflow.parameters.adminModeNamespace}}

View File

@@ -39,12 +39,140 @@ spec:
args: ["-namespace={{workflow.parameters.adminModeNamespace}}","-typeName=resilient","-operation=apply","-timeout=400", "-app=podtato-head","-scope=namespace"] args: ["-namespace={{workflow.parameters.adminModeNamespace}}","-typeName=resilient","-operation=apply","-timeout=400", "-app=podtato-head","-scope=namespace"]
- name: install-chaos-faults - name: install-chaos-faults
inputs:
artifacts:
- name: pod-delete-rm5
path: /tmp/pod-delete-rm5.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Deletes a pod belonging to a deployment/statefulset/daemonset
kind: ChaosExperiment
metadata:
name: pod-delete
labels:
name: pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-delete
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "15"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
- name: FORCE
value: "true"
- name: CHAOS_INTERVAL
value: "5"
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: TARGET_CONTAINER
value: ""
- name: TARGET_PODS
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
outputs: {}
metadata: {}
container: container:
name: ""
image: litmuschaos/k8s:latest image: litmuschaos/k8s:latest
command: [sh, -c] command:
- sh
- "-c"
args: args:
- "kubectl apply -f https://hub.litmuschaos.io/api/chaos/master?file=faults/kubernetes/experiments.yaml -n - kubectl apply -f /tmp/pod-delete-rm5.yaml -n {{workflow.parameters.adminModeNamespace}} && sleep 30
{{workflow.parameters.adminModeNamespace}} ; sleep 30" resources: {}
- name: pod-delete - name: pod-delete
inputs: inputs:
@@ -60,6 +188,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.adminModeNamespace}}_podtato-main" context: "{{workflow.parameters.adminModeNamespace}}_podtato-main"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: {{workflow.parameters.adminModeNamespace}} appns: {{workflow.parameters.adminModeNamespace}}

View File

@@ -49,12 +49,705 @@ spec:
args: ["-namespace=sock-shop","-typeName=resilient","-operation=apply","-timeout=400", "-app=sock-shop","-scope=cluster"] #for weak provide type flagName as resilient(-typeName=weak) args: ["-namespace=sock-shop","-typeName=resilient","-operation=apply","-timeout=400", "-app=sock-shop","-scope=cluster"] #for weak provide type flagName as resilient(-typeName=weak)
- name: install-chaos-faults - name: install-chaos-faults
inputs:
artifacts:
- name: pod-cpu-hog-2sf
path: /tmp/pod-cpu-hog-2sf.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects CPU consumption on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-cpu-hog
labels:
name: pod-cpu-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-cpu-hog
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "60"
## Number of CPU cores to stress
- name: CPU_CORES
value: "1"
## LOAD CPU WITH GIVEN PERCENT LOADING FOR THE CPU STRESS WORKERS.
## 0 IS EFFECTIVELY A SLEEP (NO LOAD) AND 100 IS FULL LOADING
- name: CPU_LOAD
value: "100"
## Percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
## Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
## It is used in pumba lib only
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
## It is used in pumba lib only
- name: STRESS_IMAGE
value: "alexeiled/stress-ng:latest-ubuntu"
## provide the cluster runtime
- name: CONTAINER_RUNTIME
value: "containerd"
# provide the socket file path
- name: SOCKET_PATH
value: "/run/containerd/containerd.sock"
- name: TARGET_CONTAINER
value: ""
- name: TARGET_PODS
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-cpu-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: latest
- name: pod-delete-rm5
path: /tmp/pod-delete-rm5.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Deletes a pod belonging to a deployment/statefulset/daemonset
kind: ChaosExperiment
metadata:
name: pod-delete
labels:
name: pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-delete
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "15"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
- name: FORCE
value: "true"
- name: CHAOS_INTERVAL
value: "5"
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: TARGET_CONTAINER
value: ""
- name: TARGET_PODS
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
- name: pod-network-loss-x1w
path: /tmp/pod-network-loss-x1w.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects network packet loss on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-network-loss
labels:
name: pod-network-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-network-loss
command:
- /bin/bash
env:
- name: TARGET_CONTAINER
value: ""
# provide lib image
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
- name: NETWORK_INTERFACE
value: "eth0"
- name: TC_IMAGE
value: "gaiadocker/iproute2"
- name: NETWORK_PACKET_LOSS_PERCENTAGE
value: "100" #in PERCENTAGE
- name: TOTAL_CHAOS_DURATION
value: "60" # in seconds
# time period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: TARGET_PODS
value: ""
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
# provide the name of container runtime
# for litmus LIB, it supports docker, containerd, crio
# for pumba LIB, it supports docker only
- name: CONTAINER_RUNTIME
value: "containerd"
# provide the destination ips
# chaos injection will be triggered for these destination ips
- name: DESTINATION_IPS
value: ""
# provide the destination hosts
# chaos injection will be triggered for these destination hosts
- name: DESTINATION_HOSTS
value: ""
# provide the socket file path
- name: SOCKET_PATH
value: "/run/containerd/containerd.sock"
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-network-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: latest
- name: pod-memory-hog-49a
path: /tmp/pod-memory-hog-49a.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects memory consumption on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-memory-hog
labels:
name: pod-memory-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-memory-hog
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "60"
## enter the amount of memory in megabytes to be consumed by the application pod
- name: MEMORY_CONSUMPTION
value: "500"
## Number of workers to perform stress
- name: NUMBER_OF_WORKERS
value: "1"
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
## Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
## It is used in pumba lib only
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
## It is used in pumba lib only
- name: STRESS_IMAGE
value: "alexeiled/stress-ng:latest-ubuntu"
## provide the cluster runtime
- name: CONTAINER_RUNTIME
value: "containerd"
# provide the socket file path
- name: SOCKET_PATH
value: "/run/containerd/containerd.sock"
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: TARGET_CONTAINER
value: ""
- name: TARGET_PODS
value: ""
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
labels:
name: pod-memory-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: latest
- name: disk-fill-h7d
path: /tmp/disk-fill-h7d.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Fillup Ephemeral Storage of a Resource
kind: ChaosExperiment
metadata:
name: disk-fill
labels:
name: disk-fill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name disk-fill
command:
- /bin/bash
env:
- name: TARGET_CONTAINER
value: ""
- name: FILL_PERCENTAGE
value: "80"
- name: TOTAL_CHAOS_DURATION
value: "60"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
# provide the data block size
# supported unit is KB
- name: DATA_BLOCK_SIZE
value: "256"
- name: TARGET_PODS
value: ""
- name: EPHEMERAL_STORAGE_MEBIBYTES
value: ""
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
# Provide the container runtime path
# Default set to docker container path
- name: CONTAINER_PATH
value: "/var/lib/docker/containers"
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: disk-fill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/host-path-usage: "true"
app.kubernetes.io/version: latest
outputs: {}
metadata: {}
container: container:
name: ""
image: litmuschaos/k8s:latest image: litmuschaos/k8s:latest
command: [sh, -c] command:
- sh
- "-c"
args: args:
- "kubectl apply -f https://hub.litmuschaos.io/api/chaos/master?file=faults/kubernetes/experiments.yaml -n - kubectl apply -f /tmp/ -n {{workflow.parameters.adminModeNamespace}} && sleep 30
{{workflow.parameters.adminModeNamespace}} ; sleep 30" resources: {}
- name: pod-cpu-hog - name: pod-cpu-hog
inputs: inputs:
@@ -70,6 +763,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_carts" context: "{{workflow.parameters.appNamespace}}_carts"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'
@@ -140,6 +834,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_orders" context: "{{workflow.parameters.appNamespace}}_orders"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'
@@ -208,6 +903,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_catalogue" context: "{{workflow.parameters.appNamespace}}_catalogue"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'
@@ -280,6 +976,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_user-db" context: "{{workflow.parameters.appNamespace}}_user-db"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'
@@ -355,6 +1052,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_catalogue-db" context: "{{workflow.parameters.appNamespace}}_catalogue-db"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'

View File

@@ -53,12 +53,705 @@ spec:
args: ["-namespace=sock-shop","-typeName=resilient","-operation=apply","-timeout=400", "-app=sock-shop","-scope=cluster"] #for weak provide type flagName as resilient(-typeName=weak) args: ["-namespace=sock-shop","-typeName=resilient","-operation=apply","-timeout=400", "-app=sock-shop","-scope=cluster"] #for weak provide type flagName as resilient(-typeName=weak)
- name: install-chaos-faults - name: install-chaos-faults
inputs:
artifacts:
- name: pod-cpu-hog-2sf
path: /tmp/pod-cpu-hog-2sf.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects CPU consumption on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-cpu-hog
labels:
name: pod-cpu-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-cpu-hog
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "60"
## Number of CPU cores to stress
- name: CPU_CORES
value: "1"
## LOAD CPU WITH GIVEN PERCENT LOADING FOR THE CPU STRESS WORKERS.
## 0 IS EFFECTIVELY A SLEEP (NO LOAD) AND 100 IS FULL LOADING
- name: CPU_LOAD
value: "100"
## Percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
## Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
## It is used in pumba lib only
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
## It is used in pumba lib only
- name: STRESS_IMAGE
value: "alexeiled/stress-ng:latest-ubuntu"
## provide the cluster runtime
- name: CONTAINER_RUNTIME
value: "containerd"
# provide the socket file path
- name: SOCKET_PATH
value: "/run/containerd/containerd.sock"
- name: TARGET_CONTAINER
value: ""
- name: TARGET_PODS
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-cpu-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: latest
- name: pod-delete-rm5
path: /tmp/pod-delete-rm5.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Deletes a pod belonging to a deployment/statefulset/daemonset
kind: ChaosExperiment
metadata:
name: pod-delete
labels:
name: pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-delete
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "15"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
- name: FORCE
value: "true"
- name: CHAOS_INTERVAL
value: "5"
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: TARGET_CONTAINER
value: ""
- name: TARGET_PODS
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
- name: pod-network-loss-x1w
path: /tmp/pod-network-loss-x1w.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects network packet loss on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-network-loss
labels:
name: pod-network-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-network-loss
command:
- /bin/bash
env:
- name: TARGET_CONTAINER
value: ""
# provide lib image
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
- name: NETWORK_INTERFACE
value: "eth0"
- name: TC_IMAGE
value: "gaiadocker/iproute2"
- name: NETWORK_PACKET_LOSS_PERCENTAGE
value: "100" #in PERCENTAGE
- name: TOTAL_CHAOS_DURATION
value: "60" # in seconds
# time period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: TARGET_PODS
value: ""
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
# provide the name of container runtime
# for litmus LIB, it supports docker, containerd, crio
# for pumba LIB, it supports docker only
- name: CONTAINER_RUNTIME
value: "containerd"
# provide the destination ips
# chaos injection will be triggered for these destination ips
- name: DESTINATION_IPS
value: ""
# provide the destination hosts
# chaos injection will be triggered for these destination hosts
- name: DESTINATION_HOSTS
value: ""
# provide the socket file path
- name: SOCKET_PATH
value: "/run/containerd/containerd.sock"
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-network-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: latest
- name: pod-memory-hog-49a
path: /tmp/pod-memory-hog-49a.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects memory consumption on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-memory-hog
labels:
name: pod-memory-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-memory-hog
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "60"
## enter the amount of memory in megabytes to be consumed by the application pod
- name: MEMORY_CONSUMPTION
value: "500"
## Number of workers to perform stress
- name: NUMBER_OF_WORKERS
value: "1"
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
## Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
## It is used in pumba lib only
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
## It is used in pumba lib only
- name: STRESS_IMAGE
value: "alexeiled/stress-ng:latest-ubuntu"
## provide the cluster runtime
- name: CONTAINER_RUNTIME
value: "containerd"
# provide the socket file path
- name: SOCKET_PATH
value: "/run/containerd/containerd.sock"
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: TARGET_CONTAINER
value: ""
- name: TARGET_PODS
value: ""
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
labels:
name: pod-memory-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: latest
- name: disk-fill-h7d
path: /tmp/disk-fill-h7d.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Fillup Ephemeral Storage of a Resource
kind: ChaosExperiment
metadata:
name: disk-fill
labels:
name: disk-fill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name disk-fill
command:
- /bin/bash
env:
- name: TARGET_CONTAINER
value: ""
- name: FILL_PERCENTAGE
value: "80"
- name: TOTAL_CHAOS_DURATION
value: "60"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
# provide the data block size
# supported unit is KB
- name: DATA_BLOCK_SIZE
value: "256"
- name: TARGET_PODS
value: ""
- name: EPHEMERAL_STORAGE_MEBIBYTES
value: ""
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
# Provide the container runtime path
# Default set to docker container path
- name: CONTAINER_PATH
value: "/var/lib/docker/containers"
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: disk-fill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/host-path-usage: "true"
app.kubernetes.io/version: latest
outputs: {}
metadata: {}
container: container:
name: ""
image: litmuschaos/k8s:latest image: litmuschaos/k8s:latest
command: [sh, -c] command:
- sh
- "-c"
args: args:
- "kubectl apply -f https://hub.litmuschaos.io/api/chaos/master?file=faults/kubernetes/experiments.yaml -n - kubectl apply -f /tmp/ -n {{workflow.parameters.adminModeNamespace}} && sleep 30
{{workflow.parameters.adminModeNamespace}} ; sleep 30" resources: {}
- name: pod-cpu-hog - name: pod-cpu-hog
inputs: inputs:
@@ -74,6 +767,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_carts" context: "{{workflow.parameters.appNamespace}}_carts"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'
@@ -144,6 +838,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_orders" context: "{{workflow.parameters.appNamespace}}_orders"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'
@@ -212,6 +907,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.env.appNamespace}}_catalogue" context: "{{workflow.env.appNamespace}}_catalogue"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'
@@ -284,6 +980,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_user-db" context: "{{workflow.parameters.appNamespace}}_user-db"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'
@@ -359,6 +1056,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_catalogue-db" context: "{{workflow.parameters.appNamespace}}_catalogue-db"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'

View File

@@ -49,12 +49,705 @@ spec:
args: ["-namespace=sock-shop","-typeName=resilient","-operation=apply","-timeout=400", "-app=sock-shop","-scope=cluster"] #for weak provide type flagName as resilient(-typeName=weak) args: ["-namespace=sock-shop","-typeName=resilient","-operation=apply","-timeout=400", "-app=sock-shop","-scope=cluster"] #for weak provide type flagName as resilient(-typeName=weak)
- name: install-chaos-faults - name: install-chaos-faults
inputs:
artifacts:
- name: pod-cpu-hog-2sf
path: /tmp/pod-cpu-hog-2sf.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects CPU consumption on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-cpu-hog
labels:
name: pod-cpu-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-cpu-hog
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "60"
## Number of CPU cores to stress
- name: CPU_CORES
value: "1"
## LOAD CPU WITH GIVEN PERCENT LOADING FOR THE CPU STRESS WORKERS.
## 0 IS EFFECTIVELY A SLEEP (NO LOAD) AND 100 IS FULL LOADING
- name: CPU_LOAD
value: "100"
## Percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
## Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
## It is used in pumba lib only
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
## It is used in pumba lib only
- name: STRESS_IMAGE
value: "alexeiled/stress-ng:latest-ubuntu"
## provide the cluster runtime
- name: CONTAINER_RUNTIME
value: "containerd"
# provide the socket file path
- name: SOCKET_PATH
value: "/run/containerd/containerd.sock"
- name: TARGET_CONTAINER
value: ""
- name: TARGET_PODS
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-cpu-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: latest
- name: pod-delete-rm5
path: /tmp/pod-delete-rm5.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Deletes a pod belonging to a deployment/statefulset/daemonset
kind: ChaosExperiment
metadata:
name: pod-delete
labels:
name: pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-delete
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "15"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
- name: FORCE
value: "true"
- name: CHAOS_INTERVAL
value: "5"
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: TARGET_CONTAINER
value: ""
- name: TARGET_PODS
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
- name: pod-network-loss-x1w
path: /tmp/pod-network-loss-x1w.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects network packet loss on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-network-loss
labels:
name: pod-network-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-network-loss
command:
- /bin/bash
env:
- name: TARGET_CONTAINER
value: ""
# provide lib image
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
- name: NETWORK_INTERFACE
value: "eth0"
- name: TC_IMAGE
value: "gaiadocker/iproute2"
- name: NETWORK_PACKET_LOSS_PERCENTAGE
value: "100" #in PERCENTAGE
- name: TOTAL_CHAOS_DURATION
value: "60" # in seconds
# time period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: TARGET_PODS
value: ""
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
# provide the name of container runtime
# for litmus LIB, it supports docker, containerd, crio
# for pumba LIB, it supports docker only
- name: CONTAINER_RUNTIME
value: "containerd"
# provide the destination ips
# chaos injection will be triggered for these destination ips
- name: DESTINATION_IPS
value: ""
# provide the destination hosts
# chaos injection will be triggered for these destination hosts
- name: DESTINATION_HOSTS
value: ""
# provide the socket file path
- name: SOCKET_PATH
value: "/run/containerd/containerd.sock"
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-network-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: latest
- name: pod-memory-hog-49a
path: /tmp/pod-memory-hog-49a.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects memory consumption on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-memory-hog
labels:
name: pod-memory-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-memory-hog
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "60"
## enter the amount of memory in megabytes to be consumed by the application pod
- name: MEMORY_CONSUMPTION
value: "500"
## Number of workers to perform stress
- name: NUMBER_OF_WORKERS
value: "1"
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
## Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
## It is used in pumba lib only
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
## It is used in pumba lib only
- name: STRESS_IMAGE
value: "alexeiled/stress-ng:latest-ubuntu"
## provide the cluster runtime
- name: CONTAINER_RUNTIME
value: "containerd"
# provide the socket file path
- name: SOCKET_PATH
value: "/run/containerd/containerd.sock"
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: TARGET_CONTAINER
value: ""
- name: TARGET_PODS
value: ""
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
labels:
name: pod-memory-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: latest
- name: disk-fill-h7d
path: /tmp/disk-fill-h7d.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Fillup Ephemeral Storage of a Resource
kind: ChaosExperiment
metadata:
name: disk-fill
labels:
name: disk-fill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name disk-fill
command:
- /bin/bash
env:
- name: TARGET_CONTAINER
value: ""
- name: FILL_PERCENTAGE
value: "80"
- name: TOTAL_CHAOS_DURATION
value: "60"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
# provide the data block size
# supported unit is KB
- name: DATA_BLOCK_SIZE
value: "256"
- name: TARGET_PODS
value: ""
- name: EPHEMERAL_STORAGE_MEBIBYTES
value: ""
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
# Provide the container runtime path
# Default set to docker container path
- name: CONTAINER_PATH
value: "/var/lib/docker/containers"
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: disk-fill
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/host-path-usage: "true"
app.kubernetes.io/version: latest
outputs: {}
metadata: {}
container: container:
name: ""
image: litmuschaos/k8s:latest image: litmuschaos/k8s:latest
command: [sh, -c] command:
- sh
- "-c"
args: args:
- "kubectl apply -f https://hub.litmuschaos.io/api/chaos/master?file=faults/kubernetes/experiments.yaml -n - kubectl apply -f /tmp/ -n {{workflow.parameters.adminModeNamespace}} && sleep 30
{{workflow.parameters.adminModeNamespace}} ; sleep 30" resources: {}
- name: pod-cpu-hog - name: pod-cpu-hog
inputs: inputs:
@@ -70,6 +763,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_carts" context: "{{workflow.parameters.appNamespace}}_carts"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'
@@ -141,6 +835,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_orders" context: "{{workflow.parameters.appNamespace}}_orders"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'
@@ -210,6 +905,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_catalogue" context: "{{workflow.parameters.appNamespace}}_catalogue"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'
@@ -283,6 +979,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_user-db" context: "{{workflow.parameters.appNamespace}}_user-db"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'
@@ -358,6 +1055,7 @@ spec:
namespace: {{workflow.parameters.adminModeNamespace}} namespace: {{workflow.parameters.adminModeNamespace}}
labels: labels:
context: "{{workflow.parameters.appNamespace}}_catalogue-db" context: "{{workflow.parameters.appNamespace}}_catalogue-db"
annotations: {}
spec: spec:
appinfo: appinfo:
appns: 'sock-shop' appns: 'sock-shop'

View File

@@ -53,12 +53,705 @@ spec:
args: ["-namespace=sock-shop","-typeName=resilient","-operation=apply","-timeout=400", "-app=sock-shop","-scope=cluster"] #for weak provide type flagName as resilient(-typeName=weak) args: ["-namespace=sock-shop","-typeName=resilient","-operation=apply","-timeout=400", "-app=sock-shop","-scope=cluster"] #for weak provide type flagName as resilient(-typeName=weak)
- name: install-chaos-faults - name: install-chaos-faults
inputs:
artifacts:
- name: pod-cpu-hog-2sf
path: /tmp/pod-cpu-hog-2sf.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects CPU consumption on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-cpu-hog
labels:
name: pod-cpu-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-cpu-hog
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "60"
## Number of CPU cores to stress
- name: CPU_CORES
value: "1"
## LOAD CPU WITH GIVEN PERCENT LOADING FOR THE CPU STRESS WORKERS.
## 0 IS EFFECTIVELY A SLEEP (NO LOAD) AND 100 IS FULL LOADING
- name: CPU_LOAD
value: "100"
## Percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
## Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
## It is used in pumba lib only
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
## It is used in pumba lib only
- name: STRESS_IMAGE
value: "alexeiled/stress-ng:latest-ubuntu"
## provide the cluster runtime
- name: CONTAINER_RUNTIME
value: "containerd"
# provide the socket file path
- name: SOCKET_PATH
value: "/run/containerd/containerd.sock"
- name: TARGET_CONTAINER
value: ""
- name: TARGET_PODS
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-cpu-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: latest
- name: pod-delete-rm5
path: /tmp/pod-delete-rm5.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Deletes a pod belonging to a deployment/statefulset/daemonset
kind: ChaosExperiment
metadata:
name: pod-delete
labels:
name: pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-delete
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: "15"
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
- name: FORCE
value: "true"
- name: CHAOS_INTERVAL
value: "5"
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: TARGET_CONTAINER
value: ""
- name: TARGET_PODS
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-delete
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: latest
- name: pod-network-loss-x1w
path: /tmp/pod-network-loss-x1w.yaml
raw:
data: |
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects network packet loss on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
name: pod-network-loss
labels:
name: pod-network-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: latest
spec:
definition:
scope: Namespaced
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs:
[
"create",
"delete",
"get",
"list",
"patch",
"update",
"deletecollection",
]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmaps details and mount it to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pods log
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for creating and managing to execute commands inside target container
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: ["apps.openshift.io"]
resources: ["deploymentconfigs"]
verbs: ["list", "get"]
# deriving the parent/owner details of the pod(if parent is deploymentConfig)
- apiGroups: [""]
resources: ["replicationcontrollers"]
verbs: ["get", "list"]
# deriving the parent/owner details of the pod(if parent is argo-rollouts)
- apiGroups: ["argoproj.io"]
resources: ["rollouts"]
verbs: ["list", "get"]
# for configuring and monitor the experiment job by the chaos-runner pod
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
image: "litmuschaos/go-runner:latest"
imagePullPolicy: Always
args:
- -c
- ./experiments -name pod-network-loss
command:
- /bin/bash
env:
- name: TARGET_CONTAINER
value: ""
# provide lib image
- name: LIB_IMAGE
value: "litmuschaos/go-runner:latest"
- name: NETWORK_INTERFACE
value: "eth0"
- name: TC_IMAGE
value: "gaiadocker/iproute2"
- name: NETWORK_PACKET_LOSS_PERCENTAGE
value: "100" #in PERCENTAGE
- name: TOTAL_CHAOS_DURATION
value: "60" # in seconds
# time period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ""
## percentage of total pods to target
- name: PODS_AFFECTED_PERC
value: ""
- name: DEFAULT_HEALTH_CHECK
value: "false"
- name: TARGET_PODS
value: ""
# To select pods on specific node(s)
- name: NODE_LABEL
value: ""
# provide the name of container runtime
# for litmus LIB, it supports docker, containerd, crio
# for pumba LIB, it supports docker only
- name: CONTAINER_RUNTIME
value: "containerd"
# provide the destination ips
# chaos injection will be triggered for these destination ips
- name: DESTINATION_IPS
value: ""
# provide the destination hosts
# chaos injection will be triggered for these destination hosts
- name: DESTINATION_HOSTS
value: ""
# provide the socket file path
- name: SOCKET_PATH
value: "/run/containerd/containerd.sock"
## it defines the sequence of chaos execution for multiple target pods
## supported values: serial, parallel
- name: SEQUENCE
value: "parallel"
labels:
name: pod-network-loss
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/runtime-api-usage: "true"
app.kubernetes.io/version: latest
# Raw artifact embedding the ChaosExperiment CR for the "pod-memory-hog" fault.
# Argo writes this manifest verbatim to `path` so the install-chaos-faults
# container can kubectl-apply every manifest staged under /tmp/.
# NOTE(review): everything under `data: |` is literal manifest text parsed by
# kubectl later — do not edit it except in lockstep with upstream Litmus.
- name: pod-memory-hog-49a
  path: /tmp/pod-memory-hog-49a.yaml
  raw:
    data: |
      apiVersion: litmuschaos.io/v1alpha1
      description:
        message: |
          Injects memory consumption on pods belonging to an app deployment
      kind: ChaosExperiment
      metadata:
        name: pod-memory-hog
        labels:
          name: pod-memory-hog
          app.kubernetes.io/part-of: litmus
          app.kubernetes.io/component: chaosexperiment
          app.kubernetes.io/version: latest
      spec:
        definition:
          scope: Namespaced
          permissions:
            # Create and monitor the experiment & helper pods
            - apiGroups: [""]
              resources: ["pods"]
              verbs:
                [
                  "create",
                  "delete",
                  "get",
                  "list",
                  "patch",
                  "update",
                  "deletecollection",
                ]
            # Performs CRUD operations on the events inside chaosengine and chaosresult
            - apiGroups: [""]
              resources: ["events"]
              verbs: ["create", "get", "list", "patch", "update"]
            # Fetch configmaps details and mount it to the experiment pod (if specified)
            - apiGroups: [""]
              resources: ["configmaps"]
              verbs: ["get", "list"]
            # Track and get the runner, experiment, and helper pods log
            - apiGroups: [""]
              resources: ["pods/log"]
              verbs: ["get", "list", "watch"]
            # for creating and managing to execute commands inside target container
            - apiGroups: [""]
              resources: ["pods/exec"]
              verbs: ["get", "list", "create"]
            # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
            - apiGroups: ["apps"]
              resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
              verbs: ["list", "get"]
            # deriving the parent/owner details of the pod(if parent is deploymentConfig)
            - apiGroups: ["apps.openshift.io"]
              resources: ["deploymentconfigs"]
              verbs: ["list", "get"]
            # deriving the parent/owner details of the pod(if parent is deploymentConfig)
            - apiGroups: [""]
              resources: ["replicationcontrollers"]
              verbs: ["get", "list"]
            # deriving the parent/owner details of the pod(if parent is argo-rollouts)
            - apiGroups: ["argoproj.io"]
              resources: ["rollouts"]
              verbs: ["list", "get"]
            # for configuring and monitor the experiment job by the chaos-runner pod
            - apiGroups: ["batch"]
              resources: ["jobs"]
              verbs: ["create", "list", "get", "delete", "deletecollection"]
            # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
            - apiGroups: ["litmuschaos.io"]
              resources: ["chaosengines", "chaosexperiments", "chaosresults"]
              verbs: ["create", "list", "get", "patch", "update", "delete"]
          image: "litmuschaos/go-runner:latest"
          imagePullPolicy: Always
          args:
            - -c
            - ./experiments -name pod-memory-hog
          command:
            - /bin/bash
          env:
            - name: TOTAL_CHAOS_DURATION
              value: "60"
            ## enter the amount of memory in megabytes to be consumed by the application pod
            - name: MEMORY_CONSUMPTION
              value: "500"
            ## Number of workers to perform stress
            - name: NUMBER_OF_WORKERS
              value: "1"
            ## percentage of total pods to target
            - name: PODS_AFFECTED_PERC
              value: ""
            ## Period to wait before and after injection of chaos in sec
            - name: RAMP_TIME
              value: ""
            ## It is used in pumba lib only
            - name: LIB_IMAGE
              value: "litmuschaos/go-runner:latest"
            ## It is used in pumba lib only
            - name: STRESS_IMAGE
              value: "alexeiled/stress-ng:latest-ubuntu"
            ## provide the cluster runtime
            - name: CONTAINER_RUNTIME
              value: "containerd"
            # provide the socket file path
            - name: SOCKET_PATH
              value: "/run/containerd/containerd.sock"
            ## it defines the sequence of chaos execution for multiple target pods
            ## supported values: serial, parallel
            - name: SEQUENCE
              value: "parallel"
            - name: DEFAULT_HEALTH_CHECK
              value: "false"
            - name: TARGET_CONTAINER
              value: ""
            - name: TARGET_PODS
              value: ""
            # To select pods on specific node(s)
            - name: NODE_LABEL
              value: ""
          labels:
            name: pod-memory-hog
            app.kubernetes.io/part-of: litmus
            app.kubernetes.io/component: experiment-job
            app.kubernetes.io/runtime-api-usage: "true"
            app.kubernetes.io/version: latest
# Raw artifact embedding the ChaosExperiment CR for the "disk-fill" fault
# (fills a target pod's ephemeral storage). Argo writes this manifest verbatim
# to `path` so the install-chaos-faults container can kubectl-apply every
# manifest staged under /tmp/.
# NOTE(review): everything under `data: |` is literal manifest text parsed by
# kubectl later — do not edit it except in lockstep with upstream Litmus.
- name: disk-fill-h7d
  path: /tmp/disk-fill-h7d.yaml
  raw:
    data: |
      apiVersion: litmuschaos.io/v1alpha1
      description:
        message: |
          Fillup Ephemeral Storage of a Resource
      kind: ChaosExperiment
      metadata:
        name: disk-fill
        labels:
          name: disk-fill
          app.kubernetes.io/part-of: litmus
          app.kubernetes.io/component: chaosexperiment
          app.kubernetes.io/version: latest
      spec:
        definition:
          scope: Namespaced
          permissions:
            # Create and monitor the experiment & helper pods
            - apiGroups: [""]
              resources: ["pods"]
              verbs:
                [
                  "create",
                  "delete",
                  "get",
                  "list",
                  "patch",
                  "update",
                  "deletecollection",
                ]
            # Performs CRUD operations on the events inside chaosengine and chaosresult
            - apiGroups: [""]
              resources: ["events"]
              verbs: ["create", "get", "list", "patch", "update"]
            # Fetch configmaps details and mount it to the experiment pod (if specified)
            - apiGroups: [""]
              resources: ["configmaps"]
              verbs: ["get", "list"]
            # Track and get the runner, experiment, and helper pods log
            - apiGroups: [""]
              resources: ["pods/log"]
              verbs: ["get", "list", "watch"]
            # for creating and managing to execute commands inside target container
            - apiGroups: [""]
              resources: ["pods/exec"]
              verbs: ["get", "list", "create"]
            # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
            - apiGroups: ["apps"]
              resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
              verbs: ["list", "get"]
            # deriving the parent/owner details of the pod(if parent is deploymentConfig)
            - apiGroups: ["apps.openshift.io"]
              resources: ["deploymentconfigs"]
              verbs: ["list", "get"]
            # deriving the parent/owner details of the pod(if parent is deploymentConfig)
            - apiGroups: [""]
              resources: ["replicationcontrollers"]
              verbs: ["get", "list"]
            # deriving the parent/owner details of the pod(if parent is argo-rollouts)
            - apiGroups: ["argoproj.io"]
              resources: ["rollouts"]
              verbs: ["list", "get"]
            # for configuring and monitor the experiment job by the chaos-runner pod
            - apiGroups: ["batch"]
              resources: ["jobs"]
              verbs: ["create", "list", "get", "delete", "deletecollection"]
            # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
            - apiGroups: ["litmuschaos.io"]
              resources: ["chaosengines", "chaosexperiments", "chaosresults"]
              verbs: ["create", "list", "get", "patch", "update", "delete"]
          image: "litmuschaos/go-runner:latest"
          imagePullPolicy: Always
          args:
            - -c
            - ./experiments -name disk-fill
          command:
            - /bin/bash
          env:
            - name: TARGET_CONTAINER
              value: ""
            - name: FILL_PERCENTAGE
              value: "80"
            - name: TOTAL_CHAOS_DURATION
              value: "60"
            # Period to wait before and after injection of chaos in sec
            - name: RAMP_TIME
              value: ""
            # provide the data block size
            # supported unit is KB
            - name: DATA_BLOCK_SIZE
              value: "256"
            - name: TARGET_PODS
              value: ""
            - name: EPHEMERAL_STORAGE_MEBIBYTES
              value: ""
            # To select pods on specific node(s)
            - name: NODE_LABEL
              value: ""
            ## percentage of total pods to target
            - name: PODS_AFFECTED_PERC
              value: ""
            - name: DEFAULT_HEALTH_CHECK
              value: "false"
            - name: LIB_IMAGE
              value: "litmuschaos/go-runner:latest"
            # Provide the container runtime path
            # Default set to docker container path
            - name: CONTAINER_PATH
              value: "/var/lib/docker/containers"
            ## it defines the sequence of chaos execution for multiple target pods
            ## supported values: serial, parallel
            - name: SEQUENCE
              value: "parallel"
          labels:
            name: disk-fill
            app.kubernetes.io/part-of: litmus
            app.kubernetes.io/component: experiment-job
            app.kubernetes.io/host-path-usage: "true"
            app.kubernetes.io/version: latest
outputs: {}
metadata: {}
container:
  name: ""
  image: litmuschaos/k8s:latest
  command:
    - sh
    - "-c"
  args:
    - kubectl apply -f /tmp/ -n {{workflow.parameters.adminModeNamespace}} && sleep 30
  resources: {}
- name: pod-cpu-hog
  inputs:
@@ -74,6 +767,7 @@ spec:
  namespace: {{workflow.parameters.adminModeNamespace}}
  labels:
    context: "{{workflow.parameters.appNamespace}}_carts"
  annotations: {}
spec:
  appinfo:
    appns: 'sock-shop'
@@ -145,6 +839,7 @@ spec:
  namespace: {{workflow.parameters.adminModeNamespace}}
  labels:
    context: "{{workflow.parameters.appNamespace}}_orders"
  annotations: {}
spec:
  appinfo:
    appns: 'sock-shop'
@@ -214,6 +909,7 @@ spec:
  namespace: {{workflow.parameters.adminModeNamespace}}
  labels:
    context: "{{workflow.parameters.appNamespace}}_catalogue"
  annotations: {}
spec:
  appinfo:
    appns: 'sock-shop'
@@ -287,6 +983,7 @@ spec:
  namespace: {{workflow.parameters.adminModeNamespace}}
  labels:
    context: "{{workflow.parameters.appNamespace}}_user-db"
  annotations: {}
spec:
  appinfo:
    appns: 'sock-shop'
@@ -362,6 +1059,7 @@ spec:
  namespace: {{workflow.parameters.adminModeNamespace}}
  labels:
    context: "{{workflow.parameters.appNamespace}}_catalogue-db"
  annotations: {}
spec:
  appinfo:
    appns: 'sock-shop'