Files
litmus-hub/experiments/bank-of-anthos/experiment.yaml
Neelanjan Manna d482aa76af chore: Fix experiments charts for 3.0.0 (#613)
* adds metadata.annotation in chaosengine and replaces install-chaos-faults step to use artifacts.data.raw manifests

Signed-off-by: neelanjan00 <neelanjan.manna@harness.io>

* fixes annotation -> annotations

Signed-off-by: neelanjan00 <neelanjan.manna@harness.io>

* updates labels

Signed-off-by: neelanjan00 <neelanjan.manna@harness.io>

---------

Signed-off-by: neelanjan00 <neelanjan.manna@harness.io>
2023-10-04 10:37:55 +05:30

276 lines
11 KiB
YAML

# Argo Workflow that measures bank-of-anthos resiliency:
# 1. deploy the app, 2. install the pod-network-loss ChaosExperiment CR,
# 3. run a ChaosEngine injecting 100% packet loss (with an HTTP probe on the
#    frontend), 4. clean up the engine and delete the app in parallel.
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  name: argowf-chaos-bank-of-anthos-resiliency
  namespace: litmus
  labels:
    subject: "{{workflow.parameters.appNamespace}}_bank-of-anthos"
spec:
  entrypoint: argowf-chaos
  serviceAccountName: argo-chaos
  securityContext:
    runAsUser: 1000
    runAsNonRoot: true
  arguments:
    parameters:
      - name: adminModeNamespace
        value: "litmus"
      - name: appNamespace
        value: "bank"
  templates:
    - name: argowf-chaos
      steps:
        - - name: install-application
            template: install-application
        - - name: install-chaos-faults
            template: install-chaos-faults
        - - name: pod-network-loss
            template: pod-network-loss
        # cleanup and app deletion run in the same (parallel) step group
        - - name: cleanup-chaos-resources
            template: cleanup-chaos-resources
          - name: delete-application
            template: delete-application

    - name: install-application
      container:
        image: litmuschaos/litmus-app-deployer:latest
        args: ["-namespace=bank","-typeName=resilient","-operation=apply","-timeout=400", "-app=bank-of-anthos","-scope=cluster"] #for weak provide type flagName as resilient(-typeName=weak)

    # Applies the pod-network-loss ChaosExperiment CR from an inline raw artifact.
    - name: install-chaos-faults
      inputs:
        artifacts:
          - name: pod-network-loss-x1w
            path: /tmp/pod-network-loss-x1w.yaml
            raw:
              data: |
                apiVersion: litmuschaos.io/v1alpha1
                description:
                  message: |
                    Injects network packet loss on pods belonging to an app deployment
                kind: ChaosExperiment
                metadata:
                  name: pod-network-loss
                  labels:
                    name: pod-network-loss
                    app.kubernetes.io/part-of: litmus
                    app.kubernetes.io/component: chaosexperiment
                    app.kubernetes.io/version: latest
                spec:
                  definition:
                    scope: Namespaced
                    permissions:
                      # Create and monitor the experiment & helper pods
                      - apiGroups: [""]
                        resources: ["pods"]
                        verbs:
                          [
                            "create",
                            "delete",
                            "get",
                            "list",
                            "patch",
                            "update",
                            "deletecollection",
                          ]
                      # Performs CRUD operations on the events inside chaosengine and chaosresult
                      - apiGroups: [""]
                        resources: ["events"]
                        verbs: ["create", "get", "list", "patch", "update"]
                      # Fetch configmaps details and mount it to the experiment pod (if specified)
                      - apiGroups: [""]
                        resources: ["configmaps"]
                        verbs: ["get", "list"]
                      # Track and get the runner, experiment, and helper pods log
                      - apiGroups: [""]
                        resources: ["pods/log"]
                        verbs: ["get", "list", "watch"]
                      # for creating and managing to execute commands inside target container
                      - apiGroups: [""]
                        resources: ["pods/exec"]
                        verbs: ["get", "list", "create"]
                      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
                      - apiGroups: ["apps"]
                        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
                        verbs: ["list", "get"]
                      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
                      - apiGroups: ["apps.openshift.io"]
                        resources: ["deploymentconfigs"]
                        verbs: ["list", "get"]
                      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
                      - apiGroups: [""]
                        resources: ["replicationcontrollers"]
                        verbs: ["get", "list"]
                      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
                      - apiGroups: ["argoproj.io"]
                        resources: ["rollouts"]
                        verbs: ["list", "get"]
                      # for configuring and monitor the experiment job by the chaos-runner pod
                      - apiGroups: ["batch"]
                        resources: ["jobs"]
                        verbs: ["create", "list", "get", "delete", "deletecollection"]
                      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
                      - apiGroups: ["litmuschaos.io"]
                        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
                        verbs: ["create", "list", "get", "patch", "update", "delete"]
                    image: "litmuschaos/go-runner:latest"
                    imagePullPolicy: Always
                    args:
                      - -c
                      - ./experiments -name pod-network-loss
                    command:
                      - /bin/bash
                    env:
                      - name: TARGET_CONTAINER
                        value: ""
                      # provide lib image
                      - name: LIB_IMAGE
                        value: "litmuschaos/go-runner:latest"
                      - name: NETWORK_INTERFACE
                        value: "eth0"
                      - name: TC_IMAGE
                        value: "gaiadocker/iproute2"
                      - name: NETWORK_PACKET_LOSS_PERCENTAGE
                        value: "100" #in PERCENTAGE
                      - name: TOTAL_CHAOS_DURATION
                        value: "60" # in seconds
                      # time period to wait before and after injection of chaos in sec
                      - name: RAMP_TIME
                        value: ""
                      ## percentage of total pods to target
                      - name: PODS_AFFECTED_PERC
                        value: ""
                      - name: DEFAULT_HEALTH_CHECK
                        value: "false"
                      - name: TARGET_PODS
                        value: ""
                      # To select pods on specific node(s)
                      - name: NODE_LABEL
                        value: ""
                      # provide the name of container runtime
                      # for litmus LIB, it supports docker, containerd, crio
                      # for pumba LIB, it supports docker only
                      - name: CONTAINER_RUNTIME
                        value: "containerd"
                      # provide the destination ips
                      # chaos injection will be triggered for these destination ips
                      - name: DESTINATION_IPS
                        value: ""
                      # provide the destination hosts
                      # chaos injection will be triggered for these destination hosts
                      - name: DESTINATION_HOSTS
                        value: ""
                      # provide the socket file path
                      - name: SOCKET_PATH
                        value: "/run/containerd/containerd.sock"
                      ## it defines the sequence of chaos execution for multiple target pods
                      ## supported values: serial, parallel
                      - name: SEQUENCE
                        value: "parallel"
                    labels:
                      name: pod-network-loss
                      app.kubernetes.io/part-of: litmus
                      app.kubernetes.io/component: experiment-job
                      app.kubernetes.io/runtime-api-usage: "true"
                      app.kubernetes.io/version: latest
      outputs: {}
      metadata: {}
      container:
        name: ""
        image: litmuschaos/k8s:latest
        command:
          - sh
          - "-c"
        args:
          - kubectl apply -f /tmp/pod-network-loss-x1w.yaml -n {{workflow.parameters.adminModeNamespace}} && sleep 30
        resources: {}

    # Runs the ChaosEngine via litmus-checker and waits for the verdict.
    - name: pod-network-loss
      inputs:
        artifacts:
          - name: pod-network-loss
            path: /tmp/chaosengine.yaml
            raw:
              data: |
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: pod-network-loss-chaos
                  namespace: {{workflow.parameters.adminModeNamespace}}
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_bank-of-anthos"
                  annotations: {}
                spec:
                  appinfo:
                    appns: 'bank'
                    applabel: 'name in (balancereader,transactionhistory)'
                    appkind: 'deployment'
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: litmus-admin
                  components:
                    runner:
                      imagePullPolicy: Always
                  experiments:
                    - name: pod-network-loss
                      spec:
                        probe:
                          - name: "check-frontend-access-url"
                            type: "httpProbe"
                            httpProbe/inputs:
                              url: "http://frontend.bank.svc.cluster.local:80"
                              responseTimeout: 100
                              method:
                                get:
                                  criteria: "=="
                                  responseCode: "200"
                            mode: "Continuous"
                            runProperties:
                              probeTimeout: 1s
                              interval: 100ms
                              attempt: 2
                        components:
                          env:
                            - name: TOTAL_CHAOS_DURATION
                              value: '90'
                            - name: NETWORK_INTERFACE
                              value: 'eth0'
                            - name: NETWORK_PACKET_LOSS_PERCENTAGE
                              value: '100'
                            - name: CONTAINER_RUNTIME
                              value: 'containerd'
                            - name: SOCKET_PATH
                              value: '/run/containerd/containerd.sock'
      container:
        image: litmuschaos/litmus-checker:latest
        args: ["-file=/tmp/chaosengine.yaml","-saveName=/tmp/engine-name"]

    - name: delete-application
      container:
        image: litmuschaos/litmus-app-deployer:latest
        args: ["-namespace=bank","-typeName=resilient","-operation=delete", "-app=bank-of-anthos"]

    - name: cleanup-chaos-resources
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args:
          [
            "kubectl delete chaosengine pod-network-loss-chaos -n {{workflow.parameters.adminModeNamespace}}",
          ]