refactors directory and file structure
Signed-off-by: neelanjan00 <neelanjan.manna@harness.io>
faults/kubernetes/pod-memory-hog/engine.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: default
spec:
  # It can be active/stop
  engineState: 'active'
  appinfo:
    appns: ''
    applabel: ''
    appkind: ''
  chaosServiceAccount: pod-memory-hog-sa
  experiments:
    - name: pod-memory-hog
      spec:
        components:
          env:
            - name: TOTAL_CHAOS_DURATION
              value: '60' # in seconds

            # Enter the amount of memory in megabytes to be consumed by the application pod
            - name: MEMORY_CONSUMPTION
              value: '500'

            ## percentage of total pods to target
            - name: PODS_AFFECTED_PERC
              value: ''

            ## provide the cluster runtime
            - name: CONTAINER_RUNTIME
              value: 'containerd'

            # provide the socket file path
            - name: SOCKET_PATH
              value: '/run/containerd/containerd.sock'
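Note: the appinfo selectors in the sample engine above are intentionally left empty. A minimal sketch of how they might be filled in for a hypothetical nginx deployment in the default namespace is shown below; the namespace, label, and kind values are illustrative assumptions, not part of this commit.

  appinfo:
    appns: 'default'          # namespace of the target application (assumed)
    applabel: 'app=nginx'     # label selector of the target pods (assumed)
    appkind: 'deployment'     # kind of the parent workload

If the cluster runs Docker rather than containerd, CONTAINER_RUNTIME would typically be set to 'docker' and SOCKET_PATH to '/var/run/docker.sock'.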
faults/kubernetes/pod-memory-hog/fault.yaml (new file, 128 lines)
@@ -0,0 +1,128 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
  message: |
    Injects memory consumption on pods belonging to an app deployment
kind: ChaosExperiment
metadata:
  name: pod-memory-hog
  labels:
    name: pod-memory-hog
    app.kubernetes.io/part-of: litmus
    app.kubernetes.io/component: chaosexperiment
    app.kubernetes.io/version: ci
spec:
  definition:
    scope: Namespaced
    permissions:
      # Create and monitor the experiment & helper pods
      - apiGroups: [""]
        resources: ["pods"]
        verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
      # Performs CRUD operations on the events inside chaosengine and chaosresult
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["create", "get", "list", "patch", "update"]
      # Fetch configmap details and mount them to the experiment pod (if specified)
      - apiGroups: [""]
        resources: ["configmaps"]
        verbs: ["get", "list"]
      # Track and get the runner, experiment, and helper pod logs
      - apiGroups: [""]
        resources: ["pods/log"]
        verbs: ["get", "list", "watch"]
      # for creating and managing the execution of commands inside the target container
      - apiGroups: [""]
        resources: ["pods/exec"]
        verbs: ["get", "list", "create"]
      # deriving the parent/owner details of the pod (if the parent is any of {deployment, statefulset, daemonset})
      - apiGroups: ["apps"]
        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod (if the parent is a deploymentConfig)
      - apiGroups: ["apps.openshift.io"]
        resources: ["deploymentconfigs"]
        verbs: ["list", "get"]
      # deriving the parent/owner details of the pod (if the parent is a deploymentConfig)
      - apiGroups: [""]
        resources: ["replicationcontrollers"]
        verbs: ["get", "list"]
      # deriving the parent/owner details of the pod (if the parent is argo-rollouts)
      - apiGroups: ["argoproj.io"]
        resources: ["rollouts"]
        verbs: ["list", "get"]
      # for configuring and monitoring the experiment job by the chaos-runner pod
      - apiGroups: ["batch"]
        resources: ["jobs"]
        verbs: ["create", "list", "get", "delete", "deletecollection"]
      # for creation, status polling, and deletion of litmus chaos resources used within a chaos workflow
      - apiGroups: ["litmuschaos.io"]
        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
        verbs: ["create", "list", "get", "patch", "update", "delete"]
    image: "chaosnative/go-runner:ci"
    imagePullPolicy: Always
    args:
      - -c
      - ./experiments -name pod-memory-hog
    command:
      - /bin/bash
    env:
      - name: TOTAL_CHAOS_DURATION
        value: '60'

      ## amount of memory in megabytes to be consumed by the application pod
      - name: MEMORY_CONSUMPTION
        value: '500'

      ## number of workers to perform stress
      - name: NUMBER_OF_WORKERS
        value: '1'

      ## percentage of total pods to target
      - name: PODS_AFFECTED_PERC
        value: ''

      ## period to wait before and after injection of chaos, in seconds
      - name: RAMP_TIME
        value: ''

      ## used only by the pumba lib
      - name: LIB_IMAGE
        value: 'chaosnative/go-runner:ci'

      ## used only by the pumba lib
      - name: STRESS_IMAGE
        value: 'alexeiled/stress-ng:latest-ubuntu'

      ## provide the cluster runtime
      - name: CONTAINER_RUNTIME
        value: 'containerd'

      # provide the socket file path
      - name: SOCKET_PATH
        value: '/run/containerd/containerd.sock'

      ## it defines the sequence of chaos execution for multiple target pods
      ## supported values: serial, parallel
      - name: SEQUENCE
        value: 'parallel'

      - name: DEFAULT_HEALTH_CHECK
        value: 'false'

      - name: TARGET_CONTAINER
        value: ''

      - name: TARGET_PODS
        value: ''

      # To select pods on specific node(s)
      - name: NODE_LABEL
        value: ''

    labels:
      name: pod-memory-hog
      app.kubernetes.io/part-of: litmus
      app.kubernetes.io/component: experiment-job
      app.kubernetes.io/runtime-api-usage: "true"
      app.kubernetes.io/version: ci
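The engine references chaosServiceAccount: pod-memory-hog-sa, which is not included in this commit. A minimal sketch of a ServiceAccount, Role, and RoleBinding that mirrors the permissions declared in fault.yaml is given below; the resource names and the default namespace are illustrative assumptions.

---
# Illustrative sketch, not part of this commit: service account assumed by engine.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: pod-memory-hog-sa
  namespace: default
  labels:
    name: pod-memory-hog-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-memory-hog-sa
  namespace: default
  labels:
    name: pod-memory-hog-sa
rules:
  # mirrors the permissions block of fault.yaml
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "get", "list", "patch", "update"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["pods/exec"]
    verbs: ["get", "list", "create"]
  - apiGroups: [""]
    resources: ["replicationcontrollers"]
    verbs: ["get", "list"]
  - apiGroups: ["apps"]
    resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
    verbs: ["list", "get"]
  - apiGroups: ["apps.openshift.io"]
    resources: ["deploymentconfigs"]
    verbs: ["list", "get"]
  - apiGroups: ["argoproj.io"]
    resources: ["rollouts"]
    verbs: ["list", "get"]
  - apiGroups: ["batch"]
    resources: ["jobs"]
    verbs: ["create", "list", "get", "delete", "deletecollection"]
  - apiGroups: ["litmuschaos.io"]
    resources: ["chaosengines", "chaosexperiments", "chaosresults"]
    verbs: ["create", "list", "get", "patch", "update", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: pod-memory-hog-sa
  namespace: default
  labels:
    name: pod-memory-hog-sa
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: pod-memory-hog-sa
subjects:
  - kind: ServiceAccount
    name: pod-memory-hog-sa
    namespace: default

With such a service account in place, applying engine.yaml triggers the fault and its outcome is recorded in the corresponding ChaosResult resource.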
@@ -0,0 +1,39 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
  name: pod-memory-hog
  version: 0.1.0
  annotations:
    categories: Kubernetes
    vendor: Harness
spec:
  displayName: Pod Memory Hog
  categoryDescription: |
    The Pod Memory Hog fault consumes memory resources of specified containers in Kubernetes pods.
    - Causes memory consumption in the specified application containers using cgroups and the litmus nsutil tool.
    - Tests the application's resilience to potential slowness or unavailability of some replicas due to high memory load.
    - The application pod should be healthy once the chaos is stopped; the expectation is that service requests continue to be served despite the chaos.
  keywords:
    - Kubernetes
  platforms:
    - GKE
    - Packet(Kubeadm)
    - Minikube
    - EKS
    - AKS
    - OpenShift(OKD)
  experiments:
    - pod-memory-hog
    - sock-shop
    - sock-shop-promProbe
  minKubeVersion: 1.12.0
  labels:
    app.kubernetes.io/component: chartserviceversion
    app.kubernetes.io/version: ci
  links:
    - name: Documentation
      url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
  icon:
    - base64data: ""
      mediatype: ""