refactors directory and file structure

Signed-off-by: neelanjan00 <neelanjan.manna@harness.io>
neelanjan00
2023-06-05 13:15:56 +05:30
parent 9bf371a651
commit ae8467237a
840 changed files with 6787 additions and 43442 deletions

@@ -0,0 +1,38 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: default
spec:
# It can be active/stop
engineState: 'active'
# ex. values: ns1:name=percona,ns2:run=nginx
auxiliaryAppInfo: ''
chaosServiceAccount: node-memory-hog-sa
experiments:
- name: node-memory-hog
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '120'
## percentage of total nodes to target
- name: NODES_AFFECTED_PERC
value: ''
# provide the comma-separated target node names
- name: TARGET_NODES
value: ''
## Specify the memory consumption as a percentage of total node capacity, e.g. '30'
## NOTE: to use this option, keep MEMORY_CONSUMPTION_MEBIBYTES empty
- name: MEMORY_CONSUMPTION_PERCENTAGE
value: '0'
## Specify the amount of memory to be consumed in mebibytes
## NOTE: to use this option, keep MEMORY_CONSUMPTION_PERCENTAGE empty
- name: MEMORY_CONSUMPTION_MEBIBYTES
value: '0'
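
As the notes above indicate, only one of MEMORY_CONSUMPTION_PERCENTAGE and MEMORY_CONSUMPTION_MEBIBYTES should be set, with the other left empty. A minimal sketch of the experiments block tuned for a fixed 500 MiB hog (the '500' value is illustrative, not taken from this commit):

# illustrative only: consume a fixed 500 MiB per target node and leave the
# percentage-based tunable empty ('500' is an assumed value, not from this commit)
experiments:
  - name: node-memory-hog
    spec:
      components:
        env:
          - name: TOTAL_CHAOS_DURATION
            value: '120'
          - name: MEMORY_CONSUMPTION_MEBIBYTES
            value: '500'
          - name: MEMORY_CONSUMPTION_PERCENTAGE
            value: ''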

@@ -0,0 +1,105 @@
---
apiVersion: litmuschaos.io/v1alpha1
description:
message: |
Injects a memory hog on a node belonging to a deployment
kind: ChaosExperiment
metadata:
name: node-memory-hog
labels:
name: node-memory-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: chaosexperiment
app.kubernetes.io/version: ci
spec:
definition:
scope: Cluster
permissions:
# Create and monitor the experiment & helper pods
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
# Performs CRUD operations on the events inside chaosengine and chaosresult
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "get", "list", "patch", "update"]
# Fetch configmap details and mount them to the experiment pod (if specified)
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
# Track and get the runner, experiment, and helper pod logs
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch"]
# for executing commands inside the target container (via pods/exec)
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "list", "create"]
# for the chaos-runner pod to configure and monitor the experiment job
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["create", "list", "get", "delete", "deletecollection"]
# for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
- apiGroups: ["litmuschaos.io"]
resources: ["chaosengines", "chaosexperiments", "chaosresults"]
verbs: ["create", "list", "get", "patch", "update", "delete"]
# for the experiment to perform node status checks
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list"]
image: "chaosnative/go-runner:ci"
imagePullPolicy: Always
args:
- -c
- ./experiments -name node-memory-hog
command:
- /bin/bash
env:
- name: TOTAL_CHAOS_DURATION
value: '120'
## Specify the memory consumption as a percentage of total node capacity, e.g. '30'
## NOTE: to use this option, keep MEMORY_CONSUMPTION_MEBIBYTES empty
- name: MEMORY_CONSUMPTION_PERCENTAGE
value: ''
## Specify the amount of memory to be consumed in mebibytes
## NOTE: to use this option, keep MEMORY_CONSUMPTION_PERCENTAGE empty
- name: MEMORY_CONSUMPTION_MEBIBYTES
value: ''
- name: NUMBER_OF_WORKERS
value: '1'
# enter the comma-separated target node names
- name: TARGET_NODES
value: ''
- name: NODE_LABEL
value: ''
# Period to wait before and after injection of chaos in sec
- name: RAMP_TIME
value: ''
# provide lib image
- name: LIB_IMAGE
value: 'chaosnative/go-runner:ci'
- name: DEFAULT_HEALTH_CHECK
value: 'false'
## percentage of total nodes to target
- name: NODES_AFFECTED_PERC
value: ''
## it defines the sequence of chaos execution for multiple target nodes
## supported values: serial, parallel
- name: SEQUENCE
value: 'parallel'
labels:
name: node-memory-hog
app.kubernetes.io/part-of: litmus
app.kubernetes.io/component: experiment-job
app.kubernetes.io/version: ci
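
The ChaosEngine above references chaosServiceAccount: node-memory-hog-sa, but the corresponding RBAC manifest is not part of this hunk. A minimal sketch of what that service account could look like, mirroring the permissions block of this experiment CR (the ClusterRole/ClusterRoleBinding names and the default namespace are assumptions):

# sketch only: ServiceAccount plus a ClusterRole/ClusterRoleBinding mirroring
# the permissions declared in the experiment CR above; names are assumptions
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: node-memory-hog-sa
  namespace: default
  labels:
    name: node-memory-hog-sa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: node-memory-hog-sa
  labels:
    name: node-memory-hog-sa
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["create", "delete", "get", "list", "patch", "update", "deletecollection"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "get", "list", "patch", "update"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["pods/exec"]
    verbs: ["get", "list", "create"]
  - apiGroups: ["batch"]
    resources: ["jobs"]
    verbs: ["create", "list", "get", "delete", "deletecollection"]
  - apiGroups: ["litmuschaos.io"]
    resources: ["chaosengines", "chaosexperiments", "chaosresults"]
    verbs: ["create", "list", "get", "patch", "update", "delete"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-memory-hog-sa
  labels:
    name: node-memory-hog-sa
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: node-memory-hog-sa
subjects:
  - kind: ServiceAccount
    name: node-memory-hog-sa
    namespace: default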

@@ -0,0 +1,37 @@
---
apiVersion: litmuschaos.io/v1alpha1
kind: ChartServiceVersion
metadata:
name: node-memory-hog
version: 0.1.0
annotations:
categories: Kubernetes
vendor: Harness
spec:
displayName: Node Memory Hog
categoryDescription: |
Kubernetes Node memory hog fault disrupts the state of Kubernetes resources. This fault injects a memory spike on a node where the application pod is scheduled.
- Memory hog on a particular node where the application deployment is available.
- After the test, the application pod and node should be recovered manually if they are not in an appropriate state.
keywords:
- Kubernetes
platforms:
- GKE
- EKS
- AKS
- Kind
- Rancher
- OpenShift(OKD)
experiments:
- node-memory-hog
chaosType: infra
minKubeVersion: 1.12.0
labels:
app.kubernetes.io/component: chartserviceversion
app.kubernetes.io/version: ci
links:
- name: Documentation
url: https://docs.harness.io/category/kl0mxwpfw1-hce-category
icon:
- url: ""
mediatype: ""