From 2df1f674a34807024d30461f83fa946ac05ed4f3 Mon Sep 17 00:00:00 2001 From: sumit Nagal Date: Mon, 26 Oct 2020 09:10:19 -0700 Subject: [PATCH] K8 usecases (#355) * Changes for chaostoolkit --- charts/generic/k8-calico-node/Readme.md | 9 + charts/generic/k8-calico-node/engine.yaml | 39 ++++ charts/generic/k8-calico-node/experiment.yaml | 58 ++++++ .../k8-calico-node.chartserviceversion.yaml | 33 +++ charts/generic/k8-calico-node/rbac-admin.yaml | 38 ++++ .../Cluster/engine-kiam-count.yaml | 0 .../Cluster/engine-kiam-custom-count.yaml | 0 .../Cluster/engine-kiam-custom-health.yaml | 0 .../Cluster/engine-kiam-health.yaml | 0 .../generic/k8-kiam/Cluster/rbac-admin.yaml | 38 ++++ charts/generic/k8-kiam/Readme.md | 9 + charts/generic/k8-kiam/engine.yaml | 39 ++++ charts/generic/k8-kiam/experiment.yaml | 58 ++++++ .../k8-kiam/k8-kiam.chartserviceversion.yaml | 33 +++ charts/generic/k8-kiam/rbac-admin.yaml | 38 ++++ .../Cluster/engine-app-all-health.yaml} | 7 +- .../Cluster/engine-app-count.yaml | 39 ++++ .../Cluster/engine-app-health.yaml | 39 ++++ .../Cluster/engine-custom-all-health.yaml | 39 ++++ .../Cluster/engine-custom-count.yaml | 39 ++++ .../Cluster/engine-custom-health.yaml | 39 ++++ .../k8-pod-delete/Cluster/rbac-admin.yaml | 35 ---- .../generic/k8-pod-delete/Cluster/rbac.yaml | 46 +++++ .../generic/k8-pod-delete/Service/rbac.yaml | 14 +- charts/generic/k8-pod-delete/rbac.yaml | 11 +- .../k8-service-kill/Service/experiment.yaml | 75 ------- .../generic/k8-service-kill/Service/rbac.yaml | 35 ---- charts/generic/k8-service-kill/engine.yaml | 4 +- .../generic/k8-service-kill/experiment.yaml | 9 +- charts/generic/k8-service-kill/rbac.yaml | 34 ++-- .../generic/k8-wavefront-collector/Readme.md | 10 + .../k8-wavefront-collector/engine.yaml | 39 ++++ .../k8-wavefront-collector/experiment.yaml | 58 ++++++ ...vefront-collector.chartserviceversion.yaml | 33 +++ .../k8-wavefront-collector/rbac-admin.yaml | 38 ++++ workflows/k8-calico-node/Readme.md | 10 + .../k8-calico-node/rbac-argo-service.yaml | 45 +++++ workflows/k8-calico-node/workflow.yaml | 150 ++++++++++++++ workflows/k8-kiam/Readme.md | 10 + workflows/k8-kiam/rbac-argo-service.yaml | 45 +++++ workflows/k8-kiam/workflow.yaml | 150 ++++++++++++++ workflows/k8-pod-delete/Readme.md | 9 + .../k8-pod-delete/rbac-argo-service.yaml | 45 +++++ workflows/k8-pod-delete/workflow.yaml | 189 ++++++++++++++++++ workflows/k8-service-kill/Readme.md | 10 + .../k8-service-kill/rbac-argo-service.yaml | 45 +++++ workflows/k8-service-kill/workflow.yaml | 150 ++++++++++++++ workflows/k8-wavefront-collector/Readme.md | 10 + .../rbac-argo-service.yaml | 45 +++++ .../k8-wavefront-collector/workflow.yaml | 150 ++++++++++++++ 50 files changed, 1917 insertions(+), 181 deletions(-) create mode 100644 charts/generic/k8-calico-node/Readme.md create mode 100644 charts/generic/k8-calico-node/engine.yaml create mode 100644 charts/generic/k8-calico-node/experiment.yaml create mode 100644 charts/generic/k8-calico-node/k8-calico-node.chartserviceversion.yaml create mode 100644 charts/generic/k8-calico-node/rbac-admin.yaml rename charts/generic/{k8-pod-delete => k8-kiam}/Cluster/engine-kiam-count.yaml (100%) rename charts/generic/{k8-pod-delete => k8-kiam}/Cluster/engine-kiam-custom-count.yaml (100%) rename charts/generic/{k8-pod-delete => k8-kiam}/Cluster/engine-kiam-custom-health.yaml (100%) rename charts/generic/{k8-pod-delete => k8-kiam}/Cluster/engine-kiam-health.yaml (100%) create mode 100644 charts/generic/k8-kiam/Cluster/rbac-admin.yaml create mode 100644 
charts/generic/k8-kiam/Readme.md create mode 100644 charts/generic/k8-kiam/engine.yaml create mode 100644 charts/generic/k8-kiam/experiment.yaml create mode 100644 charts/generic/k8-kiam/k8-kiam.chartserviceversion.yaml create mode 100644 charts/generic/k8-kiam/rbac-admin.yaml rename charts/generic/{k8-service-kill/Service/engine.yaml => k8-pod-delete/Cluster/engine-app-all-health.yaml} (88%) create mode 100644 charts/generic/k8-pod-delete/Cluster/engine-app-count.yaml create mode 100644 charts/generic/k8-pod-delete/Cluster/engine-app-health.yaml create mode 100644 charts/generic/k8-pod-delete/Cluster/engine-custom-all-health.yaml create mode 100644 charts/generic/k8-pod-delete/Cluster/engine-custom-count.yaml create mode 100644 charts/generic/k8-pod-delete/Cluster/engine-custom-health.yaml delete mode 100644 charts/generic/k8-pod-delete/Cluster/rbac-admin.yaml create mode 100644 charts/generic/k8-pod-delete/Cluster/rbac.yaml delete mode 100644 charts/generic/k8-service-kill/Service/experiment.yaml delete mode 100644 charts/generic/k8-service-kill/Service/rbac.yaml create mode 100644 charts/generic/k8-wavefront-collector/Readme.md create mode 100644 charts/generic/k8-wavefront-collector/engine.yaml create mode 100644 charts/generic/k8-wavefront-collector/experiment.yaml create mode 100644 charts/generic/k8-wavefront-collector/k8-wavefront-collector.chartserviceversion.yaml create mode 100644 charts/generic/k8-wavefront-collector/rbac-admin.yaml create mode 100644 workflows/k8-calico-node/Readme.md create mode 100644 workflows/k8-calico-node/rbac-argo-service.yaml create mode 100644 workflows/k8-calico-node/workflow.yaml create mode 100644 workflows/k8-kiam/Readme.md create mode 100644 workflows/k8-kiam/rbac-argo-service.yaml create mode 100644 workflows/k8-kiam/workflow.yaml create mode 100644 workflows/k8-pod-delete/Readme.md create mode 100644 workflows/k8-pod-delete/rbac-argo-service.yaml create mode 100644 workflows/k8-pod-delete/workflow.yaml create mode 100644 workflows/k8-service-kill/Readme.md create mode 100644 workflows/k8-service-kill/rbac-argo-service.yaml create mode 100644 workflows/k8-service-kill/workflow.yaml create mode 100644 workflows/k8-wavefront-collector/Readme.md create mode 100644 workflows/k8-wavefront-collector/rbac-argo-service.yaml create mode 100644 workflows/k8-wavefront-collector/workflow.yaml diff --git a/charts/generic/k8-calico-node/Readme.md b/charts/generic/k8-calico-node/Readme.md new file mode 100644 index 0000000..f822a6f --- /dev/null +++ b/charts/generic/k8-calico-node/Readme.md @@ -0,0 +1,9 @@ +# Remote namespace +* Apply experiments for K8 - `kubectl apply -f experiments.yaml` +* Validate the experiments for k8 - `kubectl get chaosexperiments` +* Setup RBAC as admin mode - `kubectl apply -f rbac-admin.yaml` +* Create pod Experiment - for health experiment for IKS 1.0 -`kubectl create -f engine.yaml` +* Validate experiment - `kubectl get pods -w` +* Validate logs - `kubectl logs -f ` +* Clean up chaosexperiment -`kubectl delete -f engine.yaml` +* Clean up rbac-admin -`kubectl delete -f rbac-admin.yaml` diff --git a/charts/generic/k8-calico-node/engine.yaml b/charts/generic/k8-calico-node/engine.yaml new file mode 100644 index 0000000..0411c72 --- /dev/null +++ b/charts/generic/k8-calico-node/engine.yaml @@ -0,0 +1,39 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: k8-calico-node + namespace: default +spec: + appinfo: + appns: 'default' + applabel: "k8s-app=calico-node" + appkind: deployment + annotationCheck: 'false' 
+ engineState: 'active' + chaosServiceAccount: chaos-admin + monitoring: false + jobCleanUpPolicy: 'retain' + experiments: + - name: k8-pod-delete + spec: + components: + env: + # set chaos namespace + - name: NAME_SPACE + value: kube-system + # set chaos label name + - name: LABEL_NAME + value: k8s-app=calico-node + # pod endpoint + - name: APP_ENDPOINT + value: 'localhost' + - name: FILE + value: 'pod-custom-kill-health.json' + - name: REPORT + value: 'true' + - name: REPORT_ENDPOINT + value: 'none' + - name: TEST_NAMESPACE + value: 'default' + + diff --git a/charts/generic/k8-calico-node/experiment.yaml b/charts/generic/k8-calico-node/experiment.yaml new file mode 100644 index 0000000..0105ff3 --- /dev/null +++ b/charts/generic/k8-calico-node/experiment.yaml @@ -0,0 +1,58 @@ +# Generic Chaos experiment for Application team, who want to participate in Game Day +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Deletes a pod belonging to a deployment/statefulset/daemonset +kind: ChaosExperiment +metadata: + name: k8-pod-delete +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: ["","apps","batch"] + resources: ["jobs","deployments","daemonsets"] + verbs: ["create","list","get","patch","delete"] + - apiGroups: ["","litmuschaos.io"] + resources: ["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"] + verbs: ["get","create","update","patch","delete","list"] + - apiGroups: [""] + resources: ["nodes"] + verbs : ["get","list"] + labels: + name: k8-pod-delete + app.kubernetes.io/part-of: litmus + image: "litmuschaos/chaostoolkit:latest" + args: + - -c + - python /app/chaos/chaostest/kubernetes/k8_wrapper.py ; exit 0 + command: + - /bin/bash + env: + + - name: CHAOSTOOLKIT_IN_POD + value: 'true' + + - name: FILE + value: 'pod-app-kill-count.json' + + - name: NAME_SPACE + value: '' + + - name: LABEL_NAME + value: '' + + - name: APP_ENDPOINT + value: '' + + - name: PERCENTAGE + value: '50' + + - name: REPORT + value: 'true' + + - name: REPORT_ENDPOINT + value: 'none' + + - name: TEST_NAMESPACE + value: 'default' diff --git a/charts/generic/k8-calico-node/k8-calico-node.chartserviceversion.yaml b/charts/generic/k8-calico-node/k8-calico-node.chartserviceversion.yaml new file mode 100644 index 0000000..8bbeb20 --- /dev/null +++ b/charts/generic/k8-calico-node/k8-calico-node.chartserviceversion.yaml @@ -0,0 +1,33 @@ +apiVersion: litmuchaos.io/v1alpha1 +kind: ChartServiceVersion +metadata: + name: k8-pod-delete + version: 0.0.4 + annotations: + categories: Kubernetes + vendor: CNCF + createdAt: 2020-02-24T10:28:08Z + support: https://slack.kubernetes.io/ +spec: + displayName: k8-pod-delete + categoryDescription: | + K8 Pod delete contains chaos to disrupt state of kubernetes resources. 
It uses chaostoolkit to inject random pod delete failures against specified applications + keywords: + - Kubernetes + - State + platforms: + - Minikube + maturity: alpha + maintainers: + - name: sumit + email: sumit_nagal@intuit.com + minKubeVersion: 1.12.0 + provider: + name: Intuit + links: + - name: Source Code + url: https://github.com/litmuschaos/litmus-python/tree/master/chaos-test + icon: + - url: + mediatype: "" + chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/generic/k8-pod-delete/experiment.yaml diff --git a/charts/generic/k8-calico-node/rbac-admin.yaml b/charts/generic/k8-calico-node/rbac-admin.yaml new file mode 100644 index 0000000..85a7c8d --- /dev/null +++ b/charts/generic/k8-calico-node/rbac-admin.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: chaos-admin + labels: + name: chaos-admin +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: chaos-admin + labels: + name: chaos-admin +rules: + - apiGroups: ["","apps","batch"] + resources: ["jobs","deployments","daemonsets"] + verbs: ["create","list","get","patch","delete"] + - apiGroups: ["","litmuschaos.io"] + resources: ["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"] + verbs: ["get","create","update","patch","delete","list"] + - apiGroups: [""] + resources: ["nodes"] + verbs : ["get","list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: chaos-admin + labels: + name: chaos-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: chaos-admin +subjects: +- kind: ServiceAccount + name: chaos-admin + namespace: default diff --git a/charts/generic/k8-pod-delete/Cluster/engine-kiam-count.yaml b/charts/generic/k8-kiam/Cluster/engine-kiam-count.yaml similarity index 100% rename from charts/generic/k8-pod-delete/Cluster/engine-kiam-count.yaml rename to charts/generic/k8-kiam/Cluster/engine-kiam-count.yaml diff --git a/charts/generic/k8-pod-delete/Cluster/engine-kiam-custom-count.yaml b/charts/generic/k8-kiam/Cluster/engine-kiam-custom-count.yaml similarity index 100% rename from charts/generic/k8-pod-delete/Cluster/engine-kiam-custom-count.yaml rename to charts/generic/k8-kiam/Cluster/engine-kiam-custom-count.yaml diff --git a/charts/generic/k8-pod-delete/Cluster/engine-kiam-custom-health.yaml b/charts/generic/k8-kiam/Cluster/engine-kiam-custom-health.yaml similarity index 100% rename from charts/generic/k8-pod-delete/Cluster/engine-kiam-custom-health.yaml rename to charts/generic/k8-kiam/Cluster/engine-kiam-custom-health.yaml diff --git a/charts/generic/k8-pod-delete/Cluster/engine-kiam-health.yaml b/charts/generic/k8-kiam/Cluster/engine-kiam-health.yaml similarity index 100% rename from charts/generic/k8-pod-delete/Cluster/engine-kiam-health.yaml rename to charts/generic/k8-kiam/Cluster/engine-kiam-health.yaml diff --git a/charts/generic/k8-kiam/Cluster/rbac-admin.yaml b/charts/generic/k8-kiam/Cluster/rbac-admin.yaml new file mode 100644 index 0000000..85a7c8d --- /dev/null +++ b/charts/generic/k8-kiam/Cluster/rbac-admin.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: chaos-admin + labels: + name: chaos-admin +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: chaos-admin + labels: + name: chaos-admin +rules: + - apiGroups: ["","apps","batch"] + resources: ["jobs","deployments","daemonsets"] + verbs: 
["create","list","get","patch","delete"] + - apiGroups: ["","litmuschaos.io"] + resources: ["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"] + verbs: ["get","create","update","patch","delete","list"] + - apiGroups: [""] + resources: ["nodes"] + verbs : ["get","list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: chaos-admin + labels: + name: chaos-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: chaos-admin +subjects: +- kind: ServiceAccount + name: chaos-admin + namespace: default diff --git a/charts/generic/k8-kiam/Readme.md b/charts/generic/k8-kiam/Readme.md new file mode 100644 index 0000000..f822a6f --- /dev/null +++ b/charts/generic/k8-kiam/Readme.md @@ -0,0 +1,9 @@ +# Remote namespace +* Apply experiments for K8 - `kubectl apply -f experiments.yaml` +* Validate the experiments for k8 - `kubectl get chaosexperiments` +* Setup RBAC as admin mode - `kubectl apply -f rbac-admin.yaml` +* Create pod Experiment - for health experiment for IKS 1.0 -`kubectl create -f engine.yaml` +* Validate experiment - `kubectl get pods -w` +* Validate logs - `kubectl logs -f ` +* Clean up chaosexperiment -`kubectl delete -f engine.yaml` +* Clean up rbac-admin -`kubectl delete -f rbac-admin.yaml` diff --git a/charts/generic/k8-kiam/engine.yaml b/charts/generic/k8-kiam/engine.yaml new file mode 100644 index 0000000..07c6aa6 --- /dev/null +++ b/charts/generic/k8-kiam/engine.yaml @@ -0,0 +1,39 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: k8-calico-node + namespace: default +spec: + appinfo: + appns: 'default' + applabel: "app=kiam" + appkind: deployment + annotationCheck: 'false' + engineState: 'active' + chaosServiceAccount: chaos-admin + monitoring: false + jobCleanUpPolicy: 'retain' + experiments: + - name: k8-pod-delete + spec: + components: + env: + # set chaos namespace + - name: NAME_SPACE + value: kube-system + # set chaos label name + - name: LABEL_NAME + value: k8s-app=calico-node + # pod endpoint + - name: APP_ENDPOINT + value: 'localhost' + - name: FILE + value: 'pod-app-kill-health.json' + - name: REPORT + value: 'true' + - name: REPORT_ENDPOINT + value: 'none' + - name: TEST_NAMESPACE + value: 'default' + + diff --git a/charts/generic/k8-kiam/experiment.yaml b/charts/generic/k8-kiam/experiment.yaml new file mode 100644 index 0000000..0105ff3 --- /dev/null +++ b/charts/generic/k8-kiam/experiment.yaml @@ -0,0 +1,58 @@ +# Generic Chaos experiment for Application team, who want to participate in Game Day +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Deletes a pod belonging to a deployment/statefulset/daemonset +kind: ChaosExperiment +metadata: + name: k8-pod-delete +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: ["","apps","batch"] + resources: ["jobs","deployments","daemonsets"] + verbs: ["create","list","get","patch","delete"] + - apiGroups: ["","litmuschaos.io"] + resources: ["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"] + verbs: ["get","create","update","patch","delete","list"] + - apiGroups: [""] + resources: ["nodes"] + verbs : ["get","list"] + labels: + name: k8-pod-delete + app.kubernetes.io/part-of: litmus + image: "litmuschaos/chaostoolkit:latest" + args: + - -c + - python /app/chaos/chaostest/kubernetes/k8_wrapper.py ; exit 0 + command: + - /bin/bash + env: + + - name: CHAOSTOOLKIT_IN_POD + value: 'true' + + - 
name: FILE + value: 'pod-app-kill-count.json' + + - name: NAME_SPACE + value: '' + + - name: LABEL_NAME + value: '' + + - name: APP_ENDPOINT + value: '' + + - name: PERCENTAGE + value: '50' + + - name: REPORT + value: 'true' + + - name: REPORT_ENDPOINT + value: 'none' + + - name: TEST_NAMESPACE + value: 'default' diff --git a/charts/generic/k8-kiam/k8-kiam.chartserviceversion.yaml b/charts/generic/k8-kiam/k8-kiam.chartserviceversion.yaml new file mode 100644 index 0000000..8bbeb20 --- /dev/null +++ b/charts/generic/k8-kiam/k8-kiam.chartserviceversion.yaml @@ -0,0 +1,33 @@ +apiVersion: litmuchaos.io/v1alpha1 +kind: ChartServiceVersion +metadata: + name: k8-pod-delete + version: 0.0.4 + annotations: + categories: Kubernetes + vendor: CNCF + createdAt: 2020-02-24T10:28:08Z + support: https://slack.kubernetes.io/ +spec: + displayName: k8-pod-delete + categoryDescription: | + K8 Pod delete contains chaos to disrupt state of kubernetes resources. It uses chaostoolkit to inject random pod delete failures against specified applications + keywords: + - Kubernetes + - State + platforms: + - Minikube + maturity: alpha + maintainers: + - name: sumit + email: sumit_nagal@intuit.com + minKubeVersion: 1.12.0 + provider: + name: Intuit + links: + - name: Source Code + url: https://github.com/litmuschaos/litmus-python/tree/master/chaos-test + icon: + - url: + mediatype: "" + chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/generic/k8-pod-delete/experiment.yaml diff --git a/charts/generic/k8-kiam/rbac-admin.yaml b/charts/generic/k8-kiam/rbac-admin.yaml new file mode 100644 index 0000000..b005115 --- /dev/null +++ b/charts/generic/k8-kiam/rbac-admin.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: chaos-admin + labels: + name: chaos-admin +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: chaos-admin + labels: + name: chaos-admin +rules: + - apiGroups: ["","apps","batch"] + resources: ["jobs","deployments","daemonsets"] + verbs: ["create","list","get","patch","delete"] + - apiGroups: ["","litmuschaos.io"] + resources: ["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"] + verbs: ["get","create","update","patch","delete","list"] + - apiGroups: [""] + resources: ["nodes"] + verbs : ["get","list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: chaos-admin + labels: + name: chaos-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: chaos-admin +subjects: +- kind: ServiceAccount + name: chaos-admin + namespace: default \ No newline at end of file diff --git a/charts/generic/k8-service-kill/Service/engine.yaml b/charts/generic/k8-pod-delete/Cluster/engine-app-all-health.yaml similarity index 88% rename from charts/generic/k8-service-kill/Service/engine.yaml rename to charts/generic/k8-pod-delete/Cluster/engine-app-all-health.yaml index ab3d39b..afd2125 100644 --- a/charts/generic/k8-service-kill/Service/engine.yaml +++ b/charts/generic/k8-pod-delete/Cluster/engine-app-all-health.yaml @@ -1,7 +1,7 @@ apiVersion: litmuschaos.io/v1alpha1 kind: ChaosEngine metadata: - name: nginx-chaos-service-health + name: nginx-chaos-cluster-all-health namespace: default spec: appinfo: @@ -14,7 +14,7 @@ spec: monitoring: false jobCleanUpPolicy: 'retain' experiments: - - name: k8-service-kill + - name: k8-pod-delete spec: components: env: @@ -28,7 +28,7 @@ spec: - name: APP_ENDPOINT value: 
'localhost' - name: FILE - value: 'service-app-kill-health.json' + value: 'pod-app-kill-all.json' - name: REPORT value: 'true' - name: REPORT_ENDPOINT @@ -36,3 +36,4 @@ spec: - name: TEST_NAMESPACE value: 'default' + diff --git a/charts/generic/k8-pod-delete/Cluster/engine-app-count.yaml b/charts/generic/k8-pod-delete/Cluster/engine-app-count.yaml new file mode 100644 index 0000000..7ae8139 --- /dev/null +++ b/charts/generic/k8-pod-delete/Cluster/engine-app-count.yaml @@ -0,0 +1,39 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos-cluster-count + namespace: default +spec: + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + annotationCheck: 'true' + engineState: 'active' + chaosServiceAccount: chaos-admin + monitoring: false + jobCleanUpPolicy: 'retain' + experiments: + - name: k8-pod-delete + spec: + components: + env: + # set chaos namespace + - name: NAME_SPACE + value: 'default' + # set chaos label name + - name: LABEL_NAME + value: 'nginx' + # pod endpoint + - name: APP_ENDPOINT + value: 'localhost' + - name: FILE + value: 'pod-app-kill-count.json' + - name: REPORT + value: 'true' + - name: REPORT_ENDPOINT + value: 'none' + - name: TEST_NAMESPACE + value: 'default' + + diff --git a/charts/generic/k8-pod-delete/Cluster/engine-app-health.yaml b/charts/generic/k8-pod-delete/Cluster/engine-app-health.yaml new file mode 100644 index 0000000..921ad46 --- /dev/null +++ b/charts/generic/k8-pod-delete/Cluster/engine-app-health.yaml @@ -0,0 +1,39 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos-cluster-health + namespace: default +spec: + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + annotationCheck: 'true' + engineState: 'active' + chaosServiceAccount: chaos-admin + monitoring: false + jobCleanUpPolicy: 'retain' + experiments: + - name: k8-pod-delete + spec: + components: + env: + # set chaos namespace + - name: NAME_SPACE + value: 'default' + # set chaos label name + - name: LABEL_NAME + value: 'nginx' + # pod endpoint + - name: APP_ENDPOINT + value: 'localhost' + - name: FILE + value: 'pod-app-kill-health.json' + - name: REPORT + value: 'true' + - name: REPORT_ENDPOINT + value: 'none' + - name: TEST_NAMESPACE + value: 'default' + + diff --git a/charts/generic/k8-pod-delete/Cluster/engine-custom-all-health.yaml b/charts/generic/k8-pod-delete/Cluster/engine-custom-all-health.yaml new file mode 100644 index 0000000..bba35a6 --- /dev/null +++ b/charts/generic/k8-pod-delete/Cluster/engine-custom-all-health.yaml @@ -0,0 +1,39 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos-cluster-custom-all-health + namespace: default +spec: + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + annotationCheck: 'true' + engineState: 'active' + chaosServiceAccount: chaos-admin + monitoring: false + jobCleanUpPolicy: 'retain' + experiments: + - name: k8-pod-delete + spec: + components: + env: + # set chaos namespace + - name: NAME_SPACE + value: 'default' + # set chaos label name + - name: LABEL_NAME + value: 'app=nginx' + # pod endpoint + - name: APP_ENDPOINT + value: 'localhost' + - name: FILE + value: 'pod-custom-kill-all.json' + - name: REPORT + value: 'true' + - name: REPORT_ENDPOINT + value: 'none' + - name: TEST_NAMESPACE + value: 'default' + + diff --git a/charts/generic/k8-pod-delete/Cluster/engine-custom-count.yaml b/charts/generic/k8-pod-delete/Cluster/engine-custom-count.yaml new file mode 100644 
index 0000000..098edfc --- /dev/null +++ b/charts/generic/k8-pod-delete/Cluster/engine-custom-count.yaml @@ -0,0 +1,39 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos-cluster-custom-count + namespace: default +spec: + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + annotationCheck: 'true' + engineState: 'active' + chaosServiceAccount: chaos-admin + monitoring: false + jobCleanUpPolicy: 'retain' + experiments: + - name: k8-pod-delete + spec: + components: + env: + # set chaos namespace + - name: NAME_SPACE + value: 'default' + # set chaos label name + - name: LABEL_NAME + value: 'app=nginx' + # pod endpoint + - name: APP_ENDPOINT + value: 'localhost' + - name: FILE + value: 'pod-custom-kill-count.json' + - name: REPORT + value: 'true' + - name: REPORT_ENDPOINT + value: 'none' + - name: TEST_NAMESPACE + value: 'default' + + diff --git a/charts/generic/k8-pod-delete/Cluster/engine-custom-health.yaml b/charts/generic/k8-pod-delete/Cluster/engine-custom-health.yaml new file mode 100644 index 0000000..7ffc56b --- /dev/null +++ b/charts/generic/k8-pod-delete/Cluster/engine-custom-health.yaml @@ -0,0 +1,39 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: nginx-chaos-cluster-custom-health + namespace: default +spec: + appinfo: + appns: 'default' + applabel: 'app=nginx' + appkind: 'deployment' + annotationCheck: 'true' + engineState: 'active' + chaosServiceAccount: chaos-admin + monitoring: false + jobCleanUpPolicy: 'retain' + experiments: + - name: k8-pod-delete + spec: + components: + env: + # set chaos namespace + - name: NAME_SPACE + value: 'default' + # set chaos label name + - name: LABEL_NAME + value: 'app=nginx' + # pod endpoint + - name: APP_ENDPOINT + value: 'localhost' + - name: FILE + value: 'pod-custom-kill-health.json' + - name: REPORT + value: 'true' + - name: REPORT_ENDPOINT + value: 'none' + - name: TEST_NAMESPACE + value: 'default' + + diff --git a/charts/generic/k8-pod-delete/Cluster/rbac-admin.yaml b/charts/generic/k8-pod-delete/Cluster/rbac-admin.yaml deleted file mode 100644 index f386c6c..0000000 --- a/charts/generic/k8-pod-delete/Cluster/rbac-admin.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: chaos-admin - labels: - name: chaos-admin ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: chaos-admin - labels: - name: chaos-admin -rules: -- apiGroups: ["","apps","batch","extensions","litmuschaos.io","openebs.io","storage.k8s.io"] - resources: ["chaosengines","chaosexperiments","chaosresults","configmaps","cstorpools","cstorvolumereplicas","events","jobs","persistentvolumeclaims","persistentvolumes","pods","pods/exec","pods/log","secrets","storageclasses","chaosengines","chaosexperiments","chaosresults","configmaps","cstorpools","cstorvolumereplicas","daemonsets","deployments","events","jobs","persistentvolumeclaims","persistentvolumes","pods","pods/eviction","pods/exec","pods/log","replicasets","secrets","services","statefulsets","storageclasses"] - verbs: ["create","delete","get","list","patch","update"] -- apiGroups: [""] - resources: ["nodes"] - verbs: ["get","list","patch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: chaos-admin - labels: - name: chaos-admin -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: chaos-admin -subjects: -- kind: ServiceAccount - name: chaos-admin - namespace: default diff --git 
a/charts/generic/k8-pod-delete/Cluster/rbac.yaml b/charts/generic/k8-pod-delete/Cluster/rbac.yaml new file mode 100644 index 0000000..a31c3c8 --- /dev/null +++ b/charts/generic/k8-pod-delete/Cluster/rbac.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: k8-pod-delete-sa + namespace: default + labels: + name: k8-pod-delete-sa + app.kubernetes.io/part-of: litmus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: k8-pod-delete-sa + namespace: default + labels: + name: k8-pod-delete-sa + app.kubernetes.io/part-of: litmus +rules: +- apiGroups: ["","apps","batch"] + resources: ["jobs","deployments","daemonsets"] + verbs: ["create","list","get","patch","delete"] +- apiGroups: ["","litmuschaos.io"] + resources: ["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"] + verbs: ["get","create","update","patch","delete","list"] +- apiGroups: [""] + resources: ["nodes"] + verbs : ["get","list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: k8-pod-delete-sa + namespace: default + labels: + name: k8-pod-delete-sa + app.kubernetes.io/part-of: litmus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: k8-pod-delete-sa +subjects: +- kind: ServiceAccount + name: k8-pod-delete-sa + namespace: default + diff --git a/charts/generic/k8-pod-delete/Service/rbac.yaml b/charts/generic/k8-pod-delete/Service/rbac.yaml index ff2b321..a31c3c8 100644 --- a/charts/generic/k8-pod-delete/Service/rbac.yaml +++ b/charts/generic/k8-pod-delete/Service/rbac.yaml @@ -6,6 +6,7 @@ metadata: namespace: default labels: name: k8-pod-delete-sa + app.kubernetes.io/part-of: litmus --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role @@ -14,13 +15,17 @@ metadata: namespace: default labels: name: k8-pod-delete-sa + app.kubernetes.io/part-of: litmus rules: -- apiGroups: ["","litmuschaos.io","batch","apps"] - resources: ["pods","deployments","jobs","configmaps","chaosengines","chaosexperiments","chaosresults"] - verbs: ["create","list","get","patch","update","delete"] +- apiGroups: ["","apps","batch"] + resources: ["jobs","deployments","daemonsets"] + verbs: ["create","list","get","patch","delete"] +- apiGroups: ["","litmuschaos.io"] + resources: ["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"] + verbs: ["get","create","update","patch","delete","list"] - apiGroups: [""] resources: ["nodes"] - verbs : ["get","list"] + verbs : ["get","list"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding @@ -29,6 +34,7 @@ metadata: namespace: default labels: name: k8-pod-delete-sa + app.kubernetes.io/part-of: litmus roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/charts/generic/k8-pod-delete/rbac.yaml b/charts/generic/k8-pod-delete/rbac.yaml index 4c598f0..a31c3c8 100644 --- a/charts/generic/k8-pod-delete/rbac.yaml +++ b/charts/generic/k8-pod-delete/rbac.yaml @@ -17,12 +17,15 @@ metadata: name: k8-pod-delete-sa app.kubernetes.io/part-of: litmus rules: -- apiGroups: ["","litmuschaos.io","batch","apps"] - resources: ["pods","deployments","jobs","configmaps","chaosengines","chaosexperiments","chaosresults"] - verbs: ["create","list","get","patch","update","delete"] +- apiGroups: ["","apps","batch"] + resources: ["jobs","deployments","daemonsets"] + verbs: ["create","list","get","patch","delete"] +- apiGroups: ["","litmuschaos.io"] + resources: 
["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"] + verbs: ["get","create","update","patch","delete","list"] - apiGroups: [""] resources: ["nodes"] - verbs : ["get","list"] + verbs : ["get","list"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/charts/generic/k8-service-kill/Service/experiment.yaml b/charts/generic/k8-service-kill/Service/experiment.yaml deleted file mode 100644 index 8dbf7cf..0000000 --- a/charts/generic/k8-service-kill/Service/experiment.yaml +++ /dev/null @@ -1,75 +0,0 @@ -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Deletes a pod belonging to a deployment/statefulset/daemonset -kind: ChaosExperiment -metadata: - name: k8-service-kill -spec: - definition: - scope: Namespaced - permissions: - - apiGroups: - - "" - - "apps" - - "batch" - - "litmuschaos.io" - resources: - - "deployments" - - "jobs" - - "pods" - - "configmaps" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - "delete" - - apiGroups: - - "" - resources: - - "nodes" - verbs : - - "get" - - "list" - image: "litmuschaos/chaostoolkit:latest" - args: - - -c - - python /app/chaos/chaostest/kubernetes/k8_wrapper.py; exit 0 - command: - - /bin/bash - env: - - name: CHAOSTOOLKIT_IN_POD - value: 'true' - - - name: FILE - value: 'service-app-kill-health.json' - - - name: NAME_SPACE - value: '' - - - name: LABEL_NAME - value: '' - - - name: APP_ENDPOINT - value: '' - - - name: PERCENTAGE - value: '50' - - - name: REPORT - value: 'true' - - - name: REPORT_ENDPOINT - value: 'none' - - - name: TEST_NAMESPACE - value: 'default' - - - labels: - name: k8-service-kill diff --git a/charts/generic/k8-service-kill/Service/rbac.yaml b/charts/generic/k8-service-kill/Service/rbac.yaml deleted file mode 100644 index f386c6c..0000000 --- a/charts/generic/k8-service-kill/Service/rbac.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: chaos-admin - labels: - name: chaos-admin ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: chaos-admin - labels: - name: chaos-admin -rules: -- apiGroups: ["","apps","batch","extensions","litmuschaos.io","openebs.io","storage.k8s.io"] - resources: ["chaosengines","chaosexperiments","chaosresults","configmaps","cstorpools","cstorvolumereplicas","events","jobs","persistentvolumeclaims","persistentvolumes","pods","pods/exec","pods/log","secrets","storageclasses","chaosengines","chaosexperiments","chaosresults","configmaps","cstorpools","cstorvolumereplicas","daemonsets","deployments","events","jobs","persistentvolumeclaims","persistentvolumes","pods","pods/eviction","pods/exec","pods/log","replicasets","secrets","services","statefulsets","storageclasses"] - verbs: ["create","delete","get","list","patch","update"] -- apiGroups: [""] - resources: ["nodes"] - verbs: ["get","list","patch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: chaos-admin - labels: - name: chaos-admin -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: chaos-admin -subjects: -- kind: ServiceAccount - name: chaos-admin - namespace: default diff --git a/charts/generic/k8-service-kill/engine.yaml b/charts/generic/k8-service-kill/engine.yaml index ab3d39b..96c52f7 100644 --- a/charts/generic/k8-service-kill/engine.yaml +++ b/charts/generic/k8-service-kill/engine.yaml @@ -1,14 +1,14 @@ apiVersion: 
litmuschaos.io/v1alpha1 kind: ChaosEngine metadata: - name: nginx-chaos-service-health + name: k8-service-kill-health namespace: default spec: appinfo: appns: 'default' applabel: 'app=nginx' appkind: 'deployment' - annotationCheck: 'true' + annotationCheck: 'false' engineState: 'active' chaosServiceAccount: chaos-admin monitoring: false diff --git a/charts/generic/k8-service-kill/experiment.yaml b/charts/generic/k8-service-kill/experiment.yaml index e4098ad..1816806 100644 --- a/charts/generic/k8-service-kill/experiment.yaml +++ b/charts/generic/k8-service-kill/experiment.yaml @@ -41,6 +41,9 @@ spec: verbs : - "get" - "list" + labels: + name: k8-service-kill + app.kubernetes.io/part-of: litmus image: "litmuschaos/chaostoolkit:latest" args: - -c @@ -75,9 +78,3 @@ spec: - name: TEST_NAMESPACE value: 'default' - - labels: - name: k8-service-kill - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: latest diff --git a/charts/generic/k8-service-kill/rbac.yaml b/charts/generic/k8-service-kill/rbac.yaml index c653968..85a7c8d 100644 --- a/charts/generic/k8-service-kill/rbac.yaml +++ b/charts/generic/k8-service-kill/rbac.yaml @@ -1,38 +1,38 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: k8-service-kill-sa + name: chaos-admin labels: - name: k8-service-kill-sa - app.kubernetes.io/part-of: litmus + name: chaos-admin --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: k8-service-kill-sa + name: chaos-admin labels: - name: k8-service-kill-sa - app.kubernetes.io/part-of: litmus + name: chaos-admin rules: -- apiGroups: ["","apps","batch","extensions","litmuschaos.io","openebs.io","storage.k8s.io"] - resources: ["chaosengines","chaosexperiments","chaosresults","configmaps","cstorpools","cstorvolumereplicas","events","jobs","persistentvolumeclaims","persistentvolumes","pods","pods/exec","pods/log","secrets","storageclasses","chaosengines","chaosexperiments","chaosresults","configmaps","cstorpools","cstorvolumereplicas","daemonsets","deployments","events","jobs","persistentvolumeclaims","persistentvolumes","pods","pods/eviction","pods/exec","pods/log","replicasets","secrets","services","statefulsets","storageclasses"] - verbs: ["create","delete","get","list","patch","update"] -- apiGroups: [""] - resources: ["nodes"] - verbs: ["get","list","patch"] + - apiGroups: ["","apps","batch"] + resources: ["jobs","deployments","daemonsets"] + verbs: ["create","list","get","patch","delete"] + - apiGroups: ["","litmuschaos.io"] + resources: ["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"] + verbs: ["get","create","update","patch","delete","list"] + - apiGroups: [""] + resources: ["nodes"] + verbs : ["get","list"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: k8-service-kill-sa + name: chaos-admin labels: - name: k8-service-kill-sa - app.kubernetes.io/part-of: litmus + name: chaos-admin roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: k8-service-kill-sa + name: chaos-admin subjects: - kind: ServiceAccount - name: k8-service-kill-sa + name: chaos-admin namespace: default diff --git a/charts/generic/k8-wavefront-collector/Readme.md b/charts/generic/k8-wavefront-collector/Readme.md new file mode 100644 index 0000000..f683d31 --- /dev/null +++ b/charts/generic/k8-wavefront-collector/Readme.md @@ -0,0 +1,10 @@ +# Remote namespace +# wavefront collector information - 
https://github.com/wavefrontHQ/wavefront-collector +* Apply experiments for K8 - `kubectl apply -f experiments.yaml` +* Validate the experiments for k8 - `kubectl get chaosexperiments` +* Setup RBAC as admin mode - `kubectl apply -f rbac-admin.yaml` +* Create pod Experiment - for health experiment -`kubectl create -f engine.yaml` +* Validate experiment - `kubectl get pods -w` +* Validate logs - `kubectl logs -f ` +* Clean up chaosexperiment -`kubectl delete -f engine.yaml` +* Clean up rbac-admin -`kubectl delete -f rbac-admin.yaml` diff --git a/charts/generic/k8-wavefront-collector/engine.yaml b/charts/generic/k8-wavefront-collector/engine.yaml new file mode 100644 index 0000000..58f0186 --- /dev/null +++ b/charts/generic/k8-wavefront-collector/engine.yaml @@ -0,0 +1,39 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: k8-calico-node + namespace: default +spec: + appinfo: + appns: 'default' + applabel: "k8s-app=wavefront-collector" + appkind: deployment + annotationCheck: 'false' + engineState: 'active' + chaosServiceAccount: chaos-admin + monitoring: false + jobCleanUpPolicy: 'retain' + experiments: + - name: k8-pod-delete + spec: + components: + env: + # set chaos namespace, we assume you are using the kube-system if not modify the below namespace + - name: NAME_SPACE + value: kube-system + # set chaos label name + - name: LABEL_NAME + value: k8s-app=wavefront-collector + # pod endpoint + - name: APP_ENDPOINT + value: 'localhost' + - name: FILE + value: 'pod-custom-kill-health.json' + - name: REPORT + value: 'true' + - name: REPORT_ENDPOINT + value: 'none' + - name: TEST_NAMESPACE + value: 'default' + + diff --git a/charts/generic/k8-wavefront-collector/experiment.yaml b/charts/generic/k8-wavefront-collector/experiment.yaml new file mode 100644 index 0000000..0105ff3 --- /dev/null +++ b/charts/generic/k8-wavefront-collector/experiment.yaml @@ -0,0 +1,58 @@ +# Generic Chaos experiment for Application team, who want to participate in Game Day +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Deletes a pod belonging to a deployment/statefulset/daemonset +kind: ChaosExperiment +metadata: + name: k8-pod-delete +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: ["","apps","batch"] + resources: ["jobs","deployments","daemonsets"] + verbs: ["create","list","get","patch","delete"] + - apiGroups: ["","litmuschaos.io"] + resources: ["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"] + verbs: ["get","create","update","patch","delete","list"] + - apiGroups: [""] + resources: ["nodes"] + verbs : ["get","list"] + labels: + name: k8-pod-delete + app.kubernetes.io/part-of: litmus + image: "litmuschaos/chaostoolkit:latest" + args: + - -c + - python /app/chaos/chaostest/kubernetes/k8_wrapper.py ; exit 0 + command: + - /bin/bash + env: + + - name: CHAOSTOOLKIT_IN_POD + value: 'true' + + - name: FILE + value: 'pod-app-kill-count.json' + + - name: NAME_SPACE + value: '' + + - name: LABEL_NAME + value: '' + + - name: APP_ENDPOINT + value: '' + + - name: PERCENTAGE + value: '50' + + - name: REPORT + value: 'true' + + - name: REPORT_ENDPOINT + value: 'none' + + - name: TEST_NAMESPACE + value: 'default' diff --git a/charts/generic/k8-wavefront-collector/k8-wavefront-collector.chartserviceversion.yaml b/charts/generic/k8-wavefront-collector/k8-wavefront-collector.chartserviceversion.yaml new file mode 100644 index 0000000..8bbeb20 --- /dev/null +++ 
b/charts/generic/k8-wavefront-collector/k8-wavefront-collector.chartserviceversion.yaml
@@ -0,0 +1,33 @@
+apiVersion: litmuchaos.io/v1alpha1
+kind: ChartServiceVersion
+metadata:
+  name: k8-pod-delete
+  version: 0.0.4
+  annotations:
+    categories: Kubernetes
+    vendor: CNCF
+    createdAt: 2020-02-24T10:28:08Z
+    support: https://slack.kubernetes.io/
+spec:
+  displayName: k8-pod-delete
+  categoryDescription: |
+    K8 pod delete contains chaos to disrupt the state of Kubernetes resources. It uses chaostoolkit to inject random pod-delete failures against the specified applications.
+  keywords:
+    - Kubernetes
+    - State
+  platforms:
+    - Minikube
+  maturity: alpha
+  maintainers:
+    - name: sumit
+      email: sumit_nagal@intuit.com
+  minKubeVersion: 1.12.0
+  provider:
+    name: Intuit
+  links:
+    - name: Source Code
+      url: https://github.com/litmuschaos/litmus-python/tree/master/chaos-test
+  icon:
+    - url:
+      mediatype: ""
+  chaosexpcrdlink: https://raw.githubusercontent.com/litmuschaos/chaos-charts/master/charts/generic/k8-pod-delete/experiment.yaml
diff --git a/charts/generic/k8-wavefront-collector/rbac-admin.yaml b/charts/generic/k8-wavefront-collector/rbac-admin.yaml
new file mode 100644
index 0000000..85a7c8d
--- /dev/null
+++ b/charts/generic/k8-wavefront-collector/rbac-admin.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: chaos-admin
+  labels:
+    name: chaos-admin
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: chaos-admin
+  labels:
+    name: chaos-admin
+rules:
+  - apiGroups: ["","apps","batch"]
+    resources: ["jobs","deployments","daemonsets"]
+    verbs: ["create","list","get","patch","delete"]
+  - apiGroups: ["","litmuschaos.io"]
+    resources: ["pods","configmaps","events","services","chaosengines","chaosexperiments","chaosresults","deployments","jobs"]
+    verbs: ["get","create","update","patch","delete","list"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get","list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: chaos-admin
+  labels:
+    name: chaos-admin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: chaos-admin
+subjects:
+- kind: ServiceAccount
+  name: chaos-admin
+  namespace: default
diff --git a/workflows/k8-calico-node/Readme.md b/workflows/k8-calico-node/Readme.md
new file mode 100644
index 0000000..b9b55e1
--- /dev/null
+++ b/workflows/k8-calico-node/Readme.md
@@ -0,0 +1,10 @@
+### This explains how you can execute the Argo workflow.
+### Assumption is that you have installed Litmus - https://docs.litmuschaos.io/docs/getstarted/
+### Assumption is that you have installed Argo Workflows on this cluster - https://github.com/litmuschaos/chaos-workflows#getting-started
+### This execution will happen against the target namespace and assumes that you have completed the setup - https://hub.litmuschaos.io/generic/k8-calico-node
+### Please ensure you have enough pods in this namespace
+- Apply RBAC for Argo - `kubectl apply -f rbac-argo-service.yaml`
+- Execute the experiments for K8 - `argo submit workflow.yaml`
+
+
+
diff --git a/workflows/k8-calico-node/rbac-argo-service.yaml b/workflows/k8-calico-node/rbac-argo-service.yaml
new file mode 100644
index 0000000..98823e2
--- /dev/null
+++ b/workflows/k8-calico-node/rbac-argo-service.yaml
@@ -0,0 +1,45 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: argowf-role
+  namespace: default
+rules:
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["get","watch","patch","list"]
+- apiGroups: [""]
+  resources:
["pods/log"] + verbs: ["get","watch"] +- apiGroups: ["argoproj.io"] + resources: ["workflow","workflows"] + verbs: ["get","create","update","patch","delete","list","watch"] +- apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["get","create","delete","list"] +- apiGroups: ["","litmuschaos.io"] + resources: ["chaosengines","chaosexperiments","chaosresults"] + verbs: ["get","create","update","patch","delete","list","watch","deletecollection"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argowf-svcacc + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: argowf-rolebinding + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argowf-role +subjects: + [ + { + "kind": "ServiceAccount", + "name": "argowf-svcacc", + "namespace": "default" + } + ] \ No newline at end of file diff --git a/workflows/k8-calico-node/workflow.yaml b/workflows/k8-calico-node/workflow.yaml new file mode 100644 index 0000000..d56dcbe --- /dev/null +++ b/workflows/k8-calico-node/workflow.yaml @@ -0,0 +1,150 @@ +# This test can be executed only in Chaos namespace +# this will launch the argo and chaos in chaos namespace +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: argowf-chaos- +spec: + entrypoint: pdbcreate + poddisruptionbudget: + minavailable: 100% + # must complete in 1m + activeDeadlineSeconds: 86400 + # keep workflows for 3m + ttlStrategy: + secondsAfterCompletion: 3600 + # delete all pods as soon as they complete + podGC: + strategy: OnPodCompletion + serviceAccountName: argowf-svcacc + arguments: + parameters: + - name: appNamespace + value: "kube-system" + - name: appCurrentNamespace + value: "default" + - name: appLabel + value: "k8s-app=calico-node" + - name: appEndpoint + value: "localhost" + - name: fileName + value: "pod-custom-kill-health.json" + - name: chaosServiceAccount + value: chaos-admin + - name: reportEndpoint + value: none + templates: + - name: argowf-chaos + steps: + - - name: pdbcreate + template: pdbcreate + - - name: run-chaos + template: run-chaos + - - name: revert-chaos + template: revert-chaos + + - name: pdbcreate + container: + image: alpine:latest + command: [sh, -c] + args: [sleep 10] + + - name: run-chaos + inputs: + artifacts: + - name: run-chaos + path: /tmp/createChaosEngine.yaml + raw: + data: | + # chaosengine.yaml + apiVersion: litmuschaos.io/v1alpha1 + kind: ChaosEngine + metadata: + name: k8-pod-delete + namespace: {{workflow.parameters.appCurrentNamespace}} + spec: + #ex. 
values: ns1:name=percona,ns2:run=nginx
+              appinfo:
+                appns: {{workflow.parameters.appNamespace}}
+                # FYI, To see app label, apply kubectl get pods --show-labels
+                #applabel: "app=nginx"
+                applabel: "app={{workflow.parameters.appLabel}}"
+                appkind: deployment
+              jobCleanUpPolicy: delete
+              monitoring: false
+              annotationCheck: 'false'
+              engineState: 'active'
+              chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
+              experiments:
+                - name: k8-pod-delete
+                  spec:
+                    components:
+                      env:
+                        - name: NAME_SPACE
+                          value: {{workflow.parameters.appNamespace}}
+                        - name: LABEL_NAME
+                          value: {{workflow.parameters.appLabel}}
+                        - name: APP_ENDPOINT
+                          value: {{workflow.parameters.appEndpoint}}
+                        - name: FILE
+                          value: {{workflow.parameters.fileName}}
+                        - name: REPORT
+                          value: 'false'
+                        - name: REPORT_ENDPOINT
+                          value: '{{workflow.parameters.reportEndpoint}}'
+                        - name: TEST_NAMESPACE
+                          value: {{workflow.parameters.appCurrentNamespace}}
+    container:
+      image: lachlanevenson/k8s-kubectl
+      command: [sh, -c]
+      args: ['kubectl apply -f /tmp/createChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}} | echo "sleeping for 60s" | sleep 60 ']
+
+  - name: revert-chaos
+    inputs:
+      artifacts:
+        - name: revert-chaos
+          path: /tmp/deleteChaosEngine.yaml
+          raw:
+            data: |
+              # chaosengine.yaml
+              apiVersion: litmuschaos.io/v1alpha1
+              kind: ChaosEngine
+              metadata:
+                name: k8-pod-delete
+                namespace: {{workflow.parameters.appCurrentNamespace}}
+              spec:
+                #ex. values: ns1:name=percona,ns2:run=nginx
+                appinfo:
+                  appns: {{workflow.parameters.appNamespace}}
+                  # FYI, To see app label, apply kubectl get pods --show-labels
+                  #applabel: "app=nginx"
+                  applabel: "app={{workflow.parameters.appLabel}}"
+                  appkind: deployment
+                jobCleanUpPolicy: delete
+                monitoring: false
+                annotationCheck: 'false'
+                engineState: 'active'
+                chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
+                experiments:
+                  - name: k8-pod-delete
+                    spec:
+                      components:
+                        env:
+                          - name: NAME_SPACE
+                            value: {{workflow.parameters.appNamespace}}
+                          - name: LABEL_NAME
+                            value: {{workflow.parameters.appLabel}}
+                          - name: APP_ENDPOINT
+                            value: {{workflow.parameters.appEndpoint}}
+                          - name: FILE
+                            value: {{workflow.parameters.fileName}}
+                          - name: REPORT
+                            value: 'false'
+                          - name: REPORT_ENDPOINT
+                            value: '{{workflow.parameters.reportEndpoint}}'
+                          - name: TEST_NAMESPACE
+                            value: {{workflow.parameters.appCurrentNamespace}}
+    container:
+      image: lachlanevenson/k8s-kubectl
+      command: [sh, -c]
+      args: [' sleep 20 | kubectl delete -f /tmp/deleteChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}} | echo "sleeping for 60s" | sleep 60 ']
diff --git a/workflows/k8-kiam/Readme.md b/workflows/k8-kiam/Readme.md
new file mode 100644
index 0000000..5cf2673
--- /dev/null
+++ b/workflows/k8-kiam/Readme.md
@@ -0,0 +1,10 @@
+### This explains how you can execute the Argo workflow.
+### Assumption is that you have installed Litmus - https://docs.litmuschaos.io/docs/getstarted/
+### Assumption is that you have installed Argo Workflows on this cluster - https://github.com/litmuschaos/chaos-workflows#getting-started
+### This execution will happen against the target namespace and assumes that you have completed the setup - https://hub.litmuschaos.io/generic/k8-kiam
+### Please ensure you have enough pods in this namespace
+- Apply RBAC for Argo - `kubectl apply -f rbac-argo-service.yaml`
+- Execute the experiments for K8 - `argo submit workflow.yaml`
+
+
+
diff --git a/workflows/k8-kiam/rbac-argo-service.yaml b/workflows/k8-kiam/rbac-argo-service.yaml
new file mode 100644
index
0000000..98823e2 --- /dev/null +++ b/workflows/k8-kiam/rbac-argo-service.yaml @@ -0,0 +1,45 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: argowf-role + namespace: default +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get","watch","patch","list"] +- apiGroups: [""] + resources: ["pods/log"] + verbs: ["get","watch"] +- apiGroups: ["argoproj.io"] + resources: ["workflow","workflows"] + verbs: ["get","create","update","patch","delete","list","watch"] +- apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["get","create","delete","list"] +- apiGroups: ["","litmuschaos.io"] + resources: ["chaosengines","chaosexperiments","chaosresults"] + verbs: ["get","create","update","patch","delete","list","watch","deletecollection"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argowf-svcacc + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: argowf-rolebinding + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argowf-role +subjects: + [ + { + "kind": "ServiceAccount", + "name": "argowf-svcacc", + "namespace": "default" + } + ] \ No newline at end of file diff --git a/workflows/k8-kiam/workflow.yaml b/workflows/k8-kiam/workflow.yaml new file mode 100644 index 0000000..2802a96 --- /dev/null +++ b/workflows/k8-kiam/workflow.yaml @@ -0,0 +1,150 @@ +# This test can be executed only in Chaos namespace +# this will launch the argo and chaos in chaos namespace +apiVersion: argoproj.io/v1alpha1 +kind: Workflow +metadata: + generateName: argowf-chaos- +spec: + entrypoint: pdbcreate + poddisruptionbudget: + minavailable: 100% + # must complete in 1m + activeDeadlineSeconds: 86400 + # keep workflows for 3m + ttlStrategy: + secondsAfterCompletion: 3600 + # delete all pods as soon as they complete + podGC: + strategy: OnPodCompletion + serviceAccountName: argowf-svcacc + arguments: + parameters: + - name: appNamespace + value: "kube-system" + - name: appCurrentNamespace + value: "default" + - name: appLabel + value: "kiam" + - name: appEndpoint + value: "localhost" + - name: fileName + value: "pod-app-kill-health.json" + - name: chaosServiceAccount + value: chaos-admin + - name: reportEndpoint + value: none + templates: + - name: argowf-chaos + steps: + - - name: pdbcreate + template: pdbcreate + - - name: run-chaos + template: run-chaos + - - name: revert-chaos + template: revert-chaos + + - name: pdbcreate + container: + image: alpine:latest + command: [sh, -c] + args: [sleep 10] + + - name: run-chaos + inputs: + artifacts: + - name: run-chaos + path: /tmp/createChaosEngine.yaml + raw: + data: | + # chaosengine.yaml + apiVersion: litmuschaos.io/v1alpha1 + kind: ChaosEngine + metadata: + name: k8-pod-delete + namespace: {{workflow.parameters.appCurrentNamespace}} + spec: + #ex. 
values: ns1:name=percona,ns2:run=nginx
+              appinfo:
+                appns: {{workflow.parameters.appNamespace}}
+                # FYI, To see app label, apply kubectl get pods --show-labels
+                #applabel: "app=nginx"
+                applabel: "app={{workflow.parameters.appLabel}}"
+                appkind: deployment
+              jobCleanUpPolicy: delete
+              monitoring: false
+              annotationCheck: 'false'
+              engineState: 'active'
+              chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
+              experiments:
+                - name: k8-pod-delete
+                  spec:
+                    components:
+                      env:
+                        - name: NAME_SPACE
+                          value: {{workflow.parameters.appNamespace}}
+                        - name: LABEL_NAME
+                          value: {{workflow.parameters.appLabel}}
+                        - name: APP_ENDPOINT
+                          value: {{workflow.parameters.appEndpoint}}
+                        - name: FILE
+                          value: {{workflow.parameters.fileName}}
+                        - name: REPORT
+                          value: 'false'
+                        - name: REPORT_ENDPOINT
+                          value: '{{workflow.parameters.reportEndpoint}}'
+                        - name: TEST_NAMESPACE
+                          value: {{workflow.parameters.appCurrentNamespace}}
+    container:
+      image: lachlanevenson/k8s-kubectl
+      command: [sh, -c]
+      args: ['kubectl apply -f /tmp/createChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}} | echo "sleeping for 60s" | sleep 60 ']
+
+  - name: revert-chaos
+    inputs:
+      artifacts:
+        - name: revert-chaos
+          path: /tmp/deleteChaosEngine.yaml
+          raw:
+            data: |
+              # chaosengine.yaml
+              apiVersion: litmuschaos.io/v1alpha1
+              kind: ChaosEngine
+              metadata:
+                name: k8-pod-delete
+                namespace: {{workflow.parameters.appCurrentNamespace}}
+              spec:
+                #ex. values: ns1:name=percona,ns2:run=nginx
+                appinfo:
+                  appns: {{workflow.parameters.appNamespace}}
+                  # FYI, To see app label, apply kubectl get pods --show-labels
+                  #applabel: "app=nginx"
+                  applabel: "app={{workflow.parameters.appLabel}}"
+                  appkind: deployment
+                jobCleanUpPolicy: delete
+                monitoring: false
+                annotationCheck: 'false'
+                engineState: 'active'
+                chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
+                experiments:
+                  - name: k8-pod-delete
+                    spec:
+                      components:
+                        env:
+                          - name: NAME_SPACE
+                            value: {{workflow.parameters.appNamespace}}
+                          - name: LABEL_NAME
+                            value: {{workflow.parameters.appLabel}}
+                          - name: APP_ENDPOINT
+                            value: {{workflow.parameters.appEndpoint}}
+                          - name: FILE
+                            value: {{workflow.parameters.fileName}}
+                          - name: REPORT
+                            value: 'false'
+                          - name: REPORT_ENDPOINT
+                            value: '{{workflow.parameters.reportEndpoint}}'
+                          - name: TEST_NAMESPACE
+                            value: {{workflow.parameters.appCurrentNamespace}}
+    container:
+      image: lachlanevenson/k8s-kubectl
+      command: [sh, -c]
+      args: [' sleep 20 | kubectl delete -f /tmp/deleteChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}} | echo "sleeping for 60s" | sleep 60 ']
diff --git a/workflows/k8-pod-delete/Readme.md b/workflows/k8-pod-delete/Readme.md
new file mode 100644
index 0000000..64732e5
--- /dev/null
+++ b/workflows/k8-pod-delete/Readme.md
@@ -0,0 +1,9 @@
+# This explains how you can execute the Argo workflow.
+### Assumption is that you have installed Litmus - https://docs.litmuschaos.io/docs/getstarted/
+### Assumption is that you have installed Argo Workflows on this cluster - https://github.com/litmuschaos/chaos-workflows#getting-started
+### This execution will happen in your namespace and won't impact anything outside it
+### Please ensure you have enough pods in this namespace
+- Apply RBAC for Argo - `kubectl apply -f rbac-argo-service.yaml`
+- Execute the experiments for K8 - `argo submit workflow.yaml`
+
+
diff --git a/workflows/k8-pod-delete/rbac-argo-service.yaml b/workflows/k8-pod-delete/rbac-argo-service.yaml
new file mode 100644
index 0000000..98823e2
--- /dev/null
+++
diff --git a/workflows/k8-pod-delete/rbac-argo-service.yaml b/workflows/k8-pod-delete/rbac-argo-service.yaml
new file mode 100644
index 0000000..98823e2
--- /dev/null
+++ b/workflows/k8-pod-delete/rbac-argo-service.yaml
@@ -0,0 +1,45 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: argowf-role
+  namespace: default
+rules:
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["get","watch","patch","list"]
+- apiGroups: [""]
+  resources: ["pods/log"]
+  verbs: ["get","watch"]
+- apiGroups: ["argoproj.io"]
+  resources: ["workflow","workflows"]
+  verbs: ["get","create","update","patch","delete","list","watch"]
+- apiGroups: ["policy"]
+  resources: ["poddisruptionbudgets"]
+  verbs: ["get","create","delete","list"]
+- apiGroups: ["","litmuschaos.io"]
+  resources: ["chaosengines","chaosexperiments","chaosresults"]
+  verbs: ["get","create","update","patch","delete","list","watch","deletecollection"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: argowf-svcacc
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: argowf-rolebinding
+  namespace: default
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: argowf-role
+subjects:
+  [
+    {
+      "kind": "ServiceAccount",
+      "name": "argowf-svcacc",
+      "namespace": "default"
+    }
+  ]
\ No newline at end of file
diff --git a/workflows/k8-pod-delete/workflow.yaml b/workflows/k8-pod-delete/workflow.yaml
new file mode 100644
index 0000000..e238124
--- /dev/null
+++ b/workflows/k8-pod-delete/workflow.yaml
@@ -0,0 +1,189 @@
+# This test can be executed only in the application namespace
+# It will launch Argo and the chaos pod in the same namespace
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  generateName: argowf-chaos-
+spec:
+  entrypoint: argowf-chaos
+  poddisruptionbudget:
+    minavailable: 100%
+  # must complete within 24h (86400s)
+  activeDeadlineSeconds: 86400
+  # keep completed workflows for 1h (3600s)
+  ttlStrategy:
+    secondsAfterCompletion: 3600
+  # delete all pods as soon as they complete
+  podGC:
+    strategy: OnPodCompletion
+  serviceAccountName: argowf-svcacc
+  arguments:
+    parameters:
+      - name: appNamespace
+        value: "default"
+      - name: appCurrentNamespace
+        value: "default"
+      - name: appLabel
+        value: "nginx-demo-app"
+      - name: appEndpoint
+        value: "localhost"
+      - name: fileName
+        value: "pod-app-kill-health.json"
+      - name: chaosServiceAccount
+        value: k8-pod-delete-sa
+      - name: reportEndpoint
+        value: none
+  templates:
+    - name: argowf-chaos
+      steps:
+        - - name: pdbcreate
+            template: pdbcreate
+        - - name: install-chaos-experiments
+            template: install-chaos-experiments
+        - - name: install-chaos-rbac
+            template: install-chaos-rbac
+        - - name: run-chaos
+            template: run-chaos
+        - - name: revert-chaos
+            template: revert-chaos
+        - - name: revert-chaos-rbac
+            template: revert-chaos-rbac
+        - - name: revert-chaos-experiments
+            template: revert-chaos-experiments
+
+    - name: pdbcreate
+      container:
+        image: alpine:latest
+        command: [sh, -c]
+        args: [sleep 10]
+
+    - name: install-chaos-experiments
+      container:
+        image: lachlanevenson/k8s-kubectl
+        command: [sh, -c]
+        args:
+          - "kubectl apply -f https://hub.litmuschaos.io/api/chaos/1.9.0?file=charts/generic/k8-pod-delete/experiments.yaml -n
+            {{workflow.parameters.appNamespace}} && sleep 30"
+
+    - name: install-chaos-rbac
+      container:
+        image: lachlanevenson/k8s-kubectl
+        command: [sh, -c]
+        args:
+          - "kubectl apply -f https://hub.litmuschaos.io/api/chaos/1.9.0?file=charts/generic/k8-pod-delete/rbac.yaml -n
+            {{workflow.parameters.appNamespace}} && sleep 30"
+
+    - name: run-chaos
+      inputs:
+        artifacts:
+          - name: run-chaos
+            path: /tmp/createChaosEngine.yaml
+            raw:
+              data: |
+                # chaosengine.yaml
+                apiVersion: litmuschaos.io/v1alpha1
+                kind: ChaosEngine
+                metadata:
+                  name: k8-pod-delete
+                  namespace: {{workflow.parameters.appNamespace}}
+                spec:
+                  #ex. values: ns1:name=percona,ns2:run=nginx
+                  appinfo:
+                    appns: {{workflow.parameters.appNamespace}}
+                    # FYI, to see app labels, run: kubectl get pods --show-labels
+                    #applabel: "app=nginx"
+                    applabel: "app={{workflow.parameters.appLabel}}"
+                    appkind: deployment
+                  jobCleanUpPolicy: delete
+                  monitoring: false
+                  annotationCheck: 'true'
+                  engineState: 'active'
+                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
+                  experiments:
+                    - name: k8-pod-delete
+                      spec:
+                        components:
+                          env:
+                            - name: NAME_SPACE
+                              value: {{workflow.parameters.appNamespace}}
+                            - name: LABEL_NAME
+                              value: {{workflow.parameters.appLabel}}
+                            - name: APP_ENDPOINT
+                              value: {{workflow.parameters.appEndpoint}}
+                            - name: FILE
+                              value: {{workflow.parameters.fileName}}
+                            - name: REPORT
+                              value: 'false'
+                            - name: REPORT_ENDPOINT
+                              value: '{{workflow.parameters.reportEndpoint}}'
+                            - name: TEST_NAMESPACE
+                              value: {{workflow.parameters.appCurrentNamespace}}
+      container:
+        image: lachlanevenson/k8s-kubectl
+        command: [sh, -c]
+        args: ['kubectl apply -f /tmp/createChaosEngine.yaml -n {{workflow.parameters.appNamespace}} && echo "sleeping for 60s" && sleep 60']
+
+    - name: revert-chaos
+      inputs:
+        artifacts:
+          - name: revert-chaos
+            path: /tmp/deleteChaosEngine.yaml
+            raw:
+              data: |
+                # chaosengine.yaml
+                apiVersion: litmuschaos.io/v1alpha1
+                kind: ChaosEngine
+                metadata:
+                  name: k8-pod-delete
+                  namespace: {{workflow.parameters.appNamespace}}
+                spec:
+                  #ex. values: ns1:name=percona,ns2:run=nginx
+                  appinfo:
+                    appns: {{workflow.parameters.appNamespace}}
+                    # FYI, to see app labels, run: kubectl get pods --show-labels
+                    #applabel: "app=nginx"
+                    applabel: "app={{workflow.parameters.appLabel}}"
+                    appkind: deployment
+                  jobCleanUpPolicy: delete
+                  monitoring: false
+                  annotationCheck: 'true'
+                  engineState: 'active'
+                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
+                  experiments:
+                    - name: k8-pod-delete
+                      spec:
+                        components:
+                          env:
+                            - name: NAME_SPACE
+                              value: {{workflow.parameters.appNamespace}}
+                            - name: LABEL_NAME
+                              value: {{workflow.parameters.appLabel}}
+                            - name: APP_ENDPOINT
+                              value: {{workflow.parameters.appEndpoint}}
+                            - name: FILE
+                              value: {{workflow.parameters.fileName}}
+                            - name: REPORT
+                              value: 'false'
+                            - name: REPORT_ENDPOINT
+                              value: '{{workflow.parameters.reportEndpoint}}'
+                            - name: TEST_NAMESPACE
+                              value: {{workflow.parameters.appCurrentNamespace}}
+      container:
+        image: lachlanevenson/k8s-kubectl
+        command: [sh, -c]
+        args: ['sleep 20 && kubectl delete -f /tmp/deleteChaosEngine.yaml -n {{workflow.parameters.appNamespace}} && echo "sleeping for 60s" && sleep 60']
+
+    - name: revert-chaos-experiments
+      container:
+        image: lachlanevenson/k8s-kubectl
+        command: [sh, -c]
+        args:
+          - "kubectl delete -f https://hub.litmuschaos.io/api/chaos/1.9.0?file=charts/generic/k8-pod-delete/experiments.yaml -n
+            {{workflow.parameters.appNamespace}} && sleep 30"
+
+    - name: revert-chaos-rbac
+      container:
+        image: lachlanevenson/k8s-kubectl
+        command: [sh, -c]
+        args:
+          - "kubectl delete -f https://hub.litmuschaos.io/api/chaos/1.9.0?file=charts/generic/k8-pod-delete/rbac.yaml -n
+            {{workflow.parameters.appNamespace}} && sleep 30"
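Once the run-chaos step has applied the engine, the outcome lands in a ChaosResult resource. A quick way to check it, assuming the defaults above (engine `k8-pod-delete`, experiment `k8-pod-delete`, namespace `default`) and Litmus's `<engine>-<experiment>` result-naming convention:

```sh
# Watch the experiment pods come and go while chaos runs.
kubectl get pods -n default -w
# The verdict field shows Pass or Fail once the experiment completes.
kubectl describe chaosresult k8-pod-delete-k8-pod-delete -n default
```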
diff --git a/workflows/k8-service-kill/Readme.md b/workflows/k8-service-kill/Readme.md
new file mode 100644
index 0000000..f26000e
--- /dev/null
+++ b/workflows/k8-service-kill/Readme.md
@@ -0,0 +1,10 @@
+### How to execute the Argo workflow
+### This assumes you have installed Litmus - https://docs.litmuschaos.io/docs/getstarted/
+### This assumes you have installed Argo Workflows on this cluster - https://github.com/litmuschaos/chaos-workflows#getting-started
+### This executes against the target namespace and assumes you have set up - https://hub.litmuschaos.io/generic/k8-service-kill
+### Please ensure you have enough pods in this namespace
+- Apply RBAC for Argo - `kubectl apply -f rbac-argo-service.yaml`
+- Execute experiments for K8 - `argo submit workflow.yaml`
+
+
+
diff --git a/workflows/k8-service-kill/rbac-argo-service.yaml b/workflows/k8-service-kill/rbac-argo-service.yaml
new file mode 100644
index 0000000..98823e2
--- /dev/null
+++ b/workflows/k8-service-kill/rbac-argo-service.yaml
@@ -0,0 +1,45 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: argowf-role
+  namespace: default
+rules:
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["get","watch","patch","list"]
+- apiGroups: [""]
+  resources: ["pods/log"]
+  verbs: ["get","watch"]
+- apiGroups: ["argoproj.io"]
+  resources: ["workflow","workflows"]
+  verbs: ["get","create","update","patch","delete","list","watch"]
+- apiGroups: ["policy"]
+  resources: ["poddisruptionbudgets"]
+  verbs: ["get","create","delete","list"]
+- apiGroups: ["","litmuschaos.io"]
+  resources: ["chaosengines","chaosexperiments","chaosresults"]
+  verbs: ["get","create","update","patch","delete","list","watch","deletecollection"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: argowf-svcacc
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: argowf-rolebinding
+  namespace: default
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: argowf-role
+subjects:
+  [
+    {
+      "kind": "ServiceAccount",
+      "name": "argowf-svcacc",
+      "namespace": "default"
+    }
+  ]
\ No newline at end of file
diff --git a/workflows/k8-service-kill/workflow.yaml b/workflows/k8-service-kill/workflow.yaml
new file mode 100644
index 0000000..bfecf44
--- /dev/null
+++ b/workflows/k8-service-kill/workflow.yaml
@@ -0,0 +1,150 @@
+# This test can be executed only in the chaos namespace
+# It will launch Argo and the chaos pods in the chaos namespace
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  generateName: argowf-chaos-
+spec:
+  entrypoint: argowf-chaos
+  poddisruptionbudget:
+    minavailable: 100%
+  # must complete within 24h (86400s)
+  activeDeadlineSeconds: 86400
+  # keep completed workflows for 1h (3600s)
+  ttlStrategy:
+    secondsAfterCompletion: 3600
+  # delete all pods as soon as they complete
+  podGC:
+    strategy: OnPodCompletion
+  serviceAccountName: argowf-svcacc
+  arguments:
+    parameters:
+      - name: appNamespace
+        value: "default"
+      - name: appCurrentNamespace
+        value: "default"
+      - name: appLabel
+        value: "k8s-app=calico-node"
+      - name: appEndpoint
+        value: "localhost"
+      - name: fileName
+        value: "service-app-kill-health.json"
+      - name: chaosServiceAccount
+        value: chaos-admin
+      - name: reportEndpoint
+        value: none
+  templates:
+    - name: argowf-chaos
+      steps:
+        - - name: pdbcreate
+            template: pdbcreate
+        - - name: run-chaos
+            template: run-chaos
+        - - name: revert-chaos
+            template: revert-chaos
+
+    - name: pdbcreate
+      container:
+        image: alpine:latest
+        command: [sh, -c]
+        args: [sleep 10]
+
+    - name: run-chaos
+      inputs:
+        artifacts:
+          - name: run-chaos
+            path: /tmp/createChaosEngine.yaml
+            raw:
+              data: |
+                # chaosengine.yaml
+                apiVersion: litmuschaos.io/v1alpha1
+                kind: ChaosEngine
+                metadata:
+                  name: k8-pod-delete
+                  namespace: {{workflow.parameters.appCurrentNamespace}}
+                spec:
+                  #ex. values: ns1:name=percona,ns2:run=nginx
+                  appinfo:
+                    appns: {{workflow.parameters.appNamespace}}
+                    # FYI, to see app labels, run: kubectl get pods --show-labels
+                    #applabel: "app=nginx"
+                    applabel: "app={{workflow.parameters.appLabel}}"
+                    appkind: deployment
+                  jobCleanUpPolicy: delete
+                  monitoring: false
+                  annotationCheck: 'false'
+                  engineState: 'active'
+                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
+                  experiments:
+                    - name: k8-pod-delete
+                      spec:
+                        components:
+                          env:
+                            - name: NAME_SPACE
+                              value: {{workflow.parameters.appNamespace}}
+                            - name: LABEL_NAME
+                              value: {{workflow.parameters.appLabel}}
+                            - name: APP_ENDPOINT
+                              value: {{workflow.parameters.appEndpoint}}
+                            - name: FILE
+                              value: {{workflow.parameters.fileName}}
+                            - name: REPORT
+                              value: 'false'
+                            - name: REPORT_ENDPOINT
+                              value: '{{workflow.parameters.reportEndpoint}}'
+                            - name: TEST_NAMESPACE
+                              value: {{workflow.parameters.appCurrentNamespace}}
+      container:
+        image: lachlanevenson/k8s-kubectl
+        command: [sh, -c]
+        args: ['kubectl apply -f /tmp/createChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}} && echo "sleeping for 60s" && sleep 60']
+
+    - name: revert-chaos
+      inputs:
+        artifacts:
+          - name: revert-chaos
+            path: /tmp/deleteChaosEngine.yaml
+            raw:
+              data: |
+                # chaosengine.yaml
+                apiVersion: litmuschaos.io/v1alpha1
+                kind: ChaosEngine
+                metadata:
+                  name: k8-pod-delete
+                  namespace: {{workflow.parameters.appCurrentNamespace}}
+                spec:
+                  #ex. values: ns1:name=percona,ns2:run=nginx
+                  appinfo:
+                    appns: {{workflow.parameters.appNamespace}}
+                    # FYI, to see app labels, run: kubectl get pods --show-labels
+                    #applabel: "app=nginx"
+                    applabel: "app={{workflow.parameters.appLabel}}"
+                    appkind: deployment
+                  jobCleanUpPolicy: delete
+                  monitoring: false
+                  annotationCheck: 'false'
+                  engineState: 'active'
+                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
+                  experiments:
+                    - name: k8-pod-delete
+                      spec:
+                        components:
+                          env:
+                            - name: NAME_SPACE
+                              value: {{workflow.parameters.appNamespace}}
+                            - name: LABEL_NAME
+                              value: {{workflow.parameters.appLabel}}
+                            - name: APP_ENDPOINT
+                              value: {{workflow.parameters.appEndpoint}}
+                            - name: FILE
+                              value: {{workflow.parameters.fileName}}
+                            - name: REPORT
+                              value: 'false'
+                            - name: REPORT_ENDPOINT
+                              value: '{{workflow.parameters.reportEndpoint}}'
+                            - name: TEST_NAMESPACE
+                              value: {{workflow.parameters.appCurrentNamespace}}
+      container:
+        image: lachlanevenson/k8s-kubectl
+        command: [sh, -c]
+        args: ['sleep 20 && kubectl delete -f /tmp/deleteChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}} && echo "sleeping for 60s" && sleep 60']
diff --git a/workflows/k8-wavefront-collector/Readme.md b/workflows/k8-wavefront-collector/Readme.md
new file mode 100644
index 0000000..bdcba8c
--- /dev/null
+++ b/workflows/k8-wavefront-collector/Readme.md
@@ -0,0 +1,10 @@
+### How to execute the Argo workflow
+### This assumes you have installed Litmus - https://docs.litmuschaos.io/docs/getstarted/
+### This assumes you have installed Argo Workflows on this cluster - https://github.com/litmuschaos/chaos-workflows#getting-started
+### This executes against the target namespace and assumes you have set up - https://hub.litmuschaos.io/generic/k8-wavefront-collector
+### Please ensure you have enough pods in this namespace
+- Apply RBAC for Argo - `kubectl apply -f rbac-argo-service.yaml`
+- Execute experiments for K8 - `argo submit workflow.yaml`
+
+
+
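Each of these workflows exposes its tunables as top-level Argo parameters, so one manifest can target different apps. A hedged sketch of overriding them at submit time (parameter names come from the manifests above; the label and file values are illustrative):

```sh
# Target a different namespace/label without editing workflow.yaml.
argo submit workflow.yaml -n default \
  -p appNamespace=kube-system \
  -p appLabel=k8s-app=wavefront-collector \
  -p fileName=pod-custom-kill-health.json
```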
diff --git a/workflows/k8-wavefront-collector/rbac-argo-service.yaml b/workflows/k8-wavefront-collector/rbac-argo-service.yaml
new file mode 100644
index 0000000..98823e2
--- /dev/null
+++ b/workflows/k8-wavefront-collector/rbac-argo-service.yaml
@@ -0,0 +1,45 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: argowf-role
+  namespace: default
+rules:
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["get","watch","patch","list"]
+- apiGroups: [""]
+  resources: ["pods/log"]
+  verbs: ["get","watch"]
+- apiGroups: ["argoproj.io"]
+  resources: ["workflow","workflows"]
+  verbs: ["get","create","update","patch","delete","list","watch"]
+- apiGroups: ["policy"]
+  resources: ["poddisruptionbudgets"]
+  verbs: ["get","create","delete","list"]
+- apiGroups: ["","litmuschaos.io"]
+  resources: ["chaosengines","chaosexperiments","chaosresults"]
+  verbs: ["get","create","update","patch","delete","list","watch","deletecollection"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: argowf-svcacc
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: argowf-rolebinding
+  namespace: default
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: argowf-role
+subjects:
+  [
+    {
+      "kind": "ServiceAccount",
+      "name": "argowf-svcacc",
+      "namespace": "default"
+    }
+  ]
\ No newline at end of file
diff --git a/workflows/k8-wavefront-collector/workflow.yaml b/workflows/k8-wavefront-collector/workflow.yaml
new file mode 100644
index 0000000..d6fe66d
--- /dev/null
+++ b/workflows/k8-wavefront-collector/workflow.yaml
@@ -0,0 +1,150 @@
+# This test can be executed only in the chaos namespace
+# It will launch Argo and the chaos pods in the chaos namespace
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  generateName: argowf-chaos-
+spec:
+  entrypoint: argowf-chaos
+  poddisruptionbudget:
+    minavailable: 100%
+  # must complete within 24h (86400s)
+  activeDeadlineSeconds: 86400
+  # keep completed workflows for 1h (3600s)
+  ttlStrategy:
+    secondsAfterCompletion: 3600
+  # delete all pods as soon as they complete
+  podGC:
+    strategy: OnPodCompletion
+  serviceAccountName: argowf-svcacc
+  arguments:
+    parameters:
+      - name: appNamespace
+        value: "kube-system"
+      - name: appCurrentNamespace
+        value: "default"
+      - name: appLabel
+        value: "k8s-app=wavefront-collector"
+      - name: appEndpoint
+        value: "localhost"
+      - name: fileName
+        value: "pod-custom-kill-health.json"
+      - name: chaosServiceAccount
+        value: chaos-admin
+      - name: reportEndpoint
+        value: none
+  templates:
+    - name: argowf-chaos
+      steps:
+        - - name: pdbcreate
+            template: pdbcreate
+        - - name: run-chaos
+            template: run-chaos
+        - - name: revert-chaos
+            template: revert-chaos
+
+    - name: pdbcreate
+      container:
+        image: alpine:latest
+        command: [sh, -c]
+        args: [sleep 10]
+
+    - name: run-chaos
+      inputs:
+        artifacts:
+          - name: run-chaos
+            path: /tmp/createChaosEngine.yaml
+            raw:
+              data: |
+                # chaosengine.yaml
+                apiVersion: litmuschaos.io/v1alpha1
+                kind: ChaosEngine
+                metadata:
+                  name: k8-pod-delete
+                  namespace: {{workflow.parameters.appCurrentNamespace}}
+                spec:
+                  #ex. values: ns1:name=percona,ns2:run=nginx
+                  appinfo:
+                    appns: {{workflow.parameters.appNamespace}}
+                    # FYI, to see app labels, run: kubectl get pods --show-labels
+                    #applabel: "app=nginx"
+                    applabel: "app={{workflow.parameters.appLabel}}"
+                    appkind: deployment
+                  jobCleanUpPolicy: delete
+                  monitoring: false
+                  annotationCheck: 'false'
+                  engineState: 'active'
+                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
+                  experiments:
+                    - name: k8-pod-delete
+                      spec:
+                        components:
+                          env:
+                            - name: NAME_SPACE
+                              value: {{workflow.parameters.appNamespace}}
+                            - name: LABEL_NAME
+                              value: {{workflow.parameters.appLabel}}
+                            - name: APP_ENDPOINT
+                              value: {{workflow.parameters.appEndpoint}}
+                            - name: FILE
+                              value: {{workflow.parameters.fileName}}
+                            - name: REPORT
+                              value: 'false'
+                            - name: REPORT_ENDPOINT
+                              value: '{{workflow.parameters.reportEndpoint}}'
+                            - name: TEST_NAMESPACE
+                              value: {{workflow.parameters.appCurrentNamespace}}
+      container:
+        image: lachlanevenson/k8s-kubectl
+        command: [sh, -c]
+        args: ['kubectl apply -f /tmp/createChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}} && echo "sleeping for 60s" && sleep 60']
+
+    - name: revert-chaos
+      inputs:
+        artifacts:
+          - name: revert-chaos
+            path: /tmp/deleteChaosEngine.yaml
+            raw:
+              data: |
+                # chaosengine.yaml
+                apiVersion: litmuschaos.io/v1alpha1
+                kind: ChaosEngine
+                metadata:
+                  name: k8-pod-delete
+                  namespace: {{workflow.parameters.appCurrentNamespace}}
+                spec:
+                  #ex. values: ns1:name=percona,ns2:run=nginx
+                  appinfo:
+                    appns: {{workflow.parameters.appNamespace}}
+                    # FYI, to see app labels, run: kubectl get pods --show-labels
+                    #applabel: "app=nginx"
+                    applabel: "app={{workflow.parameters.appLabel}}"
+                    appkind: deployment
+                  jobCleanUpPolicy: delete
+                  monitoring: false
+                  annotationCheck: 'false'
+                  engineState: 'active'
+                  chaosServiceAccount: {{workflow.parameters.chaosServiceAccount}}
+                  experiments:
+                    - name: k8-pod-delete
+                      spec:
+                        components:
+                          env:
+                            - name: NAME_SPACE
+                              value: {{workflow.parameters.appNamespace}}
+                            - name: LABEL_NAME
+                              value: {{workflow.parameters.appLabel}}
+                            - name: APP_ENDPOINT
+                              value: {{workflow.parameters.appEndpoint}}
+                            - name: FILE
+                              value: {{workflow.parameters.fileName}}
+                            - name: REPORT
+                              value: 'false'
+                            - name: REPORT_ENDPOINT
+                              value: '{{workflow.parameters.reportEndpoint}}'
+                            - name: TEST_NAMESPACE
+                              value: {{workflow.parameters.appCurrentNamespace}}
+      container:
+        image: lachlanevenson/k8s-kubectl
+        command: [sh, -c]
+        args: ['sleep 20 && kubectl delete -f /tmp/deleteChaosEngine.yaml -n {{workflow.parameters.appCurrentNamespace}} && echo "sleeping for 60s" && sleep 60']
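For reference, the `poddisruptionbudget` stanza shared by all four workflows asks Argo to keep every workflow pod available while chaos runs. A rough standalone equivalent is sketched below; this is illustrative only (apiVersion current as of this patch, and the generated workflow name is a placeholder):

```yaml
# Approximate standalone equivalent of the workflow-level PDB.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: argowf-chaos-pdb
  namespace: default
spec:
  minAvailable: "100%"
  selector:
    matchLabels:
      # Argo labels each workflow pod with the workflow's generated name.
      workflows.argoproj.io/workflow: argowf-chaos-xxxxx  # placeholder
```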