diff --git a/.envrc b/.envrc
index 76da7d1..206c5f8 100644
--- a/.envrc
+++ b/.envrc
@@ -1,2 +1,2 @@
use flake
-export IN_NIX_SHELL="arnal#chaos-monkey"
+export IN_NIX_SHELL="chaos-monkey"
diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl
index 5687a49..58b62d6 100644
--- a/.terraform.lock.hcl
+++ b/.terraform.lock.hcl
@@ -23,23 +23,6 @@ provider "registry.terraform.io/alekc/kubectl" {
]
}
-provider "registry.terraform.io/gavinbunney/kubectl" {
- version = "1.14.0"
- constraints = "~> 1.14.0"
- hashes = [
- "h1:gLFn+RvP37sVzp9qnFCwngRjjFV649r6apjxvJ1E/SE=",
- "zh:0350f3122ff711984bbc36f6093c1fe19043173fad5a904bce27f86afe3cc858",
- "zh:07ca36c7aa7533e8325b38232c77c04d6ef1081cb0bac9d56e8ccd51f12f2030",
- "zh:0c351afd91d9e994a71fe64bbd1662d0024006b3493bb61d46c23ea3e42a7cf5",
- "zh:39f1a0aa1d589a7e815b62b5aa11041040903b061672c4cfc7de38622866cbc4",
- "zh:428d3a321043b78e23c91a8d641f2d08d6b97f74c195c654f04d2c455e017de5",
- "zh:4baf5b1de2dfe9968cc0f57fd4be5a741deb5b34ee0989519267697af5f3eee5",
- "zh:6131a927f9dffa014ab5ca5364ac965fe9b19830d2bbf916a5b2865b956fdfcf",
- "zh:c62e0c9fd052cbf68c5c2612af4f6408c61c7e37b615dc347918d2442dd05e93",
- "zh:f0beffd7ce78f49ead612e4b1aefb7cb6a461d040428f514f4f9cc4e5698ac65",
- ]
-}
-
provider "registry.terraform.io/hashicorp/kubernetes" {
version = "2.31.0"
constraints = "~> 2.31.0"
@@ -82,6 +65,29 @@ provider "registry.terraform.io/kreuzwerker/docker" {
]
}
+provider "registry.terraform.io/marcofranssen/curl" {
+ version = "0.6.1"
+ constraints = "~> 0.6.1"
+ hashes = [
+ "h1:A4okacCSY8t5M8i3S6tjWGjLpmVg5nL1nS7DehKAsF0=",
+ "zh:0f38688f134d41da20bd5d884c956510c7198bbd08c597441352474ffa06b8f2",
+ "zh:1f4491e378df2e273abd3ec4d9934c431a70c9cee523b6b03a7681691f8a35c6",
+ "zh:2764f784bb75c807c8beb289f2b5118458b739088a5a22f4674ea6b8ae2904de",
+ "zh:3a77923292b92db014f882afc3e5b5ccc440710343613e43e644826a5b4442aa",
+ "zh:3b0c65f625e5a43bff851209c61ebe3575f05eb537dc237e7e14c149536342f4",
+ "zh:7f948fe56f997808ebe60458940768f84aa26b10b1565d9df47c1e241e6c81e9",
+ "zh:83d0730b905ffec2e695e77f4f11af1bec9a17706bbd0d3550a4b17ce98dee04",
+ "zh:880985d8d02bea45fa17a30a0a03c20842790a05dfc6efc706b6de043d042e12",
+ "zh:b7b8d05526f91589c6541edd857c145b97699da1b5125c7731bc5d62b192eb30",
+ "zh:d0a5d2624341c4e9a93bd7b3fe3d1019935da92e32b1f039c8795f8a717c5826",
+ "zh:d711ffbfc139d2668b397bb2d4481b92bf48e0db2aa39fbcb1d0646e248f6316",
+ "zh:ee9042845d4fbf01fb02f22a8546ceef6365b242cb416ebef7923615ea514ea6",
+ "zh:f809ab383cca0a5f83072981c64208cbd7fa67e986a86ee02dd2c82333221e32",
+ "zh:fa9edba6b336c8aca23a4ea8a3ceae9a4b320dc1eb82f95d38e214d254b3080d",
+ "zh:ffe9b7d164bfba134575514cb097dbd14347f8dc5a8745268ddfdfc7e4b56586",
+ ]
+}
+
provider "registry.terraform.io/scottwinkler/shell" {
version = "1.7.10"
constraints = "~> 1.7.10"
diff --git a/Dockerfile b/Dockerfile
index 6470a8b..4a252ea 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -13,6 +13,7 @@ RUN apk add --no-cache gcc musl-dev make && make
FROM alpine:3
EXPOSE 9000
+EXPOSE 9443
# hadolint ignore=DL3018
RUN \
@@ -24,4 +25,8 @@ COPY --from=builder /build/bin/chaos-monkey /usr/bin/chaos-monkey
WORKDIR /home/chaosmonkey
USER chaosmonkey
+# Copy the certificates over
+COPY --chown=chaosmonkey:users ./certs/chaos-monkey.chaosmonkey.svc.crt ./main.crt
+COPY --chown=chaosmonkey:users ./certs/chaos-monkey.chaosmonkey.svc.key ./main.key
+
CMD ["chaos-monkey"]
diff --git a/Makefile b/Makefile
index 7fbed74..5a120c5 100644
--- a/Makefile
+++ b/Makefile
@@ -3,7 +3,7 @@ TERRAFORM := $(shell which terraform)
DOCKER := $(shell which docker)
APPNAME ?= chaos-monkey
IMAGE ?= chaos-monkey
-TAG ?= 2.2.0
+TAG ?= 3.0.0
all: bin/$(APPNAME)
.PHONY: clean generate bin/$(APPNAME) image-version cluster-test
diff --git a/README.md b/README.md
index 973b27e..37f659d 100644
--- a/README.md
+++ b/README.md
@@ -1,58 +1,52 @@
+# Chaos Monkey
+
-
+
-# Chaos Monkey
-This small project written using [Golang](https://go.dev) implements the ideas of the
-[Netflix's Chaos Monkey](https://netflix.github.io/chaosmonkey/) natively for
-[Kubernetes](https://kubernetes.io) clusters.
+A [Golang](https://go.dev) implementation of the ideas behind [Netflix's Chaos Monkey](https://netflix.github.io/chaosmonkey/), built natively for [Kubernetes](https://kubernetes.io) clusters.
-For this small project I have decided not to use the official
-[Operator Framework for Golang](https://sdk.operatorframework.io/docs/building-operators/golang/tutorial/),
+For this small project I have decided not to use the official [Operator Framework for Golang](https://sdk.operatorframework.io/docs/building-operators/golang/tutorial/),
mainly because I wanted to familiarize with the core concepts of CRDs and Watchers with Golang
before adventuring further. In the future I might want to migrate to using the Operator Framework.
## Architecture
The architecture of the Chaos Monkey is fairly simple and all fits in a single Pod.
-As you can imagine, we rely heavily on
-[Kubernetes' API](https://kubernetes.io/docs/reference/using-api/api-concepts/) to react
-based on what happens inside the cluster.
+As you can imagine, we rely heavily on [Kubernetes' API](https://kubernetes.io/docs/reference/using-api/api-concepts/) to react based on what happens inside the cluster.
Four main components are part of the current architecture.
-
+
### Namespace Watcher
The code for the `NamespaceWatcher` can be found [here](./internal/watcher/namespace.go).
+
Its role is to constantly monitor the changes in the Namespaces of the cluster, and start
the CRD Watchers for those Namespaces. We start the watch by passing `ResourceVersion: ""`
to the Kubernetes API, which means that the first events we receive are synthetic events
(`ADD`) to help us rebuild the current state of the cluster. After that, we react to both
the `ADDED` and the `DELETED` events accordingly.
-Basically, it spawns a new [goroutine](https://go.dev/tour/concurrency/1) with a
-[CRD Watcher](#crd-watcher) every time a new namespace is detected and it stops the
-corresponding goroutine when a namespace is deleted.
+Basically, it spawns a new [goroutine](https://go.dev/tour/concurrency/1) with a [CRD Watcher](#crd-watcher) every time a new namespace is
+detected and it stops the corresponding goroutine when a namespace is deleted.
-The Namespace can be [configured](#configuration) to either monitor all namespaces by
-default (with an opt-out strategy) or to monitor only the namespaces which contain the
-label `cm.massix.github.io/namespace="true"`. Check the [Configuration](#configuration)
-paragraph for more details.
+The Namespace Watcher can be [configured](#configuration) to either monitor all namespaces by default
+(with an opt-out strategy) or to monitor only the namespaces which contain the label
+`cm.massix.github.io/namespace="true"`.
+
+Check the [Configuration](#configuration) paragraph for more details.
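+
+For instance, when the Monkey is configured to monitor only labelled namespaces, a Namespace opts in
+by carrying the label above. A minimal sketch (the namespace name is purely illustrative):
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: my-namespace # illustrative name
+  labels:
+    cm.massix.github.io/namespace: "true"
+```
+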
### CRD Watcher
-We make use of a
-[Custom Resource Definition (CRD)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
-in order to trigger the Chaos Monkey. The CRD is defined using the
-[OpenAPI](https://www.openapis.org/) specification, which you can find
-[here](./crds/chaosmonkey-configuration.yaml).
+We make use of a [Custom Resource Definition (CRD)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) in order to trigger the Chaos Monkey.
+The CRD is defined using the [OpenAPI](https://www.openapis.org/) specification, which you can find [here](./crds/chaosmonkey-configuration.yaml).
-Following the schema, this is a valid definition of a CRD which can be injected inside of
-a namespace:
+Following the schema, this is a valid definition of a CRD which can be injected inside
+of a namespace:
```yaml
-apiVersion: cm.massix.github.io/v1alpha1
+apiVersion: cm.massix.github.io/v1
kind: ChaosMonkeyConfiguration
metadata:
name: chaosmonkey-nginx
@@ -62,8 +56,9 @@ spec:
minReplicas: 0
maxReplicas: 9
timeout: 10s
- deploymentName: nginx
- podMode: true
+ deployment:
+ name: nginx
+ scalingMode: killPod
```
The CRD is **namespaced**, meaning that it **must** reside inside a Namespace and cannot be
@@ -74,13 +69,18 @@ The CRD Watcher, similarly to the [namespace one](#namespace-watcher), reacts to
reacts to the `MODIFIED` event, making it possible to modify a configuration while the
Monkey is running.
-Depending on the value of the `podMode` flag, the CRD watcher will either create a
+Depending on the value of the `scalingMode` flag, the CRD watcher will either create a
[DeploymentWatcher](#deployment-watcher) or a [PodWatcher](#pod-watcher) The difference between
the two is highlighted in the right paragraph, but in short: the DeploymentWatcher
operates by modifying the `spec.replicas` field of the Deployment, using the
`deployment/scale` APIs, while the PodWatcher simply deletes a random pod using the
same `spec.selector` value of the targeted Deployment.
+As of now, three values are supported by the `scalingMode` field:
+* `randomScale`, which will create a [DeploymentWatcher](#deployment-watcher) that randomly modifies the scale of the given deployment;
+* `killPod`, which will create a [PodWatcher](#pod-watcher) that randomly kills a pod;
+* `antiPressure`, which will create an [AntiPressureWatcher](#antipressure-watcher); a sketch of such a configuration follows below.
+
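+For example, a configuration using the `antiPressure` mode could look like the following sketch, where
+the metadata and field values are purely illustrative and the schema is the same as in the example above:
+
+```yaml
+apiVersion: cm.massix.github.io/v1
+kind: ChaosMonkeyConfiguration
+metadata:
+  name: chaosmonkey-nginx-antipressure # illustrative name
+spec:
+  enabled: true
+  minReplicas: 2 # illustrative values
+  maxReplicas: 9
+  timeout: 10m
+  deployment:
+    name: nginx
+  scalingMode: antiPressure
+```
+
+In this mode, the `minReplicas` value is still respected: the Monkey avoids killing a pod if the system
+is already under heavy pressure (see the CRD schema for details).
+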
### Deployment Watcher
This is where the fun begins, the Deployment Watcher is responsible of creating the
Chaos inside the cluster. The watcher is associated to a specific deployment (see the
@@ -103,6 +103,17 @@ of the CRD, it will randomly kill a pod matching the field.
The Pod Watcher **ignores** the `maxReplicas` and `minReplicas` fields of the CRD,
thus generating real chaos inside the cluster.
+### AntiPressure Watcher
+This is another point where the fun begins. The AntiPressure Watcher is responsible
+for creating Chaos inside the cluster by detecting which pod of a given deployment
+is using the most CPU and simply killing it. It works as the opposite of a classic
+[Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/), which is why it is often referred to as `antiHPA` in the code.
+
+**WARNING**: for the AntiPressure Watcher to work, your cluster **must** have a
+[metrics server](https://github.com/kubernetes-sigs/metrics-server) installed; it often comes installed by default on most Cloud providers.
+If you want to install it locally, please refer to the [terraform configuration](./main.tf) included
+in the project itself.
+
## Deployment inside a Kubernetes Cluster
In order to be able to deploy the ChaosMonkey inside a Kubernetes cluster you **must**
first create a [ServiceAccount](https://kubernetes.io/docs/concepts/security/service-accounts/),
@@ -153,6 +164,9 @@ rules:
- verbs: ["create", "patch"]
resources: ["events"]
apiGroups: ["*"]
+ - verbs: ["get"]
+ resources: ["pods"]
+ apiGroups: ["metrics.k8s.io"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
@@ -179,6 +193,17 @@ spec:
serviceAccountName: chaosmonkey
```
+## A note on CRD
+The CRD defines multiple versions of the API (at the moment two versions are supported:
+`v1alpha1` and `v1`). You should **always** use the latest version available (`v1`), but
+there is a conversion endpoint in case you are still using the older version of the API.
+
+The only caveat is that if you **need** to use the conversion Webhook, you **must** install the
+ChaosMonkey in a namespace named `chaosmonkey` and create a service named `chaos-monkey`
+for it.
+
+If in doubt, do not use the older version of the API.
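+
+As a sketch of what the conversion covers (assuming the `v1alpha1` fields shown earlier in this README),
+a resource like the following would be served through the conversion Webhook, with `deploymentName` and
+`podMode` presumably mapped to the new `deployment.name` and `scalingMode` fields:
+
+```yaml
+apiVersion: cm.massix.github.io/v1alpha1
+kind: ChaosMonkeyConfiguration
+metadata:
+  name: chaosmonkey-nginx
+spec:
+  minReplicas: 0 # same values as the v1 example above
+  maxReplicas: 9
+  timeout: 10s
+  deploymentName: nginx
+  podMode: true
+```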
+
## Configuration
There are some configurable parts of the ChaosMonkey (on top of what the [CRD](./crds/chaosmonkey-configuration.yaml)
already permits of course).
@@ -290,6 +315,11 @@ of kubernetes included in the `client-go` library. The problem is that when test
with mocks, most of the times you end up testing the mocks and not the code. That's
the reason why there are also some [integration tests](#integration-tests) included.
+In the future, I plan to completely rewrite the way the tests are run, create
+more _pure_ functions and test those functions in the unit tests, and let the
+[integration tests](#integration-tests) do the rest. If you want to help me reach this goal, feel
+free to open a pull request!
+
### Integration Tests
These tests should cover the basic functionalities of the Chaos Monkey in a local
Kubernetes cluster. The script file is [here](./tests/kubetest.sh) and before launching
@@ -303,3 +333,6 @@ It should be as easy as launching:
You can also activate a more verbose logging for the tests with
TEST_DEBUG=true ./tests/kubetest.sh
+
+## Contributions
+All kinds of contributions are welcome; simply open a pull request or an issue!
diff --git a/assets/cm-architecture.png b/assets/cm-architecture.png
index e707371..f918d30 100644
Binary files a/assets/cm-architecture.png and b/assets/cm-architecture.png differ
diff --git a/assets/grafana-dashboard.json b/assets/grafana-dashboard.json
index 854272c..bc581de 100644
--- a/assets/grafana-dashboard.json
+++ b/assets/grafana-dashboard.json
@@ -74,7 +74,7 @@
},
"gridPos": {
"h": 7,
- "w": 8,
+ "w": 4,
"x": 0,
"y": 1
},
@@ -150,8 +150,8 @@
},
"gridPos": {
"h": 7,
- "w": 8,
- "x": 8,
+ "w": 7,
+ "x": 4,
"y": 1
},
"id": 2,
@@ -228,8 +228,8 @@
},
"gridPos": {
"h": 7,
- "w": 8,
- "x": 16,
+ "w": 7,
+ "x": 11,
"y": 1
},
"id": 3,
@@ -275,6 +275,84 @@
"title": "Active DeploymentWatchers",
"type": "bargauge"
},
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "displayName": "${__series.name}",
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 18,
+ "y": 1
+ },
+ "id": 18,
+ "options": {
+ "displayMode": "basic",
+ "maxVizHeight": 300,
+ "minVizHeight": 16,
+ "minVizWidth": 8,
+ "namePlacement": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showUnfilled": true,
+ "sizing": "auto",
+ "valueMode": "color"
+ },
+ "pluginVersion": "11.1.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "chaos_monkey_crdwatcher_ah_active{instance=\"$instance\"}",
+ "format": "heatmap",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "instant": true,
+ "legendFormat": "{{namespace}}",
+ "range": false,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Active AntiHPAWatchers",
+ "type": "bargauge"
+ },
{
"datasource": {
"type": "prometheus",
@@ -1076,7 +1154,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1170,7 +1249,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1277,7 +1357,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1371,7 +1452,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1435,7 +1517,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1487,6 +1570,213 @@
],
"title": "Last used scale for DeploymentWatcher",
"type": "stat"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 50
+ },
+ "id": 19,
+ "panels": [],
+ "title": "AntiHPAWatchers",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 51
+ },
+ "id": 20,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (namespace, selector) (rate(chaos_monkey_antihpawatcher_pods_killed{instance=\"$instance\"}[15m]) * 60)",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{selector}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Pods killed per minute",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${datasource}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 51
+ },
+ "id": 21,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (namespace, selector) ((rate(chaos_monkey_antihpawatcher_average_cpu{instance=\"$instance\"}[15m]) / rate(chaos_monkey_antihpawatcher_pods_killed{instance=\"$instance\"}[15m])) * 60)",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{selector}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Average Value of CPU for Pods killed",
+ "type": "timeseries"
}
],
"refresh": "30s",
@@ -1498,7 +1788,7 @@
"current": {
"selected": false,
"text": "Prometheus",
- "value": "00000000000000001"
+ "value": "PBFA97CFB590B2093"
},
"hide": 0,
"includeAll": false,
@@ -1515,8 +1805,12 @@
{
"current": {
"selected": false,
- "text": "127.0.0.1:9000",
- "value": "127.0.0.1:9000"
+ "text": "10.244.0.16:9000",
+ "value": "10.244.0.16:9000"
+ },
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
},
"definition": "label_values(chaos_monkey_nswatcher_events,instance)",
"hide": 0,
diff --git a/certs/ca.key b/certs/ca.key
new file mode 100644
index 0000000..c0ff660
--- /dev/null
+++ b/certs/ca.key
@@ -0,0 +1,52 @@
+-----BEGIN PRIVATE KEY-----
+MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDCInGleQRqsbFq
+CCvpm6ioB5Mbi4r6PG6VWfxzXBecQ1+6mm2YlCxxjFYTi4oJxjrWh7Vv/32AI85r
+igrz9Ms6kP98T/Nh/WQ8jKeST2fyqTz4ntU5Sl7v2WeApnwrRDhV287aObbd54nQ
+BILHoMrOt1jVzlIEBgsmF+KhqwvCG/FoIO7SNf8OgFI+5odeMVRIPjdw9dU0wv7J
+XqipG++CjoHn3esctRwMtaP1mgYVDKiuL402W+x5B+GeOLjsLLIvK7llHJN9/0dK
+o5ASz1Zm1clek3FSgrE6rT6n900orm5NVqNJg3aratMdCc7oY23M2Uzm56wHb+26
+OfZsJV1AgCY2H7yoRzeC0DRUHhQLyQ52yMu1uQRmDX3K69K9ymHfJwN59Z57RTH4
+dkD1KAJMZrLdHiH9DomjAYPqWFYmNK1BQWWWo+PTFCymAlNfzquJfm+MPJZTLAOa
+bAmF8SxFxliyR0QNMjZ+GU6txBR/XfJseXYfSlsP0OjlvQWREMiEySTkJK7wPF30
+blYvC8VLtad/JstzWYqBRtLFyqc2L8s6JUrwNw726iEvasJDlgrr/VLwi/4fC7by
+aCI6XsJ0w8agUa/Tf/IJZgEH9Ed4y/1iJYI/N/HKiiMzRCUmdsJivbeCAj9FTbc4
+qoPD//F1pWSrFOkJUxS4YK8p1h+tOwIDAQABAoICAFf+2NiSdCgQh0Ryk8T944LW
+fB2+MK6gedeExGNd2O3htCCXIxpH/6UwYUvWsikgC0d2mA0+F5rfoA8qsTBHQ182
+njew3bcK7kZr0wEEsS9S+uoOxMOFLGc3eizcSegqsgZo7egIxRTsgJEjmkv2XDc6
+8C9pp89fFeqcZQO2zkub8t6qHtZ11zixfZY5VB6j6XodiW/QXmXeBTMtjvGoUfYx
+tjxtYMbQaPhx1PHccOlVpZfewfljzVpG8kOPnnKfB91NymlEfK5d6eixHvsHtDhE
+q1QNLQ6jYsYgBQiJlhXciu7PWJBX4JBuiWbeLUU03G5ACRWY6WctmuoQn9wBS7TI
+gaL86dJcIENqJD74TXFunbeeAK5dZTC5b1vOIHhYYm1ICECCBRBwrSgq85oW8/B5
+9iY28VA4r4LHUXVhRq4TuikwqZ0Yz23ouQnYRBC78VR9whYB1H8P9P624H0K9aZ4
+VdKFUZyUyWaGYxfCCnzJxjm86JGomOy/25+lT2XE8TWpE6eTOBptxRyBPnL6Yoki
+U/aU5K5qM8ytgOBUWfaReFaGDbPTg+Nn3c23278IMPuyCY7ImTG+9yZxOT8tcN94
+qLs5GeYk36/rlqeTJVI/2FwZaaD8tXwCT3IHTXUJ6a65pvq9Ofgehw/ZWGStg/Es
+xR+DFposia8szJFC5YQxAoIBAQDq3esOpFDWRIDix+45mxzcWgK4J4Z3d7mkSgxn
+wpDRN8Hv0a4WS+61ErMIhDCc/9Z3rkTTAWV+gMTmPaoFDtFf03QLPQJV3yHv2MCT
+tjVsdjLXiD5EcJ12w3D4PZBTudlAlpWL499XIfTpfuJSwytMzD8u5XdQSXbyTqZR
+Oc6dQjufo8j7krcCuEt7bHav0xLJXFlEwQu1rBVxwCpiNBVCEVABQxrDoUWA7c9a
+94MxvS6EGBtTu1jg+Aj8FeSICbC9SlI+Pg4hUxwcz/4Mcn2V3w3jkVgAza+jgyvl
+2iDl8gCSXpaULJs4z1dy/ncxSNdyMLtKJEwOX5Us1U4wderfAoIBAQDTmkUdorcM
+IgwgnbMNKXrGCRzNv1ynZfqEZi6abr5DB49G1Mne5S91aRV6dY8BKMNW6JpRXuN8
+QCElUhpz0aKcsfrCEeqmvha+iyGf7PmSiU50qCfLEmpeDixHsp+zUBzghK1hWmf0
+FtWKzTVGy5/4X3R25G7zlUhWk37kQ3xeVWaHcbEeqxM0YIIqcBHN+x68F+p/wKR9
+xr/K9ZEJ8c1bstkbzqoXyhXCMdyx7oH3EIDLqCJC7gqBMRFwmd7SOQrQ3SuAubxq
+7a/HQ8xXgsFtfbqAP/5pXIc2BFHWOP2KEZSFKhq9ZvO1L7tLi93YbQdUtM39xIEf
+pv1U8g5kraUlAoIBABMOABUHBbvsgNxlRhGMYRILh27zXfhxUTFgBJOieGdQ874G
+L+6FKI+uPbIyL1N9eiPpkpHf6iESR2c6l4Gjix8QI7kJsRfQa4tu9WjGfp0XFdbg
+qdSZolzRTGgHbp8sU9DmpqlbynHiQmFzNt9qEChB5dpjKYPtAeZ0tQ+f9gBSME4q
+VpL6eziQRSd1TEfFgx+tC10FoHKTzIWXBplDCnp6txfzHsfCXMYyBQGnVRCC/bQf
+1I+9gl92IBx7ljfnOVySHAwKstDHUl+QDFdsLn4rQ5Az6YTfKyHD8ZrLUKH3OU6m
+t+a7m7hHMhwVQwxPkZMlGvzP5w+/d3XUtGxAFNcCggEBALyzeqdvD+YWBtKfT6mR
+MBei0Lj6ylnOK5Yeubimwa1s9GyqHkxT/fqqd6j5ZUoXW/wI+nVONXf8iJKLoWNf
+s7AhWAnxiFyicA4EbTv9TKNNJ1YJD1OdXJX79akKtjhmXRort6J1yPZ31n8teTFl
+LYeFOIs+M2Ot4RC0ABj3xdUqO4DV9qnuzfWLiwjlYDtckpASk+DRnt87epY4X9uK
+cyQvXkXaCv2kDbtcU/+pPuVhJjp3+fXN6jhD1dWgooC+tdKFKeJlwI1q4bLF11jD
+FlPaJ5NvfiXfigS13XU45YFXJCeM4MO/J96Qbmp0lKYItNzpUDnaH3xmosw6AHPw
+FrECggEBANHOcl0L88de5ANWBxYuISgYD4bNcpA1+ZR1GJ87zrg+576IcnRh1slC
+xzlBAb6iWiA2PeXJPxSLgB+D278b0NJERAjrd9Ym92ZIP6O1vLOKy198Rd4b0ihX
+h0QDj1n2f3wCVWv5nf6+FujdHTpM8BHjNrnAvPiT4l51y0ryHym2JSp12IntI956
+g8RIdHFHg3J32DyQQPNHgCbCGW8ZB6BT1C9N4G8FTaQj0vdy/+ChXYpXa8aSq7sP
+mkaXTLHr/U3cdVrLF+krKs/EtDVRTJlknwEHvolXStLm3RTnB1GbTsJqjU+qvHkH
++yguFaA4utr9XESsZk0R5RprwjvHK+0=
+-----END PRIVATE KEY-----
diff --git a/certs/ca.pem b/certs/ca.pem
new file mode 100644
index 0000000..51a9e77
--- /dev/null
+++ b/certs/ca.pem
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIUBSenMfCoyedpog3XhYsIr+pFa2EwDQYJKoZIhvcNAQEL
+BQAwRTELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yNDA3MjAxMzE0MDdaFw0yOTA3
+MTkxMzE0MDdaMEUxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
+HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQDCInGleQRqsbFqCCvpm6ioB5Mbi4r6PG6VWfxzXBec
+Q1+6mm2YlCxxjFYTi4oJxjrWh7Vv/32AI85rigrz9Ms6kP98T/Nh/WQ8jKeST2fy
+qTz4ntU5Sl7v2WeApnwrRDhV287aObbd54nQBILHoMrOt1jVzlIEBgsmF+KhqwvC
+G/FoIO7SNf8OgFI+5odeMVRIPjdw9dU0wv7JXqipG++CjoHn3esctRwMtaP1mgYV
+DKiuL402W+x5B+GeOLjsLLIvK7llHJN9/0dKo5ASz1Zm1clek3FSgrE6rT6n900o
+rm5NVqNJg3aratMdCc7oY23M2Uzm56wHb+26OfZsJV1AgCY2H7yoRzeC0DRUHhQL
+yQ52yMu1uQRmDX3K69K9ymHfJwN59Z57RTH4dkD1KAJMZrLdHiH9DomjAYPqWFYm
+NK1BQWWWo+PTFCymAlNfzquJfm+MPJZTLAOabAmF8SxFxliyR0QNMjZ+GU6txBR/
+XfJseXYfSlsP0OjlvQWREMiEySTkJK7wPF30blYvC8VLtad/JstzWYqBRtLFyqc2
+L8s6JUrwNw726iEvasJDlgrr/VLwi/4fC7byaCI6XsJ0w8agUa/Tf/IJZgEH9Ed4
+y/1iJYI/N/HKiiMzRCUmdsJivbeCAj9FTbc4qoPD//F1pWSrFOkJUxS4YK8p1h+t
+OwIDAQABo1MwUTAdBgNVHQ4EFgQU3V1QDjdSPbkz7RTolAhphXmAg1AwHwYDVR0j
+BBgwFoAU3V1QDjdSPbkz7RTolAhphXmAg1AwDwYDVR0TAQH/BAUwAwEB/zANBgkq
+hkiG9w0BAQsFAAOCAgEAABnCrjKTLPUG/LzqYU9vIS56IC1D+hYAY2jK+UVZuCx9
+m1uHb7z/We1h5kX6Y7PhWcB1j/WaGLmO56RwqPAbOuGygttUYELJhesaM7oa1PmT
+6f2Dhju6GzqTI6+6qNGSqJLwmmF5SoW1skmKGo/Dp7YBrnC4o8k+ztodEi2tFEIU
+JAJeAuI889ldYvve56AOGT6lcjqghVs/zdaNNdOe9q9PDlDQfygfQaFmEbYVTz9T
+MdtNQGIUjK13qn7yal6OrgMLTv2MBIK5QUkK2Ipaz6DOh8EN+J8LGNA/zH/nrRq+
+vZSkHEXdmGptmxnU8+j9PG+wcLebPCb80u4QP5tpd2dAb4lgvI9qe5XMEoA5ZSn2
+PO8ZUtl8aRJSVDKX+NlCOd3AvR3LZb7+F+VsI+8cqdyaV53JkRG61RFEdLd1tjHo
+r3gNobyIYAwZnH5xuwMDbvFfI0nI2rawUs7KMrTRlxutka2kbjE97CtLIBnXjkck
+lPKZDrCfnH5mghJK/qwW0Pca+HPOgdqYzsw4o4gUbYeX8vqvOPxntGx8HOXN8q3l
+vo2m/76dE63k4jtvJz4FNdBkDjSldnKDnPtIYLM7YMxxaWO574bkwl2I2t75Ig5X
+uCuQ49vfisjjVnDmb+y044fXWAdxL4J4szuVlT3ke3Eh55uFnAocfqBTV9HoQfY=
+-----END CERTIFICATE-----
diff --git a/certs/ca.srl b/certs/ca.srl
new file mode 100644
index 0000000..1529ac4
--- /dev/null
+++ b/certs/ca.srl
@@ -0,0 +1 @@
+6465863EC2FD9368982E213156354C60AF25DCC5
diff --git a/certs/chaos-monkey.chaosmonkey.svc.crt b/certs/chaos-monkey.chaosmonkey.svc.crt
new file mode 100644
index 0000000..1d1a830
--- /dev/null
+++ b/certs/chaos-monkey.chaosmonkey.svc.crt
@@ -0,0 +1,27 @@
+-----BEGIN CERTIFICATE-----
+MIIEnTCCAoWgAwIBAgIUZGWGPsL9k2iYLiExVjVMYK8l3MUwDQYJKoZIhvcNAQEL
+BQAwRTELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yNDA3MjAxMzU5NDRaFw0yNjEw
+MjMxMzU5NDRaMEUxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
+HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQC23z2lnx/FR3LK6QGKA9W1LkefoD/2urJevjaBK1lg
+bEzcxSArDMauh+pOGJOhoR4/wwaOAlhXe54vc5xx1hWpKK/GjTxaP6X3nMTaVvQn
+oiqJZrskt9SN4LOx0ro93sT62XB006jeFXcJ31OVh1RZoQ5UPIbyb3Sc9s/fPJ0D
+D8TVSN4Z1xjpE0A9wBu6h7ZPXh7/wBXmmjukNwC/bzgXs0OCNe9ufXdjwL8EvnsL
+vdAIuQNhHW5qzFj+QB/vqRVdJvp+HqSHr0PDWfNfqmflhf3ffR/xk5S7bwlSrP04
+gtS1aJ0m1XmJg8SsopJF8HXGXNQBxJMUXt3OGm3etjBFAgMBAAGjgYQwgYEwHwYD
+VR0jBBgwFoAU3V1QDjdSPbkz7RTolAhphXmAg1AwCQYDVR0TBAIwADALBgNVHQ8E
+BAMCBPAwJwYDVR0RBCAwHoIcY2hhb3MtbW9ua2V5LmNoYW9zbW9ua2V5LnN2YzAd
+BgNVHQ4EFgQUHx/md1rCS0Z/LgUJvgZuzQNl3x0wDQYJKoZIhvcNAQELBQADggIB
+ACLlozd5WtyWzl3xYkL50PZVyeSWstkDAu6Ud6ZJlm1kC9niWFgXjnmcHyfpCp8a
+ZgwEqHdYcnbqCMzjPtDo9gaYf1SWoC0HG65gQhJVll0Ft/htnNnsUAnHEWoNnSfq
+1LK//kqgz5C47dd2hDRHuk3nqTcokGkS5g/kJCpTCQiAfV9nSiZNfFfIiUEEUaIR
+4+tWA6CrtnL4zKG0XgOSX3vj66JTkMtNO+S9dMGSYmdDWHyLlrsMr+fJWAcRgRK3
+Kn8oJQZTIiy2T4I+MVhjHYUU+IQKXoYOQHdQVZX/BZ/dLeiCXvib180QsKjdf9Vk
+zGByjCY/xBgAz8uBk332PhBesu+08EPG/TJPV/XNZ1jX2GaGBIC83Bk4Xj7HiK2R
+hNkHtRnTMhLFgwP2D8d35z5shuhsUFzZUK9MQcYBTzh7c1eCwWvf4IoWQgy98fAz
+4xpNyuImvG3w3mrIU5YCVGxqOjekHuLZP6YABc8ZPnoanLy6qDg8aSP0nw6I8smJ
+UOLzHuwlVsg1Z8dguDqo9eqTLwHPxXNHXCaetILHN+HAf5kn6+6TMj2hw0GC6wqw
+OosBrXdi+Rbaz6LxPEyo1URswVwsjmyevZDhvnb7Y8dPZKg18HmfFnYKE/37qS8g
+3eSQuZrJznjMx5h3YOXqBILeVXE0uIKCCv2AVdBPo1fo
+-----END CERTIFICATE-----
diff --git a/certs/chaos-monkey.chaosmonkey.svc.csr b/certs/chaos-monkey.chaosmonkey.svc.csr
new file mode 100644
index 0000000..aebcf0d
--- /dev/null
+++ b/certs/chaos-monkey.chaosmonkey.svc.csr
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICijCCAXICAQAwRTELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUx
+ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBALbfPaWfH8VHcsrpAYoD1bUuR5+gP/a6sl6+NoEr
+WWBsTNzFICsMxq6H6k4Yk6GhHj/DBo4CWFd7ni9znHHWFakor8aNPFo/pfecxNpW
+9CeiKolmuyS31I3gs7HSuj3exPrZcHTTqN4VdwnfU5WHVFmhDlQ8hvJvdJz2z988
+nQMPxNVI3hnXGOkTQD3AG7qHtk9eHv/AFeaaO6Q3AL9vOBezQ4I17259d2PAvwS+
+ewu90Ai5A2EdbmrMWP5AH++pFV0m+n4epIevQ8NZ81+qZ+WF/d99H/GTlLtvCVKs
+/TiC1LVonSbVeYmDxKyikkXwdcZc1AHEkxRe3c4abd62MEUCAwEAAaAAMA0GCSqG
+SIb3DQEBCwUAA4IBAQAGxMd9tGwXhCTxYF86OsdK++ABynt8p5HyRGr1PnOiLJcf
+eSKWeVRLrzPQXAO/2FiFH1bnsfnaqpcywQWsMlwOLu4vaHk8eeOBjUHdikld4g0b
+2Ko3VqNF6i9cqcyBKaLk/THhRkXZmSVDYSp0Auq578k1WIbH76kOmg1QvZlyKla4
+JFzL9WDfHxnuMeN8xt/fJV+x5Bn0vHpb0HbTafgebAeeP38JXIAZt3vORWTW9r9z
+6JyjtKOX54eOslMO0jJqnfAcOE3ziR0SycmXWb6vKCVPueX1XML3+8DDNI4tg85i
+t5flORI3L/unZC5+Rew/0Pt7YjSS3KG5vDxU3gKe
+-----END CERTIFICATE REQUEST-----
diff --git a/certs/chaos-monkey.chaosmonkey.svc.ext b/certs/chaos-monkey.chaosmonkey.svc.ext
new file mode 100644
index 0000000..8485644
--- /dev/null
+++ b/certs/chaos-monkey.chaosmonkey.svc.ext
@@ -0,0 +1,7 @@
+authorityKeyIdentifier=keyid,issuer
+basicConstraints=CA:FALSE
+keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
+subjectAltName = @alt_names
+
+[alt_names]
+DNS.1 = chaos-monkey.chaosmonkey.svc
diff --git a/certs/chaos-monkey.chaosmonkey.svc.key b/certs/chaos-monkey.chaosmonkey.svc.key
new file mode 100644
index 0000000..3c7d73f
--- /dev/null
+++ b/certs/chaos-monkey.chaosmonkey.svc.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC23z2lnx/FR3LK
+6QGKA9W1LkefoD/2urJevjaBK1lgbEzcxSArDMauh+pOGJOhoR4/wwaOAlhXe54v
+c5xx1hWpKK/GjTxaP6X3nMTaVvQnoiqJZrskt9SN4LOx0ro93sT62XB006jeFXcJ
+31OVh1RZoQ5UPIbyb3Sc9s/fPJ0DD8TVSN4Z1xjpE0A9wBu6h7ZPXh7/wBXmmjuk
+NwC/bzgXs0OCNe9ufXdjwL8EvnsLvdAIuQNhHW5qzFj+QB/vqRVdJvp+HqSHr0PD
+WfNfqmflhf3ffR/xk5S7bwlSrP04gtS1aJ0m1XmJg8SsopJF8HXGXNQBxJMUXt3O
+Gm3etjBFAgMBAAECggEABSpZcu1Y4o5/QxUBep+wxdKIqmJxPBZk8GvTXU2sqMkW
+WLa8Dws9gh42Xj4FJC/JYMgGTjTpwDrZKoJLp87Xbh+clhv/hEBqVIwEkXLwUuhF
+PFFUYEKjCd46z9SMdy/s2iBnM/LrQQP+iJ8Fcjf63XdA+4ckJq1C8CYWlA8saN2D
+xFdVlxuHiamjXEiIprqUud9I3fBYYorPRPMpasVTzbCu9r0QLo8iYw4gNWHaWP2L
+ooaNJF9S21n8bb0+6LQyILoTWE6S5D5uOIHc8vTD3oAgzc2L3+eIpNlcuYmVfY5X
+scQ6m+Ni+xr+mUbrLfiLWRte4GdiOhwu8nGyWLUMVwKBgQDeQ+ce6Ye8pWWDJpYY
+T49arNXVThfLIxzwqmRIDGk0vQPYP6RP81CgHg0fAZvlDIkxY7k+nJ5tOv/vIPBO
+QIW8YJihS6aA5sGNVSmY5C+FK8kAWeBY16/z2bToYsqNy+8MHAxiR0qKJHKDNwEU
+yzPOxZB5Bjae3fe6ra9jkyiIQwKBgQDSoLfXsoSDZbJv9Tpu1XH9w8RRuuS/6Wxm
+vIvvT4MiHyEwMovfMYOx+gIvHpPQie0m5ckqoSrebajPl7REYW1PgFHEV8vNCZfu
+TwQDnUkf/3Xd1virtAcAUaB3JwUektZ/W9q253jWS8lcth5CAae0BxVUToQpzRtG
+z6jFI8lA1wKBgAiF0An2aHcKXI8f3qeF/Xg7omNwgCcQ7J4p2niMyUf1a31SS2mp
+adEJ9fTXafPpfCVBrqvA8646ke/Ico59mOM1TQT6UqMktg1GU7cCIPRZTnbN+3VM
+p/mQwogaqauwQzVSqgLmuuv7I8z2QftbI1xtZPHPFC8ZUdN9r3kA/o/jAoGBAMHD
+vJyfy6wdaFP6ozDHB0DOUdJmPn4WPPUeSxVybk57mMqAFZDUOjUGq++xD4Zu/E4m
+YjPnZEqPUxb2lo25mxfiElav9fq/8pb29dhlX5oiJspYMxeyXCZ88UPsD5eaud0c
+IU7WDqIFwk6Z86gBYcgKJCMUwBy8ZXV1H+uMCl6tAoGBAJrtV3sMH4jP+AjJtuvC
+zNG1pOhZ6hWHaKsZ+p3Wpcbq3KxGDpk0ss3kqKM542JpepHGf1nWwzMcdIJVYWds
+QXVDZ+FfzM4J8OzKxWBe6duESfgEMSgcjR7XwFQZZQHYUiZQfvZAQo2TaJcdj4n7
+LaOtW/zcx9nBNK5vkDu0qpRN
+-----END PRIVATE KEY-----
diff --git a/cmd/chaosmonkey/main.go b/cmd/chaosmonkey/main.go
index c280b79..2a98626 100644
--- a/cmd/chaosmonkey/main.go
+++ b/cmd/chaosmonkey/main.go
@@ -8,7 +8,7 @@ import (
"sync"
"syscall"
- "github.com/massix/chaos-monkey/internal/apis/clientset/versioned"
+ cmcv "github.com/massix/chaos-monkey/internal/apis/clientset/versioned"
"github.com/massix/chaos-monkey/internal/configuration"
"github.com/massix/chaos-monkey/internal/endpoints"
"github.com/massix/chaos-monkey/internal/watcher"
@@ -16,6 +16,7 @@ import (
"github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
+ "k8s.io/metrics/pkg/client/clientset/versioned"
)
var Version string
@@ -46,9 +47,10 @@ func main() {
}
clientset := kubernetes.NewForConfigOrDie(cfg)
- cmcClientset := versioned.NewForConfigOrDie(cfg)
+ cmcClientset := cmcv.NewForConfigOrDie(cfg)
+ metricsClientset := versioned.NewForConfigOrDie(cfg)
- nsWatcher := watcher.DefaultNamespaceFactory(clientset, cmcClientset, nil, namespace, conf.Behavior)
+ nsWatcher := watcher.DefaultNamespaceFactory(clientset, cmcClientset, metricsClientset, nil, namespace, conf.Behavior)
// Hook signals
s := make(chan os.Signal, 1)
@@ -63,29 +65,47 @@ func main() {
}()
// Spawn the HTTP Server for Prometheus in background
- srv := &http.Server{
+ httpServer := &http.Server{
Addr: "0.0.0.0:9000",
}
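+	// The TLS server exposes the same handlers over HTTPS on port 9443; it presumably backs the
+	// CRD conversion webhook (the `chaos-monkey` Service referenced in crds/chaosmonkey-configuration.yaml).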
+ tlsServer := &http.Server{
+ Addr: "0.0.0.0:9443",
+ }
+
// Register methods
http.Handle("GET /metrics", promhttp.Handler())
http.Handle("GET /health", endpoints.NewHealthEndpoint(nsWatcher.(*watcher.NamespaceWatcher)))
+ http.Handle("POST /convertcrd", endpoints.NewConversionEndpoint())
+
wg.Add(1)
go func() {
defer wg.Done()
- if err := srv.ListenAndServe(); err != nil {
+ if err := httpServer.ListenAndServe(); err != nil {
log.Warnf("Could not spawn http server: %s", err)
}
}()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ if err := tlsServer.ListenAndServeTLS("./main.crt", "./main.key"); err != nil {
+ log.Errorf("Could not spawn https server: %s", err)
+ }
+ }()
+
// Wait for a signal to arrive
<-s
- if err := srv.Shutdown(context.Background()); err != nil {
+ if err := httpServer.Shutdown(context.Background()); err != nil {
log.Warnf("Could not shutdown http server: %s", err)
}
+ if err := tlsServer.Shutdown(context.Background()); err != nil {
+ log.Warnf("Could not shutdown https server: %s", err)
+ }
+
log.Info("Shutting down...")
cancel()
diff --git a/crds/chaosmonkey-configuration.yaml b/crds/chaosmonkey-configuration.yaml
index 1bfc21c..0c965bb 100644
--- a/crds/chaosmonkey-configuration.yaml
+++ b/crds/chaosmonkey-configuration.yaml
@@ -5,16 +5,144 @@ metadata:
spec:
group: cm.massix.github.io
scope: Namespaced
+ preserveUnknownFields: false
names:
plural: chaosmonkeyconfigurations
singular: chaosmonkeyconfiguration
kind: ChaosMonkeyConfiguration
- shortNames:
- - cmc
+ shortNames: ["cmc"]
+ conversion:
+ strategy: Webhook
+ webhook:
+ conversionReviewVersions: ["v1"]
+ clientConfig:
+ service:
+ name: chaos-monkey
+ namespace: chaosmonkey
+ port: 443
+ path: /convertcrd
+ caBundle: |-
+ LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZhekNDQTFPZ0F3SUJBZ0lVQlNlbk1mQ295
+ ZWRwb2czWGhZc0lyK3BGYTJFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1JURUxNQWtHQTFVRUJoTUNS
+ bEl4RXpBUkJnTlZCQWdNQ2xOdmJXVXRVM1JoZEdVeElUQWZCZ05WQkFvTQpHRWx1ZEdWeWJtVjBJ
+ RmRwWkdkcGRITWdVSFI1SUV4MFpEQWVGdzB5TkRBM01qQXhNekUwTURkYUZ3MHlPVEEzCk1Ua3hN
+ ekUwTURkYU1FVXhDekFKQmdOVkJBWVRBa1pTTVJNd0VRWURWUVFJREFwVGIyMWxMVk4wWVhSbE1T
+ RXcKSHdZRFZRUUtEQmhKYm5SbGNtNWxkQ0JYYVdSbmFYUnpJRkIwZVNCTWRHUXdnZ0lpTUEwR0NT
+ cUdTSWIzRFFFQgpBUVVBQTRJQ0R3QXdnZ0lLQW9JQ0FRRENJbkdsZVFScXNiRnFDQ3ZwbTZpb0I1
+ TWJpNHI2UEc2VldmeHpYQmVjClExKzZtbTJZbEN4eGpGWVRpNG9KeGpyV2g3VnYvMzJBSTg1cmln
+ cno5TXM2a1A5OFQvTmgvV1E4aktlU1QyZnkKcVR6NG50VTVTbDd2MldlQXBud3JSRGhWMjg3YU9i
+ YmQ1NG5RQklMSG9Nck90MWpWemxJRUJnc21GK0tocXd2QwpHL0ZvSU83U05mOE9nRkkrNW9kZU1W
+ UklQamR3OWRVMHd2N0pYcWlwRysrQ2pvSG4zZXNjdFJ3TXRhUDFtZ1lWCkRLaXVMNDAyVyt4NUIr
+ R2VPTGpzTExJdks3bGxISk45LzBkS281QVN6MVptMWNsZWszRlNnckU2clQ2bjkwMG8Kcm01TlZx
+ TkpnM2FyYXRNZENjN29ZMjNNMlV6bTU2d0hiKzI2T2Zac0pWMUFnQ1kySDd5b1J6ZUMwRFJVSGhR
+ TAp5UTUyeU11MXVRUm1EWDNLNjlLOXltSGZKd041OVo1N1JUSDRka0QxS0FKTVpyTGRIaUg5RG9t
+ akFZUHFXRlltCk5LMUJRV1dXbytQVEZDeW1BbE5menF1SmZtK01QSlpUTEFPYWJBbUY4U3hGeGxp
+ eVIwUU5NalorR1U2dHhCUi8KWGZKc2VYWWZTbHNQME9qbHZRV1JFTWlFeVNUa0pLN3dQRjMwYmxZ
+ dkM4Vkx0YWQvSnN0eldZcUJSdExGeXFjMgpMOHM2SlVyd053NzI2aUV2YXNKRGxncnIvVkx3aS80
+ ZkM3YnlhQ0k2WHNKMHc4YWdVYS9UZi9JSlpnRUg5RWQ0CnkvMWlKWUkvTi9IS2lpTXpSQ1VtZHNK
+ aXZiZUNBajlGVGJjNHFvUEQvL0YxcFdTckZPa0pVeFM0WUs4cDFoK3QKT3dJREFRQUJvMU13VVRB
+ ZEJnTlZIUTRFRmdRVTNWMVFEamRTUGJrejdSVG9sQWhwaFhtQWcxQXdId1lEVlIwagpCQmd3Rm9B
+ VTNWMVFEamRTUGJrejdSVG9sQWhwaFhtQWcxQXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QU5CZ2tx
+ CmhraUc5dzBCQVFzRkFBT0NBZ0VBQUJuQ3JqS1RMUFVHL0x6cVlVOXZJUzU2SUMxRCtoWUFZMmpL
+ K1VWWnVDeDkKbTF1SGI3ei9XZTFoNWtYNlk3UGhXY0Ixai9XYUdMbU81NlJ3cVBBYk91R3lndHRV
+ WUVMSmhlc2FNN29hMVBtVAo2ZjJEaGp1Nkd6cVRJNis2cU5HU3FKTHdtbUY1U29XMXNrbUtHby9E
+ cDdZQnJuQzRvOGsrenRvZEVpMnRGRUlVCkpBSmVBdUk4ODlsZFl2dmU1NkFPR1Q2bGNqcWdoVnMv
+ emRhTk5kT2U5cTlQRGxEUWZ5Z2ZRYUZtRWJZVlR6OVQKTWR0TlFHSVVqSzEzcW43eWFsNk9yZ01M
+ VHYyTUJJSzVRVWtLMklwYXo2RE9oOEVOK0o4TEdOQS96SC9uclJxKwp2WlNrSEVYZG1HcHRteG5V
+ OCtqOVBHK3djTGViUENiODB1NFFQNXRwZDJkQWI0bGd2STlxZTVYTUVvQTVaU24yClBPOFpVdGw4
+ YVJKU1ZES1grTmxDT2QzQXZSM0xaYjcrRitWc0krOGNxZHlhVjUzSmtSRzYxUkZFZExkMXRqSG8K
+ cjNnTm9ieUlZQXdabkg1eHV3TURidkZmSTBuSTJyYXdVczdLTXJUUmx4dXRrYTJrYmpFOTdDdExJ
+ Qm5YamtjawpsUEtaRHJDZm5INW1naEpLL3F3VzBQY2ErSFBPZ2RxWXpzdzRvNGdVYlllWDh2cXZP
+ UHhudEd4OEhPWE44cTNsCnZvMm0vNzZkRTYzazRqdHZKejRGTmRCa0RqU2xkbktEblB0SVlMTTdZ
+ TXh4YVdPNTc0Ymt3bDJJMnQ3NUlnNVgKdUN1UTQ5dmZpc2pqVm5EbWIreTA0NGZYV0FkeEw0SjRz
+ enVWbFQza2UzRWg1NXVGbkFvY2ZxQlRWOUhvUWZZPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t
+ Cg==
versions:
- - name: v1alpha1
+ - name: v1
served: true
storage: true
+ additionalPrinterColumns:
+ - jsonPath: .spec.enabled
+ name: Enabled
+ type: boolean
+ - jsonPath: .spec.scalingMode
+ name: Scaling Mode
+ type: string
+ - jsonPath: .spec.deployment.name
+ name: Target Deployment
+ type: string
+ - jsonPath: .spec.minReplicas
+ name: Min Replicas
+ type: integer
+ - jsonPath: .spec.maxReplicas
+ name: Max Replicas
+ type: integer
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ spec:
+ type: object
+ x-kubernetes-validations:
+ - rule: "self.minReplicas <= self.maxReplicas"
+ message: "minReplicas must be less than or equal to maxReplicas"
+ properties:
+ enabled:
+ type: boolean
+ description: |
+                Whether or not this configuration of the ChaosMonkey is enabled. Setting this
+ to `false` will disable the ChaosMonkey, regardless of all other configurations.
+ minReplicas:
+ type: integer
+ description: |
+                When using the "randomScale" or "antiPressure" mode, this sets the minimum number of
+ replicas that should exist in the system. This value is ignored if using the "killPod"
+ scaling mode.
+ x-kubernetes-validations:
+ - rule: "self >= 0"
+ message: "minReplicas must be greater than or equal to 0"
+ maxReplicas:
+ type: integer
+ description: |
+                When using the "randomScale" or "antiPressure" mode, this sets the maximum number of
+ replicas that should exist in the system. This value is ignored if using the "killPod"
+ scaling mode.
+ scalingMode:
+ type: string
+ enum: ["randomScale", "antiPressure", "killPod"]
+ description: |
+ This sets the behavior of the ChaosMonkey. Currently, three behaviors are supported:
+ - randomScale: The ChaosMonkey will modify the .spec.replicas value of the targeted
+ deployment, respecting the `minReplicas` and `maxReplicas` values;
+                - killPod: The ChaosMonkey will randomly kill pods belonging to the targeted deployment;
+                  this scaling mode won't modify the existing deployment and will ignore the values
+                  set in `minReplicas` and `maxReplicas`;
+                - antiPressure: The ChaosMonkey will constantly monitor the resource usage of the
+                  pods belonging to the targeted deployment; when the timeout expires, it will choose
+                  the pod which is currently consuming the most resources, which should kill the most
+                  active pod in the system. This scaling mode won't modify the existing deployment, but
+                  it will respect the `minReplicas` value, avoiding killing a pod if the system is already
+                  under heavy pressure.
+ deployment:
+ type: object
+ properties:
+ name:
+ type: string
+ description: The name of the targeted deployment.
+ x-kubernetes-validations:
+ - rule: "self != ''"
+ message: "deployment name must not be empty"
+ timeout:
+ type: string
+ description: |
+                The timeout for the ChaosMonkey: every time the timeout expires, the ChaosMonkey will
+                be triggered and possibly disrupt the system in some way. The string *must* respect
+                the format of Golang's ParseDuration function (e.g. "10m" means every 10 minutes).
+ - name: v1alpha1
+ served: true
+ storage: false
+ deprecated: true
+ deprecationWarning: "cm.massix.github.io/v1alpha1 is now deprecated, please migrate to cm.massix.github.io/v1"
schema:
openAPIV3Schema:
type: object
@@ -67,8 +195,6 @@ spec:
lastKnownReplicas:
type: integer
description: Last known number of replicas
- subresources:
- status: {}
additionalPrinterColumns:
- name: Enabled
type: boolean
diff --git a/go.mod b/go.mod
index d33b567..5652f17 100644
--- a/go.mod
+++ b/go.mod
@@ -5,10 +5,11 @@ go 1.22.3
require (
github.com/prometheus/client_golang v1.19.1
github.com/sirupsen/logrus v1.9.3
- k8s.io/api v0.30.2
- k8s.io/apimachinery v0.30.2
- k8s.io/client-go v0.30.2
- k8s.io/code-generator v0.30.2
+ k8s.io/api v0.30.3
+ k8s.io/apimachinery v0.30.3
+ k8s.io/client-go v0.30.3
+ k8s.io/code-generator v0.30.3
+ k8s.io/metrics v0.30.3
)
require (
diff --git a/go.sum b/go.sum
index 4196537..0e8e8e2 100644
--- a/go.sum
+++ b/go.sum
@@ -156,20 +156,22 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
-k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
-k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
-k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
-k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50=
-k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs=
-k8s.io/code-generator v0.30.2 h1:ZY1+aGkqZVwKIyGsOzquaeZ5rSfE6wZHur8z3jQAaiw=
-k8s.io/code-generator v0.30.2/go.mod h1:RQP5L67QxqgkVquk704CyvWFIq0e6RCMmLTXxjE8dVA=
+k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ=
+k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04=
+k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc=
+k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
+k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k=
+k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U=
+k8s.io/code-generator v0.30.3 h1:bmtnLJKagDS5f5uOEpLyJiDfIMKXGMKgOLBdde+w0Mc=
+k8s.io/code-generator v0.30.3/go.mod h1:PFgBiv+miFV7TZYp+RXgROkhA+sWYZ+mtpbMLofMke8=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
+k8s.io/metrics v0.30.3 h1:gKCpte5zykrOmQhZ8qmsxyJslMdiLN+sqbBfIWNpbGM=
+k8s.io/metrics v0.30.3/go.mod h1:W06L2nXRhOwPkFYDJYWdEIS3u6JcJy3ebIPYbndRs6A=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
diff --git a/internal/apis/clientset/versioned/clientset.go b/internal/apis/clientset/versioned/clientset.go
index 1394f58..9a40d82 100644
--- a/internal/apis/clientset/versioned/clientset.go
+++ b/internal/apis/clientset/versioned/clientset.go
@@ -8,6 +8,7 @@ import (
"fmt"
"net/http"
+ chaosmonkeyconfigurationv1 "github.com/massix/chaos-monkey/internal/apis/clientset/versioned/typed/apis/v1"
chaosmonkeyconfigurationv1alpha1 "github.com/massix/chaos-monkey/internal/apis/clientset/versioned/typed/apis/v1alpha1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
@@ -16,15 +17,22 @@ import (
type Interface interface {
Discovery() discovery.DiscoveryInterface
+ ChaosMonkeyConfigurationV1() chaosmonkeyconfigurationv1.ChaosMonkeyConfigurationV1Interface
ChaosMonkeyConfigurationV1alpha1() chaosmonkeyconfigurationv1alpha1.ChaosMonkeyConfigurationV1alpha1Interface
}
// Clientset contains the clients for groups.
type Clientset struct {
*discovery.DiscoveryClient
+ chaosMonkeyConfigurationV1 *chaosmonkeyconfigurationv1.ChaosMonkeyConfigurationV1Client
chaosMonkeyConfigurationV1alpha1 *chaosmonkeyconfigurationv1alpha1.ChaosMonkeyConfigurationV1alpha1Client
}
+// ChaosMonkeyConfigurationV1 retrieves the ChaosMonkeyConfigurationV1Client
+func (c *Clientset) ChaosMonkeyConfigurationV1() chaosmonkeyconfigurationv1.ChaosMonkeyConfigurationV1Interface {
+ return c.chaosMonkeyConfigurationV1
+}
+
// ChaosMonkeyConfigurationV1alpha1 retrieves the ChaosMonkeyConfigurationV1alpha1Client
func (c *Clientset) ChaosMonkeyConfigurationV1alpha1() chaosmonkeyconfigurationv1alpha1.ChaosMonkeyConfigurationV1alpha1Interface {
return c.chaosMonkeyConfigurationV1alpha1
@@ -74,6 +82,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
var cs Clientset
var err error
+ cs.chaosMonkeyConfigurationV1, err = chaosmonkeyconfigurationv1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
cs.chaosMonkeyConfigurationV1alpha1, err = chaosmonkeyconfigurationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
@@ -99,6 +111,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
+ cs.chaosMonkeyConfigurationV1 = chaosmonkeyconfigurationv1.New(c)
cs.chaosMonkeyConfigurationV1alpha1 = chaosmonkeyconfigurationv1alpha1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
diff --git a/internal/apis/clientset/versioned/fake/clientset_generated.go b/internal/apis/clientset/versioned/fake/clientset_generated.go
index 4f9bb71..de8dd4a 100644
--- a/internal/apis/clientset/versioned/fake/clientset_generated.go
+++ b/internal/apis/clientset/versioned/fake/clientset_generated.go
@@ -6,6 +6,8 @@ package fake
import (
clientset "github.com/massix/chaos-monkey/internal/apis/clientset/versioned"
+ chaosmonkeyconfigurationv1 "github.com/massix/chaos-monkey/internal/apis/clientset/versioned/typed/apis/v1"
+ fakechaosmonkeyconfigurationv1 "github.com/massix/chaos-monkey/internal/apis/clientset/versioned/typed/apis/v1/fake"
chaosmonkeyconfigurationv1alpha1 "github.com/massix/chaos-monkey/internal/apis/clientset/versioned/typed/apis/v1alpha1"
fakechaosmonkeyconfigurationv1alpha1 "github.com/massix/chaos-monkey/internal/apis/clientset/versioned/typed/apis/v1alpha1/fake"
"k8s.io/apimachinery/pkg/runtime"
@@ -65,6 +67,11 @@ var (
_ testing.FakeClient = &Clientset{}
)
+// ChaosMonkeyConfigurationV1 retrieves the ChaosMonkeyConfigurationV1Client
+func (c *Clientset) ChaosMonkeyConfigurationV1() chaosmonkeyconfigurationv1.ChaosMonkeyConfigurationV1Interface {
+ return &fakechaosmonkeyconfigurationv1.FakeChaosMonkeyConfigurationV1{Fake: &c.Fake}
+}
+
// ChaosMonkeyConfigurationV1alpha1 retrieves the ChaosMonkeyConfigurationV1alpha1Client
func (c *Clientset) ChaosMonkeyConfigurationV1alpha1() chaosmonkeyconfigurationv1alpha1.ChaosMonkeyConfigurationV1alpha1Interface {
return &fakechaosmonkeyconfigurationv1alpha1.FakeChaosMonkeyConfigurationV1alpha1{Fake: &c.Fake}
diff --git a/internal/apis/clientset/versioned/fake/register.go b/internal/apis/clientset/versioned/fake/register.go
index 457ac53..715f41c 100644
--- a/internal/apis/clientset/versioned/fake/register.go
+++ b/internal/apis/clientset/versioned/fake/register.go
@@ -5,6 +5,7 @@
package fake
import (
+ chaosmonkeyconfigurationv1 "github.com/massix/chaos-monkey/internal/apis/v1"
chaosmonkeyconfigurationv1alpha1 "github.com/massix/chaos-monkey/internal/apis/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
@@ -17,6 +18,7 @@ var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
+ chaosmonkeyconfigurationv1.AddToScheme,
chaosmonkeyconfigurationv1alpha1.AddToScheme,
}
diff --git a/internal/apis/clientset/versioned/scheme/register.go b/internal/apis/clientset/versioned/scheme/register.go
index 75f0dbf..e9b1637 100644
--- a/internal/apis/clientset/versioned/scheme/register.go
+++ b/internal/apis/clientset/versioned/scheme/register.go
@@ -5,6 +5,7 @@
package scheme
import (
+ chaosmonkeyconfigurationv1 "github.com/massix/chaos-monkey/internal/apis/v1"
chaosmonkeyconfigurationv1alpha1 "github.com/massix/chaos-monkey/internal/apis/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
@@ -17,6 +18,7 @@ var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
+ chaosmonkeyconfigurationv1.AddToScheme,
chaosmonkeyconfigurationv1alpha1.AddToScheme,
}
diff --git a/internal/apis/clientset/versioned/typed/apis/v1/apis_client.go b/internal/apis/clientset/versioned/typed/apis/v1/apis_client.go
new file mode 100644
index 0000000..9802113
--- /dev/null
+++ b/internal/apis/clientset/versioned/typed/apis/v1/apis_client.go
@@ -0,0 +1,93 @@
+// Generated code, do not touch
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "net/http"
+
+ "github.com/massix/chaos-monkey/internal/apis/clientset/versioned/scheme"
+ v1 "github.com/massix/chaos-monkey/internal/apis/v1"
+ rest "k8s.io/client-go/rest"
+)
+
+type ChaosMonkeyConfigurationV1Interface interface {
+ RESTClient() rest.Interface
+ ChaosMonkeyConfigurationsGetter
+}
+
+// ChaosMonkeyConfigurationV1Client is used to interact with features provided by the cm.massix.github.io group.
+type ChaosMonkeyConfigurationV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *ChaosMonkeyConfigurationV1Client) ChaosMonkeyConfigurations(namespace string) ChaosMonkeyConfigurationInterface {
+ return newChaosMonkeyConfigurations(c, namespace)
+}
+
+// NewForConfig creates a new ChaosMonkeyConfigurationV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*ChaosMonkeyConfigurationV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new ChaosMonkeyConfigurationV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ChaosMonkeyConfigurationV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &ChaosMonkeyConfigurationV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new ChaosMonkeyConfigurationV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *ChaosMonkeyConfigurationV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new ChaosMonkeyConfigurationV1Client for the given RESTClient.
+func New(c rest.Interface) *ChaosMonkeyConfigurationV1Client {
+ return &ChaosMonkeyConfigurationV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *ChaosMonkeyConfigurationV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/internal/apis/clientset/versioned/typed/apis/v1/chaosmonkeyconfiguration.go b/internal/apis/clientset/versioned/typed/apis/v1/chaosmonkeyconfiguration.go
new file mode 100644
index 0000000..f533059
--- /dev/null
+++ b/internal/apis/clientset/versioned/typed/apis/v1/chaosmonkeyconfiguration.go
@@ -0,0 +1,164 @@
+// Generated code, do not touch
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ scheme "github.com/massix/chaos-monkey/internal/apis/clientset/versioned/scheme"
+ v1 "github.com/massix/chaos-monkey/internal/apis/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ChaosMonkeyConfigurationsGetter has a method to return a ChaosMonkeyConfigurationInterface.
+// A group's client should implement this interface.
+type ChaosMonkeyConfigurationsGetter interface {
+ ChaosMonkeyConfigurations(namespace string) ChaosMonkeyConfigurationInterface
+}
+
+// ChaosMonkeyConfigurationInterface has methods to work with ChaosMonkeyConfiguration resources.
+type ChaosMonkeyConfigurationInterface interface {
+ Create(ctx context.Context, chaosMonkeyConfiguration *v1.ChaosMonkeyConfiguration, opts metav1.CreateOptions) (*v1.ChaosMonkeyConfiguration, error)
+ Update(ctx context.Context, chaosMonkeyConfiguration *v1.ChaosMonkeyConfiguration, opts metav1.UpdateOptions) (*v1.ChaosMonkeyConfiguration, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ChaosMonkeyConfiguration, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ChaosMonkeyConfigurationList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ChaosMonkeyConfiguration, err error)
+ ChaosMonkeyConfigurationExpansion
+}
+
+// chaosMonkeyConfigurations implements ChaosMonkeyConfigurationInterface
+type chaosMonkeyConfigurations struct {
+ client rest.Interface
+ ns string
+}
+
+// newChaosMonkeyConfigurations returns a ChaosMonkeyConfigurations
+func newChaosMonkeyConfigurations(c *ChaosMonkeyConfigurationV1Client, namespace string) *chaosMonkeyConfigurations {
+ return &chaosMonkeyConfigurations{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the chaosMonkeyConfiguration, and returns the corresponding chaosMonkeyConfiguration object, and an error if there is any.
+func (c *chaosMonkeyConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ChaosMonkeyConfiguration, err error) {
+ result = &v1.ChaosMonkeyConfiguration{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("chaosmonkeyconfigurations").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ChaosMonkeyConfigurations that match those selectors.
+func (c *chaosMonkeyConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ChaosMonkeyConfigurationList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ChaosMonkeyConfigurationList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("chaosmonkeyconfigurations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested chaosMonkeyConfigurations.
+func (c *chaosMonkeyConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("chaosmonkeyconfigurations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a chaosMonkeyConfiguration and creates it. Returns the server's representation of the chaosMonkeyConfiguration, and an error, if there is any.
+func (c *chaosMonkeyConfigurations) Create(ctx context.Context, chaosMonkeyConfiguration *v1.ChaosMonkeyConfiguration, opts metav1.CreateOptions) (result *v1.ChaosMonkeyConfiguration, err error) {
+ result = &v1.ChaosMonkeyConfiguration{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("chaosmonkeyconfigurations").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(chaosMonkeyConfiguration).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a chaosMonkeyConfiguration and updates it. Returns the server's representation of the chaosMonkeyConfiguration, and an error, if there is any.
+func (c *chaosMonkeyConfigurations) Update(ctx context.Context, chaosMonkeyConfiguration *v1.ChaosMonkeyConfiguration, opts metav1.UpdateOptions) (result *v1.ChaosMonkeyConfiguration, err error) {
+ result = &v1.ChaosMonkeyConfiguration{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("chaosmonkeyconfigurations").
+ Name(chaosMonkeyConfiguration.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(chaosMonkeyConfiguration).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the chaosMonkeyConfiguration and deletes it. Returns an error if one occurs.
+func (c *chaosMonkeyConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("chaosmonkeyconfigurations").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *chaosMonkeyConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("chaosmonkeyconfigurations").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched chaosMonkeyConfiguration.
+func (c *chaosMonkeyConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ChaosMonkeyConfiguration, err error) {
+ result = &v1.ChaosMonkeyConfiguration{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("chaosmonkeyconfigurations").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
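
For orientation, a minimal sketch of how this generated typed client is consumed from operator code. It assumes the aggregated clientset package exposes the usual client-gen NewForConfig constructor (not shown in this hunk); the ChaosMonkeyConfigurationV1() getter matches the fake client further down.

package example

import (
	"context"
	"fmt"

	versioned "github.com/massix/chaos-monkey/internal/apis/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

// listConfigurations lists every ChaosMonkeyConfiguration in the given
// namespace through the generated typed client.
func listConfigurations(namespace string) error {
	// In-cluster configuration; any rest.Config would do here.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return err
	}

	// NewForConfig is the standard client-gen constructor (assumed, not part of this hunk).
	clientset, err := versioned.NewForConfig(cfg)
	if err != nil {
		return err
	}

	list, err := clientset.ChaosMonkeyConfigurationV1().
		ChaosMonkeyConfigurations(namespace).
		List(context.Background(), metav1.ListOptions{})
	if err != nil {
		return err
	}

	for _, cmc := range list.Items {
		fmt.Printf("%s/%s targets deployment %q\n", cmc.Namespace, cmc.Name, cmc.Spec.Deployment.Name)
	}

	return nil
}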
diff --git a/internal/apis/clientset/versioned/typed/apis/v1/doc.go b/internal/apis/clientset/versioned/typed/apis/v1/doc.go
new file mode 100644
index 0000000..8d6d4e5
--- /dev/null
+++ b/internal/apis/clientset/versioned/typed/apis/v1/doc.go
@@ -0,0 +1,6 @@
+// Generated code, do not touch
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/internal/apis/clientset/versioned/typed/apis/v1/fake/doc.go b/internal/apis/clientset/versioned/typed/apis/v1/fake/doc.go
new file mode 100644
index 0000000..e321864
--- /dev/null
+++ b/internal/apis/clientset/versioned/typed/apis/v1/fake/doc.go
@@ -0,0 +1,6 @@
+// Generated code, do not touch
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/internal/apis/clientset/versioned/typed/apis/v1/fake/fake_apis_client.go b/internal/apis/clientset/versioned/typed/apis/v1/fake/fake_apis_client.go
new file mode 100644
index 0000000..f1ee961
--- /dev/null
+++ b/internal/apis/clientset/versioned/typed/apis/v1/fake/fake_apis_client.go
@@ -0,0 +1,26 @@
+// Generated code, do not touch
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "github.com/massix/chaos-monkey/internal/apis/clientset/versioned/typed/apis/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeChaosMonkeyConfigurationV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeChaosMonkeyConfigurationV1) ChaosMonkeyConfigurations(namespace string) v1.ChaosMonkeyConfigurationInterface {
+ return &FakeChaosMonkeyConfigurations{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeChaosMonkeyConfigurationV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/internal/apis/clientset/versioned/typed/apis/v1/fake/fake_chaosmonkeyconfiguration.go b/internal/apis/clientset/versioned/typed/apis/v1/fake/fake_chaosmonkeyconfiguration.go
new file mode 100644
index 0000000..6faf299
--- /dev/null
+++ b/internal/apis/clientset/versioned/typed/apis/v1/fake/fake_chaosmonkeyconfiguration.go
@@ -0,0 +1,115 @@
+// Generated code, do not touch
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1 "github.com/massix/chaos-monkey/internal/apis/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeChaosMonkeyConfigurations implements ChaosMonkeyConfigurationInterface
+type FakeChaosMonkeyConfigurations struct {
+ Fake *FakeChaosMonkeyConfigurationV1
+ ns string
+}
+
+var chaosmonkeyconfigurationsResource = v1.SchemeGroupVersion.WithResource("chaosmonkeyconfigurations")
+
+var chaosmonkeyconfigurationsKind = v1.SchemeGroupVersion.WithKind("ChaosMonkeyConfiguration")
+
+// Get takes name of the chaosMonkeyConfiguration, and returns the corresponding chaosMonkeyConfiguration object, and an error if there is any.
+func (c *FakeChaosMonkeyConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ChaosMonkeyConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(chaosmonkeyconfigurationsResource, c.ns, name), &v1.ChaosMonkeyConfiguration{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.ChaosMonkeyConfiguration), err
+}
+
+// List takes label and field selectors, and returns the list of ChaosMonkeyConfigurations that match those selectors.
+func (c *FakeChaosMonkeyConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ChaosMonkeyConfigurationList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(chaosmonkeyconfigurationsResource, chaosmonkeyconfigurationsKind, c.ns, opts), &v1.ChaosMonkeyConfigurationList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1.ChaosMonkeyConfigurationList{ListMeta: obj.(*v1.ChaosMonkeyConfigurationList).ListMeta}
+ for _, item := range obj.(*v1.ChaosMonkeyConfigurationList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested chaosMonkeyConfigurations.
+func (c *FakeChaosMonkeyConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(chaosmonkeyconfigurationsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a chaosMonkeyConfiguration and creates it. Returns the server's representation of the chaosMonkeyConfiguration, and an error, if there is any.
+func (c *FakeChaosMonkeyConfigurations) Create(ctx context.Context, chaosMonkeyConfiguration *v1.ChaosMonkeyConfiguration, opts metav1.CreateOptions) (result *v1.ChaosMonkeyConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(chaosmonkeyconfigurationsResource, c.ns, chaosMonkeyConfiguration), &v1.ChaosMonkeyConfiguration{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.ChaosMonkeyConfiguration), err
+}
+
+// Update takes the representation of a chaosMonkeyConfiguration and updates it. Returns the server's representation of the chaosMonkeyConfiguration, and an error, if there is any.
+func (c *FakeChaosMonkeyConfigurations) Update(ctx context.Context, chaosMonkeyConfiguration *v1.ChaosMonkeyConfiguration, opts metav1.UpdateOptions) (result *v1.ChaosMonkeyConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(chaosmonkeyconfigurationsResource, c.ns, chaosMonkeyConfiguration), &v1.ChaosMonkeyConfiguration{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.ChaosMonkeyConfiguration), err
+}
+
+// Delete takes name of the chaosMonkeyConfiguration and deletes it. Returns an error if one occurs.
+func (c *FakeChaosMonkeyConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(chaosmonkeyconfigurationsResource, c.ns, name, opts), &v1.ChaosMonkeyConfiguration{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeChaosMonkeyConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(chaosmonkeyconfigurationsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1.ChaosMonkeyConfigurationList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched chaosMonkeyConfiguration.
+func (c *FakeChaosMonkeyConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ChaosMonkeyConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(chaosmonkeyconfigurationsResource, c.ns, name, pt, data, subresources...), &v1.ChaosMonkeyConfiguration{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.ChaosMonkeyConfiguration), err
+}
diff --git a/internal/apis/clientset/versioned/typed/apis/v1/generated_expansion.go b/internal/apis/clientset/versioned/typed/apis/v1/generated_expansion.go
new file mode 100644
index 0000000..fc2b08c
--- /dev/null
+++ b/internal/apis/clientset/versioned/typed/apis/v1/generated_expansion.go
@@ -0,0 +1,7 @@
+// Generated code, do not touch
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type ChaosMonkeyConfigurationExpansion interface{}
diff --git a/internal/apis/clientset/versioned/typed/apis/v1alpha1/chaosmonkeyconfiguration.go b/internal/apis/clientset/versioned/typed/apis/v1alpha1/chaosmonkeyconfiguration.go
index 28622c0..aa6bf35 100644
--- a/internal/apis/clientset/versioned/typed/apis/v1alpha1/chaosmonkeyconfiguration.go
+++ b/internal/apis/clientset/versioned/typed/apis/v1alpha1/chaosmonkeyconfiguration.go
@@ -26,7 +26,6 @@ type ChaosMonkeyConfigurationsGetter interface {
type ChaosMonkeyConfigurationInterface interface {
Create(ctx context.Context, chaosMonkeyConfiguration *v1alpha1.ChaosMonkeyConfiguration, opts v1.CreateOptions) (*v1alpha1.ChaosMonkeyConfiguration, error)
Update(ctx context.Context, chaosMonkeyConfiguration *v1alpha1.ChaosMonkeyConfiguration, opts v1.UpdateOptions) (*v1alpha1.ChaosMonkeyConfiguration, error)
- UpdateStatus(ctx context.Context, chaosMonkeyConfiguration *v1alpha1.ChaosMonkeyConfiguration, opts v1.UpdateOptions) (*v1alpha1.ChaosMonkeyConfiguration, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ChaosMonkeyConfiguration, error)
@@ -122,22 +121,6 @@ func (c *chaosMonkeyConfigurations) Update(ctx context.Context, chaosMonkeyConfi
return
}
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *chaosMonkeyConfigurations) UpdateStatus(ctx context.Context, chaosMonkeyConfiguration *v1alpha1.ChaosMonkeyConfiguration, opts v1.UpdateOptions) (result *v1alpha1.ChaosMonkeyConfiguration, err error) {
- result = &v1alpha1.ChaosMonkeyConfiguration{}
- err = c.client.Put().
- Namespace(c.ns).
- Resource("chaosmonkeyconfigurations").
- Name(chaosMonkeyConfiguration.Name).
- SubResource("status").
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(chaosMonkeyConfiguration).
- Do(ctx).
- Into(result)
- return
-}
-
// Delete takes name of the chaosMonkeyConfiguration and deletes it. Returns an error if one occurs.
func (c *chaosMonkeyConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
diff --git a/internal/apis/clientset/versioned/typed/apis/v1alpha1/fake/fake_chaosmonkeyconfiguration.go b/internal/apis/clientset/versioned/typed/apis/v1alpha1/fake/fake_chaosmonkeyconfiguration.go
index 19e47b4..08b9dbc 100644
--- a/internal/apis/clientset/versioned/typed/apis/v1alpha1/fake/fake_chaosmonkeyconfiguration.go
+++ b/internal/apis/clientset/versioned/typed/apis/v1alpha1/fake/fake_chaosmonkeyconfiguration.go
@@ -87,18 +87,6 @@ func (c *FakeChaosMonkeyConfigurations) Update(ctx context.Context, chaosMonkeyC
return obj.(*v1alpha1.ChaosMonkeyConfiguration), err
}
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakeChaosMonkeyConfigurations) UpdateStatus(ctx context.Context, chaosMonkeyConfiguration *v1alpha1.ChaosMonkeyConfiguration, opts v1.UpdateOptions) (*v1alpha1.ChaosMonkeyConfiguration, error) {
- obj, err := c.Fake.
- Invokes(testing.NewUpdateSubresourceAction(chaosmonkeyconfigurationsResource, "status", c.ns, chaosMonkeyConfiguration), &v1alpha1.ChaosMonkeyConfiguration{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*v1alpha1.ChaosMonkeyConfiguration), err
-}
-
// Delete takes name of the chaosMonkeyConfiguration and deletes it. Returns an error if one occurs.
func (c *FakeChaosMonkeyConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
_, err := c.Fake.
diff --git a/internal/apis/informers/externalversions/apis/interface.go b/internal/apis/informers/externalversions/apis/interface.go
index 6a9694c..b1ae191 100644
--- a/internal/apis/informers/externalversions/apis/interface.go
+++ b/internal/apis/informers/externalversions/apis/interface.go
@@ -5,12 +5,15 @@
package apis
import (
+ v1 "github.com/massix/chaos-monkey/internal/apis/informers/externalversions/apis/v1"
v1alpha1 "github.com/massix/chaos-monkey/internal/apis/informers/externalversions/apis/v1alpha1"
internalinterfaces "github.com/massix/chaos-monkey/internal/apis/informers/externalversions/internalinterfaces"
)
// Interface provides access to each of this group's versions.
type Interface interface {
+ // V1 provides access to shared informers for resources in V1.
+ V1() v1.Interface
// V1alpha1 provides access to shared informers for resources in V1alpha1.
V1alpha1() v1alpha1.Interface
}
@@ -26,6 +29,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
+// V1 returns a new v1.Interface.
+func (g *group) V1() v1.Interface {
+ return v1.New(g.factory, g.namespace, g.tweakListOptions)
+}
+
// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
diff --git a/internal/apis/informers/externalversions/apis/v1/chaosmonkeyconfiguration.go b/internal/apis/informers/externalversions/apis/v1/chaosmonkeyconfiguration.go
new file mode 100644
index 0000000..3d40882
--- /dev/null
+++ b/internal/apis/informers/externalversions/apis/v1/chaosmonkeyconfiguration.go
@@ -0,0 +1,76 @@
+// Generated code, do not touch
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ versioned "github.com/massix/chaos-monkey/internal/apis/clientset/versioned"
+ internalinterfaces "github.com/massix/chaos-monkey/internal/apis/informers/externalversions/internalinterfaces"
+ v1 "github.com/massix/chaos-monkey/internal/apis/listers/apis/v1"
+ apisv1 "github.com/massix/chaos-monkey/internal/apis/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ChaosMonkeyConfigurationInformer provides access to a shared informer and lister for
+// ChaosMonkeyConfigurations.
+type ChaosMonkeyConfigurationInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ChaosMonkeyConfigurationLister
+}
+
+type chaosMonkeyConfigurationInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewChaosMonkeyConfigurationInformer constructs a new informer for ChaosMonkeyConfiguration type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewChaosMonkeyConfigurationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredChaosMonkeyConfigurationInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredChaosMonkeyConfigurationInformer constructs a new informer for ChaosMonkeyConfiguration type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredChaosMonkeyConfigurationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ChaosMonkeyConfigurationV1().ChaosMonkeyConfigurations(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.ChaosMonkeyConfigurationV1().ChaosMonkeyConfigurations(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &apisv1.ChaosMonkeyConfiguration{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *chaosMonkeyConfigurationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredChaosMonkeyConfigurationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *chaosMonkeyConfigurationInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&apisv1.ChaosMonkeyConfiguration{}, f.defaultInformer)
+}
+
+func (f *chaosMonkeyConfigurationInformer) Lister() v1.ChaosMonkeyConfigurationLister {
+ return v1.NewChaosMonkeyConfigurationLister(f.Informer().GetIndexer())
+}
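
A short sketch of the factory-based wiring the generated comment recommends (one shared informer rather than independent watches). NewSharedInformerFactory, Start and WaitForCacheSync come from the informer-gen factory package, which is not part of this hunk, so their presence is an assumption; the ChaosMonkeyConfiguration().V1() accessor chain is the one used by generic.go below.

package example

import (
	"time"

	versioned "github.com/massix/chaos-monkey/internal/apis/clientset/versioned"
	"github.com/massix/chaos-monkey/internal/apis/informers/externalversions"
	apisv1 "github.com/massix/chaos-monkey/internal/apis/v1"
	"k8s.io/client-go/tools/cache"
)

// watchConfigurations wires a shared informer for ChaosMonkeyConfiguration
// through the generated factory and reacts to new objects.
func watchConfigurations(clientset versioned.Interface, stopCh <-chan struct{}) {
	factory := externalversions.NewSharedInformerFactory(clientset, 30*time.Second)

	informer := factory.ChaosMonkeyConfiguration().V1().ChaosMonkeyConfigurations().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			cmc := obj.(*apisv1.ChaosMonkeyConfiguration)
			_ = cmc // react to the new configuration here
		},
	})

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
}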
diff --git a/internal/apis/informers/externalversions/apis/v1/interface.go b/internal/apis/informers/externalversions/apis/v1/interface.go
new file mode 100644
index 0000000..60b41cf
--- /dev/null
+++ b/internal/apis/informers/externalversions/apis/v1/interface.go
@@ -0,0 +1,31 @@
+// Generated code, do not touch
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ internalinterfaces "github.com/massix/chaos-monkey/internal/apis/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // ChaosMonkeyConfigurations returns a ChaosMonkeyConfigurationInformer.
+ ChaosMonkeyConfigurations() ChaosMonkeyConfigurationInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// ChaosMonkeyConfigurations returns a ChaosMonkeyConfigurationInformer.
+func (v *version) ChaosMonkeyConfigurations() ChaosMonkeyConfigurationInformer {
+ return &chaosMonkeyConfigurationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/internal/apis/informers/externalversions/generic.go b/internal/apis/informers/externalversions/generic.go
index ce1b34e..5d914a1 100644
--- a/internal/apis/informers/externalversions/generic.go
+++ b/internal/apis/informers/externalversions/generic.go
@@ -7,6 +7,7 @@ package externalversions
import (
"fmt"
+ v1 "github.com/massix/chaos-monkey/internal/apis/v1"
v1alpha1 "github.com/massix/chaos-monkey/internal/apis/v1alpha1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
@@ -38,7 +39,11 @@ func (f *genericInformer) Lister() cache.GenericLister {
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
switch resource {
- // Group=cm.massix.github.io, Version=v1alpha1
+ // Group=cm.massix.github.io, Version=v1
+ case v1.SchemeGroupVersion.WithResource("chaosmonkeyconfigurations"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.ChaosMonkeyConfiguration().V1().ChaosMonkeyConfigurations().Informer()}, nil
+
+ // Group=cm.massix.github.io, Version=v1alpha1
case v1alpha1.SchemeGroupVersion.WithResource("chaosmonkeyconfigurations"):
return &genericInformer{resource: resource.GroupResource(), informer: f.ChaosMonkeyConfiguration().V1alpha1().ChaosMonkeyConfigurations().Informer()}, nil
diff --git a/internal/apis/listers/apis/v1/chaosmonkeyconfiguration.go b/internal/apis/listers/apis/v1/chaosmonkeyconfiguration.go
new file mode 100644
index 0000000..586d002
--- /dev/null
+++ b/internal/apis/listers/apis/v1/chaosmonkeyconfiguration.go
@@ -0,0 +1,85 @@
+// Generated code, do not touch
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ v1 "github.com/massix/chaos-monkey/internal/apis/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ChaosMonkeyConfigurationLister helps list ChaosMonkeyConfigurations.
+// All objects returned here must be treated as read-only.
+type ChaosMonkeyConfigurationLister interface {
+ // List lists all ChaosMonkeyConfigurations in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.ChaosMonkeyConfiguration, err error)
+ // ChaosMonkeyConfigurations returns an object that can list and get ChaosMonkeyConfigurations.
+ ChaosMonkeyConfigurations(namespace string) ChaosMonkeyConfigurationNamespaceLister
+ ChaosMonkeyConfigurationListerExpansion
+}
+
+// chaosMonkeyConfigurationLister implements the ChaosMonkeyConfigurationLister interface.
+type chaosMonkeyConfigurationLister struct {
+ indexer cache.Indexer
+}
+
+// NewChaosMonkeyConfigurationLister returns a new ChaosMonkeyConfigurationLister.
+func NewChaosMonkeyConfigurationLister(indexer cache.Indexer) ChaosMonkeyConfigurationLister {
+ return &chaosMonkeyConfigurationLister{indexer: indexer}
+}
+
+// List lists all ChaosMonkeyConfigurations in the indexer.
+func (s *chaosMonkeyConfigurationLister) List(selector labels.Selector) (ret []*v1.ChaosMonkeyConfiguration, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.ChaosMonkeyConfiguration))
+ })
+ return ret, err
+}
+
+// ChaosMonkeyConfigurations returns an object that can list and get ChaosMonkeyConfigurations.
+func (s *chaosMonkeyConfigurationLister) ChaosMonkeyConfigurations(namespace string) ChaosMonkeyConfigurationNamespaceLister {
+ return chaosMonkeyConfigurationNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// ChaosMonkeyConfigurationNamespaceLister helps list and get ChaosMonkeyConfigurations.
+// All objects returned here must be treated as read-only.
+type ChaosMonkeyConfigurationNamespaceLister interface {
+ // List lists all ChaosMonkeyConfigurations in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.ChaosMonkeyConfiguration, err error)
+ // Get retrieves the ChaosMonkeyConfiguration from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.ChaosMonkeyConfiguration, error)
+ ChaosMonkeyConfigurationNamespaceListerExpansion
+}
+
+// chaosMonkeyConfigurationNamespaceLister implements the ChaosMonkeyConfigurationNamespaceLister
+// interface.
+type chaosMonkeyConfigurationNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all ChaosMonkeyConfigurations in the indexer for a given namespace.
+func (s chaosMonkeyConfigurationNamespaceLister) List(selector labels.Selector) (ret []*v1.ChaosMonkeyConfiguration, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.ChaosMonkeyConfiguration))
+ })
+ return ret, err
+}
+
+// Get retrieves the ChaosMonkeyConfiguration from the indexer for a given namespace and name.
+func (s chaosMonkeyConfigurationNamespaceLister) Get(name string) (*v1.ChaosMonkeyConfiguration, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("chaosmonkeyconfiguration"), name)
+ }
+ return obj.(*v1.ChaosMonkeyConfiguration), nil
+}
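
A small illustration of the read-only contract spelled out above: lister reads come straight out of the informer cache, so anything the caller intends to mutate should be deep-copied first. An already-synced informer providing the indexer is assumed.

package example

import (
	listersv1 "github.com/massix/chaos-monkey/internal/apis/listers/apis/v1"
	apisv1 "github.com/massix/chaos-monkey/internal/apis/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// enabledConfigurations returns copies of the enabled configurations in a namespace.
// The lister hands out shared cache objects, hence the DeepCopy before returning them.
func enabledConfigurations(lister listersv1.ChaosMonkeyConfigurationLister, namespace string) ([]*apisv1.ChaosMonkeyConfiguration, error) {
	all, err := lister.ChaosMonkeyConfigurations(namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}

	var enabled []*apisv1.ChaosMonkeyConfiguration
	for _, cmc := range all {
		if cmc.Spec.Enabled {
			enabled = append(enabled, cmc.DeepCopy())
		}
	}

	return enabled, nil
}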
diff --git a/internal/apis/listers/apis/v1/expansion_generated.go b/internal/apis/listers/apis/v1/expansion_generated.go
new file mode 100644
index 0000000..c2cb29d
--- /dev/null
+++ b/internal/apis/listers/apis/v1/expansion_generated.go
@@ -0,0 +1,13 @@
+// Generated code, do not touch
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+// ChaosMonkeyConfigurationListerExpansion allows custom methods to be added to
+// ChaosMonkeyConfigurationLister.
+type ChaosMonkeyConfigurationListerExpansion interface{}
+
+// ChaosMonkeyConfigurationNamespaceListerExpansion allows custom methods to be added to
+// ChaosMonkeyConfigurationNamespaceLister.
+type ChaosMonkeyConfigurationNamespaceListerExpansion interface{}
diff --git a/internal/apis/v1/doc.go b/internal/apis/v1/doc.go
new file mode 100644
index 0000000..14c0bb6
--- /dev/null
+++ b/internal/apis/v1/doc.go
@@ -0,0 +1,5 @@
+// +k8s:deepcopy-gen=package
+// +groupName=cm.massix.github.io
+// +groupGoName=ChaosMonkeyConfiguration
+
+package v1 // import "github.com/massix/chaos-monkey/internal/apis/v1"
diff --git a/internal/apis/v1/register.go b/internal/apis/v1/register.go
new file mode 100644
index 0000000..ad30e93
--- /dev/null
+++ b/internal/apis/v1/register.go
@@ -0,0 +1,32 @@
+package v1
+
+import (
+ cmmassixgithubio "github.com/massix/chaos-monkey/internal/apis"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var SchemeGroupVersion = schema.GroupVersion{Group: cmmassixgithubio.GroupName, Version: "v1"}
+
+var (
+ SchemeBuilder runtime.SchemeBuilder
+ localSchemeBuilder = &SchemeBuilder
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+ localSchemeBuilder.Register(func(s *runtime.Scheme) error {
+ s.AddKnownTypes(SchemeGroupVersion, &ChaosMonkeyConfiguration{}, &ChaosMonkeyConfigurationList{})
+ metav1.AddToGroupVersion(s, SchemeGroupVersion)
+ return nil
+ })
+}
+
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
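
For illustration only, how the AddToScheme hook above is typically consumed: registering the v1 types into a fresh runtime.Scheme and deriving a codec factory from it. Nothing in this patch requires this snippet; it is a sketch of the intended usage.

package example

import (
	v1 "github.com/massix/chaos-monkey/internal/apis/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// newScheme builds a scheme that knows about ChaosMonkeyConfiguration v1
// (along with what the init() above registers) and a matching codec factory.
func newScheme() (*runtime.Scheme, serializer.CodecFactory, error) {
	scheme := runtime.NewScheme()
	if err := v1.AddToScheme(scheme); err != nil {
		return nil, serializer.CodecFactory{}, err
	}

	return scheme, serializer.NewCodecFactory(scheme), nil
}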
diff --git a/internal/apis/v1/types.go b/internal/apis/v1/types.go
new file mode 100644
index 0000000..bc970f5
--- /dev/null
+++ b/internal/apis/v1/types.go
@@ -0,0 +1,230 @@
+package v1
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+type ScalingMode string
+
+const (
+ ScalingModeRandomScale ScalingMode = "randomScale"
+ ScalingModeAntiPressure ScalingMode = "antiPressure"
+ ScalingModeKillPod ScalingMode = "killPod"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type ChaosMonkeyConfiguration struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec ChaosMonkeyConfigurationSpec `json:"spec"`
+}
+
+type ChaosMonkeyConfigurationSpec struct {
+ Enabled bool `json:"enabled"`
+ MinReplicas int `json:"minReplicas"`
+ MaxReplicas int `json:"maxReplicas"`
+ ScalingMode ScalingMode `json:"scalingMode"`
+ Deployment ChaosMonkeyConfigurationSpecDeployment `json:"deployment"`
+ Timeout time.Duration `json:"timeout"`
+}
+
+var (
+ _ = (json.Marshaler)((*ChaosMonkeyConfigurationSpec)(nil))
+ _ = (json.Unmarshaler)((*ChaosMonkeyConfigurationSpec)(nil))
+)
+
+type ChaosMonkeyConfigurationSpecDeployment struct {
+ Name string `json:"name"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type ChaosMonkeyConfigurationList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ChaosMonkeyConfiguration `json:"items"`
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (c *ChaosMonkeyConfigurationSpec) UnmarshalJSON(data []byte) error {
+ logger := logrus.WithFields(logrus.Fields{"component": "CMCUnmarshaller"})
+ logger.Debugf("Unmarshalling CMC: %q", string(data))
+
+ defaultTimeout := 10 * time.Minute
+
+ var tmp struct {
+ Enabled bool `json:"enabled"`
+ MinReplicas int `json:"minReplicas"`
+ MaxReplicas int `json:"maxReplicas"`
+ ScalingMode ScalingMode `json:"scalingMode"`
+ Deployment ChaosMonkeyConfigurationSpecDeployment `json:"deployment"`
+ Timeout string `json:"timeout"`
+ }
+
+ if err := json.Unmarshal(data, &tmp); err != nil {
+ return err
+ }
+
+ logger.Debugf("Intermediate parsing: %+v", tmp)
+
+ c.Enabled = tmp.Enabled
+ c.MinReplicas = tmp.MinReplicas
+ c.MaxReplicas = tmp.MaxReplicas
+ c.ScalingMode = tmp.ScalingMode
+ c.Deployment = tmp.Deployment
+
+ if parsedTimeout, err := time.ParseDuration(tmp.Timeout); err != nil {
+ logger.Warnf("Failed to parse duration: %s, using default: %v", err, defaultTimeout)
+ c.Timeout = defaultTimeout
+ } else {
+ logger.Debugf("Parsed timeout: %v", parsedTimeout)
+ c.Timeout = parsedTimeout
+ }
+
+ return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (c *ChaosMonkeyConfigurationSpec) MarshalJSON() ([]byte, error) {
+ var tmp struct {
+ Enabled bool `json:"enabled"`
+ MinReplicas int `json:"minReplicas"`
+ MaxReplicas int `json:"maxReplicas"`
+ ScalingMode ScalingMode `json:"scalingMode"`
+ Deployment ChaosMonkeyConfigurationSpecDeployment `json:"deployment"`
+ Timeout string `json:"timeout"`
+ }
+
+ tmp.Enabled = c.Enabled
+ tmp.MinReplicas = c.MinReplicas
+ tmp.MaxReplicas = c.MaxReplicas
+ tmp.ScalingMode = c.ScalingMode
+ tmp.Deployment = c.Deployment
+ tmp.Timeout = c.Timeout.String()
+
+ return json.Marshal(tmp)
+}
+
+func (c *ChaosMonkeyConfiguration) ToUnstructured() (*unstructured.Unstructured, error) {
+ ret := &unstructured.Unstructured{}
+
+ ret.SetKind(c.Kind)
+ ret.SetAPIVersion(c.APIVersion)
+ ret.SetName(c.Name)
+ ret.SetNamespace(c.Namespace)
+ ret.SetLabels(c.Labels)
+ ret.SetAnnotations(c.Annotations)
+ ret.SetResourceVersion(c.ResourceVersion)
+ ret.SetManagedFields(c.ManagedFields)
+ ret.SetUID(c.UID)
+
+ // Create the spec field now
+ err := errors.Join(
+ unstructured.SetNestedField(ret.Object, c.Spec.Enabled, "spec", "enabled"),
+ unstructured.SetNestedField(ret.Object, int64(c.Spec.MinReplicas), "spec", "minReplicas"),
+ unstructured.SetNestedField(ret.Object, int64(c.Spec.MaxReplicas), "spec", "maxReplicas"),
+ unstructured.SetNestedField(ret.Object, string(c.Spec.ScalingMode), "spec", "scalingMode"),
+ unstructured.SetNestedField(ret.Object, c.Spec.Deployment.Name, "spec", "deployment", "name"),
+ unstructured.SetNestedField(ret.Object, c.Spec.Timeout.String(), "spec", "timeout"),
+ )
+
+ return ret, err
+}
+
+type fieldFound struct {
+ FieldName string
+ Found bool
+}
+
+func FromUnstructured(in *unstructured.Unstructured) (*ChaosMonkeyConfiguration, error) {
+ if in.GetAPIVersion() != "cm.massix.github.io/v1" {
+ return nil, fmt.Errorf("Wrong APIVersion: %s", in.GetAPIVersion())
+ }
+
+ if in.GetKind() != "ChaosMonkeyConfiguration" {
+ return nil, fmt.Errorf("Wrong Kind: %s", in.GetKind())
+ }
+
+ booleansToErrors := func(in []fieldFound) []error {
+ var errors []error
+ for _, v := range in {
+ if !v.Found {
+ errors = append(errors, fmt.Errorf("Field %s not found", v.FieldName))
+ }
+ }
+
+ return errors
+ }
+
+ res := &ChaosMonkeyConfiguration{}
+ typeMeta := metav1.TypeMeta{}
+ typeMeta.Kind = in.GetKind()
+ typeMeta.APIVersion = in.GetAPIVersion()
+ res.TypeMeta = typeMeta
+
+ objectMeta := metav1.ObjectMeta{}
+ objectMeta.Name = in.GetName()
+ objectMeta.Namespace = in.GetNamespace()
+ objectMeta.Labels = in.GetLabels()
+ objectMeta.Annotations = in.GetAnnotations()
+ objectMeta.ResourceVersion = in.GetResourceVersion()
+ objectMeta.ManagedFields = in.GetManagedFields()
+ objectMeta.UID = in.GetUID()
+ res.ObjectMeta = objectMeta
+
+ spec := ChaosMonkeyConfigurationSpec{}
+ depName, depNameFound, depNameErr := unstructured.NestedString(in.Object, "spec", "deployment", "name")
+ timeout, timeoutFound, timeoutErr := unstructured.NestedString(in.Object, "spec", "timeout")
+ minReplicas, minReplicasFound, minReplicasErr := unstructured.NestedInt64(in.Object, "spec", "minReplicas")
+ maxReplicas, maxReplicasFound, maxReplicasErr := unstructured.NestedInt64(in.Object, "spec", "maxReplicas")
+ enabled, enabledFound, enabledErr := unstructured.NestedBool(in.Object, "spec", "enabled")
+ scalingMode, scalingModeFound, scalingModeErr := unstructured.NestedString(in.Object, "spec", "scalingMode")
+
+ allErrors := errors.Join(
+ depNameErr,
+ timeoutErr,
+ minReplicasErr,
+ maxReplicasErr,
+ enabledErr,
+ scalingModeErr,
+ errors.Join(
+ booleansToErrors([]fieldFound{
+				{".spec.deployment.name", depNameFound},
+ {".spec.timeout", timeoutFound},
+ {".spec.minReplicas", minReplicasFound},
+ {".spec.maxReplicas", maxReplicasFound},
+ {".spec.enabled", enabledFound},
+ {".spec.scalingMode", scalingModeFound},
+ })...,
+ ),
+ )
+
+ if allErrors != nil {
+ return nil, allErrors
+ }
+
+ if parsedTimeout, err := time.ParseDuration(timeout); err != nil {
+ logrus.WithField("component", "CMCv1ToUnstructured").Errorf("Failed to parse timeout: %v", err)
+ spec.Timeout = 10 * time.Minute
+ } else {
+ spec.Timeout = parsedTimeout
+ }
+
+ spec.Deployment = ChaosMonkeyConfigurationSpecDeployment{depName}
+ spec.MinReplicas = int(minReplicas)
+ spec.MaxReplicas = int(maxReplicas)
+ spec.Enabled = enabled
+ spec.ScalingMode = ScalingMode(scalingMode)
+ res.Spec = spec
+
+ return res, nil
+}
diff --git a/internal/apis/v1/types_test.go b/internal/apis/v1/types_test.go
new file mode 100644
index 0000000..0197e8f
--- /dev/null
+++ b/internal/apis/v1/types_test.go
@@ -0,0 +1,229 @@
+package v1
+
+import (
+ "encoding/json"
+ "testing"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+func TestChaosMonkeyConfigurationSpec_UnmarshalJson(t *testing.T) {
+ t.Run("Valid Timeout", func(t *testing.T) {
+ jsonString := `
+ {
+ "kind": "ChaosMonkeyConfiguration",
+ "apiVersion": "cm.massix.github.io/v1",
+ "metadata": {
+ "name": "cmc1",
+ "namespace": "cmc1"
+ },
+ "spec": {
+ "enabled": true,
+ "minReplicas": 0,
+ "maxReplicas": 4,
+ "deployment": { "name": "target-deployment" },
+ "timeout": "20m"
+ }
+ }
+ `
+
+ var cmc ChaosMonkeyConfiguration
+ if err := json.Unmarshal([]byte(jsonString), &cmc); err != nil {
+ t.Fatal(err)
+ }
+
+ if cmc.Spec.Timeout != 20*time.Minute {
+ t.Errorf("Expected timeout to be 20 minutes, got %s", cmc.Spec.Timeout)
+ }
+
+ if cmc.Spec.Deployment.Name != "target-deployment" {
+ t.Errorf("Expected deployment name to be 'target-deployment', got %s", cmc.Spec.Deployment.Name)
+ }
+ })
+
+ t.Run("Invalid Timeout (should use default)", func(t *testing.T) {
+ jsonString := `
+ {
+ "kind": "ChaosMonkeyConfiguration",
+ "apiVersion": "cm.massix.github.io/v1",
+ "metadata": {
+ "name": "cmc1",
+ "namespace": "cmc1"
+ },
+ "spec": {
+ "enabled": true,
+ "minReplicas": 0,
+ "maxReplicas": 4,
+ "deployment": { "name": "target-deployment" },
+ "timeout": "this is not valid"
+ }
+ }
+ `
+
+ var cmc ChaosMonkeyConfiguration
+ if err := json.Unmarshal([]byte(jsonString), &cmc); err != nil {
+ t.Fatal(err)
+ }
+
+ if cmc.Spec.Timeout != 10*time.Minute {
+ t.Errorf("Expected timeout to be 10 minutes (default value), got %s", cmc.Spec.Timeout)
+ }
+
+ if cmc.Spec.Deployment.Name != "target-deployment" {
+ t.Errorf("Expected deployment name to be 'target-deployment', got %s", cmc.Spec.Deployment.Name)
+ }
+ })
+}
+
+func createCMC() *ChaosMonkeyConfiguration {
+ return &ChaosMonkeyConfiguration{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ChaosMonkeyConfiguration",
+ APIVersion: "cm.massix.github.io/v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+ Spec: ChaosMonkeyConfigurationSpec{
+ MinReplicas: 0,
+ MaxReplicas: 3,
+ Timeout: 30 * time.Second,
+ Deployment: ChaosMonkeyConfigurationSpecDeployment{
+ Name: "test",
+ },
+ ScalingMode: ScalingModeAntiPressure,
+ Enabled: false,
+ },
+ }
+}
+
+func TestChaosMonkeyConfigurationSpec_MarshalJson(t *testing.T) {
+ t.Run("Can convert to JSON", func(t *testing.T) {
+ cmc := createCMC()
+
+ b, err := json.Marshal(cmc)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(b) != `{"kind":"ChaosMonkeyConfiguration","apiVersion":"cm.massix.github.io/v1","metadata":{"name":"test","namespace":"test","creationTimestamp":null},"spec":{"enabled":false,"minReplicas":0,"maxReplicas":3,"scalingMode":"antiPressure","deployment":{"name":"test"},"timeout":"30s"}}` {
+ t.Fatalf("Unexpected JSON: %s", string(b))
+ }
+ })
+}
+
+func TestChaosMonkeyConfiguration_ToUnstructured(t *testing.T) {
+ cmc := createCMC()
+ res, err := cmc.ToUnstructured()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.GetAPIVersion() != "cm.massix.github.io/v1" {
+ t.Fatal("Wrong APIVersion")
+ }
+
+ depName, _, _ := unstructured.NestedString(res.Object, "spec", "deployment", "name")
+ if depName != "test" {
+ t.Fatalf("Expected 'test', received %s instead", depName)
+ }
+}
+
+func TestChaosMonkeyConfiguration_FromUnstructured(t *testing.T) {
+ t.Run("All is good", func(t *testing.T) {
+ cmc := createCMC()
+ uns, _ := cmc.ToUnstructured()
+
+ res, err := FromUnstructured(uns)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.Spec.Deployment.Name != "test" {
+ t.Fatalf("Was expecting 'test', received %s instead", res.Spec.Deployment.Name)
+ }
+
+ if res.Spec.Timeout != 30*time.Second {
+ t.Fatalf("Was expecting '30s', received %s instead", res.Spec.Timeout)
+ }
+ })
+
+ t.Run("Missing fields", func(t *testing.T) {
+ cmc := createCMC()
+ uns, _ := cmc.ToUnstructured()
+ unstructured.RemoveNestedField(uns.Object, "spec", "timeout")
+ unstructured.RemoveNestedField(uns.Object, "spec", "deployment", "name")
+
+ _, err := FromUnstructured(uns)
+ if err == nil {
+ t.Fatal("Was expecting error, received nil instead")
+ }
+
+		if err.Error() != "Field .spec.deployment.name not found\nField .spec.timeout not found" {
+ t.Fatalf("Received %q instead", err.Error())
+ }
+ })
+
+ t.Run("Wrong fields", func(t *testing.T) {
+ cmc := createCMC()
+ uns, _ := cmc.ToUnstructured()
+ _ = unstructured.SetNestedField(uns.Object, int64(42), "spec", "deployment", "name")
+
+ _, err := FromUnstructured(uns)
+ if err == nil {
+ t.Fatal("Was expecting error, received nil instead")
+ }
+
+		if err.Error() != ".spec.deployment.name accessor error: 42 is of the type int64, expected string\nField .spec.deployment.name not found" {
+ t.Fatalf("Received %q instead", err.Error())
+ }
+ })
+
+ t.Run("Failed to parse timeout", func(t *testing.T) {
+ cmc := createCMC()
+ uns, _ := cmc.ToUnstructured()
+ _ = unstructured.SetNestedField(uns.Object, "invalid", "spec", "timeout")
+
+ res, err := FromUnstructured(uns)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.Spec.Timeout != 10*time.Minute {
+ t.Fatalf("Was expecting '10m', received %s instead", res.Spec.Timeout)
+ }
+ })
+
+ t.Run("Wrong APIVersion", func(t *testing.T) {
+ uns := &unstructured.Unstructured{}
+ uns.SetKind("ChaosMonkeyConfiguration")
+ uns.SetAPIVersion("wrong")
+
+ _, err := FromUnstructured(uns)
+ if err == nil {
+ t.Fatal("Was expecting error, received nil instead")
+ }
+
+ if err.Error() != "Wrong APIVersion: wrong" {
+ t.Fatalf("Received %q instead", err.Error())
+ }
+ })
+
+ t.Run("Wrong Kind", func(t *testing.T) {
+ uns := &unstructured.Unstructured{}
+ uns.SetAPIVersion("cm.massix.github.io/v1")
+ uns.SetKind("wrong")
+
+ _, err := FromUnstructured(uns)
+ if err == nil {
+ t.Fatal("Was expecting error, received nil instead")
+ }
+
+ if err.Error() != "Wrong Kind: wrong" {
+ t.Fatalf("Received %q instead", err.Error())
+ }
+ })
+}
diff --git a/internal/apis/v1/zz_generated.deepcopy.go b/internal/apis/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..328ee03
--- /dev/null
+++ b/internal/apis/v1/zz_generated.deepcopy.go
@@ -0,0 +1,105 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Generated code, do not touch
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChaosMonkeyConfiguration) DeepCopyInto(out *ChaosMonkeyConfiguration) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaosMonkeyConfiguration.
+func (in *ChaosMonkeyConfiguration) DeepCopy() *ChaosMonkeyConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ChaosMonkeyConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ChaosMonkeyConfiguration) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChaosMonkeyConfigurationList) DeepCopyInto(out *ChaosMonkeyConfigurationList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ChaosMonkeyConfiguration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaosMonkeyConfigurationList.
+func (in *ChaosMonkeyConfigurationList) DeepCopy() *ChaosMonkeyConfigurationList {
+ if in == nil {
+ return nil
+ }
+ out := new(ChaosMonkeyConfigurationList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ChaosMonkeyConfigurationList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChaosMonkeyConfigurationSpec) DeepCopyInto(out *ChaosMonkeyConfigurationSpec) {
+ *out = *in
+ out.Deployment = in.Deployment
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaosMonkeyConfigurationSpec.
+func (in *ChaosMonkeyConfigurationSpec) DeepCopy() *ChaosMonkeyConfigurationSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ChaosMonkeyConfigurationSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChaosMonkeyConfigurationSpecDeployment) DeepCopyInto(out *ChaosMonkeyConfigurationSpecDeployment) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaosMonkeyConfigurationSpecDeployment.
+func (in *ChaosMonkeyConfigurationSpecDeployment) DeepCopy() *ChaosMonkeyConfigurationSpecDeployment {
+ if in == nil {
+ return nil
+ }
+ out := new(ChaosMonkeyConfigurationSpecDeployment)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/internal/apis/v1alpha1/types.go b/internal/apis/v1alpha1/types.go
index 470565c..847dcde 100644
--- a/internal/apis/v1alpha1/types.go
+++ b/internal/apis/v1alpha1/types.go
@@ -1,13 +1,16 @@
package v1alpha1
import (
+ "errors"
+ "fmt"
+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ChaosMonkeyConfiguration struct {
- Status ChaosMonkeyConfigurationStatus `json:"status"`
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec ChaosMonkeyConfigurationSpec `json:"spec"`
@@ -22,12 +25,6 @@ type ChaosMonkeyConfigurationSpec struct {
PodMode bool `json:"podMode"`
}
-type ChaosMonkeyConfigurationStatus struct {
- LastExecution *metav1.Time `json:"lastExecution"`
- LastKnownReplicas *int `json:"lastKnownReplicas"`
- Accepted bool `json:"accepted"`
-}
-
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ChaosMonkeyConfigurationList struct {
metav1.TypeMeta `json:",inline"`
@@ -35,3 +32,113 @@ type ChaosMonkeyConfigurationList struct {
Items []ChaosMonkeyConfiguration `json:"items"`
}
+
+func (c *ChaosMonkeyConfiguration) ToUnstructured() (*unstructured.Unstructured, error) {
+ ret := &unstructured.Unstructured{}
+
+ // Set all the common fields
+ ret.SetKind(c.Kind)
+ ret.SetAPIVersion(c.APIVersion)
+ ret.SetName(c.Name)
+ ret.SetNamespace(c.Namespace)
+ ret.SetLabels(c.Labels)
+ ret.SetAnnotations(c.Annotations)
+ ret.SetResourceVersion(c.ResourceVersion)
+ ret.SetManagedFields(c.ManagedFields)
+ ret.SetUID(c.UID)
+
+ // Now create the spec
+ err := errors.Join(
+ unstructured.SetNestedField(ret.Object, c.Spec.DeploymentName, "spec", "deploymentName"),
+ unstructured.SetNestedField(ret.Object, c.Spec.Timeout, "spec", "timeout"),
+ unstructured.SetNestedField(ret.Object, int64(c.Spec.MinReplicas), "spec", "minReplicas"),
+ unstructured.SetNestedField(ret.Object, int64(c.Spec.MaxReplicas), "spec", "maxReplicas"),
+ unstructured.SetNestedField(ret.Object, c.Spec.Enabled, "spec", "enabled"),
+ unstructured.SetNestedField(ret.Object, c.Spec.PodMode, "spec", "podMode"),
+ )
+
+ return ret, err
+}
+
+type fieldFound struct {
+ FieldName string
+ Found bool
+}
+
+func FromUnstructured(in *unstructured.Unstructured) (*ChaosMonkeyConfiguration, error) {
+ if in.GetAPIVersion() != "cm.massix.github.io/v1alpha1" {
+ return nil, fmt.Errorf("Wrong APIVersion: %s", in.GetAPIVersion())
+ }
+
+ if in.GetKind() != "ChaosMonkeyConfiguration" {
+ return nil, fmt.Errorf("Wrong Kind: %s", in.GetKind())
+ }
+
+ booleansToErrors := func(in []fieldFound) []error {
+ var errors []error
+ for _, v := range in {
+ if !v.Found {
+ errors = append(errors, fmt.Errorf("Field %s not found", v.FieldName))
+ }
+ }
+
+ return errors
+ }
+
+ res := &ChaosMonkeyConfiguration{}
+ typeMeta := metav1.TypeMeta{}
+ typeMeta.Kind = in.GetKind()
+ typeMeta.APIVersion = in.GetAPIVersion()
+ res.TypeMeta = typeMeta
+
+ objectMeta := metav1.ObjectMeta{}
+ objectMeta.Name = in.GetName()
+ objectMeta.Namespace = in.GetNamespace()
+ objectMeta.Labels = in.GetLabels()
+ objectMeta.Annotations = in.GetAnnotations()
+ objectMeta.ResourceVersion = in.GetResourceVersion()
+ objectMeta.ManagedFields = in.GetManagedFields()
+ objectMeta.UID = in.GetUID()
+ res.ObjectMeta = objectMeta
+
+ spec := ChaosMonkeyConfigurationSpec{}
+ depName, depNameFound, depNameErr := unstructured.NestedString(in.Object, "spec", "deploymentName")
+ timeout, timeoutFound, timeoutErr := unstructured.NestedString(in.Object, "spec", "timeout")
+ minReplicas, minReplicasFound, minReplicasErr := unstructured.NestedInt64(in.Object, "spec", "minReplicas")
+ maxReplicas, maxReplicasFound, maxReplicasErr := unstructured.NestedInt64(in.Object, "spec", "maxReplicas")
+ enabled, enabledFound, enabledErr := unstructured.NestedBool(in.Object, "spec", "enabled")
+ podMode, podModeFound, podModeErr := unstructured.NestedBool(in.Object, "spec", "podMode")
+
+ allErrors := errors.Join(
+ depNameErr,
+ timeoutErr,
+ minReplicasErr,
+ maxReplicasErr,
+ enabledErr,
+ podModeErr,
+ errors.Join(
+ booleansToErrors([]fieldFound{
+ {".spec.deploymentName", depNameFound},
+ {".spec.timeout", timeoutFound},
+ {".spec.minReplicas", minReplicasFound},
+ {".spec.maxReplicas", maxReplicasFound},
+ {".spec.enabled", enabledFound},
+ {".spec.podMode", podModeFound},
+ })...,
+ ),
+ )
+
+ if allErrors != nil {
+ return nil, allErrors
+ }
+
+ spec.DeploymentName = depName
+ spec.Timeout = timeout
+ spec.MinReplicas = int(minReplicas)
+ spec.MaxReplicas = int(maxReplicas)
+ spec.Enabled = enabled
+ spec.PodMode = podMode
+ res.Spec = spec
+
+ return res, nil
+}
diff --git a/internal/apis/v1alpha1/types_test.go b/internal/apis/v1alpha1/types_test.go
new file mode 100644
index 0000000..935ecaa
--- /dev/null
+++ b/internal/apis/v1alpha1/types_test.go
@@ -0,0 +1,129 @@
+package v1alpha1_test
+
+import (
+ "testing"
+
+ "github.com/massix/chaos-monkey/internal/apis/v1alpha1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+func createCMC() *v1alpha1.ChaosMonkeyConfiguration {
+ return &v1alpha1.ChaosMonkeyConfiguration{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ChaosMonkeyConfiguration",
+ APIVersion: "cm.massix.github.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ },
+
+ Spec: v1alpha1.ChaosMonkeyConfigurationSpec{
+ MinReplicas: 3,
+ MaxReplicas: 4,
+ Enabled: true,
+ PodMode: false,
+ Timeout: "30s",
+ DeploymentName: "test-deployment",
+ },
+ }
+}
+
+func TestChaosMonkeyConfiguration_ToUnstructured(t *testing.T) {
+ cmc := createCMC()
+
+ res, err := cmc.ToUnstructured()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.GetAPIVersion() != "cm.massix.github.io/v1alpha1" {
+ t.Fatalf("Wrong APIVersion: %s", res.GetAPIVersion())
+ }
+
+ if res.GetName() != "test" {
+ t.Fatalf("Wrong name: %s", res.GetName())
+ }
+
+ depName, _, _ := unstructured.NestedString(res.Object, "spec", "deploymentName")
+ if depName != "test-deployment" {
+ t.Fatalf("Expected 'test-deployment', received %s instead", depName)
+ }
+}
+
+func TestChaosMonkeyConfiguration_FromUnstructured(t *testing.T) {
+ t.Run("All is good", func(t *testing.T) {
+ cmc := createCMC()
+ uns, _ := cmc.ToUnstructured()
+
+ res, err := v1alpha1.FromUnstructured(uns)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.Spec.DeploymentName != "test-deployment" {
+ t.Fatalf("Was expecting 'test-deployment', received %s instead", res.Spec.DeploymentName)
+ }
+ })
+
+ t.Run("Missing fields", func(t *testing.T) {
+ cmc := createCMC()
+ uns, _ := cmc.ToUnstructured()
+ unstructured.RemoveNestedField(uns.Object, "spec", "timeout")
+
+ _, err := v1alpha1.FromUnstructured(uns)
+ if err == nil {
+ t.Fatal("Was expecting error, received nil instead")
+ }
+
+ if err.Error() != "Field .spec.timeout not found" {
+ t.Fatalf("Unexpected error message: %s", err.Error())
+ }
+ })
+
+ t.Run("Wrong format for field", func(t *testing.T) {
+ cmc := createCMC()
+ uns, _ := cmc.ToUnstructured()
+ unstructured.RemoveNestedField(uns.Object, "spec", "deploymentName")
+ _ = unstructured.SetNestedField(uns.Object, int64(42), "spec", "deploymentName")
+
+ _, err := v1alpha1.FromUnstructured(uns)
+ if err == nil {
+ t.Fatal("Was expecting error, received nil instead")
+ }
+
+ if err.Error() != ".spec.deploymentName accessor error: 42 is of the type int64, expected string\nField .spec.deploymentName not found" {
+ t.Fatalf("Unexpected error message: %q", err.Error())
+ }
+ })
+
+ t.Run("Wrong APIVersion", func(t *testing.T) {
+ uns := &unstructured.Unstructured{}
+ uns.SetAPIVersion("wrong")
+
+ _, err := v1alpha1.FromUnstructured(uns)
+ if err == nil {
+ t.Fatal("Was expecting error, received nil instead")
+ }
+
+ if err.Error() != "Wrong APIVersion: wrong" {
+ t.Fatalf("Unexpected error message: %q", err.Error())
+ }
+ })
+
+ t.Run("Wrong Kind", func(t *testing.T) {
+ uns := &unstructured.Unstructured{}
+ uns.SetAPIVersion("cm.massix.github.io/v1alpha1")
+ uns.SetKind("wrong")
+
+ _, err := v1alpha1.FromUnstructured(uns)
+ if err == nil {
+ t.Fatal("Was expecting error, received nil instead")
+ }
+
+ if err.Error() != "Wrong Kind: wrong" {
+ t.Fatalf("Unexpected error message: %q", err.Error())
+ }
+ })
+}
diff --git a/internal/apis/v1alpha1/zz_generated.deepcopy.go b/internal/apis/v1alpha1/zz_generated.deepcopy.go
index e9b752a..52f5078 100644
--- a/internal/apis/v1alpha1/zz_generated.deepcopy.go
+++ b/internal/apis/v1alpha1/zz_generated.deepcopy.go
@@ -14,7 +14,6 @@ import (
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChaosMonkeyConfiguration) DeepCopyInto(out *ChaosMonkeyConfiguration) {
*out = *in
- in.Status.DeepCopyInto(&out.Status)
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
@@ -87,28 +86,3 @@ func (in *ChaosMonkeyConfigurationSpec) DeepCopy() *ChaosMonkeyConfigurationSpec
in.DeepCopyInto(out)
return out
}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ChaosMonkeyConfigurationStatus) DeepCopyInto(out *ChaosMonkeyConfigurationStatus) {
- *out = *in
- if in.LastExecution != nil {
- in, out := &in.LastExecution, &out.LastExecution
- *out = (*in).DeepCopy()
- }
- if in.LastKnownReplicas != nil {
- in, out := &in.LastKnownReplicas, &out.LastKnownReplicas
- *out = new(int)
- **out = **in
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaosMonkeyConfigurationStatus.
-func (in *ChaosMonkeyConfigurationStatus) DeepCopy() *ChaosMonkeyConfigurationStatus {
- if in == nil {
- return nil
- }
- out := new(ChaosMonkeyConfigurationStatus)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/internal/endpoints/conversion.go b/internal/endpoints/conversion.go
new file mode 100644
index 0000000..9a33d20
--- /dev/null
+++ b/internal/endpoints/conversion.go
@@ -0,0 +1,211 @@
+package endpoints
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "time"
+
+ v1 "github.com/massix/chaos-monkey/internal/apis/v1"
+ "github.com/massix/chaos-monkey/internal/apis/v1alpha1"
+ "github.com/sirupsen/logrus"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+type ConversionEndpoint struct {
+ Logrus logrus.FieldLogger
+}
+
+var _ = (http.Handler)((*ConversionEndpoint)(nil))
+
+type conversionReview struct {
+ metav1.TypeMeta `json:",inline"`
+ Request *conversionReviewRequest `json:"request,omitempty"`
+ Response *conversionReviewResponse `json:"response,omitempty"`
+}
+
+// I can safely say that I will always accept a v1alpha1 version of the APIs
+type conversionReviewRequest struct {
+ Id string `json:"uid"`
+ DesiredAPIVersion string `json:"desiredAPIVersion"`
+ Objects []*unstructured.Unstructured `json:"objects"`
+}
+
+// I can safely say that I will always reply with the v1 version of the APIs
+type conversionReviewResponse struct {
+ Id string `json:"uid"`
+ Result *conversionReviewResponseResult `json:"result"`
+ ConvertedObjects []*unstructured.Unstructured `json:"convertedObjects"`
+}
+
+type conversionReviewResponseResult struct {
+ Status string `json:"status"` // Either "Success" or "Failure"
+ Message string `json:"message,omitempty"`
+}
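+
+// For reference, an illustration of the payload this endpoint exchanges with the
+// API server (the apiextensions.k8s.io/v1 ConversionReview contract). A request
+// looks roughly like the following; the handler answers with the same envelope
+// carrying a "response" instead of a "request":
+//
+//	{
+//	  "kind": "ConversionReview",
+//	  "apiVersion": "apiextensions.k8s.io/v1",
+//	  "request": {
+//	    "uid": "<uid to copy back in the response>",
+//	    "desiredAPIVersion": "cm.massix.github.io/v1",
+//	    "objects": [ /* ChaosMonkeyConfiguration objects in their stored version */ ]
+//	  }
+//	}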
+
+func newConversionReviewFailuref(id, format string, args ...interface{}) *conversionReview {
+ return &conversionReview{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ConversionReview",
+ APIVersion: "apiextensions.k8s.io/v1",
+ },
+ Response: &conversionReviewResponse{
+ Id: id,
+ Result: &conversionReviewResponseResult{
+ Status: metav1.StatusFailure,
+ Message: fmt.Sprintf(format, args...),
+ },
+ },
+ }
+}
+
+func newConversionReviewSuccess(id string, convertedObjects []*unstructured.Unstructured) *conversionReview {
+ return &conversionReview{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ConversionReview",
+ APIVersion: "apiextensions.k8s.io/v1",
+ },
+ Response: &conversionReviewResponse{
+ Id: id,
+ Result: &conversionReviewResponseResult{
+ Status: metav1.StatusSuccess,
+ },
+ ConvertedObjects: convertedObjects,
+ },
+ }
+}
+
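+// getField collapses a (value, found, err) triple, such as the one returned
+// by the unstructured.Nested* accessors, into a (value, err) pair so callers
+// have a single error path to handle.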
+func getField[T any](result T, found bool, err error) (T, error) {
+ if !found {
+ return *new(T), fmt.Errorf("Field not found")
+ }
+
+ if err != nil {
+ return *new(T), err
+ }
+
+ return result, nil
+}
+
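+// convertFromV1Alpha1ToV1 upgrades every object in the request:
+// Spec.DeploymentName becomes Spec.Deployment.Name, the string Timeout is
+// parsed into a time.Duration (falling back to 10m when unparsable) and the
+// PodMode boolean is widened into a ScalingMode (KillPod when true,
+// RandomScale otherwise).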
+func (c *ConversionEndpoint) convertFromV1Alpha1ToV1(objects []*unstructured.Unstructured) ([]*unstructured.Unstructured, error) {
+ var res []*unstructured.Unstructured
+
+ for _, obj := range objects {
+ cmv1alpha1, err := v1alpha1.FromUnstructured(obj)
+ if err != nil {
+ return nil, err
+ }
+
+ cmv1 := &v1.ChaosMonkeyConfiguration{}
+ cmv1.TypeMeta = cmv1alpha1.TypeMeta
+ cmv1.ObjectMeta = cmv1alpha1.ObjectMeta
+
+ cmv1.APIVersion = "cm.massix.github.io/v1"
+
+ cmv1.Spec = v1.ChaosMonkeyConfigurationSpec{
+ Deployment: v1.ChaosMonkeyConfigurationSpecDeployment{Name: cmv1alpha1.Spec.DeploymentName},
+ MinReplicas: cmv1alpha1.Spec.MinReplicas,
+ MaxReplicas: cmv1alpha1.Spec.MaxReplicas,
+ Enabled: cmv1alpha1.Spec.Enabled,
+ }
+
+ if parsedTimeout, err := time.ParseDuration(cmv1alpha1.Spec.Timeout); err == nil {
+ cmv1.Spec.Timeout = parsedTimeout
+ } else {
+ c.Logrus.Errorf("While parsing %q: %s, using default value 10m", cmv1alpha1.Spec.Timeout, err)
+ cmv1.Spec.Timeout = 10 * time.Minute
+ }
+
+ scalingMode := v1.ScalingModeRandomScale
+ if cmv1alpha1.Spec.PodMode {
+ scalingMode = v1.ScalingModeKillPod
+ }
+
+ cmv1.Spec.ScalingMode = scalingMode
+ if uns, err := cmv1.ToUnstructured(); err != nil {
+ return nil, err
+ } else {
+ res = append(res, uns)
+ }
+ }
+
+ return res, nil
+}
+
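+// convertFromV1ToV1Alpha1 is the downgrade path: Spec.Deployment.Name is
+// flattened back into DeploymentName, the Duration is rendered as a string
+// and ScalingMode collapses into the PodMode boolean, so the AntiPressure
+// mode is lost on the way down (it becomes PodMode=false).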
+func (c *ConversionEndpoint) convertFromV1ToV1Alpha1(objects []*unstructured.Unstructured) ([]*unstructured.Unstructured, error) {
+ var res []*unstructured.Unstructured
+ for _, obj := range objects {
+ cmv1, err := v1.FromUnstructured(obj)
+ if err != nil {
+ return nil, err
+ }
+
+ cmv1alpha1 := &v1alpha1.ChaosMonkeyConfiguration{}
+ cmv1alpha1.TypeMeta = cmv1.TypeMeta
+ cmv1alpha1.ObjectMeta = cmv1.ObjectMeta
+
+ cmv1alpha1.TypeMeta.APIVersion = "cm.massix.github.io/v1alpha1"
+
+ cmv1alpha1.Spec = v1alpha1.ChaosMonkeyConfigurationSpec{
+ DeploymentName: cmv1.Spec.Deployment.Name,
+ MinReplicas: cmv1.Spec.MinReplicas,
+ MaxReplicas: cmv1.Spec.MaxReplicas,
+ Enabled: cmv1.Spec.Enabled,
+ Timeout: cmv1.Spec.Timeout.String(),
+ PodMode: cmv1.Spec.ScalingMode == v1.ScalingModeKillPod,
+ }
+
+ if uns, err := cmv1alpha1.ToUnstructured(); err != nil {
+ return nil, err
+ } else {
+ res = append(res, uns)
+ }
+ }
+
+ return res, nil
+}
+
+// ServeHTTP implements http.Handler.
+func (c *ConversionEndpoint) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ var in conversionReview
+
+ err := json.NewDecoder(req.Body).Decode(&in)
+ if err != nil {
+ failure := newConversionReviewFailuref("", "Failed to decode: %v", err)
+ _ = json.NewEncoder(w).Encode(failure)
+ return
+ }
+
+	if in.Request == nil {
+		failure := newConversionReviewFailuref("", "Empty request in ConversionReview")
+		_ = json.NewEncoder(w).Encode(failure)
+		return
+	}
+
+ switch in.Request.DesiredAPIVersion {
+ case "cm.massix.github.io/v1alpha1":
+ c.Logrus.Info("Converting to v1alpha1")
+ result, err := c.convertFromV1ToV1Alpha1(in.Request.Objects)
+ if err != nil {
+ failure := newConversionReviewFailuref(in.Request.Id, "Failed to convert: %v", err)
+ _ = json.NewEncoder(w).Encode(failure)
+ return
+ }
+
+ success := newConversionReviewSuccess(in.Request.Id, result)
+ _ = json.NewEncoder(w).Encode(success)
+ case "cm.massix.github.io/v1":
+ c.Logrus.Info("Converting to v1")
+ result, err := c.convertFromV1Alpha1ToV1(in.Request.Objects)
+ if err != nil {
+ failure := newConversionReviewFailuref(in.Request.Id, "Failed to convert: %v", err)
+ _ = json.NewEncoder(w).Encode(failure)
+			return
+		}
+
+ success := newConversionReviewSuccess(in.Request.Id, result)
+ _ = json.NewEncoder(w).Encode(success)
+	default:
+		failure := newConversionReviewFailuref(in.Request.Id, "Unsupported desiredAPIVersion: %q", in.Request.DesiredAPIVersion)
+		_ = json.NewEncoder(w).Encode(failure)
+	}
+
+ c.Logrus.Info("Done")
+}
+
+func NewConversionEndpoint() *ConversionEndpoint {
+ return &ConversionEndpoint{
+ Logrus: logrus.WithFields(logrus.Fields{"component": "ConversionEndpoint"}),
+ }
+}
diff --git a/internal/endpoints/conversion_test.go b/internal/endpoints/conversion_test.go
new file mode 100644
index 0000000..775a783
--- /dev/null
+++ b/internal/endpoints/conversion_test.go
@@ -0,0 +1,128 @@
+package endpoints
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ v1 "github.com/massix/chaos-monkey/internal/apis/v1"
+ "github.com/massix/chaos-monkey/internal/apis/v1alpha1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+func createCMCv1alpha1(name, namespace string) *v1alpha1.ChaosMonkeyConfiguration {
+ return &v1alpha1.ChaosMonkeyConfiguration{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ChaosMonkeyConfiguration",
+ APIVersion: "cm.massix.github.io/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ Spec: v1alpha1.ChaosMonkeyConfigurationSpec{
+ MinReplicas: 1,
+ MaxReplicas: 2,
+ DeploymentName: "test",
+ Timeout: "30s",
+ Enabled: true,
+ PodMode: false,
+ },
+ }
+}
+
+func createCMCv1(name, namespace string) *v1.ChaosMonkeyConfiguration {
+ return &v1.ChaosMonkeyConfiguration{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ChaosMonkeyConfiguration",
+ APIVersion: "cm.massix.github.io/v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ Spec: v1.ChaosMonkeyConfigurationSpec{
+ MinReplicas: 1,
+ MaxReplicas: 2,
+ Deployment: v1.ChaosMonkeyConfigurationSpecDeployment{
+ Name: "test",
+ },
+ Timeout: 30 * time.Second,
+ Enabled: true,
+ ScalingMode: v1.ScalingModeKillPod,
+ },
+ }
+}
+
+func TestConversionEndpoint_convertFromV1Alpha1ToV1(t *testing.T) {
+ ep := NewConversionEndpoint()
+
+ t.Run("All is good", func(t *testing.T) {
+ objects := []*unstructured.Unstructured{}
+ for i := range [10]int{} {
+ r, _ := createCMCv1alpha1(fmt.Sprintf("test-%d", i), "test").ToUnstructured()
+ objects = append(objects, r)
+ }
+
+ res, err := ep.convertFromV1Alpha1ToV1(objects)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(res) != 10 {
+ t.Fatalf("Expected 10, received %d", len(res))
+ }
+
+ for i, item := range res {
+ conf, err := v1.FromUnstructured(item)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if conf.Name != fmt.Sprintf("test-%d", i) {
+ t.Fatalf("Expected 'test-%d', received %s instead", i, conf.Name)
+ }
+
+ if conf.Spec.Deployment.Name != "test" {
+ t.Fatalf("Expected 'test', received %s instead", conf.Spec.Deployment.Name)
+ }
+ }
+ })
+}
+
+func TestConversionEndpoint_convertFromV1ToV1Alpha1(t *testing.T) {
+ ep := NewConversionEndpoint()
+
+ t.Run("All is good", func(t *testing.T) {
+ objects := []*unstructured.Unstructured{}
+ for i := range [10]int{} {
+ r, _ := createCMCv1(fmt.Sprintf("test-%d", i), "test").ToUnstructured()
+ objects = append(objects, r)
+ }
+
+ res, err := ep.convertFromV1ToV1Alpha1(objects)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(res) != 10 {
+ t.Fatalf("Expected 10, received %d", len(res))
+ }
+
+ for i, item := range res {
+ conf, err := v1alpha1.FromUnstructured(item)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if conf.Name != fmt.Sprintf("test-%d", i) {
+ t.Fatalf("Expected 'test-%d', received %s instead", i, conf.Name)
+ }
+
+ if conf.Spec.DeploymentName != "test" {
+ t.Fatalf("Expected 'test', received %s instead", conf.Spec.DeploymentName)
+ }
+ }
+ })
+}
diff --git a/internal/watcher/antihpa.go b/internal/watcher/antihpa.go
new file mode 100644
index 0000000..4392720
--- /dev/null
+++ b/internal/watcher/antihpa.go
@@ -0,0 +1,286 @@
+package watcher
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/sirupsen/logrus"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+ mv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
+ metricsv "k8s.io/metrics/pkg/client/clientset/versioned"
+ typedmv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
+)
+
+type antiHPAmetrics struct {
+ // Number of runs
+ runs prometheus.Counter
+
+ // Number of pods killed
+ podsKilled prometheus.Counter
+
+ // Total value of CPU of pods killed
+ totalCpu prometheus.Gauge
+}
+
+func (a *antiHPAmetrics) unregister() {
+ prometheus.Unregister(a.runs)
+ prometheus.Unregister(a.podsKilled)
+ prometheus.Unregister(a.totalCpu)
+}
+
+func newAntiHPAMetrics(namespace, selector string) *antiHPAmetrics {
+ constLabels := map[string]string{
+ "namespace": namespace,
+ "selector": selector,
+ }
+
+ return &antiHPAmetrics{
+ runs: promauto.NewCounter(prometheus.CounterOpts{
+ Namespace: "chaos_monkey",
+ Subsystem: "antihpawatcher",
+ Name: "runs",
+ Help: "Number of runs",
+ ConstLabels: constLabels,
+ }),
+ podsKilled: promauto.NewCounter(prometheus.CounterOpts{
+ Namespace: "chaos_monkey",
+ Subsystem: "antihpawatcher",
+ Name: "pods_killed",
+ Help: "Number of pods killed",
+ ConstLabels: constLabels,
+ }),
+ totalCpu: promauto.NewGauge(prometheus.GaugeOpts{
+ Namespace: "chaos_monkey",
+ Subsystem: "antihpawatcher",
+			Name:        "total_cpu",
+ Help: "Total value of CPU of all the killed pods",
+ ConstLabels: constLabels,
+ }),
+ }
+}
+
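+// AntiHPAWatcher periodically inspects the metrics-server data for the pods
+// selected by LabelSelector and deletes the one consuming the most CPU,
+// effectively working against a HorizontalPodAutoscaler.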
+type AntiHPAWatcher struct {
+ Enabled bool
+ Logrus logrus.FieldLogger
+ MetricsClient typedmv1beta1.MetricsV1beta1Interface
+ PodsClient typedcorev1.PodInterface
+ Namespace string
+ Running bool
+ Timeout time.Duration
+ LabelSelector string
+ PrometheusMetrics *antiHPAmetrics
+ sync.Mutex
+}
+
+// SetEnabled implements ConfigurableWatcher.
+func (a *AntiHPAWatcher) SetEnabled(v bool) {
+ a.Lock()
+ defer a.Unlock()
+
+ a.Enabled = v
+}
+
+// SetMaxReplicas implements ConfigurableWatcher.
+func (a *AntiHPAWatcher) SetMaxReplicas(v int) {}
+
+// SetMinReplicas implements ConfigurableWatcher.
+func (a *AntiHPAWatcher) SetMinReplicas(v int) {}
+
+// SetTimeout implements ConfigurableWatcher.
+func (a *AntiHPAWatcher) SetTimeout(v time.Duration) {
+ a.Lock()
+ defer a.Unlock()
+
+ a.Timeout = v
+}
+
+func (a *AntiHPAWatcher) GetTimeout() time.Duration {
+ a.Lock()
+ defer a.Unlock()
+
+ return a.Timeout
+}
+
+func (a *AntiHPAWatcher) GetEnabled() bool {
+ a.Lock()
+ defer a.Unlock()
+
+ return a.Enabled
+}
+
+func NewAntiHPAWatcher(clientset metricsv.Interface, podsClientset typedcorev1.PodInterface, namespace, podLabel string) *AntiHPAWatcher {
+ return &AntiHPAWatcher{
+ Enabled: true,
+ Logrus: logrus.WithFields(logrus.Fields{"component": "AntiHPAWatcher", "namespace": namespace, "podLabel": podLabel}),
+ MetricsClient: clientset.MetricsV1beta1(),
+ PodsClient: podsClientset,
+ Mutex: sync.Mutex{},
+ Running: false,
+ Namespace: namespace,
+ Timeout: 10 * time.Minute,
+ PrometheusMetrics: newAntiHPAMetrics(namespace, podLabel),
+ LabelSelector: podLabel,
+ }
+}
+
+// Close implements Watcher.
+func (a *AntiHPAWatcher) Close() error {
+ return nil
+}
+
+// IsRunning implements Watcher.
+func (a *AntiHPAWatcher) IsRunning() bool {
+ a.Lock()
+ defer a.Unlock()
+ return a.Running
+}
+
+func (a *AntiHPAWatcher) SetRunning(val bool) {
+ a.Lock()
+ defer a.Unlock()
+
+ a.Running = val
+}
+
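+// node is a small binary search tree keyed on CPU usage: lower values are
+// inserted on the left, higher (or equal) values on the right, so the
+// right-most node always references the container burning the most CPU.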
+type node struct {
+ PodName string
+ ContainerName string
+ CpuValue int64
+ MemoryValue int64
+ Left *node
+ Right *node
+}
+
+func newNode(cpu, mem int64, podName, containerName string) *node {
+ return &node{
+ PodName: podName,
+ ContainerName: containerName,
+ CpuValue: cpu,
+ MemoryValue: mem,
+ Left: nil,
+ Right: nil,
+ }
+}
+
+func (n *node) insert(cpu, mem int64, podName, containerName string) *node {
+ if n == nil {
+ return newNode(cpu, mem, podName, containerName)
+ }
+
+ if cpu < n.CpuValue {
+ n.Left = n.Left.insert(cpu, mem, podName, containerName)
+ } else {
+ n.Right = n.Right.insert(cpu, mem, podName, containerName)
+ }
+
+ return n
+}
+
+func (n *node) getMostUsedCpu() *node {
+ if n == nil {
+ return nil
+ }
+
+	// The right-most node holds the highest CPU value
+ if n.Right != nil {
+ return n.Right.getMostUsedCpu()
+ }
+
+ return n
+}
+
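+// getMostUsedPod folds every container metric of the list into the tree and
+// returns the name of the pod owning the right-most node, i.e. the pod whose
+// container reports the highest CPU usage.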
+func (a *AntiHPAWatcher) getMostUsedPod(in *mv1beta1.PodMetricsList) (string, error) {
+ var tree *node = nil
+
+ for _, item := range in.Items {
+ a.Logrus.Debugf("Item: %s", item.Name)
+ for _, container := range item.Containers {
+ cpuRes, cpuOk := container.Usage.Cpu().AsInt64()
+ memRes, memOk := container.Usage.Memory().AsInt64()
+
+ if memOk && cpuOk {
+ a.Logrus.Debugf("Container %s, cpu: %d, memory: %d", container.Name, cpuRes, memRes)
+ if tree == nil {
+ tree = tree.insert(cpuRes, memRes, item.Name, container.Name)
+ } else {
+ tree.insert(cpuRes, memRes, item.Name, container.Name)
+ }
+ } else {
+				a.Logrus.Warnf("Could not convert cpu or memory usage to int64 (cpu ok: %t, memory ok: %t)", cpuOk, memOk)
+ }
+ }
+ }
+
+ ret := tree.getMostUsedCpu()
+
+ if ret == nil {
+		return "", fmt.Errorf("Could not find a pod to kill (list has %d elements)", len(in.Items))
+ }
+
+ a.PrometheusMetrics.totalCpu.Add(float64(ret.CpuValue))
+ return ret.PodName, nil
+}
+
+// Start implements Watcher.
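+// Every Timeout the watcher lists the pod metrics matching LabelSelector in
+// Namespace, picks the pod reporting the highest CPU usage and deletes it
+// with a zero grace period, then re-arms the timer until the context is
+// cancelled or Stop is called. Runs are skipped while the watcher is disabled.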
+func (a *AntiHPAWatcher) Start(ctx context.Context) error {
+ timer := time.NewTimer(a.GetTimeout())
+
+ a.SetRunning(true)
+
+ for a.IsRunning() {
+ select {
+ case <-timer.C:
+ if !a.GetEnabled() {
+ a.Logrus.Debug("AntiHPA is not enabled")
+				// break out of the select only, so that the timer is re-armed below
+				break
+ }
+
+ a.Logrus.Info("AntiHPA kicking in")
+ a.PrometheusMetrics.runs.Inc()
+
+ podMetrics, err := a.MetricsClient.PodMetricses(a.Namespace).List(ctx, metav1.ListOptions{
+ LabelSelector: a.LabelSelector,
+ })
+ if err != nil {
+ a.Logrus.Error(err)
+ break
+ }
+
+			mostUsedPod, err := a.getMostUsedPod(podMetrics)
+			if err != nil {
+				a.Logrus.Error(err)
+				break
+			}
+			a.Logrus.Infof("Should kill: %s", mostUsedPod)
+
+ gracePeriodSeconds := int64(0)
+ if err := a.PodsClient.Delete(ctx, mostUsedPod, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds}); err != nil {
+ a.Logrus.Warnf("Could not delete pod: %s", err)
+ } else {
+ a.PrometheusMetrics.podsKilled.Inc()
+ }
+ case <-ctx.Done():
+ a.Logrus.Info("Watcher context done")
+ a.SetRunning(false)
+ }
+
+ timer.Reset(a.GetTimeout())
+ }
+
+ return nil
+}
+
+// Stop implements Watcher.
+func (a *AntiHPAWatcher) Stop() error {
+ a.Lock()
+ defer a.Unlock()
+
+ a.Running = false
+ return nil
+}
+
+var (
+ _ = (Watcher)((*AntiHPAWatcher)(nil))
+ _ = (ConfigurableWatcher)((*AntiHPAWatcher)(nil))
+)
diff --git a/internal/watcher/antihpa_test.go b/internal/watcher/antihpa_test.go
new file mode 100644
index 0000000..8ae0485
--- /dev/null
+++ b/internal/watcher/antihpa_test.go
@@ -0,0 +1,97 @@
+package watcher
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/sirupsen/logrus"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metricsv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
+)
+
+type containerMetric struct {
+ CpuUsage int64
+ MemoryUsage int64
+}
+
+func generateMetrics(in []containerMetric) []metricsv1beta1.ContainerMetrics {
+ var ret []metricsv1beta1.ContainerMetrics
+ for i, m := range in {
+ ret = append(ret, metricsv1beta1.ContainerMetrics{
+ Name: fmt.Sprintf("container%d", i),
+ Usage: corev1.ResourceList{
+ corev1.ResourceCPU: *resource.NewQuantity(m.CpuUsage, resource.DecimalSI),
+ corev1.ResourceMemory: *resource.NewQuantity(m.MemoryUsage, resource.DecimalSI),
+ },
+ })
+ }
+
+ return ret
+}
+
+func TestAntiHPA_binaryTree(t *testing.T) {
+ logrus.SetLevel(logrus.DebugLevel)
+ var root *node = nil
+
+ root = root.
+ insert(1, 2, "pod1", "container1").
+ insert(2, 3, "pod1", "container2").
+ insert(0, 5, "pod2", "container1").
+ insert(3, 10, "pod2", "container2").
+ insert(6, 11, "pod2", "container3").
+ insert(4, 10, "pod3", "container1").
+ insert(5, 11, "pod4", "container1")
+
+ mostUsed := root.getMostUsedCpu()
+	if mostUsed.PodName != "pod2" || mostUsed.ContainerName != "container3" {
+ t.Errorf("Most used: %+v", mostUsed)
+ }
+}
+
+func TestAntiHPA_getMostUsedPod(t *testing.T) {
+ metricsList := &metricsv1beta1.PodMetricsList{
+ Items: []metricsv1beta1.PodMetrics{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "pod1"},
+ Containers: generateMetrics([]containerMetric{
+ {CpuUsage: 100, MemoryUsage: 128},
+ {CpuUsage: 98, MemoryUsage: 512},
+ {CpuUsage: 110, MemoryUsage: 312},
+ }),
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "pod2"},
+ Containers: generateMetrics([]containerMetric{
+ {CpuUsage: 214, MemoryUsage: 12},
+ {CpuUsage: 203, MemoryUsage: 1024},
+ {CpuUsage: 512, MemoryUsage: 14},
+ }),
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "pod3"},
+ Containers: generateMetrics([]containerMetric{
+ {CpuUsage: 431, MemoryUsage: 3},
+ {CpuUsage: 15, MemoryUsage: 2045},
+ {CpuUsage: 12, MemoryUsage: 10},
+ }),
+ },
+ },
+ }
+
+ logrus.SetLevel(logrus.DebugLevel)
+ antiHpa := &AntiHPAWatcher{
+ Logrus: logrus.StandardLogger(),
+ PrometheusMetrics: newAntiHPAMetrics("", ""),
+ }
+
+ res, err := antiHpa.getMostUsedPod(metricsList)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res != "pod2" {
+ t.Fatalf("expected pod2, got %s", res)
+ }
+}
diff --git a/internal/watcher/crd.go b/internal/watcher/crd.go
index 6f29ccd..3df6b08 100644
--- a/internal/watcher/crd.go
+++ b/internal/watcher/crd.go
@@ -10,8 +10,8 @@ import (
typedcmc "github.com/massix/chaos-monkey/internal/apis/clientset/versioned"
"github.com/massix/chaos-monkey/internal/apis/clientset/versioned/scheme"
- cmv1alpha1 "github.com/massix/chaos-monkey/internal/apis/clientset/versioned/typed/apis/v1alpha1"
- "github.com/massix/chaos-monkey/internal/apis/v1alpha1"
+ cmv1 "github.com/massix/chaos-monkey/internal/apis/clientset/versioned/typed/apis/v1"
+ v1 "github.com/massix/chaos-monkey/internal/apis/v1"
"github.com/massix/chaos-monkey/internal/configuration"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -24,20 +24,22 @@ import (
appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
+ metricsv "k8s.io/metrics/pkg/client/clientset/versioned"
)
type WatcherConfiguration struct {
- Configuration *v1alpha1.ChaosMonkeyConfiguration
+ Configuration *v1.ChaosMonkeyConfiguration
Watcher ConfigurableWatcher
}
type CrdWatcher struct {
- cmv1alpha1.ChaosMonkeyConfigurationInterface
appsv1.DeploymentInterface
record.EventRecorderLogger
+ V1 cmv1.ChaosMonkeyConfigurationInterface
Logrus logrus.FieldLogger
Client kubernetes.Interface
+ MetricsClient metricsv.Interface
metrics *crdMetrics
Mutex *sync.Mutex
DeploymentWatchers map[string]*WatcherConfiguration
@@ -48,6 +50,10 @@ type CrdWatcher struct {
Running bool
}
+type watchInterface interface {
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+}
+
type crdMetrics struct {
// Total number of events handled
addedEvents prometheus.Counter
@@ -65,6 +71,10 @@ type crdMetrics struct {
dwSpawned prometheus.Counter
dwActive prometheus.Gauge
+ // Metrics for AntiHPAWatchers
+ ahSpawned prometheus.Counter
+ ahActive prometheus.Gauge
+
// How long it takes to handle an event
eventDuration prometheus.Histogram
}
@@ -78,6 +88,8 @@ func (crd *crdMetrics) unregister() {
prometheus.Unregister(crd.pwActive)
prometheus.Unregister(crd.dwSpawned)
prometheus.Unregister(crd.dwActive)
+ prometheus.Unregister(crd.ahSpawned)
+ prometheus.Unregister(crd.ahActive)
prometheus.Unregister(crd.eventDuration)
}
@@ -145,6 +157,21 @@ func newCrdMetrics(namespace string) *crdMetrics {
ConstLabels: map[string]string{"namespace": namespace},
}),
+ ahSpawned: promauto.NewCounter(prometheus.CounterOpts{
+ Namespace: "chaos_monkey",
+ Name: "ah_spawned",
+ Subsystem: "crdwatcher",
+			Help:        "Total number of AntiHPAWatchers spawned",
+ ConstLabels: map[string]string{"namespace": namespace},
+ }),
+ ahActive: promauto.NewGauge(prometheus.GaugeOpts{
+ Namespace: "chaos_monkey",
+ Name: "ah_active",
+ Subsystem: "crdwatcher",
+ Help: "Total number of AntiHPAWatchers active",
+ ConstLabels: map[string]string{"namespace": namespace},
+ }),
+
eventDuration: promauto.NewHistogram(prometheus.HistogramOpts{
Namespace: "chaos_monkey",
Name: "event_duration",
@@ -156,7 +183,7 @@ func newCrdMetrics(namespace string) *crdMetrics {
}
}
-func NewCrdWatcher(clientset kubernetes.Interface, cmcClientset typedcmc.Interface, recorder record.EventRecorderLogger, namespace string) Watcher {
+func NewCrdWatcher(clientset kubernetes.Interface, cmcClientset typedcmc.Interface, metricsClient metricsv.Interface, recorder record.EventRecorderLogger, namespace string) *CrdWatcher {
// Build my own recorder here
if recorder == nil {
logrus.Debug("No recorder provided, using default")
@@ -168,12 +195,13 @@ func NewCrdWatcher(clientset kubernetes.Interface, cmcClientset typedcmc.Interfa
conf := configuration.FromEnvironment()
return &CrdWatcher{
- ChaosMonkeyConfigurationInterface: cmcClientset.ChaosMonkeyConfigurationV1alpha1().ChaosMonkeyConfigurations(namespace),
- DeploymentInterface: clientset.AppsV1().Deployments(namespace),
- EventRecorderLogger: recorder,
+ V1: cmcClientset.ChaosMonkeyConfigurationV1().ChaosMonkeyConfigurations(namespace),
+ DeploymentInterface: clientset.AppsV1().Deployments(namespace),
+ EventRecorderLogger: recorder,
Logrus: logrus.WithFields(logrus.Fields{"component": "CRDWatcher", "namespace": namespace}),
Client: clientset,
+ MetricsClient: metricsClient,
Mutex: &sync.Mutex{},
DeploymentWatchers: map[string]*WatcherConfiguration{},
ForceStopChan: make(chan interface{}),
@@ -199,6 +227,88 @@ func (c *CrdWatcher) IsRunning() bool {
return c.Running
}
+// If this function returns an error, it means that we cannot recover
+func (c *CrdWatcher) handleEvent(ctx context.Context, evt watch.Event, wg *sync.WaitGroup) error {
+ var cmc *v1.ChaosMonkeyConfiguration
+
+ switch object := evt.Object.(type) {
+ case *v1.ChaosMonkeyConfiguration:
+ cmc = object
+ default:
+ return fmt.Errorf("unknown object type %T", evt.Object)
+ }
+
+ startTime := time.Now().UnixMicro()
+ defer func() {
+ endTime := time.Now().UnixMicro()
+ c.metrics.eventDuration.Observe(float64(endTime - startTime))
+ }()
+
+ c.Logrus.Debugf("Received %s event for %+v", evt.Type, cmc)
+
+ switch evt.Type {
+ case "", watch.Error:
+ c.Logrus.Errorf("Received empty error or event from CRD watcher: %+v", evt)
+ c.setRunning(false)
+ return errors.New("Empty event or error from CRD watcher")
+
+ case watch.Added:
+ c.Logrus.Infof("Received ADDED event for %s, for deployment %s", cmc.Name, cmc.Spec.Deployment.Name)
+
+ // Check if the target deployment exists
+ dep, err := c.DeploymentInterface.Get(ctx, cmc.Spec.Deployment.Name, metav1.GetOptions{})
+ if err != nil {
+ c.Logrus.Errorf("Error while trying to get deployment: %s", err)
+
+ // Recoverable error
+ return nil
+ }
+
+ c.Logrus.Infof("Adding watcher for deployment %s", dep.Name)
+
+ // Add a new watcher
+ if err = c.addWatcher(cmc, dep); err != nil {
+ c.Logrus.Errorf("Error while trying to add watcher: %s", err)
+
+ // Recoverable error
+ return nil
+ }
+
+ // Start it
+ if err := c.startWatcher(ctx, dep.Name, wg); err != nil {
+ c.Logrus.Errorf("Error while trying to start watcher: %s", err)
+ }
+
+ c.Logrus.Debug("All is good! Publishing event.")
+ c.EventRecorderLogger.Eventf(cmc, "Normal", "Started", "Watcher started for deployment %s", dep.Name)
+ c.metrics.addedEvents.Inc()
+
+ case watch.Modified:
+ c.Logrus.Infof("Received MODIFIED event for %s, for deployment %s", cmc.Name, cmc.Spec.Deployment.Name)
+
+ if err := c.modifyWatcher(ctx, cmc, wg); err != nil {
+ c.Logrus.Errorf("Error while trying to modify watcher: %s", err)
+ }
+
+ c.Logrus.Debug("All is good! Publishing event.")
+ c.EventRecorderLogger.Eventf(cmc, "Normal", "Modified", "Watcher modified for deployment %s", cmc.Spec.Deployment.Name)
+ c.metrics.modifiedEvents.Inc()
+
+ case watch.Deleted:
+ c.Logrus.Infof("Received DELETED event for %s, for deployment %s", cmc.Name, cmc.Spec.Deployment.Name)
+
+ if err := c.deleteWatcher(cmc); err != nil {
+ c.Logrus.Errorf("Error while trying to delete watcher: %s", err)
+ }
+
+ c.Logrus.Debug("All is good! Publishing event.")
+ c.EventRecorderLogger.Eventf(cmc, "Normal", "Deleted", "Watcher deleted for deployment %s", cmc.Spec.Deployment.Name)
+ c.metrics.deletedEvents.Inc()
+ }
+
+ return nil
+}
+
// Start implements Watcher.
func (c *CrdWatcher) Start(ctx context.Context) error {
defer c.Close()
@@ -207,7 +317,7 @@ func (c *CrdWatcher) Start(ctx context.Context) error {
var wg sync.WaitGroup
watchTimeout := int64(c.WatcherTimeout.Seconds())
- w, err := c.ChaosMonkeyConfigurationInterface.Watch(ctx, metav1.ListOptions{
+ wv1, err := c.V1.Watch(ctx, metav1.ListOptions{
Watch: true,
TimeoutSeconds: &watchTimeout,
})
@@ -215,18 +325,16 @@ func (c *CrdWatcher) Start(ctx context.Context) error {
return err
}
- defer w.Stop()
-
c.setRunning(true)
for c.IsRunning() {
select {
- case evt, ok := <-w.ResultChan():
+ case evt, ok := <-wv1.ResultChan():
if !ok {
c.Logrus.Warn("Watch timed out")
- w, err = c.restartWatch(ctx, &wg)
+ wv1, err = c.restartWatch(ctx, c.V1, &wg)
if err != nil {
- c.Logrus.Errorf("Error while restarting watchers: %s", err)
+ c.Logrus.Errorf("Error while restarting watcher: %s", err)
c.setRunning(false)
}
@@ -234,69 +342,10 @@ func (c *CrdWatcher) Start(ctx context.Context) error {
break
}
- startTime := time.Now().UnixMicro()
- cmc := evt.Object.(*v1alpha1.ChaosMonkeyConfiguration)
- c.Logrus.Debugf("Received %s event for %+v", evt.Type, cmc)
-
- switch evt.Type {
- case "", watch.Error:
- c.Logrus.Errorf("Received empty error or event from CRD watcher: %+v", evt)
- c.setRunning(false)
- err = errors.New("Empty event or error from CRD watcher")
-
- case watch.Added:
- c.Logrus.Infof("Received ADDED event for %s, for deployment %s", cmc.Name, cmc.Spec.DeploymentName)
-
- // Check if the target deployment exists
- dep, err := c.DeploymentInterface.Get(ctx, cmc.Spec.DeploymentName, metav1.GetOptions{})
- if err != nil {
- c.Logrus.Errorf("Error while trying to get deployment: %s", err)
- continue
- }
-
- c.Logrus.Infof("Adding watcher for deployment %s", dep.Name)
-
- // Add a new watcher
- if err = c.addWatcher(cmc, dep); err != nil {
- c.Logrus.Errorf("Error while trying to add watcher: %s", err)
- continue
- }
-
- // Start it
- if err := c.startWatcher(ctx, dep.Name, &wg); err != nil {
- c.Logrus.Errorf("Error while trying to start watcher: %s", err)
- }
-
- c.Logrus.Debug("All is good! Publishing event.")
- c.EventRecorderLogger.Eventf(cmc, "Normal", "Started", "Watcher started for deployment %s", dep.Name)
- c.metrics.addedEvents.Inc()
-
- case watch.Modified:
- c.Logrus.Infof("Received MODIFIED event for %s, for deployment %s", cmc.Name, cmc.Spec.DeploymentName)
-
- if err := c.modifyWatcher(ctx, cmc, &wg); err != nil {
- c.Logrus.Errorf("Error while trying to modify watcher: %s", err)
- }
-
- c.Logrus.Debug("All is good! Publishing event.")
- c.EventRecorderLogger.Eventf(cmc, "Normal", "Modified", "Watcher modified for deployment %s", cmc.Spec.DeploymentName)
- c.metrics.modifiedEvents.Inc()
-
- case watch.Deleted:
- c.Logrus.Infof("Received DELETED event for %s, for deployment %s", cmc.Name, cmc.Spec.DeploymentName)
-
- if err := c.deleteWatcher(cmc); err != nil {
- c.Logrus.Errorf("Error while trying to delete watcher: %s", err)
- }
-
- c.Logrus.Debug("All is good! Publishing event.")
- c.EventRecorderLogger.Eventf(cmc, "Normal", "Deleted", "Watcher deleted for deployment %s", cmc.Spec.DeploymentName)
- c.metrics.deletedEvents.Inc()
+ if err := c.handleEvent(ctx, evt, &wg); err != nil {
+ c.Logrus.Error(err)
+ return err
}
-
- endTime := time.Now().UnixMicro()
- c.metrics.eventDuration.Observe(float64(endTime - startTime))
-
case <-ctx.Done():
c.Logrus.Info("Watcher context done")
c.setRunning(false)
@@ -359,7 +408,7 @@ func (c *CrdWatcher) setRunning(v bool) {
c.Running = v
}
-func (c *CrdWatcher) addWatcher(cmc *v1alpha1.ChaosMonkeyConfiguration, dep *apiappsv1.Deployment) error {
+func (c *CrdWatcher) addWatcher(cmc *v1.ChaosMonkeyConfiguration, dep *apiappsv1.Deployment) error {
c.Mutex.Lock()
defer c.Mutex.Unlock()
@@ -368,32 +417,32 @@ func (c *CrdWatcher) addWatcher(cmc *v1alpha1.ChaosMonkeyConfiguration, dep *api
return errors.New("Watcher for " + dep.Name + " already exists")
}
- parsedDuration, err := time.ParseDuration(cmc.Spec.Timeout)
- if err != nil {
- c.Logrus.Warnf("Error while parsing timeout: %s, defaulting to 5 minutes", err)
- parsedDuration = time.Duration(5 * time.Minute)
- }
-
var newWatcher ConfigurableWatcher
+ var combinedLabelSelector []string
+	if dep.Spec.Selector != nil {
+		for label, value := range dep.Spec.Selector.MatchLabels {
+			combinedLabelSelector = append(combinedLabelSelector, fmt.Sprintf("%s=%s", label, value))
+		}
+	}
- if cmc.Spec.PodMode {
+ switch cmc.Spec.ScalingMode {
+ case v1.ScalingModeKillPod:
c.Logrus.Debug("Creating new pod watcher")
if dep.Spec.Selector == nil || len(dep.Spec.Selector.MatchLabels) == 0 {
return fmt.Errorf("No selector labels found for deployment %s", dep.Name)
}
- var combinedLabelSelector []string
- for label, value := range dep.Spec.Selector.MatchLabels {
- combinedLabelSelector = append(combinedLabelSelector, fmt.Sprintf("%s=%s", label, value))
- }
-
c.Logrus.Debugf("Configuring watcher with %+v", cmc.Spec)
newWatcher = DefaultPodFactory(c.Client, nil, dep.Namespace, strings.Join(combinedLabelSelector, ","))
c.metrics.pwSpawned.Inc()
- } else {
+ case v1.ScalingModeRandomScale:
c.Logrus.Debug("Creating new deployment watcher")
newWatcher = DefaultDeploymentFactory(c.Client, nil, dep)
c.metrics.dwSpawned.Inc()
+ case v1.ScalingModeAntiPressure:
+		c.Logrus.Debug("Creating new AntiHPA watcher")
+ c.metrics.ahSpawned.Inc()
+ newWatcher = DefaultAntiHPAFactory(c.MetricsClient, c.Client.CoreV1().Pods(cmc.Namespace), dep.Namespace, strings.Join(combinedLabelSelector, ","))
+ default:
+ return fmt.Errorf("Unhandled scaling mode: %s", cmc.Spec.ScalingMode)
}
// Configure it
@@ -401,7 +450,7 @@ func (c *CrdWatcher) addWatcher(cmc *v1alpha1.ChaosMonkeyConfiguration, dep *api
newWatcher.SetEnabled(cmc.Spec.Enabled)
newWatcher.SetMinReplicas(cmc.Spec.MinReplicas)
newWatcher.SetMaxReplicas(cmc.Spec.MaxReplicas)
- newWatcher.SetTimeout(parsedDuration)
+ newWatcher.SetTimeout(cmc.Spec.Timeout)
c.Logrus.Debug("Adding watcher to map")
c.DeploymentWatchers[dep.Name] = &WatcherConfiguration{
@@ -428,6 +477,8 @@ func (c *CrdWatcher) startWatcher(ctx context.Context, forDeployment string, wg
activeMetric = c.metrics.pwActive
case *DeploymentWatcher:
activeMetric = c.metrics.dwActive
+ case *AntiHPAWatcher:
+ activeMetric = c.metrics.ahActive
}
if activeMetric != nil {
@@ -450,25 +501,18 @@ func (c *CrdWatcher) startWatcher(ctx context.Context, forDeployment string, wg
return nil
}
-func (c *CrdWatcher) modifyWatcher(ctx context.Context, cmc *v1alpha1.ChaosMonkeyConfiguration, wg *sync.WaitGroup) error {
+func (c *CrdWatcher) modifyWatcher(ctx context.Context, cmc *v1.ChaosMonkeyConfiguration, wg *sync.WaitGroup) error {
c.Mutex.Lock()
defer c.Mutex.Unlock()
- wc, ok := c.DeploymentWatchers[cmc.Spec.DeploymentName]
+ wc, ok := c.DeploymentWatchers[cmc.Spec.Deployment.Name]
if !ok {
- return fmt.Errorf("Watcher for deployment %s does not exist", cmc.Spec.DeploymentName)
- }
-
- // The parsing of the duration is the same for PodWatchers and DeploymentWatchers
- newDuration, err := time.ParseDuration(cmc.Spec.Timeout)
- if err != nil {
- newDuration = 10 * time.Minute
- c.Logrus.Warnf("Error while parsing timeout: %s, using default of %s", err, newDuration)
+ return fmt.Errorf("Watcher for deployment %s does not exist", cmc.Spec.Deployment.Name)
}
c.Logrus.Debugf("Reconfiguring watcher with %+v", cmc.Spec)
- if wc.Configuration.Spec.PodMode != cmc.Spec.PodMode {
+ if wc.Configuration.Spec.ScalingMode != cmc.Spec.ScalingMode {
c.Logrus.Infof("CMC %s changed its pod mode, recreating the watcher from scratch", cmc.Name)
c.Logrus.Debugf("Stopping watcher %s", cmc.Name)
@@ -476,36 +520,42 @@ func (c *CrdWatcher) modifyWatcher(ctx context.Context, cmc *v1alpha1.ChaosMonke
return err
}
- delete(c.DeploymentWatchers, cmc.Spec.DeploymentName)
+ delete(c.DeploymentWatchers, cmc.Spec.Deployment.Name)
// Get the deployment
- dep, err := c.DeploymentInterface.Get(context.Background(), cmc.Spec.DeploymentName, metav1.GetOptions{})
+ dep, err := c.DeploymentInterface.Get(context.Background(), cmc.Spec.Deployment.Name, metav1.GetOptions{})
if err != nil {
return err
}
+ allLabels := []string{}
+ for key, val := range dep.Spec.Selector.MatchLabels {
+ allLabels = append(allLabels, fmt.Sprintf("%s=%s", key, val))
+ }
+
var newWatcher ConfigurableWatcher
- if cmc.Spec.PodMode {
+ switch cmc.Spec.ScalingMode {
+ case v1.ScalingModeKillPod:
c.Logrus.Debug("Creating new Pod watcher")
-
- allLabels := []string{}
- for key, val := range dep.Spec.Selector.MatchLabels {
- allLabels = append(allLabels, fmt.Sprintf("%s=%s", key, val))
- }
-
newWatcher = DefaultPodFactory(c.Client, nil, dep.Namespace, allLabels...)
c.metrics.pwSpawned.Inc()
- } else {
+ case v1.ScalingModeRandomScale:
c.Logrus.Debug("Creating new Deployment watcher")
newWatcher = DefaultDeploymentFactory(c.Client, nil, dep)
c.metrics.dwSpawned.Inc()
+ case v1.ScalingModeAntiPressure:
+ c.Logrus.Debug("Creating new AntiHPA watcher")
+ newWatcher = DefaultAntiHPAFactory(c.MetricsClient, c.Client.CoreV1().Pods(cmc.Namespace), dep.Namespace, strings.Join(allLabels, ","))
+ c.metrics.ahSpawned.Inc()
+ default:
+ return fmt.Errorf("Unhandled scaling mode: %s", cmc.Spec.ScalingMode)
}
// Configure the watcher
newWatcher.SetEnabled(cmc.Spec.Enabled)
newWatcher.SetMinReplicas(cmc.Spec.MinReplicas)
newWatcher.SetMaxReplicas(cmc.Spec.MaxReplicas)
- newWatcher.SetTimeout(newDuration)
+ newWatcher.SetTimeout(cmc.Spec.Timeout)
// Start the watcher
c.Logrus.Info("Starting the newly created watcher")
@@ -516,6 +566,8 @@ func (c *CrdWatcher) modifyWatcher(ctx context.Context, cmc *v1alpha1.ChaosMonke
activeMetric = c.metrics.pwActive
case *DeploymentWatcher:
activeMetric = c.metrics.dwActive
+ case *AntiHPAWatcher:
+ activeMetric = c.metrics.ahActive
}
if activeMetric != nil {
@@ -525,7 +577,7 @@ func (c *CrdWatcher) modifyWatcher(ctx context.Context, cmc *v1alpha1.ChaosMonke
wg.Add(1)
go func() {
defer wg.Done()
- c.Logrus.Debugf("Starting watcher for %s", cmc.Spec.DeploymentName)
+ c.Logrus.Debugf("Starting watcher for %s", cmc.Spec.Deployment.Name)
if err := newWatcher.Start(ctx); err != nil {
c.Logrus.Errorf("Error while starting watcher: %s", err)
}
@@ -536,7 +588,7 @@ func (c *CrdWatcher) modifyWatcher(ctx context.Context, cmc *v1alpha1.ChaosMonke
}()
// Put it into the map
- c.DeploymentWatchers[cmc.Spec.DeploymentName] = &WatcherConfiguration{
+ c.DeploymentWatchers[cmc.Spec.Deployment.Name] = &WatcherConfiguration{
Configuration: cmc,
Watcher: newWatcher,
}
@@ -545,25 +597,25 @@ func (c *CrdWatcher) modifyWatcher(ctx context.Context, cmc *v1alpha1.ChaosMonke
wc.Watcher.SetEnabled(cmc.Spec.Enabled)
wc.Watcher.SetMinReplicas(cmc.Spec.MinReplicas)
wc.Watcher.SetMaxReplicas(cmc.Spec.MaxReplicas)
- wc.Watcher.SetTimeout(newDuration)
+ wc.Watcher.SetTimeout(cmc.Spec.Timeout)
}
return nil
}
-func (c *CrdWatcher) deleteWatcher(cmc *v1alpha1.ChaosMonkeyConfiguration) error {
+func (c *CrdWatcher) deleteWatcher(cmc *v1.ChaosMonkeyConfiguration) error {
c.Mutex.Lock()
defer c.Mutex.Unlock()
- c.Logrus.Infof("Deleting watcher for %s", cmc.Spec.DeploymentName)
+ c.Logrus.Infof("Deleting watcher for %s", cmc.Spec.Deployment.Name)
- if wc, ok := c.DeploymentWatchers[cmc.Spec.DeploymentName]; ok {
+ if wc, ok := c.DeploymentWatchers[cmc.Spec.Deployment.Name]; ok {
if err := wc.Watcher.Stop(); err != nil {
c.Logrus.Warnf("Error while stopping watcher: %s", err)
}
- delete(c.DeploymentWatchers, cmc.Spec.DeploymentName)
+ delete(c.DeploymentWatchers, cmc.Spec.Deployment.Name)
} else {
- return fmt.Errorf("Watcher for deployment %s does not exist", cmc.Spec.DeploymentName)
+ return fmt.Errorf("Watcher for deployment %s does not exist", cmc.Spec.Deployment.Name)
}
return nil
@@ -581,7 +633,7 @@ func (c *CrdWatcher) cleanUp() {
}
}
-func (c *CrdWatcher) restartWatch(ctx context.Context, wg *sync.WaitGroup) (watch.Interface, error) {
+func (c *CrdWatcher) restartWatch(ctx context.Context, wi watchInterface, wg *sync.WaitGroup) (watch.Interface, error) {
c.Mutex.Lock()
defer c.Mutex.Unlock()
@@ -601,7 +653,7 @@ func (c *CrdWatcher) restartWatch(ctx context.Context, wg *sync.WaitGroup) (watc
wg.Wait()
timeoutSeconds := int64(c.WatcherTimeout.Seconds())
- return c.ChaosMonkeyConfigurationInterface.Watch(ctx, metav1.ListOptions{
+ return wi.Watch(ctx, metav1.ListOptions{
Watch: true,
TimeoutSeconds: &timeoutSeconds,
})
diff --git a/internal/watcher/crd_test.go b/internal/watcher/crd_test.go
index 68a219d..96cfbef 100644
--- a/internal/watcher/crd_test.go
+++ b/internal/watcher/crd_test.go
@@ -10,7 +10,7 @@ import (
"time"
cmc "github.com/massix/chaos-monkey/internal/apis/clientset/versioned/fake"
- "github.com/massix/chaos-monkey/internal/apis/v1alpha1"
+ v1 "github.com/massix/chaos-monkey/internal/apis/v1"
"github.com/massix/chaos-monkey/internal/watcher"
"github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
@@ -19,8 +19,11 @@ import (
"k8s.io/apimachinery/pkg/watch"
k "k8s.io/client-go/kubernetes"
kubernetes "k8s.io/client-go/kubernetes/fake"
+ typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
ktest "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record"
+ metricsv "k8s.io/metrics/pkg/client/clientset/versioned"
+ fakemetrics "k8s.io/metrics/pkg/client/clientset/versioned/fake"
)
type FakeDeploymentWatcher struct {
@@ -99,7 +102,7 @@ func (f *FakeDeploymentWatcher) Close() error {
var _ watcher.ConfigurableWatcher = &FakeDeploymentWatcher{}
func TestCRDWatcher_Create(t *testing.T) {
- w := watcher.DefaultCrdFactory(kubernetes.NewSimpleClientset(), cmc.NewSimpleClientset(), record.NewFakeRecorder(1024), "chaos-monkey")
+ w := watcher.NewCrdWatcher(kubernetes.NewSimpleClientset(), cmc.NewSimpleClientset(), fakemetrics.NewSimpleClientset(), record.NewFakeRecorder(1024), "chaos-monkey")
defer w.Close()
if w.IsRunning() {
@@ -107,18 +110,18 @@ func TestCRDWatcher_Create(t *testing.T) {
}
}
-func createCMC(name string, enabled, podMode bool, minReplicas, maxReplicas int, deploymentName, timeout string) *v1alpha1.ChaosMonkeyConfiguration {
- return &v1alpha1.ChaosMonkeyConfiguration{
+func createCMC(name string, enabled bool, scalingMode v1.ScalingMode, minReplicas, maxReplicas int, deploymentName string, timeout time.Duration) *v1.ChaosMonkeyConfiguration {
+ return &v1.ChaosMonkeyConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
- Spec: v1alpha1.ChaosMonkeyConfigurationSpec{
- Enabled: enabled,
- MinReplicas: minReplicas,
- MaxReplicas: maxReplicas,
- DeploymentName: deploymentName,
- Timeout: timeout,
- PodMode: podMode,
+ Spec: v1.ChaosMonkeyConfigurationSpec{
+ Enabled: enabled,
+ MinReplicas: minReplicas,
+ MaxReplicas: maxReplicas,
+ Timeout: timeout,
+ ScalingMode: scalingMode,
+ Deployment: v1.ChaosMonkeyConfigurationSpecDeployment{Name: deploymentName},
},
}
}
@@ -127,7 +130,8 @@ func TestCRDWatcher_BasicBehaviour(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
clientSet := kubernetes.NewSimpleClientset()
cmClientset := cmc.NewSimpleClientset()
- w := watcher.DefaultCrdFactory(clientSet, cmClientset, record.NewFakeRecorder(1024), "chaos-monkey").(*watcher.CrdWatcher)
+ metricsClientset := fakemetrics.NewSimpleClientset()
+ w := watcher.NewCrdWatcher(clientSet, cmClientset, metricsClientset, record.NewFakeRecorder(1024), "chaos-monkey")
w.CleanupTimeout = 1 * time.Second
// Inject my Deployment Factory
@@ -140,16 +144,21 @@ func TestCRDWatcher_BasicBehaviour(t *testing.T) {
return &FakeDeploymentWatcher{Mutex: &sync.Mutex{}, DeploymentName: namespace, IsPodMode: true}
}
+ // Inject my AntiHPA Factory
+ watcher.DefaultAntiHPAFactory = func(client metricsv.Interface, podset typedcorev1.PodInterface, namespace, podLabel string) watcher.ConfigurableWatcher {
+ return &FakeDeploymentWatcher{Mutex: &sync.Mutex{}, DeploymentName: namespace, IsPodMode: true}
+ }
+
// Create the scenario
cmClientset.PrependWatchReactor("chaosmonkeyconfigurations", func(action ktest.Action) (handled bool, ret watch.Interface, err error) {
fakeWatch := watch.NewFake()
go func() {
- fakeWatch.Add(createCMC("test-1", true, false, 1, 1, "test-1", "1s"))
- fakeWatch.Add(createCMC("test-2", false, true, 1, 1, "test-2", "10s"))
- fakeWatch.Add(createCMC("test-3", true, true, 1, 1, "test-3", "invalidstring"))
- fakeWatch.Modify(createCMC("test-1", true, false, 4, 8, "test-1", "1s"))
- fakeWatch.Delete(createCMC("test-2", true, false, 4, 8, "test-2", "1s"))
+ fakeWatch.Add(createCMC("test-1", true, v1.ScalingModeRandomScale, 1, 1, "test-1", 1*time.Second))
+ fakeWatch.Add(createCMC("test-2", false, v1.ScalingModeKillPod, 1, 1, "test-2", 10*time.Second))
+ fakeWatch.Add(createCMC("test-3", true, v1.ScalingModeAntiPressure, 1, 1, "test-3", 10*time.Minute))
+ fakeWatch.Modify(createCMC("test-1", true, v1.ScalingModeRandomScale, 4, 8, "test-1", 1*time.Second))
+ fakeWatch.Delete(createCMC("test-2", true, v1.ScalingModeRandomScale, 4, 8, "test-2", 1*time.Second))
}()
return true, fakeWatch, nil
@@ -158,7 +167,6 @@ func TestCRDWatcher_BasicBehaviour(t *testing.T) {
// Setup the scenario for the deployments too
clientSet.PrependReactor("get", "deployments", func(action ktest.Action) (handled bool, ret runtime.Object, err error) {
askedDeployment := action.(ktest.GetAction).GetName()
- t.Logf("Asked deployment %s", askedDeployment)
dep := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: askedDeployment,
@@ -217,9 +225,9 @@ func TestCRDWatcher_BasicBehaviour(t *testing.T) {
t.Error("Deployment for test-1 not found")
}
- // The deployment for "test-3" should have the default timeout of 5 minutes
+ // The deployment for "test-3" should have the default timeout of 10 minutes
if !testDW("test-3", func(d *FakeDeploymentWatcher) {
- if d.Timeout != 5*time.Minute {
+ if d.Timeout != 10*time.Minute {
t.Errorf("Expected 5 minutes timeout, got %s", d.Timeout)
}
}) {
@@ -248,14 +256,15 @@ func TestCRDWatcher_BasicBehaviour(t *testing.T) {
func TestCRDWatcher_Error(t *testing.T) {
clientSet := kubernetes.NewSimpleClientset()
cmClientset := cmc.NewSimpleClientset()
- w := watcher.DefaultCrdFactory(clientSet, cmClientset, record.NewFakeRecorder(1024), "chaos-monkey").(*watcher.CrdWatcher)
+ metricsClientset := fakemetrics.NewSimpleClientset()
+ w := watcher.NewCrdWatcher(clientSet, cmClientset, metricsClientset, record.NewFakeRecorder(1024), "chaos-monkey")
w.CleanupTimeout = 1 * time.Second
// Setup the scenario for the CMCs
cmClientset.PrependWatchReactor("chaosmonkeyconfigurations", func(action ktest.Action) (handled bool, ret watch.Interface, err error) {
fakeWatch := watch.NewFake()
go func() {
- fakeWatch.Error(&v1alpha1.ChaosMonkeyConfiguration{
+ fakeWatch.Error(&v1.ChaosMonkeyConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
@@ -271,8 +280,6 @@ func TestCRDWatcher_Error(t *testing.T) {
go func() {
if err := w.Start(context.Background()); err == nil || !strings.Contains(err.Error(), "Empty event or error from CRD watcher") {
t.Errorf("Expected error, got %+v instead", err)
- } else {
- t.Logf("Expected: %s", err)
}
done <- struct{}{}
@@ -290,7 +297,8 @@ func TestCRDWatcher_Error(t *testing.T) {
func TestCRDWatcher_Cleanup(t *testing.T) {
clientSet := kubernetes.NewSimpleClientset()
cmClientset := cmc.NewSimpleClientset()
- w := watcher.DefaultCrdFactory(clientSet, cmClientset, record.NewFakeRecorder(1024), "chaos-monkey").(*watcher.CrdWatcher)
+ metricsClientset := fakemetrics.NewSimpleClientset()
+ w := watcher.NewCrdWatcher(clientSet, cmClientset, metricsClientset, record.NewFakeRecorder(1024), "chaos-monkey")
w.CleanupTimeout = 1 * time.Second
// Inject some FakeDeploymentWatchers inside the watcher itself
@@ -351,7 +359,8 @@ func TestCRDWatcher_Cleanup(t *testing.T) {
func TestCRDWatcher_Restart(t *testing.T) {
clientSet := kubernetes.NewSimpleClientset()
cmClientset := cmc.NewSimpleClientset()
- w := watcher.DefaultCrdFactory(clientSet, cmClientset, record.NewFakeRecorder(1024), "chaos-monkey").(*watcher.CrdWatcher)
+ metricsClientset := fakemetrics.NewSimpleClientset()
+ w := watcher.NewCrdWatcher(clientSet, cmClientset, metricsClientset, record.NewFakeRecorder(1024), "chaos-monkey")
w.CleanupTimeout = 1 * time.Second
timesRestarted := &atomic.Int32{}
timesRestarted.Store(0)
@@ -368,7 +377,7 @@ func TestCRDWatcher_Restart(t *testing.T) {
go func() {
for i := range [10]int{} {
depName := fmt.Sprintf("test-%d", i)
- fakeWatch.Add(createCMC(depName, false, false, 0, 10, depName, "10s"))
+ fakeWatch.Add(createCMC(depName, false, v1.ScalingModeRandomScale, 0, 10, depName, 10*time.Second))
time.Sleep(100 * time.Millisecond)
}
@@ -379,7 +388,14 @@ func TestCRDWatcher_Restart(t *testing.T) {
clientSet.PrependReactor("get", "deployments", func(action ktest.Action) (handled bool, ret runtime.Object, err error) {
requestedName := action.(ktest.GetAction).GetName()
- return true, &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: requestedName}}, nil
+ return true, &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{Name: requestedName},
+ Spec: appsv1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": requestedName},
+ },
+ },
+ }, nil
})
// Start the watcher in background
@@ -417,7 +433,8 @@ func TestCRDWatcher_Restart(t *testing.T) {
func TestCRDWatcher_ModifyWatcherType(t *testing.T) {
clientSet := kubernetes.NewSimpleClientset()
cmClientset := cmc.NewSimpleClientset()
- w := watcher.DefaultCrdFactory(clientSet, cmClientset, record.NewFakeRecorder(1024), "chaos-monkey").(*watcher.CrdWatcher)
+ metricsClientset := fakemetrics.NewSimpleClientset()
+ w := watcher.NewCrdWatcher(clientSet, cmClientset, metricsClientset, record.NewFakeRecorder(1024), "chaos-monkey")
w.CleanupTimeout = 1 * time.Second
// Number of times each watcher has been created
@@ -450,8 +467,8 @@ func TestCRDWatcher_ModifyWatcherType(t *testing.T) {
cmClientset.PrependWatchReactor("chaosmonkeyconfigurations", func(action ktest.Action) (handled bool, ret watch.Interface, err error) {
go func() {
- fakeWatch.Add(createCMC("test-deploy", false, false, 0, 10, "test-deploy", "10s"))
- fakeWatch.Add(createCMC("test-pod", false, true, 0, 10, "test-pod", "10s"))
+ fakeWatch.Add(createCMC("test-deploy", false, v1.ScalingModeRandomScale, 0, 10, "test-deploy", 10*time.Second))
+ fakeWatch.Add(createCMC("test-pod", false, v1.ScalingModeKillPod, 0, 10, "test-pod", 10*time.Second))
}()
return true, fakeWatch, nil
@@ -486,7 +503,7 @@ func TestCRDWatcher_ModifyWatcherType(t *testing.T) {
w.Mutex.Unlock()
// Now send a Modify event
- fakeWatch.Modify(createCMC("test-deploy", false, true, 0, 10, "test-deploy", "10s"))
+ fakeWatch.Modify(createCMC("test-deploy", false, v1.ScalingModeKillPod, 0, 10, "test-deploy", 10*time.Second))
time.Sleep(100 * time.Millisecond)
// We should still have 2 watchers
@@ -507,7 +524,7 @@ func TestCRDWatcher_ModifyWatcherType(t *testing.T) {
w.Mutex.Unlock()
// Now send another Modify event
- fakeWatch.Modify(createCMC("test-pod", false, false, 0, 10, "test-pod", "10s"))
+ fakeWatch.Modify(createCMC("test-pod", false, v1.ScalingModeRandomScale, 0, 10, "test-pod", 10*time.Second))
time.Sleep(100 * time.Millisecond)
// Still 2 watchers
diff --git a/internal/watcher/deployment.go b/internal/watcher/deployment.go
index 4c864df..3a3a62f 100644
--- a/internal/watcher/deployment.go
+++ b/internal/watcher/deployment.go
@@ -62,7 +62,7 @@ func (dw *dwMetrics) unregister() {
var _ = (ConfigurableWatcher)((*DeploymentWatcher)(nil))
-func NewDeploymentWatcher(clientset kubernetes.Interface, recorder record.EventRecorderLogger, deployment *appsv1.Deployment) ConfigurableWatcher {
+func NewDeploymentWatcher(clientset kubernetes.Interface, recorder record.EventRecorderLogger, deployment *appsv1.Deployment) *DeploymentWatcher {
logrus.Infof("Creating new Deployment watcher for %s/%s", deployment.Namespace, deployment.Name)
// Build my own recorder here
diff --git a/internal/watcher/deployment_test.go b/internal/watcher/deployment_test.go
index 5478e49..0970bbf 100644
--- a/internal/watcher/deployment_test.go
+++ b/internal/watcher/deployment_test.go
@@ -87,8 +87,6 @@ func TestDeploymentWatcher_BasicBehaviour(t *testing.T) {
<-done
- t.Logf("Number of requests: %d", numberOfRequests)
-
// We should have received 10 requests
if numberOfRequests.Load() != 10 {
t.Errorf("Wrong number of requests: %d", numberOfRequests)
diff --git a/internal/watcher/namespace.go b/internal/watcher/namespace.go
index a7e1cc5..5460096 100644
--- a/internal/watcher/namespace.go
+++ b/internal/watcher/namespace.go
@@ -19,6 +19,7 @@ import (
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
+ metricsv "k8s.io/metrics/pkg/client/clientset/versioned"
)
type NamespaceWatcher struct {
@@ -28,6 +29,7 @@ type NamespaceWatcher struct {
Logrus logrus.FieldLogger
Client kubernetes.Interface
CmcClient mc.Interface
+ MetricsClient metricsv.Interface
Mutex *sync.Mutex
CrdWatchers map[string]Watcher
metrics *nwMetrics
@@ -135,7 +137,7 @@ func newNwMetrics(rootNamespace, behavior string) *nwMetrics {
}
}
-func NewNamespaceWatcher(clientset kubernetes.Interface, cmcClientset mc.Interface, recorder record.EventRecorderLogger, rootNamespace string, behavior configuration.Behavior) Watcher {
+func NewNamespaceWatcher(clientset kubernetes.Interface, cmcClientset mc.Interface, metricsClient metricsv.Interface, recorder record.EventRecorderLogger, rootNamespace string, behavior configuration.Behavior) *NamespaceWatcher {
logrus.Infof("Creating new namespace watcher for namespace %s", rootNamespace)
if clientset == nil {
@@ -165,6 +167,7 @@ func NewNamespaceWatcher(clientset kubernetes.Interface, cmcClientset mc.Interfa
Running: false,
Client: clientset,
CmcClient: cmcClientset,
+ MetricsClient: metricsClient,
WatcherTimeout: conf.Timeouts.Namespace,
}
}
@@ -354,7 +357,7 @@ func (n *NamespaceWatcher) addWatcher(namespace string) error {
return fmt.Errorf("Watcher for namespace %s already exists", namespace)
}
- n.CrdWatchers[namespace] = DefaultCrdFactory(n.Client, n.CmcClient, nil, namespace)
+ n.CrdWatchers[namespace] = DefaultCrdFactory(n.Client, n.CmcClient, n.MetricsClient, nil, namespace)
return nil
}
diff --git a/internal/watcher/namespace_test.go b/internal/watcher/namespace_test.go
index 67461de..ce5dc46 100644
--- a/internal/watcher/namespace_test.go
+++ b/internal/watcher/namespace_test.go
@@ -1,4 +1,4 @@
-package watcher_test
+package watcher
import (
"context"
@@ -12,7 +12,6 @@ import (
typedcmc "github.com/massix/chaos-monkey/internal/apis/clientset/versioned"
fakecmc "github.com/massix/chaos-monkey/internal/apis/clientset/versioned/fake"
"github.com/massix/chaos-monkey/internal/configuration"
- "github.com/massix/chaos-monkey/internal/watcher"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -21,6 +20,8 @@ import (
kubernetes "k8s.io/client-go/kubernetes/fake"
ktest "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record"
+ metricsv "k8s.io/metrics/pkg/client/clientset/versioned"
+ fakemetricsv "k8s.io/metrics/pkg/client/clientset/versioned/fake"
)
type FakeCrdWatcher struct {
@@ -65,7 +66,8 @@ func (f *FakeCrdWatcher) Close() error {
var cmcClientset = fakecmc.NewSimpleClientset()
func TestNamespaceWatcher_Create(t *testing.T) {
- w := watcher.DefaultNamespaceFactory(kubernetes.NewSimpleClientset(), cmcClientset, record.NewFakeRecorder(1024), "chaos-monkey", configuration.BehaviorAllowAll)
+ metricsClientset := fakemetricsv.NewSimpleClientset()
+ w := NewNamespaceWatcher(kubernetes.NewSimpleClientset(), cmcClientset, metricsClientset, record.NewFakeRecorder(1024), "chaos-monkey", configuration.BehaviorAllowAll)
defer w.Close()
if w.IsRunning() {
@@ -73,14 +75,99 @@ func TestNamespaceWatcher_Create(t *testing.T) {
}
}
+func TestNamespaceWatcher_IsNamespaceAllowed(t *testing.T) {
+ genNamespace := func(label string) *corev1.Namespace {
+ if label == "" {
+ return &corev1.Namespace{
+ ObjectMeta: v1.ObjectMeta{
+ Labels: map[string]string{},
+ },
+ }
+ } else {
+ return &corev1.Namespace{
+ ObjectMeta: v1.ObjectMeta{
+ Labels: map[string]string{
+ configuration.NamespaceLabel: label,
+ },
+ },
+ }
+ }
+ }
+
+ nsWithoutLabel := genNamespace("")
+ nsWithOkLabel := genNamespace("true")
+ nsWithKoLabel := genNamespace("false")
+
+ t.Run("BehaviorAllowAll", func(t *testing.T) {
+ ns := &NamespaceWatcher{
+ Behavior: configuration.BehaviorAllowAll,
+ }
+
+ if ns.IsNamespaceAllowed(nsWithoutLabel) != true {
+ t.Error("should be true")
+ }
+
+ if ns.IsNamespaceAllowed(nsWithOkLabel) != true {
+ t.Error("should be true")
+ }
+
+ if ns.IsNamespaceAllowed(nsWithKoLabel) != false {
+ t.Error("should be false")
+ }
+ })
+
+ t.Run("BehaviorDenyAll", func(t *testing.T) {
+ ns := &NamespaceWatcher{
+ Behavior: configuration.BehaviorDenyAll,
+ }
+
+ if ns.IsNamespaceAllowed(nsWithoutLabel) != false {
+ t.Error("should be false")
+ }
+
+ if ns.IsNamespaceAllowed(nsWithOkLabel) != true {
+ t.Error("should be true")
+ }
+
+ if ns.IsNamespaceAllowed(nsWithKoLabel) != false {
+ t.Error("should be false")
+ }
+ })
+}
+
+func TestNamespaceWatcher_RestartWatch(t *testing.T) {
+ ns := &NamespaceWatcher{
+ NamespaceInterface: kubernetes.NewSimpleClientset().CoreV1().Namespaces(),
+ Mutex: &sync.Mutex{},
+ Logrus: logrus.New(),
+ CrdWatchers: map[string]Watcher{
+ "w1": &FakeCrdWatcher{Mutex: &sync.Mutex{}},
+ "w2": &FakeCrdWatcher{Mutex: &sync.Mutex{}},
+ "w3": &FakeCrdWatcher{Mutex: &sync.Mutex{}},
+ },
+ }
+
+ t.Run("Cleans the watchers properly", func(t *testing.T) {
+ wg := &sync.WaitGroup{}
+ if _, err := ns.restartWatch(context.TODO(), wg); err != nil {
+ t.Errorf("received error: %s", err)
+ }
+
+ if l := len(ns.CrdWatchers); l != 0 {
+ t.Errorf("still %d watchers remaining", l)
+ }
+ })
+}
+
func TestNamespaceWatcher_BasicBehaviour(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
clientSet := kubernetes.NewSimpleClientset()
- w := watcher.DefaultNamespaceFactory(clientSet, cmcClientset, record.NewFakeRecorder(1024), "chaos-monkey", configuration.BehaviorAllowAll).(*watcher.NamespaceWatcher)
+ metricsClientset := fakemetricsv.NewSimpleClientset()
+ w := NewNamespaceWatcher(clientSet, cmcClientset, metricsClientset, record.NewFakeRecorder(1024), "chaos-monkey", configuration.BehaviorAllowAll)
w.CleanupTimeout = 1 * time.Second
// Inject my CRD Factory
- watcher.DefaultCrdFactory = func(k.Interface, typedcmc.Interface, record.EventRecorderLogger, string) watcher.Watcher {
+ DefaultCrdFactory = func(k.Interface, typedcmc.Interface, metricsv.Interface, record.EventRecorderLogger, string) Watcher {
return &FakeCrdWatcher{Mutex: &sync.Mutex{}}
}
@@ -161,7 +248,8 @@ func TestNamespaceWatcher_BasicBehaviour(t *testing.T) {
func TestNamespaceWatcher_Error(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
clientset := kubernetes.NewSimpleClientset()
- w := watcher.DefaultNamespaceFactory(clientset, cmcClientset, record.NewFakeRecorder(1024), "chaos-monkey", configuration.BehaviorAllowAll)
+ metricsClientset := fakemetricsv.NewSimpleClientset()
+ w := NewNamespaceWatcher(clientset, cmcClientset, metricsClientset, record.NewFakeRecorder(1024), "chaos-monkey", configuration.BehaviorAllowAll)
clientset.PrependWatchReactor("namespaces", func(action ktest.Action) (handled bool, ret watch.Interface, err error) {
fakeWatch := watch.NewFake()
@@ -201,11 +289,12 @@ func TestNamespaceWatcher_Error(t *testing.T) {
func TestNamespaceWatcher_Cleanup(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
clientset := kubernetes.NewSimpleClientset()
- w := watcher.DefaultNamespaceFactory(clientset, cmcClientset, record.NewFakeRecorder(1024), "chaos-monkey", configuration.BehaviorAllowAll).(*watcher.NamespaceWatcher)
+ metricsClientset := fakemetricsv.NewSimpleClientset()
+ w := NewNamespaceWatcher(clientset, cmcClientset, metricsClientset, record.NewFakeRecorder(1024), "chaos-monkey", configuration.BehaviorAllowAll)
w.CleanupTimeout = 1 * time.Second
// Add some fake watchers
- w.CrdWatchers = map[string]watcher.Watcher{
+ w.CrdWatchers = map[string]Watcher{
"test-1": &FakeCrdWatcher{Mutex: &sync.Mutex{}, Running: true},
"test-2": &FakeCrdWatcher{Mutex: &sync.Mutex{}, Running: false},
"test-3": &FakeCrdWatcher{Mutex: &sync.Mutex{}, Running: false},
@@ -254,12 +343,13 @@ func TestNamespaceWatcher_Cleanup(t *testing.T) {
func TestNamespaceWatcher_RestartWatcher(t *testing.T) {
clientset := kubernetes.NewSimpleClientset()
- w := watcher.DefaultNamespaceFactory(clientset, cmcClientset, record.NewFakeRecorder(1024), "chaos-monkey", configuration.BehaviorAllowAll)
- w.(*watcher.NamespaceWatcher).CleanupTimeout = 1 * time.Second
+ metricsClientset := fakemetricsv.NewSimpleClientset()
+ w := NewNamespaceWatcher(clientset, cmcClientset, metricsClientset, record.NewFakeRecorder(1024), "chaos-monkey", configuration.BehaviorAllowAll)
+ w.CleanupTimeout = 1 * time.Second
timeAsked := &atomic.Int32{}
timeAsked.Store(0)
- watcher.DefaultCrdFactory = func(clientset k.Interface, cmcClientset typedcmc.Interface, recorder record.EventRecorderLogger, namespace string) watcher.Watcher {
+ DefaultCrdFactory = func(clientset k.Interface, cmcClientset typedcmc.Interface, _ metricsv.Interface, recorder record.EventRecorderLogger, namespace string) Watcher {
return &FakeCrdWatcher{Mutex: &sync.Mutex{}}
}
@@ -310,7 +400,7 @@ func TestNamespaceWatcher_ModifyNamespace(t *testing.T) {
return true, fakeWatch, nil
})
- watcher.DefaultCrdFactory = func(clientset k.Interface, cmcClientset typedcmc.Interface, recorder record.EventRecorderLogger, namespace string) watcher.Watcher {
+ DefaultCrdFactory = func(clientset k.Interface, cmcClientset typedcmc.Interface, _ metricsv.Interface, recorder record.EventRecorderLogger, namespace string) Watcher {
return &FakeCrdWatcher{Mutex: &sync.Mutex{}}
}
@@ -324,7 +414,8 @@ func TestNamespaceWatcher_ModifyNamespace(t *testing.T) {
}
}
- w := watcher.NewNamespaceWatcher(clientset, nil, record.NewFakeRecorder(1024), "chaosmonkey", configuration.BehaviorAllowAll).(*watcher.NamespaceWatcher)
+ metricsClientset := fakemetricsv.NewSimpleClientset()
+ w := NewNamespaceWatcher(clientset, nil, metricsClientset, record.NewFakeRecorder(1024), "chaosmonkey", configuration.BehaviorAllowAll)
w.WatcherTimeout = 24 * time.Hour
w.CleanupTimeout = 300 * time.Millisecond
@@ -389,7 +480,7 @@ func TestNamespaceWatcher_ModifyNamespace(t *testing.T) {
// Change the behavior to "DenyAll" and reset the watcher
w.Mutex.Lock()
w.Behavior = configuration.BehaviorDenyAll
- w.CrdWatchers = map[string]watcher.Watcher{}
+ w.CrdWatchers = map[string]Watcher{}
w.Mutex.Unlock()
t.Run("DenyAll", func(t *testing.T) {
diff --git a/internal/watcher/pod.go b/internal/watcher/pod.go
index 93cd678..1ea94e2 100644
--- a/internal/watcher/pod.go
+++ b/internal/watcher/pod.go
@@ -92,7 +92,7 @@ func newPwMetrics(namespace, combinedLabelSelector string) *pwMetrics {
var _ = (ConfigurableWatcher)((*PodWatcher)(nil))
-func NewPodWatcher(clientset kubernetes.Interface, recorder record.EventRecorderLogger, namespace string, labelSelector ...string) ConfigurableWatcher {
+func NewPodWatcher(clientset kubernetes.Interface, recorder record.EventRecorderLogger, namespace string, labelSelector ...string) *PodWatcher {
logrus.Infof("Creating pod watcher in namespace %s for label selector %s", namespace, labelSelector)
if recorder == nil {
diff --git a/internal/watcher/pod_test.go b/internal/watcher/pod_test.go
index c38b63b..1064b33 100644
--- a/internal/watcher/pod_test.go
+++ b/internal/watcher/pod_test.go
@@ -31,7 +31,7 @@ func TestPodWatcher_Create(t *gtest.T) {
clientset := fake.NewSimpleClientset()
recorder := record.NewFakeRecorder(1024)
- p := watcher.NewPodWatcher(clientset, recorder, "test", "app=name").(*watcher.PodWatcher)
+ p := watcher.NewPodWatcher(clientset, recorder, "test", "app=name")
if p == nil {
t.Fatal("Failed to create pod watcher")
}
@@ -56,7 +56,7 @@ func TestPodWatcher_BasicBehaviour(t *gtest.T) {
logrus.SetLevel(logrus.DebugLevel)
clientset := fake.NewSimpleClientset()
recorder := record.NewFakeRecorder(1024)
- p := watcher.NewPodWatcher(clientset, recorder, "test", "app=name").(*watcher.PodWatcher)
+ p := watcher.NewPodWatcher(clientset, recorder, "test", "app=name")
pause := make(chan interface{})
defer close(pause)
@@ -89,8 +89,6 @@ func TestPodWatcher_BasicBehaviour(t *gtest.T) {
})
clientset.PrependReactor("delete", "pods", func(action ktest.Action) (handled bool, ret runtime.Object, err error) {
- t.Logf("Asked to delete %s", action.(ktest.DeleteAction).GetName())
-
return true, nil, nil
})
@@ -161,7 +159,7 @@ func TestPodWatcher_DeletePods(t *gtest.T) {
logrus.SetLevel(logrus.DebugLevel)
clientset := fake.NewSimpleClientset()
recorder := record.NewFakeRecorder(1024)
- p := watcher.NewPodWatcher(clientset, recorder, "test", "app=name").(*watcher.PodWatcher)
+ p := watcher.NewPodWatcher(clientset, recorder, "test", "app=name")
fakeWatch := watch.NewFake()
p.SetTimeout(100 * time.Millisecond)
@@ -190,7 +188,6 @@ func TestPodWatcher_DeletePods(t *gtest.T) {
clientset.PrependReactor("delete", "pods", func(action ktest.Action) (handled bool, ret runtime.Object, err error) {
podName := action.(ktest.DeleteAction).GetName()
- t.Logf("Asked to delete %s", podName)
// We can delete the first 5 pods, not the other ones
switch podName {
@@ -237,7 +234,7 @@ func TestPodWatcher_NotEnabled(t *gtest.T) {
logrus.SetLevel(logrus.DebugLevel)
clientset := fake.NewSimpleClientset()
recorder := record.NewFakeRecorder(1024)
- p := watcher.NewPodWatcher(clientset, recorder, "test", "app=name").(*watcher.PodWatcher)
+ p := watcher.NewPodWatcher(clientset, recorder, "test", "app=name")
fakeWatch := watch.NewFake()
p.SetTimeout(100 * time.Millisecond)
@@ -266,8 +263,6 @@ func TestPodWatcher_NotEnabled(t *gtest.T) {
clientset.PrependReactor("delete", "pods", func(action ktest.Action) (handled bool, ret runtime.Object, err error) {
podName := action.(ktest.DeleteAction).GetName()
- t.Logf("Asked to delete %s", podName)
-
go fakeWatch.Delete(createPod(podName))
return true, nil, nil
})
@@ -286,7 +281,6 @@ func TestPodWatcher_NotEnabled(t *gtest.T) {
<-podsAdded
time.Sleep(500 * time.Millisecond)
- t.Log("First batch of assertions")
p.Mutex.Lock()
// We should still have 10 pods in the list
if cnt := len(p.PodList); cnt != 10 {
@@ -295,19 +289,16 @@ func TestPodWatcher_NotEnabled(t *gtest.T) {
p.Mutex.Unlock()
p.SetEnabled(true)
- t.Log("First batch of assertions over")
// Wait some more time for the pods to be deleted
time.Sleep(1 * time.Second)
- t.Log("Second batch of assertions")
p.Mutex.Lock()
// We should now have 0 pods in the list
if cnt := len(p.PodList); cnt != 0 {
t.Errorf("Was expecting 0 pods in the list, got %d instead", cnt)
}
p.Mutex.Unlock()
- t.Log("Second batch of assertions over")
cancel()
<-done
@@ -317,7 +308,7 @@ func TestPodWatcher_Restart(t *gtest.T) {
logrus.SetLevel(logrus.DebugLevel)
clientset := fake.NewSimpleClientset()
recorder := record.NewFakeRecorder(1024)
- p := watcher.NewPodWatcher(clientset, recorder, "test", "app=name").(*watcher.PodWatcher)
+ p := watcher.NewPodWatcher(clientset, recorder, "test", "app=name")
p.SetTimeout(5 * time.Hour)
p.SetEnabled(true)
diff --git a/internal/watcher/types.go b/internal/watcher/types.go
index 548e7d2..819bd59 100644
--- a/internal/watcher/types.go
+++ b/internal/watcher/types.go
@@ -5,11 +5,13 @@ import (
"io"
"time"
- mc "github.com/massix/chaos-monkey/internal/apis/clientset/versioned"
+ cmcv "github.com/massix/chaos-monkey/internal/apis/clientset/versioned"
"github.com/massix/chaos-monkey/internal/configuration"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/client-go/kubernetes"
+ typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
+ metricsv "k8s.io/metrics/pkg/client/clientset/versioned"
)
type Watcher interface {
@@ -29,16 +31,32 @@ type ConfigurableWatcher interface {
}
type (
- NamespaceFactory func(clientset kubernetes.Interface, cmcClientset mc.Interface, recorder record.EventRecorderLogger, rootNamespace string, behavior configuration.Behavior) Watcher
- CrdFactory func(clientset kubernetes.Interface, cmcClientset mc.Interface, recorder record.EventRecorderLogger, namespace string) Watcher
+ NamespaceFactory func(clientset kubernetes.Interface, cmcClientset cmcv.Interface, metricsClientset metricsv.Interface, recorder record.EventRecorderLogger, rootNamespace string, behavior configuration.Behavior) Watcher
+ CrdFactory func(clientset kubernetes.Interface, cmcClientset cmcv.Interface, metricsClientset metricsv.Interface, recorder record.EventRecorderLogger, namespace string) Watcher
DeploymentFactory func(clientset kubernetes.Interface, recorder record.EventRecorderLogger, deployment *appsv1.Deployment) ConfigurableWatcher
PodFactory func(clientset kubernetes.Interface, recorder record.EventRecorderLogger, namespace string, labelSelector ...string) ConfigurableWatcher
+ AntiHPAFactory func(client metricsv.Interface, podset typedcorev1.PodInterface, namespace, podLabel string) ConfigurableWatcher
)
// Default factories
var (
- DefaultNamespaceFactory NamespaceFactory = NewNamespaceWatcher
- DefaultCrdFactory CrdFactory = NewCrdWatcher
- DefaultDeploymentFactory DeploymentFactory = NewDeploymentWatcher
- DefaultPodFactory PodFactory = NewPodWatcher
+ DefaultNamespaceFactory NamespaceFactory = func(clientset kubernetes.Interface, cmcClientset cmcv.Interface, metricsClientset metricsv.Interface, recorder record.EventRecorderLogger, rootNamespace string, behavior configuration.Behavior) Watcher {
+ return NewNamespaceWatcher(clientset, cmcClientset, metricsClientset, recorder, rootNamespace, behavior)
+ }
+
+ DefaultCrdFactory CrdFactory = func(clientset kubernetes.Interface, cmcClientset cmcv.Interface, metricsClientset metricsv.Interface, recorder record.EventRecorderLogger, namespace string) Watcher {
+ return NewCrdWatcher(clientset, cmcClientset, metricsClientset, recorder, namespace)
+ }
+
+ DefaultDeploymentFactory DeploymentFactory = func(clientset kubernetes.Interface, recorder record.EventRecorderLogger, deployment *appsv1.Deployment) ConfigurableWatcher {
+ return NewDeploymentWatcher(clientset, recorder, deployment)
+ }
+
+ DefaultPodFactory PodFactory = func(clientset kubernetes.Interface, recorder record.EventRecorderLogger, namespace string, labelSelector ...string) ConfigurableWatcher {
+ return NewPodWatcher(clientset, recorder, namespace, labelSelector...)
+ }
+
+ DefaultAntiHPAFactory AntiHPAFactory = func(client metricsv.Interface, podset typedcorev1.PodInterface, namespace, podLabel string) ConfigurableWatcher {
+ return NewAntiHPAWatcher(client, podset, namespace, podLabel)
+ }
)
diff --git a/main.tf b/main.tf
index a771ffa..27e6888 100644
--- a/main.tf
+++ b/main.tf
@@ -2,23 +2,23 @@ terraform {
required_providers {
kind = {
source = "tehcyx/kind"
- version = "~>0.5.1"
+ version = "~>0.5"
}
docker = {
source = "kreuzwerker/docker"
- version = "~>3.0.1"
+ version = "~>3.0"
}
shell = {
source = "scottwinkler/shell"
- version = "~>1.7.10"
+ version = "~>1.7"
}
kubernetes = {
source = "hashicorp/kubernetes"
- version = "~>2.31.0"
+ version = "~>2.31"
}
kubectl = {
source = "alekc/kubectl"
- version = "~>2.0.4"
+ version = "~>2.0"
}
}
}
@@ -59,6 +59,7 @@ resource "docker_image" "chaos-monkey-image" {
triggers = {
dockerFile = sha256(file("${path.module}/Dockerfile"))
binFile = sha256(filebase64("${path.module}/bin/chaos-monkey"))
+ certFile = sha256(file("${path.module}/certs/chaos-monkey.chaosmonkey.svc.crt"))
}
}
@@ -89,7 +90,7 @@ resource "kubernetes_namespace" "target-namespace" {
resource "kubectl_manifest" "deployment-mode-crd" {
yaml_body = </dev/null; then
@@ -60,19 +56,20 @@ fi
deploymentCount=$(${KUBECTL} get deployments --namespace=target --no-headers | wc -l)
debug "target namespace contains ${deploymentCount} deployment(s)"
-if [[ ${deploymentCount} != 2 ]]; then
- panic "target namespace should contain 2 deployments"
+if [[ ${deploymentCount} != 3 ]]; then
+ panic "target namespace should contain 3 deployments"
fi
info "Checking ChaosMonkeyConfigurations"
cmcCount=$(${KUBECTL} get cmc --namespace=target --no-headers | wc -l)
debug "target namespace contains ${cmcCount} cmc(s)"
-if [[ ${cmcCount} != 2 ]]; then
- panic "target namespace should contain 2 cmc"
+if [[ ${cmcCount} != 3 ]]; then
+ panic "target namespace should contain 3 cmc"
fi
disruptScale="nginx-disrupt-scale"
disruptPods="nginx-disrupt-pods"
+disruptHpa="nginx-antihpa"
info "Resetting CMCs to initial values"
@@ -80,7 +77,7 @@ debug "Force enable ${disruptScale}"
${KUBECTL} -n target patch cmc chaosmonkey-${disruptScale} --type json --patch-file=/dev/stdin <<-JSONPATCH >/dev/null
[
{"op": "replace", "path": "/spec/enabled", "value": true},
- {"op": "replace", "path": "/spec/podMode", "value": false},
+ {"op": "replace", "path": "/spec/scalingMode", "value": "randomScale"},
{"op": "replace", "path": "/spec/minReplicas", "value": 2},
{"op": "replace", "path": "/spec/maxReplicas", "value": 5}
]
@@ -90,29 +87,47 @@ debug "Force enable ${disruptPods}"
${KUBECTL} -n target patch cmc chaosmonkey-${disruptPods} --type json --patch-file=/dev/stdin <<-JSONPATCH >/dev/null
[
{"op": "replace", "path": "/spec/enabled", "value": true},
- {"op": "replace", "path": "/spec/podMode", "value": true},
+ {"op": "replace", "path": "/spec/scalingMode", "value": "killPod"},
{"op": "replace", "path": "/spec/minReplicas", "value": 6},
{"op": "replace", "path": "/spec/maxReplicas", "value": 8}
]
JSONPATCH
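+
+# antiPressure relies on the Kubernetes metrics API; the metrics-server manifest is added in tests/manifests/components.yaml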
+debug "Force enable ${disruptHpa}"
+${KUBECTL} -n target patch cmc chaosmonkey-${disruptHpa} --type json --patch-file=/dev/stdin <<-JSONPATCH >/dev/null
+[
+ {"op": "replace", "path": "/spec/enabled", "value": true},
+ {"op": "replace", "path": "/spec/scalingMode", "value": "antiPressure"},
+ {"op": "replace", "path": "/spec/timeout", "value": "10s"}
+]
+JSONPATCH
+
info "Resetting ${disruptPods} to 2 replicas"
${KUBECTL} -n target scale deployment ${disruptPods} --replicas=2 >/dev/null
+info "Resetting ${disruptHpa} to 6 replicas"
+${KUBECTL} -n target scale deployment ${disruptHpa} --replicas=6 >/dev/null
+
info "Checking events"
if ! ${KUBECTL} -n target get events | grep ChaosMonkey &>/dev/null; then
warn "no events found in target namespace, please check the chaosmonkey pod logs (not considered as an error)"
fi
-info "Checking CMC with podMode=false (${disruptScale})"
+info "Checking CMC with scalingMode=randomScale (${disruptScale})"
replicasShouldChange -d ${disruptScale} -n target -r 5 -s 10
-info "Checking CMC with podMode=true (${disruptPods})"
+info "Checking CMC with scalingMode=killPod (${disruptPods})"
podsShouldChange -l "app=${disruptPods}" -n target -r 5
-info "Checking number of pods"
+info "Checking CMC with scalingMode=antiPressure (${disruptHpa})"
+podsShouldChange -l "app=${disruptHpa}" -n target -r 5
+
+info "Checking number of pods for ${disruptPods}"
numberOfPodsShouldNotChange -l "app=${disruptPods}" -n target -t 2 -L 5
+info "Checking number of pods for ${disruptHpa}"
+numberOfPodsShouldNotChange -l "app=${disruptHpa}" -n target -t 6 -L 5
+
info "Stopping ${disruptScale} CMC"
if ! ${KUBECTL} patch -n target cmc chaosmonkey-${disruptScale} --type json --patch-file=/dev/stdin <<-JSONPATCH >/dev/null; then
[
@@ -122,22 +137,34 @@ JSONPATCH
panic "Could not patch CMC for ${disruptScale}"
fi
+info "Stopping ${disruptHpa} CMC"
+if ! ${KUBECTL} patch -n target cmc chaosmonkey-${disruptHpa} --type json --patch-file=/dev/stdin <<-JSONPATCH >/dev/null; then
+[
+ { "op": "replace", "path": "/spec/enabled", "value": false }
+]
+JSONPATCH
+ panic "Could not patch CMC for ${disruptHpa}"
+fi
+
info "Checking that CMC ${disruptScale} has been stopped correctly (number of scales should not change over time)"
replicasShouldNotChange -d ${disruptScale} -n target -l 5 -s 10
-info "Switching ${disruptPods} from podMode=true to podMode=false"
-if ! ${KUBECTL} patch -n target cmc chaosmonkey-${disruptPods} --type json --patch '[{"op":"replace", "path":"/spec/podMode", "value":false}]' >/dev/null; then
+info "Checking that CMC ${disruptHpa} has been stopped correctly (pods should not change over time)"
+podsShouldNotChange -l "app=${disruptHpa}" -n target -r 4 -s 10
+
+info "Switching ${disruptPods} from scalingMode=killPod to scalingMode=randomScale"
+if ! ${KUBECTL} patch -n target cmc chaosmonkey-${disruptPods} --type json --patch '[{"op":"replace", "path":"/spec/scalingMode", "value":"randomScale"}]' >/dev/null; then
panic "Could not patch CMC ${disruptPods}"
fi
info "Checking that CMC ${disruptPods} is now correctly modifying the replicas of the deployment"
replicasShouldChange -d ${disruptPods} -n target -r 5
-info "Switching ${disruptScale} from podMode=false to podMode=true and re-enabling it"
+info "Switching ${disruptScale} from scalingMode=randomScale to scalingMode=killPod and re-enabling it"
if ! ${KUBECTL} patch -n target cmc chaosmonkey-${disruptScale} --type json --patch-file=/dev/stdin <<-JSONPATCH >/dev/null; then
[
{ "op": "replace", "path": "/spec/enabled", "value": true },
- { "op": "replace", "path": "/spec/podMode", "value": true }
+ { "op": "replace", "path": "/spec/scalingMode", "value": "killPod" }
]
JSONPATCH
panic "Could not patch CMC ${disruptScale}"
@@ -181,6 +208,8 @@ ALLMETRICS=(
"chaos_monkey_crdwatcher_pw_active"
"chaos_monkey_crdwatcher_dw_spawned"
"chaos_monkey_crdwatcher_dw_active"
+ "chaos_monkey_crdwatcher_ah_spawned"
+ "chaos_monkey_crdwatcher_ah_active"
"chaos_monkey_crdwatcher_event_duration_bucket"
"chaos_monkey_crdwatcher_restarts"
"chaos_monkey_podwatcher_pods_added"
@@ -191,6 +220,9 @@ ALLMETRICS=(
"chaos_monkey_deploymentwatcher_deployments_rescaled"
"chaos_monkey_deploymentwatcher_random_distribution"
"chaos_monkey_deploymentwatcher_last_scale"
+ "chaos_monkey_antihpawatcher_runs"
+ "chaos_monkey_antihpawatcher_pods_killed"
+ "chaos_monkey_antihpawatcher_average_cpu"
)
for m in "${ALLMETRICS[@]}"; do
metricShouldExist -m "$m" -h "localhost" -p "9090"
@@ -218,8 +250,8 @@ JSONPATCH
panic "Could not patch namespace"
fi
-# Wait for the ChaosMonkey to terminate
-sleep 5
+# Wait for all the ChaosMonkeys to terminate
+sleep 15
info "Checking that chaosmonkey is disabled for target namespace"
podsOfNamespaceShouldNotChange -n target
@@ -249,7 +281,7 @@ info "Patch the CMC configurations to their initial values"
if ! ${KUBECTL} -n target patch cmc chaosmonkey-${disruptScale} --type json --patch-file=/dev/stdin <<-JSONPATCH >/dev/null; then
[
{"op": "replace", "path": "/spec/enabled", "value": true},
- {"op": "replace", "path": "/spec/podMode", "value": false},
+ {"op": "replace", "path": "/spec/scalingMode", "value": "randomScale"},
{"op": "replace", "path": "/spec/minReplicas", "value": 2},
{"op": "replace", "path": "/spec/maxReplicas", "value": 4}
]
@@ -260,7 +292,7 @@ fi
if ! ${KUBECTL} -n target patch cmc chaosmonkey-${disruptPods} --type json --patch-file=/dev/stdin <<-JSONPATCH >/dev/null; then
[
{"op": "replace", "path": "/spec/enabled", "value": true},
- {"op": "replace", "path": "/spec/podMode", "value": true},
+ {"op": "replace", "path": "/spec/scalingMode", "value": "killPod"},
{"op": "replace", "path": "/spec/minReplicas", "value": 0},
{"op": "replace", "path": "/spec/maxReplicas", "value": 1}
]
@@ -283,4 +315,30 @@ podsShouldChange -l "app=${disruptPods}" -n target -r 5
info "Check that the replicas are changing again"
replicasShouldChange -d "${disruptScale}" -n target -r 5
+info "Injecting older version of the CMC"
+if ! ${KUBECTL} -n target delete cmc chaosmonkey-${disruptPods} >/dev/null; then
+ panic "Could not delete CMC ${disruptPods}"
+fi
+
+cat <<-EOF | ${KUBECTL} apply -f - >/dev/null
+ apiVersion: cm.massix.github.io/v1alpha1
+ kind: ChaosMonkeyConfiguration
+ metadata:
+ name: chaosmonkey-${disruptPods}
+ namespace: target
+ spec:
+ minReplicas: 0
+ maxReplicas: 1
+ podMode: true
+ timeout: 30s
+ enabled: true
+ deploymentName: "${disruptPods}"
+EOF
+
+# If the deployment name is readable through the new schema path (.spec.deployment.name), the old CMC was converted correctly
+deploymentName=$(${KUBECTL} -n target get cmc "chaosmonkey-${disruptPods}" -o jsonpath='{.spec.deployment.name}')
+if [ "${deploymentName}" != "${disruptPods}" ]; then
+ panic "Old CMC chaosmonkey-${disruptPods} was not converted correctly (deployment name: '${deploymentName}')"
+fi
+fi
+
info "All tests passed!"
diff --git a/tests/library.sh b/tests/library.sh
index 681fbaf..37d07f3 100644
--- a/tests/library.sh
+++ b/tests/library.sh
@@ -32,6 +32,7 @@ function checkProgram() {
# Check that we have everything we need to run the tests
checkProgram kubectl "Please install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/"
checkProgram curl "Please install curl: https://curl.se/download.html"
+checkProgram jq "Please install jq: https://stedolan.github.io/jq/download/"
# Checks that the scale value of a given deployment change over time
# @param -d deploymentName (mandatory), the name of the deployment to watch
@@ -226,6 +227,74 @@ function podsShouldChange() {
fi
}
+# Checks that the pods of a given deployment do not change over time
+# @param -l selector (mandatory), the selector of the deployment
+# @param -n namespace (mandatory), the namespace of the deployment
+# @param -r retries (default: 10), maximum number of retries
+# @param -s sleep (default: 10), seconds to sleep between loops
+function podsShouldNotChange() {
+ local OPTIND OPTARG
+
+ local selector=""
+ local namespace=""
+ local -i retries=10
+ local -i sleepDuration=10
+
+ local -i completedLoops=0
+ local -r jsonPath="{.items[*].metadata.name}"
+ local -i testSuccess=1 # assume success until the pod list changes
+ local currentPods
+ local newPods
+
+ while getopts "l:n:r:s:" opt; do
+ case "${opt}" in
+ l)
+ selector="${OPTARG}"
+ ;;
+ n)
+ namespace="${OPTARG}"
+ ;;
+ r)
+ retries="${OPTARG}"
+ ;;
+ s)
+ sleepDuration="${OPTARG}"
+ ;;
+ "?")
+ panic "Invalid option: -${OPTARG}"
+ ;;
+ esac
+ done
+
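+ # Snapshot the current pod names; the check fails if this list ever differs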
+ currentPods=$(kubectl get pods -n "${namespace}" --selector "${selector}" -o jsonpath="${jsonPath}")
+ newPods=${currentPods}
+
+ debug "podsShouldNotChange() for ${selector}, max ${retries} retries, namespace: ${namespace}, sleep: ${sleepDuration}"
+
+ completedLoops=0
+
+ info "Checking that pods for ${selector} are not changing"
+
+ while [[ ${completedLoops} -lt ${retries} ]]; do
+ newPods=$(kubectl get pods -n "${namespace}" --selector "${selector}" -o jsonpath="${jsonPath}")
+ debug "Current pods: ${currentPods}, new pods: ${newPods}"
+ completedLoops=$((completedLoops + 1))
+
+ if [ "${newPods}" != "${currentPods}" ]; then
+ testSuccess=0
+ break
+ fi
+
+ debug "Pods for \"${selector}\" are still the same ($((retries - completedLoops)) loops left)"
+ sleep ${sleepDuration}
+ done
+
+ if [[ ${testSuccess} -eq 0 ]]; then
+ panic "Pods for \"${selector}\" did change after ${retries} loops"
+ fi
+}
+
# Checks that the number of pods do not change over time
# @param -l selector (mandatory), the selector of the deployment
# @param -n namespace (default: ""), the namespace of the deployment
diff --git a/tests/manifests/components.yaml b/tests/manifests/components.yaml
new file mode 100644
index 0000000..debd94d
--- /dev/null
+++ b/tests/manifests/components.yaml
@@ -0,0 +1,202 @@
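+# metrics-server, based on the upstream components.yaml, with --kubelet-insecure-tls added so it works in the local test cluster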
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ k8s-app: metrics-server
+ rbac.authorization.k8s.io/aggregate-to-admin: "true"
+ rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ rbac.authorization.k8s.io/aggregate-to-view: "true"
+ name: system:aggregated-metrics-reader
+rules:
+ - apiGroups:
+ - metrics.k8s.io
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - nodes/metrics
+ verbs:
+ - get
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server-auth-reader
+ namespace: kube-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+subjects:
+ - kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server:system:auth-delegator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:auth-delegator
+subjects:
+ - kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:metrics-server
+subjects:
+ - kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+spec:
+ ports:
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: https
+ selector:
+ k8s-app: metrics-server
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ k8s-app: metrics-server
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 0
+ template:
+ metadata:
+ labels:
+ k8s-app: metrics-server
+ spec:
+ containers:
+ - args:
+ - --cert-dir=/tmp
+ - --secure-port=10250
+ - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+ - --kubelet-use-node-status-port
+ - --metric-resolution=15s
+ - --kubelet-insecure-tls
+ image: registry.k8s.io/metrics-server/metrics-server:v0.7.1
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /livez
+ port: https
+ scheme: HTTPS
+ periodSeconds: 10
+ name: metrics-server
+ ports:
+ - containerPort: 10250
+ name: https
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /readyz
+ port: https
+ scheme: HTTPS
+ initialDelaySeconds: 20
+ periodSeconds: 10
+ resources:
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ serviceAccountName: metrics-server
+ volumes:
+ - emptyDir: {}
+ name: tmp-dir
+---
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: v1beta1.metrics.k8s.io
+spec:
+ group: metrics.k8s.io
+ groupPriorityMinimum: 100
+ insecureSkipTLSVerify: true
+ service:
+ name: metrics-server
+ namespace: kube-system
+ version: v1beta1
+ versionPriority: 100