---
# Prepares the environment for the apps-placement exercise, and cleans up afterwards.
#
# TODO: create some projects:
# - one with a project node selector
#   and a deployment with conflicting node {selector|affinity} ???
# - one with a very low quota
#   and a deployment which exceeds the quota
# - taint a node
#   and select a deployment to run a pod on it
# then debug these conditions and fix them
#
# TODO: make two nodes unschedulable, create a project, and deploy an application, scale to three
# make the nodes schedulable again, use podAntiAffinity to disperse the pods, scale to 6 and see scheduling
#
# simulate load (loadtest? loadgenerator?) beyond container's cpu limit and then improve performance by raising limit
#
# TODO: probes with extremely low cpu limit, see them crashloop, fix it
#
# use stress-ng app to allocate all memory (more than limit), monitor the metrics to diagnose the crash
#
# client-server apps, low limits, monitor performance
#
# custom metrics, grafana
#
# TODO: run two instances on the same node, no pdb, drain the node - observe failure in another terminal
# repeat with pdb, see no failures
#
# recreate strategy, rollout a change, observe outage in another terminal
# switch to rolling w/maxUnavailable, repeat, see no failures
#
# deploy an app w/requests, generate load, observe timing
# add HPA, generate load, compare
#
- name: Prepare (or clean up) the exercise of apps-placement.
  hosts: localhost
  gather_subset: min
  become: false
  tasks:
    # Fail fast if the lab prerequisites are not in place.
    - name: Prereqs
      ansible.builtin.include_role:
        name: check-env

    # One namespace per scenario. The openshift.io/node-selector annotation is
    # only rendered where a scenario defines it (default(omit) drops it).
    - name: Ensure the projects are there
      kubernetes.core.k8s:
        kubeconfig: tmp/kubeconfig-ocp4
        validate_certs: false
        api_version: v1
        kind: Namespace
        name: "{{ item.name }}"
        resource_definition:
          metadata:
            annotations:
              openshift.io/node-selector: "{{ item.nodeselector | default(omit) }}"
      loop:
        - name: apps-selector-conflict
          nodeselector: kubernetes.io/hostname=worker03
        - name: apps-selector-impossible
        - name: apps-lowquota
        - name: apps-taint
        - name: apps-antiaffinity
        #- name: apps-lowlimit
        - name: apps-pdb

    # The pod-level nodeSelector (worker01) conflicts with the project-level
    # node-selector annotation (worker03) set above, so no node can satisfy both.
    - name: Deployment conflicting node selector
      kubernetes.core.k8s:
        kubeconfig: tmp/kubeconfig-ocp4
        validate_certs: false
        api_version: apps/v1
        kind: Deployment
        namespace: apps-selector-conflict
        name: conflict
        resource_definition:
          spec:
            replicas: 3
            selector:
              matchLabels:
                app: hello
            template:
              metadata:
                labels:
                  app: hello
              spec:
                nodeSelector:
                  kubernetes.io/hostname: worker01
                containers:
                  - name: hello
                    image: quay.io/redhattraining/hello-world-nginx:latest
                    ports:
                      - name: http
                        containerPort: 8080

    # No node carries the label "impossible: nodelabel", so the pods stay Pending.
    - name: Deployment with an impossible node selector
      kubernetes.core.k8s:
        kubeconfig: tmp/kubeconfig-ocp4
        validate_certs: false
        api_version: apps/v1
        kind: Deployment
        namespace: apps-selector-impossible
        name: select
        resource_definition:
          spec:
            replicas: 3
            selector:
              matchLabels:
                app: hello
            template:
              metadata:
                labels:
                  app: hello
              spec:
                nodeSelector:
                  impossible: nodelabel
                containers:
                  - name: hello
                    image: quay.io/redhattraining/hello-world-nginx:latest
                    ports:
                      - name: http
                        containerPort: 8080

    # Quantities quoted as strings so YAML round-tripping never retypes them.
    - name: Ensure low quota on the lowquota project
      kubernetes.core.k8s:
        kubeconfig: tmp/kubeconfig-ocp4
        validate_certs: false
        api_version: v1
        kind: ResourceQuota
        resource_definition:
          metadata:
            name: compute-quota
            namespace: apps-lowquota
          spec:
            hard:
              requests.cpu: "500m"
              requests.memory: "512Mi"
              limits.cpu: "1000m"
              limits.memory: "1Gi"

    # Each replica requests more (1Gi / 2 cpu) than the whole quota allows,
    # so the replica set cannot create its pods.
    - name: Deployment exceeding quota
      kubernetes.core.k8s:
        kubeconfig: tmp/kubeconfig-ocp4
        validate_certs: false
        api_version: apps/v1
        kind: Deployment
        namespace: apps-lowquota
        name: quota
        resource_definition:
          spec:
            replicas: 3
            selector:
              matchLabels:
                app: hello
            template:
              metadata:
                labels:
                  app: hello
              spec:
                containers:
                  - name: hello
                    image: quay.io/redhattraining/hello-world-nginx:latest
                    ports:
                      - name: http
                        containerPort: 8080
                    resources:
                      requests:
                        memory: "1Gi"
                        cpu: "2"

    # Patch (not replace) the node so only the taint list is touched.
    - name: Taint a node
      kubernetes.core.k8s:
        kubeconfig: tmp/kubeconfig-ocp4
        validate_certs: false
        api_version: v1
        kind: Node
        name: worker01
        state: patched
        resource_definition:
          spec:
            taints:
              - effect: NoSchedule
                key: foo
                value: bar

    # Pinned to the tainted node without a toleration — pods stay Pending.
    - name: Deployment targeting tainted node
      kubernetes.core.k8s:
        kubeconfig: tmp/kubeconfig-ocp4
        validate_certs: false
        api_version: apps/v1
        kind: Deployment
        namespace: apps-taint
        name: tainted
        resource_definition:
          spec:
            replicas: 3
            selector:
              matchLabels:
                app: hello
            template:
              metadata:
                labels:
                  app: hello
              spec:
                nodeSelector:
                  kubernetes.io/hostname: worker01
                containers:
                  - name: hello
                    image: quay.io/redhattraining/hello-world-nginx:latest
                    ports:
                      - name: http
                        containerPort: 8080

    # Cordon two of the workers so the next deployments land on a single node.
    - name: Make nodes unschedulable
      kubernetes.core.k8s:
        kubeconfig: tmp/kubeconfig-ocp4
        validate_certs: false
        api_version: v1
        kind: Node
        name: "{{ item }}"
        state: patched
        resource_definition:
          spec:
            unschedulable: true
      loop:
        - worker01
        - worker02

    # The podAntiAffinity below is the exercise's solution; it stays commented
    # out so the learner can add it (preferred = soft dispersal).
    - name: Deployment on the only available node, to be preferred
      kubernetes.core.k8s:
        kubeconfig: tmp/kubeconfig-ocp4
        validate_certs: false
        api_version: apps/v1
        kind: Deployment
        namespace: apps-antiaffinity
        name: dislike
        resource_definition:
          spec:
            replicas: 3
            selector:
              matchLabels:
                app: dislike
            template:
              metadata:
                labels:
                  app: dislike
              spec:
                #affinity:
                #  podAntiAffinity:
                #    preferredDuringSchedulingIgnoredDuringExecution:
                #      - weight: 10
                #        podAffinityTerm:
                #          labelSelector:
                #            matchLabels:
                #              app: hello
                #          topologyKey: kubernetes.io/hostname
                containers:
                  - name: hello
                    image: quay.io/redhattraining/hello-world-nginx:latest
                    ports:
                      - name: http
                        containerPort: 8080

    # Same idea with a hard requirement; note that
    # requiredDuringSchedulingIgnoredDuringExecution takes a LIST of terms.
    - name: Deployment on the only available node, to be required
      kubernetes.core.k8s:
        kubeconfig: tmp/kubeconfig-ocp4
        validate_certs: false
        api_version: apps/v1
        kind: Deployment
        namespace: apps-antiaffinity
        name: refuse
        resource_definition:
          spec:
            replicas: 3
            selector:
              matchLabels:
                app: refuse
            template:
              metadata:
                labels:
                  app: refuse
              spec:
                #affinity:
                #  podAntiAffinity:
                #    requiredDuringSchedulingIgnoredDuringExecution:
                #      - labelSelector:
                #          matchLabels:
                #            app: hello
                #        topologyKey: kubernetes.io/hostname
                containers:
                  - name: hello
                    image: quay.io/redhattraining/hello-world-nginx:latest
                    ports:
                      - name: http
                        containerPort: 8080

    # Uncordon the workers so the anti-affinity exercise can reschedule pods.
    - name: Make nodes schedulable again
      kubernetes.core.k8s:
        kubeconfig: tmp/kubeconfig-ocp4
        validate_certs: false
        api_version: v1
        kind: Node
        name: "{{ item }}"
        state: patched
        resource_definition:
          spec:
            unschedulable: false
      loop:
        - worker01
        - worker02

    # Soft nodeAffinity squeezes both replicas onto worker02 so draining that
    # node without a PodDisruptionBudget visibly kills the whole app.
    - name: Deployment on the same node for PDB
      kubernetes.core.k8s:
        kubeconfig: tmp/kubeconfig-ocp4
        validate_certs: false
        api_version: apps/v1
        kind: Deployment
        namespace: apps-pdb
        name: budget
        resource_definition:
          spec:
            replicas: 2
            selector:
              matchLabels:
                app: hello
            template:
              metadata:
                labels:
                  app: hello
              spec:
                affinity:
                  nodeAffinity:
                    preferredDuringSchedulingIgnoredDuringExecution:
                      - preference:
                          matchExpressions:
                            - key: kubernetes.io/hostname
                              operator: In
                              values:
                                - worker02
                        weight: 50
                containers:
                  - name: hello
                    image: quay.io/redhattraining/hello-world-nginx:latest
                    ports:
                      - name: http
                        containerPort: 8080
...