فهرست منبع

combined roles for LSO and ODF

Grega Bremec 1 ماه پیش
والد
کامیت
6089229489

+ 4 - 0
p0f/operators/roles/deploy-local-storage/defaults/main.yml

@@ -0,0 +1,4 @@
+---
+# Variables that are usually overridden.
+# Path to the cluster-admin kubeconfig consumed by every kubernetes.core
+# task in this role (relative to the playbook directory).
+kubeadmin_config: "tmp/kubeconfig-ocp4"
+...

+ 142 - 0
p0f/operators/roles/deploy-local-storage/tasks/main.yml

@@ -0,0 +1,142 @@
+---
+# Deploys the local storage operator specific resources.
+#
+# Required variables:
+#
+#   local_storage:
+#     namespace:          which namespace the LSO is in
+#     storage_class_name: what to call the storage class
+#     node_selector_key:  which nodes to scan for local volumes (just the key of the label)
+#
+# Optional variables:
+#
+#   kubeadmin_config      the administrator kubeconfig file (tmp/kubeconfig-ocp4)
+#
+# Look up the operator namespace; an empty result means the LSO
+# subscription has not been created yet.
+- name: Check if there is a namespace.
+  kubernetes.core.k8s_info:
+    api_version: v1
+    kind: namespace
+    name: "{{ local_storage.namespace }}"
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: false
+  register: prereq_ns
+
+# Abort early with a clear message rather than failing obscurely later.
+- name: Fail if not so.
+  ansible.builtin.assert:
+    that:
+      - prereq_ns.resources is defined
+      - prereq_ns.resources | length == 1
+    success_msg: "OK, namespace found."
+    fail_msg: "FATAL: namespace to deploy ({{ local_storage.namespace }}) not found. Ensure there is an operator already present."
+
+# A CSV carrying the LSO operator label proves the subscription has
+# resolved and the operator is actually installed in the namespace.
+- name: Check if there is a CSV in the namespace.
+  kubernetes.core.k8s_info:
+    api_version: operators.coreos.com/v1alpha1
+    kind: clusterserviceversion
+    namespace: "{{ local_storage.namespace }}"
+    label_selectors:
+      - operators.coreos.com/local-storage-operator.openshift-local-storage=
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: false
+  register: prereq_csv
+
+- name: Fail if not so.
+  ansible.builtin.assert:
+    that:
+      - prereq_csv.resources is defined
+      - prereq_csv.resources | length > 0
+    success_msg: "OK, operator CSV found."
+    fail_msg: "FATAL: Operator is not deployed in the namespace: {{ local_storage.namespace }}. Ensure there is an operator already present."
+
+# Count the nodes carrying the selector label; the LVD result wait below
+# uses this to know when discovery has covered every node.
+- name: Find how many nodes match the node_selector_key
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: v1
+    kind: node
+    # label_selectors is documented as a list of selector strings; a bare
+    # scalar only works through implicit string->list coercion and breaks
+    # if the key ever contains a comma.
+    label_selectors:
+      - "{{ local_storage.node_selector_key }}"
+  register: matching_nodes
+
+- name: Just remember how many
+  ansible.builtin.set_fact:
+    # NOTE: set_fact renders Jinja to a string by default — cast with
+    # "| int" wherever this is compared against a number.
+    num_storage_nodes: "{{ matching_nodes.resources | length }}"
+
+# NOTE: LVD resource must be named auto-discover-devices
+- name: Create a local volume discovery resource.
+  kubernetes.core.k8s:
+    api_version: local.storage.openshift.io/v1alpha1
+    kind: localvolumediscovery
+    namespace: "{{ local_storage.namespace }}"
+    name: auto-discover-devices
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: false
+    resource_definition:
+      spec:
+        # Scan every node that carries the selector label, regardless of
+        # the label's value.
+        nodeSelector:
+          nodeSelectorTerms:
+            - matchExpressions:
+              - key: "{{ local_storage.node_selector_key }}"
+                operator: Exists
+
+# One LocalVolumeDiscoveryResult appears per scanned node; wait until
+# every labelled node has reported in.
+- name: Wait until we have the correct number of local volume discovery results
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: local.storage.openshift.io/v1alpha1
+    kind: localvolumediscoveryresult
+    namespace: "{{ local_storage.namespace }}"
+  register: lvdresults
+  until:
+    # num_storage_nodes comes from set_fact and is therefore a string;
+    # without the "| int" cast this int == str comparison is always false
+    # and the task would exhaust its retries even on a healthy cluster.
+    - (lvdresults.resources | length) == (num_storage_nodes | int)
+  retries: 12
+  delay: 5
+
+# Create the LocalVolumeSet that turns discovered disks into PVs backing
+# the requested storage class.
+- name: Finally, create a local volume set
+  kubernetes.core.k8s:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: local.storage.openshift.io/v1alpha1
+    kind: localvolumeset
+    namespace: "{{ local_storage.namespace }}"
+    name: "{{ local_storage.storage_class_name }}"
+    resource_definition:
+      spec:
+        deviceInclusionSpec:
+          deviceTypes:
+            - disk
+          # Overridable via local_storage.min_device_size; defaults to the
+          # previously hard-coded 256Gi for backward compatibility.
+          minSize: "{{ local_storage.min_device_size | default('256Gi') }}"
+        nodeSelector:
+          nodeSelectorTerms:
+            - matchExpressions:
+              - key: "{{ local_storage.node_selector_key }}"
+                operator: Exists
+        storageClassName: "{{ local_storage.storage_class_name }}"
+        maxDeviceCount: 1
+        volumeMode: Block
+
+- name: Wait until the volume set is available
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: local.storage.openshift.io/v1alpha1
+    kind: localvolumeset
+    namespace: "{{ local_storage.namespace }}"
+    name: "{{ local_storage.storage_class_name }}"
+  register: lso_volset
+  until:
+    # Guard the index first: resources[0] raises if the object has not
+    # been returned yet, which would abort the retry loop instead of
+    # polling again.
+    - lso_volset.resources | length > 0
+    # json_query returns a LIST of condition statuses; a non-empty list is
+    # truthy even when the status is "False", so the original expression
+    # passed as soon as the Available condition merely existed. Compare
+    # against the expected value instead.
+    - (lso_volset.resources[0] | community.general.json_query('status.conditions[?type==`Available`].status')) == ['True']
+  retries: 12
+  delay: 5
+
+# The storage class is created asynchronously by the operator once the
+# volume set reconciles, so poll for its appearance.
+- name: Wait until the storage class appears
+  kubernetes.core.k8s_info:
+    api_version: storage.k8s.io/v1
+    kind: storageclass
+    name: "{{ local_storage.storage_class_name }}"
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: false
+  register: lso_class
+  until: lso_class.resources | length > 0
+  retries: 12
+  delay: 5

+ 4 - 0
p0f/operators/roles/deploy-odf-storage/defaults/main.yml

@@ -0,0 +1,4 @@
+---
+# Variables that are usually overridden.
+# Path to the cluster-admin kubeconfig consumed by every kubernetes.core
+# task in this role (relative to the playbook directory).
+kubeadmin_config: "tmp/kubeconfig-ocp4"
+...

+ 174 - 0
p0f/operators/roles/deploy-odf-storage/tasks/main.yml

@@ -0,0 +1,174 @@
+---
+# Deploys the OpenShift Data Foundation (ODF) storage cluster resources.
+#
+# Required variables:
+#
+#   local_storage:
+#     namespace:          which namespace the LSO is in
+#     storage_class_name: what to call the storage class
+#     node_selector_key:  which nodes host local volumes (just the key of the label)
+#
+#   odf_storage:
+#     namespace:          which namespace ODF operator is in
+#     name:               the name of the storage cluster
+#
+# Optional variables:
+#
+#   kubeadmin_config      the administrator kubeconfig file (tmp/kubeconfig-ocp4)
+#
+# Look up the ODF operator namespace; an empty result means the
+# subscription has not been created yet.
+- name: Check if there is a namespace.
+  kubernetes.core.k8s_info:
+    api_version: v1
+    kind: namespace
+    name: "{{ odf_storage.namespace }}"
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: false
+  register: prereq_ns
+
+# Abort early with a clear message rather than failing obscurely later.
+- name: Fail if not so.
+  ansible.builtin.assert:
+    that:
+      - prereq_ns.resources is defined
+      - prereq_ns.resources | length == 1
+    success_msg: "OK, namespace found."
+    fail_msg: "FATAL: namespace to deploy ({{ odf_storage.namespace }}) not found. Ensure there is an operator already present."
+
+# A CSV carrying the ODF operator label proves the subscription has
+# resolved and the operator is actually installed in the namespace.
+- name: Check if there is a CSV in the namespace.
+  kubernetes.core.k8s_info:
+    api_version: operators.coreos.com/v1alpha1
+    kind: clusterserviceversion
+    namespace: "{{ odf_storage.namespace }}"
+    label_selectors:
+      - operators.coreos.com/odf-operator.openshift-storage=
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: false
+  register: prereq_csv
+
+- name: Fail if not so.
+  ansible.builtin.assert:
+    that:
+      - prereq_csv.resources is defined
+      - prereq_csv.resources | length > 0
+    success_msg: "OK, operator CSV found."
+    fail_msg: "FATAL: Operator is not deployed in the namespace: {{ odf_storage.namespace }}. Ensure there is an operator already present."
+
+# ODF consumes the LSO-provided storage class, so it must exist before
+# the storage cluster can be created.
+- name: Check if the LSO storage class exists.
+  kubernetes.core.k8s_info:
+    api_version: storage.k8s.io/v1
+    kind: storageclass
+    name: "{{ local_storage.storage_class_name }}"
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: false
+  register: lso_sclass
+
+- name: Fail if not so.
+  ansible.builtin.assert:
+    that:
+      - lso_sclass.resources is defined
+      - lso_sclass.resources | length > 0
+    success_msg: "OK, LSO storage class found."
+    fail_msg: "FATAL: Local Storage Operator is not configured correctly: storage class {{ local_storage.storage_class_name }} not found."
+
+- name: Verify the web console settings
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: operator.openshift.io/v1
+    kind: console
+    name: cluster
+  register: cluster_console
+
+# NOTE(review): the "/-" path appends to an EXISTING list; this assumes
+# /spec/plugins is already present on the console resource — if it can
+# be absent on a fresh cluster, an extra "add /spec/plugins []" op is
+# needed first. TODO confirm.
+- name: Patch the web console with odf-console plugin if not yet there
+  kubernetes.core.k8s_json_patch:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: operator.openshift.io/v1
+    kind: console
+    name: cluster
+    patch:
+      - op: add
+        path: /spec/plugins/-
+        value: odf-console
+  # default([]) keeps the condition evaluable when spec.plugins is not
+  # set at all; the original expression raised an undefined-attribute
+  # error in that case instead of running the patch decision.
+  when: "'odf-console' not in (cluster_console.resources[0].spec.plugins | default([]))"
+
+# Count the nodes carrying the selector label; the storage cluster's
+# device-set replica count below is derived from this.
+- name: Find how many nodes match the node_selector_key
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: v1
+    kind: node
+    # label_selectors is documented as a list of selector strings; a bare
+    # scalar only works through implicit string->list coercion and breaks
+    # if the key ever contains a comma.
+    label_selectors:
+      - "{{ local_storage.node_selector_key }}"
+  register: matching_nodes
+
+- name: Just remember how many
+  ansible.builtin.set_fact:
+    # NOTE: set_fact renders Jinja to a string by default — cast with
+    # "| int" wherever this is used as a number.
+    num_storage_nodes: "{{ matching_nodes.resources | length }}"
+
+- name: Ensure there is a storage cluster resource
+  kubernetes.core.k8s:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: ocs.openshift.io/v1
+    kind: storagecluster
+    namespace: "{{ odf_storage.namespace }}"
+    name: "{{ odf_storage.name }}"
+    resource_definition:
+      spec:
+        monDataDirHostPath: /var/lib/rook
+        storageDeviceSets:
+          - count: 1
+            dataPVCTemplate:
+              spec:
+                accessModes:
+                  - ReadWriteOnce
+                resources:
+                  requests:
+                    # presumably a token size — with LSO-backed PVs the PVC
+                    # binds the whole local volume regardless; verify
+                    storage: "1"
+                storageClassName: "{{ local_storage.storage_class_name }}"
+                volumeMode: Block
+            name: odf-lso-device-set
+            # replica must reach the API server as an integer, but the
+            # original passed the set_fact STRING verbatim; cast it.
+            # NOTE(review): default templating may still stringify the
+            # result — if the API rejects it, enable jinja2_native.
+            replica: "{{ num_storage_nodes | int }}"
+        flexibleScaling: false
+        placement:
+          all:
+            nodeAffinity:
+              preferredDuringSchedulingIgnoredDuringExecution:
+                - weight: 5
+                  preference:
+                    matchExpressions:
+                      - key: "{{ local_storage.node_selector_key }}"
+                        operator: Exists
+
+- name: Wait for the Ceph cluster to finish progressing.
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: ceph.rook.io/v1
+    kind: cephcluster
+    namespace: "{{ odf_storage.namespace }}"
+    name: "{{ odf_storage.name }}-cephcluster"
+  register: cephcluster
+  until:
+    # The CephCluster object (or its status) may not exist yet right
+    # after the StorageCluster is created; the unguarded resources[0]
+    # lookup raised and aborted the retry loop instead of polling again.
+    - cephcluster.resources | length > 0
+    - cephcluster.resources[0].status.phase | default('') == "Ready"
+  retries: 60
+  delay: 5
+
+- name: Wait for the storage cluster to finish progressing.
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: ocs.openshift.io/v1
+    kind: storagecluster
+    namespace: "{{ odf_storage.namespace }}"
+    name: "{{ odf_storage.name }}"
+  register: storagecluster
+  until:
+    # Guard the index and the possibly-missing status sub-tree so an
+    # early poll keeps retrying instead of erroring out.
+    - storagecluster.resources | length > 0
+    - storagecluster.resources[0].status.phase | default('') == "Ready"
+  retries: 60
+  delay: 5
+
+# TODO: Perhaps verify the storage classes are there?
+#       The naming scheme is:
+#         {{ odf_storage.name }}-ceph-rgw
+#         {{ odf_storage.name }}-ceph-rbd
+#         {{ odf_storage.name }}-cephfs
+#         openshift-storage.noobaa.io
+...