6 Revisions f6c588852a ... c7830dc608

Author SHA1 Message Date
  Grega Bremec c7830dc608 simple role to add logging stack to a cluster 2 days ago
  Grega Bremec 2677d5bcdc simple role to add storage to monitoring and optionally enable uwm (with storage as well) 2 days ago
  Grega Bremec d584b38965 role to add a custom catalog source 2 days ago
  Grega Bremec 45cc7f2958 add optional catalog_namespace variable 2 days ago
  Grega Bremec 3fddd6ab15 fix conditionals to make ansible 2.19 happy, tweak deploy-rhbk a bit 2 days ago
  Grega Bremec f46e987d73 replace lingering hardcoded kubeconfig & old naming for fqdn 1 week ago

+ 4 - 0
p0f/operators/roles/add-catalog/defaults/main.yml

@@ -0,0 +1,4 @@
+---
+# Variables that are usually overridden.
+kubeadmin_config: "tmp/kubeconfig-ocp4"
+...

+ 117 - 0
p0f/operators/roles/add-catalog/tasks/main.yml

@@ -0,0 +1,117 @@
+---
+# Adds a new catalog source to the cluster.
+#
+# NOTE: If a catalog source is added to a namespace other than
+#       openshift-marketplace, operators can only be installed in that same
+#       namespace.
+#
+# REQUIRED:
+#
+#   added_catalogs:
+#     - image:          the container image serving the catalog source
+#       name:           the name for the catalog
+#       namespace:      the namespace where it should be created
+#       display_name:   display name for the catalog
+#       publisher:      the name of the publisher
+#       catalog_type:   (optional) the type of catalog, defaults to grpc
+#       verify_mft:     (optional) the packagemanifest to check for, verifying content provisioning
+#
+# This role must be applied as:
+#
+#   - include_role:
+#       name: add-catalog
+#     loop: "{{ added_catalogs }}"
+#     loop_control:
+#       loop_var: role
+#
+# This means each item of added_catalogs is expected to be placed in the
+# "role" variable prior to iterating over this role.
+#
+# OPTIONAL:
+#
+#   kubeadmin_config    kubeadmin (or other admin) credentials (tmp/kubeconfig-ocp4)
+#
+# TODO: verify required variables are set
+- name: Wait for the marketplace-operator to be up
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: v1
+    kind: pod
+    namespace: openshift-marketplace
+    label_selectors:
+      - name=marketplace-operator
+  register: mktplc_pod
+  until:
+    - (mktplc_pod.resources | length) == 1
+    - mktplc_pod.resources[0].status.containerStatuses[0].ready
+  retries: 30
+  delay: 10
+
+- name: Make sure the target namespace exists
+  kubernetes.core.k8s:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: v1
+    kind: namespace
+    name: "{{ role.namespace }}"
+    resource_definition:
+      metadata:
+        annotations:
+          capability.openshift.io/name: marketplace
+        labels:
+          openshift.io/cluster-monitoring: "true"
+
+- name: Create the catalog source if not there yet, or patch it
+  kubernetes.core.k8s:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: operators.coreos.com/v1alpha1
+    kind: catalogsource
+    namespace: "{{ role.namespace }}"
+    name: "{{ role.name }}"
+    resource_definition:
+      spec:
+        sourceType: "{{ role.catalog_type | default('grpc') }}"
+        image: "{{ role.image }}"
+        displayName: "{{ role.display_name }}"
+        publisher: "{{ role.publisher }}"
+
+- name: Wait for the catalog source to be ready
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: operators.coreos.com/v1alpha1
+    kind: catalogsource
+    namespace: "{{ role.namespace }}"
+    name: "{{ role.name }}"
+  register: cat_stat
+  until:
+    - (cat_stat.resources | length) == 1
+    - cat_stat.resources[0].status is defined
+    - cat_stat.resources[0].status.connectionState is defined
+    - cat_stat.resources[0].status.connectionState.lastObservedState == "READY"
+  retries: 30
+  delay: 10
+
+- name: Verify correct deployment
+  block:
+    - name: Wait for the operator packagemanifest to appear
+      kubernetes.core.k8s_info:
+        kubeconfig: "{{ kubeadmin_config }}"
+        validate_certs: no
+        api_version: packages.operators.coreos.com/v1
+        kind: packagemanifest
+        namespace: "{{ role.namespace }}"
+        name: "{{ role.verify_mft }}"
+      register: vrfy_mft
+      until:
+        - (vrfy_mft.resources | length) == 1
+        - vrfy_mft.resources[0].status.catalogSource == role.name
+        - vrfy_mft.resources[0].status.packageName == role.verify_mft
+      retries: 60
+      delay: 10
+
+  when:
+    - role.verify_mft is defined
+...
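
A minimal sketch of a playbook driving this role as described in the header
comment, assuming a hypothetical catalog image and operator name (both
illustrative, not part of this change):

    - hosts: localhost
      gather_facts: false
      vars:
        added_catalogs:
          - image: registry.example.com/catalogs/custom-operators:latest  # hypothetical image
            name: custom-operators
            namespace: openshift-marketplace
            display_name: Custom Operators
            publisher: Example Corp
            verify_mft: example-operator  # optional; verification is skipped when omitted
      tasks:
        - include_role:
            name: add-catalog
          loop: "{{ added_catalogs }}"
          loop_control:
            loop_var: role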

+ 6 - 0
p0f/operators/roles/cluster-logging/defaults/main.yml

@@ -0,0 +1,6 @@
+---
+# Variables that are usually overridden.
+kubeadmin_config: "tmp/kubeconfig-ocp4"
+logging_obc_storage_class: odf-cluster-ceph-rgw
+logging_pvc_storage_class: odf-cluster-ceph-rbd
+...

+ 236 - 0
p0f/operators/roles/cluster-logging/tasks/main.yml

@@ -0,0 +1,236 @@
+---
+# Already performed by deploy-operators role:
+#   - deploy loki operator
+#   - deploy cluster logging operator
+#   - deploy cluster observability operator
+#
+# Deploy and configure the cluster logging stack:
+#   - create an object bucket claim
+#   - extract the credentials and endpoints
+#   - create a loki secret
+#   - deploy a LokiStack
+#   - deploy a ClusterLogForwarder instance
+#
+# TODO: check that the required operators are installed
+#
+# Required variables:
+#
+#   NONE
+#
+# Optional variables:
+#
+#   kubeadmin_config          the administrator kubeconfig file (tmp/kubeconfig-ocp4)
+#   logging_obc_storage_class odf-cluster-ceph-rgw
+#   logging_pvc_storage_class odf-cluster-ceph-rbd
+#
+- name: Create an ObjectBucketClaim for Loki
+  kubernetes.core.k8s:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: objectbucket.io/v1alpha1
+    kind: objectbucketclaim
+    namespace: openshift-logging
+    name: loki-object-bucket
+    resource_definition:
+      spec:
+        generateBucketName: logging
+        storageClassName: "{{ logging_obc_storage_class }}"
+
+- name: Wait for the OBC to be bound
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: objectbucket.io/v1alpha1
+    kind: objectbucketclaim
+    namespace: openshift-logging
+    name: loki-object-bucket
+  register: obc_bound
+  until:
+    - obc_bound.resources is defined
+    - obc_bound.resources | length == 1
+    - obc_bound.resources[0].status is defined
+    - obc_bound.resources[0].status.phase == 'Bound'
+  retries: 6
+  delay: 5
+
+- name: Load the OBC secret
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: v1
+    kind: secret
+    namespace: openshift-logging
+    name: loki-object-bucket
+  register: obc_secret
+
+- name: Load the OBC configmap
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: v1
+    kind: configmap
+    namespace: openshift-logging
+    name: loki-object-bucket
+  register: obc_cm
+
+- name: Remember OBC data as facts
+  ansible.builtin.set_fact:
+    obc_access_key: "{{ obc_secret.resources[0].data.AWS_ACCESS_KEY_ID | ansible.builtin.b64decode }}"
+    obc_secret_key: "{{ obc_secret.resources[0].data.AWS_SECRET_ACCESS_KEY | ansible.builtin.b64decode }}"
+    obc_bucket_name: "{{ obc_cm.resources[0].data.BUCKET_NAME }}"
+    obc_endpoint: "{{ obc_cm.resources[0].data.BUCKET_HOST }}"
+
+- name: Create a secret for Loki
+  kubernetes.core.k8s:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: v1
+    kind: secret
+    namespace: openshift-logging
+    name: loki-store
+    resource_definition:
+      stringData:
+        access_key_id: "{{ obc_access_key }}"
+        access_key_secret: "{{ obc_secret_key }}"
+        bucketnames: "{{ obc_bucket_name }}"
+        endpoint: "http://{{ obc_endpoint }}"
+        region: eu-central-1
+
+- name: Create a LokiStack
+  kubernetes.core.k8s:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: loki.grafana.com/v1
+    kind: lokistack
+    namespace: openshift-logging
+    name: logging-loki
+    resource_definition:
+      spec:
+        managementState: Managed
+        size: 1x.demo
+        storage:
+          schemas:
+            - effectiveDate: '2024-10-01'
+              version: v13
+          secret:
+            name: loki-store
+            type: s3
+        storageClassName: "{{ logging_pvc_storage_class }}"
+        tenants:
+          mode: openshift-logging
+
+# NOTE: this might take a VERY long time when adjustments are made after an initial deployment.
+- name: Wait for LokiStack to be ready
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: loki.grafana.com/v1
+    kind: lokistack
+    namespace: openshift-logging
+    name: logging-loki
+  register: loki_ready
+  until:
+    - loki_ready.resources is defined
+    - loki_ready.resources | length == 1
+    - loki_ready.resources[0].status is defined
+    - (loki_ready.resources[0].status | community.general.json_query('conditions[?type==`Ready`].status')) | length == 1
+    - (loki_ready.resources[0].status | community.general.json_query('conditions[?type==`Ready`].status'))[0] == 'True'
+  retries: 60
+  delay: 5
+
+- name: Create a service account for the log forwarder
+  kubernetes.core.k8s:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: v1
+    kind: serviceaccount
+    namespace: openshift-logging
+    name: collector
+
+- name: Grant it the required ClusterRoleBindings
+  kubernetes.core.k8s:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: rbac.authorization.k8s.io/v1
+    kind: clusterrolebinding
+    name: "clf-{{ item }}"
+    resource_definition:
+      roleRef:
+        apiGroup: rbac.authorization.k8s.io
+        kind: ClusterRole
+        name: "{{ item }}"
+      subjects:
+      - kind: ServiceAccount
+        name: collector
+        namespace: openshift-logging
+  loop:
+    - logging-collector-logs-writer
+    - collect-application-logs
+    - collect-audit-logs
+    - collect-infrastructure-logs
+
+- name: Finally, create a CLF
+  kubernetes.core.k8s:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: observability.openshift.io/v1
+    kind: clusterlogforwarder
+    namespace: openshift-logging
+    name: collector
+    resource_definition:
+      spec:
+        serviceAccount:
+          name: collector
+        outputs:
+        - name: default-lokistack
+          type: lokiStack
+          lokiStack:
+            authentication:
+              token:
+                from: serviceAccount
+            target:
+              name: logging-loki
+              namespace: openshift-logging
+          tls:
+            ca:
+              key: service-ca.crt
+              configMapName: openshift-service-ca.crt
+        pipelines:
+        - name: default-logstore
+          inputRefs:
+          - application
+          - infrastructure
+          outputRefs:
+          - default-lokistack
+
+- name: Wait for CLF to be ready
+  kubernetes.core.k8s_info:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: observability.openshift.io/v1
+    kind: clusterlogforwarder
+    namespace: openshift-logging
+    name: collector
+  register: clf_ready
+  until:
+    - clf_ready.resources is defined
+    - clf_ready.resources | length == 1
+    - clf_ready.resources[0].status is defined
+    - (clf_ready.resources[0].status | community.general.json_query('conditions[?type==`Ready`].status'))[0] == 'True'
+  retries: 6
+  delay: 5
+
+- name: Activate the web console plugin
+  kubernetes.core.k8s:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: observability.openshift.io/v1alpha1
+    kind: uiplugin
+    name: logging
+    resource_definition:
+      spec:
+        type: Logging
+        logging:
+          lokiStack:
+            name: logging-loki
+...
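
A minimal sketch of applying the role with non-default storage classes (the
class names below are illustrative, not part of the repository):

    - hosts: localhost
      gather_facts: false
      roles:
        - role: cluster-logging
          vars:
            kubeadmin_config: tmp/kubeconfig-ocp4
            logging_obc_storage_class: my-rgw-class  # hypothetical RGW-backed class
            logging_pvc_storage_class: my-rbd-class  # hypothetical RBD-backed class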

+ 20 - 0
p0f/operators/roles/cluster-monitoring/defaults/main.yml

@@ -0,0 +1,20 @@
+---
+# Variables that are usually overridden.
+kubeadmin_config: "tmp/kubeconfig-ocp4"
+enable_user_workload: true
+main_prom_pvc: 100Gi
+main_prom_sc: odf-cluster-ceph-rbd
+main_prom_retain_time: 21d
+main_prom_retain_size: 80GiB
+main_alrt_pvc: 10Gi
+main_alrt_sc: odf-cluster-ceph-rbd
+user_prom_pvc: 40Gi
+user_prom_sc: odf-cluster-ceph-rbd
+user_prom_retain_time: 21d
+user_prom_retain_size: 30GiB
+user_alrt_pvc: 10Gi
+user_alrt_sc: odf-cluster-ceph-rbd
+user_thanos_pvc: 10Gi
+user_thanos_sc: odf-cluster-ceph-rbd
+user_thanos_retain_time: 21d
+...

+ 85 - 0
p0f/operators/roles/cluster-monitoring/tasks/main.yml

@@ -0,0 +1,85 @@
+---
+# Configure cluster monitoring:
+#  - storage for Prometheus and AlertManager
+#  - retention settings
+#  - UWM:
+#    - storage for Prometheus, AlertManager, and ThanosRuler
+#    - retention settings
+#    - user RBAC to allow users to query and view metrics (cluster-monitoring-view) TODO
+#
+# Required variables:
+#
+#   NONE
+#
+# Optional variables:
+#
+#   kubeadmin_config          the administrator kubeconfig file (tmp/kubeconfig-ocp4)
+#
+#   enable_user_workload      defaults to true
+#   main_prom_pvc             prometheusK8s PVC size (100Gi)
+#   main_prom_sc              prometheusK8s PVC storage class (odf-cluster-ceph-rbd)
+#   main_prom_retain_time     system metric retention time (21d)
+#   main_prom_retain_size     system metric retention size in GiB (80GiB)
+#   main_alrt_pvc             alertmanagerMain PVC size (10Gi)
+#   main_alrt_sc              alertmanagerMain PVC storage class (odf-cluster-ceph-rbd)
+#   user_prom_pvc             prometheus PVC size (40Gi)
+#   user_prom_sc              prometheus PVC storage class (odf-cluster-ceph-rbd)
+#   user_prom_retain_time     user metric retention time (21d)
+#   user_prom_retain_size     user metric retention size in GiB (30GiB)
+#   user_alrt_pvc             alertmanager PVC size (10Gi)
+#   user_alrt_sc              alertmanager PVC storage class (odf-cluster-ceph-rbd)
+#   user_thanos_pvc           thanos ruler PVC size (10Gi)
+#   user_thanos_sc            thanos ruler PVC storage class (odf-cluster-ceph-rbd)
+#   user_thanos_retain_time   thanos ruler retention time (21d)
+#
+# OPTIONAL TODOs:
+#  - nodeSelector
+#  - taints
+#  - tolerations
+#
+# NOTES:
+#  symptoms: disk pressure, https://access.redhat.com/solutions/5341801 and
+#                           https://access.redhat.com/solutions/6738851
+#
+- name: Apply cluster monitoring configmap
+  kubernetes.core.k8s:
+    kubeconfig: "{{ kubeadmin_config }}"
+    validate_certs: no
+    api_version: v1
+    kind: configmap
+    namespace: openshift-monitoring
+    name: cluster-monitoring-config
+    template: templates/cluster-monitoring.yml.j2
+
+- name: Apply user monitoring settings if required
+  block:
+    - name: Wait for UWM operator pod to become ready
+      kubernetes.core.k8s_info:
+        kubeconfig: "{{ kubeadmin_config }}"
+        validate_certs: no
+        api_version: v1
+        kind: pod
+        namespace: openshift-user-workload-monitoring
+        label_selectors:
+          - app.kubernetes.io/component=controller
+      register: uwm_op_ready
+      until:
+        - uwm_op_ready.resources is defined
+        - uwm_op_ready.resources | length == 1
+        - uwm_op_ready.resources[0].status is defined
+        - (uwm_op_ready.resources[0].status | community.general.json_query('conditions[?type==`Ready`].status'))[0] == 'True'
+      retries: 6
+      delay: 5
+
+    - name: Apply user monitoring configmap if required
+      kubernetes.core.k8s:
+        kubeconfig: "{{ kubeadmin_config }}"
+        validate_certs: no
+        api_version: v1
+        kind: configmap
+        namespace: openshift-user-workload-monitoring
+        name: user-workload-monitoring-config
+        template: templates/user-monitoring.yml.j2
+
+  when: enable_user_workload | bool
+...
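
A minimal sketch of applying the role; disabling UWM skips the second
configmap and the operator wait entirely (the overrides below are
illustrative):

    - hosts: localhost
      gather_facts: false
      roles:
        - role: cluster-monitoring
          vars:
            enable_user_workload: false
            main_prom_pvc: 50Gi
            main_prom_retain_size: 40GiB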

+ 26 - 0
p0f/operators/roles/cluster-monitoring/templates/cluster-monitoring.yml.j2

@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cluster-monitoring-config
+  namespace: openshift-monitoring
+data:
+  config.yaml: |
+{% if enable_user_workload %}
+    enableUserWorkload: true
+{% endif %}
+    prometheusK8s:
+      retention: {{ main_prom_retain_time }}
+      retentionSize: {{ main_prom_retain_size }}
+      volumeClaimTemplate:
+        spec:
+          storageClassName: {{ main_prom_sc }}
+          resources:
+            requests:
+              storage: {{ main_prom_pvc }}
+    alertmanagerMain:
+      volumeClaimTemplate:
+        spec:
+          storageClassName: {{ main_alrt_sc }}
+          resources:
+            requests:
+              storage: {{ main_alrt_pvc }}
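
For reference, rendered with the role defaults this template produces:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: cluster-monitoring-config
      namespace: openshift-monitoring
    data:
      config.yaml: |
        enableUserWorkload: true
        prometheusK8s:
          retention: 21d
          retentionSize: 80GiB
          volumeClaimTemplate:
            spec:
              storageClassName: odf-cluster-ceph-rbd
              resources:
                requests:
                  storage: 100Gi
        alertmanagerMain:
          volumeClaimTemplate:
            spec:
              storageClassName: odf-cluster-ceph-rbd
              resources:
                requests:
                  storage: 10Gi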

+ 33 - 0
p0f/operators/roles/cluster-monitoring/templates/user-monitoring.yml.j2

@@ -0,0 +1,33 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: user-workload-monitoring-config
+  namespace: openshift-user-workload-monitoring
+data:
+  config.yaml: |
+    prometheus:
+      retention: {{ user_prom_retain_time }}
+      retentionSize: {{ user_prom_retain_size }}
+      volumeClaimTemplate:
+        spec:
+          storageClassName: {{ user_prom_sc }}
+          resources:
+            requests:
+              storage: {{ user_prom_pvc }}
+    alertmanager:
+      enabled: true
+      enableAlertmanagerConfig: true
+      volumeClaimTemplate:
+        spec:
+          storageClassName: {{ user_alrt_sc }}
+          resources:
+            requests:
+              storage: {{ user_alrt_pvc }}
+    thanosRuler:
+      retention: {{ user_thanos_retain_time }}
+      volumeClaimTemplate:
+        spec:
+          storageClassName: {{ user_thanos_sc }}
+          resources:
+            requests:
+              storage: {{ user_thanos_pvc }}
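
Rendered with the defaults, this pins the user-workload Prometheus to a 40Gi
PVC with 21d/30GiB retention, Alertmanager to a 10Gi PVC, and the Thanos Ruler
to a 10Gi PVC with 21d retention, all on odf-cluster-ceph-rbd.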

+ 1 - 1
p0f/operators/roles/deploy-local-storage/tasks/main.yml

@@ -124,7 +124,7 @@
     namespace: "{{ local_storage.namespace }}"
     name: "{{ local_storage.storage_class_name }}"
   register: lso_volset
-  until: (lso_volset.resources[0] | community.general.json_query('status.conditions[?type==`Available`].status'))
+  until: (lso_volset.resources[0] | community.general.json_query('status.conditions[?type==`Available`].status'))[0] == 'True'
   retries: 12
   delay: 5
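
Worth noting about this fix: community.general.json_query returns a list, and
a non-empty list such as ['False'] is truthy in Jinja, so the old condition
was satisfied as soon as the Available condition existed, regardless of its
value. Comparing the first element against the string 'True' (Kubernetes
condition statuses are strings) makes the wait actually gate on availability;
the same correction appears in deploy-rhbk below.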
 

+ 3 - 1
p0f/operators/roles/deploy-odf-storage/tasks/main.yml

@@ -151,7 +151,9 @@
     namespace: "{{ odf_storage.namespace }}"
     name: "{{ odf_storage.name }}-cephcluster"
   register: cephcluster
-  until: cephcluster.resources[0].status.phase == "Ready"
+  until:
+    - cephcluster.resources | length > 0
+    - cephcluster.resources[0].status.phase == "Ready"
   retries: 60
   delay: 5
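
The added length guard keeps the until expression from failing with an index
error on early polls, before the CephCluster object exists; per the commit
message, stricter conditional evaluation in ansible-core 2.19 surfaced this.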
 

+ 11 - 10
p0f/operators/roles/deploy-operators/tasks/main.yml

@@ -3,15 +3,16 @@
 #
 # The following variables must exist:
 #
-#  added_operators:   (list)
-#    - catalog:       the catalog of the manifest
-#      package:       the name of the packagemanifest
-#      subscription:  the name of the operatorgroup and subscription (optional, defaults to package)
-#      channel:       which channel to install from
-#      namespace:     target namespace for subscription
-#      desired_csv:   for verification - wait for this CSV to appear
-#      og_namespaces: (list) operatorgroup namespaces
-#      approval:      Automatic (default) or Manual
+#   added_operators:        (list)
+#     - catalog:            the catalog of the manifest
+#       catalog_namespace:  (optional) catalog namespace (defaults to openshift-marketplace)
+#       package:            the name of the packagemanifest
+#       subscription:       the name of the operatorgroup and subscription (optional, defaults to package)
+#       channel:            which channel to install from
+#       namespace:          target namespace for subscription
+#       desired_csv:        for verification - wait for this CSV to appear
+#       og_namespaces:      (list) operatorgroup namespaces
+#       approval:           Automatic (default) or Manual
 #
 # This role must then be applied as:
 #
@@ -122,7 +123,7 @@
     definition:
       spec:
         source: "{{ role.catalog }}"
-        sourceNamespace: openshift-marketplace
+        sourceNamespace: "{{ role.catalog_namespace | default('openshift-marketplace') }}"
         name: "{{ role.package }}"
         channel: "{{ role.channel }}"
         startingCSV: "{{ role.desired_csv }}"
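
A sketch of an added_operators entry consuming a catalog that lives in a
non-default namespace (names are illustrative); per the NOTE in the
add-catalog role, the subscription namespace must then match the catalog
namespace:

    added_operators:
      - catalog: custom-operators
        catalog_namespace: my-operators
        package: example-operator
        channel: stable
        namespace: my-operators
        desired_csv: example-operator.v1.0.0
        og_namespaces:
          - my-operators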

+ 16 - 1
p0f/operators/roles/deploy-rhbk/tasks/present.yml

@@ -261,7 +261,7 @@
     - rhbk_ready.resources is defined
     - rhbk_ready.resources | length == 1
     - rhbk_ready.resources[0].status is defined
-    - (rhbk_ready.resources[0].status | community.general.json_query('conditions[?type==`Ready`].status'))[0]
+    - (rhbk_ready.resources[0].status | community.general.json_query('conditions[?type==`Ready`].status'))[0] == 'True'
   retries: 24
   delay: 5
 
@@ -299,6 +299,16 @@
   ansible.builtin.set_fact:
     realms: "{{ rhbk_realms.json | items2dict(key_name='realm', value_name='id') }}"
 
+- name: Show the admin token at verbosity 2+.
+  ansible.builtin.debug:
+    var: admin_token
+    verbosity: 2
+
+- name: Show what realms were found at verbosity 2+.
+  ansible.builtin.debug:
+    var: realms
+    verbosity: 2
+
 - name: Import the realm if not present yet
   block:
 
@@ -366,6 +376,11 @@
       delay: 5
       when: created_import.changed
 
+    - name: Add another 30 seconds because Keycloak flaps.
+      ansible.builtin.pause:
+        prompt: Waiting 30 seconds for Keycloak to settle.
+        seconds: 30
+
   when:
     - realms[rhbk.realm | default('sample-realm')] is not defined
 

+ 5 - 0
p0f/operators/roles/deploy-rhbk/tasks/token.yml

@@ -23,6 +23,11 @@
     fail_msg: "ERROR: Failed to obtain authentication token from Keycloak."
     success_msg: "OK: got authentication token."
 
+- name: Show the token at verbosity 2+
+  ansible.builtin.debug:
+    verbosity: 2
+    var: sso_token_rsp.json.access_token
+
 - name: Store the token as a fact
   ansible.builtin.set_fact:
     admin_token: "{{ sso_token_rsp.json.access_token }}"

+ 2 - 2
p0f/operators/roles/rhbk-authn/tasks/main.yml

@@ -237,7 +237,7 @@
 
 - name: Ensure the console has a logoutRedirect
   kubernetes.core.k8s:
-    kubeconfig: tmp/kubeconfig-ocp4
+    kubeconfig: "{{ kubeadmin_config }}"
     validate_certs: no
     api_version: config.openshift.io/v1
     kind: console
@@ -246,7 +246,7 @@
     resource_definition:
       spec:
         authentication:
-          logoutRedirect: "https://{{ rhbk.fqdn }}/realms/{{ rhbk.realm | default('sample-realm') }}/protocol/openid-connect/logout"
+          logoutRedirect: "https://{{ rhbk_fqdn }}/realms/{{ rhbk.realm | default('sample-realm') }}/protocol/openid-connect/logout"
 
 - name: Ensure OpenShift groups are there as well.
   kubernetes.core.k8s: