
Add 'playbooks/' from commit '8de821fdb674ce537920a171b39290edd462cfe1'

git-subtree-dir: playbooks
git-subtree-mainline: 7eeedea6d68aa2e3ffd66fa02c05c4386619b999
git-subtree-split: 8de821fdb674ce537920a171b39290edd462cfe1
Grega Bremec 3 days ago
parent
commit
bcb3d3655a

+ 16 - 0
playbooks/00-general-pre-flight.yml

@@ -0,0 +1,16 @@
+---
+- name: Ensure workstation has the collections it needs.
+  hosts: workstation.lab.example.com
+  become: no
+  gather_facts: no
+  tasks:
+    - name: Install the required collections.
+      become: yes
+      ansible.builtin.yum:
+        name:
+          - ansible-collection-ansible-posix
+          - ansible-collection-community-crypto
+          - ansible-collection-community-general
+          - ansible-collection-containers-podman
+        state: present
+...
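
A quick way to confirm the result is a small verification play like the sketch below (not part of this commit; it assumes the RPM-packaged collections register under their usual galaxy names):

- name: Verify the collections are visible to Ansible on workstation.
  hosts: workstation.lab.example.com
  become: no
  gather_facts: no
  tasks:
    - name: List installed collections.
      ansible.builtin.command:
        cmd: ansible-galaxy collection list
      register: collection_list
      changed_when: no

    - name: Assert that all four collections are present.
      ansible.builtin.assert:
        that:
          - "'ansible.posix' in collection_list.stdout"
          - "'community.crypto' in collection_list.stdout"
          - "'community.general' in collection_list.stdout"
          - "'containers.podman' in collection_list.stdout"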

+ 174 - 0
playbooks/10-quay-tmp-fixes.yml

@@ -0,0 +1,174 @@
+---
+# These are temporary tasks needed on various machines until the classroom build is finished.
+- name: Fixes required on workstation VM.
+  hosts: workstation.lab.example.com
+  become: yes
+  gather_subset: min
+  tasks:
+    - name: Remove Google from resolv.conf
+      ansible.builtin.lineinfile:
+        path: /etc/resolv.conf
+        line: "nameserver 8.8.8.8"
+        state: absent
+
+- name: Fixes required on utility VM.
+  hosts: utility.lab.example.com
+  become: yes
+  gather_subset: min
+  tasks:
+# XXX DONE XXX    # Fixing the DNS first.
+# XXX DONE XXX    - name: add ocp4.example.com hosts to /etc/hosts
+# XXX DONE XXX      become: yes
+# XXX DONE XXX      ansible.builtin.lineinfile:
+# XXX DONE XXX        path: /etc/hosts
+# XXX DONE XXX        mode: 0644
+# XXX DONE XXX        regex: "{{ item.hostname }}"
+# XXX DONE XXX        line: "{{ item.addr }} {{ item.hostname }}"
+# XXX DONE XXX        state: present
+# XXX DONE XXX      loop:
+# XXX DONE XXX        - addr: 192.168.50.40
+# XXX DONE XXX          hostname: idm.ocp4.example.com
+# XXX DONE XXX        - addr: 192.168.50.50
+# XXX DONE XXX          hostname: registry.ocp4.example.com
+# XXX DONE XXX        - addr: 192.168.50.10
+# XXX DONE XXX          hostname: master01.ocp4.example.com
+# XXX DONE XXX        - addr: 192.168.50.11
+# XXX DONE XXX          hostname: master02.ocp4.example.com
+# XXX DONE XXX        - addr: 192.168.50.12
+# XXX DONE XXX          hostname: master03.ocp4.example.com
+# XXX DONE XXX        - addr: 192.168.50.13
+# XXX DONE XXX          hostname: worker01.ocp4.example.com
+# XXX DONE XXX        - addr: 192.168.50.14
+# XXX DONE XXX          hostname: worker02.ocp4.example.com
+# XXX DONE XXX
+# XXX DONE XXX    - name: Ensure dnsmasq is installed.
+# XXX DONE XXX      ansible.builtin.yum:
+# XXX DONE XXX        name:
+# XXX DONE XXX          - dnsmasq
+# XXX DONE XXX          - dnsmasq-utils
+# XXX DONE XXX        state: present
+# XXX DONE XXX
+# XXX DONE XXX    - name: Ensure dnsmasq is listening on all interfaces
+# XXX DONE XXX      ansible.builtin.lineinfile:
+# XXX DONE XXX        path: /etc/dnsmasq.conf
+# XXX DONE XXX        mode: 0644
+# XXX DONE XXX        regex: "^interface=(.*)$"
+# XXX DONE XXX        line: '#interface=\g<1>'
+# XXX DONE XXX        backrefs: yes
+# XXX DONE XXX
+# XXX DONE XXX    - name: Ensure dnsmasq is enabled and running.
+# XXX DONE XXX      ansible.builtin.systemd_service:
+# XXX DONE XXX        name: dnsmasq
+# XXX DONE XXX        enabled: yes
+# XXX DONE XXX        state: started
+# XXX DONE XXX
+# XXX DONE XXX    - name: Ensure DNS is open in the firewall.
+# XXX DONE XXX      ansible.posix.firewalld:
+# XXX DONE XXX        immediate: yes
+# XXX DONE XXX        permanent: yes
+# XXX DONE XXX        zone: "{{ item }}"
+# XXX DONE XXX        service: dns
+# XXX DONE XXX        state: enabled
+# XXX DONE XXX      loop:
+# XXX DONE XXX        - external
+# XXX DONE XXX        - public
+
+    - name: Ensure idm is in ocp4.example.com zone.
+      ansible.builtin.lineinfile:
+        path: /var/named/ocp4.example.com.db
+        regex: '^idm[[:space:]]'
+        insertafter: '.*IN NS dns\.ocp4\.example\.com\.$'
+        line: 'idm      IN A 192.168.50.40'
+      notify:
+        - fix forward zone serial
+        - restart named
+
+    - name: Ensure idm is in ocp4.example.com reverse zone.
+      ansible.builtin.lineinfile:
+        path: /var/named/ocp4.example.com.reverse.db
+        regex: '^40[[:space:]]'
+        insertafter: '.*IN NS dns\.ocp4\.example\.com\.$'
+        line: '40  IN PTR idm.ocp4.example.com.'
+      notify:
+        - fix reverse zone serial
+        - restart named
+
+    - name: Ensure utility allows forwarding traffic from external to public/trusted zones.
+      ansible.builtin.copy:
+        dest: /etc/firewalld/policies/fwd-stud-to-ocp.xml
+        mode: 0644
+        owner: root
+        group: root
+        content: |
+          <?xml version="1.0" encoding="utf-8"?>
+          <policy target="ACCEPT">
+            <ingress-zone name="external"/>
+            <egress-zone name="public"/>
+            <egress-zone name="trusted"/>
+          </policy>
+      notify:
+        - reload utility firewalld
+  handlers:
+    - name: reload utility firewalld
+      ansible.builtin.service:
+        name: firewalld
+        state: reloaded
+
+    - name: fix forward zone serial
+      ansible.builtin.lineinfile:
+        path: /var/named/ocp4.example.com.db
+        regex: '.*; serial$'
+        line: "                {{ ansible_facts['date_time']['year'] }}{{ ansible_facts['date_time']['month'] }}{{ ansible_facts['date_time']['day'] }}00"
+
+    - name: fix reverse zone serial
+      ansible.builtin.lineinfile:
+        path: /var/named/ocp4.example.com.reverse.db
+        regex: '.*; serial$'
+        line: "                {{ ansible_facts['date_time']['year'] }}{{ ansible_facts['date_time']['month'] }}{{ ansible_facts['date_time']['day'] }}00"
+
+    - name: restart named
+      ansible.builtin.service:
+        name: named
+        state: restarted
+
+- name: Fix registry VM configuration.
+  hosts: registry.ocp4.example.com
+  become: yes
+  gather_facts: no
+  tasks:
+    - name: Ensure eth1 interface is in public zone.
+      ansible.posix.firewalld:
+        zone: public
+        interface: eth1
+        immediate: yes
+        permanent: yes
+        state: enabled
+      notify:
+        - reload registry firewalld
+
+# XXX DONE XXX    #- name: Ensure registry is using bastion as the DNS
+# XXX DONE XXX    #  community.general.nmcli:
+# XXX DONE XXX    #    conn_name: "System eth1"
+# XXX DONE XXX    #    dns4: 172.25.250.254
+# XXX DONE XXX    #    state: present
+# XXX DONE XXX    #  notify:
+# XXX DONE XXX    #    - bounce eth1
+
+  handlers:
+    - name: reload registry firewalld
+      ansible.builtin.service:
+        name: firewalld
+        state: reloaded
+
+# XXX DONE XXX    #- name: reload connections
+# XXX DONE XXX    #  listen: bounce eth1
+# XXX DONE XXX    #  ansible.builtin.command: nmcli con reload
+# XXX DONE XXX
+# XXX DONE XXX    #- name: take eth1 down
+# XXX DONE XXX    #  listen: bounce eth1
+# XXX DONE XXX    #  ansible.builtin.command: nmcli con down "System eth1"
+# XXX DONE XXX
+# XXX DONE XXX    #- name: bring eth1 up
+# XXX DONE XXX    #  listen: bounce eth1
+# XXX DONE XXX    #  ansible.builtin.command: nmcli con up "System eth1"
+...
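
A possible follow-up check, sketched here only (not part of this commit): query the records this play adds, assuming dig is available on workstation and utility answers DNS queries from the lab network:

- name: Verify the idm records served by utility.
  hosts: workstation.lab.example.com
  become: no
  gather_facts: no
  tasks:
    - name: Query the forward and reverse records.
      ansible.builtin.command:
        cmd: "dig +short {{ item }} @utility.lab.example.com"
      loop:
        - idm.ocp4.example.com
        - "-x 192.168.50.40"
      register: dig_results
      changed_when: no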

+ 15 - 0
playbooks/20-general-post-tmp.yml

@@ -0,0 +1,15 @@
+---
+- name: Post-fix adjustments and corrections - a good test of the state of affairs.
+  hosts: all
+  become: no
+  gather_facts: no
+  tasks:
+    - name: remove annoying MOTDs
+      become: yes
+      ansible.builtin.file:
+        path: "/etc/motd.d/{{ item }}"
+        state: absent
+      loop:
+        - cockpit
+        - insights-client
+...
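
A minimal check of the result could look like this sketch (not part of the commit):

- name: Confirm the MOTD fragments are gone everywhere.
  hosts: all
  become: yes
  gather_facts: no
  tasks:
    - name: Look for leftover MOTD fragments.
      ansible.builtin.find:
        paths: /etc/motd.d
        patterns:
          - cockpit
          - insights-client
      register: motd_leftovers
      failed_when: motd_leftovers.files | length > 0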

+ 225 - 0
playbooks/30-quay-pre-tasks.yml

@@ -0,0 +1,225 @@
+---
+# Tasks required by 00-initial-config.adoc.
+- name: Create a CA on workstation.
+  hosts: workstation.lab.example.com
+  become: no
+  gather_subset: min
+  tasks:
+    # TODO: Only if necessary.
+    - name: Create directories.
+      ansible.builtin.file:
+        path: "{{ ansible_facts['user_dir'] }}/ca/lab-ca/newcerts"
+        state: directory
+        recurse: yes
+        mode: 0700
+
+    # TODO: Only if necessary.
+    - name: Create cert index.
+      ansible.builtin.copy:
+        dest: "{{ ansible_facts['user_dir'] }}/ca/lab-ca/index.txt"
+        mode: 0600
+        content: ""
+
+    # TODO: Only if necessary.
+    - name: Create cert serial tracker.
+      ansible.builtin.copy:
+        dest: "{{ ansible_facts['user_dir'] }}/ca/lab-ca/serial"
+        mode: 0600
+        content: "0000"
+
+    - name: Ensure openssl.cnf is there and correct.
+      ansible.builtin.copy:
+        dest: "{{ ansible_facts['user_dir'] }}/ca/openssl.cnf"
+        mode: 0600
+        content: |
+          [ ca ]
+          default_ca      = CA_default
+          
+          [ CA_default ]
+          
+          dir            = /home/student/ca/lab-ca
+          serial         = $dir/serial
+          database       = $dir/index.txt
+          new_certs_dir  = $dir/newcerts
+          
+          certificate    = /home/student/ca/ca-cert.pem
+          private_key    = /home/student/ca/ca-key.pem
+          
+          default_days   = 365
+          default_crl_days= 30
+          default_md     = sha256
+          
+          policy         = policy_any
+          email_in_dn    = no
+          
+          name_opt       = ca_default
+          cert_opt       = ca_default
+          copy_extensions = copy
+          
+          [ policy_any ]
+          countryName            = supplied
+          stateOrProvinceName    = optional
+          organizationName       = optional
+          organizationalUnitName = optional
+          commonName             = supplied
+          emailAddress           = optional
+
+    - name: Check if CA key exists to save time
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/ca/ca-key.pem"
+        get_attributes: no
+        get_checksum: no
+        get_mime: no
+      register: cakey_file
+
+    - name: Check if CA cert exists to save time
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/ca/ca-cert.pem"
+        get_attributes: no
+        get_checksum: no
+        get_mime: no
+      register: cacert_file
+
+    - name: Create a new CA private key, if it does not exist yet.
+      community.crypto.openssl_privatekey:
+        path: "{{ ansible_facts['user_dir'] }}/ca/ca-key.pem"
+        passphrase: verysecret
+        type: RSA
+        cipher: auto
+        size: 8192
+        mode: 0600
+      when: not cakey_file.stat.exists
+
+    - name: Generate a CSR for the CA cert.
+      community.crypto.openssl_csr:
+        path: "{{ ansible_facts['user_dir'] }}/ca/ca-csr.pem"
+        privatekey_path: "{{ ansible_facts['user_dir'] }}/ca/ca-key.pem"
+        privatekey_passphrase: verysecret
+        basic_constraints: "CA:TRUE"
+        basic_constraints_critical: yes
+        subject:
+          C: US
+          ST: North Carolina
+          L: Raleigh
+          O: Red Hat
+          OU: RHT
+          CN: Classroom Root CA
+        mode: 0600
+      when: not cacert_file.stat.exists
+
+    - name: Create a self-signed cert for the CA.
+      community.crypto.x509_certificate:
+        path: "{{ ansible_facts['user_dir'] }}/ca/ca-cert.pem"
+        csr_path: "{{ ansible_facts['user_dir'] }}/ca/ca-csr.pem"
+        privatekey_path: "{{ ansible_facts['user_dir'] }}/ca/ca-key.pem"
+        privatekey_passphrase: verysecret
+        provider: selfsigned
+        selfsigned_not_after: +510w
+        mode: 0600
+      when: not cacert_file.stat.exists
+
+    - name: Get rid of the CSR.
+      ansible.builtin.file:
+        path: "{{ ansible_facts['user_dir'] }}/ca/ca-csr.pem"
+        state: absent
+
+    - name: Copy CA cert to ca-trust dir.
+      become: yes
+      ansible.builtin.copy:
+        src: "{{ ansible_facts['user_dir'] }}/ca/ca-cert.pem"
+        dest: "/etc/pki/ca-trust/source/anchors/lab-ca.pem"
+        mode: 0644
+      register: copied
+
+    - name: Have workstation trust the CA.
+      become: yes
+      ansible.builtin.command: update-ca-trust
+      when: copied.changed
+
+- name: Have utility serve time.
+  hosts: utility.lab.example.com
+  become: no
+  gather_subset: min
+  tasks:
+    - name: Ensure we have the correct chrony.conf
+      become: yes
+      ansible.builtin.copy:
+        dest: /etc/chrony.conf
+        mode: 0644
+        content: |
+          # Use public servers from the pool.ntp.org project.
+          # Please consider joining the pool (http://www.pool.ntp.org/join.html).
+          server 172.25.254.254 iburst
+          
+          # Record the rate at which the system clock gains/loses time.
+          driftfile /var/lib/chrony/drift
+          
+          # Allow the system clock to be stepped in the first three updates
+          # if its offset is larger than 1 second.
+          makestep 1.0 3
+          
+          # Enable kernel synchronization of the real-time clock (RTC).
+          rtcsync
+          
+          # Enable hardware timestamping on all interfaces that support it.
+          #hwtimestamp *
+          
+          # Increase the minimum number of selectable sources required to adjust
+          # the system clock.
+          #minsources 2
+          
+          # Allow NTP client access from local network.
+          #allow 192.168.0.0/16
+          allow all
+          
+          bindcmdaddress 0.0.0.0
+          cmdallow all
+          
+          # Serve time even if not synchronized to a time source.
+          #local stratum 10
+          
+          # Specify file containing keys for NTP authentication.
+          keyfile /etc/chrony.keys
+          
+          # Get TAI-UTC offset and leap seconds from the system tz database.
+          leapsectz right/UTC
+          
+          # Specify directory for log files.
+          logdir /var/log/chrony
+          
+          # Select which information is logged.
+          #log measurements statistics tracking
+      notify:
+        - restart chronyd
+
+    - name: Ensure firewall allows NTP.
+      become: yes
+      ansible.posix.firewalld:
+        immediate: yes
+        permanent: yes
+        zone: "{{ item }}"
+        service: ntp
+        state: enabled
+      loop:
+        - external
+        - public
+
+    - name: Ensure firewall allows cmdport.
+      become: yes
+      ansible.posix.firewalld:
+        immediate: yes
+        permanent: yes
+        zone: "{{ item }}"
+        port: 323/udp
+        state: enabled
+      loop:
+        - external
+        - public
+
+  handlers:
+    - name: restart chronyd
+      become: yes
+      ansible.builtin.service:
+        name: chronyd
+        state: restarted
+...
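
Two quick spot checks could follow this play; a sketch only (not part of the commit), assuming openssl and chronyc are installed on workstation and utility permits remote chronyc monitoring commands (cmdallow all, port 323/udp open, as configured above):

- name: Spot-check the CA trust anchor and the NTP service.
  hosts: workstation.lab.example.com
  become: no
  gather_facts: no
  tasks:
    - name: Verify the copied anchor is a valid self-signed CA certificate.
      ansible.builtin.command:
        cmd: openssl verify -CAfile /etc/pki/ca-trust/source/anchors/lab-ca.pem /etc/pki/ca-trust/source/anchors/lab-ca.pem
      changed_when: no

    - name: Ask utility's chronyd for its tracking status.
      ansible.builtin.command:
        cmd: chronyc -h utility.lab.example.com tracking
      changed_when: no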

+ 423 - 0
playbooks/32-quay-deploy.yml

@@ -0,0 +1,423 @@
+---
+# Tasks required by 10-quay-deploy.adoc.
+- name: Issue a new Cert for Quay if necessary.
+  hosts: workstation.lab.example.com
+  gather_subset: min
+  tasks:
+    - name: Check if Quay key exists to save time
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/ca/quay-key.pem"
+        get_attributes: no
+        get_checksum: no
+        get_mime: no
+      register: qkey_file
+
+    - name: Check if Quay cert exists to save time
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/ca/quay-cert.pem"
+        get_attributes: no
+        get_checksum: no
+        get_mime: no
+      register: qcert_file
+
+    - name: Create a new private key for Quay, if it does not exist yet.
+      community.crypto.openssl_privatekey:
+        path: "{{ ansible_facts['user_dir'] }}/ca/quay-key.pem"
+        type: RSA
+        size: 4096
+        mode: 0600
+      when: not qkey_file.stat.exists
+
+    - name: Create a CSR for Quay
+      community.crypto.openssl_csr:
+        path: "{{ ansible_facts['user_dir'] }}/ca/quay-csr.pem"
+        privatekey_path: "{{ ansible_facts['user_dir'] }}/ca/quay-key.pem"
+        subject:
+          C: US
+          ST: North Carolina
+          L: Raleigh
+          O: Red Hat
+          OU: RHT
+          CN: registry.ocp4.example.com
+        use_common_name_for_san: yes
+        mode: 0600
+      when: not qcert_file.stat.exists
+
+    - name: Issue a certificate for Quay if one isn't there yet.
+      ansible.builtin.command:
+        cmd: openssl ca -config {{ ansible_facts['user_dir'] }}/ca/openssl.cnf -passin pass:verysecret -in {{ ansible_facts['user_dir'] }}/ca/quay-csr.pem -out {{ ansible_facts['user_dir'] }}/ca/quay-cert.pem -batch -notext
+        creates: "{{ ansible_facts['user_dir'] }}/ca/quay-cert.pem"
+
+    - name: Load CA cert and Quay cert.
+      ansible.builtin.set_fact:
+        ca_cert: "{{ lookup('file', ansible_facts['user_dir'] + '/ca/ca-cert.pem') }}"
+        quay_cert: "{{ lookup('file', ansible_facts['user_dir'] + '/ca/lab-ca/newcerts/00.pem') }}"
+
+    - name: Concatenate Quay and CA certs.
+      ansible.builtin.copy:
+        dest: "{{ ansible_facts['user_dir'] }}/ca/quay-cert.pem"
+        content: |
+          {{ quay_cert }}
+          {{ ca_cert }}
+
+- name: Prepare registry VM to run Quay services.
+  hosts: registry.ocp4.example.com
+  gather_subset: min
+  tasks:
+    - name: Ensure firewall allows HTTP/HTTPS.
+      become: yes
+      ansible.posix.firewalld:
+        immediate: yes
+        permanent: yes
+        zone: public
+        service: "{{ item }}"
+        state: enabled
+      loop:
+        - http
+        - https
+
+    - name: Ensure unprivileged users can open ports from 80 onwards.
+      become: yes
+      ansible.posix.sysctl:
+        name: net.ipv4.ip_unprivileged_port_start
+        value: "80"
+        state: present
+        sysctl_file: /etc/sysctl.d/quay-low-ports.conf
+        reload: yes
+
+    - name: Ensure user quay exists.
+      become: yes
+      ansible.builtin.user:
+        name: quay
+        create_home: yes
+        state: present
+
+    - name: Have the quay user accept student's SSH key.
+      become: yes
+      ansible.posix.authorized_key:
+        key: "{{ lookup('ansible.builtin.file', '/home/student/.ssh/lab_rsa.pub') }}"
+        user: quay
+        state: present
+
+    - name: Ensure user quay will linger.
+      become: yes
+      ansible.builtin.command:
+        cmd: loginctl enable-linger quay
+        creates: /var/lib/systemd/linger/quay
+
+    - name: Ensure data directories are there.
+      become: yes
+      ansible.builtin.file:
+        path: "{{ item }}"
+        mode: 0770
+        owner: quay
+        group: quay
+        state: directory
+      loop:
+        - /local/quay-pg
+        - /local/quay
+
+    - name: Ensure .docker directory is there
+      become: yes
+      ansible.builtin.file:
+        path: "/home/quay/.docker"
+        mode: 0700
+        owner: quay
+        group: quay
+        state: directory
+
+    # TODO: figure out how to customise this with registry host changes
+    - name: Ensure podman will be able to log into the upstream registry
+      become: yes
+      ansible.builtin.copy:
+        dest: "/home/quay/.docker/config.json"
+        content: |
+          {"auths":{"registry.redhat.io":{"auth":"fHVoYy1wb29sLTlmMDA1Mzc2LTM2YTItNDJhMS1hNTQwLTA0NzNkYzg3MzYzMzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSTVPRGc1WVdFeFl6Qm1PV0kwWmpVM1lqazNObUk1WldFeU16SXdaalUwTUNKOS5zWmQ5VE1RbzBXREc2NUc5Qk1ObmtuYlBjRkIzNmhyRFhkMThfdTNLeHFaczdlOG1hQ19QeEFReGpwdVk0YVM2VERIbkxDNWpGYjRRNXFYVEpWbjJCOGE4cDFuY08tM24ySG5QdDg3NmktVUFDU3lldWtpb3k4aHI0V3d1ZkhReFVYMmxxWFhYdjN6blE3am1URUNBc25rWkNRSFU1dFNpRnNUZHhFZGZkeU42Z20xN3VqY2thZG5NbFBZcTZfU1I2bUtLaUpUdFQ3SFlDWXJBVk5zZ0tfNGFkZ2MtRXBlbEtHbGNERWkzNGhYbzFqbEIzRERyUWkxSUxCV0UwZkdXb1czZy1ZUzFGMFlEXzc0bm1XSU5mUE1jM25UOERaQWl0OEw0VlFPTnZnUE51YnVfTVVGUGhqX29VUjF3VUR0a1BRNktJdm82UWYyRkdwMndLM1B6YnRBRFFzRVZTZDlITzQ3a0RKdGFobk95YTFmRmdqZVk1bFNxLW1vT2RqUldCZ3U2XzNIX25lZExJR1lQRHRBZnp5cGJ1eHZ1cEd1M2hYWnVzeWN0aURtR203SkR5RW5KdjF1RFZmYVduU2EzSV9NcFRSVVcyZWU1RF9CanJleTdlU2I0bEpGcmp1eC1nY2JVaHFsWGJZc2l6azdXWHpvRmtrVFlMdXFDQ1FvS1J0OFdSN1UzTmh3c3Q2ckV3eEFOaWJFTlNzUVB3MGg4X0NDRm5qTHFSTl82cWpTc0tpeWRGT2tHVFliT0taTktaSVVhYkZFTjRhYVRVYmlYTVdPS2Eyak1xLUhwazBMNEowUmtOM2JkQVVqWmtERHE0ZFY1ZVFjdXNIeV9LY29nd1VKSjZ4MDNObnM4b0xBdjRJZ3RKeXlxcmE1YUJHSkxReHNjRXVSNzQwWQ=="}}}
+        mode: 0600
+        owner: quay
+        group: quay
+
+- name: Configure containers and their environment on registry VM.
+  hosts: registry.ocp4.example.com
+  gather_subset: min
+  remote_user: quay
+  tasks:
+    - name: Create a podman network, if necessary.
+      containers.podman.podman_network:
+        name: quay
+        state: present
+
+    - name: Pull all the images if necessary.
+      containers.podman.podman_image:
+        name: "{{ registry_host }}/{{ item }}"
+        pull: yes
+        state: present
+      loop:
+        - rhel9/postgresql-15:latest
+        - rhel9/redis-7:latest
+        - quay/quay-rhel8:v{{ quay_version }}
+        - quay/clair-rhel8:v{{ quay_version }}
+
+    # TODO: recursive!
+    - name: Ensure PG datadir is owned by the correct user.
+      become_method: containers.podman.podman_unshare
+      become: yes
+      ansible.builtin.file:
+        path: /local/quay-pg
+        state: directory
+        owner: 26
+        mode: 0770
+
+    - name: Start postgres container if necessary.
+      containers.podman.podman_container:
+        name: postgresql
+        image: "{{ registry_host }}/rhel9/postgresql-15:latest"
+        rm: yes
+        detach: yes
+        env:
+          POSTGRESQL_USER: quay
+          POSTGRESQL_PASSWORD: secret
+          POSTGRESQL_DATABASE: quay
+          POSTGRESQL_ADMIN_PASSWORD: verysecret
+        network:
+          - quay
+        volumes:
+          - /local/quay-pg:/var/lib/pgsql/data:Z
+        state: started
+      register: pg_started
+
+    - name: Wait for the PostgreSQL container to become ready if it was changed in any way.
+      containers.podman.podman_container_info:
+        name: postgresql
+      when: pg_started.changed
+      register: pg_info
+      until: pg_info.containers[0].State.Running
+      retries: 12
+      delay: 5
+
+    - name: Wait for the server inside container to start up.
+      containers.podman.podman_container_exec:
+        name: postgresql
+        command: 'psql -d quay -U postgres -c "SELECT 1"'
+      when: pg_started.changed
+      changed_when: no
+      register: pg_rdy
+      until: pg_rdy.rc == 0
+      retries: 10
+      delay: 3
+
+    - name: Create the trigram extension if necessary.
+      containers.podman.podman_container_exec:
+        name: postgresql
+        command: 'psql -d quay -U postgres -c "CREATE EXTENSION IF NOT EXISTS pg_trgm"'
+      register: pg_ext
+      changed_when:
+        - not "already exists" in pg_ext.stderr
+
+    - name: If we started the PG container and created the extension, stop the container now.
+      containers.podman.podman_container:
+        name: postgresql
+        state: stopped
+      when:
+        - pg_started.changed
+        - pg_ext.changed
+
+    - name: Create Quay config directory if necessary.
+      ansible.builtin.file:
+        path: "{{ ansible_facts['user_dir'] }}/config"
+        state: directory
+        mode: 0770
+
+    - name: Publish Quay key on registry.
+      ansible.builtin.copy:
+        src: /home/student/ca/quay-key.pem
+        dest: "{{ ansible_facts['user_dir'] }}/config/ssl.key"
+        mode: 0440
+
+    - name: Publish Quay cert on registry.
+      ansible.builtin.copy:
+        src: /home/student/ca/quay-cert.pem
+        dest: "{{ ansible_facts['user_dir'] }}/config/ssl.cert"
+        mode: 0440
+
+    - name: Publish Quay config file.
+      ansible.builtin.copy:
+        dest: "{{ ansible_facts['user_dir'] }}/config/config.yaml"
+        content: |
+          BUILDLOGS_REDIS:
+            host: redis
+            password: verysecret
+            port: 6379
+          CREATE_NAMESPACE_ON_PUSH: true
+          DATABASE_SECRET_KEY: 410c87de-8ad8-4f4c-9670-2ec25bc87191
+          DB_URI: postgresql://quay:secret@postgresql:5432/quay
+          DISTRIBUTED_STORAGE_CONFIG:
+            default:
+              - LocalStorage
+              - storage_path: /registry
+          DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: []
+          DISTRIBUTED_STORAGE_PREFERENCE:
+            - default
+          FEATURE_MAILING: false
+          SECRET_KEY: 7ce58d4d-b6f5-4400-ba6b-77b9f728a115
+          SERVER_HOSTNAME: registry.ocp4.example.com
+          PREFERRED_URL_SCHEME: https
+          SETUP_COMPLETE: true
+          SUPER_USERS:
+            - admin
+          TESTING: false
+          USER_EVENTS_REDIS:
+            host: redis
+            password: verysecret
+            port: 6379
+        mode: 0660
+
+    # TODO: recursive!
+    - name: Ensure Quay data dirs are owned by the correct user.
+      become_method: containers.podman.podman_unshare
+      become: yes
+      ansible.builtin.file:
+        path: "{{ item }}"
+        state: directory
+        owner: 1001
+      loop:
+        - /local/quay
+        - "{{ ansible_facts['user_dir'] }}/config"
+
+    - name: Ensure systemd user dir is there.
+      ansible.builtin.file:
+        path: "{{ ansible_facts['user_dir'] }}/.config/systemd/user"
+        state: directory
+
+    - name: Deploy service units.
+      ansible.builtin.template:
+        dest: "{{ ansible_facts['user_dir'] }}/.config/systemd/user/{{ item }}"
+        src: "templates/{{ item }}.j2"
+      loop:
+        - quay-pg.service
+        - quay-redis.service
+        - quay.service
+
+    - name: Reload systemd.
+      ansible.builtin.systemd_service:
+        daemon_reload: yes
+        scope: user
+
+    - name: Enable services and start them.
+      ansible.builtin.systemd_service:
+        name: "{{ item }}"
+        scope: user
+        state: started
+        enabled: yes
+      loop:
+        - quay-pg
+        - quay-redis
+        - quay
+      register: startup
+
+    - name: Wait a bit if the Quay container was just started.
+      ansible.builtin.uri:
+        method: GET
+        url: https://registry.ocp4.example.com/
+        headers:
+          Accept: application/json
+          Content-Type: application/json
+        validate_certs: no
+        status_code:
+          - 200
+          - 404
+          - 502
+      when: startup.results[2].changed
+      register: startup_wait
+      until: startup_wait.status == 200
+      retries: 30
+      delay: 5
+
+    - name: Check if the admin user exists already.
+      ansible.builtin.uri:
+        method: GET
+        url: https://registry.ocp4.example.com/api/v1/users/admin
+        headers:
+          Accept: application/json
+          Content-Type: application/json
+        validate_certs: no
+        status_code:
+          - 200
+          - 404
+        return_content: yes
+      register: adminuser_is_there
+
+    - name: Create an admin user if not yet there.
+      block:
+        - name: Obtain an encoded CSRF token.
+          ansible.builtin.uri:
+            method: GET
+            url: https://registry.ocp4.example.com/
+            headers:
+              Accept: application/json
+              Content-Type: application/json
+            validate_certs: no
+            return_content: yes
+          ignore_errors: yes
+          register: csrf_token_payload
+
+        - ansible.builtin.assert:
+            that:
+              - csrf_token_payload.cookies['_csrf_token'] is defined
+            fail_msg: "No CSRF token returned by registry. Can not proceed."
+            success_msg: "Good, CSRF token found in response."
+
+        # In case of issues, run with -v and this will show the raw cookie.
+        - ansible.builtin.debug:
+            var: csrf_token_payload.cookies
+            verbosity: 1
+
+        - name: Store the cookie as a new fact. We need it later.
+          ansible.builtin.set_fact:
+            csrf_cookie: "{{ csrf_token_payload.cookies['_csrf_token'] }}"
+
+        # In case of issues, run with -v and this will show the cookie payload.
+        - ansible.builtin.debug:
+            var: csrf_cookie
+            verbosity: 1
+
+        # Must chop out the part of the token before the first dot (the rest is control data).
+        # Next, pad it with "==" at the end to reach 112 characters (no validation is done here).
+        # Lastly, base64-decode it and parse the resulting JSON to obtain the token value (_csrf_token).
+        - name: Store CSRF token as a new fact.
+          ansible.builtin.set_fact:
+            csrf_token: "{{ (csrf_token_payload.cookies['_csrf_token'] | ansible.builtin.regex_replace('^(\\w+)\\..*$', '\\1==') | ansible.builtin.b64decode | ansible.builtin.from_json)['_csrf_token'] }}"
+
+        # In case of issues, run with -v and this will show the decoded token.
+        - ansible.builtin.debug:
+            var: csrf_token
+            verbosity: 1
+
+        - name: Send a POST request to registry API to create the admin user.
+          ansible.builtin.uri:
+            method: POST
+            url: https://registry.ocp4.example.com/api/v1/user/
+            headers:
+              Accept: application/json
+              Content-Type: application/json
+              Cookie: _csrf_token={{ csrf_cookie }}
+              X-CSRF-Token: "{{ csrf_token }}"
+            body: |
+              {
+                "username": "admin",
+                "password": "redhat123",
+                "repeatPassword": "redhat123",
+                "email": "admin@example.com"
+              }
+            body_format: json
+            validate_certs: no
+            return_content: yes
+          register: admin_user_response
+
+        # In case of issues, run with -v and this will show the response.
+        - ansible.builtin.debug:
+            var: admin_user_response
+            verbosity: 1
+
+      when: adminuser_is_there.status == 404
+...
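
A simple end-to-end check of the deployment, as a hedged sketch (not part of this commit): log in to the new registry with the admin account created above, assuming podman is installed on workstation and the classroom CA is already trusted there:

- name: Smoke-test the freshly deployed registry.
  hosts: workstation.lab.example.com
  become: no
  gather_subset: min
  tasks:
    - name: Log in to Quay as the admin user.
      containers.podman.podman_login:
        registry: registry.ocp4.example.com
        username: admin
        password: redhat123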

+ 200 - 0
playbooks/33-clair-deploy.yml

@@ -0,0 +1,200 @@
+---
+# Tasks required by 15-clair-deploy.adoc.
+- name: Prepare registry VM to run Clair services.
+  hosts: registry.ocp4.example.com
+  gather_subset: min
+  remote_user: quay
+  tasks:
+    - name: Ensure the podman network is there.
+      containers.podman.podman_network_info:
+        name: quay
+      register: quay_net
+      ignore_errors: yes
+
+    - ansible.builtin.assert:
+        that:
+          - not quay_net.failed
+          - quay_net.networks is defined
+          - quay_net.networks is iterable
+          - quay_net.networks | length == 1
+        fail_msg: "FATAL: Podman network 'quay' does not exist for 'quay' user. Ensure you deployed Quay before running this playbook."
+        success_msg: "OK, network 'quay' found."
+
+    - name: Ensure the quay service is defined.
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/.config/systemd/user/quay.service"
+        get_attributes: no
+        get_checksum: no
+        get_mime: no
+      register: quay_svc_unit
+
+    - ansible.builtin.assert:
+        that:
+          - not quay_svc_unit.failed
+          - quay_svc_unit.stat.exists
+        fail_msg: "FATAL: User service 'quay.service' not found for 'quay' user. Ensure you deployed Quay before running this playbook."
+        success_msg: "OK, service 'quay.service' found."
+
+    - name: Ensure the quay-pg service is defined.
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/.config/systemd/user/quay-pg.service"
+        get_attributes: no
+        get_checksum: no
+        get_mime: no
+      register: quay_pg_svc_unit
+
+    - ansible.builtin.assert:
+        that:
+          - not quay_pg_svc_unit.failed
+          - quay_pg_svc_unit.stat.exists
+        fail_msg: "FATAL: User service 'quay-pg.service' not found for 'quay' user. Ensure you deployed Quay before running this playbook."
+        success_msg: "OK, service 'quay-pg.service' found."
+
+    - name: Ensure Quay PostgreSQL is running.
+      ansible.builtin.systemd_service:
+        name: quay-pg
+        scope: user
+        state: started
+
+    - name: Check whether the clair database exists.
+      containers.podman.podman_container_exec:
+        name: postgresql
+        command: psql -d postgres -U postgres -t -A -c "SELECT datname FROM pg_database WHERE datname = 'clair'"
+      register: pg_clair
+      changed_when: no
+
+    - name: Create the clair database if necessary.
+      containers.podman.podman_container_exec:
+        name: postgresql
+        command: 'psql -d postgres -U postgres -c "CREATE DATABASE clair OWNER quay"'
+      when:
+        - pg_clair is defined
+        - pg_clair.stdout_lines | length == 0
+
+    - name: Create the uuid-ossp extension if necessary.
+      containers.podman.podman_container_exec:
+        name: postgresql
+        command: psql -d clair -U postgres -c 'CREATE EXTENSION IF NOT EXISTS "uuid-ossp"'
+      register: pg_ext
+      changed_when:
+        - not "already exists" in pg_ext.stderr
+
+    # TODO: Make loop labels nicer.
+    - name: Patch Quay config if necessary.
+      ansible.builtin.lineinfile:
+        path: "{{ ansible_facts['user_dir'] }}/config/config.yaml"
+        insertafter: "{{ item.after }}"
+        regexp: "{{ item.fixre }}"
+        line: "{{ item.value }}"
+      loop:
+        - after: "^FEATURE_MAILING: false$"
+          fixre: "^FEATURE_SECURITY_SCANNER: .*$"
+          value: "FEATURE_SECURITY_SCANNER: true"
+        - after: "^SECRET_KEY: .*$"
+          fixre: "^SECURITY_SCANNER_INDEXING_INTERVAL: .*$"
+          value: "SECURITY_SCANNER_INDEXING_INTERVAL: 30"
+        - after: "^SECURITY_SCANNER_INDEXING_INTERVAL: .*$"
+          fixre: "^SECURITY_SCANNER_V4_PSK: .*$"
+          value: "SECURITY_SCANNER_V4_PSK: NjA1aWhnNWk4MWhqNw=="
+        - after: "^SECURITY_SCANNER_V4_PSK: .*$"
+          fixre: "^SECURITY_SCANNER_V4_ENDPOINT: .*$"
+          value: "SECURITY_SCANNER_V4_ENDPOINT: http://clair:8081"
+      notify:
+        - restart quay and wait for ready
+
+    - name: Create Clair config directory if necessary.
+      ansible.builtin.file:
+        path: "{{ ansible_facts['user_dir'] }}/clair"
+        state: directory
+        mode: 0775
+
+    - name: Publish Clair config if necessary.
+      ansible.builtin.copy:
+        dest: "{{ ansible_facts['user_dir'] }}/clair/config.yaml"
+        content: |
+          http_listen_addr: :8081
+          introspection_addr: :8088
+          log_level: debug
+          indexer:
+            connstring: host=postgresql port=5432 dbname=clair user=quay password=secret sslmode=disable
+            scanlock_retry: 10
+            layer_scan_concurrency: 5
+            migrations: true
+          matcher:
+            connstring: host=postgresql port=5432 dbname=clair user=quay password=secret sslmode=disable
+            max_conn_pool: 100
+            migrations: true
+            indexer_addr: clair-indexer
+          notifier:
+            connstring: host=postgresql port=5432 dbname=clair user=quay password=secret sslmode=disable
+            delivery_interval: 1m
+            poll_interval: 5m
+            migrations: true
+          auth:
+            psk:
+              key: "NjA1aWhnNWk4MWhqNw=="
+              iss: ["quay"]
+          metrics:
+            name: "prometheus"
+        mode: 0664
+      notify:
+        - restart quay and wait for ready
+        - restart clair
+
+    - name: Ensure same TLS trust will be used for Clair as for workstation.
+      ansible.builtin.copy:
+        src: /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
+        dest: "{{ ansible_facts['user_dir'] }}/tls-ca-bundle.pem"
+        mode: 0664
+      notify:
+        - restart clair
+
+    - name: Ensure Clair service unit is there.
+      ansible.builtin.template:
+        dest: "{{ ansible_facts['user_dir'] }}/.config/systemd/user/clair.service"
+        src: "templates/clair.service.j2"
+
+    - name: Reload systemd.
+      ansible.builtin.systemd_service:
+        daemon_reload: yes
+        scope: user
+
+    - name: Enable services and start them.
+      ansible.builtin.systemd_service:
+        name: clair
+        scope: user
+        state: started
+        enabled: yes
+
+  handlers:
+    - name: restart quay
+      listen: restart quay and wait for ready
+      ansible.builtin.systemd_service:
+        name: quay
+        scope: user
+        state: restarted
+
+    - name: wait for quay to become ready again
+      listen: restart quay and wait for ready
+      ansible.builtin.uri:
+        method: GET
+        url: https://registry.ocp4.example.com/
+        headers:
+          Accept: application/json
+          Content-Type: application/json
+        validate_certs: no
+        status_code:
+          - 200
+          - 404
+          - 502
+      register: startup_wait
+      until: startup_wait.status == 200
+      retries: 30
+      delay: 5
+
+    - name: restart clair
+      ansible.builtin.systemd_service:
+        name: clair
+        scope: user
+        state: restarted
+...
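
One way to confirm Clair actually came up after the handlers ran, sketched here (not part of the commit):

- name: Check that the Clair container is running.
  hosts: registry.ocp4.example.com
  gather_subset: min
  remote_user: quay
  tasks:
    - name: Inspect the clair container.
      containers.podman.podman_container_info:
        name: clair
      register: clair_info
      failed_when: not clair_info.containers[0].State.Running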

+ 99 - 0
playbooks/34-clair-disable.yml

@@ -0,0 +1,99 @@
+---
+# Tasks required to disable Clair scanning (required before oc-mirror).
+- name: Disable Clair integration in Quay and stop Clair.
+  hosts: registry.ocp4.example.com
+  gather_subset: min
+  remote_user: quay
+  tasks:
+    - name: Ensure the podman network is there.
+      containers.podman.podman_network_info:
+        name: quay
+      register: quay_net
+      ignore_errors: yes
+
+    - ansible.builtin.assert:
+        that:
+          - not quay_net.failed
+          - quay_net.networks is defined
+          - quay_net.networks is iterable
+          - quay_net.networks | length == 1
+        fail_msg: "FATAL: Podman network 'quay' does not exist for 'quay' user. Ensure you deployed Quay before running this playbook."
+        success_msg: "OK, network 'quay' found."
+
+    - name: Ensure the quay service is defined.
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/.config/systemd/user/quay.service"
+        get_attributes: no
+        get_checksum: no
+        get_mime: no
+      register: quay_svc_unit
+
+    - ansible.builtin.assert:
+        that:
+          - not quay_svc_unit.failed
+          - quay_svc_unit.stat.exists
+        fail_msg: "FATAL: User service 'quay.service' not found for 'quay' user. Ensure you deployed Quay before running this playbook."
+        success_msg: "OK, service 'quay.service' found."
+
+    - name: Ensure the clair service is defined.
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/.config/systemd/user/clair.service"
+        get_attributes: no
+        get_checksum: no
+        get_mime: no
+      register: clair_svc_unit
+
+    - ansible.builtin.assert:
+        that:
+          - not clair_svc_unit.failed
+          - clair_svc_unit.stat.exists
+        fail_msg: "FATAL: User service 'clair.service' not found for 'quay' user. Ensure you deployed Clair before running this playbook."
+        success_msg: "OK, service 'clair.service' found."
+
+    - name: Patch Quay config if necessary.
+      ansible.builtin.lineinfile:
+        path: "{{ ansible_facts['user_dir'] }}/config/config.yaml"
+        regexp: "FEATURE_SECURITY_SCANNER:"
+        line: "FEATURE_SECURITY_SCANNER: false"
+      notify:
+        - restart quay and wait for ready
+
+    - name: Disable and stop Clair.
+      ansible.builtin.systemd_service:
+        name: clair
+        scope: user
+        state: stopped
+        enabled: no
+
+    - name: Also, kill the container if necessary.
+      containers.podman.podman_container:
+        name: clair
+        state: stopped
+        stop_time: 10
+
+  handlers:
+    - name: restart quay
+      listen: restart quay and wait for ready
+      ansible.builtin.systemd_service:
+        name: quay
+        scope: user
+        state: restarted
+
+    - name: wait for quay to become ready again
+      listen: restart quay and wait for ready
+      ansible.builtin.uri:
+        method: GET
+        url: https://registry.ocp4.example.com/
+        headers:
+          Accept: application/json
+          Content-Type: application/json
+        validate_certs: no
+        status_code:
+          - 200
+          - 404
+          - 502
+      register: startup_wait
+      until: startup_wait.status == 200
+      retries: 30
+      delay: 5
+...
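
A short verification sketch (not part of the commit) that the scanner really is off and Clair will not come back after a reboot:

- name: Confirm the security scanner is disabled.
  hosts: registry.ocp4.example.com
  gather_subset: min
  remote_user: quay
  tasks:
    - name: Read the scanner flag from the Quay config.
      ansible.builtin.command:
        cmd: "grep '^FEATURE_SECURITY_SCANNER:' {{ ansible_facts['user_dir'] }}/config/config.yaml"
      register: scanner_flag
      changed_when: no
      failed_when: "'false' not in scanner_flag.stdout"

    - name: Confirm the clair user service is disabled.
      ansible.builtin.command:
        cmd: systemctl --user is-enabled clair
      register: clair_enabled
      changed_when: no
      failed_when: "'disabled' not in clair_enabled.stdout"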

File diffs are limited because there are too many files changed.
+ 83 - 0
playbooks/40-mirror-prep.yml


+ 53 - 0
playbooks/45-oc-mirror.yml

@@ -0,0 +1,53 @@
+---
+# Create image set config if necessary, start "oc mirror".
+- name: Ensure "oc mirror" has completed. (NON-IDEMPOTENT!)
+  hosts: workstation.lab.example.com
+  gather_subset: min
+  become: no
+  tasks:
+    - name: Ensure working directory exists.
+      ansible.builtin.file:
+        path: "{{ ansible_facts['user_dir'] }}/mirror"
+        state: directory
+        mode: 0755
+        owner: student
+        group: student
+
+    - name: Ensure image set config is correct.
+      ansible.builtin.copy:
+        dest: "{{ ansible_facts['user_dir'] }}/image-set-config.yaml"
+        mode: 0644
+        owner: student
+        group: student
+        content: |
+          kind: ImageSetConfiguration
+          apiVersion: mirror.openshift.io/v2alpha1
+          mirror:
+            platform:
+              channels:
+              - name: stable-4.18
+                type: ocp
+                minVersion: 4.18.6
+                maxVersion: 4.18.6
+              graph: true
+            operators:
+              - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.18
+                full: false
+                packages:
+                  - name: node-maintenance-operator
+            additionalImages:
+              - name: registry.redhat.io/ubi9/ubi:latest
+
+    - name: Kick off "oc mirror".
+      ansible.builtin.command:
+        cmd: oc mirror --v2 -c {{ ansible_facts['user_dir'] }}/image-set-config.yaml --workspace file://{{ ansible_facts['user_dir'] }}/mirror/ docker://registry.ocp4.example.com
+      register: mirror_output
+
+    - name: Show what happened on stdout.
+      ansible.builtin.debug:
+        var: mirror_output.stdout_lines
+
+    - name: Show what happened on stderr.
+      ansible.builtin.debug:
+        var: mirror_output.stderr_lines
+...
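
Because the play above is explicitly non-idempotent, a cheap post-condition check can help; this sketch (not part of the commit) only verifies that oc-mirror produced the IDMS manifest the later installer playbooks depend on:

- name: Confirm oc-mirror produced the expected cluster resources.
  hosts: workstation.lab.example.com
  become: no
  gather_subset: min
  tasks:
    - name: Look for the IDMS manifest in the workspace.
      ansible.builtin.stat:
        path: "{{ ansible_facts['user_dir'] }}/mirror/working-dir/cluster-resources/idms-oc-mirror.yaml"
        get_attributes: no
        get_checksum: no
        get_mime: no
      register: idms_file

    - ansible.builtin.assert:
        that:
          - idms_file.stat.exists
        fail_msg: "oc mirror did not produce the expected IDMS manifest."
        success_msg: "OK, IDMS manifest found."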

File diffs are limited because there are too many files changed.
+ 121 - 0
playbooks/50-coreos-inst-prep.yml


+ 192 - 0
playbooks/52-coreos-installer.yml

@@ -0,0 +1,192 @@
+---
+# Perform the tasks involved with installing SNO using coreos-installer.
+- name: Prepare the files required for a SNO installation using coreos-installer.
+  hosts: workstation.lab.example.com
+  become: no
+  gather_subset: min
+  tasks:
+    - name: Check the dependency status.
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/{{ item }}"
+        get_attributes: no
+        get_checksum: no
+        get_mime: no
+      register: dependencies
+      loop:
+        - install-pull-secret
+        - .ssh/openshift.pub
+        - ca/ca-cert.pem
+        - mirror/working-dir/cluster-resources/idms-oc-mirror.yaml
+        - Downloads/rhcos-418.94.202501221327-0-live.x86_64.iso
+
+    - ansible.builtin.assert:
+        that:
+          - dependencies.results[0].stat.exists
+          - dependencies.results[1].stat.exists
+          - dependencies.results[2].stat.exists
+          - dependencies.results[3].stat.exists
+          - dependencies.results[4].stat.exists
+        fail_msg: |
+          ERROR: Either pull secret, SSH keypair, CA certificate, RHCOS ISO, or mirror artifacts are missing.
+          Ensure all the relevant preceding tasks have been completed:
+            - Quay prerequisites,
+            - Quay deployment,
+            - oc-mirror prerequisites,
+            - oc-mirror execution,
+            - coreos-installer prerequisites
+          Exiting.
+        success_msg: "OK, dependencies exist."
+
+    - name: Check whether someone fiddled with installation before.
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/embed/.openshift_install.log"
+      register: install_log
+
+    - name: Warn if installation log was found.
+      ansible.builtin.pause:
+        prompt: |
+          WARNING: Found .openshift_install.log in the cluster working directory. This usually
+                   means there were previous attempts of creating installation artifacts.
+          
+                   If you want to recreate the cluster working directory from scratch, run this
+                   playbook with the variable "recreate_cluster_dir" set to any value like this:
+          
+                    ansible-playbook -e recreate_cluster_dir=yes ./52-coreos-installer.yml
+          
+                   Continuing in 5 seconds unless you interrupt execution.
+        seconds: 5
+      when:
+        - install_log.stat.exists
+        - recreate_cluster_dir is not defined
+
+    - name: Load the dependencies as facts.
+      ansible.builtin.set_fact:
+        pull_secret: "{{ lookup('ansible.builtin.file', ansible_facts['user_dir'] + '/install-pull-secret') }}"
+        public_key: "{{ lookup('ansible.builtin.file', ansible_facts['user_dir'] + '/.ssh/openshift.pub') }}"
+        lab_ca_cert: "{{ lookup('ansible.builtin.file', ansible_facts['user_dir'] + '/ca/ca-cert.pem') }}"
+        content_sources: "{{ lookup('ansible.builtin.file', ansible_facts['user_dir'] + '/mirror/working-dir/cluster-resources/idms-oc-mirror.yaml')
+                              | ansible.builtin.from_yaml_all }}"
+
+    - name: Set the fact determining installation type (required for templating).
+      ansible.builtin.set_fact:
+        install_type: iso
+
+    - name: Ensure install-config is there.
+      ansible.builtin.template:
+        src: templates/install-config-template.yaml.j2
+        dest: "{{ ansible_facts['user_dir'] }}/install-config-embed.yaml"
+        mode: 0644
+        owner: student
+        group: student
+      register: updated_install_config
+
+    - name: Remove the installation directory if so required.
+      ansible.builtin.file:
+        path: "{{ ansible_facts['user_dir'] }}/embed"
+        state: absent
+      when:
+        - recreate_cluster_dir is defined
+        - recreate_cluster_dir
+
+    - name: Ensure the presence of installation directory.
+      ansible.builtin.file:
+        path: "{{ ansible_facts['user_dir'] }}/embed"
+        state: directory
+        mode: 0755
+
+    - name: Also, ensure that the right install-config.yaml file is in there.
+      ansible.builtin.copy:
+        src: "{{ ansible_facts['user_dir'] }}/install-config-embed.yaml"
+        remote_src: yes
+        dest: "{{ ansible_facts['user_dir'] }}/embed/install-config.yaml"
+        mode: 0644
+      register: published_install_config
+      when:
+        - (not install_log.stat.exists) or (recreate_cluster_dir is defined) or updated_install_config.changed
+
+    - name: Create installation manifests if install config was published.
+      ansible.builtin.command:
+        cmd: openshift-install-fips create manifests
+        chdir: "{{ ansible_facts['user_dir'] }}/embed"
+      when: published_install_config.changed
+
+    - name: Render chrony customizations in home directory.
+      ansible.builtin.template:
+        src: templates/chrony-customization.bu.j2
+        dest: "{{ ansible_facts['user_dir'] }}/chrony-{{ item }}.bu"
+        mode: 0644
+        owner: student
+        group: student
+      loop:
+        - master
+        - worker
+
+    - name: Publish chrony customizations in manifests directory.
+      ansible.builtin.command:
+        cmd: butane ./chrony-{{ item }}.bu -o ./embed/openshift/99_chrony_{{ item }}.yaml
+        chdir: "{{ ansible_facts['user_dir'] }}"
+        creates: embed/openshift/99_chrony_{{ item }}.yaml
+      loop:
+        - master
+        - worker
+      when: published_install_config.changed
+
+    - name: Everything should be set by now, so create SNO install config.
+      ansible.builtin.command:
+        cmd: openshift-install-fips create single-node-ignition-config
+        chdir: "{{ ansible_facts['user_dir'] }}/embed"
+      when: published_install_config.changed
+      register: recreated_sno_cfg
+
+    - name: Ensure custom ISO is gone if anything was changed.
+      ansible.builtin.file:
+        path: "{{ ansible_facts['user_dir'] }}/sno-embedded-cfg.iso"
+        state: absent
+      when:
+        - recreated_sno_cfg is defined
+        - recreated_sno_cfg.changed
+
+    - name: Check if custom ISO is there.
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/sno-embedded-cfg.iso"
+        get_attributes: no
+        get_checksum: no
+        get_mime: no
+      register: custom_iso
+
+    - name: Embed install config in the ISO.
+      ansible.builtin.command:
+        cmd: coreos-installer iso ignition embed -fi ./embed/bootstrap-in-place-for-live-iso.ign -o sno-embedded-cfg.iso {{ ansible_facts['user_dir'] }}/Downloads/rhcos-418.94.202501221327-0-live.x86_64.iso
+        chdir: "{{ ansible_facts['user_dir'] }}"
+      when: not custom_iso.stat.exists
+
+- name: Copy the ISO file to target machine and write it to /dev/sdb
+  hosts: master01.ocp4.example.com
+  gather_subset: min
+  become: yes
+  tasks:
+    - name: Copy the ISO file to master01.
+      ansible.builtin.copy:
+        src: /home/student/sno-embedded-cfg.iso
+        dest: /root/sno-embedded-cfg.iso
+        mode: 0644
+      register: copied_iso
+
+    - name: Write the ISO to /dev/sdb if it was changed.
+      ansible.builtin.command:
+        cmd: dd if=/root/sno-embedded-cfg.iso of=/dev/sdb conv=sync bs=4k
+      when: copied_iso.changed
+      register: wrote_iso
+
+    - name: Wipe the filesystem of /dev/sda if ISO was written to /dev/sdb.
+      ansible.builtin.command:
+        cmd: wipefs -af /dev/sda
+      when: wrote_iso.changed
+      register: wiped_fs
+
+    - name: Reboot the machine if filesystem was wiped.
+      ansible.builtin.command:
+        cmd: reboot
+      ignore_errors: yes
+      when: wiped_fs.changed
+...
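
Before rebooting master01 it can be worth confirming that the ignition config really is embedded; a sketch (not part of the commit) using the coreos-installer "iso ignition show" subcommand:

- name: Confirm the ignition config is embedded in the custom ISO.
  hosts: workstation.lab.example.com
  become: no
  gather_subset: min
  tasks:
    - name: Dump the embedded ignition.
      ansible.builtin.command:
        cmd: coreos-installer iso ignition show {{ ansible_facts['user_dir'] }}/sno-embedded-cfg.iso
      register: embedded_ignition
      changed_when: no

    - ansible.builtin.assert:
        that:
          - embedded_ignition.stdout | length > 0
        fail_msg: "No ignition config found in sno-embedded-cfg.iso."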

+ 15 - 0
playbooks/60-agent-inst-prep.yml

@@ -0,0 +1,15 @@
+---
+# Perform the preparation tasks for agent-based installation.
+# Basically the same as 50-coreos-inst-prep.yml plus a couple of steps.
+- import_playbook: 50-coreos-inst-prep.yml
+
+- name: Additional tasks for agent installation.
+  hosts: workstation.lab.example.com
+  become: yes
+  gather_subset: min
+  tasks:
+    - name: Ensure nmstate is installed.
+      ansible.builtin.yum:
+        name: nmstate
+        state: present
+...
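
A trivial sanity check (a sketch, not part of the commit) that the nmstate tooling is usable after installation:

- name: Confirm nmstate tooling is usable on workstation.
  hosts: workstation.lab.example.com
  become: no
  gather_facts: no
  tasks:
    - name: Print the nmstatectl version.
      ansible.builtin.command:
        cmd: nmstatectl --version
      changed_when: no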

+ 233 - 0
playbooks/62-agent-installation.yml

@@ -0,0 +1,233 @@
+---
+# Configure the agent installation artifacts for SNO.
+# Mostly the same as 52-coreos-installer.yml, but some changes.
+- name: Prepare the files required for a SNO installation using agent install.
+  hosts: workstation.lab.example.com
+  become: no
+  gather_subset: min
+  tasks:
+    - name: Check the dependency status.
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/{{ item }}"
+        get_attributes: no
+        get_checksum: no
+        get_mime: no
+      register: dependencies
+      loop:
+        - install-pull-secret
+        - .ssh/openshift.pub
+        - ca/ca-cert.pem
+        - mirror/working-dir/cluster-resources/idms-oc-mirror.yaml
+        - Downloads/rhcos-418.94.202501221327-0-live.x86_64.iso
+
+    - ansible.builtin.assert:
+        that:
+          - dependencies.results[0].stat.exists
+          - dependencies.results[1].stat.exists
+          - dependencies.results[2].stat.exists
+          - dependencies.results[3].stat.exists
+          - dependencies.results[4].stat.exists
+        fail_msg: |
+          ERROR: Either pull secret, SSH keypair, CA certificate, RHCOS ISO, or mirror artifacts are missing.
+          Ensure all the relevant preceding tasks have been completed:
+            - Quay prerequisites,
+            - Quay deployment,
+            - oc-mirror prerequisites,
+            - oc-mirror execution,
+            - coreos-installer prerequisites
+          Exiting.
+        success_msg: "OK, dependencies exist."
+
+    - name: Check whether someone fiddled with installation before.
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/agent/.openshift_install.log"
+      register: install_log
+
+    - name: Warn if installation log was found.
+      ansible.builtin.pause:
+        prompt: |
+          WARNING: Found .openshift_install.log in the cluster working directory. This usually
+                   means there were previous attempts of creating installation artifacts.
+          
+                   If you want to recreate the cluster working directory from scratch, run this
+                   playbook with the variable "recreate_cluster_dir" set to any value like this:
+          
+                    ansible-playbook -e recreate_cluster_dir=yes ./62-agent-installation.yml
+          
+                   Continuing in 5 seconds unless you interrupt execution.
+        seconds: 5
+      when:
+        - install_log.stat.exists
+        - recreate_cluster_dir is not defined
+
+    - name: Load the dependencies as facts.
+      ansible.builtin.set_fact:
+        pull_secret: "{{ lookup('ansible.builtin.file', ansible_facts['user_dir'] + '/install-pull-secret') }}"
+        public_key: "{{ lookup('ansible.builtin.file', ansible_facts['user_dir'] + '/.ssh/openshift.pub') }}"
+        lab_ca_cert: "{{ lookup('ansible.builtin.file', ansible_facts['user_dir'] + '/ca/ca-cert.pem') }}"
+        content_sources: "{{ lookup('ansible.builtin.file', ansible_facts['user_dir'] + '/mirror/working-dir/cluster-resources/idms-oc-mirror.yaml')
+                              | ansible.builtin.from_yaml_all }}"
+
+    - name: Set the fact determining installation type (required for templating).
+      ansible.builtin.set_fact:
+        install_type: agent
+        install_host: master02.ocp4.example.com
+
+    - name: Collect facts from the target machine (must be reachable for that).
+      delegate_to: "{{ install_host }}"
+      delegate_facts: yes
+      ansible.builtin.setup:
+        gather_subset: min,interfaces
+
+    - name: Ensure install-config is there.
+      ansible.builtin.template:
+        src: templates/install-config-template.yaml.j2
+        dest: "{{ ansible_facts['user_dir'] }}/install-config-agent.yaml"
+        mode: 0644
+        owner: student
+        group: student
+      register: updated_install_config
+
+    - name: Ensure agent-config is there.
+      ansible.builtin.template:
+        src: templates/agent-config-template.yaml.j2
+        dest: "{{ ansible_facts['user_dir'] }}/agent-config-sno.yaml"
+        mode: 0644
+        owner: student
+        group: student
+      register: updated_agent_config
+
+    - name: Remove the installation directory if so required.
+      ansible.builtin.file:
+        path: "{{ ansible_facts['user_dir'] }}/agent"
+        state: absent
+      when:
+        - recreate_cluster_dir is defined
+        - recreate_cluster_dir
+
+    - name: Ensure the presence of installation directory.
+      ansible.builtin.file:
+        path: "{{ ansible_facts['user_dir'] }}/agent"
+        state: directory
+        mode: 0755
+
+    - name: Also, ensure that the right install-config.yaml file is in there.
+      ansible.builtin.copy:
+        src: "{{ ansible_facts['user_dir'] }}/install-config-agent.yaml"
+        remote_src: yes
+        dest: "{{ ansible_facts['user_dir'] }}/agent/install-config.yaml"
+        mode: 0644
+      register: published_install_config
+      when:
+        - (not install_log.stat.exists) or (recreate_cluster_dir is defined) or updated_install_config.changed or updated_agent_config.changed
+
+    - name: The same, but for agent-config.yaml.
+      ansible.builtin.copy:
+        src: "{{ ansible_facts['user_dir'] }}/agent-config-sno.yaml"
+        remote_src: yes
+        dest: "{{ ansible_facts['user_dir'] }}/agent/agent-config.yaml"
+        mode: 0644
+      register: published_agent_config
+      when:
+        - (not install_log.stat.exists) or (recreate_cluster_dir is defined) or updated_install_config.changed or updated_agent_config.changed
+
+    - name: This block will only execute if install-config or agent-config files were published.
+      block:
+
+        - name: Ensure the presence of customization directory.
+          ansible.builtin.file:
+            path: "{{ ansible_facts['user_dir'] }}/agent/openshift"
+            state: directory
+            mode: 0755
+
+        - name: Render chrony customizations in home directory.
+          ansible.builtin.template:
+            src: templates/chrony-customization.bu.j2
+            dest: "{{ ansible_facts['user_dir'] }}/chrony-{{ item }}.bu"
+            mode: 0644
+            owner: student
+            group: student
+          loop:
+            - master
+            - worker
+
+        - name: Publish chrony customizations in manifests directory.
+          ansible.builtin.command:
+            cmd: butane ./chrony-{{ item }}.bu -o ./agent/openshift/99_chrony_{{ item }}.yaml
+            chdir: "{{ ansible_facts['user_dir'] }}"
+            creates: agent/openshift/99_chrony_{{ item }}.yaml
+          loop:
+            - master
+            - worker
+
+        - name: Ensure the agent image cache directory exists.
+          ansible.builtin.file:
+            path: "{{ ansible_facts['user_dir'] }}/.cache/agent/image_cache"
+            state: directory
+            mode: 0755
+
+        - name: Ensure that the agent ISO and all other artifacts are gone if anything was updated.
+          ansible.builtin.file:
+            path: "{{ ansible_facts['user_dir'] }}/agent/{{ item }}"
+            state: absent
+          loop:
+            - agent.x86_64.iso
+            - auth
+            - rendezvousIP
+            - .openshift_install.log
+            - .openshift_install_state.json
+
+      when: published_install_config.changed or published_agent_config.changed
+
+    - name: Check whether the ISO is there.
+      ansible.builtin.stat:
+        path: "{{ ansible_facts['user_dir'] }}/agent/agent.x86_64.iso"
+        get_attributes: no
+        get_checksum: no
+        get_mime: no
+      register: agent_iso
+
+    - name: Ensure that the CoreOS ISO is a hard link to the downloaded one in Downloads.
+      ansible.builtin.file:
+        path: "{{ ansible_facts['user_dir'] }}/.cache/agent/image_cache/coreos-x86_64.iso"
+        state: hard
+        src: "{{ ansible_facts['user_dir'] }}/Downloads/rhcos-418.94.202501221327-0-live.x86_64.iso"
+
+    - name: Create agent installation ISO.
+      ansible.builtin.command:
+        cmd: openshift-install-fips agent create image
+        chdir: "{{ ansible_facts['user_dir'] }}/agent"
+      when: not agent_iso.stat.exists
+
+#- name: Copy the ISO file to the target machine and write it to /dev/sdb
+#  hosts: master02.ocp4.example.com
+#  gather_subset: min
+#  become: yes
+#  tasks:
+#    - name: Copy the ISO file to the target machine.
+#      ansible.builtin.copy:
+#        src: /home/student/agent/agent.x86_64.iso
+#        dest: /root/agent.x86_64.iso
+#        mode: 0644
+#      register: copied_iso
+#
+#    # TODO: ensure /dev/sdb1 exists and is bootable
+#
+#    - name: Write the ISO to /dev/sdb1 if it was changed.
+#      ansible.builtin.command:
+#        cmd: dd if=/root/agent.x86_64.iso of=/dev/sdb1 conv=sync bs=4k
+#      when: copied_iso.changed
+#      register: wrote_iso
+#
+#    - name: Wipe the filesystem of /dev/sda if ISO was written to /dev/sdb1.
+#      ansible.builtin.command:
+#        cmd: wipefs -af /dev/sda
+#      when: wrote_iso.changed
+#      register: wiped_fs
+#
+#    - name: Reboot the machine if filesystem was wiped.
+#      ansible.builtin.command:
+#        cmd: reboot
+#      ignore_errors: yes
+#      when: wiped_fs.changed
+...
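
Once the generated ISO has been written to the target machine's spare disk and the machine boots from it (the commented-out play above sketches that step), the installation can be monitored from workstation. A minimal sketch of such a follow-up play, assuming the same openshift-install-fips wrapper and the agent/ working directory used above; it is not part of the commit:

- name: Watch the agent-based installation (illustrative sketch only).
  hosts: workstation.lab.example.com
  become: no
  gather_subset: min
  tasks:
    - name: Block until openshift-install reports the cluster is ready.
      ansible.builtin.command:
        cmd: openshift-install-fips agent wait-for install-complete --log-level info
        chdir: "{{ ansible_facts['user_dir'] }}/agent"
      changed_when: false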

+ 14 - 0
playbooks/ansible.cfg

@@ -0,0 +1,14 @@
+[defaults]
+# required to avoid implicit conversion to string in k8s resource_definition
+jinja2_native = True
+inventory = ./inventory.yml
+remote_user = lab
+ask_pass = no
+
+# shut up about python interpreter
+interpreter_python = auto_silent
+
+[privilege_escalation]
+become = no
+become_method = sudo
+become_askpass = no
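
The jinja2_native comment is about type preservation: with the setting enabled, a templated value that holds a mapping reaches a module as a mapping rather than as its string representation. A hypothetical task illustrating the effect (my_manifest and the kubernetes.core.k8s call are illustrative, not taken from these playbooks):

- name: Apply a manifest that is stored in a dict variable.
  kubernetes.core.k8s:
    state: present
    # With jinja2_native = True the dict survives templating intact; without it,
    # the module would receive the dict's string representation instead.
    resource_definition: "{{ my_manifest }}"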

+ 35 - 0
playbooks/inventory.yml

@@ -0,0 +1,35 @@
+all:
+  hosts:
+    workstation.lab.example.com:
+      ansible_user: student
+      ansible_connection: local
+
+    bastion.lab.example.com:
+      ansible_host: 172.25.250.254
+      ansible_user: devops
+
+  vars:
+    registry_host: registry.redhat.io
+    quay_version: 3.15
+
+  children:
+    openshift:
+      children:
+        masters:
+          hosts:
+            master01.ocp4.example.com:
+            master02.ocp4.example.com:
+            master03.ocp4.example.com:
+        workers:
+          hosts:
+            worker01.ocp4.example.com:
+            worker02.ocp4.example.com:
+
+    infra:
+      hosts:
+        utility.lab.example.com:
+        # power is unreachable with student's key
+        #power.lab.example.com:
+        registry.ocp4.example.com:
+        idm.ocp4.example.com:
+

+ 19 - 0
playbooks/site.yml

@@ -0,0 +1,19 @@
+---
+# Unfortunately, you must run this one yourself.
+#- import_playbook: 00-general-pre-flight.yml
+
+# Do everything else all at once, and in the correct order.
+- import_playbook: 10-quay-tmp-fixes.yml
+- import_playbook: 20-general-post-tmp.yml
+
+- import_playbook: 30-quay-pre-tasks.yml
+- import_playbook: 32-quay-deploy.yml
+- import_playbook: 33-clair-deploy.yml
+- import_playbook: 34-clair-disable.yml
+
+- import_playbook: 40-mirror-prep.yml
+- import_playbook: 45-oc-mirror.yml
+
+- import_playbook: 50-coreos-inst-prep.yml
+- import_playbook: 52-coreos-installer.yml
+...
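
In practice, after running the pre-flight playbook by hand, a single ansible-playbook site.yml invocation drives the whole build in order. Extra variables propagate into the imported playbooks, so passing -e recreate_cluster_dir=true forces the agent installation directory to be wiped and rebuilt, as handled at the top of the ISO-creation play.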

+ 36 - 0
playbooks/templates/agent-config-template.yaml.j2

@@ -0,0 +1,36 @@
+apiVersion: v1alpha1
+kind: AgentConfig
+metadata:
+  name: agent-cluster
+rendezvousIP: {{ hostvars[install_host]['ansible_facts']['default_ipv4']['address'] }}
+additionalNTPSources:
+  - utility.lab.example.com
+hosts:
+  - hostname: {{ hostvars[install_host]['inventory_hostname_short'] }}
+    rootDeviceHints:
+      deviceName: /dev/vda
+    interfaces:
+      - name: {{ hostvars[install_host]['ansible_facts']['default_ipv4']['interface'] }}
+        macAddress: {{ hostvars[install_host]['ansible_facts']['default_ipv4']['macaddress'] }}
+    networkConfig:
+      interfaces:
+        - name: {{ hostvars[install_host]['ansible_facts']['default_ipv4']['interface'] }}
+          type: ethernet
+          state: up
+          mac-address: {{ hostvars[install_host]['ansible_facts']['default_ipv4']['macaddress'] }}
+          ipv4:
+            enabled: true
+            address:
+              - ip: {{ hostvars[install_host]['ansible_facts']['default_ipv4']['address'] }}
+                prefix-length: 24
+            dhcp: false
+      dns-resolver:
+        config:
+          server:
+            - 192.168.50.254
+      routes:
+        config:
+          - destination: 0.0.0.0/0
+            next-hop-address: 127.0.0.1
+            next-hop-interface: {{ hostvars[install_host]['ansible_facts']['default_ipv4']['interface'] }}
+            table-id: 254

+ 18 - 0
playbooks/templates/chrony-customization.bu.j2

@@ -0,0 +1,18 @@
+variant: openshift
+version: 4.18.0
+metadata:
+  name: 99-{{ item }}-chrony
+  labels:
+    machineconfiguration.openshift.io/role: {{ item }}
+storage:
+  files:
+  - path: /etc/chrony.conf
+    mode: 0644
+    overwrite: true
+    contents:
+      inline: |
+        server utility.lab.example.com iburst
+        driftfile /var/lib/chrony/drift
+        makestep 1.0 3
+        rtcsync
+        logdir /var/log/chrony
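
For orientation, the butane step in the ISO-creation play turns each rendered .bu file into a MachineConfig manifest of roughly the following shape (abridged and illustrative; the exact Ignition spec version and the encoded payload depend on the butane release):

apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  name: 99-master-chrony
  labels:
    machineconfiguration.openshift.io/role: master
spec:
  config:
    ignition:
      version: 3.4.0             # illustrative; set by the butane variant/version
    storage:
      files:
        - path: /etc/chrony.conf
          mode: 420              # 0644 in decimal
          overwrite: true
          contents:
            source: data:text/plain;charset=utf-8;base64,<encoded chrony.conf>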

+ 28 - 0
playbooks/templates/clair.service.j2

@@ -0,0 +1,28 @@
+[Unit]
+Description=Clair Container
+Wants=network-online.target
+After=network-online.target
+
+[Service]
+Restart=on-failure
+TimeoutStopSec=30
+ExecStartPre=/usr/bin/podman rm --ignore -f clair
+ExecStart=/usr/bin/podman run \
+              --conmon-pidfile %t/%n-pid \
+              --cidfile %t/%n-cid \
+              --cgroups=no-conmon \
+              --name=clair -d \
+              --network=quay \
+              -e CLAIR_CONF=/clair/config.yaml \
+              -e CLAIR_MODE=combo \
+              -v ./clair:/clair:Z \
+              -v ./tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:Z \
+              {{ registry_host }}/quay/clair-rhel8:v3.15
+ExecStop=/usr/bin/podman stop --ignore --cidfile %t/%n-cid -t 30 ; \
+             /bin/rm -f %t/%n-pid %t/%n-cid
+Type=forking
+PIDFile=%t/%n-pid
+KillMode=none
+
+[Install]
+WantedBy=default.target

+ 38 - 0
playbooks/templates/install-config-template.yaml.j2

@@ -0,0 +1,38 @@
+apiVersion: v1
+metadata:
+  name: {{ install_type }}
+baseDomain: ocp4.example.com
+{% if install_type == 'iso' %}
+bootstrapInPlace:
+  installationDisk: /dev/sda
+{% endif %}
+compute:
+  - hyperthreading: Enabled
+    name: worker
+    replicas: 0
+controlPlane:
+  hyperthreading: Enabled
+  name: master
+  replicas: 1
+networking:
+  clusterNetwork:
+    - cidr: 10.128.0.0/14
+      hostPrefix: 23
+  networkType: OVNKubernetes
+{% if install_type == 'agent' %}
+  machineNetwork:
+    - cidr: {{ hostvars[install_host]['ansible_facts']['default_ipv4']['address'] }}/32
+{% endif %}
+  serviceNetwork:
+    - 172.30.0.0/16
+platform:
+  none: {}
+fips: false
+pullSecret: '{{ pull_secret | ansible.builtin.to_json }}'
+sshKey: '{{ public_key }}'
+additionalTrustBundle: |
+{{ lab_ca_cert | ansible.builtin.regex_replace('^', '  ', multiline=True) }}
+imageDigestSources:
+{# There are two ImageDigestMirrorSet resources in idms-oc-mirror.yaml - use a loop. #}
+{% for cs in content_sources %}
+{{ cs.spec.imageDigestMirrors | list | to_yaml }}{% endfor %}
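
The loop above flattens the mirror entries of both ImageDigestMirrorSet resources into install-config's imageDigestSources list. The rendered block comes out roughly like this (the mirror repository paths are illustrative; only the upstream sources are the usual release locations):

imageDigestSources:
  - mirrors:
      - registry.ocp4.example.com/openshift/release-images
    source: quay.io/openshift-release-dev/ocp-release
  - mirrors:
      - registry.ocp4.example.com/openshift/release
    source: quay.io/openshift-release-dev/ocp-v4.0-art-dev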

+ 29 - 0
playbooks/templates/quay-pg.service.j2

@@ -0,0 +1,29 @@
+[Unit]
+Description=Quay PostgreSQL Container
+Wants=network-online.target
+After=network-online.target
+
+[Service]
+Restart=on-failure
+TimeoutStopSec=30
+ExecStartPre=/usr/bin/podman rm --ignore -f postgresql
+ExecStart=/usr/bin/podman run \
+              --conmon-pidfile %t/%n-pid \
+              --cidfile %t/%n-cid \
+              --cgroups=no-conmon \
+              --name=postgresql -d \
+              --network=quay \
+              -v /local/quay-pg:/var/lib/pgsql/data:Z \
+              -e POSTGRESQL_USER=quay \
+              -e POSTGRESQL_PASSWORD=secret \
+              -e POSTGRESQL_DATABASE=quay \
+              -e POSTGRESQL_ADMIN_PASSWORD=verysecret \
+              {{ registry_host }}/rhel9/postgresql-15:latest
+ExecStop=/usr/bin/podman stop --ignore --cidfile %t/%n-cid -t 30 ; \
+             /bin/rm -f %t/%n-pid %t/%n-cid
+Type=forking
+PIDFile=%t/%n-pid
+KillMode=none
+
+[Install]
+WantedBy=default.target

+ 25 - 0
playbooks/templates/quay-redis.service.j2

@@ -0,0 +1,25 @@
+[Unit]
+Description=Quay Redis Container
+Wants=network-online.target
+After=network-online.target
+
+[Service]
+Restart=on-failure
+TimeoutStopSec=30
+ExecStartPre=/usr/bin/podman rm --ignore -f redis
+ExecStart=/usr/bin/podman run \
+              --conmon-pidfile %t/%n-pid \
+              --cidfile %t/%n-cid \
+              --cgroups=no-conmon \
+              --name=redis -d \
+              --network=quay \
+              -e REDIS_PASSWORD=verysecret \
+              {{ registry_host }}/rhel9/redis-7:latest
+ExecStop=/usr/bin/podman stop --ignore --cidfile %t/%n-cid -t 30 ; \
+             /bin/rm -f %t/%n-pid %t/%n-cid
+Type=forking
+PIDFile=%t/%n-pid
+KillMode=none
+
+[Install]
+WantedBy=default.target

+ 27 - 0
playbooks/templates/quay.service.j2

@@ -0,0 +1,27 @@
+[Unit]
+Description=Quay Container
+Wants=network-online.target quay-pg.service quay-redis.service
+After=network-online.target
+
+[Service]
+Restart=on-failure
+TimeoutStopSec=30
+ExecStartPre=/usr/bin/podman rm --ignore -f quay
+ExecStart=/usr/bin/podman run \
+              --conmon-pidfile %t/%n-pid \
+              --cidfile %t/%n-cid \
+              --cgroups=no-conmon \
+              --name=quay -d \
+              --network=quay \
+              -p 80:8080 -p 443:8443 \
+              -v ./config:/conf/stack:Z \
+              -v /local/quay:/registry:Z \
+              {{ registry_host }}/quay/quay-rhel8:v3.15
+ExecStop=/usr/bin/podman stop --ignore --cidfile %t/%n-cid -t 30 ; \
+             /bin/rm -f %t/%n-pid %t/%n-cid
+Type=forking
+PIDFile=%t/%n-pid
+KillMode=none
+
+[Install]
+WantedBy=default.target
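
These unit templates are rendered and enabled by the Quay deployment playbooks earlier in the series, which do not appear in this part of the diff. Purely as a sketch, with the destination path and system-level scope as assumptions rather than facts from those playbooks, wiring one of them up could look like:

- name: Install the Quay unit file (sketch; destination path is an assumption).
  ansible.builtin.template:
    src: templates/quay.service.j2
    dest: /etc/systemd/system/quay.service
    mode: "0644"

- name: Reload systemd, then enable and start Quay.
  ansible.builtin.systemd_service:
    name: quay.service
    daemon_reload: yes
    enabled: yes
    state: started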

Some files were not shown because too many files changed