---
# Ensure images are mirrored in the local Quay registry instance.
#
# Play 1: on the registry host, count the image manifests in the Quay
#         database; if the count looks too low, restore the database from
#         the pre-made backup and overlay-mount the pre-mirrored data dir.
# Play 2: on the workstation, run "oc mirror" to top up anything missing.
- name: Ensure registry has a default (pre-mirrored) set of images.
  hosts: registry.ocp4.example.com
  gather_subset: min
  become: false
  remote_user: quay
  tasks:
    - name: Verify that the image manifests exist.
      containers.podman.podman_container_exec:
        name: postgresql
        command: psql -d quay -U postgres -t -c 'SELECT COUNT(id) FROM manifest'
      # The query fails when the "quay" database does not exist yet; that
      # case is handled below, so do not abort the play here.
      ignore_errors: true
      register: quay_mft

    - name: Show the raw manifest-count result (debugging aid).
      ansible.builtin.debug:
        var: quay_mft
        verbosity: 1

    - name: Remember the number of image manifests in quay.
      ansible.builtin.set_fact:
        # NOTE: set_fact always stores a string; cast with "| int" at the
        # point of comparison (see the block's "when" below).
        quay_nmft: "{{ quay_mft.stdout_lines[0] | default(0) | trim | int }}"

    - name: Import quay backup if manifests seem to be missing.
      # Fewer than 200 manifests means the pre-mirrored set is incomplete.
      when: quay_nmft | int < 200
      block:
        - name: Ensure quay service is stopped.
          ansible.builtin.systemd_service:
            name: quay
            scope: user
            state: stopped

        - name: Ensure quay container is stopped.
          containers.podman.podman_container:
            name: quay
            image: "{{ registry_host }}/quay/quay-rhel8:v{{ quay_version }}"
            state: stopped
            timeout: 60

        # NOTE: this is required because of SELinux context change for the
        # container below.
        - name: Ensure database backup file is owned by quay user.
          remote_user: lab
          become: true
          ansible.builtin.file:
            path: /local/backups/quay-db.backup
            owner: quay
            group: quay
            mode: "0644"

        - name: Create the database if necessary.
          containers.podman.podman_container_exec:
            name: postgresql
            command: psql -d postgres -U postgres -t -c 'CREATE DATABASE quay OWNER quay'
          # Only create the database when the earlier count failed precisely
          # because the database is missing. Using the "search" test here:
          # the previous "(x | regex_search(...)) is defined" form was always
          # true (a Jinja expression result is always defined, and a
          # non-match returns none, which also compares unequal to "").
          when:
            - quay_mft.rc > 0
            - quay_mft.stderr is search('FATAL: database .* does not exist')

        - name: Create a temporary pgpass file
          ansible.builtin.copy:
            dest: /tmp/pgpass
            owner: quay
            group: quay
            mode: "0600"
            content: |
              postgresql:5432:quay:postgres:verysecret

        - name: Ensure the pgpass file is owned by postgres user of the container.
          # podman_unshare maps the file to UID 26 (postgres) inside the
          # rootless container's user namespace.
          become_method: containers.podman.podman_unshare
          become: true
          ansible.builtin.file:
            path: /tmp/pgpass
            state: file
            owner: 26

        - name: Run pg_restore in a quay_import container (takes a couple of seconds).
          containers.podman.podman_container:
            name: quay_import
            image: "{{ registry_host }}/rhel9/postgresql-15:latest"
            rm: true
            detach: false
            network:
              - quay
            volumes:
              - /local/backups/quay-db.backup:/quay-db.backup:Z
              - /tmp/pgpass:/var/lib/pgsql/.pgpass:Z
            command:
              - pg_restore
              - -dquay
              - -Upostgres
              - -hpostgresql
              - -c
              - /quay-db.backup
            state: started
          register: quay_import
          ignore_errors: true
          # Fail only on a genuine FATAL in stderr. The previous
          # "(stderr | regex_search('FATAL')) is defined ... != ''" pair was
          # always true (non-match yields none, not ""), which made this
          # task fail unconditionally even with empty output and no rc.
          failed_when: quay_import.stderr is search('FATAL')

        - name: Show the pg_restore result (debugging aid).
          ansible.builtin.debug:
            var: quay_import
            verbosity: 1

        - name: Restore the ownership of the file.
          become_method: containers.podman.podman_unshare
          become: true
          ansible.builtin.file:
            path: /tmp/pgpass
            state: file
            owner: 0

        - name: Remove the pgpass file
          ansible.builtin.file:
            path: /tmp/pgpass
            state: absent

        # XXX OBSOLETED BY OVERLAY MOUNT XXX
        # - name: Remove the current Quay data directories.
        #   remote_user: lab
        #   become: true
        #   ansible.builtin.file:
        #     path: /local/quay/{{ item }}
        #     state: absent
        #   loop:
        #     - sha256
        #     - uploads
        #
        # - name: Extract the latest Quay data directory backup (takes around half an hour).
        #   remote_user: lab
        #   become: true
        #   ansible.builtin.command:
        #     cmd: tar xpf /local/backups/quay-data.tar.bz2 -C /local

        - name: Ensure working directories exist
          remote_user: lab
          become: true
          ansible.builtin.file:
            path: "{{ item.path }}"
            state: directory
            owner: "{{ item.owner | default('root') }}"
            group: "{{ item.group | default('root') }}"
            mode: "{{ item.mode | default('0755') }}"
          loop:
            - path: /local/overlay/upper
            - path: /local/overlay/work
            - path: /local/quay
              owner: quay
              group: quay
              # Quoted: an unquoted 0775 is YAML octal (int 509) and would
              # template into the wrong permission string.
              mode: "0775"

        - name: Underlay /local/backups/quay to /local/quay
          remote_user: lab
          become: true
          ansible.posix.mount:
            boot: true
            fstype: overlay
            path: /local/quay
            opts: lowerdir=/local/backups/quay,upperdir=/local/overlay/upper,workdir=/local/overlay/work
            src: overlay
            state: mounted

        - name: Ensure quay service is started after this.
          ansible.builtin.systemd_service:
            name: quay
            scope: user
            state: started

        - name: Wait for quay to become ready again
          ansible.builtin.uri:
            method: GET
            url: https://registry.ocp4.example.com/
            headers:
              Accept: application/json
              Content-Type: application/json
            validate_certs: false
            # 404/502 are accepted as non-error responses while the service
            # comes up; the "until" below keeps polling for a real 200.
            status_code:
              - 200
              - 404
              - 502
          register: startup_wait
          until: startup_wait.status == 200
          retries: 30
          delay: 5

- name: Ensure "oc mirror" has completed (non-idempotent, but only downloads 5-10 images if anything).
  hosts: workstation.lab.example.com
  gather_subset: min
  become: false
  tasks:
    - name: Ensure working directory exists.
      ansible.builtin.file:
        path: "{{ ansible_facts['user_dir'] }}/mirror"
        state: directory
        mode: "0755"
        owner: student
        group: student

    - name: Ensure image set config is correct.
      ansible.builtin.copy:
        dest: "{{ ansible_facts['user_dir'] }}/image-set-config.yaml"
        mode: "0644"
        owner: student
        group: student
        content: |
          kind: ImageSetConfiguration
          apiVersion: mirror.openshift.io/v2alpha1
          mirror:
            platform:
              channels:
                - name: stable-4.18
                  type: ocp
                  minVersion: 4.18.6
                  maxVersion: 4.18.6
              graph: true
            operators:
              - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.18
                full: false
                packages:
                  - name: node-maintenance-operator
              - catalog: registry.redhat.io/redhat/certified-operator-index:v4.18
                full: false
                packages:
                  - name: crunchy-postgres-operator
            additionalImages:
              - name: registry.redhat.io/ubi9/ubi:latest
              - name: registry.redhat.io/ubi9/toolbox:latest

    - name: Kick off "oc mirror".
      ansible.builtin.command:
        cmd: >-
          oc mirror --v2
          -c {{ ansible_facts['user_dir'] }}/image-set-config.yaml
          --workspace file://{{ ansible_facts['user_dir'] }}/mirror/
          docker://registry.ocp4.example.com
      register: mirror_output

    - name: Show what happened on stdout.
      ansible.builtin.debug:
        var: mirror_output.stdout_lines

    - name: Show what happened on stderr.
      ansible.builtin.debug:
        var: mirror_output.stderr_lines
...