# 45-oc-mirror.yml
  1. ---
  2. # Ensure images are mirrored in the local Quay registry instance.
  3. - name: Ensure registry has a default (pre-mirrored) set of images.
  4. hosts: registry.ocp4.example.com
  5. gather_subset: min
  6. become: no
  7. remote_user: quay
  8. tasks:
  9. - name: Verify that the image manifests exist.
  10. containers.podman.podman_container_exec:
  11. name: postgresql
  12. command: psql -d quay -U postgres -t -c 'SELECT COUNT(id) FROM manifest'
  13. ignore_errors: yes
  14. register: quay_mft
  15. # TODO: demote the verbosity level of this one or remove it.
  16. - debug: var=quay_mft
  17. # TODO: why?
  18. - debug:
  19. msg: "{{ (quay_mft.stderr | ansible.builtin.regex_search('FATAL: database .* does not exist')) }}"
  20. - name: Remember the number of image manifests in quay.
  21. ansible.builtin.set_fact:
  22. quay_nmft: "{{ quay_mft.stdout_lines[0] | default(0) | trim | int }}"
  23. - name: Import quay backup if manifests seem to be missing.
  24. block:
  25. - name: Ensure quay service is stopped.
  26. ansible.builtin.systemd_service:
  27. name: quay
  28. scope: user
  29. state: stopped
  30. - name: Ensure quay container is stopped.
  31. containers.podman.podman_container:
  32. name: quay
  33. image: "{{ registry_host }}/quay/quay-rhel8:v{{ quay_version }}"
  34. state: stopped
  35. timeout: 60
  36. # NOTE: this is required because of SELinux context change for the container below.
  37. - name: Ensure database backup file is owned by quay user.
  38. remote_user: lab
  39. become: yes
  40. ansible.builtin.file:
  41. path: /local/backups/quay-db.backup
  42. owner: quay
  43. group: quay
  44. mode: 0644
  45. - name: Create the database if necessary.
  46. containers.podman.podman_container_exec:
  47. name: postgresql
  48. command: psql -d postgres -U postgres -t -c 'CREATE DATABASE quay OWNER quay'
  49. when:
  50. - quay_mft.rc > 0
  51. - (quay_mft.stderr | ansible.builtin.regex_search('FATAL: database .* does not exist')) is defined
  52. - (quay_mft.stderr | ansible.builtin.regex_search('FATAL: database .* does not exist')) != ""
  53. # TODO: something is wrong with the above regex_searches
  54. - name: Create a temporary pgpass file
  55. ansible.builtin.copy:
  56. dest: /tmp/pgpass
  57. owner: quay
  58. group: quay
  59. mode: 0600
  60. content: |
  61. postgresql:5432:quay:postgres:verysecret
  62. - name: Ensure the pgpass file is owned by postgres user of the container.
  63. become_method: containers.podman.podman_unshare
  64. become: yes
  65. ansible.builtin.file:
  66. path: /tmp/pgpass
  67. state: file
  68. owner: 26
  69. - name: Run pg_restore in a quay_import container (takes a couple of seconds).
  70. containers.podman.podman_container:
  71. name: quay_import
  72. image: "{{ registry_host }}/rhel9/postgresql-15:latest"
  73. rm: yes
  74. detach: no
  75. network:
  76. - quay
  77. volumes:
  78. - /local/backups/quay-db.backup:/quay-db.backup:Z
  79. - /tmp/pgpass:/var/lib/pgsql/.pgpass:Z
  80. command:
  81. - pg_restore
  82. - -dquay
  83. - -Upostgres
  84. - -hpostgresql
  85. - -c
  86. - /quay-db.backup
  87. state: started
  88. register: quay_import
  89. ignore_errors: yes
  90. failed_when:
  91. - (quay_import.stderr | regex_search('FATAL')) is defined
  92. - (quay_import.stderr | regex_search('FATAL')) != ""
  93. # TODO: what's up with no output, no rc, but still failed?!?
  94. # TODO: demote the verbosity level of this one
  95. - debug: var=quay_import
  96. - name: Restore the ownership of the file.
  97. become_method: containers.podman.podman_unshare
  98. become: yes
  99. ansible.builtin.file:
  100. path: /tmp/pgpass
  101. state: file
  102. owner: 0
  103. - name: Remove the pgpass file
  104. ansible.builtin.file:
  105. path: /tmp/pgpass
  106. state: absent
  107. # XXX OBSOLETED BY OVERLAY MOUNT XXX
  108. #- name: Remove the current Quay data directories.
  109. # remote_user: lab
  110. # become: yes
  111. # ansible.builtin.file:
  112. # path: /local/quay/{{ item }}
  113. # state: absent
  114. # loop:
  115. # - sha256
  116. # - uploads
  117. #
  118. #- name: Extract the latest Quay data directory backup (takes around half an hour).
  119. # remote_user: lab
  120. # become: yes
  121. # ansible.builtin.command:
  122. # cmd: tar xpf /local/backups/quay-data.tar.bz2 -C /local
  123. - name: Ensure working directories exist
  124. remote_user: lab
  125. become: yes
  126. ansible.builtin.file:
  127. path: "{{ item.path }}"
  128. state: directory
  129. owner: "{{ item.owner | default('root') }}"
  130. group: "{{ item.group | default('root') }}"
  131. mode: "{{ item.mode | default('0755') }}"
  132. loop:
  133. - path: /local/overlay/upper
  134. - path: /local/overlay/work
  135. - path: /local/quay
  136. owner: quay
  137. group: quay
  138. mode: 0775
  139. - name: Underlay /local/backups/quay to /local/quay
  140. remote_user: lab
  141. become: yes
  142. ansible.posix.mount:
  143. boot: yes
  144. fstype: overlay
  145. path: /local/quay
  146. opts: lowerdir=/local/backups/quay,upperdir=/local/overlay/upper,workdir=/local/overlay/work
  147. src: overlay
  148. state: mounted
  149. - name: Ensure quay service is started after this.
  150. ansible.builtin.systemd_service:
  151. name: quay
  152. scope: user
  153. state: started
  154. - name: wait for quay to become ready again
  155. ansible.builtin.uri:
  156. method: GET
  157. url: https://registry.ocp4.example.com/
  158. headers:
  159. Accept: application/json
  160. Content-Type: application/json
  161. validate_certs: no
  162. status_code:
  163. - 200
  164. - 404
  165. - 502
  166. register: startup_wait
  167. until: startup_wait.status == 200
  168. retries: 30
  169. delay: 5
  170. when:
  171. - quay_nmft < 200
  172. - name: Ensure "oc mirror" has completed (non-idempotent, but only downloads 5-10 images if anything).
  173. hosts: workstation.lab.example.com
  174. gather_subset: min
  175. become: no
  176. tasks:
  177. - name: Ensure working directory exists.
  178. ansible.builtin.file:
  179. path: "{{ ansible_facts['user_dir'] }}/mirror"
  180. state: directory
  181. mode: 0755
  182. owner: student
  183. group: student
  184. - name: Ensure image set config is correct.
  185. ansible.builtin.copy:
  186. dest: "{{ ansible_facts['user_dir'] }}/image-set-config.yaml"
  187. mode: 0644
  188. owner: student
  189. group: student
  190. content: |
  191. kind: ImageSetConfiguration
  192. apiVersion: mirror.openshift.io/v2alpha1
  193. mirror:
  194. platform:
  195. channels:
  196. - name: stable-4.18
  197. type: ocp
  198. minVersion: 4.18.6
  199. maxVersion: 4.18.6
  200. graph: true
  201. operators:
  202. - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.18
  203. full: false
  204. packages:
  205. - name: node-maintenance-operator
  206. - catalog: registry.redhat.io/redhat/certified-operator-index:v4.18
  207. full: false
  208. packages:
  209. - name: crunchy-postgres-operator
  210. additionalImages:
  211. - name: registry.redhat.io/ubi9/ubi:latest
  212. - name: registry.redhat.io/ubi9/toolbox:latest
  213. - name: Kick off "oc mirror".
  214. ansible.builtin.command:
  215. cmd: oc mirror --v2 -c {{ ansible_facts['user_dir'] }}/image-set-config.yaml --workspace file://{{ ansible_facts['user_dir'] }}/mirror/ docker://registry.ocp4.example.com
  216. register: mirror_output
  217. - name: Show what happened on stdout.
  218. ansible.builtin.debug:
  219. var: mirror_output.stdout_lines
  220. - name: Show what happened on stderr.
  221. ansible.builtin.debug:
  222. var: mirror_output.stderr_lines
  223. ...