# 45-oc-mirror.yml
  1. ---
  2. # Ensure images are mirrored in the local Quay registry instance.
  3. - name: Ensure registry has a default (pre-mirrored) set of images.
  4. hosts: registry.ocp4.example.com
  5. gather_subset: min
  6. become: no
  7. remote_user: quay
  8. tasks:
  9. - name: Verify that the image manifests exist.
  10. containers.podman.podman_container_exec:
  11. name: postgresql
  12. command: psql -d quay -U postgres -t -c 'SELECT COUNT(id) FROM manifest'
  13. ignore_errors: yes
  14. register: quay_mft
  15. # TODO: demote the verbosity level of this one or remove it.
  16. - debug: var=quay_mft
  17. # TODO: why?
  18. - debug:
  19. msg: "{{ (quay_mft.stderr | ansible.builtin.regex_search('FATAL: database .* does not exist')) }}"
  20. - name: Remember the number of image manifests in quay.
  21. ansible.builtin.set_fact:
  22. quay_nmft: "{{ quay_mft.stdout_lines[0] | default(0) | trim | int }}"
  23. - name: Import quay backup if manifests seem to be missing.
  24. block:
  25. - name: Ensure quay service is stopped.
  26. ansible.builtin.systemd_service:
  27. name: quay
  28. scope: user
  29. state: stopped
  30. - name: Ensure quay container is stopped.
  31. containers.podman.podman_container:
  32. name: quay
  33. image: "{{ registry_host }}/quay/quay-rhel8:v{{ quay_version }}"
  34. state: stopped
  35. timeout: 60
  36. # NOTE: this is required because of SELinux context change for the container below.
  37. - name: Ensure database backup file is owned by quay user.
  38. remote_user: lab
  39. become: yes
  40. ansible.builtin.file:
  41. path: /local/backups/quay-db.backup
  42. owner: quay
  43. group: quay
  44. mode: 0644
  45. - name: Create the database if necessary.
  46. containers.podman.podman_container_exec:
  47. name: postgresql
  48. command: psql -d postgres -U postgres -t -c 'CREATE DATABASE quay OWNER quay'
  49. when:
  50. - quay_mft.rc > 0
  51. - (quay_mft.stderr | ansible.builtin.regex_search('FATAL: database .* does not exist')) is defined
  52. - (quay_mft.stderr | ansible.builtin.regex_search('FATAL: database .* does not exist')) != ""
  53. # TODO: something is wrong with the above regex_searches
  54. - name: Create a temporary pgpass file
  55. ansible.builtin.copy:
  56. dest: /tmp/pgpass
  57. owner: quay
  58. group: quay
  59. mode: 0600
  60. content: |
  61. postgresql:5432:quay:postgres:verysecret
  62. - name: Ensure the pgpass file is owned by postgres user of the container.
  63. become_method: containers.podman.podman_unshare
  64. become: yes
  65. ansible.builtin.file:
  66. path: /tmp/pgpass
  67. state: file
  68. owner: 26
  69. - name: Run pg_restore in a quay_import container (takes a couple of seconds).
  70. containers.podman.podman_container:
  71. name: quay_import
  72. image: "{{ registry_host }}/rhel9/postgresql-15:latest"
  73. rm: yes
  74. detach: no
  75. network:
  76. - quay
  77. volumes:
  78. - /local/backups/quay-db.backup:/quay-db.backup:Z
  79. - /tmp/pgpass:/var/lib/pgsql/.pgpass:Z
  80. command:
  81. - pg_restore
  82. - -dquay
  83. - -Upostgres
  84. - -hpostgresql
  85. - -c
  86. - /quay-db.backup
  87. state: started
  88. register: quay_import
  89. ignore_errors: yes
  90. failed_when:
  91. - (quay_import.stderr | regex_search('FATAL')) is defined
  92. - (quay_import.stderr | regex_search('FATAL')) != ""
  93. # TODO: what's up with no output, no rc, but still failed?!?
  94. # TODO: demote the verbosity level of this one
  95. - debug: var=quay_import
  96. - name: Restore the ownership of the file.
  97. become_method: containers.podman.podman_unshare
  98. become: yes
  99. ansible.builtin.file:
  100. path: /tmp/pgpass
  101. state: file
  102. owner: 0
  103. - name: Remove the pgpass file
  104. ansible.builtin.file:
  105. path: /tmp/pgpass
  106. state: absent
  107. # XXX OBSOLETED BY OVERLAY MOUNT XXX
  108. #- name: Remove the current Quay data directories.
  109. # remote_user: lab
  110. # become: yes
  111. # ansible.builtin.file:
  112. # path: /local/quay/{{ item }}
  113. # state: absent
  114. # loop:
  115. # - sha256
  116. # - uploads
  117. #
  118. #- name: Extract the latest Quay data directory backup (takes around half an hour).
  119. # remote_user: lab
  120. # become: yes
  121. # ansible.builtin.command:
  122. # cmd: tar xpf /local/backups/quay-data.tar.bz2 -C /local
  123. - name: Ensure working directories exist
  124. remote_user: lab
  125. become: yes
  126. ansible.builtin.file:
  127. path: "{{ item.path }}"
  128. state: directory
  129. owner: "{{ item.owner | default('root') }}"
  130. group: "{{ item.group | default('root') }}"
  131. mode: "{{ item.mode | default('0755') }}"
  132. loop:
  133. - path: /local/overlay/upper
  134. owner: quay
  135. group: quay
  136. mode: 0775
  137. - path: /local/overlay/work
  138. owner: quay
  139. group: quay
  140. mode: 0775
  141. - path: /local/quay
  142. owner: quay
  143. group: quay
  144. mode: 0775
  145. - name: Underlay /local/backups/quay to /local/quay
  146. remote_user: lab
  147. become: yes
  148. ansible.posix.mount:
  149. boot: yes
  150. fstype: overlay
  151. path: /local/quay
  152. opts: userxattr,lowerdir=/local/backups/quay,upperdir=/local/overlay/upper,workdir=/local/overlay/work
  153. src: overlay
  154. state: mounted
  155. - name: Again ensure the mount has correct ownership.
  156. remote_user: lab
  157. become: yes
  158. ansible.builtin.file:
  159. path: /local/quay
  160. state: directory
  161. owner: quay
  162. group: quay
  163. recurse: yes
  164. mode: 0775
  165. - name: And further ensure that the ownership is by user of the container.
  166. become_method: containers.podman.podman_unshare
  167. become: yes
  168. ansible.builtin.file:
  169. path: /local/quay
  170. state: directory
  171. recurse: yes
  172. owner: 1001
  173. - name: Ensure quay service is started after this.
  174. ansible.builtin.systemd_service:
  175. name: quay
  176. scope: user
  177. state: started
  178. - name: wait for quay to become ready again
  179. ansible.builtin.uri:
  180. method: GET
  181. url: https://registry.ocp4.example.com/
  182. headers:
  183. Accept: application/json
  184. Content-Type: application/json
  185. validate_certs: no
  186. status_code:
  187. - 200
  188. - 404
  189. - 502
  190. register: startup_wait
  191. until: startup_wait.status == 200
  192. retries: 30
  193. delay: 5
  194. when:
  195. - quay_nmft < 200
  196. - name: Ensure "oc mirror" has completed (non-idempotent, but only downloads 5-10 images if anything).
  197. hosts: workstation.lab.example.com
  198. gather_subset: min
  199. become: no
  200. tasks:
  201. - name: Ensure working directory exists.
  202. ansible.builtin.file:
  203. path: "{{ ansible_facts['user_dir'] }}/mirror"
  204. state: directory
  205. mode: 0755
  206. owner: student
  207. group: student
  208. - name: Ensure image set config is correct.
  209. ansible.builtin.copy:
  210. dest: "{{ ansible_facts['user_dir'] }}/image-set-config.yaml"
  211. mode: 0644
  212. owner: student
  213. group: student
  214. content: |
  215. kind: ImageSetConfiguration
  216. apiVersion: mirror.openshift.io/v2alpha1
  217. mirror:
  218. platform:
  219. channels:
  220. - name: stable-4.18
  221. type: ocp
  222. minVersion: 4.18.6
  223. maxVersion: 4.18.6
  224. graph: true
  225. operators:
  226. - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.18
  227. full: false
  228. packages:
  229. - name: node-maintenance-operator
  230. - catalog: registry.redhat.io/redhat/certified-operator-index:v4.18
  231. full: false
  232. packages:
  233. - name: crunchy-postgres-operator
  234. additionalImages:
  235. - name: registry.redhat.io/ubi9/ubi:latest
  236. - name: registry.redhat.io/ubi9/toolbox:latest
  237. - name: Kick off "oc mirror".
  238. ansible.builtin.command:
  239. cmd: oc mirror --v2 -c {{ ansible_facts['user_dir'] }}/image-set-config.yaml --workspace file://{{ ansible_facts['user_dir'] }}/mirror/ docker://registry.ocp4.example.com
  240. register: mirror_output
  241. - name: Show what happened on stdout.
  242. ansible.builtin.debug:
  243. var: mirror_output.stdout_lines
  244. - name: Show what happened on stderr.
  245. ansible.builtin.debug:
  246. var: mirror_output.stderr_lines
  247. ...