45-oc-mirror.yml

---
# Create image set config if necessary, start "oc mirror".
- name: Ensure backup file is owned by quay user.
  hosts: registry.ocp4.example.com
  gather_subset: min
  become: yes
  tasks:
    - name: Ensure database backup file is owned by quay user.
      ansible.builtin.file:
        path: /local/backups/quay-db.backup
        owner: quay
        group: quay
        mode: '0644'
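    # The backup file gets bind-mounted into the throw-away pg_restore
    # container further down, so it needs to be readable by the rootless
    # quay account that runs the containers on this host.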

- name: Ensure registry has a default (pre-mirrored) set of images.
  hosts: registry.ocp4.example.com
  gather_subset: min
  become: no
  remote_user: quay
  tasks:
    - name: Verify that the image manifests exist.
      containers.podman.podman_container_exec:
        name: postgresql
        command: psql -d quay -U postgres -t -c 'SELECT COUNT(id) FROM manifest'
      ignore_errors: yes
      register: quay_mft

    - debug: var=quay_mft

    - debug:
        msg: "{{ quay_mft.stderr | ansible.builtin.regex_search('FATAL:\\s+database .* does not exist') }}"

    - name: Remember the number of image manifests in quay.
      ansible.builtin.set_fact:
        quay_nmft: "{{ quay_mft.stdout_lines[0] | default(0) | trim | int }}"
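    # The psql query above prints a single number (the manifest row count);
    # stdout_lines[0] | default(0) turns a missing or failed query into 0.
    # quay_nmft gates the restore block below: a count well under the
    # pre-mirrored baseline is treated as "backup needs to be imported".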

    - name: Import quay backup if manifests seem to be missing.
      block:
        - name: Ensure quay service is stopped.
          ansible.builtin.systemd_service:
            name: quay
            scope: user
            state: stopped

        - name: Ensure quay container is stopped.
          containers.podman.podman_container:
            name: quay
            image: "{{ registry_host }}/quay/quay-rhel8:v{{ quay_version }}"
            state: stopped
            timeout: 60
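        # Quay is stopped both via its user-scoped systemd unit and directly
        # as a container before the database restore, so nothing writes to
        # the quay database while pg_restore runs.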

        - name: Create the database if necessary.
          containers.podman.podman_container_exec:
            name: postgresql
            command: psql -d postgres -U postgres -t -c 'CREATE DATABASE quay OWNER quay'
          when:
            - quay_mft.rc > 0
            # psql prints two spaces after "FATAL:", hence \s+ instead of a
            # literal single space; "is search" is false when nothing matches.
            - (quay_mft.stderr | default('')) is search('FATAL:\s+database .* does not exist')

        - name: Create a temporary pgpass file
          ansible.builtin.copy:
            dest: /tmp/pgpass
            owner: quay
            group: quay
            mode: '0600'
            content: |
              postgresql:5432:quay:postgres:verysecret

        - name: Ensure the pgpass file is owned by postgres user of the container.
          become_method: containers.podman.podman_unshare
          become: yes
          ansible.builtin.file:
            path: /tmp/pgpass
            state: file
            owner: 26
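        # The .pgpass line follows the usual hostname:port:database:username:password
        # layout, with "postgresql" being the container name on the quay network.
        # UID 26 is the postgres user in the RHEL PostgreSQL images;
        # podman_unshare performs the chown inside the rootless user namespace
        # so the container sees the file as owned by postgres.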

        - name: Run pg_restore in a quay_import container (takes a couple of seconds).
          containers.podman.podman_container:
            name: quay_import
            image: "{{ registry_host }}/rhel9/postgresql-15:latest"
            rm: yes
            detach: no
            network:
              - quay
            volumes:
              - /local/backups/quay-db.backup:/quay-db.backup:Z
              - /tmp/pgpass:/var/lib/pgsql/.pgpass:Z
            command:
              - pg_restore
              - -dquay
              - -Upostgres
              - -hpostgresql
              - -c
              - /quay-db.backup
            state: started
          register: quay_import
          ignore_errors: yes
          # Only treat the run as failed when pg_restore actually reported a
          # FATAL error; "is search" is false when stderr has no match.
          failed_when: (quay_import.stderr | default('')) is search('FATAL')

        # Only show the full import result when running with at least -v.
        - ansible.builtin.debug:
            var: quay_import
            verbosity: 1
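        # If the restore ever needs a sanity check, the manifest count from
        # the start of the play can be re-run at this point (sketch only, not
        # part of the original flow):
        #- name: Re-check the manifest count after the restore.
        #  containers.podman.podman_container_exec:
        #    name: postgresql
        #    command: psql -d quay -U postgres -t -c 'SELECT COUNT(id) FROM manifest'
        #  register: quay_mft_after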

        - name: Restore the ownership of the file.
          become_method: containers.podman.podman_unshare
          become: yes
          ansible.builtin.file:
            path: /tmp/pgpass
            state: file
            owner: 0

        - name: Remove the pgpass file
          ansible.builtin.file:
            path: /tmp/pgpass
            state: absent

        # XXX OBSOLETED BY OVERLAY MOUNT XXX
        #- name: Remove the current Quay data directories.
        #  remote_user: lab
        #  become: yes
        #  ansible.builtin.file:
        #    path: /local/quay/{{ item }}
        #    state: absent
        #  loop:
        #    - sha256
        #    - uploads
        #
        #- name: Extract the latest Quay data directory backup (takes around half an hour).
        #  remote_user: lab
        #  become: yes
        #  ansible.builtin.command:
        #    cmd: tar xpf /local/backups/quay-data.tar.bz2 -C /local

        - name: Ensure working directories exist
          ansible.builtin.file:
            path: "{{ item.path }}"
            state: directory
            owner: "{{ item.owner | default('root') }}"
            group: "{{ item.group | default('root') }}"
            mode: "{{ item.mode | default('0755') }}"
          loop:
            - path: /local/overlay/upper
            - path: /local/overlay/work
            - path: /local/quay
              owner: quay
              group: quay
              mode: '0775'
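        # /local/overlay/upper and /local/overlay/work back the overlay mount
        # created next; /local/quay is the mount point itself and is handed to
        # the quay user so the registry can write through the overlay.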

        - name: Underlay /local/backups/quay to /local/quay
          ansible.posix.mount:
            boot: yes
            fstype: overlay
            path: /local/quay
            opts: lowerdir=/local/backups/quay,upperdir=/local/overlay/upper,workdir=/local/overlay/work
            src: overlay
            state: mounted
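        # With the overlay in place, the pre-mirrored blobs in
        # /local/backups/quay stay read-only (lowerdir) while anything Quay
        # writes lands in /local/overlay/upper; state: mounted also records
        # the entry in /etc/fstab, and boot: yes keeps it across reboots.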

        - name: Ensure quay service is started after this.
          ansible.builtin.systemd_service:
            name: quay
            scope: user
            state: started

        - name: Wait for quay to become ready again.
          ansible.builtin.uri:
            method: GET
            url: https://registry.ocp4.example.com/
            headers:
              Accept: application/json
              Content-Type: application/json
            validate_certs: no
            status_code:
              - 200
              - 404
              - 502
          register: startup_wait
          until: startup_wait.status == 200
          retries: 30
          delay: 5
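        # 404 and 502 are accepted by the uri task (instead of failing the
        # play) while Quay is still starting up; the until loop keeps polling
        # for up to 30 * 5 seconds until the front page answers with 200.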

      # quay_nmft comes out of set_fact as a string, so cast before comparing.
      when:
        - quay_nmft | int < 200

- name: Ensure "oc mirror" has completed (non-idempotent, but only downloads 5-10 images if anything).
  hosts: workstation.lab.example.com
  gather_subset: min
  become: no
  tasks:
    - name: Ensure working directory exists.
      ansible.builtin.file:
        path: "{{ ansible_facts['user_dir'] }}/mirror"
        state: directory
        mode: '0755'
        owner: student
        group: student

    - name: Ensure image set config is correct.
      ansible.builtin.copy:
        dest: "{{ ansible_facts['user_dir'] }}/image-set-config.yaml"
        mode: '0644'
        owner: student
        group: student
        content: |
          kind: ImageSetConfiguration
          apiVersion: mirror.openshift.io/v2alpha1
          mirror:
            platform:
              channels:
                - name: stable-4.18
                  type: ocp
                  minVersion: 4.18.6
                  maxVersion: 4.18.6
              graph: true
            operators:
              - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.18
                full: false
                packages:
                  - name: node-maintenance-operator
              - catalog: registry.redhat.io/redhat/certified-operator-index:v4.18
                full: false
                packages:
                  - name: crunchy-postgres-operator
            additionalImages:
              - name: registry.redhat.io/ubi9/ubi:latest
              - name: registry.redhat.io/ubi9/toolbox:latest
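    # The image set pins the OpenShift release to a single version (4.18.6 in
    # the stable-4.18 channel, with graph data for the update service), trims
    # the two operator catalogs down to the node-maintenance and
    # crunchy-postgres operators, and mirrors the two standalone UBI images.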

    - name: Kick off "oc mirror".
      ansible.builtin.command:
        cmd: oc mirror --v2 -c {{ ansible_facts['user_dir'] }}/image-set-config.yaml --workspace file://{{ ansible_facts['user_dir'] }}/mirror/ docker://registry.ocp4.example.com
      register: mirror_output
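    # A cold run of "oc mirror --v2" can take a long time; if that becomes a
    # problem, one option is to run it asynchronously and poll (sketch only,
    # not part of the original flow):
    #- name: Kick off "oc mirror" in the background.
    #  ansible.builtin.command:
    #    cmd: oc mirror --v2 -c {{ ansible_facts['user_dir'] }}/image-set-config.yaml --workspace file://{{ ansible_facts['user_dir'] }}/mirror/ docker://registry.ocp4.example.com
    #  async: 7200
    #  poll: 30
    #  register: mirror_output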

    - name: Show what happened on stdout.
      ansible.builtin.debug:
        var: mirror_output.stdout_lines

    - name: Show what happened on stderr.
      ansible.builtin.debug:
        var: mirror_output.stderr_lines
...