From 260c3f98efe00e2b6ca596a9581b157ee3ebe2e7 Mon Sep 17 00:00:00 2001 From: Lenka Segura Date: Oct 04 2021 21:38:56 +0000 Subject: datanommer: Create manual playbook to run alembic --- diff --git a/playbooks/manual/upgrade/datanommer.yml b/playbooks/manual/upgrade/datanommer.yml index cc1a556..c128dc6 100644 --- a/playbooks/manual/upgrade/datanommer.yml +++ b/playbooks/manual/upgrade/datanommer.yml @@ -1,40 +1,11 @@ -- name: push packages out +- name: Verify the badges backend and stop it hosts: - - badges-backend - - badges-backend-stg - - datagrepper - - datagrepper-stg - - notifs-web - - notifs-web-stg - - busgateway - - busgateway-stg + - badges_backend + - badges_backend_stg user: root vars_files: - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - vars: - testing: False - handlers: - - import_tasks: "{{ handlers_path }}/restart_services.yml" - - tasks: - - name: clean all metadata {%if testing%}(with infrastructure-testing on){%endif%} - command: yum clean all {%if testing%} --enablerepo=infrastructure-tags-stg {%endif%} - check_mode: no - - name: update datanommer packages from main repo - package: name="*datanommer*" state=latest - when: not testing - - name: yum update datanommer packages from testing repo - yum: name="*datanommer*" state=latest enablerepo=infrastructure-tags-stg - when: testing - -- name: verify the badges backend and stop it - hosts: badges_backend:badges_backend_stg - user: root - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" + - /srv/private/ansible/vars.yml - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml handlers: - import_tasks: "{{ handlers_path }}/restart_services.yml" @@ -48,107 +19,90 @@ tasks: - service: name="fedmsg-hub" state=stopped -- name: verify the datagrepper frontend and stop it - hosts: datagrepper:datagrepper_stg +- name: Stop datagrepper + hosts: + - os_masters[0] + - 
os_masters_stg[0] user: root vars_files: - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" + - /srv/private/ansible/vars.yml - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml handlers: - import_tasks: "{{ handlers_path }}/restart_services.yml" - pre_tasks: - - name: tell nagios to shush - nagios: action=downtime minutes=120 service=host host={{ inventory_hostname_short }}{{ env_suffix }} - delegate_to: noc01.iad2.fedoraproject.org - ignore_errors: true - roles: - - datagrepper tasks: - - service: name="httpd" state=stopped + - name: Scale down datagrepper to 0 pods + command: oc -n datagrepper scale dc/datagrepper --replicas=0 -- name: verify the notifs frontend and stop it - hosts: notifs_web:notifs_web_stg +- name: Stop datanommer + hosts: + - os_masters[0] + - os_masters_stg[0] user: root vars_files: - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" + - /srv/private/ansible/vars.yml - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml handlers: - import_tasks: "{{ handlers_path }}/restart_services.yml" - pre_tasks: - - name: tell nagios to shush - nagios: action=downtime minutes=120 service=host host={{ inventory_hostname_short }}{{ env_suffix }} - delegate_to: noc01.iad2.fedoraproject.org - ignore_errors: true - roles: - - notifs/frontend tasks: - - service: name="httpd" state=stopped + - name: Scale down datanommer to 0 pods + command: oc -n datanommer scale dc/datanommer --replicas=0 + +- name: Upgrade the database + role: openshift/object + app: datanommer + template: job.yml + objectname: job.yml -- name: verify the datanommer backend, stop it, and then upgrade the db - hosts: busgateway:busgateway_stg +- name: Wait for the db-upgrade completion + command: oc -n datanommer get jobs/db-upgrade -o jsonpath='{@.status.succeeded}' + register: status + until: status.stdout | int == 1 + retries: 5 + delay: 30 + +- name: Delete the job in case it finished + role: openshift/object-delete + app: datanommer + 
objecttype: job + objectname: db-upgrade + when: status.stdout | int == 1 + +- name: Start the datanommer again + hosts: + - os_masters[0] + - os_masters_stg[0] user: root vars_files: - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" + - /srv/private/ansible/vars.yml - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - handlers: - - import_tasks: "{{ handlers_path }}/restart_services.yml" - pre_tasks: - - name: tell nagios to shush - nagios: action=downtime minutes=120 service=host host={{ inventory_hostname_short }}{{ env_suffix }} - delegate_to: noc01.iad2.fedoraproject.org - ignore_errors: true - roles: - - fedmsg/datanommer tasks: - - name: Stop the notification backend - service: name="fedmsg-hub" state=stopped - - - name: Upgrade the database - command: /usr/bin/alembic -c /usr/share/datanommer.models/alembic.ini upgrade head - args: - chdir: /usr/share/datanommer.models/ - async: 20000 - poll: 60 + - name: Scale up datanommer pods + command: oc -n datanommer scale dc/datanommer --replicas=1 - - name: And... 
start the backend again - service: name="fedmsg-hub" state=started - - post_tasks: - - name: tell nagios to unshush - nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }} - delegate_to: noc01.iad2.fedoraproject.org - ignore_errors: true - -- name: restart the frontend pieces (fmn.web and datagrepper) +- name: Start the datagrepper again hosts: - - datagrepper - - datagrepper-stg - - notifs-web - - notifs-web-stg + - os_masters[0] + - os_masters_stg[0] user: root vars_files: - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" + - /srv/private/ansible/vars.yml - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml tasks: - - service: name="httpd" state=started - post_tasks: - - name: tell nagios to unshush - nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }} - delegate_to: noc01.iad2.fedoraproject.org - ignore_errors: true + - name: Scale up datagrepper pods + command: oc -n datagrepper scale dc/datagrepper --replicas=1 -- name: restart the last backend piece (badges) +- name: Restart the last backend piece (badges) hosts: - badges-backend - badges-backend-stg user: root vars_files: - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" + - /srv/private/ansible/vars.yml - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml tasks: - service: name="fedmsg-hub" state=started diff --git a/roles/openshift-apps/datanommer/templates/job.yml b/roles/openshift-apps/datanommer/templates/job.yml new file mode 100644 index 0000000..9f2509d --- /dev/null +++ b/roles/openshift-apps/datanommer/templates/job.yml @@ -0,0 +1,25 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: db-upgrade +spec: + activeDeadlineSeconds: 86400 + backoffLimit: 1 + completions: 1 + template: + metadata: + name: db-upgrade + spec: + containers: + - name: db-upgrade + image: docker-registry.default.svc:5000/datanommer/datanommer:latest + command: 
["/opt/app-root/src/.s2i/datanommer-upgrade-db.sh"] + volumeMounts: + - name: fedora-messaging-config-volume + mountPath: "/etc/fedora-messaging" + readOnly: true + volumes: + - name: fedora-messaging-config-volume + configMap: + name: fedora-messaging-config + restartPolicy: Never