From 4efe4052348127b5999bcf20265776180854e69a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Mon, 2 Jun 2025 13:28:56 +0200 Subject: [PATCH 1/9] Combined Dockerfiles Combined Dockerfiles for MongoDB Kubernetes Operator Combined Dockerfiles for Init Ops Manager image Combined Dockerfiles for Init Database and Init AppDB Combined Dockerfiles for Database Combined Dockerfiles for upgrade-hook and readinessprobe Added agent Dockerfiles Created Dockerfile for OpsManager Removed Dockerfile.dcar and README.md about how to build it Add more gitgraphs --- .gitignore | 5 - docker/mongodb-agent-non-matrix/Dockerfile | 13 ++- docker/mongodb-agent-non-matrix/README.md | 17 +++ docker/mongodb-agent/Dockerfile | 39 ++++++- docker/mongodb-agent/README.md | 20 +++- .../mongodb-enterprise-ops-manager/Dockerfile | 95 ++++++++++++++++ .../Dockerfile.dcar | 25 ----- .../Dockerfile.plain | 84 +++++++++++++++ docker/mongodb-enterprise-ops-manager/LICENSE | 3 - .../mongodb-enterprise-ops-manager/README.md | 11 ++ docker/mongodb-kubernetes-database/Dockerfile | 71 ++++++++++++ .../Dockerfile.plain | 87 +++++++++++++++ docker/mongodb-kubernetes-database/README.md | 9 +- .../mongodb-kubernetes-init-appdb/Dockerfile | 52 +++++++++ .../Dockerfile.builder | 22 ++++ .../Dockerfile.plain | 35 ++++++ .../Dockerfile.template | 42 ++++++++ .../Dockerfile.ubi_minimal | 11 ++ .../mongodb-kubernetes-init-appdb/README.md | 11 ++ .../Dockerfile | 50 +++++++++ .../Dockerfile.plain | 34 ++++++ .../README.md | 11 ++ .../Dockerfile | 31 ++++++ .../Dockerfile.plain | 26 +++++ .../README.md | 9 ++ docker/mongodb-kubernetes-operator/Dockerfile | 72 +++++++++++++ .../Dockerfile.plain | 38 +++++++ docker/mongodb-kubernetes-operator/README.md | 10 +- .../Dockerfile | 11 +- .../README.md | 10 ++ .../Dockerfile | 11 +- .../mongodb-kubernetes-upgrade-hook/README.md | 10 ++ ideal_release_flow.md | 66 ++++++++++++ ideal_release_flow.mmd | 28 +++++ ideal_release_flow.png | Bin 0 -> 14467 bytes 
ideal_release_flow_backport.mmd | 37 +++++++ ideal_release_flow_backport.png | Bin 0 -> 28617 bytes ideal_release_flow_versioning_complex.mmd | 27 +++++ ideal_release_flow_versioning_complex.png | Bin 0 -> 22061 bytes ideal_release_flow_versioning_easy.mmd | 26 +++++ ideal_release_flow_versioning_easy.png | Bin 0 -> 18829 bytes inventories/database.yaml | 8 +- inventories/init_appdb.yaml | 7 +- inventories/init_database.yaml | 3 +- inventories/init_om.yaml | 3 +- inventories/om.yaml | 3 +- inventory.yaml | 1 + lib/sonar/sonar.py | 0 lib/sonar/test/test_final_dockerfiles.py | 101 ++++++++++++++++++ 49 files changed, 1225 insertions(+), 60 deletions(-) create mode 100644 docker/mongodb-agent-non-matrix/README.md create mode 100644 docker/mongodb-enterprise-ops-manager/Dockerfile delete mode 100644 docker/mongodb-enterprise-ops-manager/Dockerfile.dcar create mode 100644 docker/mongodb-enterprise-ops-manager/Dockerfile.plain delete mode 100644 docker/mongodb-enterprise-ops-manager/LICENSE create mode 100644 docker/mongodb-enterprise-ops-manager/README.md create mode 100644 docker/mongodb-kubernetes-database/Dockerfile create mode 100644 docker/mongodb-kubernetes-database/Dockerfile.plain create mode 100644 docker/mongodb-kubernetes-init-appdb/Dockerfile create mode 100644 docker/mongodb-kubernetes-init-appdb/Dockerfile.builder create mode 100644 docker/mongodb-kubernetes-init-appdb/Dockerfile.plain create mode 100644 docker/mongodb-kubernetes-init-appdb/Dockerfile.template create mode 100644 docker/mongodb-kubernetes-init-appdb/Dockerfile.ubi_minimal create mode 100644 docker/mongodb-kubernetes-init-appdb/README.md create mode 100644 docker/mongodb-kubernetes-init-database/Dockerfile create mode 100644 docker/mongodb-kubernetes-init-database/Dockerfile.plain create mode 100644 docker/mongodb-kubernetes-init-database/README.md create mode 100644 docker/mongodb-kubernetes-init-ops-manager/Dockerfile create mode 100644 
docker/mongodb-kubernetes-init-ops-manager/Dockerfile.plain create mode 100644 docker/mongodb-kubernetes-init-ops-manager/README.md create mode 100644 docker/mongodb-kubernetes-operator/Dockerfile create mode 100644 docker/mongodb-kubernetes-operator/Dockerfile.plain create mode 100644 docker/mongodb-kubernetes-readinessprobe/README.md create mode 100644 docker/mongodb-kubernetes-upgrade-hook/README.md create mode 100644 ideal_release_flow.md create mode 100644 ideal_release_flow.mmd create mode 100644 ideal_release_flow.png create mode 100644 ideal_release_flow_backport.mmd create mode 100644 ideal_release_flow_backport.png create mode 100644 ideal_release_flow_versioning_complex.mmd create mode 100644 ideal_release_flow_versioning_complex.png create mode 100644 ideal_release_flow_versioning_easy.mmd create mode 100644 ideal_release_flow_versioning_easy.png mode change 100644 => 100755 lib/sonar/sonar.py create mode 100644 lib/sonar/test/test_final_dockerfiles.py diff --git a/.gitignore b/.gitignore index c5ca572c5..9e3cb309c 100644 --- a/.gitignore +++ b/.gitignore @@ -44,11 +44,6 @@ public/architectures/**/secrets/* docker/mongodb-kubernetes-appdb/content/readinessprobe mongodb-kubernetes -docker/mongodb-kubernetes-operator/Dockerfile -docker/mongodb-kubernetes-database/Dockerfile -docker/mongodb-enterprise-ops-manager/Dockerfile -docker/mongodb-kubernetes-init-database/Dockerfile -docker/mongodb-kubernetes-init-ops-manager/Dockerfile docker/mongodb-kubernetes-operator/content/mongodb-kubernetes-operator.tar docker/mongodb-kubernetes-tests/helm_chart/ docker/mongodb-kubernetes-tests/public/ diff --git a/docker/mongodb-agent-non-matrix/Dockerfile b/docker/mongodb-agent-non-matrix/Dockerfile index e1c1caff2..0677126fd 100644 --- a/docker/mongodb-agent-non-matrix/Dockerfile +++ b/docker/mongodb-agent-non-matrix/Dockerfile @@ -1,5 +1,14 @@ -ARG imagebase -FROM ${imagebase} as base +FROM scratch AS base + +ARG agent_version +ARG agent_distro +ARG tools_version +ARG 
tools_distro + +ADD https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-${agent_version}.${agent_distro}.tar.gz /data/mongodb-agent.tar.gz +ADD https://downloads.mongodb.org/tools/db/mongodb-database-tools-${tools_distro}-${tools_version}.tgz /data/mongodb-tools.tgz + +COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE FROM registry.access.redhat.com/ubi9/ubi-minimal diff --git a/docker/mongodb-agent-non-matrix/README.md b/docker/mongodb-agent-non-matrix/README.md new file mode 100644 index 000000000..79dc0d2d5 --- /dev/null +++ b/docker/mongodb-agent-non-matrix/README.md @@ -0,0 +1,17 @@ +### Building locally + +For building the MongoDB Agent (non-static) image locally use the example command: + +TODO: What to do with label quay.expires-after=48h? +```bash +AGENT_VERSION="108.0.7.8810-1" +TOOLS_VERSION="100.12.0" +AGENT_DISTRO="rhel9_x86_64" +TOOLS_DISTRO="rhel93-x86_64" +docker buildx build --load --progress plain . -f docker/mongodb-agent/Dockerfile -t "mongodb-agent:${AGENT_VERSION}" \ + --build-arg version="${VERSION}" \ + --build-arg agent_version="${AGENT_VERSION}" \ + --build-arg tools_version="${TOOLS_VERSION}" \ + --build-arg agent_distro="${AGENT_DISTRO}" \ + --build-arg tools_distro="${TOOLS_DISTRO}" +``` diff --git a/docker/mongodb-agent/Dockerfile b/docker/mongodb-agent/Dockerfile index 08d8746d8..5ec4e127b 100644 --- a/docker/mongodb-agent/Dockerfile +++ b/docker/mongodb-agent/Dockerfile @@ -1,5 +1,40 @@ -ARG imagebase -FROM ${imagebase} as base +# the init database image gets supplied by pipeline.py and corresponds to the operator version we want to release +# the agent with. This enables us to release the agent for older operator. 
+ARG init_database_image +FROM ${init_database_image} AS init_database + +FROM public.ecr.aws/docker/library/golang:1.24 AS dependency_downloader + +WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ + +COPY go.mod go.sum ./ + +RUN go mod download + +FROM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder + +WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ + +COPY --from=dependency_downloader /go/pkg /go/pkg +COPY . /go/src/github.com/mongodb/mongodb-kubernetes + +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go + +FROM scratch AS base +ARG mongodb_tools_url_ubi +ARG mongodb_agent_url_ubi + +COPY --from=readiness_builder /readinessprobe /data/ +COPY --from=readiness_builder /version-upgrade-hook /data/ + +ADD ${mongodb_tools_url_ubi} /data/mongodb_tools_ubi.tgz +ADD ${mongodb_agent_url_ubi} /data/mongodb_agent_ubi.tgz + +COPY --from=init_database /probes/probe.sh /data/probe.sh +COPY --from=init_database /scripts/agent-launcher-lib.sh /data/ +COPY --from=init_database /scripts/agent-launcher.sh /data/ +COPY --from=init_database /licenses/LICENSE /data/ FROM registry.access.redhat.com/ubi9/ubi-minimal diff --git a/docker/mongodb-agent/README.md b/docker/mongodb-agent/README.md index 377f4b938..a447d60f0 100644 --- a/docker/mongodb-agent/README.md +++ b/docker/mongodb-agent/README.md @@ -1,4 +1,20 @@ # Mongodb-Agent The agent gets released in a matrix style with the init-database image, which gets tagged with the operator version. -This works by using the multi-stage pattern and build-args. First - retrieve the `init-database:` and retrieve the -binaries from there. Then we continue with the other steps to fully build the image. \ No newline at end of file +This works by using the multi-stage pattern and build-args. 
First - retrieve the `init-database:` and retrieve the +binaries from there. Then we continue with the other steps to fully build the image. + +### Building locally + +For building the MongoDB Agent image locally use the example command: + +```bash +VERSION="108.0.7.8810-1" +INIT_DATABASE_IMAGE="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database:1.1.0" +MONGODB_TOOLS_URL_UBI="https://downloads.mongodb.org/tools/db/mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" +MONGODB_AGENT_URL_UBI="https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-108.0.7.8810-1.rhel9_x86_64.tar.gz" +docker buildx build --load --progress plain . -f docker/mongodb-agent/Dockerfile -t "mongodb-agent:${VERSION}_1.1.0" \ + --build-arg version="${VERSION}" \ + --build-arg init_database_image="${INIT_DATABASE_IMAGE}" \ + --build-arg mongodb_tools_url_ubi="${MONGODB_TOOLS_URL_UBI}" \ + --build-arg mongodb_agent_url_ubi="${MONGODB_AGENT_URL_UBI}" +``` diff --git a/docker/mongodb-enterprise-ops-manager/Dockerfile b/docker/mongodb-enterprise-ops-manager/Dockerfile new file mode 100644 index 000000000..aa95b4bee --- /dev/null +++ b/docker/mongodb-enterprise-ops-manager/Dockerfile @@ -0,0 +1,95 @@ +# Build compilable stuff + +FROM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder +COPY . 
/go/src/github.com/mongodb/mongodb-kubernetes +WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes + +RUN CGO_ENABLED=0 go build -a -buildvcs=false -o /data/scripts/mmsconfiguration ./docker/mongodb-kubernetes-init-ops-manager/mmsconfiguration/edit_mms_configuration.go +RUN CGO_ENABLED=0 go build -a -buildvcs=false -o /data/scripts/backup-daemon-readiness-probe ./docker/mongodb-kubernetes-init-ops-manager/backupdaemon_readinessprobe/backupdaemon_readiness.go + +# Move binaries and scripts +FROM scratch AS base + +COPY --from=readiness_builder /data/scripts/mmsconfiguration /data/scripts/mmsconfiguration +COPY --from=readiness_builder /data/scripts/backup-daemon-readiness-probe /data/scripts/backup-daemon-readiness-probe + +# After v2.0, when non-Static Agent images will be removed, please ensure to copy those files +# into ./docker/mongodb-enterprise-ops-manager directory. Leaving it this way will make the maintenance easier. +COPY ./docker/mongodb-kubernetes-init-ops-manager/scripts/docker-entry-point.sh /data/scripts +COPY ./docker/mongodb-kubernetes-init-ops-manager/scripts/backup-daemon-liveness-probe.sh /data/scripts +COPY ./docker/mongodb-kubernetes-init-ops-manager/LICENSE /data/licenses/mongodb-enterprise-ops-manager + +FROM registry.access.redhat.com/ubi9/ubi-minimal + +ARG version +ARG om_download_url + +LABEL name="MongoDB Enterprise Ops Manager" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version=${version} \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Image" \ + description="MongoDB Enterprise Ops Manager" + +ENV MMS_HOME=/mongodb-ops-manager +ENV MMS_PROP_FILE=${MMS_HOME}/conf/conf-mms.properties +ENV MMS_CONF_FILE=${MMS_HOME}/conf/mms.conf +ENV MMS_LOG_DIR=${MMS_HOME}/logs +ENV MMS_TMP_DIR=${MMS_HOME}/tmp + +EXPOSE 8080 + +# OpsManager docker image needs to have the MongoDB dependencies because the +# backup daemon is running its database locally + +# Replace libcurl-minimal and curl-minimal with the full versions +# 
https://bugzilla.redhat.com/show_bug.cgi?id=1994521 +RUN microdnf install -y libssh libpsl libbrotli \ + && microdnf download curl libcurl \ + && rpm -Uvh --nodeps --replacefiles "*curl*$( uname -i ).rpm" \ + && microdnf remove -y libcurl-minimal curl-minimal + +RUN microdnf install --disableplugin=subscription-manager -y \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libpcap \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + tar \ + rpm-libs \ + net-tools \ + procps-ng \ + ncurses + +COPY --from=base /data/licenses /licenses/ +COPY --from=base /data/scripts /opt/scripts + +RUN curl --fail -L -o ops_manager.tar.gz ${om_download_url} \ + && tar -xzf ops_manager.tar.gz \ + && rm ops_manager.tar.gz \ + && mv mongodb-mms* "${MMS_HOME}" + +# permissions +RUN chmod -R 0777 "${MMS_LOG_DIR}" \ + && chmod -R 0777 "${MMS_TMP_DIR}" \ + && chmod -R 0775 "${MMS_HOME}/conf" \ + && chmod -R 0775 "${MMS_HOME}/jdk" \ + && mkdir "${MMS_HOME}/mongodb-releases/" \ + && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ + && chmod -R 0777 "${MMS_CONF_FILE}" \ + && chmod -R 0777 "${MMS_PROP_FILE}" + +# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. +# For now we need to move into the templates directory. 
+RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" + +USER 2000 + +# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) +ENTRYPOINT [ "sleep infinity" ] diff --git a/docker/mongodb-enterprise-ops-manager/Dockerfile.dcar b/docker/mongodb-enterprise-ops-manager/Dockerfile.dcar deleted file mode 100644 index 639c7930b..000000000 --- a/docker/mongodb-enterprise-ops-manager/Dockerfile.dcar +++ /dev/null @@ -1,25 +0,0 @@ -{% extends "Dockerfile.ubi" %} - - -{% block packages %} -RUN yum install --disableplugin=subscription-manager \ - cyrus-sasl \ - cyrus-sasl-gssapi \ - cyrus-sasl-plain \ - krb5-libs \ - libcurl \ - libpcap \ - lm_sensors-libs \ - net-snmp \ - net-snmp-agent-libs \ - openldap \ - openssl \ - rpm-libs \ - net-tools \ - procps-ng \ - ncurses -{% endblock %} - -{% block healthcheck %} -HEALTHCHECK --timeout=30s CMD ls /mongodb-ops-manager/bin/mongodb-mms || exit 1 -{% endblock %} diff --git a/docker/mongodb-enterprise-ops-manager/Dockerfile.plain b/docker/mongodb-enterprise-ops-manager/Dockerfile.plain new file mode 100644 index 000000000..717014b97 --- /dev/null +++ b/docker/mongodb-enterprise-ops-manager/Dockerfile.plain @@ -0,0 +1,84 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi9/ubi-minimal + + +LABEL name="MongoDB Enterprise Ops Manager" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="8.0.7" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Image" \ + description="MongoDB Enterprise Ops Manager" + + +ENV MMS_HOME /mongodb-ops-manager +ENV MMS_PROP_FILE ${MMS_HOME}/conf/conf-mms.properties +ENV MMS_CONF_FILE ${MMS_HOME}/conf/mms.conf +ENV MMS_LOG_DIR ${MMS_HOME}/logs +ENV MMS_TMP_DIR ${MMS_HOME}/tmp + +EXPOSE 8080 + +# OpsManager docker image needs to have the MongoDB dependencies because the +# backup daemon is running its database locally + + +# Replace libcurl-minimal and curl-minimal with the full versions +# 
https://bugzilla.redhat.com/show_bug.cgi?id=1994521 +RUN microdnf install -y libssh libpsl libbrotli \ + && microdnf download curl libcurl \ + && rpm -Uvh --nodeps --replacefiles "*curl*$( uname -i ).rpm" \ + && microdnf remove -y libcurl-minimal curl-minimal + +RUN microdnf install --disableplugin=subscription-manager -y \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libpcap \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + tar \ + rpm-libs \ + net-tools \ + procps-ng \ + ncurses + + +COPY --from=base /data/licenses /licenses/ + +COPY --from=base /data/scripts /opt/scripts + + + +RUN curl --fail -L -o ops_manager.tar.gz https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-8.0.7.500.20250505T1426Z.tar.gz \ + && tar -xzf ops_manager.tar.gz \ + && rm ops_manager.tar.gz \ + && mv mongodb-mms* "${MMS_HOME}" + + +# permissions +RUN chmod -R 0777 "${MMS_LOG_DIR}" \ + && chmod -R 0777 "${MMS_TMP_DIR}" \ + && chmod -R 0775 "${MMS_HOME}/conf" \ + && chmod -R 0775 "${MMS_HOME}/jdk" \ + && mkdir "${MMS_HOME}/mongodb-releases/" \ + && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ + && chmod -R 0777 "${MMS_CONF_FILE}" \ + && chmod -R 0777 "${MMS_PROP_FILE}" + +# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. +# For now we need to move into the templates directory. +RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" + +USER 2000 + +# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) +ENTRYPOINT [ "sleep infinity" ] + + diff --git a/docker/mongodb-enterprise-ops-manager/LICENSE b/docker/mongodb-enterprise-ops-manager/LICENSE deleted file mode 100644 index dc71da876..000000000 --- a/docker/mongodb-enterprise-ops-manager/LICENSE +++ /dev/null @@ -1,3 +0,0 @@ -Usage of the MongoDB Enterprise Operator for Kubernetes indicates agreement with the MongoDB Customer Agreement. 
- -* https://www.mongodb.com/customer-agreement/ diff --git a/docker/mongodb-enterprise-ops-manager/README.md b/docker/mongodb-enterprise-ops-manager/README.md new file mode 100644 index 000000000..440e839bc --- /dev/null +++ b/docker/mongodb-enterprise-ops-manager/README.md @@ -0,0 +1,11 @@ +### Building locally + +For building the MongoDB Enterprise Ops Manager Docker image locally use the example command: + +```bash +VERSION="8.0.7" +OM_DOWNLOAD_URL="https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-8.0.7.500.20250505T1426Z.tar.gz" +docker buildx build --load --progress plain . -f docker/mongodb-enterprise-ops-manager/Dockerfile -t "mongodb-enterprise-ops-manager:${VERSION}" \ + --build-arg version="${VERSION}" \ + --build-arg om_download_url="${OM_DOWNLOAD_URL}" +``` diff --git a/docker/mongodb-kubernetes-database/Dockerfile b/docker/mongodb-kubernetes-database/Dockerfile new file mode 100644 index 000000000..97fbda8d0 --- /dev/null +++ b/docker/mongodb-kubernetes-database/Dockerfile @@ -0,0 +1,71 @@ +FROM scratch AS base + +COPY ./docker/mongodb-kubernetes-database/LICENSE /data/licenses/mongodb-kubernetes-database + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG VERSION + +LABEL name="MongoDB Kubernetes Database" \ + version="${VERSION}" \ + summary="MongoDB Kubernetes Database Image" \ + description="MongoDB Kubernetes Database Image" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +ENV MMS_HOME=/mongodb-automation +ENV MMS_LOG_DIR=/var/log/mongodb-mms-automation + +RUN microdnf update -y && rm -rf /var/cache/yum + +# these are the packages needed for the agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +RUN microdnf install -y --disableplugin=subscription-manager \ + hostname \ + procps + +# these are the packages needed for MongoDB +# (https://docs.mongodb.com/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ "RHEL/CentOS 8" tab) +RUN 
microdnf install -y --disableplugin=subscription-manager \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + jq \ + tar \ + xz-libs \ + findutils + +RUN ln -s /usr/lib64/libsasl2.so.3 /usr/lib64/libsasl2.so.2 + +# Set the required perms +RUN mkdir -p "${MMS_LOG_DIR}" \ + && chmod 0775 "${MMS_LOG_DIR}" \ + && mkdir -p /var/lib/mongodb-mms-automation \ + && chmod 0775 /var/lib/mongodb-mms-automation \ + && mkdir -p /data \ + && chmod 0775 /data \ + && mkdir -p /journal \ + && chmod 0775 /journal \ + && mkdir -p "${MMS_HOME}" \ + && chmod -R 0775 "${MMS_HOME}" + +# USER needs to be set for this image to pass RedHat verification. Some customers have these requirements as well +# It does not matter what number it is, as long as it is set to something. +# However, OpenShift will run the container as a random user, +# and the number in this configuration is not relevant. +USER 2000 + +# The docker image doesn't have any scripts so by default does nothing +# The script will be copied in runtime from init containers and the operator is expected +# to override the COMMAND +ENTRYPOINT ["sleep infinity"] + +COPY --from=base /data/licenses/mongodb-kubernetes-database /licenses/mongodb-kubernetes-database diff --git a/docker/mongodb-kubernetes-database/Dockerfile.plain b/docker/mongodb-kubernetes-database/Dockerfile.plain new file mode 100644 index 000000000..ea7b4a8e7 --- /dev/null +++ b/docker/mongodb-kubernetes-database/Dockerfile.plain @@ -0,0 +1,87 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + + +LABEL name="MongoDB Kubernetes Database" \ + version="1.1.0" \ + summary="MongoDB Kubernetes Database Image" \ + description="MongoDB Kubernetes Database Image" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + + + + + +ENV MMS_HOME /mongodb-automation +ENV MMS_LOG_DIR 
/var/log/mongodb-mms-automation + + + +RUN microdnf update -y && rm -rf /var/cache/yum + +# these are the packages needed for the agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +RUN microdnf install -y --disableplugin=subscription-manager \ + hostname \ + procps + + +# these are the packages needed for MongoDB +# (https://docs.mongodb.com/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ "RHEL/CentOS 8" tab) +RUN microdnf install -y --disableplugin=subscription-manager \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + jq \ + tar \ + xz-libs \ + findutils + + + +RUN ln -s /usr/lib64/libsasl2.so.3 /usr/lib64/libsasl2.so.2 + + +# Set the required perms +RUN mkdir -p "${MMS_LOG_DIR}" \ + && chmod 0775 "${MMS_LOG_DIR}" \ + && mkdir -p /var/lib/mongodb-mms-automation \ + && chmod 0775 /var/lib/mongodb-mms-automation \ + && mkdir -p /data \ + && chmod 0775 /data \ + && mkdir -p /journal \ + && chmod 0775 /journal \ + && mkdir -p "${MMS_HOME}" \ + && chmod -R 0775 "${MMS_HOME}" + + + + +# USER needs to be set for this image to pass RedHat verification. Some customers have these requirements as well +# It does not matter what number it is, as long as it is set to something. +# However, OpenShift will run the container as a random user, +# and the number in this configuration is not relevant. 
+USER 2000 + + +# The docker image doesn't have any scripts so by default does nothing +# The script will be copied in runtime from init containers and the operator is expected +# to override the COMMAND +ENTRYPOINT ["sleep infinity"] + + +COPY --from=base /data/licenses/mongodb-kubernetes-database /licenses/mongodb-kubernetes-database + + diff --git a/docker/mongodb-kubernetes-database/README.md b/docker/mongodb-kubernetes-database/README.md index a6abf56a9..e7b937e0e 100644 --- a/docker/mongodb-kubernetes-database/README.md +++ b/docker/mongodb-kubernetes-database/README.md @@ -34,11 +34,12 @@ This image can't be built in any host, because it will require the use of a subs host, with subscription service enabled, is required. That's the reason behind using the Redhat build service to build this images with. -## Building the DCAR database image +### Building locally -The dcar image needs to be built manually. +For building the MongoDB Database image locally use the example command: ```bash -docker build . -t 268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/usaf/mongodb-kubernetes-database:1.5.3 -docker push 268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/usaf/mongodb-kubernetes-database:1.5.3 +VERSION="1.0.1" +docker buildx build --load --progress plain . -f docker/mongodb-kubernetes-database/Dockerfile -t "mongodb-kubernetes-database:${VERSION}" \ + --build-arg VERSION="${VERSION}" ``` diff --git a/docker/mongodb-kubernetes-init-appdb/Dockerfile b/docker/mongodb-kubernetes-init-appdb/Dockerfile new file mode 100644 index 000000000..ed0cea9dd --- /dev/null +++ b/docker/mongodb-kubernetes-init-appdb/Dockerfile @@ -0,0 +1,52 @@ +FROM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder + +COPY . 
/go/src/github.com/mongodb/mongodb-kubernetes +WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go + +FROM scratch AS base + +ARG mongodb_tools_url_ubi + +COPY --from=readiness_builder /readinessprobe /data/ +COPY --from=readiness_builder /version-upgrade-hook /data/version-upgrade-hook + +ADD ${mongodb_tools_url_ubi} /data/mongodb_tools_ubi.tgz + +COPY ./docker/mongodb-kubernetes-init-database/content/probe.sh /data/probe.sh + +COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh /data/scripts/ +COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher.sh /data/scripts/ + +COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/licenses/ + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version +LABEL name="MongoDB Kubernetes Init AppDB" \ + version="mongodb-kubernetes-init-appdb-${version}" \ + summary="MongoDB Kubernetes AppDB Init Image" \ + description="Startup Scripts for MongoDB Enterprise Application Database for Ops Manager" \ + release="1" \ + vendor="MongoDB" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/readinessprobe /probes/readinessprobe +COPY --from=base /data/probe.sh /probes/probe.sh +COPY --from=base /data/scripts/ /scripts/ +COPY --from=base /data/licenses /licenses/ +COPY --from=base /data/version-upgrade-hook /probes/version-upgrade-hook + +RUN microdnf -y update --nodocs \ + && microdnf -y install --nodocs tar gzip \ + && microdnf clean all + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + +USER 2000 + +ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", 
"/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] diff --git a/docker/mongodb-kubernetes-init-appdb/Dockerfile.builder b/docker/mongodb-kubernetes-init-appdb/Dockerfile.builder new file mode 100644 index 000000000..69dc6d6af --- /dev/null +++ b/docker/mongodb-kubernetes-init-appdb/Dockerfile.builder @@ -0,0 +1,22 @@ +# Build compilable stuff + +FROM public.ecr.aws/docker/library/golang:1.24 as readiness_builder +COPY . /go/src/github.com/mongodb/mongodb-kubernetes +WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go + +FROM scratch +ARG mongodb_tools_url_ubi + +COPY --from=readiness_builder /readinessprobe /data/ +COPY --from=readiness_builder /version-upgrade-hook /data/version-upgrade-hook + +ADD ${mongodb_tools_url_ubi} /data/mongodb_tools_ubi.tgz + +COPY ./docker/mongodb-kubernetes-init-database/content/probe.sh /data/probe.sh + +COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh /data/scripts/ +COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher.sh /data/scripts/ + +COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/licenses/ diff --git a/docker/mongodb-kubernetes-init-appdb/Dockerfile.plain b/docker/mongodb-kubernetes-init-appdb/Dockerfile.plain new file mode 100644 index 000000000..d0c5d967a --- /dev/null +++ b/docker/mongodb-kubernetes-init-appdb/Dockerfile.plain @@ -0,0 +1,35 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version +LABEL name="MongoDB Kubernetes Init AppDB" \ + version="mongodb-kubernetes-init-appdb-${version}" \ + summary="MongoDB Kubernetes AppDB Init Image" \ + description="Startup Scripts for MongoDB Enterprise Application Database for Ops 
Manager" \ + release="1" \ + vendor="MongoDB" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/readinessprobe /probes/readinessprobe +COPY --from=base /data/probe.sh /probes/probe.sh +COPY --from=base /data/scripts/ /scripts/ +COPY --from=base /data/licenses /licenses/ +COPY --from=base /data/version-upgrade-hook /probes/version-upgrade-hook + + +RUN microdnf -y update --nodocs \ + && microdnf -y install --nodocs tar gzip \ + && microdnf clean all + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz + + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] + + diff --git a/docker/mongodb-kubernetes-init-appdb/Dockerfile.template b/docker/mongodb-kubernetes-init-appdb/Dockerfile.template new file mode 100644 index 000000000..3c0d45ee4 --- /dev/null +++ b/docker/mongodb-kubernetes-init-appdb/Dockerfile.template @@ -0,0 +1,42 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM {{ base_image }} + +ARG version + +{%- if is_appdb %} +LABEL name="MongoDB Kubernetes Init AppDB" \ + version="mongodb-kubernetes-init-appdb-${version}" \ + summary="MongoDB Kubernetes AppDB Init Image" \ + description="Startup Scripts for MongoDB Enterprise Application Database for Ops Manager" \ +{%- else %} +LABEL name="MongoDB Kubernetes Init Database" \ + version="mongodb-kubernetes-init-database-${version}" \ + summary="MongoDB Kubernetes Database Init Image" \ + description="Startup Scripts for MongoDB Enterprise Database" \ +{%- endif %} + release="1" \ + vendor="MongoDB" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/readinessprobe /probes/readinessprobe +COPY --from=base /data/probe.sh /probes/probe.sh +COPY --from=base /data/scripts/ /scripts/ +COPY --from=base /data/licenses /licenses/ + +{%- if is_appdb %} 
+COPY --from=base /data/version-upgrade-hook /probes/version-upgrade-hook +{%- endif %} + +{% block mongodb_tools %} +{% endblock %} + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] + +{% block healthcheck %} +{% endblock %} diff --git a/docker/mongodb-kubernetes-init-appdb/Dockerfile.ubi_minimal b/docker/mongodb-kubernetes-init-appdb/Dockerfile.ubi_minimal new file mode 100644 index 000000000..b5400b147 --- /dev/null +++ b/docker/mongodb-kubernetes-init-appdb/Dockerfile.ubi_minimal @@ -0,0 +1,11 @@ +{% extends "Dockerfile.template" %} + +{% set base_image = "registry.access.redhat.com/ubi8/ubi-minimal" %} + +{% block mongodb_tools %} +RUN microdnf -y update --nodocs \ + && microdnf -y install --nodocs tar gzip \ + && microdnf clean all + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +{% endblock %} diff --git a/docker/mongodb-kubernetes-init-appdb/README.md b/docker/mongodb-kubernetes-init-appdb/README.md new file mode 100644 index 000000000..d49ca4b3a --- /dev/null +++ b/docker/mongodb-kubernetes-init-appdb/README.md @@ -0,0 +1,11 @@ +### Building locally + +For building the MongoDB Init AppDB image locally use the example command: + +```bash +VERSION="1.0.1" +MONGODB_TOOLS_URL_UBI="https://downloads.mongodb.org/tools/db/mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" +docker buildx build --load --progress plain . 
-f docker/mongodb-kubernetes-init-appdb/Dockerfile -t "mongodb-kubernetes-init-appdb:${VERSION}" \ + --build-arg version="${VERSION}" \ + --build-arg mongodb_tools_url_ubi="${MONGODB_TOOLS_URL_UBI}" +``` diff --git a/docker/mongodb-kubernetes-init-database/Dockerfile b/docker/mongodb-kubernetes-init-database/Dockerfile new file mode 100644 index 000000000..6c861fb6a --- /dev/null +++ b/docker/mongodb-kubernetes-init-database/Dockerfile @@ -0,0 +1,50 @@ +FROM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder + +COPY . /go/src/github.com/mongodb/mongodb-kubernetes +WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go + +FROM scratch AS base + +ARG mongodb_tools_url_ubi + +COPY --from=readiness_builder /readinessprobe /data/ +COPY --from=readiness_builder /version-upgrade-hook /data/version-upgrade-hook + +ADD ${mongodb_tools_url_ubi} /data/mongodb_tools_ubi.tgz + +COPY ./docker/mongodb-kubernetes-init-database/content/probe.sh /data/probe.sh + +COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh /data/scripts/ +COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher.sh /data/scripts/ + +COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/licenses/ + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version +LABEL name="MongoDB Kubernetes Init Database" \ + version="mongodb-kubernetes-init-database-${version}" \ + summary="MongoDB Kubernetes Database Init Image" \ + description="Startup Scripts for MongoDB Enterprise Database" \ + release="1" \ + vendor="MongoDB" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/readinessprobe /probes/readinessprobe +COPY --from=base /data/probe.sh /probes/probe.sh +COPY --from=base /data/scripts/ 
/scripts/ +COPY --from=base /data/licenses /licenses/ + +RUN microdnf -y update --nodocs \ + && microdnf -y install --nodocs tar gzip \ + && microdnf clean all + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] diff --git a/docker/mongodb-kubernetes-init-database/Dockerfile.plain b/docker/mongodb-kubernetes-init-database/Dockerfile.plain new file mode 100644 index 000000000..ecf2e32ae --- /dev/null +++ b/docker/mongodb-kubernetes-init-database/Dockerfile.plain @@ -0,0 +1,34 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version +LABEL name="MongoDB Kubernetes Init Database" \ + version="mongodb-kubernetes-init-database-${version}" \ + summary="MongoDB Kubernetes Database Init Image" \ + description="Startup Scripts for MongoDB Enterprise Database" \ + release="1" \ + vendor="MongoDB" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/readinessprobe /probes/readinessprobe +COPY --from=base /data/probe.sh /probes/probe.sh +COPY --from=base /data/scripts/ /scripts/ +COPY --from=base /data/licenses /licenses/ + + +RUN microdnf -y update --nodocs \ + && microdnf -y install --nodocs tar gzip \ + && microdnf clean all + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz + + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] + + diff --git a/docker/mongodb-kubernetes-init-database/README.md b/docker/mongodb-kubernetes-init-database/README.md new file mode 100644 
index 000000000..0e6657531 --- /dev/null +++ b/docker/mongodb-kubernetes-init-database/README.md @@ -0,0 +1,11 @@ +### Building locally + +For building the MongoDB Init AppDB image locally use the example command: + +```bash +VERSION="1.0.1" +MONGODB_TOOLS_URL_UBI="https://downloads.mongodb.org/tools/db/mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" +docker buildx build --load --progress plain . -f docker/mongodb-kubernetes-init-database/Dockerfile -t "mongodb-kubernetes-init-database:${VERSION}" \ + --build-arg version="${VERSION}" \ + --build-arg mongodb_tools_url_ubi="${MONGODB_TOOLS_URL_UBI}" +``` diff --git a/docker/mongodb-kubernetes-init-ops-manager/Dockerfile b/docker/mongodb-kubernetes-init-ops-manager/Dockerfile new file mode 100644 index 000000000..1229ec929 --- /dev/null +++ b/docker/mongodb-kubernetes-init-ops-manager/Dockerfile @@ -0,0 +1,31 @@ +FROM public.ecr.aws/docker/library/golang:1.24 AS base + +WORKDIR /go/src +ADD ./docker/mongodb-kubernetes-init-ops-manager . 
+RUN CGO_ENABLED=0 go build -a -buildvcs=false -o /data/scripts/mmsconfiguration ./mmsconfiguration +RUN CGO_ENABLED=0 go build -a -buildvcs=false -o /data/scripts/backup-daemon-readiness-probe ./backupdaemon_readinessprobe/ + +COPY ./docker/mongodb-kubernetes-init-ops-manager/scripts/docker-entry-point.sh /data/scripts/ +COPY ./docker/mongodb-kubernetes-init-ops-manager/scripts/backup-daemon-liveness-probe.sh /data/scripts/ +COPY ./docker/mongodb-kubernetes-init-ops-manager/LICENSE /data/licenses/mongodb-enterprise-ops-manager + +FROM registry.access.redhat.com/ubi9/ubi-minimal + +ARG version + +LABEL name="MongoDB Kubernetes Ops Manager Init" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="mongodb-kubernetes-init-ops-manager-${version}" \ + release="1" \ + summary="MongoDB Kubernetes Ops Manager Init Image" \ + description="Startup Scripts for MongoDB Enterprise Ops Manager" + +COPY --from=base /data/scripts /scripts +COPY --from=base /data/licenses /licenses + +RUN microdnf -y update --nodocs \ + && microdnf clean all + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] diff --git a/docker/mongodb-kubernetes-init-ops-manager/Dockerfile.plain b/docker/mongodb-kubernetes-init-ops-manager/Dockerfile.plain new file mode 100644 index 000000000..f841b9e35 --- /dev/null +++ b/docker/mongodb-kubernetes-init-ops-manager/Dockerfile.plain @@ -0,0 +1,26 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi9/ubi-minimal + +LABEL name="MongoDB Kubernetes Ops Manager Init" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="mongodb-kubernetes-init-ops-manager-1.1.0" \ + release="1" \ + summary="MongoDB Kubernetes Ops Manager Init Image" \ + description="Startup Scripts for MongoDB Enterprise Ops Manager" + + +COPY --from=base /data/scripts /scripts 
+COPY --from=base /data/licenses /licenses + + +RUN microdnf -y update --nodocs \ + && microdnf clean all + + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] + + diff --git a/docker/mongodb-kubernetes-init-ops-manager/README.md b/docker/mongodb-kubernetes-init-ops-manager/README.md new file mode 100644 index 000000000..71d02da75 --- /dev/null +++ b/docker/mongodb-kubernetes-init-ops-manager/README.md @@ -0,0 +1,9 @@ +### Building locally + +For building the MongoDB Init Ops Manager image locally use the example command: + +```bash +VERSION="1.1.0" +docker buildx build --load --progress plain . -f docker/mongodb-kubernetes-init-ops-manager/Dockerfile -t "mongodb-kubernetes-init-ops-manager:${VERSION}" \ + --build-arg version="${VERSION}" +``` diff --git a/docker/mongodb-kubernetes-operator/Dockerfile b/docker/mongodb-kubernetes-operator/Dockerfile new file mode 100644 index 000000000..dcd3af35c --- /dev/null +++ b/docker/mongodb-kubernetes-operator/Dockerfile @@ -0,0 +1,72 @@ +FROM public.ecr.aws/docker/library/golang:1.24 AS builder + +ARG version +ARG log_automation_config_diff +ARG use_race + +COPY go.sum go.mod /go/src/github.com/mongodb/mongodb-kubernetes/ + +WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes +RUN go mod download + +COPY . 
/go/src/github.com/mongodb/mongodb-kubernetes + +RUN go version +RUN git version +RUN mkdir /build && \ + if [ $use_race = "true" ]; then \ + echo "Building with race detector" && \ + CGO_ENABLED=1 go build -o /build/mongodb-kubernetes-operator \ + -buildvcs=false \ + -race \ + -ldflags=" -X github.com/mongodb/mongodb-kubernetes/pkg/util.OperatorVersion=${version} \ + -X github.com/mongodb/mongodb-kubernetes/pkg/util.LogAutomationConfigDiff=${log_automation_config_diff}"; \ + else \ + echo "Building without race detector" && \ + CGO_ENABLED=0 go build -o /build/mongodb-kubernetes-operator \ + -buildvcs=false \ + -ldflags="-s -w -X github.com/mongodb/mongodb-kubernetes/pkg/util.OperatorVersion=${version} \ + -X github.com/mongodb/mongodb-kubernetes/pkg/util.LogAutomationConfigDiff=${log_automation_config_diff}"; \ + fi + + +ADD https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 /usr/local/bin/jq +RUN chmod +x /usr/local/bin/jq + +RUN mkdir -p /data +RUN cat release.json | jq -r '.supportedImages."mongodb-agent" | { "supportedImages": { "mongodb-agent": . 
} }' > /data/om_version_mapping.json +RUN chmod +r /data/om_version_mapping.json + +FROM scratch AS base + +COPY --from=builder /build/mongodb-kubernetes-operator /data/ +COPY --from=builder /data/om_version_mapping.json /data/om_version_mapping.json + +ADD docker/mongodb-kubernetes-operator/licenses /data/licenses/ + +FROM registry.access.redhat.com/ubi9/ubi-minimal + +ARG version + +LABEL name="MongoDB Kubernetes Operator" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="${version}" \ + release="1" \ + summary="MongoDB Kubernetes Operator Image" \ + description="MongoDB Kubernetes Operator Image" + +# Building an UBI-based image: https://red.ht/3n6b9y0 +RUN microdnf update \ + --disableplugin=subscription-manager \ + --disablerepo=* --enablerepo=ubi-9-appstream-rpms --enablerepo=ubi-9-baseos-rpms -y \ + && rm -rf /var/cache/yum +RUN microdnf install -y glibc-langpack-en + +COPY --from=base /data/mongodb-kubernetes-operator /usr/local/bin/mongodb-kubernetes-operator +COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json +COPY --from=base /data/licenses /licenses/ + +USER 2000 + +ENTRYPOINT exec /usr/local/bin/mongodb-kubernetes-operator diff --git a/docker/mongodb-kubernetes-operator/Dockerfile.plain b/docker/mongodb-kubernetes-operator/Dockerfile.plain new file mode 100644 index 000000000..7466187f7 --- /dev/null +++ b/docker/mongodb-kubernetes-operator/Dockerfile.plain @@ -0,0 +1,38 @@ +# +# Base Template Dockerfile for Operator Image. 
+# + +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi9/ubi-minimal + + +LABEL name="MongoDB Kubernetes Operator" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="1.1.0" \ + release="1" \ + summary="MongoDB Kubernetes Operator Image" \ + description="MongoDB Kubernetes Operator Image" + + +# Building an UBI-based image: https://red.ht/3n6b9y0 +RUN microdnf update \ + --disableplugin=subscription-manager \ + --disablerepo=* --enablerepo=ubi-9-appstream-rpms --enablerepo=ubi-9-baseos-rpms -y \ + && rm -rf /var/cache/yum +RUN microdnf install -y glibc-langpack-en + + + + +COPY --from=base /data/mongodb-kubernetes-operator /usr/local/bin/mongodb-kubernetes-operator +COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json +COPY --from=base /data/licenses /licenses/ + +USER 2000 + +ENTRYPOINT exec /usr/local/bin/mongodb-kubernetes-operator + + diff --git a/docker/mongodb-kubernetes-operator/README.md b/docker/mongodb-kubernetes-operator/README.md index 4dc971f03..8335c1d79 100644 --- a/docker/mongodb-kubernetes-operator/README.md +++ b/docker/mongodb-kubernetes-operator/README.md @@ -10,8 +10,16 @@ CGO_ENABLED=0 GOOS=linux GOFLAGS="-mod=vendor" go build -i -o mongodb-kubernetes ### Building the image +For building the MongoDB Init Ops Manager image locally use the example command: + ```bash -docker build -t mongodb-kubernetes-operator:0.1 . +VERSION="1.1.0" +LOG_AUTOMATION_CONFIG_DIFF="false" +USE_RACE="false" +docker buildx build --load --progress plain . 
-f docker/mongodb-kubernetes-operator/Dockerfile -t "mongodb-kubernetes-operator:${VERSION}" \ + --build-arg version="${VERSION}" \ + --build-arg log_automation_config_diff="${LOG_AUTOMATION_CONFIG_DIFF}" \ + --build-arg use_race="${USE_RACE}" ``` ### Running locally diff --git a/docker/mongodb-kubernetes-readinessprobe/Dockerfile b/docker/mongodb-kubernetes-readinessprobe/Dockerfile index 17c590526..a2f3159b4 100644 --- a/docker/mongodb-kubernetes-readinessprobe/Dockerfile +++ b/docker/mongodb-kubernetes-readinessprobe/Dockerfile @@ -1,6 +1,11 @@ -ARG imagebase -FROM ${imagebase} as base +FROM public.ecr.aws/docker/library/golang:1.24 AS builder + +WORKDIR /go/src +ADD . . + +ARG TARGETARCH +RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} go build -a -o /data/scripts/readinessprobe ./mongodb-community-operator/cmd/readiness/main.go FROM registry.access.redhat.com/ubi9/ubi-minimal -COPY --from=base /probes/readinessprobe /probes/readinessprobe +COPY --from=builder /data/scripts/readinessprobe /probes/readinessprobe diff --git a/docker/mongodb-kubernetes-readinessprobe/README.md b/docker/mongodb-kubernetes-readinessprobe/README.md new file mode 100644 index 000000000..1dd56bae8 --- /dev/null +++ b/docker/mongodb-kubernetes-readinessprobe/README.md @@ -0,0 +1,10 @@ +### Building locally + +For building the readiness probe image locally use the example command: + +```bash +VERSION="1.0.22" +TARGETARCH="amd64" +docker buildx build --load --progress plain . 
-f docker/mongodb-kubernetes-readinessprobe/Dockerfile -t "mongodb-kubernetes-readinessprobe:${VERSION}" \ + --build-arg TARGETARCH="${TARGETARCH}" +``` diff --git a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile b/docker/mongodb-kubernetes-upgrade-hook/Dockerfile index 362831582..5005f5801 100644 --- a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile +++ b/docker/mongodb-kubernetes-upgrade-hook/Dockerfile @@ -1,6 +1,11 @@ -ARG imagebase -FROM ${imagebase} as base +FROM public.ecr.aws/docker/library/golang:1.24 AS builder + +WORKDIR /go/src +ADD . . + +ARG TARGETARCH +RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} go build -a -o /data/scripts/version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go FROM registry.access.redhat.com/ubi9/ubi-minimal -COPY --from=base /version-upgrade-hook /version-upgrade-hook +COPY --from=builder /data/scripts/version-upgrade-hook /version-upgrade-hook diff --git a/docker/mongodb-kubernetes-upgrade-hook/README.md b/docker/mongodb-kubernetes-upgrade-hook/README.md new file mode 100644 index 000000000..9205118c2 --- /dev/null +++ b/docker/mongodb-kubernetes-upgrade-hook/README.md @@ -0,0 +1,10 @@ +### Building locally + +For building the readiness probe image locally use the example command: + +```bash +VERSION="1.0.9" +TARGETARCH="amd64" +docker buildx build --load --progress plain . 
-f docker/mongodb-kubernetes-upgrade-hook/Dockerfile -t "mongodb-kubernetes-upgrade-hook:${VERSION}" \ + --build-arg TARGETARCH="${TARGETARCH}" +``` diff --git a/ideal_release_flow.md b/ideal_release_flow.md new file mode 100644 index 000000000..6ce43a0fb --- /dev/null +++ b/ideal_release_flow.md @@ -0,0 +1,66 @@ +## Release from master + +```mermaid +%%{ + init: { + 'logLevel': 'debug', + 'theme': 'dark', + 'gitGraph': { + 'showBranches': true, + 'mainBranchName': 'master', + 'parallelCommits': 'true' + } + } +}%% +gitGraph + checkout master + commit id: "A1" tag:"v1.0.0" + commit id: "A2" + commit id: "A3" tag:"v1.1.0" + commit id: "A4" tag:"v1.2.0" + commit id: "A5" + commit id: "A6" + commit id: "A7" tag:"v2.0.0" + commit id: "A8" + commit id: "A9" tag:"v2.1.0" + commit id: "A10" + commit id: "A11" tag: "v3.0.0" +``` + +## Patching previous versions + +```mermaid +%%{ + init: { + 'logLevel': 'debug', + 'theme': 'dark', + 'gitGraph': { + 'showBranches': true, + 'mainBranchName': 'master', + 'parallelCommits': 'true' + } + } +}%% +gitGraph + checkout master + commit id: "A1" tag: "v1.0.0" + commit id: "A2" + commit id: "A3" tag: "v1.1.0" + commit id: "A4" tag: "v1.2.0" + branch release-1.x + commit id: "B1" tag: "v1.2.1" + commit id: "B2" + commit id: "B3" tag: "v1.2.2" + checkout master + commit id: "A5" + commit id: "A6" + commit id: "A7" tag:"v2.0.0" + commit id: "A8" + commit id: "A9" tag:"v2.1.0" + branch release-2.x + commit id: "C1" tag: "v2.1.1" + commit id: "C2" tag: "v2.1.2" + checkout master + commit id: "A10" + commit id: "A11" tag: "v3.0.0" +``` diff --git a/ideal_release_flow.mmd b/ideal_release_flow.mmd new file mode 100644 index 000000000..c883bac17 --- /dev/null +++ b/ideal_release_flow.mmd @@ -0,0 +1,28 @@ +%%{ + init: { + 'theme': 'dark', + 'logLevel': 'debug', + 'gitGraph': { + 'showBranches': true, + 'mainBranchName': 'master', + 'parallelCommits': true + }, + 'themeVariables': { + 'commitLabelColor': '#ffffff', + 'commitLabelBackground': 
'#333333' + } + } +}%% +gitGraph + checkout master + commit id: "A1" tag:"v1.0.0" + commit id: "A2" + commit id: "A3" tag:"v1.1.0" + commit id: "A4" tag:"v1.2.0" + commit id: "A5" + commit id: "A6" + commit id: "A7" tag:"v2.0.0" + commit id: "A8" + commit id: "A9" tag:"v2.1.0" + commit id: "A10" + commit id: "A11" tag: "v3.0.0" diff --git a/ideal_release_flow.png b/ideal_release_flow.png new file mode 100644 index 0000000000000000000000000000000000000000..55deaaf5288516a802f85e5ff91f8610a2f20274 GIT binary patch literal 14467 zcma)jWmHse^zP7zlysL!mw?33-6;anT~gAGfG{F0CEX=b0@B?L3ew$;bPfDpfA_9+ z*Zp+w`M}KLoH?=I9nXIDbHdeC<*+cwFdz^JmV&&rCIo_*0lurDBZ9vSOS=}}1Hn~O zP7+c+O0f%p&_EQVCA7TK4>PV9Kj z*yFwk-}8hYHumo|4t?9#(a=hI^Ow~n93ACZkme{NyMryWh^-I($W)C={Fdm}RUg|K zWjt>_Esw&iH#~7vc*O5?_{?u8iz&XjIX+^_Gn&qQ}hij4dmqfdA8{Pw3d#y(@3I>dSoxcXn*l)W-URJT;jKFsb-` zFAoZol7GzFV`gsZZ@75J$k)7^=72_pb*!_nuxyQG9WS+g9oYigw{I+=l0iWV1{`&{ zxw+ZdkPvG)I-k=L#j|It3Q8}>ZES5DoL4)`^y|31h~ftf>KqN< zzki83cFIl^Q2@4Y-xL|*O-}^)(hT3e4X@&FcVB$&xZJw<>(}Yy!wK4OCJ8Zh%xtw^ z=%S-jqxad|ROxZ+6_piE2ESA09WpRDUEEHT!@M-zGd_lq{^9S%#qC>faroBP*Xx{? 
zm+nUR`(mj34sz1?oq7-6wwhU3NYmrBJU-mj>QtDWbyrqa*1nrIJ7c2;29~Cq$XfQR zE-ihU##or2ucfUGc6?UO5@QD*L_3D+Pod+HAR#@0r?pT7mS zx9Qv2bU{}?>mPwO4et9JF=C>PRwE)ZBHY|AJ*S%*8XEX7oZmiBM@Aw_ROVFuJz4FF zaa!f{ znYq3+e%v$lA&RK=oi6V!EIbT+Z0agIuc^TiyIzT@Xfx6QCl;NJ{4!wS@9!T7pI;WH zPVzgSkPEyxsK!Z5e$MGQQ@J{8ok>bb2|v~`_WxUutf#NPs_$sTItn-8B}+?9jiwax zSXq%!`$aA2H{)3Mx_C0^;lVo7pU8W^cG^h4(W|P%#OwV~(q>{}|0`=P{`N{3%B;Z2g?De%VBC-YQm&fz`@BXlDC$Q>#Cm|=-*VA}^K2!B7M)Y%Y ztttF=FE#ZuA!~O?h?>X^y`303JGh^V{prWQ4|k5zi?n&UbkA|9p#+qahv_cB1LTA} z)JwFs_2UPNZ*On&Ir{@jP1yqK`CFhofE`rjnBx2L^79SB*q z?fuuIc7GQMu>~^gSFWh(*J+mN**RU*ol99gYXDAq-5XU~Tl>3E1s@}ngZMns$=%2h>cqx}G$jZIN=v!9nmw|!On0J5KGlwynD=I2>6jnXLv1%;EcQy7(I=@jth zy7@VA+(<%nt{Z>RytcN-?!W8m6;d>~($dlbMK9Bo+XDXf1gG#IK(2@MonE|P__JJ( z01DiFv_tN&foar4<)1Sgd7&TX0l8`fo2ty8AwzSjK&0#VI?Zk|4UPDd98 zgCRi1?P}@qo_I&d77T8>Kp>dVjIOfwKD*j4^7N%spbCfq54>9P_v{M88XchER?M3) zXm#n?BwlKCS7M8+Z+>MeD_ExgbM_F9gamaOLe03&RE1U@O;r{-Zqy7#mXQTPO&$HG0cBNHk5=E)$&xpv+9O%~$62bPqe1oc z^@Kyof%WFFE_Yz3ZPB2onvRa;XNyia)P?Fy^GjY*1)379><_n>^FA#BV>-sP@mM%G z@dFn9H{4%`!@^Lnll024aj;Mz5HeG{4hts|vi6(xAA2C9>d4CW{%*VOxjEaGz@^6v zcXFK9*4Lk3UOryE65}F8@kU|^{gRTh>o-lCm%Y@lA9yn~SNksRcQ7bc+06*$`$r@o z>)svKaT+yWU(P!Fd zGPU>Fa>z^AD8RA2ORp=PA6q{OyQj`~fO5HS03qw*GTsuWo1+@r-$u6=6*(;M7|&(GaB0TC`)7@bnMs%3 zy-^&-tkD(Mpzybf`JCV7$>C5!H~gj@>x^4n3i9)j^)h(dYIuc&?qULO@zWtJd>Mv@ zH0Yv#uh)BIWK45%03PYAs4XdB27r=L=&pxdpO*}a?`6XH%5OC>k&EAIpagG$v^wfJ zKQ}*R=Wb`WSCr+C3TfXDynmjkxOlmhHg0G^>j~l5z)_ZkUsME^iF!J2k7lT-sEm42 za@#E}G+?44VW>$Ls4oztwV$Kbmp6Tor~e|KA4SfO2pON5JYS8bY!Rc53Tp8Bo2y;A z*x<_Ohk^`QIVv9FN#hrTPlB-c7aR=+0nU#PGcz-WO`gx4;P4Djy1d-W!$qOOiS!

ZP?@nUHpsB2!UJ&R@UV6wSCii|+gVwrNR14SmV2osx%n!0V9 z{gd&-Y4Vu;lJJYQky?Ov78);Z_bZy5hI}(MwU;F_`gJNw2gznYl zW$V=v0Bgpc=6E3;Tq(-ay{JQVu*YEAXM=F2PZe z;1uNB-}1=!80B|k{>pL@(`rHGpbu&$P9UcA`kbZQB$;-_(#|A{rU ztwP(`)zx6&*fE^KP(3?RkKqC@L*h%rdW#<`g2P`&b9#1mc6!Pc&2m5qY%EOWwAvYp z7cR}+@vTn5k^paL>~TmiZ*Xu>Q&V%t4SEVN{)S6KczF2VzkhWYbo0>4?p%MD*DBj{ zohDT)9PI4;`t>V3_h_XaCQbL(tGRw==F68aw~dYA3G@EpE?%DfVsTl9L?3`B46gr0 zG@dTeV#ykdiHR8-8EFBYJ7o3pBkaZO2>W)V85ap5Az^SyrEq4432>hL{QU0j?ucZL z28HnZ0c@TyN0+V^{!fvSY^eK^uof;+1V>s zkMQO3Pc|Q7)6$xJroQ>#T^1A--OUrF4?Lr$7PB5r#}U15Kh_UIN}rsZgi-nIlvcFr zRhk5~IXtoX@#6=;Yu+Z04+jVKf%C!8`ts$p$jC^U2qOPlYqeeEcTLVTE-o%q0oSYW zyUPZH_j|-M_8uCht66mpzNTUb63*R(lRw&{UZk7<>zeKaMB+~iFv;NEGH*NMn(pNJ6IN4T3Qm}xB(O8hO*q) zXjvJVmMZsm*4Eaz46PvJ&A^x5*qc?%IQikRG3)ACfRD75RTm0l2ROP? za|T8JjJUgdvckd^y7$q=(UB1&de+XoC+Y zV?@b@sMIpX$x~&zIr5UVIM1Ty#l5^?CSiyVH_9iA=m~Bqs%P{G3EJJWp)g*Rnc_)N zHsX!8X|8PkI$oEK6gPrU@kK~SwnmAcsF*g**TXja2utp_ z`f@O^GeI#0GfcY_Fae07ViHX0(dbp-FWvbaKfS=K()~_BMu&?Y_v~2}B7!juQ>x%^ zV}^@!uw7zfV#;rx#>V2dsP?V@MfMA@vt!|8=rz_hZ1Hdqr+cPEFPjGd>yNxV6vUtp zeM4hIXUPfxU!r!eJp>^%7*J8*nN{fmFq+jjG&Jy@v=Sc)6IIGzs_TQUuz-L(eO!&6 z111cg6}KA$ndhEgWYX=7;Q;F^CTfZ@(aDXUXyE9<>;t#LOJ29TONT0HZUF&+BYFS( z4KX=6HF5G9OEf;;-{M4_ zq3S6R{7IfZ?BhrElF1@|2bY;$7=keHf6;|msfBkw9BNA4nfDvjow$zfnc=U^W#{MTGzhk#36kfI7cYJEZ(5oljRyO=oo70gAj0Lrq@YHX(8I@zB=ZJl&l6%KJ?_u*S7$ z%F4Cc@{?i#H)*7?)9hN0bGj#mfLrnYDCL!_DM$<8U!l5(afkc+ciY6qxadb#e{Fd6 zv@A9%0&bL&Ef6r;@Cu1?IR0Lb zF*pA*YyWWlTdh{F%yvz~1{48$JY&Cyn_@tQeTNzv`XAo9UxlWt7LE1y>(;uw;9~&M zN}1&c{CS~Xg(Xq3Xu@djM^9IY_^@+J;3DW3E?2~t+Zow-c}3W%zCdNjkobY~7#z`9Rsud8ZOKK)chBSH zNif$lO3FpuZ_hh45{9_-Q7QdPptLf5(J zJt&Hd+#$8>uM@c5D>B^*{4>RXgOnmYE-vWj{K4V=e64TYgiUi1KUd0#`^5z_Gc!W1 zWtaO=tb9J&bL@;wBkTznW{KAH{=SHz`IBHNmZ_d_slw~ea2PfzCMpho+Q=3-JM4#y zxUZa?j%OV#s89lz;NlWW&ARJ~O(A*^Z~_URuvvIKtYgwgHfRYjWQh0H1^)JKF^Zgo zjZ94Mjr*7fDsqrG$@K1moYdJ9M%B#8=osIb(`sEdu?&&@p41YBm*2FF%gZkEGY|+} z5*6K-9Qp~eDj!!Q#)u`r4RkFB#(*1$k)NEM6=Zh$&@sycYrPvC@d`tdjkkU52w~Fa 
z6-2aoyzM`6w)SmmD?7;?yE;gjba@xi3>>1h-*_+G1`)jT^12S5uS~cK*De)#Z75O_Rs#IfZ;a|(84BqGDP~yc=BDWVC}~Q{3>&1uZF6vQPLK2F8rN54Z0K`Ck0wKwc<-v} z2+162S?O#7V<#^!-SsJ>4%^>_G9O`iCeKW`Q^K@Zj=4W~S*dxT<-wR>Va+p@()75x zY}z(9Ok@lI|Ep9!He5nFOt{H>W#~XVfQV4TqggS{YQY`S(T<5><|;7GANj6UN=hpI z>(_}@(^@@BGP1$VGyhcf_$^hn7*GBA3#g%!F*ODe{7#;iv#?;q>da#PB6IyBbFvD= zq`?Q6mv!Q-87STwsn=Jd^l6Ktx!QRIDagnY$I*q{X(LR8F2_8BR^*x`>U-2uFsP3y zAn9=goJ^|vk%q}X&=i9xJFBA#XyZRtEh0}CD6A|855%sb;@Q}E?sG1}v zs%w{94F%p2rF9`loOCw0ZZ(;+Ih2+0{HR{X$2;)2+dSLuJ1&edtB$Z9OHwEpuUp*R z157ABzEqBS0!B~N{`BfY2Rnp`gE{5pob@s35S-?fg4NWfny?%Bz#_oh?ByFSt1tYKSsJ8ps#H8u7h% zwB5FB+O}^=c>_cXwTmn*=vA}Ir))UVg%}g%XDG0P#J|R6U*a4P1Z{m|QUCmJMP4$EgvP7(E5k5Nd_SpCeZ_N?x<$!$j zQbpSOQ)EEzDKog5nL!{cr(Vd2rT__IV7*x~Oc>hyOZWVFB1TW8B1#VtSDNy0HVqR- z$4);wI3mj=)k406H%-|d8?9!3DlZQikyemD!Sx6l_zQ1si=~D#o!m>`(e3zp)C(p#fpa6wQ!`d>^qb)<1>?25s!u! zh`S?b2VvF;ij(k2_VQJu0mc1WCW6NM13{+D?ZV%? z;BG1bjW>T8 z^H%g3Q?v;Vb0P$y+(!NhjqJ?_pLvg_48?*%Yb%VkwR0$CO%_a^4mhH~hD?8rEubzH z=|5(LWOiI3;hb@8?L4#LAbI)_FPuE>b-eb7q%;|VIJZOtA;(&R8*zu?h3w?r5fj|) z10DbZk?%I+P7yBD_Vn~Xgk&cS-BeX=mzLt=;qPMhRw~L^b7lL#r@*$B&#`fEj5e!g zCQ#%}qkKkMk}y0+Y?`(7PqX%$m^0y8iek6WDPO0ekkfKuDdVr|36PKUoM!yAr1b(=*qw+9bR&Rm0%jJR~0PwSh z7nsl%mm8M^?2OkeiAANQjK0mU!le}zRArBw`|{qrF*?}K(=1W5pZV~Mtner^v#}A{ zMbGnDv0!U+!y>o(lmfW}fCNE`%8e(Wl^~&x2~#jyi7IcpyjCR+2@cl${kus>Xj79p zqoaRd?AYdAyh4G7{Ut9^X<02H1Om#Aiye!WO}*bkrQ!=J5?mb26O>>ZK3^--)kFO{ zdb27`4m1c29XH+-m(*1Vm#8TFi%lz~p?pNL@tyOFchkAnQ~!8L7n@Z65{R}EHYN*-!z zY+{^x7ZVvNeZf}D$Y+1O?9`f~1yO`%JX|-r2k|h4`ZNocPGLkfN(Jh%5izYqE;`of zm7Tbg>@0|HxHm02a+BG*H4>L2RC~GN2F|uFx;g;1Zp>k6Si*Z}tFu69o73pLl1Gs) zj{DcIKRM~-nk`l~09 z4~NIC``$J((5Rm`B}HC4m^JMNjV=X^g}qX?uwEQ>2Rre5(|zqRe9-Ak8~G;+4)`0# zZ{C^rrKYBdT%ZytzBgU{B@V45ydbl!!iPXeUjEYpP?}kAlQRFR^kl#bAI-%s$>?&Y z18ABX-kLIkZ{&Sri4nRMZuTs^yKlNoN`Ddo!jvJzFT^^8A1@ABdEa0Y0o~v>m*&fY0=1a zHCXK6@5_T78#W|J+`xvn>(=?@MauA2_?(Zr)b=i8mhy=mQV4N=VLn&NaBHjRd^bWV zshiJxjb~QoPB>mveoJNu70L3cA6?;iZVxrkf%7?A9755ZzPORNrw2W 
zfYSl#$=*gvc?DH4ej~cLY-H~vaFK0!$)Xf%XAenjZA-PwyO-z>udh9q138Hz1_(YIRn*PeOVhpl zG*B~pNWtg3yVgVEMw~9_`QZ0^B+V+K$F8IVoh z@WS(d?*bV|xHQQ9+NFtO>tzAgrbN()vIW=k>%a334nTTf&nH#Q0@xvWdQ>zNLP8=U zLIM*fO&!mxhO_OQ&=jARCG21zC{Qf|y-fGVkN=weOWQS$SsHYq4~7cGeqkBzOHiev4vN zO*&ZI`VfE%Spx%4Gqc>hTpQk0>M}A?%EUncY24i0cv)CJljCTzLGpT>VYAeCV+OyLl)dt z>aicjVpg5OHSTS`EIR72ByxwEnV7U&aR0sW!%iEC3jbTd4@Q~b2Q2Z-iG$Kw^K z;KI9;(l|lUuj2o@iUm&(Qa z+NBp8C9kLV_PF>5_>s}jbU2JNQ!VDHQxyDZkhqCT+E$+^=u+-iVbox#Ys;$#7~T(BTlzrP)8a!2`7k^@3@I7b zE4#e@3p65WRs<^9wry zDorCVba0$4hSag&k&6u{r7^-Ga^F!C&1$n4z(lZ89$v5e$f$aD zVS1XFn)v8V!6@%skaPW91w5tr5$p{11$G4rjnarNxMRH+UCK*dvbZSqZ1b|U__HOdT9MqsA>$F7zyZNzvS6#NhKH2_rBtPRPj~p=orB@hYO27Ce9CEe>Xjc3M(XOZ z85xUZivZs4&`)VR#9d&hej4xiOr17bgX`3^H z1)-v(1QNivIy#<*bG2)0W?p6}Jhs5uQc_aD6RdQEl$4ZMReO7RkswR3wFK$1iMnn@ zcH!-Q{`|QMhBG}iWocmn=4|3+cEimLkQ zIoa7XfS?r+(51lzPX*==9UT((%|X(1f*@2}Cue}*C2fQueqhF)57reov^fsd#%%78 zcHN`EW@KO}tgUtT^D|&08cX9>%Tc$f)x*qR97%`5x&S!?iXnKyG!|gC#;f{gz~fQT z(7;p*%<(ido+bS{(wxjOod=_l;^V~FSaBL$3+}fH3Iiu6l|5z>wcKj-d*kf=>pO0Z zO355xPzB=AMguzcD*%j|L3(Ubh)qi3(#o*Z~V?g?qpFw>Qku20w(*Qm;wwz4^~I; z`E#Tn7;pYL@6FB4IMa8<0x}AUk1P*baUl9ywhv~jk&ux3SwWblgW3A^b-i9$7!f{z zWa$}U#_pcvV_dy2R<&0 z;{**efvL&d97fA`pf$z_MdT;C0E0A(&k+%4WeVfMGOn&Xrx-tCrP!+h#sVZ%tzJe- z3Nv1Kb`~UfrM+ETPcL` zxRe!;#;TW22?`39m6g$;TUl7}zIr8bK_KFkt~%u3usbk3Ed3l6peh=?@Nsp7$O)Tapd$u)9Yw`oHlP^+0vF)X!&zc&KYtR97RScM zo}Rk!-tH?iRJXM)n-1B7RaI724hRTPS07(lp@DS)g94HntjOC#>ygw=*KNcoS}DqA1vUdwWYtb^-`x`ba`bDj4|S$J@xQ%*j$Z!`_~)#s^!H26s61)zhd4Xol+X?ydy3 zClF3b(}hPzXM)oJ)faHs?5*cDO#ZvS3T(WPpcw$#=M18R3^S!Dquvw);V>xCx)KNw zd5|Xt2L6_bsH-jBoL>RID^3Fpyv#pYs~UJjQ&ZZA9;oLL#e!h;r%z0*tY;3iSvJ@I zC>Kjh6ciL%m>#XI+^C6Uhh1)7p1;i(HJ~K-1(OLaEdot|JA^>4Q!D_*9pF`92VOE2 zhIk3MDihYXmqqgQnxLrwC_=GGvn2hcc`p!XgQrzeR0O&A_U_JGcqvB;7MmMwG+ff( z-@meAl3(*r`)XyC33dN0O*fJ$A|xULhK4~jGqbZ#4%dUF{clb;nPjr_)R~?(sE9ncOhR})+ES78LDEw zm*2bCSa{1({b_K~aYW8zaZA^KGNP34UxilJ;9y~mWC&dWH6N%+hK7bfn<1O`%tM?2 z!xC7i81;)=l1D@j9RUWI9SvCIYL@7g@x6L=0jm88gc3D~KBFK*GRMNw(%sDs9XVLY 
z^Jw_%%XQ#Y>cI#LBay<0B{)~Q2?4?N=mFcp2{bz`)uH;0TM(zCX{|N zNt<4$Ry$be(3bV79RaH$$OllY!?)*FS6%>kLO+XLoAeu=0zoTVAZRvOn zjw*(QhJrakzk?kwS^qzK$5 z15WtnvcK8v)R{2D&K&P)^mv;K*tj z$N}HIn%j<$D0~hX;+gsQ{?IaD0{P2Nds4osnTF}|DLWo+;5kr=LMM%Hz4$=~wIMVh z+t^mmQb_*bNg3W8N;W5&>CTx3U9+o;i=CYv1u7=caRDJ$<;#JbX;BT?(6BHL#?{!0qvOS;etH-T6W($%LqSIAC6_2LAiU104C7V}dST1kKe9+oKmWOo^_LEo6+6RdPG1XIc;g7fSAh|oolkJtD z{U}}sT~zEZ5Jg`yGQis@tD{)Gz*T4t!Mui+7NNoPiMa+${9lRyG9#Fdo?02Nr3E@*z+1#p=)=Q5rOLLlIMm)}KdeZT0@ zY&|(PK>suQ!;*wkM^>@PRW?pOAEbAx7+8S<_OGLK4Isnr2u5M@MoLzq%l<^hsG8mn zdh4+cz#i#H@A>bQ$qu3^z)Ci8f9R0Ha9oNOVIvf`GCL<941*;__=r;?oN$#HMgp{ zmi$u?uYMZLGqCxUG#t`=5f|RUh+kamOvbf%Sm=H|I5-FnMhr~(cu?C#x@oK-cR3gL znlOOC-NIA7K(ixsp)EK10J~G4<(_p9yTv1)zg>JX!}oA}MpsX7ZnX;rbhCh9_*S~i z*qJj8D$(|E%5$O!PQylC5c-@K8IrjG*d9w@_usXUNeN)~1IBYPgd(5nKix z)^G_H;cd0m%``m>;bvuJC6QI5yy2$m!I2o@djnf%d5U^1Oa#`=s9by9=x&XK}X>Md=QW<4}56iioOa8;mb0V zh<`AdbV+*U;Ht>^q%AEiLF-sFwt#&yH8lnHC-Ftfui1BN1nDwd8ZicL!nublZwoq9 zP?Jtjrk`&>@Y4NJ*D;H`3i9DGgH60@4lAB}k`qw{&;IzW)BZ`+dLd ze9+;}og43c@|^Qbn4-KSItmdA1Oh?-ASI>@fxxza_s>YM;NSF!ug~B=7$;>(QAqg+ z$u0y!4*4MVPSrjAAj8dF_2bj?XolXKe(LxS#}uttut5wILDBxs@Pi6&>Qx86GrBG= zue{vK$oEjfQ33*_ALNbvF$mf6WyO_UQ`v13a9q$k4|9t9zYC-fr z-)VTkiT*!dUJO`2gpjw7FD#UdS$z30I66AIvXb@fXAeoRX!gYJmk$xW8>j9qX?kYd zX{H=0bJn!v)YxITcz6<&@0x|p`ZgU~^%P{aY`35*L2TKIxadeoC+Fu>aZmH|11@mSjACQSI-jks;jF@^8ZPI))RtGoG+gtEuv+N8lIE*@gto= zrnRLlEjb@vM6K;oQyNO1B2-ITyWVbP#DERyOY!&b2FooTpFe+w|KYM=%$>%_$QVN= zygWAcPQ>vo>2Z1U5m8J`X1_^saWTL1POKOu1v#HkR=eZoKz}?PPVDro5hqbeNr~Ux zE?tVOgoq>TXM^2pQEy*ZHB^@A&<&xAv^@b1pjNI5b(TCd9J1-oSwg>uo)+SK@6S{;JGCl8)A>yYrmSX2 zh|_gV(iT?7RG_%8U%$3%xi!<&Oc9uA#zZleB^4wSqm6fQb#1$wxBB?;z3Krj(7h3L5&Z7r?zq*r2K4e<+0i$7Ug%-(gPYL!ht+Lh_m?y>qkoiBQv zA4EUz7Zq%cW>t(?koxXq(+nnQGU4qPRkYnPK-n-M|5j&nOtSX~A5+ZqSofJVMV0H7W%P;Lc-u?~l#15|fhKpYHbSdfQj(N3FRhb}q_~ zju1)Re_c(23*CNtv`dt}oXw39YB81Zc^`P+|4t$YJm&m-vwc+71`i)m(N1##S2Q%V zhs)F6>*JO7!*~Vj(WXA`jmI%y^Cjiwz8!~kf-|BNNN0P4ti3(mWllQ;ua8A_n_bI* 
zdDN~5&y>$ycH;vx)nU2*v-aDS8HcKf0u!n2@y@DdZpeV`>AoAkx&6sg)~{>F-?%a? zzqahKHxMz)dyl?MjhK*d*BK8kYgFH5Mt62QjE~PFo>uVuVgf_>0y9O{wdE$7&*|xL z(MjgGC0-(zS*ziX=UWB_X#$cK$Eo68Qopx?f*HTfHg0QrGV}BE$$W3tiVCv0iK52F z#_Cxnr3x1D2P`=Cnw)n(Ba-sE+;N8f4vPwNNLB1e=lk&C0|bH+Ty6O1`B~)TvCZe* zyLZv*Le~%HGD*x{M~&Oq8yl9Y#fL?g`_mcxPG|5?2qM7RD9M{S=u~hLNa`F!H=pMg{lmp zAt8Hv`%1V`;o+Uny-_rtt)#>gZQi$MZYv(!=c7W#RT{1{4Xn5jgC8nKsHWU$^tj<# zW%-KG1)--K5FS9(tl2QFF(39c+*gDA(q&Y)ZMwHDlQ@5vE{m3ngZS^~9=J7xDw7O4=Yr7GewmlPU!Eoi-)Smmlj7Do#ByR|i+L8Ml~ zSW?2X1T5#bCoA1CJyEKwxHHSu_EL*$N2iQ zdb^Yhi`+4KdV0%c;4HkZ4-FIdD`uiqnoK;w1N{SM=jR|BRJV=?6M+oo!gH)*_SGnxdrvfqS*#>5&cGq zbFz{oiVU|;PqU695dY%6Pm8`NZ7t8hrMtWJ4;DR6*Ts8lY*97?l#qkD5Y$vvTb?@* z%||nRCgx7OtPLhto{yj5-@OC7?($!%>wAdjjvd^c$uw`^MbJ=Cz@**sjenQ_V%~Cp z)LbB+^<>f;Z8cNQmsvZgM2)@MU8W6^NMx(GwnLnaZJOlql)Ap4%iTrY;Q}KA1IcI; zeFCa2UG_+(;QbRKSpyaBmEP(2AZvwQ?Z7l)gW~%#HOZh*I0$&?2nYyY?lea}=YO^U zHl|jg1$-<)dh<#_h|l|`a$<0gnCR~QK0ZFa-e%>JEXe%CpuCDvb)AqeV%0J=IsmPm^qZ`?qomP~7EK2dV&a$42jkI-3Id3`ftD zG*Ld^G3M-^_d>lvozo9D;G!f_Ax3nQr73SxWND`v^u*vYguzhF8}zzn8d0eEj|9J z7Fe5BjprqCMZ53Xx_GNzyX$bO7q=Y(Bwfg9YdlZp+2eYtS*_gJn(e{|9=79PuEx#x zR6MT61GBDyDq({jBZx9i>h|_F#(M5-i`Y&PT5d*~A>byTr|ffa+B;2EH+lsJOl`?; z1=xB)mM042WDJokA)W91OonyL;wSq4%u1s*A!K#9ei;P~~B2=|FzPtO^mPGlK zf25d(#_VFF)AiBZ8>=nZ8FeiuRcGgm#Q<2MDA)Dbs;_U%w}#Wqhn1q|dNN4Fy|IpA zu&}V4wnvt(m)s@_6x@9B1Vjzh=bx9h-{orLeK z0Fmsi{mE*j)}eg#{8c|4M~d}vrl69NQkL%>y;A!qUtdFCS3ON_Z7mlU*XY~(11VNY zq<~5drsh2S(#kpOefnlb8wK_h*;YNfGnO6*4F;`@p7nUG}`&| zy=WuvCR9~KDPvaimeE|)RzUVV-)uGb0yw@z5if*1(5*VMDz-Q8^^jm3* zL&3TZc69ipDtdc=uP`#`wH`+IBv_qD*v}4updkdR^sW+==@P;a;&8lqlgzArwY1zi zr>QB=YA-A~XPUNfF!noWzS(?KDk2%@dFPgvkB^U0z4YcP%}!ZaIbF|yE}_gA?0PF@ zDYD$;Qc`bxv(ah7@%#7h4e(lDAJK0Rk!+m1!S zZ;Dg@eiIhW7G6q9qaUb2qJ}0WCTok0%*@PP2MTbIZ`?Lo5)vUBI`8@l4Ql4VQsU6c zBB7!-J8q(|hdDMb-inKF?1O8RmX=0AMsAdz4BvfvdOF`4b~@d;B?r-llarH%kB^P5 z^5@TlZ(_IJzVF__bclAmqG(lcb#=9{urM+*5~lcV^nrT3LBp^oEE6Rsl4GPZwn 
zX=&*_!_*Ct>bru8;EoO)Y@FVQ5!(0<1rt$M+7h{AfebQcDLpap63^@RQ3WPO(Cmc^)JDkPMq=VdRC5jAxh|IFp(rn@`aI4Np8 zl_J$L2^=CKqF023IIlCl#jYmCLLi&3UKxe`C~4L%R|L(ZuyA|fmwzvM9%A)KGRNSY zw(RWI-1n66>=vA5^7wfFeVZ7^xY?PuBOx()0cCS^LtI=!LQG6Hle1r&;=_;ep8R}p zKBl-oyUw7*1>YdV!onve#wXq{E0i7_V9Ul86r{$cCnqOoprquWq#X4U4eMUdPr^GG zGiD?Brl=GbhwR`mE2deYsHH}W9nmb7lbJ~h5h2eaBgMnRd6l@`Dp^s|+?-eYYETIc z;r;bOZZZMcarbhHRbHM^Rf9IQ^2G%N67V9Ru&?i=l8tq%aHO0?78)kjX~5yauj{Ts zK$|dnHR9gE$yxvN;9-w30Oz$6#~Y66GCeu(CYtRL-Bw@S+^PAawV)hmqICD(E?2vL zN;(7q5wZQ|Os2KtyK)f|^wYgipHY=8?S!D)%4=er_x7rE@)-=wj1v}|W;JtnU&E2R zH%^Uma*wsz(pO{e?(W@uZ}a+1l+iI31l_E;7p%Egnk`ju*TnbwRSFo2izh$Tb{JH? z3YW-rXk5IRoqgrD{J~zTwY62i?U*}7_IEoz$~#Q%2-7-^qotqpUr#76Z~Ei8YHN$v z3?;K48Ww>gf(n_2m%353>U@xu8CtAXa_EQ~(Jdk3C^1Q%SqKY{QlrIo|9(p5|Z^4lwG~cGn$Gc-RD9HixXFQqD4n=4FU)?6} zTQP^K4%1wk_ztNLA2e%0D^5We)pITU3s0bRKS@xF{gOU#x{9Td529Tpiq^;_F;zOzd8y zmzJ8!Bw^xwl`ptASm2J5*tq*=P%(lJY{{|N%n<*L?r!yRJ(sbMp|B#k%5-MtI9q0% zM8k+=ex*~NbT4**gU|+^_Wj1dsS}(UGwv->#Ai>DC{xkf-arL?S7P~Z*D@n-{u#J#fT2T7Nj zDvntW)z4BU4$1&BOn)mWX!sLIy?LM?NzENHtI?pulA+$Uv~)+Mi62~fzA>)V z@|lnv1-&9GV+81Z7Q%9%P0znRgLb9d2^ZGC+Xzc0&oP)%kQx1WZ4^7~Z|>p36j9Xv zZ9HCtxE-4gBeUF>7`G*Op94Yiw`Y_KJ~GL;@2 z6sH_3Y%^jbxH?)0)fR(5PP)r=bBUyFI8vfX1@d1%qG4S(I&X%n(h^Ta1gI4VoJ=U( z1j9pSZ>Q9A2fPq+PeaN4E(|Ve@6+4et0c1t5*0s1Z56x^JwZi=R9amhDGf)k6GYwn z-oVVbe3s7=T$ouAX2=c+4aFc64A+-}_%E&a3KWx=auT)AHRF$d`4zTqw7I%k*v7%j z>D=k+<4kg6cDy{c2@PD^7xX%bL?RJzFtWF>P}NsrNbD|9*uWbrB&=}Kk_gtI{$ynt z77>G(oAqbi1r6ahXq{rK&*sOVe;&BNQpRmQs z9n;X$i#u_HbkuYzmYknOeqEm5 z*FI}7siC8T^vv|?U_ayV&>10c*lh5Ja_$&GR;&n_;rv~Vg79#s-xq0Zulwc4dqE8m z+?k?7XgF-lQMA6QhWyp)mOJX^-0o5Y6~R^Eh3?J zQ7^NsEG?`o$1FGzS~DdIh%fGX?mW3eb4f{QV2|GvXl75f+Zp^0{3XkJ17+&%TnJ zo|^hC0Upwzr1IXJ?SlU`#9um>rXb(*BL;b5eiD19%j(5VO6*aA|A&LzKFp1KMgC4W<$kiE2F^ecQ-+HA==*lahHRr-DO*4H;pqCinhMPTq8h>8Nu% z$6rOiG)l6iL0SwCTPVHgsCX;x+pcihfH)K0OXap%bGAOuu%RgfN7`|7hWEEnvO*gn zw+d826*C}3V27D-qzsw8xtg=!)MCP08092Fe}}m+iVz5+{nT-nB41$avobw;v=ISE 
zF-IbApoj?(As`B8J{~pT_&}|!AqLO=5e>51yCE(ij?R#%Tt%S0gTH`26w~`Vl@Vb3 zF<>Nu&k%IMuyXed5b;P#7{ z3ZWt1wJJs4qykcx_0=f5EhvWmcZV-Q-_pDydzsOByBtea_lgDYJCFjakNO#x*kdno;~Z zdkj!8lSUoI)9DpVq;-ZBuTZr4i?P3HC=(9NydgIH1o8-$o4Kfy3Ix2bSNS4WhNtaQ(M0q zT9!V~CWk^#>m5od{(H@CS5~s^bJZ~_EU?VjH1~Lv4tJhZkuqZoXUYb?is)9>)cW*i zzhE@=)vs-gi)-x?KcXmZ8sYYe`ugIWoa$0Eh`}ZLG)d557TznI_{8{)izlIv2;xDZ z>f^hl)?KFHs5w8&dXA!UswOXuY>cLFdbwfX4GpyH#Og_T{q^X7oX^;y1h zQ4u}8M`NRV5Ip4OY4u&6GT*!%J3(aB{`A@;#nj}YcWdd;qc{Wm4kth>=erK9-->qP z5nLLSqQ!U2`ll&I$;styZjvG;W@cu_Ck9J`qT}d!EtDeh(@%ZiGW#>79UlroGjcXC zFec>5NyJRY$VeslIwn>T_tEzG$*|V*r~co{Il!#+^z%@NU$8R6QBiP@k0u;^4E+3to$&DQi1zkA zm6X6hNM44D(k}Ff48^#_qgzdT|MY3Tm-%VrwaBdb6Us-AToAi%WK5%MDPy%?sAVsM*c?>9#N&885!^VMO56i+sb#xXOn|f@}WE{&wj8lU^FP_02IT42rV> zWh0#){I$=-#>OU>qxUT-#>|p5Pq}{kzg&;pZB#W~t`%IONLi&pjTiHYen>=!Y{sT! zxU80mn}r*1A#LHYB36p}o#LPI?g$4IP&4=rrwZO*MGf$P zMxCof?RJDsAWM6(-$xO5YTwxz0j^}DR*QL~eiSsD61j6l)*!0U;zY^UF9oRGa>8+eTZH$M=KQ?Am%h$W3+Bc4^PI9~5Gwo+s~G>c<#_eD2ma z2aiy>DA*k)LeLvR;34i?x7$m8e|9+MjkW9GS6l8DSn1?-T3v^Sr@r)?IBI=jSoZ4B zW=isFKj|zCy4yF7S!;dVTGRKXXJmA8ntzib+Ak}k?K7j>5MshcpyzqWb!`JWvd3*u z?Dm+ii8Bh5@~a;8Oe+$YaCjKGaxXWRTh-3bEPD<<;uCYEJRGPfM7g4@TiiaacI>4R z@#N+x6#*PwC!`>Tkh#rB?=9pVCPKWW3q&?;6x2Cm$x2Y%lHZlfpZkqGwg`~6qBUzc zmTa{ELqz=xz$}^G>&W3=dvX1LhlzRC4+$WU;1@3}>FI6N-gd4Uz>>g0R>_{P{cO$N zV5fLL`&_U6+XXcu{41@jy-Fbn0xhrMt}?H?R-U%X6{*LwNZtMY(VLT*bO`YAxAPU1 zu&~?(5K1f7>|zABq7|VK6YSl%_J7#hTz_MYlk) z(l)Hoh^Ld&sy9Ptr(jPRioCc82|>UWmmJte3Io^q^spL8A!_r&e*OPw0Vvp&Hciaa zhPN`8_3%Zjz76f(-j=ABQ`1u8MIl`vVjW_MfF&4 zewCu;PKzQ(is?;LS6odzyWEuC$Wu?(W{6{>ff$6T%Mrp;jVCJ5eDDgNcb*`MnT2PYf5NkjejX=Fj(; z-BP~rZX-~CqNJo_lC@j+W3RveI=CrJ#y7q&6H5L?tBgBsSS=eM*qyiM$WC5$pcTle zD*9jtFeU^m1-@WWLpB1C4F*q1dfBkCi=40B?h)<~6(Lf;0?HCiV31(_{UCnlOn;n-CU+tp8wY&R|B54SiY&lIpy3E-w}W`B zFHchlaq0k=0L2XlSitN|zIs%W<{L96D8rfBUaOCfa*8Zv+{NW307FhHEC2jaJRa#+ zvf@MmAsvv?E}*yU>+8eJW)p*6{BDPtdjWtF?3B{%Dy51U#GCt2@Inb*_QAa#@%T`r zvI{FUF9J_PF(ia){7aNr?3mdrO7?pG*IxiE73r|xMIa_f3$yT0zI{tg9aPDqS^%mf 
za3+R-XVPR!+9`xGNv{1I&QF7SdV*9Y#iaJ~<=MkSB?-+%u_6FqN)%~p__+D`nK)IU zxQ=`zcEQejp4~A8(tUV1{U+4ec&MV)iiKv_rn|4-MC7k?dFU)h>|c(PtA7;z+x;{&2=Ef|-#bRRnJvsrPLuL`P6lQW`vlQ@ctqea zkyqPw;-$#uk6En$GqENWCOjEHfts1+*K<0A>#(R7sUqJTIPMdI9<6oP4;A2hFMo$| zPq!X1Pmp7W4VgLp94#dcMAt4aEgc#exeCr(EOT~qTPUmDcJWpzJdP$2cF2qDsP!{! za8hflmkpGqWn^W|;<3v-PEZ2Jyx5&MJw44V8`@!Ri67+{pLjDq;dOtf3h>WQj^8IN zIyyS^hFm#P4tB6c$3 z8f@P0P82l%5#pQrYK`V_fv)$C8vC`dFA0Rr@96=_jT_gTvL99I?H*1>4do!@W54^9!2pfqh%1B|4y(w^itWWijw zT^M${G)C_6n}A#Sj87ofzVS0IN^skWY`9oXJe`6TBB5O7TR!6Y;IE5aT5U5v4kNs! z8{aHk!BLsfzZw5}3r&%|3f}v}fO|E1a$>Jv_|iua3Tn0WE@XY2VsNI0h6XvNGP`3~ zB%^KOmho`|ZRqUuP{o#zn zAgKX&7uau7J$Og3>_dAv;KrEDs(+PaXJ3lM-m*`ej&Fxp4uEtCMG6Ee%Ws+-f-F-N=SmYMNDMrxb72}s|XkhO-)U` z3qX{Q$wP^v%!ZY~jdq3Mbg74Gh+EK2rgB+HQGdk`u69C03ZSN=Gj1MoSBGl%{xagA z>*|Iru_K_J=i%i?KnebunAmMr)4EBV&X2G^ud>n2<8J6c*H>tNC(+=rAps(j?rv*t z?r)&V@;8NNCt(zQ8Cql19S-D7z1`gaT7rQ9pv9C!Q%8ry5Y?6+g$wZMIXO9i>#o&W znwdd_>>M2YwdW0DUR$4Jzfg5vzTLS4te(f(y-|FJ1$oLx>enknBl8?FVicWS0%b@w z)zvII4gQJQ(D1t2+82n3INZ8;B}jS7MTZ@oo#5=og1nrZlM@p-#KbbQln>Fb4CUqJ zZEef-D(W?uP=Y(PMd$cJhSPXZgF8RO(}F7mpsPM6gMh$&9O4mUx}cMTVMn)B9#NT^ zz>gp7n!91hr&xyYL4S7$8g!0;9t;p?Alt)3Vx{vw)oL*$_Vo7Rs7Ua+paIgQM6K+T zg+--M%;Djow@_AP=OUbeOrXLWWn+O1dHp(TO?!K`D+TMlm+Xh=@{FB1Ie?!0>9j5H z=l2{LiFWW^mX@ltwL=Mt5Ez%5+T?Y8#He2W_wQegnB;^6%!%dIn>}msDd8`$&iio~ z;E5-`Q;EBb;^bo&<>L>Tot&tjC!T0bq${`MmUVBQi5f|I8W(*hm?o1Tsi zk@0=iEoxVbM9g0T--pxQfdT1#Z;}es#H6HrwUYM7D{UZ+A|s0-d_fuf>C-32gGA^z zu5-I*Ux%fvx}A$V^(+r;gF|%PLmxpDSW(A$Mt0KyTyPV1D{a3+F}A_QZg1nvEwd?C zz(;h0cmsI(T3gA3`$P1~ybd=&xmC^`+}zxRgoJ3;n1zW^mbL;B1nfMq#GcDbR8$Zp zXLg-+Kt7?&X=@`DkeBreG#39NmS97w^PKm=CWNkklp}f z9`L1j5yAd5J8_@{B?l?2cVlyV+leRT=k^1OuMh!@~m^85vM*92^`10%;PtShT7tDifOKgZF?c*h3E?j*_$d zfb$i5ok&~DG@KOTFJAH?JO)S?Z-P0;c@eS|3GndLw6z7^z6F>8m(?^=uqZnxr zZz)in0=?b$F^fJEM|t@XcS{f>IwZkL8f;oiN=j;LgSM7toG0=xK_yf4VRE5zk)xv{ z0|NtK>?X#?sj-ol|3#74JPTq(%xnscd&(6+@R0=@Ie~FH6xY9Yw(@==SK7q+j zk%~WLep|)&C9B^=knip7Wba}D@dqNZ_H{v0KTxB* 
z{!ImdroS3(3n;<0uFftl=-`Ecp*`q~D}IWzm#ptwRf4oVloFqkV)9vdN{9Dkb=H|eF0A7;lrM)W3xE)YpxCc#+jjf478G%YHEG(=}@&pG(d@NTo#wgIS zbQWc5W=36Cci4cV{bN{EwQ_s^3-EF2mFEs^`p$r^Bd5Pz}5fb`L3h4ZhYho$B2w@{NR4fzcI z&DoyV`1l(4Q=@~Jh|NPd#FvFrdq9<{^_o;oLh~Bu^fd^RtepmIl{B$77_>Arrsn1a z`T0O(wl5G0y7m1_?69so2s|R9xw-jOL=2154dqfXhOePskKRvA+lBHFljMG32GU7!64R;cYE-*N7L8h%}!r9=)GZX>R3yKJkE@f$J zwr*ZzlXu3XeLZDz_w z0AlnH<)S(+Id^w=2?+#JJD}^C3RVP~*~UgqC@zpFFmrL$=HztFFTJOYKR-V|buTk# zBcO~!vdZnW$3=mBS7iWM4UkKsr|t5d=|Q5HL1L7e+FWc0LBKIUG5E%OL{>(Ik(oKM z-(=Qw8C#lGowKes!juCdqU!Fh>OR~5^&H|)?nk;3LyUtHpPv45 zOkiQ!ZKM45!)_qr;8Vm&f$9e&&}p8F>+9a09=%3K(}4tfpu|;W_{fl$1D&6kFm^#} zU~$F6#Qe2=ZX@Fn*nvq*tgWuzYgPjqL(*5TfYE{XX{o8!uYUvKjDyxeq%3W3Z?7?2 zb{XR9V9*+ZEZQ4GcAcb%5=;q%qzw(cK-<}$rFM^cyV82rsFz#wsHWk&9uSObKM(0F zQUo-zHMemq1Qfyi8S>eR_Ppu;YS7^DFJGVx44sx4?@B{0sI!5Jdtm#VRi{A_KyV*F zn(h;vXvfBhW_NXW&wUJy7QF<)>-yTw*%^~cRZ$T$d9b$3Hyveaa#B@I4dl*Bs}`s# zkg<%VYrZdDi?&s&7YpD+7~=eEW;y}{Mb;(TAc2_K0>Rkc-k#ZLCg84`nwo&vB`qxt z$YE^v33^&M?b)M0i)R1Xy#QSYy-NPv z29SUOP!Y%qgoK1RIWK^Ws!KJ90CWKis*a9;AQ~AQw6d|OotA}XT078q&Ht(xC`qKU zL54Z>2IZ!Ui_5fdxm)mfnk>)_zRCZaM=J+ybnd=OO!)gF%1Ui*sRx(?Is)8uZ%~wf z$5YHLLZeP}ZI}_}76CjgjF}2(sNCG@$1JQt!Eu2ubVRVajMEmqH2ZsU5-8~MzYTAJ zh=GQI0XT$x6ADUv)p}xj*HOW1@IiERG%7yJYzT-ts3<63`tMxd@97}DA|=&RRTXPd zWWuxh9fI!J>U$iKl|;%vYVgKoPd`EnJ6x6xURdW|2*yic7P;19Ea%rRkutUW-x$IM zejHY7G9uy@d1YmXAcnY#>NePCx3(^^NMhCP!f{{ynZG;|Ed2bd0p;SnbxR>oG1PA%9STs$Py4Krl_tPX&BZlG3e>|XAcS~%-FHXe zyEHx5ZUYSn9xm>|ax3xibto`@GQWqh@=aqP-~l45`C6-45*9QtZs2*jx6vE3a4GHX zqe6lID+wX1r@S@0pn#5t=X!6dL<9B5VZ;b5Z2exHR}_GxMYB)-WWTSYFx(U{Y-V0) zl>jhKT~UmB5NLDQ+1UY6zM7`5iAEcLIs^N{3GFTF8z&hGmHL*;;0_WiTvQ; zV0v7XA$~MakKou>4xV_8f#MNV06<1qXUtrr`pMqj9_XUv<)d8=0>&&#i;JtH&%5Ud zyyw_OCfHt9o}=rq;1f$2wEC}xWB#t=2MHmbR&MX$fVvue4y*ZJ7lyQq@JFo>gi}!4 zkv<>4i|AMeg~blXjMHt8ZSXPrrF}o zZulyjOUcS*M-0 zR@=n9Brk(S`J8tYB_t$lZCPADACi!?q?z%NL}%Cm;Sh+Q_g9C&v;lTDIx+%~9*~CX 
zgu9ZxXuw1YxV1}k;DQW^TA?TI4#GNCE?;t>+~`CCN8M3@HYM-LaHaEd2Ig1R2Q$XA=6(?51!oJ$GM&)jck}5f77g#?rF=uDz$?54c_qY2$zMZZUe77Xv(eMd$2_`&N3nG}_ z`=e#;F~+U|_pA7KN5{Jr+~R_q2_v>RCaJF!-$FUcW-u*@tb>)IxNVA9M&Ns_szK9` z%Du|^`Wd&N>N|uIMQ47Wwa#E6O4SO-N_D<3d?EfTByhg@9b-Ho6o3*8RH(=8&w53w zpxwqfAYeQ24iwG)#%?U(=5~F14mc--o?K)92!SX%DDD72K!8ZE%FnZvvg=D!6f+Z( z7-knf2^_+7ft5{mR8-V|P{6AB20=?ehN4~t68qP$UqNh-^0{g<{sj=q2z=H$mh3Gm zQMqqyYtaHnzk~n}1-$w`Rk58P0iJ@^1v^X?AW=9tfsf|{iHt}AvoC|FXW0!v&B>(o z6N%9e6biq7`Rk4l>PN%WaYnu=Jw*5Q_5FAI1{GtJabNlzK9yIsNg#qQmrs6}>ziWU zN-{Ot`Rmtk9n9ziYT01E3M>+VhF72%<>`xFe?yf9CvY7aHR_-*a^_= zBu@qY)3{R|OwGx8pNt8(p6-p}m-C^xY?nUd6g9dR966`J+wmbVM(=-!lYgEin-Nw6AV-w}R7opd&yT>Nu;Yz@rurd=(r7$D#QxvH{=Ld> zYiqmei89ah=rLe(7r@Kf>WLz*<*|MHcrk$&(Y;V5LcW2_i-`@}_!?NXdWGIno6o)Q z(<#RFYamSx*yupUp=Hu+0d!`}p7usCP@N}Vf?j~O)K*b3_HrGqUqJ~B z+(Zdpsy{ehkLTp#I$UXgt{D|}a~S@H1ZKT9e85RwSOcd(dO;E8P@An*A~QGufdKt` zv+I%fb%tA^YVi^mLhDl;vo`JaCJaQ+cQ+4!F%iiug0~kF(Qd?~q_)=9K(?!=*bp>q)n|^7Uq3A;+V0K<>zBQ8=(2UJkUmFR8OXrFi$XbBRL~Qm zd(i`l%qmyP7Cnv@8^Ks5&`K5*sH>{ZX8zyl7#M4Pz`D)O5%d^kDcI7&Zm21m?`HgH6Ri4e{Nn`#B0K3mj77LHps zE~@WBLV$OGKG)`@@f`#LBDJ5-g9A|%0>pj0tR7_AXcEEj3~9L!bI#XGncZ6|C+=^p zQ-Z+w9qDrQX6yBeo9Em&BvUsO*hZUwo&J!6Ht=C|`1|E1d#~WS! z#_i@~Ae6X6M5y%BQk?W;up9sJYA~6OhQ-52c?0pUwVGb6@u<0! 
zo}A0bAidxM-31=2-oqi!%Hq$^O}t>ox27+|dxKRm`@WE~;k+jo8~6Nvu`YbYPm2Fn zjP>D3DaOvKN@HMPfJLu`cN~?I+c^se$EH3Nn9o_)Ar@4=qFC`dZhdg5=C7W!W&yHv zJ~!KizH>QyYz8K#gN*u>k=*ekU(f%vM5nfI6^AXKkaTTXlEG&~De`YYrn;kSP z@95c5kfSmqY~0c?aQqK?CfK^T+r~Q!tu8PTt|Q;RLPgstam+3Zg%bN z>EUlh03jq2{@*}wHXB?c485Ve0@ne*Ujz?OY-Vwo4wO{1oNWs~?&8_Gc;UQym6(b0fD>XCMH@`2ss-SrMq$|8>{V) zOkCIMm9_%(wL1Y=V@YV_#&LJrL4{U29m#j*gPk?Nk$7F4SDI z&Hf$XjWRDix5+11Rwdp#rFZ0Q{apAOkPUa!CEqCqb> zB6GkA>ep7Mwo8?lrAF7+ZK>Lbv=y$=c*i+=$+S=gHlma5kQI!TVgZ!747Y zk5Ak9wX&Ae#4u+IJt{~LB({CI-|L_6SVtZ?pXCCjL#?Rme){@7-tV$rO@+Dm{4RX) zVtNc430CpD`lpcQI}BEzdRIgTFMbWQtz1Uo!n@TVtsKt%bYvQ$hEtFJQPdUgm%lT0 z`fQ?|CJYG1iIACSd#EH5N7#N`y5hz<4E^0sZsyf)KY!@=66&t1lNbwpQNhT-kN&>= zE88cu_hsSlBbKKzFxgWocNYd{^zOz=-!y8mpuqGP`V9Bdl?XF|iS~%GhmDpQFRLb9 z;bVzJ`8lS0k2P%s8Z2W53QW7TMNW;#ttx5qho_eGW1jOf%HF*YQXGwMPz}x`ID%7O ztf0kZcv+<3r+sgH{5}@0XU^lOo(k5ewR4Nj$w#OJA^HE&0z6OkA$}(IAQ1nomGic3 zd1`vWSky10;h`+owyl(SDkOw;6z9*Xo8$S*TGicXMF`HP-1zPSir58&#Tm=*ckGx9 zVM9Mx0+I{@j%6c!otyOK8{B^Ti&*!2JL@fG;OOdnKUC87dHU56FjL-sp>o|nU-jwP zN;dqI$aH75x}&ClLZR+%hp-AkDG<9>#%h|kujz-c5B=W#3lA9C^GTz5eNJPh6p zBDl|sq>E4coUwC1gZ%KTRRUUHBf+l+5gyxu&!N%_x;nW@(0#XziWL`9$gh{EtKEp_ z*g>2uOEtK|{KgY*>ga*F`#Nu4?(VrJS$KsIP#t7AL`aSD>`Jaw#-uWbCbtmtVI(g# z$!Qs%yQ%BAt?(MD?xtSpMC*O)k9yfj#eme+>~2J4Au4|(7@v^L5A{WRD&?(No;`a2 z!@&aVP(#NFo8o8NT^M?Yo$J~5yK#Q~JPcepqvamtSn%sYFS3^XynokyMko@%FQI|^)bIb- zJ9m565GP-voh05+ap7i|2BU4qRTYU~vsDRoUZ3vBtxkEGAoy>X;_n<7aZ5x_^w3y9Ci>{RWR7tYo{Oa>sxMj#+{ zX5qvr_BXMZc89ZS>kBeJRii;LH8?w1=@$Q#*xoFRCR(*0@lAhGl&X4e_-&cK&MH$D zMaaT-QP|xqjfIVUtz&qzrK5(Xzcfp=q`p}tWc(sL2cQE+Rgrsd>K;N4%_F)UBeJU8 zCGM=b(@t+99}DAzylA6y74`5#J9CDYeHHHBzc9}5N%x0zZD6&yf85_GTO!OV^R%O& z{#lWKAM@}f>gNN7Zw#^gIsmd*6U2|+{~KW2P>P?n->9jf@(Cr~>dks>r~C;l2OOkw zM=pE6ny*@Tr5U}){ zFyNxZNr{2sceD^$TAYY(X8=1F$berR5d%|ymYk`8UX#uPEtEtqz(xB_IJnc~@{|Px z1U4P{lK)q8U)dMM`-i)PNJ|MwBaIRQ(t<7^At5O#A)?fRpmZ23NFyBr($Xc}Ez&L0 zARsN>aK8Nh=M|hsXCKV?S=otu?!0O`0g(Yv-l6hK`#h-6_V=AWB1uU|z(xn)yt|v* 
zp@)>8tQ86c=Ox0&N0Ow0g%*e(PEK68{J_0TgsGWTsW=nU{mjAAvN9_n8gFm!pofDaBN@Y1c}aV2_dqCp3ID;wl{+#U?%77z zv2p?cv7R1P3OO;l9>5~BVcA!=v4L6>WGD8MEyf2jEk-#5u(g0zP?|KFD5Ps#y21GW zzOK@}9i@~W^BB-7&_C*M8oUBF2rNeI!EnW~$|zXvEH5{rt$+rCyCEc$B}uESj1Bvn z#DBMTC)&!;kplm5OG)&xF%?Hc-h+lziHqM1Z#A_T@IO8leLX;dvH!`m^2a`Dv%}|g z7K``*LuoTz-eS*|LPK-U79b;GnGN5A#@eH$us89ZnXVrCo>}p`MjgKi^_Xun4S#;U z91V$)cuwlu43I)uh_1Z6FEY6S4Ts=Q&JEGEvn#j{1K%&dR4wkjIlvvajyzKPe@#qC zN=nj^kbv1D4{2HYdt^jM8I%gP96g6T-{e16C+H)MrvKG-sCHrd7^7U89Csp~RR4Z@ z_9pZn`U$1|fDB!M&6pb*t!v*_PvyCzZKHFB+(goni=Oc-$0c8?)blXL@BDc2{}fX~ z5$}bh-e?&W;MTtgvzMc}PZF*%ema3?yL5jfF~~eh#x^Krp|f7u%lpPHpODEkhhfxe zM?)oEe@HiV&B)~OPcABl$1Z_ge507ymR{%R>}_`XbnDKiX(!dC%6!oO(=40pmCZ$+ zjsA{l9l3YQY8{lcSxV@Ona+#9 zFTM2aA}@jNL2>hTmf_i5D*>$gwx>&^Z=Qurdg>=iygR}h`jh?Hgtcj8$s}D3#!VV- z@BJBKk{>)x_zNnY%lTct1lFKlI^1KE47-o9^4Yq`pv-OOwhaac6FlqL`7emOE8e!2hK2<(( z5#!?%*qdKyzUxpwVec+-K-e5))C7RG@e5j01kdP~cw9?I`t2d(}2 zg9yaeSJaBMI~|9C^UHZ_5yrNH%kl@f;md!%cKryo^m9R@_4|DI_*@FZM1nd$aVoU3 zu;R9?x|q}4Za|SfA9^o*diHoOU}u@xu<<=B;;p5E?_+pw+NE0BYzr1X*w@+j>GJJ7 zm$gpo@lV-(u6BL8g82HdMDJO{Z*H7U*&4su&)1Y6szfCk)4X5gIC}28=W`V6yH}$~ zia?Z%aGostJdh$py!70A-8F4-=jQ1R(sP8|o4E)0@fOtSmi()M6x{UI;h!#^V8qHn zk>Ate#X)?pbrUyTt*PKx8k#srP`o0N{LY0@;KK^lt$zX6Cyw)w8ET>@efjD<8L~Ui zl*g(BMi!1E%YV$KZ|+Z%gqXd?#Q*EjbGpgU$j3VSry@+=Da0y4DUR{kfMBP6u@1?n z(JgbPGy(-e{Ci(A*Z*FvH+G3^N)HzkvXQ%*JN=HdQ$)eHizapbxw&xAoP0YkOF!3l zli+*}M1a%Y6)vFCpd6pcgsQBRG&orc`KiGUE#D)CPfa8`02p zAdJKMvFi7mL45p*{s(<`?xXDNrwj2h-yO0O>`R9gx(R%=l;$3z#Q#8}9$H-d$G8RE zq86h2Z<0kzn4T7UodO#J_v7pK;FFt+WI`yDj@gRMlCbH+4Yk5%Op|GB^BTeRpwcTz)4jw5h+luMta;M3N~Lz0Ywig@cX!Jd2jD0eAC!!B-POH3%&7AwuzE%okWlR=Y`n(~b5@@(s`5AYv1JfgsP{t)N6+!a<|H@39C z8{|5P?hn24RMe;XpWG6#CS(<7Bw4aMpT53Wq;ivh zxHjv+mo%I<$GajUs;;ObllZ1@`>!7!+Fp%RnPkNjn#YCMCD5jnj0G|&*yO6^ zK709}^kaBx3Q$0>#&Sj!>8UPKE^lmrh6Ee&_1ia20kZr_?4u73;hCSCGOk)Qv}0jb z-fN@-MI|PJf{KcYlG1HbM3){5ai5heLG)DK2^RaDD 
zlll0;x6Jf})H3#QEX>Z~q3+`0>peCWRn7;olf;$+YMU1OW2yqn9e2V*h43(t1I$y|%vorH0U#!HrWygsKu_kpqqM)|J)7%%pzMZoI5Jo|au)lY|)Kpwx1Z zJx`|^G<-m7&&`=RB|l8{(?wBFt0R*OA=2*R{EUj%P`BzeS2YP7h87NUWR}vxL>Z8$ z1Hu5?KaS0^?f`LBI<3cbF2IbhqvL3+WAt+e5r)LiS^LZApl8pXp+fyU zfO>m`lZ;{Vl$wriSAs+-eE`H(1lX7Xt=k{IdRN5sauf}M1jIDB%rLD6-H7+a*#YG7jM4IarZqXg{}K^%0s@g2 zNuG=DOt-nX#ws0OI5=!iI!~+MnuhdEO@Q#_m&MwvTA^ET3aGFeP3? z3STN@d{epKj$y$)ZUh7et3G%83D#k#j2McpnMUzcPk`2Kx&d!ZB9=sX>Fax9g>co93|L3H4n;cxKR zfzfsoWZzDwhp+79(=#$4^Qr5wE3!mZz97Fyu%RQaW_Rccm!hCLsq$s@a%~h5agj`f zHIG~XWqM{NB+S*rg2k$o68eN|UAcwqI8k<}zSTXbS0Ec)Us(YupGM9w9#BQ;-|VeuL%0EL9;$MIk`;#!?IT8*$$?meY{!v2gZ8Pw z8HY+@dU5koedo1b57f0lIBZ}LZ#R$2inAHaee2d33ra?&88QYTCDX?z*Cv9YmV zW=<}nqoY&tD8@2llFYuwbgcNfht$PRuk?X%fUcQI5ThOE zty`U)oz?cU@1GVN=s+YF!@HK5$;rblpBtOsXs(TLWzRiOkGUfu<|pgmoia#F zBO@b8rAUH%i6ET|B!yFPEV0xz5__FQCCRBMTwEeYYE?BvmxR|67YO|~UW<=u3%W4v zN~oznd9suzyzTdH!8y?^>NQ0JArjg~h==2c(LhFAPe&|k9CEBqDD!R?d5kWK%O~)Y zPXrp;m_5^OH<4f>wXRnsh9AbF;N?|y#?u02n^!(1{)iZDgPPenDq+4;So7Z>h)u9# z^A{9VHqP4PV!w~gUyui>$&~wMf;Jm`x$eYrr@_Og55YgJPH{JfxHnf%mrP|)D5zgA zX7t}xNGnA!KX?h-x1mfzu)D87LIXP{R$L<3k&vNrV26u`#DX0fI~fCZ$oGA){r}=; z6R{9ThcNL685p&XUqdj|U_;((>FJOpHNHMu@QDOzE^s5MH>9gh_ws)1=YcVYMZ3Jz z;eER6*WXP-tG1b}dZUpgA1SQm_{~l-PW>{~fvQt4>b*@XGYrhBK%mq{m`XzG>&^;9YrN(E9~XQ~?59 zK&UuolQ_gJt&_6{9F8wB{qf(w{g0<0A`Q8mtxc7a15q&zIDn*pcqiPALOABtYjnTP8n8BNPd276wxhkQx%`it>rXo!Aq;9INS6AtYIIo< z`_6*`_F^}Fy2kmRNvH+wNI`FRHyhf%a^hs8_8@Zf>A34s0_3(X?YPi(c6u71Zu5CB zw^CtEIg^-2CPBXgS~^LH-va4aiZJkn!3h#$Nzpt;+yOynnbYU{`w|;4%<_TG`jn>L zdFH<5*BlySx1$BWDO@>qX}zUgXkUo8%R=k&0=~_rC1c<9d7?YdMwgNr?bcU!%Y$+)zh#ttbLx| z>%tTH1BQV}r3G5My5-S|mBTG2cf7-gH?Li`)(p+p5u7vI{i}5)Uo$cmL1}n)b_SB2 z^TSf{g#4VGRmLPQ{ToyVi*=sp(CW>qTVi5IW3QgzDHnhO5TZ}!IaEPo6li?%JEws1 z`t^HYXEgRcD{Y@owmd#?-~S@*EoCzAC%YJg#{A#uzKi~|dBh%IYeoBK=LiHZU<5>m zQa$z9+D!5&uB;R@=@i*tH>fmgU5KLrmqc_21Kc(^8QeEDzfBZBH~Pwqzwa(teb%SQ z8dh9tJ=W-FEVb3LT2RkWsP{}0jEX-xJ5LUtJY`0o_IjVckV?9MD79kibd6ywUC;bM){$-Zhpwd`q(ZmnB5e_pI%Y)aiU%#cKgc>jUQ0o;=GFRORMQnY5 
zbq(8tMe6RuV`62iHoIeN#M?@=Y0PxZE~Bvf+e#X5$BTp;et4Qw#rk92LOgr(PVN?tl@>P55pQK0f|W5gV8F;U^-tzl8=%(Nr0qaR6@6 ziHW7OuNA`WTIaF+NjAl5sk)msbNj8F?mk{y?w59&6~#({GwiB(ao;=TPM#_ zGnW(?|IGO;U#49>dnRLPDMXX-S^Q;NOAA`SbwMbOUx+Xy&REJrON3&8VtOY?iTPyM zXgqUO<{E1nqQBC@?pWjWY2>AO+7WoXdMauZyO`~goI?su=#n1LCx?; z|B2lQg{aw18+uJ=YlG5z>-WSnJG+-&m4uO(XohVMXv#HxdibtSg_>sS^zbk}BkI$r zW3z5z^v=JsZb73fh=$t>rZKe_M;i@=cV>RKis^-{|00W(=ct(2kY+e6^#1koWE|$l zNHp@D60iGl-S~$jH(X!GCo3VInzm(Qh78wCapg#TV6#>6h4Y=s`0wGIfE^Y?4bDpi zI$p(GLT~-+%1P3KC7(Ms0(QJcQiDJ-b!kw@ahavmTN9oxluYyI-k>&&Na(=ZagllR z;rQb8=wD$6-M>$Z4wb$tsoGS$Y@Ftbulx&j=2}U%o7@?nezPtP4!T1qL)v7);h(~Z zlw@ezcrEm%=v-b0hmIOyEBH9$;Q=e?5a_}=fvspyyd zW{&y|S|Mfk8(vSv5Bm!{D3l)gm=dyNcuQ|9Ry>M}C`@W4-FBsmn|sCL))+8ae8qAh zU`KP#y#M9{Q~iN^)=%<(bi{mCLLe5IYMRu z^up=zZ(7@j_O;+>Vo#x4{u%;H_((A<07htzb*ksbqf|FLTajMsZVttU!u)0$2m9Bk>x~n` zBSPc9G%Pt>mG2&22`;){5_s3@nC7nMTJPSZrwA4ApT2kPk^*zr3Bt+6k9~i+p(Uq& zr_kk0zRFL2gKvZlQVzBwG4_Ck4}866=8pnwbBi<`&)m&p`s#ZxAiMqh!S0(~p9Rw} zLT0pShJEv=4l~=80p6Yte0lBgyODiN;j%mmfohuEE{4QA&4a4Z=IK$3JYR85an;*o z`4j@Br-z)(M#5Lo~cLWGndik$3@7yJC zrO=5|>cHuEYkgH^(IATIc~(R%PU`cnI@@8hk6Ideb@`S97@$H*HdizZ-0fCVMZ0`% zeDmy@KuO2YdqQSZNM>#3CJr&tZHDNb(Y6t5dVP!}jvz949IldG9k+<+#XX-|nd!Yk zS)xXUiSwp&xF7k82^r;WR18efGv3O(&7~R?p3b!}>DD|<%%h@t*LLz}%-^43bGa=p z*%_v$4w$|fL>8UNg3}hPEq;fRNCht|vuq7i*_d>_mV{YRUXStPn5gfTmW|cMMuZyw zRE`L6nWM+6F!S$*jP;6?=5_Sem8Mk~*gaNYJ8RB(l=qGn3(;WC^Rwf4{mi+}V`sn&Hw#v(k(*MFREb(!yF}vs9z2jWVAoupydk`K=qEW|c+_F@@ zOGJYF({GkD`I(`BfsnbFxJ8y?lrv_K}^`gmR#)1ztK9kA^~F@P;3AMN+nIhSqBGMC<7p#)ZN`(O)W%=d%>X+RuJF^ z2fqTC^7C{K_V$2KCH5U08Um3b1^yM4L0AOC3{ojQEae**Zh#;?Jaq9fU0GcP%`_x2 zjjy9#zJzQBa4xB4mJ}7inSsBpo}3yQ0x}nT*xeS^C=_Hm0EKB*^ljKGp!H^W2dr@P z^UB6-pbTu}$JY0uAf|g@tp-pRs$4=`JZ>FE0APIWmH_8*2K%BA{q^e$MIK*Xuh5O{>1VoOU{@Dcu zFC=_PG@d-k99@Ov*n13Wo}OoYeW6g`m4SX`Vtx)VS+ zM@PYNF&lZkJRLY;QVQhE0&%&oDFlAfkr5apYZ98h{CC}I%N>6Or*Qt<)EpWgUp?R= z!^H+eQn@4eGE4RI?3TbIT;F3VEeAJFN8DZx*Xnyl;VR*~&Ns8ei5+_P23t@Z(K1zq!Alyvor>=9u46T>f*8qWBo}br;__)Rh}< 
zFn8;Wd%GV6`8^HKh?uT2Q>ewG(Q`|;`J9r4T-h`(XD?m_l zmoN*Mrrv3uqqlgxFNY zN^dw3`h-3S@q9Ddt3;uJSN(Z3r>8JS)?*5bR%cM?Y}T0XfcC@W>dm+Q%NRXT>ka+vNl0^KYN0}`65GD!EoS^o$UsaefuVq3KNlVPd<=u(uBD@> zd~O+OPWNeZxA2uL#h)=RVc3>a`C$Ca^}DF_-To5-|y)iyA8Icq#oF{nPRTV&!K_`dp;bm3h5SJ61y)4G&oi0=~Bbj{4kMygUY`d0o*Vp=qi8EU|oT3Ys>8SrWR(9a)rDh!1 zcs}}&;cV)FL>2$EaL#DyAX~VD4xQUI4}W7bTBRM=Xd3?Jwz$#{5&G}|9dwujcehi)J0zSv}n5uPtTHrw@yqmv7`;3Q4{abU!pZFBIgOMrerS5h<|2| zFuS_-h>{dp_sx=;KX|I%bT1_^BC&C{=iTxb8<;knrm=Hsl1I=!;blVy#_+VM2lU$r z2kYM!9*@YJv|Sl;T0Q1C!DeF!36BaT$yjTw#0 z3Dms|onwxao<5&ZXOFWrUTHokYoE_rUExxGq@?}xS+#rnm;7CqE6gpk4o2&Jfxp{u znYE{fqxiT&6`*h)4vdd2IT@vDOw?ZHqTdJ|1Vq#7O2ycG0%4IK&- ziLJ^nJ?9Vhw>z>s-@p{)Ov8Iy63}Z|O<$ElpF5U3{S?I=S&6+|`5RhNF0`-#hwPYN6OOGAxOx8QVpN>RQW&Wv8o> z-woD-xj1mj0$~G{ygMwy|EgWy*Q`)tMsTCabGB?Q6u-u4L3cr1*!Hcv~0t zo3^C+E2GYK(#GSvW0eHWsTP}pT))O;xsYb8bK|2IxvbJ0P1+xC{<}+r4iyR3F+#PK z#Ib*up=)cf~l~v}N2c3?5q@uO6Dz zSE-*`cG#(y7(!P}><-PX5n_ahXlh>(nY|J#F_O1TPd7kx){^^aCtQiS+Vf21a!rar zw=6gW5%1cqGz(E~RhjB!-#_!Di+jF!|1HCr1d*9_ zfSrn08sl)c;X5FU)HIZ!RQ&Y_Hplc0iRO z-ip0H8)5jDD{E+I2v!zwtg$4~VkSm@3J4ISCbTL&2A?p<+zy*5noA@_?DyBk1PAXz zAsF(7v^xSv2ePrvFo1a0$IF4rOHh4LK28fBhLaP(7r_d0@Xbbu2GXT~Iq`fEllX9+ zTn46Pa&oOer~ulH%M95q;OGvVFQdDHlkp(fH>)b(U4vu_HZsiLC4=Y;GBoZjyk_&h%IXMzYhNfy?ix4 zGxG!9XyCfutd2mI{n)ybut-{FW`JypWx#eUmWK1>z!I<3&i z+U1?K2P68dn|Q=D;^C`TTj`IY0p<6gvMT)zwAsMG0OTq_T!E7*r10C=*u-SV30#7G z6|?<9Mo*t^Na#246Z$sKI(U1ugdH5_^}mD=g4Xk{S$kpI-w_a26r2qvdZ>MNM1;+U zau{(L0O;c}Lmm-59i2H9yrSsgc296$ch&pLO4q%^O}$u@;E?S&{IzpKMr6%rBThZn=O3@ck8nC_ZCgl^{M1>fcA? 
zEJR);NJ~U)IOE)ZZp8QJInlZ&Hlkdfu`YD zX^0MmhG$`J-X~-$94~!-_zYh8R2IA7hjH2J7Mbefxhf2K-M?GNN!=*~>h3TbUbO%v zB@|DN^&h~mf??P6rSZSpeJUbr*6XXdc$YmN%xKLkP-d@r|j#0*_ubfsE zKyLAgOx*v!#}}}JFSBcEJle>y0=kJHw}qK`7DtJ&BIEMxQX%bv{QN+4SX719L((3s uGa#yN8Oi?N#R1f literal 0 HcmV?d00001 diff --git a/ideal_release_flow_versioning_complex.mmd b/ideal_release_flow_versioning_complex.mmd new file mode 100644 index 000000000..baec7cace --- /dev/null +++ b/ideal_release_flow_versioning_complex.mmd @@ -0,0 +1,27 @@ +%%{ + init: { + 'theme': 'dark', + 'logLevel': 'debug', + 'gitGraph': { + 'showBranches': true, + 'mainBranchName': 'master', + 'parallelCommits': true + }, + 'themeVariables': { + 'commitLabelColor': '#ffffff', + 'commitLabelBackground': '#333333' + } + } +}%% +gitGraph + checkout master + commit id: "A1" tag:"v1.0.0" + commit id: "A2: fix v1.0.1" + commit id: "A3: feature v1.1.0" + commit id: "A4: fix v1.1.0" + commit id: "A5: fix v1.1.0" tag:"v1.1.0" + commit id: "A6: feature v1.2.0" + commit id: "A7: feature v1.2.0" + commit id: "A8: breaking v2.0.0" + commit id: "A9: breaking v2.0.0" tag:"v2.0.0" + commit id: "A10: feature v2.1.0" diff --git a/ideal_release_flow_versioning_complex.png b/ideal_release_flow_versioning_complex.png new file mode 100644 index 0000000000000000000000000000000000000000..71422202e5ffb37e363064a458a778ab12b4a457 GIT binary patch literal 22061 zcmX6_cRZEt8%OpI**n?0lF%^{l8~KEva+({*n|*5k~l`PSIFK8AuB7J?45P&-{t-J z<9+*ZJm>a2*L_{zb)Rr`RYej)dO{2g43dXR@){TzSTEq;{`gq%-*M6Earhsmi-w{c zM$s?E6$}hkjEC~FTArWQQ#=d}N0JV=_HeIU8T^2Ce@l3e^NozCj_lCakIxNM-By!( zHkW!f8%`X)K16I;dN5^+hkHGeLl8Ia*dyjjicXe*$4ME9qkv9`Z~W)XkQ1* z*CMQ&Oa&9uCQe&)i%z}OOWwa+Qg@}Wi${oNTSACv1P!Znc+7M7{Qvu>lMh=6CDr9O zoo4kToC4?m|B@G1Rn;%Q)e`bTpoQSg|6f}8KwS3af7d9g2xD9&M8^Jgl^C+EXrFJM zsqL`%s>H!gj!%oEDN?ub_4T!}AxPeO6B!v97S^;_>lC7_oHelF{%qGsBR5{@)W9TY z7TYNIx9$G&^0K4fp(K6O>*kS-KNb1zyOv_fQOzE+Eyl*hm9@1e2OHy)_wLamnUZ@D zuG^+IHaxh~A$%PAhK7uEbVvIPjxn8jk*nGX36k#XGdrHN$Poo<)xO@u_W707)f$hj 
zDZlyydP@_1U4w`*xT>B+ii3s4-wM=4T}8#UWJfA|Y&?`7bMjV8Nd58gaZOFl`3mZS zuFO$IRrSYut=QM|;gJ~>U!l&)a(^bv1-hN@J6D3-)rhw9kt~Xbt-n$*v+ZOZX1$Jo zPY$~5%(X3%F&nZW=I7>g+#*ysM;xXMjdczzhQ9B&#dKQO+S~h_(KPB2ASfv*QVWa8 ztRFlp8@q z`JOsn97MoNz8=s0S=QJ)G->obL+{MrahO0)v?!9Y_^y%pp;vVqy*7R(MDDr%6=06` zSlnCLLY|CY>@Ft5D-@<$@N#Ba2Q2e@I;EDBRby)msUP48`c_ZO+ zd<)fkDD>*@_U!oM$CC8)@yZK3iN^Y~0)}dFUS8hC#`VVoT|(0hK9Z)G%+Zw|z0BJ# zsfMXsi5G)pzS}*H$RK(4BO2q)_gd^%w~tOzy*K^NCZ`uV7l?7Mg!tj;E-yHVZ%#K} zEG9epUTj@VnOETQ&)<|WYH(g)m_A)>I6Jvb-xGUXG3JKHom;fTudON_rTu0Dqbpcg z(gKONtTFDU{5m<@V)5Q-TU=Z$DJk(x;-BHx(KI@9ls;v{mOVM#@Lp-7VIpmh_C0-; zpuAurDkxY~T}>C57Tf9ftF-6c9=1vxpa1m%69#hWrK`(6+Hq=TAgc8?H)Rqx)tI}gAt9yzHhjA}DybHt&m zb(+N{;jbKjoUYDDikGBvzTxP*YjUw~BGiD+@GG6JKl-B14fXAPxLNNs8#rc%Yt&<+ z#p~woP96CUA#8j=jhMRk;Op2%TwEME_PyQin~k1l`xi%B7YCKv9}?f!)Yh6vxEGXv z4wg$tdLFwDW~2VtfAjZqsy6tF_KX`N_C4^F8bwFVw-~!h`1txV2d4R59Qs}KUZA75 ze73yLpCfU(opu3Zx;$|q!|}4$tz&BGCB1l=XmG;RL$)kQIvY#?bW-$$ z8*87P9OUKk3KwlmG#K$yFD)%#l64pR?&mkf4qQ+dW$QWC?fe*eF{1iOYHW0L*-y8B z+EJKxU|_)aa54GZW6bX0;&{{J%AYmSfx+ndZLHBCZ1>*! 
zP7u`Zxi&1oo8a}gFWrcQXRN*qhlSYkn6e(|-Rtxw-^IrlsBT z8XHY2tCs!pb4r}HQtx4&v5_%Jp$8&_v%cS4M&R;6BBq_ zjY0{^eiy?AvOfg7FGivSvCfSJvI7DFrlzJYc7-m?4J?HSxg*;|g(yFe=nWVO(x4OC z%$wOJtZ&gfkG-kG*&9TieojxvjDEQHyTiP6Z*LE|-HgZL_wQkhIW|^6r-dNRYIL)y zUgFC-`2%kHs6|(aSR(ZRnQiEhLak)shzIm6EG*2-=ZP05eoKpsfm869L2V4ydp{6 zuz8_lzmWFZxXl??Z75Pa!tnVff2bOY`FZ8|%aaYwVl*24^XE^K^En#mO=KE&_E)r^ zwa2t%9^+6Z_XzUvmj_eaYaF&Ub8W_|hcMxH%Z?=*D!;`~QXa(WSIk~mtMbl)(Knc#x?n-1^Wt z>?sxH^#rAX!2yndf1ReRp{*~AvmV^OnW>SBj~&=)x47%6k*mc^P4jmt^s_QKc11;{ zTE5oc;GhfrV^2?UY*~s@J$?%9_=FJKsY0#XV?5k12|ts@#)PT6Om7k51nTA9GUj~r z=rPH|1Xlc-i;NZFu@>v;`|D3>U zbniPL4CV+w$`xjsO{aA|e-w6d~q&yatHp*Oa|3TFI=2 zY+L!`{8N-`cfyvco`yb8yyaeZ1ZvH96w><<0S(S5R{-er40yPA{LX&d$y*FCr33 z%Ggw;JWolv)o{ohA}5mE(;*V9#PK>LFgzSU(LsU(S|3 zGpBRBV%z@B{@Q42K&|V(t+%40(;#C+XSb5tbglOtG3^2C3dhq;kB&j3^Nc}jV%&Mj z=~GV2Sgq;fL6qgt>cW6B#*^IN^|c;LmR8R+vwm%EmM!(3)HNZ@MQPb%m#-DP(;Bt= zW&-1!I{$V@ml9_j0d`BJu}?z#g1KgXbZ?60-r)&rv!V)T8LN<^fzLfLZK#veO~3O{ z8CG<+cMQBM%&2LveQi5p0^{OCzbUUrnf-c+((0uy}W)8yC1C% z%{-Hg>x@_C5Q6b0ru|iqzwx-oaoxSv=SVkNg)>g+v!mZU!xKhQu?qckCF#>yva{lk z-HTr&J>;IC+^D%kDQ+?(j;#4Wl}JiR`cS_*ca}&gv#rp``q}eKVcc|JWaMy(k_-83 z@)D5aj3(6zmq=7W4HT zuQrVE@=b}=B;$=p0vEz*t z!(5FXn_Exm=&U{d6>V~&e2i6+lgM`E&85f-`(UJ5eqJo)#DouTgzeL z;RGN4aBC5sx^}2Vj5ELAEVvRB)O!6opH%1Oz86>a7q5P@Q=i3&YSx>QEMw~jn%QcZ zhSzg;79VH!SGAbx2_~)?KZ@wLmakBb@H#ugBMU6(kNxLWFHwJPsY#iq9mu*I6pbKc zCL`jhZQbc_XSw|C+H5N?FRu{$r5#V)(ANqIiv2}HNo7_{C`BASo&5dQn^x*YS6x?D zhqT@P89aX6ydk0{xN&+qf8egGuHFn+uW4)hQtR=gkYZG@yl8aPrfj?yn`9-VUQAtu zeX^y+)Xy*HMX7s=l$))#yJK>XJM;>zOx{$4%c9FtveK*isk>2&m>+jCtt_orDk=(N zv}IJYcMH>U9a9Y*>(AVs<_n_@RhL>7qkqe0=H5#O7+o+N-TU?*dd|I8&F^&QXF91= z+d1+dhu7sF`1fS1SPWyZTw`o(C`3OhExfZcb!tj`=;wlS{r`w3$eC>^b3bsE*dXFUutho*T1OHUrT*u z$#h})%4X%)#`wq%nP7JQNO5KPw<1Da89Oma30}e|-lz88LPA1rYlX|DhnbtJf1T>i z8_K5-4Ct_M#!ikJkiC=HZ!Ju(i{r+Mn<7X>*q`&ReLg8E#mHd%WoG7+udPD-{Iorm z)(I{MEbP+j?CO6SzUmjkB41cb!qiO799&;7u-{kZun@f~%FfQ1a-9)3v)@!0yD}b^ zDy~z$lWuUqId^GsxUVxrZq~*7@Qt~7Uk;FS>g(BU=1C*AZ4}|QUS2jf;n%1+DI)B} 
zk`)R8yqkvy?~L7LtL+iX)=BQMq>ti!ue6Lx$%mdzq+e)UOlDP|spDlaKXGco+@Brd`x5s8^! z+~`MQgh@QToWxyEUjCYdJG52g?(Gl-pJBrriCq<|_Uq|hrPjjEWEFP(S zALOIDRG=TF8Z|uXZj3KVON<+YnZ+yn!b@E>o3j3!L9(IX$Y~3f@{5Lw3Z}5I2y-qI z%|Za5S~qW+m^i~<7+JHgaqHR{ zGgRpxPfkhZd!)4ODb@4%@nY}KUF^VtfB)zb2u~|!)7kI?(Q<4da{b*s_Z;j)mL3yQ z_lL=T^y<~2ii)OV+rG8I zn>Q)r;z)2?C&w$bm2a4^vXbME-Mwp!Pk|ru=+UD?#FXE_OB*tm)qlxEs;FqdkkZ^C#QP>JtWSWye5!G}fB4(N%?r=Uc1=y)c2m**1L_mQ zp6*{7xxa7Ias{ZCA0`)g& z`POtC`gYoA=g4tEU&>j*iY9ZkmANy8aLa=8lUyy)B)iJ-NryX83fE(mV1Uo>u=;(0 z$;#euTj5o2{I?e#r3|Y-YJ}R{W@9~8Da%4MNrjW>nK>&y#Ul=EEH+V5C~vvlq|gv0 z_F3oZKh<|9Po*N-oNFgZLxVcaWyx>dxl@@l_&`BHL16x4!H_khd#w!jo|n{d+XMDk z+G|YqW%{$3@4uqaPGZR_oS~o6IOA+cBt>L$0?m3*4_2af5Y6a@erl0EE6u1V&OPMd z@DxUlw#}6!Gk5uFx`G}qiJ zD)0Cm=dyBgDhiSeWQ{}bO^7orYNBZ6Usbx8ab_ea(_!U zhTj?sp_7!eEG?ctG=C=W=#gplLylPfM~}9Z1wZw?W>w14qBz%I)2UFpsrdLC3y(j4bc`3HZlKhVKawxE65B2q5D=O%`*!ePOEm`Rp9T_Q~sSFd7 zPq!GCz?fd0gMvrkMiy~}ilkL9%A~a!S0d%SWww=D*45N$`=$4Z?Y+euEnDCtbtNf7(ppFR9NT8UT_d6~_720+H zKU#SH{OHxIWo;YTZ13%v=AW%Bf#kc2WGo7~z>|Cy5_FAE7KC)K@q{;slDZ($+nB}%awXo=mel@=u-oOEy z@}8lgyE!+WpdWEL+8Yh!x0{!$KR(&bns4_z8=o$+eczil7|kqE{xUK)r=8=Ur&QA{ z?diI!xV39duiNK!cga{Kd3T5C_Ggu2)~?6&NC%OKhuC|5zUbBj%6J*p)4!q8^0j}P0`i)%x?0sbG~oS$EF zPEp9*_)~q8Y=O!RrUWpXDZ*+%iEZXPl?~@^TKU+reZ9us_6z9(Ia;fi-xB;;O^5(Z zMn`jyu(dx%>GNDm%WEb{^}>O?;N!dG?6LUIfU*xDTxuT}E5hzM>Lv-7pHmRutEM=c z*}Cuzk>{WYzc@LWZa3xJKRrJiXmoEQ%oun_QNBYU8>7eJ<$K}V@{>Nk&BFI&NVmTT zYHs>$@lFz|t(#B~E5K=Q+wAg{>JOfd{UoveS|v{WZy{FcvmbiuP-oQfA;$pO`7BFc z5k*M_NDh3nTmdpUxdrkJsIch6$tZ?#3~_=Mn(R+ ztlRVF4ywNhiLyCv*mENVlP9|uoF{D@NFVqNKh+55xN(=RtoX{okJ#9&p@Y>EU!xT= zcd8~Xc?dU7)1I!ZZ$tb1ll&ps0a3U!XWj=|I$EZhHv|)t-^KsVR%eaoItr7`&i?Wl zxw~f4Zut-HMLsVdaM+tIDLslXy;QUPpjd@na8y*4-$aJd(le8OZOZ7k9!f~~u)qSd z586a)X6CQ76hl^WzrVbKyv`Dg`i1V*->&$E0a4`^1e+giIc3V${uT*mj~LwjffjcTvnxem!+eF*!7jm%FYT24T-{@#L9e< zTXFmL@{v`+(=N-=^XL)d!A#1Z6puEhYb!A-&dOM8Mg}gmfFWDA+DTJ}cdFUIvFQ`^ z4Yjl?Y$y+aY~D&NGEULq1q~o%(#&bn9oKTd_!g~{W_Jq4TR 
z?Z;?sa)2tv^}a^A8nH?o`9rG*p4=9q0NuwoY;tqq?%{Wyk8v&is_<|Jv=}bLK@suiCx-S>Lq7mR;9)7hHUTc zc5+>R_DqYg&9bsqhAVr}$f1&7&|%~)(?c4vMz#X({&N zX?tF0DUu%F?8fA>2ccP%+3xS{-HPjsOi+Fn+`{epZ>T_iMi#IpT?G&|fZ9q~8c$S+ zy^d@Vx-Kp*Po6wsH6v0oVCtDw;&?}!PJUygXXfje9T+;?JUroV-VFTe?adpy)b;~d zBO)jXLi+pr*;f8Wvq-;Y%_%AAvk>j^S1lR|dQBN2k9io`H@vs!>gc#NTxb9)LH9a; zj<%=X2QBvStAzIhv9kwXfjb9sCy3A~s(12r)Kt}gfa~RIQBhJ}r+WQ4?(pCMNbtXZ z&iO+U`8q+Wzark^rbuZ$Lo1i_%ciI<9YxTLOGpS84;*s`_V_wXH+&BcjzQLfDQ0V% zmz&$%t;Jw?|Ni|-$7$7v4|8*K$M7BOyz0-vkfe^dAu9ScJ>AUyD$j@!_$9fyHrCcT z85!(+e0;x$jv~HfXBQjQ{#bDKSRcJgAsG;DCpMn-C~!FdUn!Czqr3*;%+kIS z!^60;>aw!3Y_{1#Iyd^994ZaiV}Fl~#C38rBlXguOc%NmK@ECsTMJ4vi14!H@hK@r z#>UL6s|BiE6jU86uC}*6YQH)PHDr|%!b>R&;*0IHsQJ%bA-!a4*7$ejXGsZV)s>Z% z-Q3))C%>8^15NJW%bWfGf3r_$16c!P$eqJl(AbR*F#)tcxcU_QxxdcJ5Ld_Cr~1K# zb_*(Ja&`@5RT4~0Oxxxlc+W6e

eI%Z~AwS=_v*1p$$QSy^Jj9=qIu`>WLv!D?6q4(s^ z6tiP0C;tRfy88T$R-*{VHs}EB@9$rZ_vHIqX-}U%#R|X(KEab}{rCK!-OAGw-Jhv= z{rYu){`JN}9UGw{@@G2_IpfHfrS!D4pjz%pO4gQ_2cLDVOA~@~usK?KU0NE|IvW`k zg&FqKLe!2~O9t6c@d*op0sqeeY_||$1+d<^bBB}D($jNv!#x`Acw3tp?h`+xrG*95 zr%Uz3+?;uZzP)IYu!H2q>)oxtwD*56I2+Zvlc->u9aS>@Am3+6k(+XV0p7P;Opm3c z-+5i{Y8eROf{A2gWI-mS!PZSfR$xb(nWeYR`m?tgKlG7y1d-=V0|npDe2=! zdftRQE#CjX^yN#D6ji^i`a|nm`fhIfEiHk~rXNyLa9)n|6%9CxHNAfQS~FX9e}DfP z4Gk9;7atX2{?MDAAi^*;V03Yvs;N@B>fFFGG&MA+2tx)}z7uC)MpeS=1q1{hKYk4E z(w8rm4i2PU0pg~uIO~5u)O%o{l(Q&aiI|R?5%l|KYFdwPKHS5>PpE9hc}?p#FOZ^| zsKhaT^~tNpoP%u9>To>w_(|}rpMCz;_UD#zs`dPICkOv#fb4AdF2yR z*^>@<$-~g>$yg8*-|QL0-~WM@^Y%?f_QC-oaqaI+y@9=Qk~R-3BB(rN(xK6FC&SO6 zxcEIoirlEX8aI;USRU)XN>kY1SFc_%F)`WL*Z?{!OR$cqR|{?J9~prf(bd$vLdp}c zrjYs4(vnj|C<$;foIwf7GX zYYhcUETSVK?q^C0gsNe@24@KDw1x&mbbF>2FLdDX@$rB`!aTlycoPYOSFfrSGDYa4 zU_ucP5b#q!%F;;R_(bt}Hqg~sOE;ry_NM1}dOd}`qod#UjBJxZx@|2NjcQ*7A3g%8 zTCNu81WryRpFd+WgdObdO*H!XRo8mePi?p}5WhY;dc_hJIN4BMUcO%#ZCotvcYf-; z!Xw2Tt(^6z+(y&Yb?^K4U@cy_16QtGnJH>B2g#pc8T)&c+Zy!*BjW4$25D&K*RKN> zqARWvV##0wdwY8~8-@^g>$sx>KSBBNqep9K{^K`;D_Z$ObaZrUdMs%N2M4Tt z6mgxPKIP}<%TE95?*6^vYI=%u1$-alLzm~x41%6qcj%+!*(LAT9yx z;3UWAj9Y0XlMW6_$b9)z*t8Y%3eUfv&MAyhlo^;iD<)YBf8MHaIzl{TQq2jcl#w;$ z`FLTXQ(%$N{QPBDMF!DSRaI4JQ0XPboY_A*Fo0D+ZmJTioD~!lG&9oC(P2OSt#Cg6 zCkqPPQg~0>isR$`wdP8LzNYL>3kmOo0x|>xJ$;dJJ-t)!HGjJUMRytD;ir!1{Y%V^KmZ)Qa+8%D7i}R-3sW7S*WubL2Bvh==HK*5Zc<>+Mp2~ak%fV{2iT%Q{iM0 zcVgw|@1Austaio`lf*nKXR<_Fl`H> zDD`jl<1mCYH8lesQ-t>dhRk@=3*aqW-WeVyZqUp1Q4w@4tOZ#TOo6ykgaAo+wly@i z0rsmKBm{2J3uvLSx0c)+8P}ohgCM>$-)=qcf<#XEU4`*qX4T8g935K6O^zQdH;YD# z|3C+`TEi28o&$TcsED-ssj-0pA4NE5t}|>O6f(_%dcnW``t_@%$L6+Y-Ri0>+RMSA z$WRb3g}fnbY_$x2R93bN0i&Qv2fx`68X1DBPtndxglcULrqN$syi6jhi7MJ zwH{kOG^=3Os8oLGU)FF$DgstACU49hGf*$8D?qRWqiA{vokk0 z7Z!$(7HnFQm}5)Z(E-ix7JY@j&tf;2sbfAT2LbfMzkjnbz3gwzZfu0P<{{1q1AyQO zZ^F-6rFQy~q`Qoyqg8>$X`#IgcO1|Rj_@9x7XJdQA3SLL(y00- zG->C(3g>jSOLj%Y(26U7&(@J@L&2|(LD|7kJ1~KGsoubvzI_u*Qo%KWNYp0Ed+B=j7= 
zx0aO^*XdicaJ8BAlJfH36BD1{3xz6%010z+>+JR`1nrg&P$1l({?SoAeSJdJ#Gvev z{{GVPa_paJZ+iWCLaGfnK{%*0c64*QtyB!y+0U<0F5SP&wEA#tXb8s_`UrSkfZAb4 zU}L41l~Hbx?{;nYsH&<43Q$BTXliNw9v>&tNOSe@07%%}R`Q)|dk0bhOtiF45-DIS z!`xEMb}==@IFP?jfC5<+HY=u4==)uPt+lnFanh5^A^x?bWE;Kn?5}raNr^vF-2@*w zhi$}CI%Y?kVO*TpwNNwZ;OW9_J-I{oy|TypmUHz)Mn*<||C7*GSypllP0i+~`05zX zkXX?4OB594s2Uh3M%ZRvExStK;o-#!-8@YzY3CB>Uv?6YSH6*;{6LOsxQ0v+)^)=Q z6WXSiSB*yQM5QCjxHz=FPvl2-wCK&7g*q>QNa*P3czE1FGQq_^wSWH}|NgzIiVAqH z!NHI+5x#vJhen0~l_+QwIH}`9HBl+0Rdk;;wADiNsL`XOfg8%X#sQ0*iG}6RQ;I(7 zOIlhQ+`@(kTtv{xHwat&^Q?QEELJ+U4m}*mn|dMQEK8jm)EI@#0J56k;azIt`_T5F zudu#jsiLJovS(@W2HdC2%U2_JpJ;FANK@g|eEISvFsv6Z_zbi9<|sd4-@JKq#q~H> zGrQ!lZohwf+sOj7zUR-)&CM+=I7IERywIPLlZiAGGW+=j1hjQ^v$L`w+%`O{IcIoD9N#t$0}`c7ikgDed*H7Vll&ZXTeZ~Y%We0cZnU3kRX zfwVWFOy^gHy0;&)$Fg6)o+#P4cD;MM+Hf3)N*TRJAj$lB!`N#j>)^n>O7e0`oOi;u zb*?`BovT@rd`ApTJV(rU?b?mFPKcz5{NO_M78e%odrJKmIf>|KId52iek#KnpO^?p zl#G~|pGFlkSBM(%QZ2)cdX#}Hu2UERyGBIM2L=+WQLsNqV=yY(S^OgJA+8)(L4NA* zUkjAEUJ(LfS`9uYNTgwN(7)=yh1IgWyr0u_qm9ZRW^ac1i3Z4s`yAgoKpXy&rbEAn zw=#|HUHB9r%Qdp%3IG!Nh-&toTen=@-0r5Rq84`#1={WQ2|oF5P1VD!j)+{sL3a5d z&O=(cRQvX6{JFP<;5ko~KEuQPVSp7dL%IFFb(T@um+|hLX?U+cq6mNlbGW(?Xc>AEF(?V&z~{6!mUjQ9wa5xCxG`Y%U%JtUrD_wEe(#z?01aOEg$HVsJkvd+EH@(LvP={ZHb`m-lo&P z_{-{z!aR$MZCHSQB)CK@B`oZ}{#S3)hdxTdNoRD{>GPK_5{#sUI(*a-;3a(B0I~8d zlcezY$@<#bm8gpH^7y#8!LczQVMoaGOvPyR$;oyVaoHzV<2qFyK787KhpS_;PM+Py z-hQ9VlL%>2+HM6q0&p8l&OmfkMzC69m)q`Qk3Sk)GbtbdlZ8c>oR2oXnHeRTw0?ee za!&Ts`qpb~6hHNg_P0!|u2etHJ7u>oAs5VtQ9CizxBJIVrt1<~CMH|160*HJFL6l; z5VGSSm=MWmv69O_cmOetKY#wr&YGc{Xv5|6ii=GSrF>)+vV-u*NQsF(>y69CK!*dS zcX+znTc*cfPR3-$Iq~KMM?Hp=o|+mL>y-@ot=qTV{?lMI0%R~~nx=MjGW$7h+z3Dk zP{ES+>xk&h4$ll7Y7X zqQdhQdKTI1Z%@O(@ZuJ|kVykE%}K^QCD*coE;({|WMpY+iGdh5L0Q|-kl0(Pg_Zg_ebnMj`S1awat1Ht9wPCD zWcDB(1yYV2N?7yh)1-IrM8(A3$HoRo^$-W2t!i?{Y3t}LE-XCdrv}2TlW+FE6&8B+ z>eXa1dtS%{mW_cpCLsa(px4WnrInR;w9v$(?x0A*M4d`)^4+H^*%1D4Y%26AjX}3r z7X@mdnVH#*8#iDosBlt#-wxV(&0*Kl*Qc(grUrumR;{O}2gFg7@==Zs3cw(HY))O> 
zC*>?cY%K9>?BvR(wDj~ffT*1FduKpCK{U4%)z?q1?E{7mk}~Yj%wrRoT6L}Y^-Hx- zCmH;0I=W8FGM{%D&m8M2#y6l8K|6sr*T}ua%bQnLHaNf2M8fp0gaC7#)OTObypCkl zy`z6-rHPCAiP^mE!pnP~HEfP-v0-$rdevy&4lAA6#x5%)3g?50VbCKzdtt zH1dxi2YTj^42XX3-jQ6hzIOfLtA&Iv1ACN z*gTQQ)5V@-%$+0{R21QYMm1D_=V|9}^7GqWoS$vMNspLK_?7!Z?rc<>5w){`*%&#P zjkkjg;YD0rRsg}xOVPjyyPivtbgoix%%VBI0(t0 zy}%IosPY9m?giH9<=|9QR74?s=s%cYz3=?>3o@@yr+INalHPYMu=t{9;n;%};PY;5 zVj}N(d(cL_y-&g^xgcKV_qjV}#^r|n;odCZ=0AF%dP3$nR%!g3{o2|Z5gA!Dyi-&Z z2@z3OXJ;osWEGX)wiQlZl#x3;iCtibfYt?P7CfauJIc?=N$a zuL;qZmS*bYBn;0SLgH*}5Fm!l4D$>?ci^YHX~LG5F9X7f8`SJjx$7wfj2$YUfgw2| zA>q@f^&QWcvF|0{vsE~+Q-t&I@Gz1pvd1F#`W0atK>9r?l;|0f0zY_S!VnZIW>pW5 zO1bn=D0omIojq7VP^_y%`sD9b=Q;zcgN_EuYjpcf`Y0fZ(4(DPTVvLJoW(yyMiS=_ zeT<1oQ|AW22l6EHHOVt%QOQQCG9czjnSUleh>O!*P}PC(DMmBzd)0#&2U3Zv#H3{z z3{uvh1GC1%r$$EigoT?dM4yX3H84n4;`rIq1NS>%a-*fS6}WRtOUrPna#tZDn)%+J zfq{&XH4s0=MMVeT28@g(e)uptJS-+5kq0Rk1 zp%!m~3TLhB%DXmmIwmHPq2>e_$AF%}fv+}1h>I`pc#1hrO^l4-S?$WK@Q@G_L-Yf$ z4$2ppIULi05(2{R;o$*iEl|CR4pyd-y{D(A8{?IavIbxpMKLN(Y;h@JxmV2IM&7T%4WD_3e#~rvUr;`Xa>rq2pdwQTZHY<(vc6(LCiS8zeiD zdoJN_lx|sB89cW_omLSo-~g8~{N=vizvEkH(R+hx*9dXY2N5(vEtD?)y$kLNBL)86 zC=id~@qilIV|jRZ=s5n3`O}vd!Wq^DhiG_ z0mX-`Xm`?0Cps8QSsH@_1HV%5J<1;R*!VLvIw~$A!WP$wI7dHA?0SBS{{Givem=h1 zSG!*nGGA?0FCCwp08)KsZVvk%qQA1D-0RR$zj?pe2``qt6MVWuFqH+v6GSlxyurSL z#sI=GVTh=d6hArs%nm6DQlJ0*?ouz_cL*3B!%+k+EiELnPA=W4+7L7gP)C8|Z`*kA z?+_?Gg{}}A+yCbeY%AMRz17v#Tl7(r4wZWRC0SI@M%V-sL7}F*dbR)e@6YPopteB= z1`evu9h{AzorhQlAa|fY@$vECF~oI#9$W#20AFTTSo6?(9w*uT*0AH@xDB)m^HSPR z#B}uZtINwWxWnzZC~PcHB8-<;R?;XI7l#7Ng2h-uU>_7 zZ#boTkt7H>PX^&R5TMiVheEai_UMlvKR|8;MO#s^{p{orP&oWLL_f{T#)N-xg;BB8 zZKOKD_9LkWFNFL0^&r)32>W&a`t`?74C6_KK1n+^a6~wQhf!wd2U>b!d^{YzV}t2f zP+(_j+O$MYq>OfzNEs3I(^S0%GM3lY!5$Ot)S!Ffep2 zI0FKknMoU2!y%7)^QPwIJgy-w@Bt(eX=KDCzGMufPAA`o^C1;s7Mzi|iNsPYg9$ba zzIlxsA85>MY|Ycz1YJYf&8FlLZMZ>ZmX=>~ayo1)01HAPgQN@Ww^R@8vYzBSFyJin z)gXh&83$xfQbNMn#RV#Sbo7IA7JFRhE&9j#Ltav;fI#*6si7V@<6yD8u}V;qwT9#O z_JIV`o8vTd(Ry)mazfhz3DG|XLIq9rNNq!iSJqfG_-$k(DJ;m 
zbqSu3rIpo{DWHPzh+!8M6**X1;>u{Ht2aITb>G?PL9UiNoOYt724L7qo|2kM+BwB# zEg7XS?_7-`4h4DFEraNom!E%yoQatkS2nX;UjPOhg!=Az9{^*?)5?Pmn~<2;N{%Dq z=qAAwATtUy?>(I^*axk%a4-vIB1Xo!{b8lQ+NZR1Ehi+Fm>&OoP?~8owgbt$k<60r zD#as0f`TB$x3{;`P*WRMIhldOSVWPBCT5Ono4-seg|=eerz+CU$si3-awH7FBIsAI zCS^h(X(Jr~${H7Y0I`8Bcj@|a(P5xq7c%8jA@`xN8|dlfsB;4ofH_uaH#X-g;W$|n zLR=6{f&d3+G!DitWF?>{f%I_g+BJ*}m}O4#-th+rd$I8ZKHZpmj79HcSYepj+1VND z>;HEzV4eUI4Dca^1A#!&hU+&1XKwQFaxsp(;4z*)dw ziUAY|(C-sn-t1iU&r#uIU||WB{kjRPIB*RSw_#!JCQh}!sG%2}*$x3MVEsf(ny1T2ZOux`n zx!FP#AQ*-`h}htTZ*L1TM$b9o0lvDQ$%@9t(~Xg>$D9Hc3R~7r%zJIP5bpWm*0hP9 zUfgUfKqn$($LWgs= zFdf!Lid)5lalZ0_X92J2H5G>MYw75G0q4!i3N|_!DQTcNZ872zwADwC@NjVf)ld;$ zhARQ0>+636)D6ywA@R+5ff8ek7XlG&Z{EHI`v?;Ka3^{!*Y>^8b+^6H!IM>{QIU}t zFA5bhAF8UtE4Fc^<2r-!@FI1AMB8<}7ciY9(6|o10z6j{+Q?|>bJ}|5hGsZ?{NiZ- zrj(Qvl)0VL*Rl1L6`KowRT%m{$xPE8G(7W5bx;<~yt zDXNYyUIcXkEdjU4PAs{7o`;ICcw~HJ#HvCc4v@BnlFf>OAORu-#uLz3EX!_+ih`X9 zgVAB4>M8{?@EZdoBfpT)l9M$IdkU%kX0pPMBQ#$grSbtiDae(HC>ooN{2AEGpxBvhFDhoEO69?YOVK70sDR~=a2uqU@) zM2a8)DFKUmT5aecngqoS%^5Zf_|+h1%URdCNu+=-0NN9K><1`}F%ZEFO-!OAeFNzb zxmv3b@?eqnwJX(g-}sY&LOI@|Z*GOuY++;b zFn=iHqo5={0#Ge@LEwe_+ua5J0!(zl`7T(}3=Ba_7Nhf&W9~4zEJUl}c%Z20KcJ|7 z$=)cXS?o4B3>4xhAOIh`*w|P>;RhP+32@unTk>C26DazIS z(9m#v>?M&xdg5gosb-4t+xnJVdW;gsxAJnk$r^1A3bwUqIt1ON`10V(HxU0JUfn(q z)=jL^@9F92dx+U@S5i9}Ntb|VgM4wBk{YP&w+_B!dJz#3Bc+Jus7edbF^5WUTNaj< z{NP+D++Kj{m#EYf6pa8NC*W&e)>Gv;fNeMxG;l(260^hVuL=ndE#7->t6Fefm=>U_ zODTS5Mv-HKbPB;BfI}#$R~npgupdC48jT?LMq=SO);2x|iKjgE?24`ojMkE`^0t7{ z;Vc7o#uZ&q>LT8}SzTK@LLvr)u0BF4bG!rl0!*)>q9Op4kgEcslb{R=<>M-1KI;Bfg}jjx-zRWO*Pr0PK4wiBx?FZah-24}*&)UeJ|L~98FXoigA!X+W`QHB-peP%zpUy-Hxx$qAw6%`e5jiAjs zI_MNKt9+`E53?eOu>3-+@&TzdlDBWyq zX12D3!JjcdqENj*f0_%^f;R&F9hU1*dD0Pk9nQlg3Yom-Yuq&5Zs$tw;fz&+tRi^1 zcbzTZNGF^Q6%(VzF75=%Y;UiftCc1!cBu`(5oOm8_d`ixEK!7OL1*vpZ(iDk4hM%@ z2D~Y~N78}P!c-K8=w7qwW$S_tARw>-Ra|mK>L9-tB)7>1pYiz}z?R=CDhNVaz*Bwu zmef-&oCFbTuM2)Gbp-qlRQDR6^@jO*}GCSV-3N3&c;dgkWm zfs+Hzl1pce>vRefwu`I;Q4w0?-IsI0;~Nl)@dIR~p+OO=ROh}yFP!mb!`+iZ?i#8D 
z=#Ilw9f4o2633q!x3$`>2B40M06Zev{w?=k|9qcX`G>*hQ`9(22_SWlafIFsAz#4K zFm>}RJN@%}6T$2m$a=K9y9+HC-WM#>J9iEfPoK6BAh>f zKOeFgTNBm0{}#G1nBj>iH9iM~#z6su3yi#2CF~R_PR<`(`W!#da1H1(y;8d>-rf*j zk@h)u1#$iKRO;*bhr-JSG3CUimI{s@OUL2Lw7WPWV0+z?HoR+LbD}y9yfu@1%+VUT z1)%+@sp)c4HHDtT-=H@~iMEH179USlGO1@|M56FMFffow(p|4m2S%=}I(%@0_$NGH zfWJFl_28=m-?SK$f9pUC>4&F~3`n(RVl?6CGe41aTxd zz5*YjBEwoTz5#n50|kY4`8f{b8$Xy)O;LuqS^(3>?Zn^==lm{6fUXkIp}Tu}d=`_e zpc7G0Qo1iAqFo?DS^y=bh%Cc`1$h%i28sm1EIS*-8WflB+mHxZY2l=xw!<`zZE3sm1UlAQ+>Xxn>~1q zimF7ffK2+-2|gSqNsxg7iACulCnpD7yRWYgf~7h-)UZxrS`fEw9UVha6hFLCtkmy} zi%}mwguQ*c=v)oq&x{PL;CcTDv&dcmvoKrW!)f683nuapP(X@`clxjk24DC{CcfDeEFiY%o95waY2eG3c-$5j9~~cO0Afl<_v+-pezcmvyLTK+ z;mOGfY~fF;*-9L%o14DdGnj_B@Z-8VsQ?*aQBe>UU}QGuCj9uJ;Aw=jO<;f+ ze$ae~0ErEtZii7$zcsAONDz`@loP|FwWMt0q*eQ{Qwnvw_g9A+n`3tPs2a%Vv6{Uz z0M*^STU}au)v*11hLAk53*e$7Y?_PwYZSonOi@)uB1iQd z+qc6n0FU(1%P4{wSwh3<2s#mJyn#Q9CIfY8?idejN@RjLoXq!)Q8@EqdD-Or3RlLy z@4Uj8R?+AwraW=S%aV0R9PVt< zM9TC((s0NI9^w9fI$xFL%>LNm8y9l7Ngg@^!p17Q?)=12M3UnIxZpcdw;&q=m(E6% zK_-)%^!dnZaZNy^!IA)?YlNY_eS4x)!L^*6O~rzzOWAXF&+BnzWUOt_U#zq$fO~yASoYJLpTJVgp*T5=US3_eRl^S^e4O&VxM?e8n`$4WVicW3&e8$Vo1Px!+8bT9z#(JDt-~r0D!?S2RmR7U zcS((x_bqpCl3G*YV#eOR+FDuwQPJ|yazynkeWY|63r zP)h!;Qgn1D0|KI>JR%nuu)7Ynn#nc$g@=bjB04%=;)+H_N8zTd)lO^=*KPHaF5szj zI52SLdlWdz?~(~>89l|=&4dVpqo6?l?g8|LR@q7dqnN6BV+)9SdM^%DI_;0ISBr5% zMPansYpO`=Jlqw~p6@*iyA{MZI9T2$*AwZ6WiZm9SaEiv`Q!HY|61uqph7bR*yva# z1(k>w$dVo%X;;UD4gcA~nnU`HD^!%aVH5*A>b-g~ExctJBAp7LGv(F)K+RN?mKM01 z_-s5#Qi+a@{U#7S9g{q4T5d+!gP{Q+DhN%|+t5|jF(s$b)WEdGA`yq3uCAApJq4Fr zgaeURt^Yv|uf`&(s*b%2?$QIM5Pb>ZG0L@5=VN3NHq$U*af?9QBKqt8IE9u?Tn=|w zP|?e`UcWxO&DWrU!kiW(l*Vs2_MFKosgXW;j0lTx_*E&aaN7opqln?p`fR%fYCCv{ zhO-S&H>dI5F|M(%n=ad2^|#*#MXC+Ax*o}vj*N_8OGSJOhEE613-!SJ7^?A76& z!};s>-@9*LYU4E~5Wx#!VJZ3+Bn7N%qnvtw(WwCQ(AXtH^6L}`iuwc5WKr{-ZKLyX ztI)>A2ID%@&I{4?J-aUNw1`d3F-DUG+xKMs>=&HaU?Sg>s$qqLm}aQiF(F|Gga%QK zKC3j*iDD5J{Do!VlRFO``p|A!Uw3kV+UvGZp;h=tbcG{ZEyy+({^ImILoXG)_p%=B zm_d>c;s$jCtYkL#G%B%4-pk-DXlf$bMEQUPT6Y59HM;%$AxMkf 
zHsHCiP0WdLaG`N>B)4+=`r;F4EPilR(^p5fnI!i-W&tARBtq=5vsWh)1%rc_D0L^0 zlXXt6OL}#DN{ZFD?oLAQ3vV;!Y@7r5R0~oy*S%#$3Ri^EU}g2|G)xQ>&ZTx?lfaNL zkAN!40OH{NQ`#PiBvamJ`!DetXN>w zx^0}StlPQq%M;4Aq%7fAM&{(Irw!a{>);{X+WsfCwdUWnVkX?k8LowGzE=3wC|#dN zN85^N%h^?pnhy#&9%57Vc=;&DTU_9PS?yC|u}X5QNN14ax2@ZM4NQd9_Q<^&^*RIE#QjaO7x9(4xS!~Z%O0`ku#5tt^O7}2PF$# zW}&`(Q%5M}&g+lslP;V;k5ZUTyBvPQBJRV`(9v*usW-W{KafFv{rWY;9)B$Kp*OC4 zn{hg}!>PxE(0Xp%Y-7Sk)&jn9*7!9`)=a&395ncLq?Q+c-ohBO!<=YIR`K)J`}!tUK> zF7oi-vPT%(*97ignJ;{kX&h1KE(pFCM!axhp5(Tc;@eAGoZGk>ydx}T3OFM?vsp;6?O(X-Pl@YXSdxml%$A_6%9Hx~ z2};+q!XSxna;NJgra*Y7a`W>~>#no4g`;bvAk+89<)QSFBrF10BFh^Y>F{5J4+;0! zty?QL#V9`46M#xmdlIhD`2gHyB)b% z>MBgPo8j^F^(a_G2$kTL26Nz{ZAh1u+_;-$cCM_Ebok{CV+9#KmH~Uz`D%|9`(CPl>25U_u+Ql&7ymtIulU*2+4Tl~>yzK=$;5GnhKYY{p{fhDPdU*a zE+2Qotz@^E_D-V-x4WvDhHXBpCa0ze!({480yYaggUto$X@;gM3czBEb@Tq|Tq3KVO|uDU&9vWqQi#BR zO&I>nX!FHPicL=DeiQJ%b#?DDejcWnm=GRly|L~t7N+~KSvGcT*+i4sqE9+aKL+oT z$#^b_k1mO%An5bp_<4H1h8%eNR$KCD$+(+`QQzCQQa!#;aR1XPdAW;!;@wiZs-38& z^#c*X!CXa25ogP8q6?mOcPIFJ-rU9F*BoWX*8^2Amvv&xctR#D$WF<}gB}-?e7VE(yyZrBU z6sy?^;mP=FCm{(qlD6@{Bp!5akRgf8S?{`dI?KI=4Mx3}hk34g8c ze(W|ApfxL|?Xr|26in*0E~D=vBo0Lb+5<9$;rvh z?XaL?BI90VN;i`zD;dEDRZckeVit;9K|$gC@Hc~F;ljE1iJK&IQs;i}Z&QI5^KTUs zJ`5!J+uPgAE6blWZU%J|T9ALKs`#R9qoXr8Tw*!twZdz~;;~t|kmfybk0+^fV0hU5 z{PgJJs4q3mdvU@t#eZLqw3v}Z(b94u>p{HFN%f@MNl*6v>ejfao12@d=@h=qX}WfP z@DmLsC4!Hngidh$kXfgr#l^)apVOJJuq(bJea}iA(^0u-55}n1uT>sAw5#(PKH(i6 z9_FCP&Cj=qQ03gM*Wjki%*>QIYE@TIP&|89PdIs5gQ(6Zq_-jW}u^c_UzfwZ;|>X zt{f$fKixjZeJ1BC^&X5D{k~^|{66P9EN3$^DN-+9sh)?g4;~~dgqrsM(LltBI82=O zM){sr51N=1cBYJ1yGt^=(T^A!v@h()i1|ng3jV2{tFuh4{X14uTj%rm@pSOLkfLmB z8=H%zW*PgTVs=46r}p{IL$4&o2i9d(Wo3INmnXlN7illkyEf^);W>Gd^wl>RnLf26 z#eBcBvva)a<;sH0#gXsnf-m3TZ{Nv&`;pzz$LudaU(WoFAeBCxn$t1X za~gZas-)oIao=6ct@dgAd+y@HMs`KSEkupM`E-=;*v@X>OJZVG&KFM}67Ro@l87K{ z$jf>vm{4d^Uw84tzphSNIyLp}+oIXnL0HUn$J^}$0jE=*llcx-j+KToXT#l}KYy-? 
z?WCZf(AU?OEs_vN5K1xAFtn%j8R+ZV3MZ*$KTuX~xM-J}g-{$UH1c$rtXp5VH}no+ zZ7CQSU^$&AFwWKU1euV24Tc9T^Ke5RXdjQHYL_|*#* zT?a=-j>ApPw+kvJKld$Z|=ZRs4)UcJfUn%GRR+DrNT%KNvC(?t@D1Ob~zr!r^V7cifvibh7st|d{C zO!>LF>3vIjdU{{$&#Tn3tDW|iV=msRWe0Kz#ty&flzRB^;Y(3QR=PoplHPQ4TIn$- z@#HCwZ@N5F)yWdB9wODgunxDIvL0BJ&{cfhZfRX|#u6>o{UUOH*zD`OR(7&$AZ-^Nrmr86Ti>&_z zuC!{BiPop-_peZSwvCbq%Wmn)7nUzy?lBxB)_ct_WkZVa-HuIlW8_*Fnq<>0E0q;- zg>+Rh>Avu99G_Cc>+osUnR^Cl}W&@wt#k9V(4nrQ2{E$qar60?A|>Dw)R0aWC>sQz2*MY+T%QxtZnI& zc>{;ACbOi@dGcZGHYIi6^&glxazAkbbc}tDGM_YCSXwsZu8o#!>FDsgXOsVYZQ4&^ zRx(_C`}WMzbq&=i19#+SwBm6NQC^zQ?^$j3gmZ7Uw7Sm6{LE*L^OIR~UqCH**YbOGrqF ziytp{`%d)r%`|dQhd(XTkDqCC((YYyj&Be4(OXT%+x>M--D_s!A|=N_?oIxCEj2Z@ zd){Y?ic!?ml!sR3W2AVJ)@S!0bnu2s&AC*2A9x38=Y9NmIhdK|$T+^c)Beln-ilV8 zaR~{PFINWGS-ensdP|uJ7j*{f?=I@ATUuITVq#d%`eeM%Y)p1~J8XqdXfb#X^n{a$ zA3P;Rkt6RHwZ#V^A1f=j9>A?|@Le(RFAwf%qhw|_HZ(j<-tbzGm^@NTv%6j@cm*c_ z4(5BfU*UTg$bvKS{(>qPdGqE?_xS|N3HOsV$Z)brJKV6&q~zJ6U%!6svNp1JKHUh# z$X__5rwxrp@6L85D@q@--eG6&DJRoU*xuQ3T^)M9+erH2g%g%t`vpQ;x-KK*-@?~A zLW&y%4V1S{=DsqLG_Q?TP!Tr8gkugG8uC@t)G(T_Q&g$@JLx%9j=7$#mQjQ^<>lqE zX}1@I8S)Lnr<;E_-xFmdU}-ihM%=o!A*L=VD(ZHyZX~Mpl#U1xje@hY&?R0O57asf z474sAsa|~|5RObII6RdP!=W_OGoXw39aq?1qF>-F?wz%P9Q2*y2y7s;-Ake=)0ZaU zy9cFtc+CY7M{ir#x3Xu*M^k@MPbei+mxwMN~q_dKEtEjzsc&7kBa_^qX?WT4R!SOKYz}q($rMTh7TwZ1Q!<< zZxt8cXllZ|eMRo==-Qf7FtQNUk%#tT?w)r3Ew9-8``5epxMHk#rNYv;du0Szb-3T% zxl_}Ff@B)``gM3%v`W^JfB1LfXV0QSk76%0fH#Pr;*ZZT6zdy9rT-Q%i zH=t;D;(6)l8#3ltMk-g?YQ-_;UQxm|E zb=j*cIM0iV;osA&EPufzHO1ynhJ5kumRbnRREIKs$f!@An7t+>JoNIBRZUDxTwYs= zZJ+DYooxE8LP7CO@D8@wAE`2){0S*9H&ZKuDk+qX94nDEiH=O;n)Ezc@XPTju z_L?IF;^S30Lted#X`Q`?PeKrk{J^D~Doe%!pW$M3t0dHVXZnclh4A|smgx@v z?urUIAGVNFhnIcY_{cZ7gHPi2cev3&JAW^^F(iUB7V_#QQ*7%e4Q__K{9E-r9#~{F#!1F=J0{$f%M+H0~d_%%bU^c z*REaTOnv(AgX;YIkr5|dQV%03DK52?($O_(B(<%ugdp3$IU19oCo!$&$9o$-mSy?{ z?HgYpJ+_)}Px`FcOh10?_(goXR}pRrXKbq?19iBTkyhE`GY?VDy}bh#DW9=v$HPsg z@TS*fg!RXN9ysRxhM70HSa-DuZ)*OF!EJdU0{Af_lz@QqI)T&bOcZ{|E3b?7As%ga 
zJ-xNx5ss?#^vu#;v!U&C+tDpu7A3e|=O*>rgddkng_G7q>Mv@0O^bLoBnX55xm4Gm z>-bXLpuQ*2>ig*YDD`6aWO3~Dv@9f}B}9oM=BJ7P0Frv&nTX=8Eli1ywzNo%c_VJQK|;M$5#<$Gf^R0v7Ew{kI~Q~zhlnIyIDMfJFSMRiI3gC7gw z@gvV}p!DhINgr0?d~kQx`D&z@-|wSVAtV=kI6HHH{=9i7_oyu{I5?O}sxPr~zFRH% zXE#T!vF~&^zxKOT>vG)*F)vz~STY>3ei8LWscN*DZe*1hIma8O%~L=aCNh_ z;SMbkeZfHScG5?+?B=PG(6%`z=t{NT{iY6+-y1Ez!F6N$alcAgB0=u3xki5f?c(A- zO-+SB|7#Q!R7`foy68K12m<7re+kqZ^4gT?0`!Z(!wzf+4U2f{8P^ue#9#V zdU|T+E_1=TYu5+@7E4FAif=;!W1zv;w=UD)Tm90vL^SQx^yrZ(;(m4?Z%21{lUam9 zrjg<7*3lz>W9~r_fuh&0Vn&5WaQj#dXibatq0T-(Ua+t5q|(F%fKs{ExM7lir*+PS zpND6vBBwEeHUGn-V!YH1TNeqR;`M%`8zKQbe0+Qr?my3L;_6CzgicB=bNV&d_lXV(H)2frHjT9?u3>HV3VZE9+I zKR>^en@bQ4?Z(r&+3(})O2tFwf=1O|l@H;$(UsJ`YHF0G&Apn zV)Au#AMEYF&deo z)?7A6%!)-BNjPJbvosoPsKf84qdF`~pp4d8lnk2-($&m6igHl2nhRDM@@Y(r9-$Hf&#RBxC>j$;%;m#}<151uM{nlhcg7gu< zi*a0|C&Jq;Q_F@NYRU&cGQQrWkI*l`+v%IS`yn7XHB~3S|CV>1$;lfZyx1^4p;&b_ zm(mJ$0;HvYtN(LfS49eDClQ>n7X>gN>R?0trwBsh}O!}?} zUi>ukSA&tIhmi!Dt@q#P$-OD42=`_>cq;z$b#2F%PUbCzabCO&*vYl4}39^H?pOTTaAR_eD^L>qOh*2hCLIt`$V3 zD(9zEVMKK|Q1cKYC#MPX`i-~^n-?1vtF>+sK(SLsBGVoonoQG5Y38B{0zQ4p3U;i# z5aDpDZmgLUP5(gu?~Qmb znW&GV)@4#xBXNl9qyyyj=PabY+i#UDFLf)nx#qjbk{w4jnEv|xYs$3CI%wuQf{hJJ z?(NOWkH~aLJ<|66u*4Ki#3l z@@Kt$^O>!={6OCmA$r`$kC%t_0vk|ekMBAD%lnq`Z#(YRIsXzqkffu`zKq#eNRPnj zzHVuQr2tyO;BS0)%WA$5kwQH=+AZmxr=iw92IjQPcf;6=2U+FmvF_qhxQo0^`=(jg z;dgFi!LY6Lek<24qfiCTSWQihJOpA>0HAklw%TK^H)oWwtzPz%((u*e;}eInicy_K z3Gw8*ta9pX4_ClAP*@9`?HChO->Y!kWe}cvOp$XqM4K)B)j$WOYKa$= z=T^GkuN6q@oVZvmIZW8?k2m&Mt3S7a_hMU5+jhEVN>wT3$ex*Hm|0s}dyHIEpmP77 zju{%}lRES-b&1?9u%dX^V9NNb1i3$G>^`?}vF~teIarTppyBM~+P|6Sg35^>L~~Kx zl!OK!=Lq<=`oC1;wECGx5qDE*`@l- z#Mrxk$yI|cyy-Z5!RO$dg<)q<8oY@igP2jwQ_h^ zeQhElU-=fr`j*ftbFR6dd+VUC|1Ymqa{7o@n*Q(0sN&ntMs9sQe1MBLW8iyna0uh+ zG&$4C@%3zpQTlY|d4SCMV))2-{le$L(z9o7frMi(eGkgM0;q%8gudvS!{t)Wk2jeQ zes$azBhR+xy?m2E^D6X+aX!tou~`~5a=2q#q?5u5R2 z4*&a#$G=l?V_h-fFJUehZsQA1Si^FU2vjF{bmyK|w*Y3v+v< zBmup%F?O^>3f3xZlip`5j+_&~@g29RbpFdw$kemv#~=*;`MBh=X*g`MmVEc|9VIh0 
z=ZPS{vvRLhX+e|}9wu7WR=7H+$kEZN#lC`n0iq`5kvb6?LOP1ud4IdYeMvP5A4>uxKjL{P{cNH^WLOmW-cFI zYkxW`xah2m{8>@IMJ!r(kL**%mkx8`{Ql)RRjlq6xXUqsugncib1 zRNYBUPF|c|CC*x15)~08VpJt0CH2r!02rWgd&^>7=s3)Z-^IVT_dI=mc&Wh5aH%;T z>;^fC+T+|#=;YyyRpkUqp!Dxw2ocQTq-ESdS-u)J{0*I*w9&-_|uQ@#-ol-~U{ac&VX=Uyprg@xn>t z&$X8?MYYWL3-Wn+3o0B83bYA>&C2vaDN0tW7+r(u)tZ~bRKA8%eF)h8Q6b9Id%>mp zA*bf^=UedA&C70RMqQ?Mw6}ry%5(P6VYA0QOKBbQ-_ZIMb_TRx#vkf@daC#GxFcaj z!~t1h$=h_5_-tRK&i{bKqP=y_iI<0m+tR{93qcr6!9+#HMAfv%&sZ`4ZFDU;Ir;wm z`wE$l#O~a=D`i(P97ImuYgSCK9=WRf($)0=2L(yUodUGltvK=I$i`{EtGF*_Zhrw` zOCeKPO>MwRsP7H{`4UP&F-~!D6&{kynbz1WpPMQ{S5RgPb1)vT`C#twf$BeLUE!kgGA2{V8FleK#ix;I{`0buBEcsL^o z7o0IHk|cEF^zXMX-}DPoMT95nUf@?L+ps^9|CM4?ja4RUYHwdyR#sMCE=+4Nvg&e@ za0e#J;*-WRJ3G6#9A}et-qp3W$?bE(LP9u%)69uUNfO@2rJWY}g@wi;7v=Q_lo&vM zvxdnNQpCzyx~`9jW%tq1&>RdsiNamhBZG#|dHHW{Q`3Fnr174fAI+8}QE_=$Sy>eo zG;(i=8H90?Hq4$pVeviPGTltR(4ULBhm;l)5jg;s)}UYjNLyePfBYlFr8?9^#DT6Q zHszj!fu4H#c8buC@K>!e$DjGmq%4hR7;AEJxaUL^hUv*nhj;k+>|yTxy^sIzn`wml zlTwb~^~Ea!dh+hxxzl$z_lK}#_zQ{~hMHziK#KAwjKK@M4llp&KLZHABr~$G#4B+S zd6jGID(a#zBch_N;$Zpv<{H}Yk)kjks;N1NGL{lBc)2^fq**P%J%m@9Jr^9v)5q49 z=kh?^q3}iA>br>e);d-QCbZv=0I{3l96GNHFWnQ zBgYSGXwKc$MV3B-6LtAE%JlghF(Qq1cRVooR)CfbCkA3;4Cr%M z2eC0RrvI$P;Dm%a4+03eQQ!3`(6X2f{V{34L$XB^tjaOIg%RC0L-H^B4DNxK7-{_Gkd;utUp&#vZ!n11 z3(-g9C~sa~jc}5Yfq{giB++?LRQ?S)EfW(NVPU-Yx^lna)Fa=XJ}@>;K_fa_T4?F% zVb0!GOKx=WNlAe$Q9w>ke&fcCzP>)ZKup2H_c;CMXD2|;y>N2+WwzDRL*``&CnxZx zp`dV@`hh8(YGP=(>Qo8qlbQ1J(y)2OGDgbnlH}RsB+LREFz|KSBXVAchwp4}&pNrw zI+o3bB7}rWGcwpf0JO3qVDt~mpHo1y+T?%zN_u%dssz=E(r1018?~+rT~KH>w6%eQ zGIeo(@G6QKmgK;|z=L=`NSP4~B3&IFi+dhcm&2!kk{nleaegA0@FgdQ`^JqdI38zV zjU3g-I)i$j+FmP3_w7#_xntj-vqvZ-B_?{XI#SiVm*Upv+if-#$jB^q|90?yy#SdN zj|ZLwl!nJqcN<501)YmBs`>g}bR|p3oq4AD`}?=Y^Uow-tukEX=j2q#(gmd}oGi!}kb;M8C#qfftA4l7If0fE5gGZ7C6@rQa!5_ zLj*R+X7@o_YBD?8Y$UBTw7-8JcZ7Lk^jWJ|-qdb#uNBS%?>LpY)>tmMbwBt*fl3>z zeDVEwUHbl;{PhRz{)E4j)joXWZNiELYPPE`Gzuk#f&(D{XhMmnP-rOz0oQj6?IJv}IQ_oJa9 
z+93(XL9X37aS0qbY8skb2n0|KFU7}KS0OH*mJGuJbIaD}!ACeEKp2pcs;#IPoO5cO z+Xg8`!gbwm61OmLZ!YF$K%Z+#NeQ&bw!)rHPJhe=p(4L|^9Jw- z?dX`8Z}Ycd0s72$B+TKBdDKoUIol5vHP~0=qw~Ie`4SwAo27B3%UXz@Gq!U6?p-*P zm7AL=KR-VNn48;&2GdVjS>v^yG#|eW+d`#UT3WKPp#Vyt(8z!UjS1>j8dhd4qyx|? zpcmZQ+5&q9*>g-w8XB5hv^5(2cO@QLxTb@%JFe~(>5>)xa!6i-&m?EP!9UfyIX(AM5QY`d>NjO~1|K0aFEaMDLH zg}8C!Da7L%3tZA5W9p)yoiBa6If#{_aVk%RlN!j2bw8%2*1tO3mC8Wg6%qmw4AkY+ z)Kr<MT<0SGCnWR)xz zS69!9>|U$1k9;2HU`NQcG0O04?JBIRJBR3dpjHM*W7QUAX7&$CMZ>F)nsR?03Tc#8t(4y&~*9w)*nRiq3XiJ!$G5eE}p#5m5hyv z{RMgN-b-<&W6QTX-v|_Zq`9&7dWzJCK>`p?DgeF3!@~o{4gat!*mXBCN2Z7on|F#v z*W&q%RMgcc%WP;oB%$=qJfoq{`Jie6=hD>d8yg!-k@DoAxFVYJ@#CsgN+34kZTCR` z<*V#j-{)`N1RdXYT5xf3dDbwI$IqtXLEfm9A>b?cZA$J}M&rWydzwBOpl z#1AzoC7Z^k`?7xfcbqC`eooHY_BmF$w9-;aiCRodwCS@!z$1r;htrKAVvK4=MqqNm z@=wdlLr6+8+TjtSO@BQr8HO~)XH?~UcD%=HQ0`~`DKU{g%7>0V2ckN*m64Wqadq`{ zw@)1sjsFcgIy%VIL+^WJgIPnBfKZ_aS+LRgeNmABsYnA(e@tbxBy)G*H37LYL*slD z_pkN)Sj6b5On#F(NqE632psm}@T+?wK+D9$1l6dzT0%(3xjRKFjWfMsBY*|{%d4NB zKokH8k-R1VI|KspZ)Qg3Y_~5uI=ZlsuQWUe8)2$pY;0_4ncY4Igig+vFCSAYvD=vj5O!3=zk4@iD-1|fJAc^L#mC1-U0q#KF$9?|n*gVwpax*8ct0@NO$^1i-n+>>IABt0?&I9L>6LU-GYTgdZ93(pMWL;)>5%9Pn=4!8kaB^p>)mG;_F!|B^y5kv$zMcZ9A#H=}f-@bhVcidm=y~-C0J%RpvObM!d z+}xp2Q8J$UJjdMo82+zYunnK2C-^&yGd;A73JcR~WH3`k-Itf=Ng&fQG-PC87#$dR z^?NwY-&!p8XJcc-;xeQdtGX+Us4F;Ffelm19ly;5^Yii+ojo2rxGFwMgpY=ZJv%$| zc`xYSHw<+ZkgtryQ>(PL?ruez2S?OGD5$$2?A>tKe)S43yU!gU@~+YkZ;;AeR)>1W z#@@Ivky7V`KJTZ$%4C#&HR!q@q1;=`RPTXlM>m}CMdvC*zKx9y#S@bc?0JBYpmTG! 
zTf%{KNN-77RMHEHu?q|bqYr)Eh@}EUVbrb`-SxEI|9>sH7 zBz<@97xnQyve~JIz`41(A$Ea#zLLyQ4W@3h#v3DLHc&N(EZ{xU>EsaQiOOh!Mn$%0 zNG9%MYH@LKQ1#%ia=?;AnUTAgV& zfhuTv4a#l(F@T??x_0fw-m=yz`V=_t93G63AE}v;s4r$?)g=*Jx&6!0(d5oOSKK2J zyu54MbM4?q97+d&RL9|=Xt^M6hgcruL(Ob8cMcx6Sp{Uuf@!fXeFRXHj~%iBG_A^F zZZlFvC_E|FrQtLDF`-5sj+-<{=o_iRO=(>1LQE9d+p9%(Z13SgpEP*v7x3DY%J|=w z8?0rUy(jyah++n}hmq}Km;YLwWQwIcDM41Z%J#c^@W%?(#MP`q3#g+^S3Nxr)nzh7jFt3a8v!+=Ta*Bw;y0R`naPeyub=bEMsZ`ssAA4W zZ7@5d$iEO;AE|vFs&Fvz@sW1hRE0uOeZTY~tJey^A5ECNjZIt>dH;J}17Z6yS_+Ck z!^3*o+F-#A^NE)Af8D*XFGnnJsvqblE=ve6?cOw_4JGyMx$(2q`WuhL%!KPj6y#%Bk|@ z%a;(cq&XYRSSa$9`~aPE+nwll@6fusEzl8|Zznm_g>`!3rC}vi6sE6NPJyQ+fR!T{ zDfVKX$R&*|2e;l_P)tNb<^vKQAa5ePna$}&T>;0+F-UxT@zt+5 z;Xnrs+vAOwB~31mYE7|e+EL}f1MmU&oE2l?BG8nCS!rodAxsAy@Nm$&_Z1YNuevKN zoPi>x%t3){Fgt5*yFk0yMhUoz$gC%y68>u{3qnP2KN=H;B4BLzzM|sJk=Lqx zcm-?VYh?QD{5%OU@#6~mEix|@Zu-V|Lwi`=;I?);iIFh}Y&LuSh@Lb82+0j{EobM_ zs0w^@c?e%liZHLCH~2Zwvw6SCfjR;XR8nm@yh|G3A?LN!ZXHCNWp|3vH{#hYd$>x| zlQ%$u9ZYySv{u~1i<4y?f(kY^W(ba7fZ*{Q`2U^#TS>`PLP9tu#2h%It2hRs2$PGg zG~5Vje13lF9;L=&`#>qXPsDEIT}sNjOLcrZmLFc;hYz%G1$FfF5>hWMLd9agn15bD zXR9S|=j`Fn4?&~S-qr>=HtMEpOBy6epdxJT?Oj9}p<#Vc-jG?sz`$ToSn@R+RGc|*zr4I^WLZPsyn%3)c`^US_^Sih95*(ag&VJ+Y50u|1qG6ll57MM{OYT! zt35qE!5??~_HF+NPDl;%Y}ccee&~T2t2v4#Dm=Wss|%pK{OA-;-u%1;scRYw4fo^` zhL*kkIzT{WF7I^9ROs@g z`kuQ3=4@#p7@;M}0nnT3wcR=}0QNAC#h!FPza^I4%wkTzrX4?uKF zcp1nv;B16HuAY+lq!9{F9;IZqwYGxA`QE*IX>N)D8T9oNHFD)gCBLMx{nnDZ?tlWr ze{Af%%-+x0*>psB!N}IOwv1jYfX+hnykcTUfT}j-f>~SOH>@vcIsjA3%gaM_K^NX? z4t3e?*|UVCq{f#_aUwagzPAMgpaz4_v2TfopC4aB)#B+>a8W;0RZXiEFh{KomvHg$ zd?_yO1?N&!RCZR@Qws|Kr$&`dxHgiiC>X?ewQCeGL4dl6h`>5IyRZOOUYM(lKm;V! 
z*w${Di<)u!diR~rFqKQr(0@^c%?=b8EIPA8^tHK4%K{kx^$RDw3Bk<`2^SWRJpbxF zB1mQagE5d->vYg)b93|E-AiiaO+-Zh-@iRprRnMEEiFpUhgf0>NN!3n#-}vT+!YZ4 zQUE#xzgtmf^lAQtw#RioXUFf}y#tRRK$CQouAZKlfB+c|z8v2W$16e^)QIVaEjpEuhu-kMZrn$v-KYBEXXk&>iG&MMIh7tm?xKAmS2J zQ%y`ve1UJTG^}&oFy6?=l-Z>rC*K0^ZbxTlS!wCt{(kp`?>|`{!Ck3~z4=LF34GD~ zMpdOBH<&#xk%epD0_mg_A@uB20!MgM(5^%G z=~B(qL^X1OmnVqJ{SfpAu>7J7ammm*S^$mUO1TTMun;(M!geEI{pE;l1+ODAeW=F0 z1d^V;y+W$MR3Ji3tTH|Qv6~w-y$5@HfFhcH{|<0_mWK2-<+5Rp5)l&`=~zA)gN99y_}NG~zy){N<*t^0#lv z{^lv-qva#AFj_=%_BSRc;cjPg(|Mt!&JUr^eUxzRY90kV3G!INEOZAoH8rp^=124B z6bEA(p6jRF}1&>R{G|5}^AKY!-u z=XH6g%RTrz$YDv0)%(hX$sYq{vaqmFX}GSbE(P%me$)PZonGhbE$n$1$M%)vaUmQ3 ziB@Z4V;^N%MV)2|68UzO{2?}fz8AS9Uxp4%Jv>0g2j@$rVdVV28v%U|aFf8pz;Su0 zuLd^2)+*~VkIIUB43A)ULUwj`V9-54phn!w*IUwMK<%8_3PPsCHlFHnD9jcmdRZE< zPw1);YiuhtfY71=6NNyiu#&xrjMOg}fNfyGNIA|}e#7r%IcGW05kT654P$`~v@9$Y z!?sX@RXM>}Cb7tm<+HevQWwZ_ytA3Ckh!$70y{|{Ye&t85{A<1H1x%==LO=Ec6E2d zf{#$xft^WoQou*_4oQ&-q3l2uV`KYiD-8P7>N(Z~ryR){Y*T^E4dE)ZMz&kRoP5_D z^@y9YXGU`GFI1Mb+16ONViEf>*%C4Uy-;0%!y9a4W(8*ZVO~y-5}*M0ZH}P|0l!wt z94$tYg#^o#gpVI!psm?9e7D;X=y&b^;mW3 zgydvi1fmKOteZ0#0S-P1<7E$iV)G%r9$*YC!#4A8kYtPc`E?5~eqQB=J%6i+SXem_ z*kyNHn;D_#0&@=u7hnba1icsD&}igB%LF?=UP3MHcT;`|&EchY1OE&b%aED1U+&l`PO7 z!SV4sf&o$<*#Cd}1p5V@f6-n6(oIgT0ltKW9bpLxSKd?m;cd#jRG-8(Z`hFXKHv^~ zJycKl3RIt3*Jn0XR-jYCUO-@4@k%owh#fo0x-s`SRkPTw7&o09@`DPYiL+A zH8qkT9%g0T;NXC;hn=Ue>F;>=nX?SdVH(!UzR}T9MFjt6y1w>5fByW(z!uJV9fSq~_H7{J z-VqlUe@$HR$4UrDzGx*F-;YF-l3`FnE{_EHv-GkUf+mxg1nN_k#?tI8M|ssgG(e!M zC3W)g^OIh=l8}<(QfUbB2F?&rdF~}m;~+M}%Z6=XCz@1-c0NdKMtnLj18DR+m8|8p zH6|7oe5w@F$Q*9U*C=irZdesx8%!am0owu@qNTO^$q5s`|r zvX|D@gmy%j=v&)NLa6k*SS1c1<-pDjXCw)^SD@{A5{W_8fKUQ5i1~YBLIS3z2S|V* zQNe5qCXDs>2ixBc(Oe`2ZfWO7T$7pd!-s1SWb5mEK;{CI85$~j*{P2%)XdO2(@ zpvGZ#562+F)TNI;<>f8!uZYFDfFV zqdOY5Oa(%-t4l3G6^~Hs2jioo4GF8XpU3uqY5*9$j*1#w*#lkcV4n#S!J1)@%K?nQ zWlgn8$A?KoasLRoo}lV7*G5GjAuKLV*!7PX!TLWN#llW6JFw+2VW35S9Sk%#TRry# zTUHKe-4YTEXC-B2KnDnOa=vOXh5iQXosn!e^6D9ov%$#d_wOyr^nu3r_C5o*2Sk^N 
z+cYlpHug3+cXnx5SU_aMETjsP_kbN#KYoY7u+1$jI6n~M@}~R0?z8}dCfI0TH*cB# z%)fs>NXiZQ4uHE<%f99<5I}dl)8`7MP#3l#?L!m@CU~unRYKQZRVDsqg8-Edeb46X z?JB@&un!eDD}ba_j1PG6cZa>a)wrG=jfEb?vRc3s?VBBHFy)cyIB|+89^IpYs1ny0^S;~IgliN znkK6@dxN$6(PM9*-Cn$S!9)VPK-yYbM19Xs*$onYRuE{*QBms%C-L6A36^C4E3~X1 z(vULb;kSTXrKZN?O86O4^c?~kI5is^o5uaYwdl+gjSc*3$liC>2m%a@ zEBu$w1?cUC0EZNZLp;T$344j6m43}^!S!-+a)P1_JwU(p9we^ms2mjBdMHZiC;;X? zpo~yZu#pjf?i4gn*3f-ioxK4EF!ingAqi0E<(_!%*Uj4=o0v(JJj78hE-rd{!u073Y2yp95XX>emzIj@yF*<(PftAs=b0JD~wdP(M2e~|SjH17}G z{Rd6T?sa~@G^}XzBAYjce9ac8_=IRvKI&EaYb-2LwRxC0{cs-E9IwMjjQ$vC2xwuk z``y|sxx$m3I$zdS{+dlJmpgaT05?>RlNdK*YRSw0Y6!${SOFEF z?#q{s8D5eQ>~mojtCTepEPXb0qn)Y!-LI<*mf-@ zivSz}4!H$_a>2*JF*{WJ#J<9SmF%q&$7*?zZiC^b0Se{!{=GWT%yd({R%s)ZFSr6& zfr!7Hcrm7l9a-5P*)rIk34CQ{?;$iG+ecn3b%A(!kV*lXL82WQ85te*RG+jPDGkHu zcY@Hxd=Ew@1j@fxs|fntw~*}!(m(@;2Lk}f4?GiaInFLX;M)o_j8e>pB7}vhy+A$& zH-M}q(Boi)keVR5h6c_I5HvJV1OedE#mEC7KoJ(?Eg^(b=7<4JLFQ~pod}Qp#*LSj%a$MHOZjuo| zRsc!^YCad4AS)8IOu5rF=dN8p_lff2dzGf&*AjxsiY(0M4hMZ{UF6FQu^B;P?!z*uU`|!sr3K( z1K>lBE*f^&gNj-DPQWJ(tG1|_JiosR{7BGzjf{v(N_wpf@D}Tyj=kviZyLFry9XbV zjo>*TK0fEr^uykC+2|7h(BQTUlMg7U4ZzRwTVJXM7fkhdo$rN@V1lHybmV*ArYq|N#MgNAKkBbDjb zFH1|$vioxTmH?UenDyY~XaiXWYsuZ!9g6B|izAqS;O+zB0wo)aP#M#lhH|*G5w|G9 zIHHwc{jkNUKx6^w{Z3_Wb5r1$x<*36!4O0m%KyO5!NCCy6%{ajDjYGupCU1+-pbUq zL!a)lMqjl<5JVm)QV5TA0DbIoow7v>-nj!P5dea(uMB412{}9^ChB8NbqzF3K(m9e z3Vx*RS>=-9H}-RY;$)e>Lp6t)fkX$L;u-A9w=RQz^6i3H&2!v=J6{Yc)ztC3LKt%;O>1ZVmppHSR!%NBy_ze@lbvXg|-JRW`XoI9>D;y~P z(@KVb`0`{F1US$WDn>FsuO0_Zmi5{;TmT{t0vztL={pQMw5-oD$vh*B>W#t^XfTSO zH1B%UwwklazPw=Lqf1vrr+LJp5xBlC&dyL|ZLwR@u#mSLVDsqAZ({n0c}$GMWTu=N z+z!|Y0Bi_F7|tjLcmj&zD&FrhnWHctA7}z-uFx-79LJUSTlSHElTRR31%n6+3j>V= z^t1x)l409QLlQMy9JK7>VjDca!m1R~qF=Ts?4W+T3i${~j*n_wg$!KB?5m4yjjouO zRBxN{UIHDlXwD3?PFYO-p(hi(d%%wd*>_RsqSEP^nCz^rw{@MVhPu?g4yKB~BpfG# z`iX>1_?L8{kv^F$Dm|us4%D2X!n;#-o&^70+A`XZQ<~d?H~xFpPy-*_ z(aR?baSz=udVTq*Ap+d$|M#)~&j%3|UXY5B`WyE3VK>0@l`xd=tH~E4O#}W9NmvH! 
literal 0 HcmV?d00001 diff --git a/inventories/database.yaml b/inventories/database.yaml index 05d123f31..b179a30cf 100644 --- a/inventories/database.yaml +++ b/inventories/database.yaml @@ -11,6 +11,7 @@ images: stages: - name: database-build-context task_type: docker_build + tags: ["final_dockerfile"] dockerfile: Dockerfile.builder output: - registry: $(inputs.params.registry)/mongodb-kubernetes-database-context @@ -19,7 +20,7 @@ images: - name: init-appdb-template-ubi task_type: dockerfile_template distro: ubi - tags: ["ubi"] + tags: ["ubi", "final_dockerfile"] inputs: - version output: @@ -28,12 +29,11 @@ images: - name: database-build-ubi task_type: docker_build dockerfile: $(stages['init-appdb-template-ubi'].outputs[0].dockerfile) - tags: ["ubi"] + tags: ["ubi", "final_dockerfile"] buildargs: imagebase: $(inputs.params.registry)/mongodb-kubernetes-database-context:$(inputs.params.version_id) output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-database - tag: $(inputs.params.version_id) + - dockerfile: $(functions.tempfile) - name: master-latest task_type: tag_image diff --git a/inventories/init_appdb.yaml b/inventories/init_appdb.yaml index 50d5d4199..228968135 100644 --- a/inventories/init_appdb.yaml +++ b/inventories/init_appdb.yaml @@ -6,13 +6,14 @@ images: - name: init-appdb vars: context: . 
- template_context: docker/mongodb-kubernetes-init-database + template_context: docker/mongodb-kubernetes-init-appdb platform: linux/amd64 stages: - name: init-appdb-build-context task_type: docker_build - dockerfile: docker/mongodb-kubernetes-init-database/Dockerfile.builder + tags: ["final_dockerfile"] + dockerfile: docker/mongodb-kubernetes-init-appdb/Dockerfile.builder buildargs: mongodb_tools_url_ubi: $(inputs.params.mongodb_tools_url_ubi) output: @@ -22,7 +23,7 @@ images: - name: init-appdb-template-ubi task_type: dockerfile_template template_file_extension: ubi_minimal - tags: ["ubi"] + tags: ["final_dockerfile"] inputs: - is_appdb output: diff --git a/inventories/init_database.yaml b/inventories/init_database.yaml index 57ab81679..15901536c 100644 --- a/inventories/init_database.yaml +++ b/inventories/init_database.yaml @@ -12,6 +12,7 @@ images: stages: - name: init-database-build-context task_type: docker_build + tags: ["final_dockerfile"] dockerfile: docker/mongodb-kubernetes-init-database/Dockerfile.builder buildargs: mongodb_tools_url_ubi: $(inputs.params.mongodb_tools_url_ubi) @@ -24,7 +25,7 @@ images: - name: init-database-template-ubi task_type: dockerfile_template template_file_extension: ubi_minimal - tags: ["ubi"] + tags: ["ubi", "final_dockerfile"] inputs: - is_appdb output: diff --git a/inventories/init_om.yaml b/inventories/init_om.yaml index f3d310470..c05d34c3c 100644 --- a/inventories/init_om.yaml +++ b/inventories/init_om.yaml @@ -11,6 +11,7 @@ images: stages: - name: init-ops-manager-build-context task_type: docker_build + tags: ["final_dockerfile"] dockerfile: Dockerfile.builder output: - registry: $(inputs.params.registry)/mongodb-kubernetes-init-ops-manager-context @@ -19,7 +20,7 @@ images: - name: init-ops-manager-template-ubi task_type: dockerfile_template template_file_extension: ubi_minimal - tags: ["ubi"] + tags: ["ubi", "final_dockerfile"] inputs: - version output: diff --git a/inventories/om.yaml b/inventories/om.yaml index 
e4daf3103..d9b802376 100644 --- a/inventories/om.yaml +++ b/inventories/om.yaml @@ -13,6 +13,7 @@ images: stages: - name: ops-manager-context task_type: docker_build + tags: ["final_dockerfile"] dockerfile: docker/mongodb-enterprise-ops-manager/Dockerfile.builder output: - registry: $(inputs.params.registry)/ops-manager-context @@ -21,7 +22,7 @@ images: - name: ops-manager-template-ubi task_type: dockerfile_template template_file_extension: ubi - tags: ["ubi"] + tags: ["ubi", "final_dockerfile"] inputs: - om_download_url - version diff --git a/inventory.yaml b/inventory.yaml index d4beb4137..7d96add38 100644 --- a/inventory.yaml +++ b/inventory.yaml @@ -39,6 +39,7 @@ images: - name: operator-template-ubi task_type: dockerfile_template + tags: ["final_dockerfile"] distro: ubi inputs: - version diff --git a/lib/sonar/sonar.py b/lib/sonar/sonar.py old mode 100644 new mode 100755 diff --git a/lib/sonar/test/test_final_dockerfiles.py b/lib/sonar/test/test_final_dockerfiles.py new file mode 100644 index 000000000..f4516baea --- /dev/null +++ b/lib/sonar/test/test_final_dockerfiles.py @@ -0,0 +1,101 @@ +from unittest import skip + +from ..sonar import process_image + + +@skip("This test case is only used to generate the final Dockerfile for ops-manager") +def test_build_om_dockerfile(): + process_image( + image_name="ops-manager", + skip_tags=["release"], + include_tags=["final_dockerfile"], + build_args={ + "registry": "localhost:5000", + "version": "8.0.7", + "om_download_url": "https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-8.0.7.500.20250505T1426Z.tar.gz", + }, + build_options={}, + inventory="inventories/om.yaml", + ) + + +@skip("This test case is only used to generate the final Dockerfile for database") +def test_build_database_dockerfile(): + process_image( + image_name="database", + skip_tags=["release"], + include_tags=["final_dockerfile"], + build_args={ + "registry": "localhost:5000", + "version": "1.1.0", + }, + build_options={}, + 
inventory="inventories/database.yaml", + ) + + +@skip("This test case is only used to generate the final Dockerfile for init appdb") +def test_build_init_appdb_dockerfile(): + process_image( + image_name="init-appdb", + skip_tags=["release"], + include_tags=["final_dockerfile"], + build_args={ + "registry": "localhost:5000", + "version": "1.1.0", + "is_appdb": True, + "mongodb_tools_url_ubi": "https://downloads.mongodb.org/tools/db/mongodb-database-tools-rhel93-x86_64-100.12.0.tgz", + }, + build_options={}, + inventory="inventories/init_appdb.yaml", + ) + + +@skip("This test case is only used to generate the final Dockerfile for init database") +def test_build_init_database_dockerfile(): + process_image( + image_name="init-database", + skip_tags=["release"], + include_tags=["final_dockerfile"], + build_args={ + "registry": "localhost:5000", + "version": "1.1.0", + "is_appdb": False, + "mongodb_tools_url_ubi": "https://downloads.mongodb.org/tools/db/mongodb-database-tools-rhel93-x86_64-100.12.0.tgz", + }, + build_options={}, + inventory="inventories/init_database.yaml", + ) + + +@skip("This test case is only used to generate the final Dockerfile for init ops manager") +def test_build_init_ops_manager_dockerfile(): + process_image( + image_name="init-ops-manager", + skip_tags=["release"], + include_tags=["final_dockerfile"], + build_args={ + "registry": "localhost:5000", + "version": "1.1.0", + }, + build_options={}, + inventory="inventories/init_om.yaml", + ) + + +def test_build_operator_dockerfile(): + process_image( + image_name="mongodb-kubernetes", + skip_tags=["release"], + include_tags=["final_dockerfile"], + build_args={ + "version": "1.1.0", + "registry": "localhost:5000", + "release_version": "1.1.0", + "log_automation_config_diff": "false", + "use_race": "false", + "debug": False, + }, + build_options={}, + inventory="inventory.yaml", + ) From d5c8a14ff127a36a86a96a37228c5a3a655a2979 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Thu, 12 Jun 2025 
17:22:16 +0200 Subject: [PATCH 2/9] First script draft --- scripts/release/build_images.py | 132 +++++++++++++++++++++++++++++++- 1 file changed, 131 insertions(+), 1 deletion(-) diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 6ccaf4a8d..66136f563 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -1 +1,131 @@ -# Methods responsible for building and pushing docker images. +# Methods responsible for building and pushing docker images. +import sys +import traceback + +import boto3 +from botocore.exceptions import BotoCoreError, ClientError +import base64 + +from lib.base_logger import logger +import docker + +logger.info("Starting build images script") + +IMAGE_NAME = "mongodb-kubernetes-operator" +DOCKERFILES_PATH = f"./docker/{IMAGE_NAME}" +CONTEXT_DOCKERFILE = "Dockerfile" +RELEASE_DOCKERFILE = "Dockerfile.plain" +STAGING_REGISTRY = "268558157000.dkr.ecr.us-east-1.amazonaws.com/julienben/operator-staging-temp" +LATEST_TAG = "latest" +LATEST_TAG_CONTEXT = f"{LATEST_TAG}-context" + +def ecr_login_boto3(region: str, account_id: str): + """ + Fetches an auth token from ECR via boto3 and logs + into the Docker daemon via the Docker SDK. 
+ """ + registry = f"{account_id}.dkr.ecr.{region}.amazonaws.com" + # 1) get token + boto3.setup_default_session(profile_name='default') + ecr = boto3.client("ecr", region_name=region) + try: + resp = ecr.get_authorization_token(registryIds=[account_id]) + except (BotoCoreError, ClientError) as e: + raise RuntimeError(f"Failed to fetch ECR token: {e}") + + auth_data = resp["authorizationData"][0] + token = auth_data["authorizationToken"] # base64 of "AWS:password" + username, password = base64.b64decode(token).decode().split(":", 1) + + # 2) docker login + client = docker.APIClient() # low-level client supports login() + login_resp = client.login( + username=username, + password=password, + registry=registry, + reauth=True + ) + # login_resp is a dict like {'Status': 'Login Succeeded'} + status = login_resp.get("Status", "") + if "Succeeded" not in status: + raise RuntimeError(f"Docker login failed: {login_resp}") + logger.info(f"ECR login succeeded: {status}") + +def build_image(docker_client: docker.DockerClient, tag: str, dockerfile: str, path: str, args=None): + """ + Build a Docker image. 
+ + :param path: Build context path (directory with your Dockerfile) + :param dockerfile: Name or relative path of the Dockerfile within `path` + :param tag: Image tag (name:tag) + """ + + try: + image, logs = docker_client.images.build( + path=path, + dockerfile=dockerfile, + tag=tag, + rm=True, # remove intermediate containers after a successful build + pull=False, # set True to always attempt to pull a newer base image + buildargs=args # pass build args if provided + ) + logger.info(f"Successfully built {tag} (id: {image.id})") + # Print build output + for chunk in logs: + if 'stream' in chunk: + logger.debug(chunk['stream']) + except docker.errors.BuildError as e: + logger.error("Build failed:") + for stage in e.build_log: + if "stream" in stage: + logger.debug(stage["stream"]) + elif "error" in stage: + logger.error(stage["error"]) + logger.error(e) + sys.exit(1) + except Exception as e: + logger.error(f"Unexpected error: {e}") + sys.exit(2) + +def push_image(docker_client: docker.DockerClient, image: str, tag: str): + """ + Push a Docker image to a registry. 
+ + :param image: Image name (e.g., 'my-image') + :param tag: Image tag (e.g., 'latest') + """ + try: + response = docker_client.images.push(image, tag=tag) + logger.info(f"Successfully pushed {image}:{tag}") + logger.debug(response) + except docker.errors.APIError as e: + logger.error(f"Failed to push image {image}:{tag} - {e}") + sys.exit(1) + +if __name__ == '__main__': + docker_client = docker.from_env() + logger.info("Docker client initialized") + + # Login to ECR using boto3 + ecr_login_boto3(region='us-east-1', account_id='268558157000') + + # Build context image + image_full_tag = f"{STAGING_REGISTRY}:{LATEST_TAG_CONTEXT}" + logger.info(f"Building image: {image_full_tag}") + context_dockerfile_full_path = f"{DOCKERFILES_PATH}/{CONTEXT_DOCKERFILE}" + logger.info(f"Using Dockerfile at: {context_dockerfile_full_path}") + build_image(docker_client, path=".", dockerfile=context_dockerfile_full_path, tag=LATEST_TAG_CONTEXT, args={'version': '0.0.1'}) + + # Push to staging registry + push_image(docker_client, STAGING_REGISTRY, LATEST_TAG_CONTEXT) + + # Build release image + release_image_full_tag = f'{STAGING_REGISTRY}:latest' + release_dockerfile_full_path = f"{DOCKERFILES_PATH}/{RELEASE_DOCKERFILE}" + logger.info(f"Building release image with tag: {release_image_full_tag}") + logger.info(f"Using Dockerfile at: {release_dockerfile_full_path}") + + build_image(docker_client, path=".", dockerfile=release_dockerfile_full_path, tag=release_image_full_tag, args={'imagebase': image_full_tag}) + + # Push release image + push_image(docker_client, STAGING_REGISTRY, LATEST_TAG) From 8dbf967a660be835e661f5c875affcac0c8ab043 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 18 Jun 2025 11:01:31 +0200 Subject: [PATCH 3/9] Copied pipeline, removed daily builds and --exclude --- scripts/release/atomic_pipeline.py | 1370 ++++++++++++++++++++++++++++ 1 file changed, 1370 insertions(+) create mode 100755 scripts/release/atomic_pipeline.py diff --git 
a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py new file mode 100755 index 000000000..75a9e3051 --- /dev/null +++ b/scripts/release/atomic_pipeline.py @@ -0,0 +1,1370 @@ +#!/usr/bin/env python3 + +"""This pipeline script knows about the details of our Docker images +and where to fetch and calculate parameters. It uses Sonar.py +to produce the final images.""" + +import argparse +import copy +import json +import os +import random +import shutil +import subprocess +import sys +import tarfile +import time +from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor +from dataclasses import dataclass +from datetime import datetime, timedelta, timezone +from queue import Queue +from typing import Callable, Dict, Iterable, List, Optional, Set, Tuple, Union + +import requests +import semver +from opentelemetry import context +from opentelemetry import context as otel_context +from opentelemetry import trace +from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( + OTLPSpanExporter as OTLPSpanGrpcExporter, +) +from opentelemetry.sdk.resources import SERVICE_NAME, Resource +from opentelemetry.sdk.trace import ( + SynchronousMultiSpanProcessor, + Tracer, + TracerProvider, +) +from opentelemetry.sdk.trace.export import BatchSpanProcessor +from opentelemetry.trace import NonRecordingSpan, SpanContext, TraceFlags +from packaging.version import Version + +import docker +from lib.base_logger import logger +from lib.sonar.sonar import process_image +from scripts.evergreen.release.agent_matrix import ( + get_supported_operator_versions, + get_supported_version_for_image_matrix_handling, +) +from scripts.evergreen.release.images_signing import ( + mongodb_artifactory_login, + sign_image, + verify_signature, +) +from scripts.evergreen.release.sbom import generate_sbom, generate_sbom_for_cli + +TRACER = trace.get_tracer("evergreen-agent") + +#TODO: better framework for multi arch builds (spike to come) + +def _setup_tracing(): + 
trace_id = os.environ.get("otel_trace_id") + parent_id = os.environ.get("otel_parent_id") + endpoint = os.environ.get("otel_collector_endpoint") + if any(value is None for value in [trace_id, parent_id, endpoint]): + logger.info("tracing environment variables are missing, not configuring tracing") + return + logger.info(f"parent_id is {parent_id}") + logger.info(f"trace_id is {trace_id}") + logger.info(f"endpoint is {endpoint}") + span_context = SpanContext( + trace_id=int(trace_id, 16), + span_id=int(parent_id, 16), + is_remote=False, + # Magic number needed for our OTEL collector + trace_flags=TraceFlags(0x01), + ) + ctx = trace.set_span_in_context(NonRecordingSpan(span_context)) + context.attach(ctx) + sp = SynchronousMultiSpanProcessor() + span_processor = BatchSpanProcessor( + OTLPSpanGrpcExporter( + endpoint=endpoint, + ) + ) + sp.add_span_processor(span_processor) + resource = Resource(attributes={SERVICE_NAME: "evergreen-agent"}) + provider = TracerProvider(resource=resource, active_span_processor=sp) + trace.set_tracer_provider(provider) + + +DEFAULT_IMAGE_TYPE = "ubi" +DEFAULT_NAMESPACE = "default" + +# QUAY_REGISTRY_URL sets the base registry for all release build stages. Context images and daily builds will push the +# final images to the registry specified here. +# This makes it easy to use ECR to test changes on the pipeline before pushing to Quay. 
+QUAY_REGISTRY_URL = os.environ.get("QUAY_REGISTRY", "quay.io/mongodb") + + +@dataclass +class BuildConfiguration: + image_type: str + base_repository: str + namespace: str + + include_tags: list[str] + skip_tags: list[str] + + builder: str = "docker" + parallel: bool = False + parallel_factor: int = 0 + architecture: Optional[List[str]] = None + sign: bool = False + all_agents: bool = False + + pipeline: bool = True + debug: bool = True + + def build_args(self, args: Optional[Dict[str, str]] = None) -> Dict[str, str]: + if args is None: + args = {} + args = args.copy() + + args["registry"] = self.base_repository + + return args + + def get_skip_tags(self) -> list[str]: + return make_list_of_str(self.skip_tags) + + def get_include_tags(self) -> list[str]: + return make_list_of_str(self.include_tags) + + def is_release_step_executed(self) -> bool: + if "release" in self.get_skip_tags(): + return False + if "release" in self.get_include_tags(): + return True + return len(self.get_include_tags()) == 0 + + +def make_list_of_str(value: Union[None, str, List[str]]) -> List[str]: + if value is None: + return [] + + if isinstance(value, str): + return [e.strip() for e in value.split(",")] + + return value + + +def get_tools_distro(tools_version: str) -> Dict[str, str]: + new_rhel_tool_version = "100.10.0" + default_distro = {"arm": "rhel90-aarch64", "amd": "rhel90-x86_64"} + if Version(tools_version) >= Version(new_rhel_tool_version): + return {"arm": "rhel93-aarch64", "amd": "rhel93-x86_64"} + return default_distro + + +def operator_build_configuration( + builder: str, + parallel: bool, + debug: bool, + architecture: Optional[List[str]] = None, + sign: bool = False, + all_agents: bool = False, + parallel_factor: int = 0, +) -> BuildConfiguration: + bc = BuildConfiguration( + image_type=os.environ.get("distro", DEFAULT_IMAGE_TYPE), + base_repository=os.environ["BASE_REPO_URL"], + namespace=os.environ.get("namespace", DEFAULT_NAMESPACE), + 
skip_tags=make_list_of_str(os.environ.get("skip_tags")), + include_tags=make_list_of_str(os.environ.get("include_tags")), + builder=builder, + parallel=parallel, + all_agents=all_agents or bool(os.environ.get("all_agents", False)), + debug=debug, + architecture=architecture, + sign=sign, + parallel_factor=parallel_factor, + ) + + logger.info(f"is_running_in_patch: {is_running_in_patch()}") + logger.info(f"is_running_in_evg_pipeline: {is_running_in_evg_pipeline()}") + if is_running_in_patch() or not is_running_in_evg_pipeline(): + logger.info( + f"Running build not in evg pipeline (is_running_in_evg_pipeline={is_running_in_evg_pipeline()}) " + f"or in pipeline but not from master (is_running_in_patch={is_running_in_patch()}). " + "Adding 'master' tag to skip to prevent publishing to the latest dev image." + ) + bc.skip_tags.append("master") + + return bc + + +def is_running_in_evg_pipeline(): + return os.getenv("RUNNING_IN_EVG", "") == "true" + + +class MissingEnvironmentVariable(Exception): + pass + + +def should_pin_at() -> Optional[Tuple[str, str]]: + """Gets the value of the pin_tag_at to tag the images with. + + Returns its value split on :. + """ + # We need to return something so `partition` does not raise + # AttributeError + is_patch = is_running_in_patch() + + try: + pinned = os.environ["pin_tag_at"] + except KeyError: + raise MissingEnvironmentVariable(f"pin_tag_at environment variable does not exist, but is required") + if is_patch: + if pinned == "00:00": + raise Exception("Pinning to midnight during a patch is not supported. Please pin to another date!") + + hour, _, minute = pinned.partition(":") + return hour, minute + + +def is_running_in_patch(): + is_patch = os.environ.get("is_patch") + return is_patch is not None and is_patch.lower() == "true" + + +def build_id() -> str: + """Returns the current UTC time in ISO8601 date format. + + If running in Evergreen and `created_at` expansion is defined, use the + datetime defined in that variable instead. 
+ + It is possible to pin this time at midnight (00:00) for periodic builds. If + running a manual build, then the Evergreen `pin_tag_at` variable needs to be + set to the empty string, in which case, the image tag suffix will correspond + to the current timestamp. + + """ + + date = datetime.now(timezone.utc) + try: + created_at = os.environ["created_at"] + date = datetime.strptime(created_at, "%y_%m_%d_%H_%M_%S") + except KeyError: + pass + + hour, minute = should_pin_at() + if hour and minute: + logger.info(f"we are pinning to, hour: {hour}, minute: {minute}") + date = date.replace(hour=int(hour), minute=int(minute), second=0) + else: + logger.warning(f"hour and minute cannot be extracted from provided pin_tag_at env, pinning to now") + + string_time = date.strftime("%Y%m%dT%H%M%SZ") + + return string_time + + +def get_release() -> Dict: + with open("release.json") as release: + return json.load(release) + + +def get_git_release_tag() -> tuple[str, bool]: + """Returns the git tag of the current run on releases, on non-release returns the patch id.""" + release_env_var = os.getenv("triggered_by_git_tag") + + # that means we are in a release and only return the git_tag; otherwise we want to return the patch_id + # appended to ensure the image created is unique and does not interfere + if release_env_var is not None: + return release_env_var, True + + patch_id = os.environ.get("version_id", "latest") + return patch_id, False + + +def copy_into_container(client, src, dst): + """Copies a local file into a running container.""" + + os.chdir(os.path.dirname(src)) + srcname = os.path.basename(src) + with tarfile.open(src + ".tar", mode="w") as tar: + tar.add(srcname) + + name, dst = dst.split(":") + container = client.containers.get(name) + + with open(src + ".tar", "rb") as fd: + container.put_archive(os.path.dirname(dst), fd.read()) + + +def create_and_push_manifest(image: str, tag: str, architectures: list[str]) -> None: + """ + Generates docker manifests by running 
the following commands: + 1. Clear existing manifests + docker manifest rm config.repo_url/image:tag + 2. Create the manifest + docker manifest create config.repo_url/image:tag --amend config.repo_url/image:tag-amd64 --amend config.repo_url/image:tag-arm64 + 3. Push the manifest + docker manifest push config.repo_url/image:tag + + This method calls docker directly on the command line, this is different from the rest of the code which uses + Sonar as an interface to docker. We decided to keep this asymmetry for now, as Sonar will be removed soon. + """ + final_manifest = image + ":" + tag + + args = [ + "docker", + "manifest", + "create", + final_manifest, + ] + + for arch in architectures: + args.extend(["--amend", f"{final_manifest}-{arch}"]) + + args_str = " ".join(args) + logger.debug(f"creating new manifest: {args_str}") + cp = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + if cp.returncode != 0: + raise Exception(cp.stderr) + + args = ["docker", "manifest", "push", final_manifest] + args_str = " ".join(args) + logger.info(f"pushing new manifest: {args_str}") + cp = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + if cp.returncode != 0: + raise Exception(cp.stderr) + + +def try_get_platform_data(client, image): + """Helper function to try and retrieve platform data.""" + try: + return client.images.get_registry_data(image) + except Exception as e: + logger.error("Failed to get registry data for image: {0}. Error: {1}".format(image, str(e))) + return None + + +def check_multi_arch(image: str, suffix: str) -> bool: + """ + Checks if a docker image supports AMD and ARM platforms by inspecting the registry data. 
+ + :param str image: The image name and tag + """ + client = docker.from_env() + platforms = ["linux/amd64", "linux/arm64"] + + for img in [image, image + suffix]: + reg_data = try_get_platform_data(client, img) + if reg_data is not None and all(reg_data.has_platform(p) for p in platforms): + logger.info("Base image {} supports multi architecture, building for ARM64 and AMD64".format(img)) + return True + + logger.info("Base image {} is single-arch, building only for AMD64.".format(img)) + return False + + +@TRACER.start_as_current_span("sonar_build_image") +def sonar_build_image( + image_name: str, + build_configuration: BuildConfiguration, + args: Dict[str, str] = None, + inventory="inventory.yaml", + with_sbom: bool = True, +): + """Calls sonar to build `image_name` with arguments defined in `args`.""" + span = trace.get_current_span() + span.set_attribute("mck.image_name", image_name) + span.set_attribute("mck.inventory", inventory) + if args: + span.set_attribute("mck.build_args", str(args)) + + build_options = { + # Will continue building an image if it finds an error. See next comment. 
+ "continue_on_errors": True, + # But will still fail after all the tasks have completed + "fail_on_errors": True, + "pipeline": build_configuration.pipeline, + } + + logger.info(f"Sonar config bc: {build_configuration}, args: {args}, for image: {image_name}") + + process_image( + image_name, + skip_tags=build_configuration.get_skip_tags(), + include_tags=build_configuration.get_include_tags(), + build_args=build_configuration.build_args(args), + inventory=inventory, + build_options=build_options, + ) + + if with_sbom: + produce_sbom(build_configuration, args) + + +@TRACER.start_as_current_span("produce_sbom") +def produce_sbom(build_configuration, args): + span = trace.get_current_span() + if not is_running_in_evg_pipeline(): + logger.info("Skipping SBOM Generation (enabled only for EVG)") + return + + try: + image_pull_spec = args["quay_registry"] + args.get("ubi_suffix", "") + except KeyError: + logger.error(f"Could not find image pull spec. Args: {args}, BuildConfiguration: {build_configuration}") + logger.error(f"Skipping SBOM generation") + return + + try: + image_tag = args["release_version"] + span.set_attribute("mck.release_version", image_tag) + except KeyError: + logger.error(f"Could not find image tag. Args: {args}, BuildConfiguration: {build_configuration}") + logger.error(f"Skipping SBOM generation") + return + + image_pull_spec = f"{image_pull_spec}:{image_tag}" + print(f"Producing SBOM for image: {image_pull_spec} args: {args}") + + if "platform" in args: + if args["platform"] == "arm64": + platform = "linux/arm64" + elif args["platform"] == "amd64": + platform = "linux/amd64" + else: + # TODO: return here? + logger.error(f"Unrecognized architectures in {args}. Skipping SBOM generation") + else: + platform = "linux/amd64" + + generate_sbom(image_pull_spec, platform) + + +def build_tests_image(build_configuration: BuildConfiguration): + """ + Builds image used to run tests. 
+ """ + image_name = "test" + + # helm directory needs to be copied over to the tests docker context. + helm_src = "helm_chart" + helm_dest = "docker/mongodb-kubernetes-tests/helm_chart" + requirements_dest = "docker/mongodb-kubernetes-tests/requirements.txt" + public_src = "public" + public_dest = "docker/mongodb-kubernetes-tests/public" + + # Remove existing directories/files if they exist + shutil.rmtree(helm_dest, ignore_errors=True) + shutil.rmtree(public_dest, ignore_errors=True) + + # Copy directories and files (recursive copy) + shutil.copytree(helm_src, helm_dest) + shutil.copytree(public_src, public_dest) + shutil.copyfile("release.json", "docker/mongodb-kubernetes-tests/release.json") + shutil.copyfile("requirements.txt", requirements_dest) + + python_version = os.getenv("PYTHON_VERSION", "") + if python_version == "": + raise Exception("Missing PYTHON_VERSION environment variable") + + buildargs = dict({"python_version": python_version}) + + sonar_build_image(image_name, build_configuration, buildargs, "inventories/test.yaml") + + +def build_mco_tests_image(build_configuration: BuildConfiguration): + """ + Builds image used to run community tests. + """ + image_name = "community-operator-e2e" + golang_version = os.getenv("GOLANG_VERSION", "1.24") + if golang_version == "": + raise Exception("Missing GOLANG_VERSION environment variable") + + buildargs = dict({"golang_version": golang_version}) + + sonar_build_image(image_name, build_configuration, buildargs, "inventories/mco_test.yaml") + + +def build_operator_image(build_configuration: BuildConfiguration): + """Calculates arguments required to build the operator image, and starts the build process.""" + # In evergreen, we can pass test_suffix env to publish the operator to a quay + # repository with a given suffix. 
+ test_suffix = os.environ.get("test_suffix", "") + log_automation_config_diff = os.environ.get("LOG_AUTOMATION_CONFIG_DIFF", "false") + version, _ = get_git_release_tag() + + args = { + "version": version, + "log_automation_config_diff": log_automation_config_diff, + "test_suffix": test_suffix, + "debug": build_configuration.debug, + } + + logger.info(f"Building Operator args: {args}") + + image_name = "mongodb-kubernetes" + build_image_generic( + config=build_configuration, + image_name=image_name, + inventory_file="inventory.yaml", + extra_args=args, + registry_address=f"{QUAY_REGISTRY_URL}/{image_name}", + ) + + +def build_database_image(build_configuration: BuildConfiguration): + """ + Builds a new database image. + """ + release = get_release() + version = release["databaseImageVersion"] + args = {"version": version} + build_image_generic(build_configuration, "database", "inventories/database.yaml", args) + + +def build_CLI_SBOM(build_configuration: BuildConfiguration): + if not is_running_in_evg_pipeline(): + logger.info("Skipping SBOM Generation (enabled only for EVG)") + return + + if build_configuration.architecture is None or len(build_configuration.architecture) == 0: + architectures = ["linux/amd64", "linux/arm64", "darwin/arm64", "darwin/amd64"] + elif "arm64" in build_configuration.architecture: + architectures = ["linux/arm64", "darwin/arm64"] + elif "amd64" in build_configuration.architecture: + architectures = ["linux/amd64", "darwin/amd64"] + else: + logger.error(f"Unrecognized architectures {build_configuration.architecture}. Skipping SBOM generation") + return + + release = get_release() + version = release["mongodbOperator"] + + for architecture in architectures: + generate_sbom_for_cli(version, architecture) + + +def build_operator_image_patch(build_configuration: BuildConfiguration): + """This function builds the operator locally and pushed into an existing + Docker image. 
This is the fastest way I could image we can do this.""" + + client = docker.from_env() + # image that we know is where we build operator. + image_repo = build_configuration.base_repository + "/" + build_configuration.image_type + "/mongodb-kubernetes" + image_tag = "latest" + repo_tag = image_repo + ":" + image_tag + + logger.debug(f"Pulling image: {repo_tag}") + try: + image = client.images.get(repo_tag) + except docker.errors.ImageNotFound: + logger.debug("Operator image does not exist locally. Building it now") + build_operator_image(build_configuration) + return + + logger.debug("Done") + too_old = datetime.now() - timedelta(hours=3) + image_timestamp = datetime.fromtimestamp( + image.history()[0]["Created"] + ) # Layer 0 is the latest added layer to this Docker image. [-1] is the FROM layer. + + if image_timestamp < too_old: + logger.info("Current operator image is too old, will rebuild it completely first") + build_operator_image(build_configuration) + return + + container_name = "mongodb-enterprise-operator" + operator_binary_location = "/usr/local/bin/mongodb-kubernetes-operator" + try: + client.containers.get(container_name).remove() + logger.debug(f"Removed {container_name}") + except docker.errors.NotFound: + pass + + container = client.containers.run(repo_tag, name=container_name, entrypoint="sh", detach=True) + + logger.debug("Building operator with debugging symbols") + subprocess.run(["make", "manager"], check=True, stdout=subprocess.PIPE) + logger.debug("Done building the operator") + + copy_into_container( + client, + os.getcwd() + "/docker/mongodb-kubernetes-operator/content/mongodb-kubernetes-operator", + container_name + ":" + operator_binary_location, + ) + + # Commit changes on disk as a tag + container.commit( + repository=image_repo, + tag=image_tag, + ) + # Stop this container so we can use it next time + container.stop() + container.remove() + + logger.info("Pushing operator to {}:{}".format(image_repo, image_tag)) + client.images.push( + 
repository=image_repo, + tag=image_tag, + ) + + +def get_supported_variants_for_image(image: str) -> List[str]: + return get_release()["supportedImages"][image]["variants"] + + +def image_config( + image_name: str, + name_prefix: str = "mongodb-kubernetes-", + s3_bucket: str = "enterprise-operator-dockerfiles", + ubi_suffix: str = "-ubi", + base_suffix: str = "", +) -> Tuple[str, Dict[str, str]]: + """Generates configuration for an image suitable to be passed + to Sonar. + + It returns a dictionary with registries and S3 configuration.""" + args = { + "quay_registry": "{}/{}{}".format(QUAY_REGISTRY_URL, name_prefix, image_name), + "ecr_registry_ubi": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/{}{}".format(name_prefix, image_name), + "s3_bucket_http": "https://{}.s3.amazonaws.com/dockerfiles/{}{}".format(s3_bucket, name_prefix, image_name), + "ubi_suffix": ubi_suffix, + "base_suffix": base_suffix, + } + + return image_name, args + +def is_version_in_range(version: str, min_version: str, max_version: str) -> bool: + """Check if the version is in the range""" + try: + parsed_version = semver.VersionInfo.parse(version) + if parsed_version.prerelease: + logger.info(f"Excluding {version} from range {min_version}-{max_version} because it's a pre-release") + return False + version_without_rc = semver.VersionInfo.finalize_version(parsed_version) + except ValueError: + version_without_rc = version + if min_version and max_version: + return version_without_rc.match(">=" + min_version) and version_without_rc.match("<" + max_version) + return True + + +def get_versions_to_rebuild(supported_versions, min_version, max_version): + # this means we only want to release one version, we cannot rely on the below range function + # since the agent does not follow semver for comparison + if (min_version and max_version) and (min_version == max_version): + return [min_version] + return filter(lambda x: is_version_in_range(x, min_version, max_version), supported_versions) + + +def 
get_versions_to_rebuild_per_operator_version(supported_versions, operator_version): + """ + This function returns all versions sliced by a specific operator version. + If the input is `onlyAgents` then it only returns agents without the operator suffix. + """ + versions_to_rebuild = [] + + for version in supported_versions: + if operator_version == "onlyAgents": + # 1_ works because we append the operator version via "_", all agents end with "1". + if "1_" not in version: + versions_to_rebuild.append(version) + else: + if operator_version in version: + versions_to_rebuild.append(version) + return versions_to_rebuild + + +class TracedThreadPoolExecutor(ThreadPoolExecutor): + """Implementation of :class:ThreadPoolExecutor that will pass context into sub tasks.""" + + def __init__(self, tracer: Tracer, *args, **kwargs): + self.tracer = tracer + super().__init__(*args, **kwargs) + + def with_otel_context(self, c: otel_context.Context, fn: Callable): + otel_context.attach(c) + return fn() + + def submit(self, fn, *args, **kwargs): + """Submit a new task to the thread pool.""" + + # get the current otel context + c = otel_context.get_current() + if c: + return super().submit( + lambda: self.with_otel_context(c, lambda: fn(*args, **kwargs)), + ) + else: + return super().submit(lambda: fn(*args, **kwargs)) + + +def should_skip_arm64(): + """ + Determines if arm64 builds should be skipped based on environment. + Returns True if running in Evergreen pipeline as a patch. 
+ """ + return is_running_in_evg_pipeline() and is_running_in_patch() + +@TRACER.start_as_current_span("sign_image_in_repositories") +def sign_image_in_repositories(args: Dict[str, str], arch: str = None): + span = trace.get_current_span() + repository = args["quay_registry"] + args["ubi_suffix"] + tag = args["release_version"] + if arch: + tag = f"{tag}-{arch}" + + span.set_attribute("mck.tag", tag) + + sign_image(repository, tag) + verify_signature(repository, tag) + + +def find_om_in_releases(om_version: str, releases: Dict[str, str]) -> Optional[str]: + """ + There are a few alternatives out there that allow for json-path or xpath-type + traversal of Json objects in Python, I don't have time to look for one of + them now but I have to do at some point. + """ + for release in releases: + if release["version"] == om_version: + for platform in release["platform"]: + if platform["package_format"] == "deb" and platform["arch"] == "x86_64": + for package in platform["packages"]["links"]: + if package["name"] == "tar.gz": + return package["download_link"] + return None + + +def get_om_releases() -> Dict[str, str]: + """Returns a dictionary representation of the Json document holdin all the OM + releases. 
+ """ + ops_manager_release_archive = ( + "https://info-mongodb-com.s3.amazonaws.com/com-download-center/ops_manager_release_archive.json" + ) + + return requests.get(ops_manager_release_archive).json() + + +def find_om_url(om_version: str) -> str: + """Gets a download URL for a given version of OM.""" + releases = get_om_releases() + + current_release = find_om_in_releases(om_version, releases["currentReleases"]) + if current_release is None: + current_release = find_om_in_releases(om_version, releases["oldReleases"]) + + if current_release is None: + raise ValueError("Ops Manager version {} could not be found".format(om_version)) + + return current_release + + +def build_init_om_image(build_configuration: BuildConfiguration): + release = get_release() + init_om_version = release["initOpsManagerVersion"] + args = {"version": init_om_version} + build_image_generic(build_configuration, "init-ops-manager", "inventories/init_om.yaml", args) + + +def build_om_image(build_configuration: BuildConfiguration): + # Make this a parameter for the Evergreen build + # https://github.com/evergreen-ci/evergreen/wiki/Parameterized-Builds + om_version = os.environ.get("om_version") + if om_version is None: + raise ValueError("`om_version` should be defined.") + + om_download_url = os.environ.get("om_download_url", "") + if om_download_url == "": + om_download_url = find_om_url(om_version) + + args = { + "version": om_version, + "om_download_url": om_download_url, + } + + build_image_generic( + config=build_configuration, + image_name="ops-manager", + inventory_file="inventories/om.yaml", + extra_args=args, + registry_address=f"{QUAY_REGISTRY_URL}/mongodb-enterprise-ops-manager", + ) + + +def build_image_generic( + config: BuildConfiguration, + image_name: str, + inventory_file: str, + extra_args: dict = None, + registry_address: str = None, + is_multi_arch: bool = False, + multi_arch_args_list: list = None, + is_run_in_parallel: bool = False, +): + """Build image generic builds 
context images and is used for triggering release. During releases + it signs and verifies the context image. + """ + + if not multi_arch_args_list: + multi_arch_args_list = [extra_args or {}] + + version = multi_arch_args_list[0].get("version", "") # the version is the same in multi-arch for each item + registry = f"{QUAY_REGISTRY_URL}/mongodb-kubernetes-{image_name}" if not registry_address else registry_address + + for args in multi_arch_args_list: # in case we are building multiple architectures + args["quay_registry"] = registry + sonar_build_image(image_name, config, args, inventory_file, False) + if is_multi_arch: + # we only push the manifests of the context images here, + # since daily rebuilds will push the manifests for the proper images later + architectures = [v["architecture"] for v in multi_arch_args_list] + create_and_push_manifest(registry_address, f"{version}-context", architectures=architectures) + if not config.is_release_step_executed(): + # Normally daily rebuild would create and push the manifests for the non-context images. + # But since we don't run daily rebuilds on ecr image builds, we can do that step instead here. + # We only need to push manifests for multi-arch images. + create_and_push_manifest(registry_address, version, architectures=architectures) + + # Sign and verify the context image if on releases if requied. 
+ if config.sign and config.is_release_step_executed(): + sign_and_verify_context_image(registry, version) + +def sign_and_verify_context_image(registry, version): + sign_image(registry, version + "-context") + verify_signature(registry, version + "-context") + + +def build_init_appdb(build_configuration: BuildConfiguration): + release = get_release() + version = release["initAppDbVersion"] + base_url = "https://fastdl.mongodb.org/tools/db/" + mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) + args = {"version": version, "is_appdb": True, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} + build_image_generic(build_configuration, "init-appdb", "inventories/init_appdb.yaml", args) + + +def build_community_image(build_configuration: BuildConfiguration, image_type: str): + """ + Builds image for community components (readiness probe, upgrade hook). + + Args: + build_configuration: The build configuration to use + image_type: Type of image to build ("readiness-probe" or "upgrade-hook") + """ + + if image_type == "readiness-probe": + image_name = "mongodb-kubernetes-readinessprobe" + inventory_file = "inventories/readiness_probe.yaml" + elif image_type == "upgrade-hook": + image_name = "mongodb-kubernetes-operator-version-upgrade-post-start-hook" + inventory_file = "inventories/upgrade_hook.yaml" + else: + raise ValueError(f"Unsupported image type: {image_type}") + + version, is_release = get_git_release_tag() + golang_version = os.getenv("GOLANG_VERSION", "1.24") + + # Use only amd64 if we should skip arm64 builds + if should_skip_arm64(): + architectures = ["amd64"] + logger.info("Skipping ARM64 builds for community image as this is running in EVG pipeline as a patch") + else: + architectures = build_configuration.architecture or ["amd64", "arm64"] + + multi_arch_args_list = [] + + for arch in architectures: + arch_args = { + "version": version, + "golang_version": golang_version, + "architecture": arch, + } + 
multi_arch_args_list.append(arch_args) + + ecr_registry = os.environ.get("BASE_REPO_URL", "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev") + base_repo = QUAY_REGISTRY_URL if is_release else ecr_registry + + build_image_generic( + config=build_configuration, + image_name=image_name, + multi_arch_args_list=multi_arch_args_list, + inventory_file=inventory_file, + registry_address=f"{base_repo}/{image_name}", + is_multi_arch=True, # We for pushing manifest anyway, even if arm64 is skipped in patches + ) + + +def build_readiness_probe_image(build_configuration: BuildConfiguration): + """ + Builds image used for readiness probe. + """ + build_community_image(build_configuration, "readiness-probe") + + +def build_upgrade_hook_image(build_configuration: BuildConfiguration): + """ + Builds image used for version upgrade post-start hook. + """ + build_community_image(build_configuration, "upgrade-hook") + + +def build_agent_in_sonar( + build_configuration: BuildConfiguration, + image_version, + init_database_image, + mongodb_tools_url_ubi, + mongodb_agent_url_ubi: str, + agent_version, +): + args = { + "version": image_version, + "mongodb_tools_url_ubi": mongodb_tools_url_ubi, + "mongodb_agent_url_ubi": mongodb_agent_url_ubi, + "init_database_image": init_database_image, + } + + agent_quay_registry = QUAY_REGISTRY_URL + f"/mongodb-agent-ubi" + args["quay_registry"] = agent_quay_registry + args["agent_version"] = agent_version + + build_image_generic( + config=build_configuration, + image_name="mongodb-agent", + inventory_file="inventories/agent.yaml", + extra_args=args, + registry_address=agent_quay_registry, + is_run_in_parallel=True, + ) + + +def build_multi_arch_agent_in_sonar( + build_configuration: BuildConfiguration, + image_version, + tools_version, +): + """ + Creates the multi-arch non-operator suffixed version of the agent. + This is a drop-in replacement for the agent + release from MCO. + This should only be called during releases. 
+ Which will lead to a release of the multi-arch + images to quay and ecr. + """ + + logger.info(f"building multi-arch base image for: {image_version}") + is_release = build_configuration.is_release_step_executed() + args = { + "version": image_version, + "tools_version": tools_version, + } + + arch_arm = { + "agent_distro": "amzn2_aarch64", + "tools_distro": get_tools_distro(tools_version=tools_version)["arm"], + "architecture": "arm64", + } + arch_amd = { + "agent_distro": "rhel9_x86_64", + "tools_distro": get_tools_distro(tools_version=tools_version)["amd"], + "architecture": "amd64", + } + + new_rhel_tool_version = "100.10.0" + if Version(tools_version) >= Version(new_rhel_tool_version): + arch_arm["tools_distro"] = "rhel93-aarch64" + arch_amd["tools_distro"] = "rhel93-x86_64" + + ecr_registry = os.environ.get("REGISTRY", "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev") + ecr_agent_registry = ecr_registry + f"/mongodb-agent-ubi" + quay_agent_registry = QUAY_REGISTRY_URL + f"/mongodb-agent-ubi" + joined_args = [args | arch_amd] + + # Only include arm64 if we shouldn't skip it + if not should_skip_arm64(): + joined_args.append(args | arch_arm) + + build_image_generic( + config=build_configuration, + image_name="mongodb-agent", + inventory_file="inventories/agent_non_matrix.yaml", + multi_arch_args_list=joined_args, + registry_address=quay_agent_registry if is_release else ecr_agent_registry, + is_multi_arch=True, # We for pushing manifest anyway, even if arm64 is skipped in patches + is_run_in_parallel=True, + ) + + +def build_agent_default_case(build_configuration: BuildConfiguration): + """ + Build the agent only for the latest operator for patches and operator releases. 
+ + See more information in the function: build_agent_on_agent_bump + """ + release = get_release() + + operator_version, is_release = get_git_release_tag() + + # We need to release [all agents x latest operator] on operator releases + if is_release: + agent_versions_to_build = gather_all_supported_agent_versions(release) + # We only need [latest agents (for each OM major version and for CM) x patch ID] for patches + else: + agent_versions_to_build = gather_latest_agent_versions(release) + + logger.info(f"Building Agent versions: {agent_versions_to_build} for Operator versions: {operator_version}") + + tasks_queue = Queue() + max_workers = 1 + if build_configuration.parallel: + max_workers = None + if build_configuration.parallel_factor > 0: + max_workers = build_configuration.parallel_factor + with ProcessPoolExecutor(max_workers=max_workers) as executor: + logger.info(f"running with factor of {max_workers}") + for agent_version in agent_versions_to_build: + # We don't need to keep create and push the same image on every build. + # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. + if build_configuration.is_release_step_executed() or build_configuration.all_agents: + tasks_queue.put( + executor.submit( + build_multi_arch_agent_in_sonar, + build_configuration, + agent_version[0], + agent_version[1], + ) + ) + _build_agent_operator( + agent_version, build_configuration, executor, operator_version, tasks_queue, is_release + ) + + queue_exception_handling(tasks_queue) + + +def build_agent_on_agent_bump(build_configuration: BuildConfiguration): + """ + Build the agent matrix (operator version x agent version), triggered by PCT. + + We have three cases where we need to build the agent: + - e2e test runs + - operator releases + - OM/CM bumps via PCT + + We don't require building a full matrix on e2e test runs and operator releases. 
+ "Operator releases" and "e2e test runs" require only the latest operator x agents + + In OM/CM bumps, we release a new agent which we potentially require to release to older operators as well. + This function takes care of that. + """ + release = get_release() + is_release = build_configuration.is_release_step_executed() + + if build_configuration.all_agents: + # We need to release [all agents x latest operator] on operator releases to make e2e tests work + # This was changed previously in https://github.com/mongodb/mongodb-kubernetes/pull/3960 + agent_versions_to_build = gather_all_supported_agent_versions(release) + else: + # we only need to release the latest images, we don't need to re-push old images, as we don't clean them up anymore. + agent_versions_to_build = gather_latest_agent_versions(release) + + legacy_agent_versions_to_build = release["supportedImages"]["mongodb-agent"]["versions"] + + tasks_queue = Queue() + max_workers = 1 + if build_configuration.parallel: + max_workers = None + if build_configuration.parallel_factor > 0: + max_workers = build_configuration.parallel_factor + with ProcessPoolExecutor(max_workers=max_workers) as executor: + logger.info(f"running with factor of {max_workers}") + + # We need to regularly push legacy agents, otherwise ecr lifecycle policy will expire them. + # We only need to push them once in a while to ecr, so no quay required + if not is_release: + for legacy_agent in legacy_agent_versions_to_build: + tasks_queue.put( + executor.submit( + build_multi_arch_agent_in_sonar, + build_configuration, + legacy_agent, + # we assume that all legacy agents are build using that tools version + "100.9.4", + ) + ) + + for agent_version in agent_versions_to_build: + # We don't need to keep create and push the same image on every build. + # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. 
def queue_exception_handling(tasks_queue):
    """Fail the build if any submitted build task raised.

    Logs every captured exception first, so that the build output contains
    all failures, then raises one summary exception.
    """
    failed_tasks = [task for task in tasks_queue.queue if task.exception() is not None]
    for failed in failed_tasks:
        logger.fatal(f"The following exception has been found when building: {failed.exception()}")
    if failed_tasks:
        raise Exception(
            f"Exception(s) found when processing Agent images. \nSee also previous logs for more info\nFailing the build"
        )
def gather_all_supported_agent_versions(release: Dict) -> List[Tuple[str, str]]:
    """Return every supported (agent_version, tools_version) pair.

    Combines the Cloud Manager pair with one pair per supported Ops Manager
    entry from release.json; duplicates are collapsed and the result is
    returned sorted so the build order is deterministic.
    """
    mapping = release["supportedImages"]["mongodb-agent"]["opsManagerMapping"]

    # Seed with the Cloud Manager agent, then add one pair per OM version.
    pairs = {(mapping["cloud_manager"], mapping["cloud_manager_tools"])}
    pairs.update((om["agent_version"], om["tools_version"]) for om in mapping["ops_manager"].values())

    # lets not build the same image multiple times
    return sorted(pairs)
def gather_latest_agent_versions(release: Dict) -> List[Tuple[str, str]]:
    """
    This function is used when we release a new agent via OM bump.
    That means we will need to release that agent with all supported operators.
    Since we don't want to release all agents again, we only release the latest, which will contain the newly added one.
    :return: the latest agent (agent_version, tools_version) for each OM major version, plus the Cloud Manager agent
    """
    mapping = release["supportedImages"]["mongodb-agent"]["opsManagerMapping"]

    agent_versions_to_build = list()
    agent_versions_to_build.append(
        (
            mapping["cloud_manager"],
            mapping["cloud_manager_tools"],
        )
    )

    # major version -> highest OM version string seen so far.
    # Fix: the previous implementation stored a semver.VersionInfo on the
    # "newer version found" branch but a plain string on first insertion,
    # relying on str() round-tripping for the dict lookup below. We now
    # consistently keep the original version string.
    latest_versions = {}

    for version in mapping["ops_manager"].keys():
        parsed_version = semver.VersionInfo.parse(version)
        major_version = parsed_version.major
        current = latest_versions.get(major_version)
        if current is None or parsed_version > semver.VersionInfo.parse(current):
            latest_versions[major_version] = version

    for latest_version in latest_versions.values():
        om = mapping["ops_manager"][latest_version]
        agent_versions_to_build.append((om["agent_version"], om["tools_version"]))

    # TODO: Remove this once we don't need to use OM 7.0.12 in the OM Multicluster DR tests
    # https://jira.mongodb.org/browse/CLOUDP-297377
    agent_versions_to_build.append(("107.0.12.8669-1", "100.10.0"))

    # lets not build the same image multiple times
    return sorted(list(set(agent_versions_to_build)))
def get_builder_function_for_image_name() -> Dict[str, Callable]:
    """Returns a dictionary of image names that can be built.

    Keys are the image names accepted by the CLI `--include` flag; values are
    the builder callables, each taking a single BuildConfiguration argument.
    """

    image_builders = {
        "cli": build_CLI_SBOM,
        "test": build_tests_image,
        "operator": build_operator_image,
        "mco-test": build_mco_tests_image,
        # TODO: add support to build this per patch
        "readiness-probe": build_readiness_probe_image,
        "upgrade-hook": build_upgrade_hook_image,
        "operator-quick": build_operator_image_patch,
        "database": build_database_image,
        "agent-pct": build_agent_on_agent_bump,
        "agent": build_agent_default_case,
        #
        # Init images
        "init-appdb": build_init_appdb,
        "init-database": build_init_database,
        "init-ops-manager": build_init_om_image,
        #
        # Ops Manager image
        "ops-manager": build_om_image,
    }

    return image_builders


# TODO: nam static: remove this once static containers becomes the default
def build_init_database(build_configuration: BuildConfiguration):
    """Builds the init-database image.

    The image version and the tools bundle both come from release.json; the
    tools tarball URL is passed through to the build as an argument.
    """
    release = get_release()
    version = release["initDatabaseVersion"]  # comes from release.json
    base_url = "https://fastdl.mongodb.org/tools/db/"
    mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"])
    args = {"version": version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi, "is_appdb": False}
    build_image_generic(build_configuration, "init-database", "inventories/init_database.yaml", args)


def build_image(image_name: str, build_configuration: BuildConfiguration):
    """Builds one of the supported images by its name."""
    # Raises KeyError for unknown names; callers are expected to validate
    # against get_builder_function_for_image_name() first.
    get_builder_function_for_image_name()[image_name](build_configuration)


def build_all_images(
    images: Iterable[str],
    builder: str,
    debug: bool = False,
    parallel: bool = False,
    architecture: Optional[List[str]] = None,
    sign: bool = False,
    all_agents: bool = False,
    parallel_factor: int = 0,
):
    """Builds all the images in the `images` list."""
    # One BuildConfiguration is shared by every image built in this run.
    build_configuration = operator_build_configuration(
        builder, parallel, debug, architecture, sign, all_agents, parallel_factor
    )
    if sign:
        # Signing needs artifactory credentials; log in once up front.
        mongodb_artifactory_login()
    for image in images:
        build_image(image, build_configuration)
["a"] + """ + + if not include and not exclude: + return set(images) + include = set(include or []) + exclude = set(exclude or []) + images = set(images or []) + + for image in include.union(exclude): + if image not in images: + raise ValueError("Image definition {} not found".format(image)) + + images_to_build = include.intersection(images) + return images_to_build + + +def main(): + _setup_tracing() + _setup_tracing() + + parser = argparse.ArgumentParser() + parser.add_argument("--include", action="append", help="list of images to include") + parser.add_argument("--builder", default="docker", type=str, help="docker or podman") + parser.add_argument("--list-images", action="store_true") + parser.add_argument("--parallel", action="store_true", default=False) + parser.add_argument("--debug", action="store_true", default=False) + parser.add_argument( + "--arch", + choices=["amd64", "arm64"], + nargs="+", + help="for operator and community images only, specify the list of architectures to build for images", + ) + parser.add_argument("--sign", action="store_true", default=False) + parser.add_argument( + "--parallel-factor", + type=int, + default=0, + help="the factor on how many agents are built in parallel. 
def main():
    """CLI entry point: parse arguments and build the selected images."""
    # Bug fix: _setup_tracing() was called twice.
    _setup_tracing()

    parser = argparse.ArgumentParser()
    parser.add_argument("--include", action="append", help="list of images to include")
    parser.add_argument("--builder", default="docker", type=str, help="docker or podman")
    parser.add_argument("--list-images", action="store_true")
    parser.add_argument("--parallel", action="store_true", default=False)
    parser.add_argument("--debug", action="store_true", default=False)
    parser.add_argument(
        "--arch",
        choices=["amd64", "arm64"],
        nargs="+",
        help="for operator and community images only, specify the list of architectures to build for images",
    )
    parser.add_argument("--sign", action="store_true", default=False)
    parser.add_argument(
        "--parallel-factor",
        type=int,
        default=0,
        help="the factor on how many agents are built in parallel. 0 means all CPUs will be used",
    )
    parser.add_argument(
        "--all-agents",
        action="store_true",
        default=False,
        help="optional parameter to be able to push "
        "all non operator suffixed agents, even if we are not in a release",
    )
    args = parser.parse_args()

    images_list = list(get_builder_function_for_image_name().keys())
    if args.list_images:
        print(images_list)
        sys.exit(0)

    if args.arch == ["arm64"]:
        print("Building for arm64 only is not supported yet")
        sys.exit(1)

    if not args.sign:
        logger.warning("--sign flag not provided, images won't be signed")

    # Bug fix: `args.include` is a plain list (or None) and has no
    # `.intersection` method; the previous code crashed here. Route through
    # the shared helper so unknown names are reported and "no --include"
    # means "build everything".
    images_to_build = calculate_images_to_build(images_list, args.include, None)

    build_all_images(
        images_to_build,
        args.builder,
        debug=args.debug,
        parallel=args.parallel,
        architecture=args.arch,
        sign=args.sign,
        all_agents=args.all_agents,
        parallel_factor=args.parallel_factor,
    )
- """ - # We need to return something so `partition` does not raise - # AttributeError - is_patch = is_running_in_patch() - - try: - pinned = os.environ["pin_tag_at"] - except KeyError: - raise MissingEnvironmentVariable(f"pin_tag_at environment variable does not exist, but is required") - if is_patch: - if pinned == "00:00": - raise Exception("Pinning to midnight during a patch is not supported. Please pin to another date!") - - hour, _, minute = pinned.partition(":") - return hour, minute - - def is_running_in_patch(): is_patch = os.environ.get("is_patch") return is_patch is not None and is_patch.lower() == "true" -def build_id() -> str: - """Returns the current UTC time in ISO8601 date format. - - If running in Evergreen and `created_at` expansion is defined, use the - datetime defined in that variable instead. - - It is possible to pin this time at midnight (00:00) for periodic builds. If - running a manual build, then the Evergreen `pin_tag_at` variable needs to be - set to the empty string, in which case, the image tag suffix will correspond - to the current timestamp. 
- - """ - - date = datetime.now(timezone.utc) - try: - created_at = os.environ["created_at"] - date = datetime.strptime(created_at, "%y_%m_%d_%H_%M_%S") - except KeyError: - pass - - hour, minute = should_pin_at() - if hour and minute: - logger.info(f"we are pinning to, hour: {hour}, minute: {minute}") - date = date.replace(hour=int(hour), minute=int(minute), second=0) - else: - logger.warning(f"hour and minute cannot be extracted from provided pin_tag_at env, pinning to now") - - string_time = date.strftime("%Y%m%dT%H%M%SZ") - - return string_time - - def get_release() -> Dict: with open("release.json") as release: return json.load(release) From 988b91b2485aae9593c81de9f4e42961f77e8180 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 18 Jun 2025 11:03:26 +0200 Subject: [PATCH 5/9] Remove namespace --- scripts/release/atomic_pipeline.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index da523c726..a75de37c9 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -100,7 +100,6 @@ def _setup_tracing(): class BuildConfiguration: image_type: str base_repository: str - namespace: str include_tags: list[str] skip_tags: list[str] @@ -168,7 +167,6 @@ def operator_build_configuration( bc = BuildConfiguration( image_type=os.environ.get("distro", DEFAULT_IMAGE_TYPE), base_repository=os.environ["BASE_REPO_URL"], - namespace=os.environ.get("namespace", DEFAULT_NAMESPACE), skip_tags=make_list_of_str(os.environ.get("skip_tags")), include_tags=make_list_of_str(os.environ.get("include_tags")), builder=builder, From 17afb2841026f50b733e3e8466d1eef76c14c6d6 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 18 Jun 2025 11:47:29 +0200 Subject: [PATCH 6/9] Replace usage of sonar --- scripts/release/atomic_pipeline.py | 115 +++++++++++++---------------- 1 file changed, 50 insertions(+), 65 deletions(-) diff --git a/scripts/release/atomic_pipeline.py 
b/scripts/release/atomic_pipeline.py index a75de37c9..3efa9b3b2 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -40,7 +40,7 @@ import docker from lib.base_logger import logger -from lib.sonar.sonar import process_image +from build_images import process_image from scripts.evergreen.release.agent_matrix import ( get_supported_operator_versions, get_supported_version_for_image_matrix_handling, @@ -54,7 +54,8 @@ TRACER = trace.get_tracer("evergreen-agent") -#TODO: better framework for multi arch builds (spike to come) +# TODO: better framework for multi arch builds (spike to come) + def _setup_tracing(): trace_id = os.environ.get("otel_trace_id") @@ -101,9 +102,6 @@ class BuildConfiguration: image_type: str base_repository: str - include_tags: list[str] - skip_tags: list[str] - builder: str = "docker" parallel: bool = False parallel_factor: int = 0 @@ -123,19 +121,6 @@ def build_args(self, args: Optional[Dict[str, str]] = None) -> Dict[str, str]: return args - def get_skip_tags(self) -> list[str]: - return make_list_of_str(self.skip_tags) - - def get_include_tags(self) -> list[str]: - return make_list_of_str(self.include_tags) - - def is_release_step_executed(self) -> bool: - if "release" in self.get_skip_tags(): - return False - if "release" in self.get_include_tags(): - return True - return len(self.get_include_tags()) == 0 - def make_list_of_str(value: Union[None, str, List[str]]) -> List[str]: if value is None: @@ -167,8 +152,6 @@ def operator_build_configuration( bc = BuildConfiguration( image_type=os.environ.get("distro", DEFAULT_IMAGE_TYPE), base_repository=os.environ["BASE_REPO_URL"], - skip_tags=make_list_of_str(os.environ.get("skip_tags")), - include_tags=make_list_of_str(os.environ.get("include_tags")), builder=builder, parallel=parallel, all_agents=all_agents or bool(os.environ.get("all_agents", False)), @@ -307,45 +290,36 @@ def check_multi_arch(image: str, suffix: str) -> bool: 
@TRACER.start_as_current_span("sonar_build_image") -def sonar_build_image( +def pipeline_process_image( image_name: str, - build_configuration: BuildConfiguration, + dockerfile_path: str, args: Dict[str, str] = None, - inventory="inventory.yaml", with_sbom: bool = True, ): """Calls sonar to build `image_name` with arguments defined in `args`.""" span = trace.get_current_span() span.set_attribute("mck.image_name", image_name) - span.set_attribute("mck.inventory", inventory) if args: span.set_attribute("mck.build_args", str(args)) + # TODO use these build_options = { # Will continue building an image if it finds an error. See next comment. "continue_on_errors": True, # But will still fail after all the tasks have completed "fail_on_errors": True, - "pipeline": build_configuration.pipeline, } - logger.info(f"Sonar config bc: {build_configuration}, args: {args}, for image: {image_name}") + logger.info(f"Dockerfile args: {args}, for image: {image_name}") - process_image( - image_name, - skip_tags=build_configuration.get_skip_tags(), - include_tags=build_configuration.get_include_tags(), - build_args=build_configuration.build_args(args), - inventory=inventory, - build_options=build_options, - ) + process_image(image_name, dockerfile_path=dockerfile_path, dockerfile_args=args) if with_sbom: - produce_sbom(build_configuration, args) + produce_sbom(args) @TRACER.start_as_current_span("produce_sbom") -def produce_sbom(build_configuration, args): +def produce_sbom(args): span = trace.get_current_span() if not is_running_in_evg_pipeline(): logger.info("Skipping SBOM Generation (enabled only for EVG)") @@ -354,7 +328,7 @@ def produce_sbom(build_configuration, args): try: image_pull_spec = args["quay_registry"] + args.get("ubi_suffix", "") except KeyError: - logger.error(f"Could not find image pull spec. Args: {args}, BuildConfiguration: {build_configuration}") + logger.error(f"Could not find image pull spec. 
Args: {args}") logger.error(f"Skipping SBOM generation") return @@ -362,13 +336,14 @@ def produce_sbom(build_configuration, args): image_tag = args["release_version"] span.set_attribute("mck.release_version", image_tag) except KeyError: - logger.error(f"Could not find image tag. Args: {args}, BuildConfiguration: {build_configuration}") + logger.error(f"Could not find image tag. Args: {args}") logger.error(f"Skipping SBOM generation") return image_pull_spec = f"{image_pull_spec}:{image_tag}" print(f"Producing SBOM for image: {image_pull_spec} args: {args}") + platform = "linux/amd64" if "platform" in args: if args["platform"] == "arm64": platform = "linux/arm64" @@ -377,8 +352,6 @@ def produce_sbom(build_configuration, args): else: # TODO: return here? logger.error(f"Unrecognized architectures in {args}. Skipping SBOM generation") - else: - platform = "linux/amd64" generate_sbom(image_pull_spec, platform) @@ -412,7 +385,7 @@ def build_tests_image(build_configuration: BuildConfiguration): buildargs = dict({"python_version": python_version}) - sonar_build_image(image_name, build_configuration, buildargs, "inventories/test.yaml") + pipeline_process_image(image_name, "docker/mongodb-kubernetes-tests/Dockerfile", buildargs) def build_mco_tests_image(build_configuration: BuildConfiguration): @@ -426,7 +399,7 @@ def build_mco_tests_image(build_configuration: BuildConfiguration): buildargs = dict({"golang_version": golang_version}) - sonar_build_image(image_name, build_configuration, buildargs, "inventories/mco_test.yaml") + pipeline_process_image(image_name, "docker/mongodb-community-tests/Dockerfile", buildargs) def build_operator_image(build_configuration: BuildConfiguration): @@ -450,9 +423,9 @@ def build_operator_image(build_configuration: BuildConfiguration): build_image_generic( config=build_configuration, image_name=image_name, - inventory_file="inventory.yaml", - extra_args=args, + dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile", 
registry_address=f"{QUAY_REGISTRY_URL}/{image_name}", + extra_args=args, ) @@ -463,7 +436,7 @@ def build_database_image(build_configuration: BuildConfiguration): release = get_release() version = release["databaseImageVersion"] args = {"version": version} - build_image_generic(build_configuration, "database", "inventories/database.yaml", args) + build_image_generic(build_configuration, "database", "docker/mongodb-kubernetes-database.yaml", extra_args=args) def build_CLI_SBOM(build_configuration: BuildConfiguration): @@ -578,6 +551,7 @@ def image_config( return image_name, args + def is_version_in_range(version: str, min_version: str, max_version: str) -> bool: """Check if the version is in the range""" try: @@ -650,6 +624,7 @@ def should_skip_arm64(): """ return is_running_in_evg_pipeline() and is_running_in_patch() + @TRACER.start_as_current_span("sign_image_in_repositories") def sign_image_in_repositories(args: Dict[str, str], arch: str = None): span = trace.get_current_span() @@ -709,7 +684,12 @@ def build_init_om_image(build_configuration: BuildConfiguration): release = get_release() init_om_version = release["initOpsManagerVersion"] args = {"version": init_om_version} - build_image_generic(build_configuration, "init-ops-manager", "inventories/init_om.yaml", args) + build_image_generic( + build_configuration, + "init-ops-manager", + "docker/mongodb-kubernetes-init-ops-manager/Dockerfile", + extra_args=args, + ) def build_om_image(build_configuration: BuildConfiguration): @@ -731,18 +711,18 @@ def build_om_image(build_configuration: BuildConfiguration): build_image_generic( config=build_configuration, image_name="ops-manager", - inventory_file="inventories/om.yaml", - extra_args=args, + dockerfile_path="docker/mongodb-enterprise-ops-manager/Dockerfile", registry_address=f"{QUAY_REGISTRY_URL}/mongodb-enterprise-ops-manager", + extra_args=args, ) def build_image_generic( config: BuildConfiguration, image_name: str, - inventory_file: str, - extra_args: dict = None, 
+ dockerfile_path: str, registry_address: str = None, + extra_args: dict = None, is_multi_arch: bool = False, multi_arch_args_list: list = None, is_run_in_parallel: bool = False, @@ -759,7 +739,7 @@ def build_image_generic( for args in multi_arch_args_list: # in case we are building multiple architectures args["quay_registry"] = registry - sonar_build_image(image_name, config, args, inventory_file, False) + pipeline_process_image(image_name, dockerfile_path, args, False) if is_multi_arch: # we only push the manifests of the context images here, # since daily rebuilds will push the manifests for the proper images later @@ -775,6 +755,7 @@ def build_image_generic( if config.sign and config.is_release_step_executed(): sign_and_verify_context_image(registry, version) + def sign_and_verify_context_image(registry, version): sign_image(registry, version + "-context") verify_signature(registry, version + "-context") @@ -786,7 +767,9 @@ def build_init_appdb(build_configuration: BuildConfiguration): base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) args = {"version": version, "is_appdb": True, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} - build_image_generic(build_configuration, "init-appdb", "inventories/init_appdb.yaml", args) + build_image_generic( + build_configuration, "init-appdb", "docker/mongodb-kubernetes-init-appdb/Dockerfile", extra_args=args + ) def build_community_image(build_configuration: BuildConfiguration, image_type: str): @@ -800,10 +783,10 @@ def build_community_image(build_configuration: BuildConfiguration, image_type: s if image_type == "readiness-probe": image_name = "mongodb-kubernetes-readinessprobe" - inventory_file = "inventories/readiness_probe.yaml" + dockerfile_path = "docker/mongodb-kubernetes-readinessprobe/Dockerfile" elif image_type == "upgrade-hook": image_name = "mongodb-kubernetes-operator-version-upgrade-post-start-hook" - inventory_file = 
"inventories/upgrade_hook.yaml" + dockerfile_path = "docker/mongodb-kubernetes-upgrade-hook/Dockerfile" else: raise ValueError(f"Unsupported image type: {image_type}") @@ -833,10 +816,10 @@ def build_community_image(build_configuration: BuildConfiguration, image_type: s build_image_generic( config=build_configuration, image_name=image_name, - multi_arch_args_list=multi_arch_args_list, - inventory_file=inventory_file, + dockerfile_path=dockerfile_path, registry_address=f"{base_repo}/{image_name}", - is_multi_arch=True, # We for pushing manifest anyway, even if arm64 is skipped in patches + is_multi_arch=True, + multi_arch_args_list=multi_arch_args_list, ) @@ -854,7 +837,7 @@ def build_upgrade_hook_image(build_configuration: BuildConfiguration): build_community_image(build_configuration, "upgrade-hook") -def build_agent_in_sonar( +def build_agent_pipeline( build_configuration: BuildConfiguration, image_version, init_database_image, @@ -876,9 +859,9 @@ def build_agent_in_sonar( build_image_generic( config=build_configuration, image_name="mongodb-agent", - inventory_file="inventories/agent.yaml", - extra_args=args, + dockerfile_path="docker/mongodb-agent/Dockerfile", registry_address=agent_quay_registry, + extra_args=args, is_run_in_parallel=True, ) @@ -932,10 +915,10 @@ def build_multi_arch_agent_in_sonar( build_image_generic( config=build_configuration, image_name="mongodb-agent", - inventory_file="inventories/agent_non_matrix.yaml", - multi_arch_args_list=joined_args, + dockerfile_path="docker/mongodb-agent-non-matrix/Dockerfile", registry_address=quay_agent_registry if is_release else ecr_agent_registry, - is_multi_arch=True, # We for pushing manifest anyway, even if arm64 is skipped in patches + is_multi_arch=True, + multi_arch_args_list=joined_args, is_run_in_parallel=True, ) @@ -1094,7 +1077,7 @@ def _build_agent_operator( tasks_queue.put( executor.submit( - build_agent_in_sonar, + build_agent_pipeline, build_configuration, image_version, init_database_image, @@ 
-1201,7 +1184,9 @@ def build_init_database(build_configuration: BuildConfiguration): base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) args = {"version": version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi, "is_appdb": False} - build_image_generic(build_configuration, "init-database", "inventories/init_database.yaml", args) + build_image_generic( + build_configuration, "init-database", "docker/mongodb-kubernetes-init-database.yaml", extra_args=args + ) def build_image(image_name: str, build_configuration: BuildConfiguration): From 46831486df0fd125edb7fdece11d61e8602233f5 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 18 Jun 2025 16:50:34 +0200 Subject: [PATCH 7/9] WIP, passing builds on staging temp + multi arch manifests --- docker/mongodb-kubernetes-tests/release.json | 253 +++++++++++++++++++ scripts/release/atomic_pipeline.py | 169 ++++++------- 2 files changed, 335 insertions(+), 87 deletions(-) create mode 100644 docker/mongodb-kubernetes-tests/release.json diff --git a/docker/mongodb-kubernetes-tests/release.json b/docker/mongodb-kubernetes-tests/release.json new file mode 100644 index 000000000..4fdb45ec1 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/release.json @@ -0,0 +1,253 @@ +{ + "mongodbToolsBundle": { + "ubi": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz" + }, + "mongodbOperator": "1.1.0", + "initDatabaseVersion": "1.1.0", + "initOpsManagerVersion": "1.1.0", + "initAppDbVersion": "1.1.0", + "databaseImageVersion": "1.1.0", + "agentVersion": "108.0.2.8729-1", + "openshift": { + "minimumSupportedVersion": "4.6" + }, + "search": { + "community": { + "version": "1.47.0" + } + }, + "supportedImages": { + "readinessprobe": { + "ssdlc_name": "MongoDB Controllers for Kubernetes Readiness Probe", + "versions": [ + "1.0.22" + ], + "variants": [ + "ubi" + ] + }, + "operator-version-upgrade-post-start-hook": { + "ssdlc_name": "MongoDB Controllers for 
Kubernetes Operator Version Upgrade Hook", + "versions": [ + "1.0.9" + ], + "variants": [ + "ubi" + ] + }, + "ops-manager": { + "ssdlc_name": "MongoDB Controllers for Kubernetes Enterprise Ops Manager", + "versions": [ + "6.0.25", + "6.0.26", + "6.0.27", + "7.0.12", + "7.0.13", + "7.0.14", + "7.0.15", + "8.0.5", + "8.0.6", + "8.0.7" + ], + "variants": [ + "ubi" + ] + }, + "mongodb-kubernetes": { + "Description": "We support 3 last versions, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", + "ssdlc_name": "MongoDB Controllers for Kubernetes Operator", + "versions": [ + "1.0.0", + "1.0.1", + "1.1.0" + ], + "variants": [ + "ubi" + ] + }, + "mongodb-kubernetes-operator": { + "Description": "Community Operator daily rebuilds", + "ssdlc_name": "MongoDB Community Operator", + "versions": [ + "0.12.0", + "0.11.0", + "0.10.0", + "0.9.0", + "0.8.3", + "0.8.2", + "0.8.1", + "0.8.0", + "0.7.9", + "0.7.8", + "0.7.7", + "0.7.6" + ], + "variants": [ + "ubi" + ] + }, + "mongodb-agent": { + "Description": "Agents corresponding to OpsManager 5.x and 6.x series", + "ssdlc_name": "MongoDB Controllers for Kubernetes MongoDB Agent", + "Description for specific versions": { + "11.0.5.6963-1": "An upgraded version for OM 5.0 we use for Operator-only deployments", + "12.0.28.7763-1": "OM 6 basic version" + }, + "versions": [ + "108.0.2.8729-1" + ], + "opsManagerMapping": { + "Description": "These are the agents from which we start supporting static containers.", + "cloud_manager": "13.35.0.9498-1", + "cloud_manager_tools": "100.12.1", + "ops_manager": { + "6.0.25": { + "agent_version": "12.0.33.7866-1", + "tools_version": "100.10.0" + }, + "6.0.26": { + "agent_version": "12.0.34.7888-1", + "tools_version": "100.10.0" + }, + "6.0.27": { + "agent_version": "12.0.35.7911-1", + "tools_version": "100.10.0" + }, + "7.0.13": { + "agent_version": "107.0.13.8702-1", + "tools_version": "100.10.0" + }, + "7.0.14": { + "agent_version": "107.0.13.8702-1", + 
"tools_version": "100.10.0" + }, + "7.0.15": { + "agent_version": "107.0.15.8741-1", + "tools_version": "100.11.0" + }, + "8.0.5": { + "agent_version": "108.0.4.8770-1", + "tools_version": "100.11.0" + }, + "8.0.6": { + "agent_version": "108.0.6.8796-1", + "tools_version": "100.11.0" + }, + "8.0.7": { + "agent_version": "108.0.7.8810-1", + "tools_version": "100.12.0" + } + } + }, + "variants": [ + "ubi" + ] + }, + "init-ops-manager": { + "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", + "ssdlc_name": "MongoDB Controllers for Kubernetes Init Ops Manager", + "versions": [ + "1.0.0", + "1.0.1", + "1.1.0" + ], + "variants": [ + "ubi" + ] + }, + "init-database": { + "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", + "ssdlc_name": "MongoDB Controllers for Kubernetes Init Database", + "versions": [ + "1.0.0", + "1.0.1", + "1.1.0" + ], + "variants": [ + "ubi" + ] + }, + "init-appdb": { + "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", + "ssdlc_name": "MongoDB Controllers for Kubernetes Init AppDB", + "versions": [ + "1.0.0", + "1.0.1", + "1.1.0" + ], + "variants": [ + "ubi" + ] + }, + "database": { + "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", + "ssdlc_name": "MongoDB Controllers for Kubernetes Database", + "versions": [ + "1.0.0", + "1.0.1", + "1.1.0" + ], + "variants": [ + "ubi" + ] + }, + "mongodb-enterprise-server": { + "Description": "The lowest version corresponds to the lowest supported Operator version, see 
https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", + "ssdlc_name": "MongoDB Enterprise Server", + "versions": [ + "4.4.0-ubi8", + "4.4.1-ubi8", + "4.4.2-ubi8", + "4.4.3-ubi8", + "4.4.4-ubi8", + "4.4.5-ubi8", + "4.4.6-ubi8", + "4.4.7-ubi8", + "4.4.8-ubi8", + "4.4.9-ubi8", + "4.4.10-ubi8", + "4.4.11-ubi8", + "4.4.12-ubi8", + "4.4.13-ubi8", + "4.4.14-ubi8", + "4.4.15-ubi8", + "4.4.16-ubi8", + "4.4.17-ubi8", + "4.4.18-ubi8", + "4.4.19-ubi8", + "4.4.20-ubi8", + "4.4.21-ubi8", + "5.0.0-ubi8", + "5.0.1-ubi8", + "5.0.2-ubi8", + "5.0.3-ubi8", + "5.0.4-ubi8", + "5.0.5-ubi8", + "5.0.6-ubi8", + "5.0.7-ubi8", + "5.0.8-ubi8", + "5.0.9-ubi8", + "5.0.10-ubi8", + "5.0.11-ubi8", + "5.0.12-ubi8", + "5.0.13-ubi8", + "5.0.14-ubi8", + "5.0.15-ubi8", + "5.0.16-ubi8", + "5.0.17-ubi8", + "5.0.18-ubi8", + "6.0.0-ubi8", + "6.0.1-ubi8", + "6.0.2-ubi8", + "6.0.3-ubi8", + "6.0.4-ubi8", + "6.0.5-ubi8", + "8.0.0-ubi8", + "8.0.0-ubi9" + ], + "variants": [ + "ubi" + ] + } + } +} diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 3efa9b3b2..beae7e2d2 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -40,7 +40,7 @@ import docker from lib.base_logger import logger -from build_images import process_image +from .build_images import process_image from scripts.evergreen.release.agent_matrix import ( get_supported_operator_versions, get_supported_version_for_image_matrix_handling, @@ -94,7 +94,7 @@ def _setup_tracing(): # QUAY_REGISTRY_URL sets the base registry for all release build stages. Context images and daily builds will push the # final images to the registry specified here. # This makes it easy to use ECR to test changes on the pipeline before pushing to Quay. 
-QUAY_REGISTRY_URL = os.environ.get("QUAY_REGISTRY", "quay.io/mongodb") +QUAY_REGISTRY_URL = "268558157000.dkr.ecr.us-east-1.amazonaws.com/julienben/staging-temp" @dataclass @@ -102,7 +102,6 @@ class BuildConfiguration: image_type: str base_repository: str - builder: str = "docker" parallel: bool = False parallel_factor: int = 0 architecture: Optional[List[str]] = None @@ -141,7 +140,6 @@ def get_tools_distro(tools_version: str) -> Dict[str, str]: def operator_build_configuration( - builder: str, parallel: bool, debug: bool, architecture: Optional[List[str]] = None, @@ -152,7 +150,6 @@ def operator_build_configuration( bc = BuildConfiguration( image_type=os.environ.get("distro", DEFAULT_IMAGE_TYPE), base_repository=os.environ["BASE_REPO_URL"], - builder=builder, parallel=parallel, all_agents=all_agents or bool(os.environ.get("all_agents", False)), debug=debug, @@ -169,7 +166,6 @@ def operator_build_configuration( f"or in pipeline but not from master (is_running_in_patch={is_running_in_patch()}). " "Adding 'master' tag to skip to prevent publishing to the latest dev image." ) - bc.skip_tags.append("master") return bc @@ -233,7 +229,9 @@ def create_and_push_manifest(image: str, tag: str, architectures: list[str]) -> This method calls docker directly on the command line, this is different from the rest of the code which uses Sonar as an interface to docker. We decided to keep this asymmetry for now, as Sonar will be removed soon. 
""" - final_manifest = image + ":" + tag + logger.debug(f"image: {image}, tag: {tag}, architectures: {architectures}") + final_manifest = image + logger.debug(f"push_manifest - final_manifest={final_manifest}") args = [ "docker", @@ -243,7 +241,8 @@ def create_and_push_manifest(image: str, tag: str, architectures: list[str]) -> ] for arch in architectures: - args.extend(["--amend", f"{final_manifest}-{arch}"]) + logger.debug(f"push_manifest - amending {final_manifest}:{tag}-{arch}") + args.extend(["--amend", f"{final_manifest}:{tag}-{arch}"]) args_str = " ".join(args) logger.debug(f"creating new manifest: {args_str}") @@ -293,16 +292,21 @@ def check_multi_arch(image: str, suffix: str) -> bool: def pipeline_process_image( image_name: str, dockerfile_path: str, - args: Dict[str, str] = None, + dockerfile_args: Dict[str, str] = None, + base_registry: str = None, + architecture=None, + sign: bool = False, with_sbom: bool = True, ): - """Calls sonar to build `image_name` with arguments defined in `args`.""" + """Calls sonar to build `image_name` with arguments defined in `args`. + :param architecture: + """ span = trace.get_current_span() span.set_attribute("mck.image_name", image_name) - if args: - span.set_attribute("mck.build_args", str(args)) + if dockerfile_args: + span.set_attribute("mck.build_args", str(dockerfile_args)) - # TODO use these + # TODO use these? build_options = { # Will continue building an image if it finds an error. See next comment. 
"continue_on_errors": True, @@ -310,12 +314,22 @@ def pipeline_process_image( "fail_on_errors": True, } - logger.info(f"Dockerfile args: {args}, for image: {image_name}") + logger.info(f"Dockerfile args: {dockerfile_args}, for image: {image_name}") - process_image(image_name, dockerfile_path=dockerfile_path, dockerfile_args=args) + if not dockerfile_args: + dockerfile_args = {} + logger.debug(f"Build args: {dockerfile_args}") + process_image( + image_name, + dockerfile_path=dockerfile_path, + dockerfile_args=dockerfile_args, + base_registry=base_registry, + architecture=architecture, + sign=sign, + ) if with_sbom: - produce_sbom(args) + produce_sbom(dockerfile_args) @TRACER.start_as_current_span("produce_sbom") @@ -379,7 +393,7 @@ def build_tests_image(build_configuration: BuildConfiguration): shutil.copyfile("release.json", "docker/mongodb-kubernetes-tests/release.json") shutil.copyfile("requirements.txt", requirements_dest) - python_version = os.getenv("PYTHON_VERSION", "") + python_version = os.getenv("PYTHON_VERSION", "3.11") if python_version == "": raise Exception("Missing PYTHON_VERSION environment variable") @@ -397,7 +411,7 @@ def build_mco_tests_image(build_configuration: BuildConfiguration): if golang_version == "": raise Exception("Missing GOLANG_VERSION environment variable") - buildargs = dict({"golang_version": golang_version}) + buildargs = dict({"GOLANG_VERSION": golang_version}) pipeline_process_image(image_name, "docker/mongodb-community-tests/Dockerfile", buildargs) @@ -424,7 +438,7 @@ def build_operator_image(build_configuration: BuildConfiguration): config=build_configuration, image_name=image_name, dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile", - registry_address=f"{QUAY_REGISTRY_URL}/{image_name}", + registry_address=QUAY_REGISTRY_URL, extra_args=args, ) @@ -710,9 +724,9 @@ def build_om_image(build_configuration: BuildConfiguration): build_image_generic( config=build_configuration, - image_name="ops-manager", + 
image_name="mongodb-enterprise-ops-manager", dockerfile_path="docker/mongodb-enterprise-ops-manager/Dockerfile", - registry_address=f"{QUAY_REGISTRY_URL}/mongodb-enterprise-ops-manager", + registry_address=QUAY_REGISTRY_URL, extra_args=args, ) @@ -721,44 +735,49 @@ def build_image_generic( config: BuildConfiguration, image_name: str, dockerfile_path: str, - registry_address: str = None, - extra_args: dict = None, + registry_address: str | None = None, + extra_args: dict | None = None, + multi_arch_args_list: list[dict] | None = None, is_multi_arch: bool = False, - multi_arch_args_list: list = None, - is_run_in_parallel: bool = False, ): - """Build image generic builds context images and is used for triggering release. During releases - it signs and verifies the context image. + """ + Build one or more architecture-specific images, then (optionally) + push a manifest and sign the result. """ - if not multi_arch_args_list: - multi_arch_args_list = [extra_args or {}] - - version = multi_arch_args_list[0].get("version", "") # the version is the same in multi-arch for each item - registry = f"{QUAY_REGISTRY_URL}/mongodb-kubernetes-{image_name}" if not registry_address else registry_address + # 1) Defaults + registry = registry_address or "268558157000.dkr.ecr.us-east-1.amazonaws.com/julienben/staging-temp" + args_list = multi_arch_args_list or [extra_args or {}] + version = args_list[0].get("version", "") + architectures = [args.get("architecture") for args in args_list] + + # 2) Build each arch + for base_args in args_list: + # merge in the registry without mutating caller’s dict + build_args = {**base_args, "quay_registry": registry} + logger.debug(f"Build args: {build_args}") + + for arch in architectures: + logger.debug(f"Building {image_name} for arch={arch}") + logger.debug(f"build image generic - registry={registry}") + pipeline_process_image( + image_name, + dockerfile_path, + build_args, + registry, + architecture=arch, + sign=True, + with_sbom=False, + ) - for 
args in multi_arch_args_list: # in case we are building multiple architectures - args["quay_registry"] = registry - pipeline_process_image(image_name, dockerfile_path, args, False) + # 3) Multi-arch manifest if is_multi_arch: - # we only push the manifests of the context images here, - # since daily rebuilds will push the manifests for the proper images later - architectures = [v["architecture"] for v in multi_arch_args_list] - create_and_push_manifest(registry_address, f"{version}-context", architectures=architectures) - if not config.is_release_step_executed(): - # Normally daily rebuild would create and push the manifests for the non-context images. - # But since we don't run daily rebuilds on ecr image builds, we can do that step instead here. - # We only need to push manifests for multi-arch images. - create_and_push_manifest(registry_address, version, architectures=architectures) - - # Sign and verify the context image if on releases if requied. - if config.sign and config.is_release_step_executed(): - sign_and_verify_context_image(registry, version) - + create_and_push_manifest(registry+"/"+image_name, version, architectures=architectures) -def sign_and_verify_context_image(registry, version): - sign_image(registry, version + "-context") - verify_signature(registry, version + "-context") + # 4) Signing (only on real releases) + if config.sign: + sign_image(registry, version) + verify_signature(registry, version) def build_init_appdb(build_configuration: BuildConfiguration): @@ -805,8 +824,9 @@ def build_community_image(build_configuration: BuildConfiguration, image_type: s for arch in architectures: arch_args = { "version": version, - "golang_version": golang_version, + "GOLANG_VERSION": golang_version, "architecture": arch, + "TARGETARCH": arch, } multi_arch_args_list.append(arch_args) @@ -817,7 +837,7 @@ def build_community_image(build_configuration: BuildConfiguration, image_type: s config=build_configuration, image_name=image_name, 
dockerfile_path=dockerfile_path, - registry_address=f"{base_repo}/{image_name}", + registry_address=QUAY_REGISTRY_URL, is_multi_arch=True, multi_arch_args_list=multi_arch_args_list, ) @@ -1196,7 +1216,6 @@ def build_image(image_name: str, build_configuration: BuildConfiguration): def build_all_images( images: Iterable[str], - builder: str, debug: bool = False, parallel: bool = False, architecture: Optional[List[str]] = None, @@ -1205,46 +1224,19 @@ def build_all_images( parallel_factor: int = 0, ): """Builds all the images in the `images` list.""" - build_configuration = operator_build_configuration( - builder, parallel, debug, architecture, sign, all_agents, parallel_factor - ) + build_configuration = operator_build_configuration(parallel, debug, architecture, sign, all_agents, parallel_factor) if sign: mongodb_artifactory_login() for image in images: + logger.info(f"====Building image {image}====") build_image(image, build_configuration) - -def calculate_images_to_build( - images: List[str], include: Optional[List[str]], exclude: Optional[List[str]] -) -> Set[str]: - """ - Calculates which images to build based on the `images`, `include` and `exclude` sets. - - >>> calculate_images_to_build(["a", "b"], ["a"], ["b"]) - ... 
["a"] - """ - - if not include and not exclude: - return set(images) - include = set(include or []) - exclude = set(exclude or []) - images = set(images or []) - - for image in include.union(exclude): - if image not in images: - raise ValueError("Image definition {} not found".format(image)) - - images_to_build = include.intersection(images) - return images_to_build - - def main(): _setup_tracing() _setup_tracing() parser = argparse.ArgumentParser() parser.add_argument("--include", action="append", help="list of images to include") - parser.add_argument("--builder", default="docker", type=str, help="docker or podman") parser.add_argument("--list-images", action="store_true") parser.add_argument("--parallel", action="store_true", default=False) parser.add_argument("--debug", action="store_true", default=False) @@ -1270,7 +1262,7 @@ def main(): ) args = parser.parse_args() - images_list = list(get_builder_function_for_image_name().keys()) + images_list = get_builder_function_for_image_name().keys() if args.list_images: print(images_list) sys.exit(0) @@ -1282,11 +1274,14 @@ def main(): if not args.sign: logger.warning("--sign flag not provided, images won't be signed") - images_to_build = args.include.intersection(images_list) + # TODO check that image names are valid + images_to_build = list(sorted(set(args.include).intersection(images_list))) + if not images_to_build: + logger.error("No images to build, please ensure images names are correct.") + sys.exit(1) build_all_images( images_to_build, - args.builder, debug=args.debug, parallel=args.parallel, architecture=args.arch, From ebaff311d58d5e72461d529b72ff90c13275fc3d Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 20 Jun 2025 17:03:08 +0200 Subject: [PATCH 8/9] Split in multiple files, cleanup --- scripts/release/__init__.py | 0 scripts/release/atomic_pipeline.py | 414 +++----------------- scripts/release/build_configuration.py | 17 + scripts/release/build_images.py | 123 +++--- scripts/release/main.py | 
193 +++++++++ scripts/release/optimized_operator_build.py | 88 +++++ 6 files changed, 420 insertions(+), 415 deletions(-) create mode 100644 scripts/release/__init__.py create mode 100644 scripts/release/build_configuration.py create mode 100644 scripts/release/main.py create mode 100644 scripts/release/optimized_operator_build.py diff --git a/scripts/release/__init__.py b/scripts/release/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index beae7e2d2..b0bb4d9a7 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -4,46 +4,26 @@ and where to fetch and calculate parameters. It uses Sonar.py to produce the final images.""" -import argparse -import copy +# TODO: test pipeline, e.g with a test registry + import json import os -import random import shutil import subprocess -import sys -import tarfile import time -from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor -from dataclasses import dataclass -from datetime import datetime, timedelta, timezone +from concurrent.futures import ProcessPoolExecutor from queue import Queue -from typing import Callable, Dict, Iterable, List, Optional, Set, Tuple, Union +from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union import requests import semver -from opentelemetry import context -from opentelemetry import context as otel_context from opentelemetry import trace -from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( - OTLPSpanExporter as OTLPSpanGrpcExporter, -) -from opentelemetry.sdk.resources import SERVICE_NAME, Resource -from opentelemetry.sdk.trace import ( - SynchronousMultiSpanProcessor, - Tracer, - TracerProvider, -) -from opentelemetry.sdk.trace.export import BatchSpanProcessor -from opentelemetry.trace import NonRecordingSpan, SpanContext, TraceFlags from packaging.version import Version import docker from lib.base_logger import 
logger -from .build_images import process_image from scripts.evergreen.release.agent_matrix import ( get_supported_operator_versions, - get_supported_version_for_image_matrix_handling, ) from scripts.evergreen.release.images_signing import ( mongodb_artifactory_login, @@ -51,43 +31,14 @@ verify_signature, ) from scripts.evergreen.release.sbom import generate_sbom, generate_sbom_for_cli +from .build_configuration import BuildConfiguration -TRACER = trace.get_tracer("evergreen-agent") +from .build_images import process_image +from .optimized_operator_build import build_operator_image_fast # TODO: better framework for multi arch builds (spike to come) - -def _setup_tracing(): - trace_id = os.environ.get("otel_trace_id") - parent_id = os.environ.get("otel_parent_id") - endpoint = os.environ.get("otel_collector_endpoint") - if any(value is None for value in [trace_id, parent_id, endpoint]): - logger.info("tracing environment variables are missing, not configuring tracing") - return - logger.info(f"parent_id is {parent_id}") - logger.info(f"trace_id is {trace_id}") - logger.info(f"endpoint is {endpoint}") - span_context = SpanContext( - trace_id=int(trace_id, 16), - span_id=int(parent_id, 16), - is_remote=False, - # Magic number needed for our OTEL collector - trace_flags=TraceFlags(0x01), - ) - ctx = trace.set_span_in_context(NonRecordingSpan(span_context)) - context.attach(ctx) - sp = SynchronousMultiSpanProcessor() - span_processor = BatchSpanProcessor( - OTLPSpanGrpcExporter( - endpoint=endpoint, - ) - ) - sp.add_span_processor(span_processor) - resource = Resource(attributes={SERVICE_NAME: "evergreen-agent"}) - provider = TracerProvider(resource=resource, active_span_processor=sp) - trace.set_tracer_provider(provider) - - +TRACER = trace.get_tracer("evergreen-agent") DEFAULT_IMAGE_TYPE = "ubi" DEFAULT_NAMESPACE = "default" @@ -97,30 +48,6 @@ def _setup_tracing(): QUAY_REGISTRY_URL = "268558157000.dkr.ecr.us-east-1.amazonaws.com/julienben/staging-temp" -@dataclass 
-class BuildConfiguration: - image_type: str - base_repository: str - - parallel: bool = False - parallel_factor: int = 0 - architecture: Optional[List[str]] = None - sign: bool = False - all_agents: bool = False - - pipeline: bool = True - debug: bool = True - - def build_args(self, args: Optional[Dict[str, str]] = None) -> Dict[str, str]: - if args is None: - args = {} - args = args.copy() - - args["registry"] = self.base_repository - - return args - - def make_list_of_str(value: Union[None, str, List[str]]) -> List[str]: if value is None: return [] @@ -140,6 +67,7 @@ def get_tools_distro(tools_version: str) -> Dict[str, str]: def operator_build_configuration( + base_registry: str, parallel: bool, debug: bool, architecture: Optional[List[str]] = None, @@ -148,8 +76,8 @@ def operator_build_configuration( parallel_factor: int = 0, ) -> BuildConfiguration: bc = BuildConfiguration( + base_registry=base_registry, image_type=os.environ.get("distro", DEFAULT_IMAGE_TYPE), - base_repository=os.environ["BASE_REPO_URL"], parallel=parallel, all_agents=all_agents or bool(os.environ.get("all_agents", False)), debug=debug, @@ -174,10 +102,6 @@ def is_running_in_evg_pipeline(): return os.getenv("RUNNING_IN_EVG", "") == "true" -class MissingEnvironmentVariable(Exception): - pass - - def is_running_in_patch(): is_patch = os.environ.get("is_patch") return is_patch is not None and is_patch.lower() == "true" @@ -201,21 +125,6 @@ def get_git_release_tag() -> tuple[str, bool]: return patch_id, False -def copy_into_container(client, src, dst): - """Copies a local file into a running container.""" - - os.chdir(os.path.dirname(src)) - srcname = os.path.basename(src) - with tarfile.open(src + ".tar", mode="w") as tar: - tar.add(srcname) - - name, dst = dst.split(":") - container = client.containers.get(name) - - with open(src + ".tar", "rb") as fd: - container.put_archive(os.path.dirname(dst), fd.read()) - - def create_and_push_manifest(image: str, tag: str, architectures: list[str]) -> 
None: """ Generates docker manifests by running the following commands: @@ -298,9 +207,7 @@ def pipeline_process_image( sign: bool = False, with_sbom: bool = True, ): - """Calls sonar to build `image_name` with arguments defined in `args`. - :param architecture: - """ + """Builds a Docker image with arguments defined in `args`.""" span = trace.get_current_span() span.set_attribute("mck.image_name", image_name) if dockerfile_args: @@ -413,7 +320,7 @@ def build_mco_tests_image(build_configuration: BuildConfiguration): buildargs = dict({"GOLANG_VERSION": golang_version}) - pipeline_process_image(image_name, "docker/mongodb-community-tests/Dockerfile", buildargs) + pipeline_process_image(image_name, "docker/mongodb-community-tests/Dockerfile", buildargs, base_registry=build_configuration.base_registry) def build_operator_image(build_configuration: BuildConfiguration): @@ -435,14 +342,19 @@ def build_operator_image(build_configuration: BuildConfiguration): image_name = "mongodb-kubernetes" build_image_generic( - config=build_configuration, image_name=image_name, dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile", - registry_address=QUAY_REGISTRY_URL, + registry_address=build_configuration.base_registry, extra_args=args, + sign=build_configuration.sign, ) +def build_operator_image_patch(build_configuration: BuildConfiguration): + if not build_operator_image_fast(build_configuration): + build_operator_image(build_configuration) + + def build_database_image(build_configuration: BuildConfiguration): """ Builds a new database image. 
@@ -450,7 +362,7 @@ def build_database_image(build_configuration: BuildConfiguration): release = get_release() version = release["databaseImageVersion"] args = {"version": version} - build_image_generic(build_configuration, "database", "docker/mongodb-kubernetes-database.yaml", extra_args=args) + build_image_generic(image_name="mongodb-kubernetes-database", dockerfile_path="docker/mongodb-kubernetes-database/Dockerfile", registry_address=build_configuration.base_registry, extra_args=args, sign=build_configuration.sign) def build_CLI_SBOM(build_configuration: BuildConfiguration): @@ -475,160 +387,6 @@ def build_CLI_SBOM(build_configuration: BuildConfiguration): generate_sbom_for_cli(version, architecture) -def build_operator_image_patch(build_configuration: BuildConfiguration): - """This function builds the operator locally and pushed into an existing - Docker image. This is the fastest way I could image we can do this.""" - - client = docker.from_env() - # image that we know is where we build operator. - image_repo = build_configuration.base_repository + "/" + build_configuration.image_type + "/mongodb-kubernetes" - image_tag = "latest" - repo_tag = image_repo + ":" + image_tag - - logger.debug(f"Pulling image: {repo_tag}") - try: - image = client.images.get(repo_tag) - except docker.errors.ImageNotFound: - logger.debug("Operator image does not exist locally. Building it now") - build_operator_image(build_configuration) - return - - logger.debug("Done") - too_old = datetime.now() - timedelta(hours=3) - image_timestamp = datetime.fromtimestamp( - image.history()[0]["Created"] - ) # Layer 0 is the latest added layer to this Docker image. [-1] is the FROM layer. 
- - if image_timestamp < too_old: - logger.info("Current operator image is too old, will rebuild it completely first") - build_operator_image(build_configuration) - return - - container_name = "mongodb-enterprise-operator" - operator_binary_location = "/usr/local/bin/mongodb-kubernetes-operator" - try: - client.containers.get(container_name).remove() - logger.debug(f"Removed {container_name}") - except docker.errors.NotFound: - pass - - container = client.containers.run(repo_tag, name=container_name, entrypoint="sh", detach=True) - - logger.debug("Building operator with debugging symbols") - subprocess.run(["make", "manager"], check=True, stdout=subprocess.PIPE) - logger.debug("Done building the operator") - - copy_into_container( - client, - os.getcwd() + "/docker/mongodb-kubernetes-operator/content/mongodb-kubernetes-operator", - container_name + ":" + operator_binary_location, - ) - - # Commit changes on disk as a tag - container.commit( - repository=image_repo, - tag=image_tag, - ) - # Stop this container so we can use it next time - container.stop() - container.remove() - - logger.info("Pushing operator to {}:{}".format(image_repo, image_tag)) - client.images.push( - repository=image_repo, - tag=image_tag, - ) - - -def get_supported_variants_for_image(image: str) -> List[str]: - return get_release()["supportedImages"][image]["variants"] - - -def image_config( - image_name: str, - name_prefix: str = "mongodb-kubernetes-", - s3_bucket: str = "enterprise-operator-dockerfiles", - ubi_suffix: str = "-ubi", - base_suffix: str = "", -) -> Tuple[str, Dict[str, str]]: - """Generates configuration for an image suitable to be passed - to Sonar. 
- - It returns a dictionary with registries and S3 configuration.""" - args = { - "quay_registry": "{}/{}{}".format(QUAY_REGISTRY_URL, name_prefix, image_name), - "ecr_registry_ubi": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/{}{}".format(name_prefix, image_name), - "s3_bucket_http": "https://{}.s3.amazonaws.com/dockerfiles/{}{}".format(s3_bucket, name_prefix, image_name), - "ubi_suffix": ubi_suffix, - "base_suffix": base_suffix, - } - - return image_name, args - - -def is_version_in_range(version: str, min_version: str, max_version: str) -> bool: - """Check if the version is in the range""" - try: - parsed_version = semver.VersionInfo.parse(version) - if parsed_version.prerelease: - logger.info(f"Excluding {version} from range {min_version}-{max_version} because it's a pre-release") - return False - version_without_rc = semver.VersionInfo.finalize_version(parsed_version) - except ValueError: - version_without_rc = version - if min_version and max_version: - return version_without_rc.match(">=" + min_version) and version_without_rc.match("<" + max_version) - return True - - -def get_versions_to_rebuild(supported_versions, min_version, max_version): - # this means we only want to release one version, we cannot rely on the below range function - # since the agent does not follow semver for comparison - if (min_version and max_version) and (min_version == max_version): - return [min_version] - return filter(lambda x: is_version_in_range(x, min_version, max_version), supported_versions) - - -def get_versions_to_rebuild_per_operator_version(supported_versions, operator_version): - """ - This function returns all versions sliced by a specific operator version. - If the input is `onlyAgents` then it only returns agents without the operator suffix. - """ - versions_to_rebuild = [] - - for version in supported_versions: - if operator_version == "onlyAgents": - # 1_ works because we append the operator version via "_", all agents end with "1". 
- if "1_" not in version: - versions_to_rebuild.append(version) - else: - if operator_version in version: - versions_to_rebuild.append(version) - return versions_to_rebuild - - -class TracedThreadPoolExecutor(ThreadPoolExecutor): - """Implementation of :class:ThreadPoolExecutor that will pass context into sub tasks.""" - - def __init__(self, tracer: Tracer, *args, **kwargs): - self.tracer = tracer - super().__init__(*args, **kwargs) - - def with_otel_context(self, c: otel_context.Context, fn: Callable): - otel_context.attach(c) - return fn() - - def submit(self, fn, *args, **kwargs): - """Submit a new task to the thread pool.""" - - # get the current otel context - c = otel_context.get_current() - if c: - return super().submit( - lambda: self.with_otel_context(c, lambda: fn(*args, **kwargs)), - ) - else: - return super().submit(lambda: fn(*args, **kwargs)) def should_skip_arm64(): @@ -699,10 +457,11 @@ def build_init_om_image(build_configuration: BuildConfiguration): init_om_version = release["initOpsManagerVersion"] args = {"version": init_om_version} build_image_generic( - build_configuration, - "init-ops-manager", + "mongodb-kubernetes-init-ops-manager", "docker/mongodb-kubernetes-init-ops-manager/Dockerfile", + registry_address=build_configuration.base_registry, extra_args=args, + sign=build_configuration.sign, ) @@ -723,22 +482,22 @@ def build_om_image(build_configuration: BuildConfiguration): } build_image_generic( - config=build_configuration, - image_name="mongodb-enterprise-ops-manager", + image_name="mongodb-enterprise-ops-manager-ubi", dockerfile_path="docker/mongodb-enterprise-ops-manager/Dockerfile", - registry_address=QUAY_REGISTRY_URL, + registry_address=build_configuration.base_registry, extra_args=args, + sign=build_configuration.sign, ) def build_image_generic( - config: BuildConfiguration, image_name: str, dockerfile_path: str, - registry_address: str | None = None, + registry_address: str, extra_args: dict | None = None, multi_arch_args_list: 
list[dict] | None = None, is_multi_arch: bool = False, + sign: bool = False, ): """ Build one or more architecture-specific images, then (optionally) @@ -746,7 +505,7 @@ def build_image_generic( """ # 1) Defaults - registry = registry_address or "268558157000.dkr.ecr.us-east-1.amazonaws.com/julienben/staging-temp" + registry = registry_address args_list = multi_arch_args_list or [extra_args or {}] version = args_list[0].get("version", "") architectures = [args.get("architecture") for args in args_list] @@ -766,16 +525,16 @@ def build_image_generic( build_args, registry, architecture=arch, - sign=True, + sign=False, with_sbom=False, ) # 3) Multi-arch manifest if is_multi_arch: - create_and_push_manifest(registry+"/"+image_name, version, architectures=architectures) + create_and_push_manifest(registry + "/" + image_name, version, architectures=architectures) # 4) Signing (only on real releases) - if config.sign: + if sign: sign_image(registry, version) verify_signature(registry, version) @@ -787,7 +546,11 @@ def build_init_appdb(build_configuration: BuildConfiguration): mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) args = {"version": version, "is_appdb": True, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} build_image_generic( - build_configuration, "init-appdb", "docker/mongodb-kubernetes-init-appdb/Dockerfile", extra_args=args + "mongodb-kubernetes-init-appdb", + "docker/mongodb-kubernetes-init-appdb/Dockerfile", + registry_address=build_configuration.base_registry, + extra_args=args, + sign=build_configuration.sign, ) @@ -830,16 +593,13 @@ def build_community_image(build_configuration: BuildConfiguration, image_type: s } multi_arch_args_list.append(arch_args) - ecr_registry = os.environ.get("BASE_REPO_URL", "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev") - base_repo = QUAY_REGISTRY_URL if is_release else ecr_registry - build_image_generic( - config=build_configuration, image_name=image_name, 
dockerfile_path=dockerfile_path, - registry_address=QUAY_REGISTRY_URL, + registry_address=build_configuration.base_registry, is_multi_arch=True, multi_arch_args_list=multi_arch_args_list, + sign=build_configuration.sign, ) @@ -872,17 +632,16 @@ def build_agent_pipeline( "init_database_image": init_database_image, } - agent_quay_registry = QUAY_REGISTRY_URL + f"/mongodb-agent-ubi" - args["quay_registry"] = agent_quay_registry + agent_quay_registry = build_configuration.base_registry + f"/mongodb-agent-ubi" + args["quay_registry"] = build_configuration.base_registry args["agent_version"] = agent_version build_image_generic( - config=build_configuration, - image_name="mongodb-agent", + image_name="mongodb-agent-ubi", dockerfile_path="docker/mongodb-agent/Dockerfile", - registry_address=agent_quay_registry, + registry_address=build_configuration.base_registry, extra_args=args, - is_run_in_parallel=True, + sign=build_configuration.sign, ) @@ -901,7 +660,6 @@ def build_multi_arch_agent_in_sonar( """ logger.info(f"building multi-arch base image for: {image_version}") - is_release = build_configuration.is_release_step_executed() args = { "version": image_version, "tools_version": tools_version, @@ -923,9 +681,6 @@ def build_multi_arch_agent_in_sonar( arch_arm["tools_distro"] = "rhel93-aarch64" arch_amd["tools_distro"] = "rhel93-x86_64" - ecr_registry = os.environ.get("REGISTRY", "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev") - ecr_agent_registry = ecr_registry + f"/mongodb-agent-ubi" - quay_agent_registry = QUAY_REGISTRY_URL + f"/mongodb-agent-ubi" joined_args = [args | arch_amd] # Only include arm64 if we shouldn't skip it @@ -933,13 +688,13 @@ def build_multi_arch_agent_in_sonar( joined_args.append(args | arch_arm) build_image_generic( - config=build_configuration, - image_name="mongodb-agent", + image_name="mongodb-agent-ubi", dockerfile_path="docker/mongodb-agent-non-matrix/Dockerfile", - registry_address=quay_agent_registry if is_release else ecr_agent_registry, 
+ registry_address=build_configuration.base_registry, is_multi_arch=True, multi_arch_args_list=joined_args, is_run_in_parallel=True, + sign=build_configuration.sign, ) @@ -1205,7 +960,7 @@ def build_init_database(build_configuration: BuildConfiguration): mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) args = {"version": version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi, "is_appdb": False} build_image_generic( - build_configuration, "init-database", "docker/mongodb-kubernetes-init-database.yaml", extra_args=args + "mongodb-kubernetes-init-database", "docker/mongodb-kubernetes-init-database/Dockerfile", registry_address=build_configuration.base_registry, extra_args=args, sign=build_configuration.sign ) @@ -1216,6 +971,7 @@ def build_image(image_name: str, build_configuration: BuildConfiguration): def build_all_images( images: Iterable[str], + base_registry: str, debug: bool = False, parallel: bool = False, architecture: Optional[List[str]] = None, @@ -1224,72 +980,12 @@ def build_all_images( parallel_factor: int = 0, ): """Builds all the images in the `images` list.""" - build_configuration = operator_build_configuration(parallel, debug, architecture, sign, all_agents, parallel_factor) + build_configuration = operator_build_configuration( + base_registry, parallel, debug, architecture, sign, all_agents, parallel_factor + ) if sign: mongodb_artifactory_login() - for image in images: - logger.info(f"====Building image {image}====") + for idx, image in enumerate(images): + logger.info(f"====Building image {image} ({idx}/{len(images)-1})====") + time.sleep(1) build_image(image, build_configuration) - -def main(): - _setup_tracing() - _setup_tracing() - - parser = argparse.ArgumentParser() - parser.add_argument("--include", action="append", help="list of images to include") - parser.add_argument("--list-images", action="store_true") - parser.add_argument("--parallel", action="store_true", default=False) - 
parser.add_argument("--debug", action="store_true", default=False) - parser.add_argument( - "--arch", - choices=["amd64", "arm64"], - nargs="+", - help="for operator and community images only, specify the list of architectures to build for images", - ) - parser.add_argument("--sign", action="store_true", default=False) - parser.add_argument( - "--parallel-factor", - type=int, - default=0, - help="the factor on how many agents are built in parallel. 0 means all CPUs will be used", - ) - parser.add_argument( - "--all-agents", - action="store_true", - default=False, - help="optional parameter to be able to push " - "all non operator suffixed agents, even if we are not in a release", - ) - args = parser.parse_args() - - images_list = get_builder_function_for_image_name().keys() - if args.list_images: - print(images_list) - sys.exit(0) - - if args.arch == ["arm64"]: - print("Building for arm64 only is not supported yet") - sys.exit(1) - - if not args.sign: - logger.warning("--sign flag not provided, images won't be signed") - - # TODO check that image names are valid - images_to_build = list(sorted(set(args.include).intersection(images_list))) - if not images_to_build: - logger.error("No images to build, please ensure images names are correct.") - sys.exit(1) - - build_all_images( - images_to_build, - debug=args.debug, - parallel=args.parallel, - architecture=args.arch, - sign=args.sign, - all_agents=args.all_agents, - parallel_factor=args.parallel_factor, - ) - - -if __name__ == "__main__": - main() diff --git a/scripts/release/build_configuration.py b/scripts/release/build_configuration.py new file mode 100644 index 000000000..51dbd4d6d --- /dev/null +++ b/scripts/release/build_configuration.py @@ -0,0 +1,17 @@ +from typing import Optional, List +from dataclasses import dataclass + + +@dataclass +class BuildConfiguration: + image_type: str + base_registry: str + + parallel: bool = False + parallel_factor: int = 0 + architecture: Optional[List[str]] = None + sign: bool 
= False + all_agents: bool = False + + pipeline: bool = True + debug: bool = True diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 66136f563..92d4a3590 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -1,23 +1,16 @@ -# Methods responsible for building and pushing docker images. +# This file is the new Sonar +import base64 import sys -import traceback +from typing import Dict import boto3 from botocore.exceptions import BotoCoreError, ClientError -import base64 -from lib.base_logger import logger import docker +from lib.base_logger import logger +from lib.sonar.sonar import create_ecr_repository +from scripts.evergreen.release.images_signing import sign_image, verify_signature -logger.info("Starting build images script") - -IMAGE_NAME = "mongodb-kubernetes-operator" -DOCKERFILES_PATH = f"./docker/{IMAGE_NAME}" -CONTEXT_DOCKERFILE = "Dockerfile" -RELEASE_DOCKERFILE = "Dockerfile.plain" -STAGING_REGISTRY = "268558157000.dkr.ecr.us-east-1.amazonaws.com/julienben/operator-staging-temp" -LATEST_TAG = "latest" -LATEST_TAG_CONTEXT = f"{LATEST_TAG}-context" def ecr_login_boto3(region: str, account_id: str): """ @@ -26,7 +19,7 @@ def ecr_login_boto3(region: str, account_id: str): """ registry = f"{account_id}.dkr.ecr.{region}.amazonaws.com" # 1) get token - boto3.setup_default_session(profile_name='default') + boto3.setup_default_session(profile_name="default") ecr = boto3.client("ecr", region_name=region) try: resp = ecr.get_authorization_token(registryIds=[account_id]) @@ -39,41 +32,41 @@ def ecr_login_boto3(region: str, account_id: str): # 2) docker login client = docker.APIClient() # low-level client supports login() - login_resp = client.login( - username=username, - password=password, - registry=registry, - reauth=True - ) + login_resp = client.login(username=username, password=password, registry=registry, reauth=True) # login_resp is a dict like {'Status': 'Login Succeeded'} status = 
login_resp.get("Status", "") if "Succeeded" not in status: raise RuntimeError(f"Docker login failed: {login_resp}") - logger.info(f"ECR login succeeded: {status}") + logger.debug(f"ECR login succeeded: {status}") -def build_image(docker_client: docker.DockerClient, tag: str, dockerfile: str, path: str, args=None): + +def build_image(docker_client: docker.DockerClient, tag: str, dockerfile: str, path: str, args: Dict[str, str] = {}): """ Build a Docker image. + :param docker_client: :param path: Build context path (directory with your Dockerfile) :param dockerfile: Name or relative path of the Dockerfile within `path` :param tag: Image tag (name:tag) + :param args: """ try: + if args: + args = {k: str(v) for k, v in args.items()} image, logs = docker_client.images.build( path=path, dockerfile=dockerfile, tag=tag, - rm=True, # remove intermediate containers after a successful build - pull=False, # set True to always attempt to pull a newer base image - buildargs=args # pass build args if provided + rm=True, # remove intermediate containers after a successful build + pull=False, # set True to always attempt to pull a newer base image + buildargs=args, # pass build args if provided ) logger.info(f"Successfully built {tag} (id: {image.id})") # Print build output for chunk in logs: - if 'stream' in chunk: - logger.debug(chunk['stream']) + if "stream" in chunk: + logger.debug(chunk["stream"]) except docker.errors.BuildError as e: logger.error("Build failed:") for stage in e.build_log: @@ -82,50 +75,68 @@ def build_image(docker_client: docker.DockerClient, tag: str, dockerfile: str, p elif "error" in stage: logger.error(stage["error"]) logger.error(e) - sys.exit(1) + raise RuntimeError(f"Failed to build image {tag}") except Exception as e: logger.error(f"Unexpected error: {e}") - sys.exit(2) + raise RuntimeError(f"Failed to build image {tag}") + def push_image(docker_client: docker.DockerClient, image: str, tag: str): """ Push a Docker image to a registry. 
+ :param docker_client: :param image: Image name (e.g., 'my-image') :param tag: Image tag (e.g., 'latest') """ + logger.debug(f"push_image - image: {image}, tag: {tag}") + image_full_uri = f"{image}:{tag}" try: - response = docker_client.images.push(image, tag=tag) - logger.info(f"Successfully pushed {image}:{tag}") - logger.debug(response) - except docker.errors.APIError as e: - logger.error(f"Failed to push image {image}:{tag} - {e}") + output = docker_client.images.push(image, tag=tag) + if "error" in output: + raise RuntimeError(f"Failed to push image {image_full_uri} {output}") + logger.info(f"Successfully pushed {image_full_uri}") + except Exception as e: + logger.error(f"Failed to push image {image_full_uri} - {e}") sys.exit(1) -if __name__ == '__main__': +LATEST_TAG = "latest" +def process_image( + image_name: str, + dockerfile_path: str, + dockerfile_args: Dict[str, str], + base_registry: str, + architecture: str = None, + sign: bool = False, +): docker_client = docker.from_env() - logger.info("Docker client initialized") - + logger.debug("Docker client initialized") # Login to ECR using boto3 - ecr_login_boto3(region='us-east-1', account_id='268558157000') - - # Build context image - image_full_tag = f"{STAGING_REGISTRY}:{LATEST_TAG_CONTEXT}" - logger.info(f"Building image: {image_full_tag}") - context_dockerfile_full_path = f"{DOCKERFILES_PATH}/{CONTEXT_DOCKERFILE}" - logger.info(f"Using Dockerfile at: {context_dockerfile_full_path}") - build_image(docker_client, path=".", dockerfile=context_dockerfile_full_path, tag=LATEST_TAG_CONTEXT, args={'version': '0.0.1'}) + ecr_login_boto3(region="us-east-1", account_id="268558157000") + + # Helper to automatically create registry with correct name + should_create_repo = False + if should_create_repo: + repo_to_create="julienben/staging-temp/"+image_name + logger.debug(f"repo_to_create: {repo_to_create}") + create_ecr_repository(repo_to_create) + logger.info(f"Created repository {repo_to_create}") + + # Build 
image + docker_registry = f"{base_registry}/{image_name}" + arch_tag = f"-{architecture}" if architecture else "" + image_tag = f"{LATEST_TAG}{arch_tag}" + image_full_uri = f"{docker_registry}:{image_tag}" + logger.info(f"Building image: {image_full_uri}") + logger.info(f"Using Dockerfile at: {dockerfile_path}") + logger.debug(f"Build args: {dockerfile_args}") + build_image(docker_client, path=".", dockerfile=f"{dockerfile_path}", tag=image_full_uri, args=dockerfile_args) # Push to staging registry - push_image(docker_client, STAGING_REGISTRY, LATEST_TAG_CONTEXT) - - # Build release image - release_image_full_tag = f'{STAGING_REGISTRY}:latest' - release_dockerfile_full_path = f"{DOCKERFILES_PATH}/{RELEASE_DOCKERFILE}" - logger.info(f"Building release image with tag: {release_image_full_tag}") - logger.info(f"Using Dockerfile at: {release_dockerfile_full_path}") - - build_image(docker_client, path=".", dockerfile=release_dockerfile_full_path, tag=release_image_full_tag, args={'imagebase': image_full_tag}) + logger.info(f"Pushing image: {image_tag} to {docker_registry}") + push_image(docker_client, docker_registry, image_tag) - # Push release image - push_image(docker_client, STAGING_REGISTRY, LATEST_TAG) + if sign: + logger.info("Signing image") + sign_image(docker_registry, image_tag) + verify_signature(docker_registry, image_tag) diff --git a/scripts/release/main.py b/scripts/release/main.py new file mode 100644 index 000000000..6521bdcfa --- /dev/null +++ b/scripts/release/main.py @@ -0,0 +1,193 @@ +import argparse +import os +import sys +import time +from typing import Dict, Callable, Iterable, Optional, List + +from opentelemetry import context, trace +from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( + OTLPSpanExporter as OTLPSpanGrpcExporter, +) +from opentelemetry.sdk.resources import SERVICE_NAME, Resource +from opentelemetry.sdk.trace import ( + SynchronousMultiSpanProcessor, + TracerProvider, +) +from opentelemetry.sdk.trace.export 
import BatchSpanProcessor +from opentelemetry.trace import NonRecordingSpan, SpanContext, TraceFlags + +from lib.base_logger import logger +from scripts.evergreen.release.images_signing import mongodb_artifactory_login +from scripts.release.atomic_pipeline import ( + build_CLI_SBOM, + build_tests_image, + build_operator_image, + build_mco_tests_image, + build_readiness_probe_image, + build_upgrade_hook_image, + build_operator_image_patch, + build_database_image, + build_agent_on_agent_bump, + build_agent_default_case, + build_init_appdb, + build_init_database, + build_init_om_image, + build_om_image, operator_build_configuration, +) +from scripts.release.build_configuration import BuildConfiguration + + +def get_builder_function_for_image_name() -> Dict[str, Callable]: + """Returns a dictionary of image names that can be built.""" + + image_builders = { + "cli": build_CLI_SBOM, + "test": build_tests_image, + "operator": build_operator_image, + "mco-test": build_mco_tests_image, + # TODO: add support to build this per patch + "readiness-probe": build_readiness_probe_image, + "upgrade-hook": build_upgrade_hook_image, + "operator-quick": build_operator_image_patch, + "database": build_database_image, + "agent-pct": build_agent_on_agent_bump, + "agent": build_agent_default_case, + # + # Init images + "init-appdb": build_init_appdb, + "init-database": build_init_database, + "init-ops-manager": build_init_om_image, + # + # Ops Manager image + "ops-manager": build_om_image, + } + + return image_builders + +def build_image(image_name: str, build_configuration: BuildConfiguration): + """Builds one of the supported images by its name.""" + get_builder_function_for_image_name()[image_name](build_configuration) + + +def build_all_images( + images: Iterable[str], + base_registry: str, + debug: bool = False, + parallel: bool = False, + architecture: Optional[List[str]] = None, + sign: bool = False, + all_agents: bool = False, + parallel_factor: int = 0, +): + """Builds all the 
images in the `images` list.""" + build_configuration = operator_build_configuration( + base_registry, parallel, debug, architecture, sign, all_agents, parallel_factor + ) + if sign: + mongodb_artifactory_login() + for idx, image in enumerate(images): + logger.info(f"====Building image {image} ({idx}/{len(images)-1})====") + time.sleep(1) + build_image(image, build_configuration) + + +def _setup_tracing(): + trace_id = os.environ.get("otel_trace_id") + parent_id = os.environ.get("otel_parent_id") + endpoint = os.environ.get("otel_collector_endpoint") + if any(value is None for value in [trace_id, parent_id, endpoint]): + logger.info("tracing environment variables are missing, not configuring tracing") + return + logger.info(f"parent_id is {parent_id}") + logger.info(f"trace_id is {trace_id}") + logger.info(f"endpoint is {endpoint}") + span_context = SpanContext( + trace_id=int(trace_id, 16), + span_id=int(parent_id, 16), + is_remote=False, + # Magic number needed for our OTEL collector + trace_flags=TraceFlags(0x01), + ) + ctx = trace.set_span_in_context(NonRecordingSpan(span_context)) + context.attach(ctx) + sp = SynchronousMultiSpanProcessor() + span_processor = BatchSpanProcessor( + OTLPSpanGrpcExporter( + endpoint=endpoint, + ) + ) + sp.add_span_processor(span_processor) + resource = Resource(attributes={SERVICE_NAME: "evergreen-agent"}) + provider = TracerProvider(resource=resource, active_span_processor=sp) + trace.set_tracer_provider(provider) + + +def main(): + _setup_tracing() + + parser = argparse.ArgumentParser() + parser.add_argument("--include", action="append", help="list of images to include") + parser.add_argument("--list-images", action="store_true") + parser.add_argument("--parallel", action="store_true", default=False) + parser.add_argument("--debug", action="store_true", default=False) + parser.add_argument( + "--arch", + choices=["amd64", "arm64"], + nargs="+", + help="for operator and community images only, specify the list of architectures to 
build for images", + ) + parser.add_argument("--sign", action="store_true", default=False) + parser.add_argument( + "--parallel-factor", + type=int, + default=0, + help="the factor on how many agents are built in parallel. 0 means all CPUs will be used", + ) + parser.add_argument( + "--all-agents", + action="store_true", + default=False, + help="optional parameter to be able to push " + "all non operator suffixed agents, even if we are not in a release", + ) + args = parser.parse_args() + + images_list = get_builder_function_for_image_name().keys() + + if args.list_images: + print(images_list) + sys.exit(0) + + if not args.include: + logger.error(f"--include is required") + sys.exit(1) + + if args.arch == ["arm64"]: + print("Building for arm64 only is not supported yet") + sys.exit(1) + + if not args.sign: + logger.warning("--sign flag not provided, images won't be signed") + + # TODO check that image names are valid + images_to_build = sorted(list(set(args.include).intersection(images_list))) + if not images_to_build: + logger.error("No images to build, please ensure images names are correct.") + sys.exit(1) + + TEMP_HARDCODED_BASE_REGISTRY = "268558157000.dkr.ecr.us-east-1.amazonaws.com/julienben/staging-temp" + + build_all_images( + base_registry=TEMP_HARDCODED_BASE_REGISTRY, + images=images_to_build, + debug=args.debug, + parallel=args.parallel, + architecture=args.arch, + sign=args.sign, + all_agents=args.all_agents, + parallel_factor=args.parallel_factor, + ) + + +if __name__ == "__main__": + main() diff --git a/scripts/release/optimized_operator_build.py b/scripts/release/optimized_operator_build.py new file mode 100644 index 000000000..4bced21e9 --- /dev/null +++ b/scripts/release/optimized_operator_build.py @@ -0,0 +1,88 @@ +import os +import subprocess +import tarfile +from datetime import datetime, timedelta, timezone + +import docker + + +from lib.base_logger import logger +from scripts.release.build_configuration import BuildConfiguration + + +def 
copy_into_container(client, src, dst): + """Copies a local file into a running container.""" + + os.chdir(os.path.dirname(src)) + srcname = os.path.basename(src) + with tarfile.open(src + ".tar", mode="w") as tar: + tar.add(srcname) + + name, dst = dst.split(":") + container = client.containers.get(name) + + with open(src + ".tar", "rb") as fd: + container.put_archive(os.path.dirname(dst), fd.read()) + +def build_operator_image_fast(build_configuration: BuildConfiguration) -> bool: + """This function builds the operator locally and pushes it into an existing + Docker image. This is the fastest way I could imagine we can do this.""" + + client = docker.from_env() + # image that we know is where we build the operator. + image_repo = build_configuration.base_registry + "/" + build_configuration.image_type + "/mongodb-kubernetes" + image_tag = "latest" + repo_tag = image_repo + ":" + image_tag + + logger.debug(f"Pulling image: {repo_tag}") + try: + image = client.images.get(repo_tag) + except docker.errors.ImageNotFound: + logger.debug("Operator image does not exist locally. Building it now") + return False + + logger.debug("Done") + too_old = datetime.now() - timedelta(hours=3) + image_timestamp = datetime.fromtimestamp( + image.history()[0]["Created"] + ) # Layer 0 is the latest added layer to this Docker image. [-1] is the FROM layer. 
+ + if image_timestamp < too_old: + logger.info("Current operator image is too old, will rebuild it completely first") + return False + + container_name = "mongodb-enterprise-operator" + operator_binary_location = "/usr/local/bin/mongodb-kubernetes-operator" + try: + client.containers.get(container_name).remove() + logger.debug(f"Removed {container_name}") + except docker.errors.NotFound: + pass + + container = client.containers.run(repo_tag, name=container_name, entrypoint="sh", detach=True) + + logger.debug("Building operator with debugging symbols") + subprocess.run(["make", "manager"], check=True, stdout=subprocess.PIPE) + logger.debug("Done building the operator") + + copy_into_container( + client, + os.getcwd() + "/docker/mongodb-kubernetes-operator/content/mongodb-kubernetes-operator", + container_name + ":" + operator_binary_location, + ) + + # Commit changes on disk as a tag + container.commit( + repository=image_repo, + tag=image_tag, + ) + # Stop this container so we can use it next time + container.stop() + container.remove() + + logger.info("Pushing operator to {}:{}".format(image_repo, image_tag)) + client.images.push( + repository=image_repo, + tag=image_tag, + ) + return True From 0f4602a40c647041a76316d1417e54d888093605 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 20 Jun 2025 17:15:40 +0200 Subject: [PATCH 9/9] Add documentation --- scripts/release/atomic_pipeline.py | 46 ++++++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index b0bb4d9a7..43d9bde28 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -6,6 +6,47 @@ # TODO: test pipeline, e.g with a test registry + +""" +State of things + +All builds are working with reworked Dockerfiles ; except test image +From repo root: + +python -m scripts.release.main \ +--include upgrade-hook \ +--include cli \ +--include test \ +--include operator 
\ +--include mco-test \ +--include readiness-probe \ +--include upgrade-hook \ +--include operator-quick \ +--include database \ +--include init-appdb \ +--include init-database \ +--include init-ops-manager \ +--include ops-manager + +Should push images to all staging repositories "julienben/staging-temp/***/" on ECR +The base registry is now passed everywhere from one single entry point +Currently hardcoded as TEMP_HARDCODED_BASE_REGISTRY in main.py + + +Tried to split into smaller files: +- main.py to parse arguments and load image building functions +- build_configuration.py to isolate the dataclass +- build_images.py to replace sonar (basic interactions with Docker) +- optimized_operator_build.py to separate this function which is a mess +- atomic_pipeline.py for everything else + +Made a big cleanup (no daily rebuilds, no inventories, no Sonar...) ; still some work to do +The biggest mess is the agent builds + +TODO: +- continue to clean pipeline +""" + import json import os import shutil @@ -281,7 +322,7 @@ def build_tests_image(build_configuration: BuildConfiguration): """ Builds image used to run tests. """ - image_name = "test" + image_name = "mongodb-kubernetes-tests" # helm directory needs to be copied over to the tests docker context. helm_src = "helm_chart" @@ -306,7 +347,8 @@ def build_tests_image(build_configuration: BuildConfiguration): buildargs = dict({"python_version": python_version}) - pipeline_process_image(image_name, "docker/mongodb-kubernetes-tests/Dockerfile", buildargs) + # TODO: don't allow test images to be released to Quay + pipeline_process_image(image_name, "docker/mongodb-kubernetes-tests/Dockerfile", buildargs, base_registry=build_configuration.base_registry) def build_mco_tests_image(build_configuration: BuildConfiguration):