From 47fa0912154faf275523188f9596b655c3e313bd Mon Sep 17 00:00:00 2001
From: mms-build-account
Date: Wed, 7 Jun 2023 08:30:17 -0400
Subject: [PATCH] Kubernetes Enterprise Operator Release 1.20.1 (#254)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Updated
* Added missing dockerfiles and github workflow files
* Removed invalid dockerfiles
* Added 1.20.1 dockerfile

---------

Co-authored-by: Ɓukasz Sierant
---
 .../workflows/release-multicluster-cli.yaml   |   2 +-
 .gitignore                                    |   2 +
 crds.yaml                                     |  46 +-
 .../12.0.20.7686-1/ubi/Dockerfile             |  45 +
 .../12.0.21.7698-1/ubi/Dockerfile             |  45 +
 .../1.0.16/ubi/Dockerfile                     |  35 +
 .../1.0.17/ubi/Dockerfile                     |  35 +
 .../1.0.16/ubi/Dockerfile                     |  34 +
 .../1.0.17/ubi/Dockerfile                     |  34 +
 .../1.0.11/ubi/Dockerfile                     |  26 +
 .../1.20.0/ubi/Dockerfile                     |  39 +
 .../1.20.1/ubi/Dockerfile                     |  39 +
 .../5.0.21/ubi/Dockerfile                     |  75 ++
 .../6.0.12/ubi/Dockerfile                     |  75 ++
 .../6.0.13/ubi/Dockerfile                     |  75 ++
 .../6.0.14/ubi/Dockerfile                     |  75 ++
 mongodb-enterprise-openshift.yaml             | 144 ++-
 mongodb-enterprise.yaml                       |  24 +-
 samples/ops-manager/ops-manager-backup.yaml   |   2 +-
 tools/multicluster/Dockerfile                 |   2 +-
 tools/multicluster/cmd/debug.go               | 138 +++
 tools/multicluster/cmd/recover.go             |  56 +-
 tools/multicluster/cmd/setup.go               |  48 +-
 tools/multicluster/go.mod                     |   5 +-
 tools/multicluster/go.sum                     |   3 +
 tools/multicluster/pkg/common/common.go       | 981 ++++++++++++++++++
 tools/multicluster/pkg/common/common_test.go  | 887 ++++++++++++++++
 .../pkg/common/kubeclientcontainer.go         | 271 +++++
 tools/multicluster/pkg/common/kubeconfig.go   |  84 ++
 tools/multicluster/pkg/common/utils.go        |  21 +
 tools/multicluster/pkg/debug/anonymize.go     |  30 +
 .../multicluster/pkg/debug/anonymize_test.go  |  40 +
 tools/multicluster/pkg/debug/collectors.go    | 357 +++++++
 .../multicluster/pkg/debug/collectors_test.go | 172 +++
 tools/multicluster/pkg/debug/writer.go        | 128 +++
 tools/multicluster/pkg/debug/writer_test.go   |  72 ++
 36 files changed, 4065 insertions(+), 82 deletions(-)
 create mode 100644 .gitignore
 create mode 100644 dockerfiles/mongodb-agent/12.0.20.7686-1/ubi/Dockerfile
 create mode 100644 dockerfiles/mongodb-agent/12.0.21.7698-1/ubi/Dockerfile
 create mode 100644 dockerfiles/mongodb-enterprise-init-appdb/1.0.16/ubi/Dockerfile
 create mode 100644 dockerfiles/mongodb-enterprise-init-appdb/1.0.17/ubi/Dockerfile
 create mode 100644 dockerfiles/mongodb-enterprise-init-database/1.0.16/ubi/Dockerfile
 create mode 100644 dockerfiles/mongodb-enterprise-init-database/1.0.17/ubi/Dockerfile
 create mode 100644 dockerfiles/mongodb-enterprise-init-ops-manager/1.0.11/ubi/Dockerfile
 create mode 100644 dockerfiles/mongodb-enterprise-operator/1.20.0/ubi/Dockerfile
 create mode 100644 dockerfiles/mongodb-enterprise-operator/1.20.1/ubi/Dockerfile
 create mode 100644 dockerfiles/mongodb-enterprise-ops-manager/5.0.21/ubi/Dockerfile
 create mode 100644 dockerfiles/mongodb-enterprise-ops-manager/6.0.12/ubi/Dockerfile
 create mode 100644 dockerfiles/mongodb-enterprise-ops-manager/6.0.13/ubi/Dockerfile
 create mode 100644 dockerfiles/mongodb-enterprise-ops-manager/6.0.14/ubi/Dockerfile
 create mode 100644 tools/multicluster/cmd/debug.go
 create mode 100644 tools/multicluster/pkg/common/common.go
 create mode 100644 tools/multicluster/pkg/common/common_test.go
 create mode 100644 tools/multicluster/pkg/common/kubeclientcontainer.go
 create mode 100644 tools/multicluster/pkg/common/kubeconfig.go
 create mode 100644 tools/multicluster/pkg/common/utils.go
 create mode 100644 tools/multicluster/pkg/debug/anonymize.go
 create mode 100644
tools/multicluster/pkg/debug/anonymize_test.go create mode 100644 tools/multicluster/pkg/debug/collectors.go create mode 100644 tools/multicluster/pkg/debug/collectors_test.go create mode 100644 tools/multicluster/pkg/debug/writer.go create mode 100644 tools/multicluster/pkg/debug/writer_test.go diff --git a/.github/workflows/release-multicluster-cli.yaml b/.github/workflows/release-multicluster-cli.yaml index 3eaea5e..405974a 100644 --- a/.github/workflows/release-multicluster-cli.yaml +++ b/.github/workflows/release-multicluster-cli.yaml @@ -14,7 +14,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: 1.19 + go-version: 1.20 - name: Run GoReleaser uses: goreleaser/goreleaser-action@v4 with: diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c38fa4e --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +.idea +*.iml diff --git a/crds.yaml b/crds.yaml index 3c14c7a..9d4f028 100644 --- a/crds.yaml +++ b/crds.yaml @@ -61,6 +61,10 @@ spec: x-kubernetes-preserve-unknown-fields: true agent: properties: + logLevel: + type: string + maxLogFileDurationHours: + type: integer startupOptions: additionalProperties: type: string @@ -215,6 +219,10 @@ spec: x-kubernetes-preserve-unknown-fields: true agent: properties: + logLevel: + type: string + maxLogFileDurationHours: + type: integer startupOptions: additionalProperties: type: string @@ -360,6 +368,10 @@ spec: x-kubernetes-preserve-unknown-fields: true agent: properties: + logLevel: + type: string + maxLogFileDurationHours: + type: integer startupOptions: additionalProperties: type: string @@ -736,6 +748,10 @@ spec: x-kubernetes-preserve-unknown-fields: true agent: properties: + logLevel: + type: string + maxLogFileDurationHours: + type: integer startupOptions: additionalProperties: type: string @@ -1008,6 +1024,10 @@ spec: x-kubernetes-preserve-unknown-fields: true agent: properties: + logLevel: + type: string + maxLogFileDurationHours: + type: integer startupOptions: additionalProperties: type: string @@ -1947,6 +1967,10 @@ spec: description: specify startup flags for the AutomationAgent and MonitoringAgent properties: + logLevel: + type: string + maxLogFileDurationHours: + type: integer startupOptions: additionalProperties: type: string @@ -2018,6 +2042,20 @@ spec: - ERROR - FATAL type: string + memberConfig: + description: MemberConfig + items: + properties: + priority: + type: string + tags: + additionalProperties: + type: string + type: object + votes: + type: integer + type: object + type: array members: description: Amount of members for this MongoDB Replica Set maximum: 50 @@ -2027,6 +2065,10 @@ spec: description: specify startup flags for just the MonitoringAgent. These take precedence over the flags set in AutomationAgent properties: + logLevel: + type: string + maxLogFileDurationHours: + type: integer startupOptions: additionalProperties: type: string @@ -2104,10 +2146,6 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true type: object - project: - description: 'Deprecated: This has been replaced by the PrivateCloudConfig - which should be used instead' - type: string prometheus: description: Enables Prometheus integration on the AppDB. 
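Note: the crds.yaml changes above add two agent logging knobs (agent.logLevel as a string, agent.maxLogFileDurationHours as an integer) and a per-member memberConfig array with priority, tags, and votes. A minimal sketch of how these new fields might look on a replica set resource; the resource name, server version, and values are hypothetical and should be checked against the released CRDs (connection/credentials settings omitted):

apiVersion: mongodb.com/v1
kind: MongoDB
metadata:
  name: my-replica-set            # hypothetical resource name
spec:
  type: ReplicaSet
  members: 3
  version: 6.0.5-ent              # hypothetical server version
  agent:
    logLevel: DEBUG               # new string field
    maxLogFileDurationHours: 24   # new integer field
  memberConfig:                   # new per-member settings; typically one entry per member
    - votes: 1
      priority: "1.5"             # priority is a string in the CRD
      tags:
        dc: east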
properties: diff --git a/dockerfiles/mongodb-agent/12.0.20.7686-1/ubi/Dockerfile b/dockerfiles/mongodb-agent/12.0.20.7686-1/ubi/Dockerfile new file mode 100644 index 0000000..d6e2c16 --- /dev/null +++ b/dockerfiles/mongodb-agent/12.0.20.7686-1/ubi/Dockerfile @@ -0,0 +1,45 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG agent_version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname nss_wrapper tar gzip procps\ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + +RUN mkdir -p /agent \ + && mkdir -p /var/lib/mongodb-mms-automation \ + && mkdir -p /var/log/mongodb-mms-automation/ \ + && chmod -R +wr /var/log/mongodb-mms-automation/ \ + # ensure that the agent user can write the logs in OpenShift + && touch /var/log/mongodb-mms-automation/readiness.log \ + && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log + + +COPY --from=base /data/mongodb-agent.tar.gz /agent +COPY --from=base /data/mongodb-tools.tgz /agent +COPY --from=base /data/LICENSE /licenses/LICENSE + +RUN tar xfz /agent/mongodb-agent.tar.gz \ + && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ + && chmod +x /agent/mongodb-agent \ + && mkdir -p /var/lib/automation/config \ + && chmod -R +r /var/lib/automation/config \ + && rm /agent/mongodb-agent.tar.gz \ + && rm -r mongodb-mms-automation-agent-* + +RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz + +USER 2000 +CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/12.0.21.7698-1/ubi/Dockerfile b/dockerfiles/mongodb-agent/12.0.21.7698-1/ubi/Dockerfile new file mode 100644 index 0000000..d6e2c16 --- /dev/null +++ b/dockerfiles/mongodb-agent/12.0.21.7698-1/ubi/Dockerfile @@ -0,0 +1,45 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG agent_version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname nss_wrapper tar gzip procps\ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + +RUN mkdir -p /agent \ + && mkdir -p /var/lib/mongodb-mms-automation \ + && mkdir -p /var/log/mongodb-mms-automation/ \ + && chmod -R +wr /var/log/mongodb-mms-automation/ \ + # ensure that the agent user can write the logs in OpenShift + && touch /var/log/mongodb-mms-automation/readiness.log \ + && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log + + +COPY --from=base /data/mongodb-agent.tar.gz /agent +COPY --from=base /data/mongodb-tools.tgz /agent +COPY --from=base /data/LICENSE /licenses/LICENSE + +RUN tar xfz /agent/mongodb-agent.tar.gz \ + && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ + && chmod +x /agent/mongodb-agent \ + && mkdir -p /var/lib/automation/config \ + && chmod -R +r /var/lib/automation/config \ + && rm /agent/mongodb-agent.tar.gz \ + && rm -r mongodb-mms-automation-agent-* + +RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && 
rm /agent/mongodb-tools.tgz + +USER 2000 +CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] \ No newline at end of file diff --git a/dockerfiles/mongodb-enterprise-init-appdb/1.0.16/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-init-appdb/1.0.16/ubi/Dockerfile new file mode 100644 index 0000000..68ae7cc --- /dev/null +++ b/dockerfiles/mongodb-enterprise-init-appdb/1.0.16/ubi/Dockerfile @@ -0,0 +1,35 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version +LABEL name="MongoDB Enterprise Init AppDB" \ + version="mongodb-enterprise-init-appdb-${version}" \ + summary="MongoDB Enterprise AppDB Init Image" \ + description="Startup Scripts for MongoDB Enterprise Application Database for Ops Manager" \ + release="1" \ + vendor="MongoDB" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/readinessprobe /probes/readinessprobe +COPY --from=base /data/probe.sh /probes/probe.sh +COPY --from=base /data/scripts/ /scripts/ +COPY --from=base /data/licenses /licenses/ +COPY --from=base /data/version-upgrade-hook /probes/version-upgrade-hook + + +RUN microdnf update --nodocs \ + && microdnf -y install --nodocs tar gzip \ + && microdnf clean all + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz + + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] + + diff --git a/dockerfiles/mongodb-enterprise-init-appdb/1.0.17/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-init-appdb/1.0.17/ubi/Dockerfile new file mode 100644 index 0000000..68ae7cc --- /dev/null +++ b/dockerfiles/mongodb-enterprise-init-appdb/1.0.17/ubi/Dockerfile @@ -0,0 +1,35 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version +LABEL name="MongoDB Enterprise Init AppDB" \ + version="mongodb-enterprise-init-appdb-${version}" \ + summary="MongoDB Enterprise AppDB Init Image" \ + description="Startup Scripts for MongoDB Enterprise Application Database for Ops Manager" \ + release="1" \ + vendor="MongoDB" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/readinessprobe /probes/readinessprobe +COPY --from=base /data/probe.sh /probes/probe.sh +COPY --from=base /data/scripts/ /scripts/ +COPY --from=base /data/licenses /licenses/ +COPY --from=base /data/version-upgrade-hook /probes/version-upgrade-hook + + +RUN microdnf update --nodocs \ + && microdnf -y install --nodocs tar gzip \ + && microdnf clean all + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz + + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] + + diff --git a/dockerfiles/mongodb-enterprise-init-database/1.0.16/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-init-database/1.0.16/ubi/Dockerfile new file mode 100644 index 0000000..eb48104 --- /dev/null +++ b/dockerfiles/mongodb-enterprise-init-database/1.0.16/ubi/Dockerfile @@ -0,0 +1,34 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version +LABEL name="MongoDB Enterprise Init Database" \ + 
version="mongodb-enterprise-init-database-${version}" \ + summary="MongoDB Enterprise Database Init Image" \ + description="Startup Scripts for MongoDB Enterprise Database" \ + release="1" \ + vendor="MongoDB" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/readinessprobe /probes/readinessprobe +COPY --from=base /data/probe.sh /probes/probe.sh +COPY --from=base /data/scripts/ /scripts/ +COPY --from=base /data/licenses /licenses/ + + +RUN microdnf update --nodocs \ + && microdnf -y install --nodocs tar gzip \ + && microdnf clean all + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz + + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] + + diff --git a/dockerfiles/mongodb-enterprise-init-database/1.0.17/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-init-database/1.0.17/ubi/Dockerfile new file mode 100644 index 0000000..eb48104 --- /dev/null +++ b/dockerfiles/mongodb-enterprise-init-database/1.0.17/ubi/Dockerfile @@ -0,0 +1,34 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version +LABEL name="MongoDB Enterprise Init Database" \ + version="mongodb-enterprise-init-database-${version}" \ + summary="MongoDB Enterprise Database Init Image" \ + description="Startup Scripts for MongoDB Enterprise Database" \ + release="1" \ + vendor="MongoDB" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/readinessprobe /probes/readinessprobe +COPY --from=base /data/probe.sh /probes/probe.sh +COPY --from=base /data/scripts/ /scripts/ +COPY --from=base /data/licenses /licenses/ + + +RUN microdnf update --nodocs \ + && microdnf -y install --nodocs tar gzip \ + && microdnf clean all + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz + + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] + + diff --git a/dockerfiles/mongodb-enterprise-init-ops-manager/1.0.11/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-init-ops-manager/1.0.11/ubi/Dockerfile new file mode 100644 index 0000000..3147f05 --- /dev/null +++ b/dockerfiles/mongodb-enterprise-init-ops-manager/1.0.11/ubi/Dockerfile @@ -0,0 +1,26 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +LABEL name="MongoDB Enterprise Ops Manager Init" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="mongodb-enterprise-init-ops-manager-1.0.11" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Init Image" \ + description="Startup Scripts for MongoDB Enterprise Ops Manager" + + +COPY --from=base /data/scripts /scripts +COPY --from=base /data/licenses /licenses + + +RUN microdnf update --nodocs \ + && microdnf clean all + + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] + + diff --git a/dockerfiles/mongodb-enterprise-operator/1.20.0/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-operator/1.20.0/ubi/Dockerfile new file mode 100644 index 0000000..ee18192 --- /dev/null 
+++ b/dockerfiles/mongodb-enterprise-operator/1.20.0/ubi/Dockerfile @@ -0,0 +1,39 @@ +# +# Base Template Dockerfile for Operator Image. +# + +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Operator" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="1.20.0" \ + release="1" \ + summary="MongoDB Enterprise Operator Image" \ + description="MongoDB Enterprise Operator Image" + + +# Building an UBI-based image: https://red.ht/3n6b9y0 +RUN microdnf update \ + --disableplugin=subscription-manager \ + --disablerepo=* --enablerepo=ubi-8-appstream-rpms --enablerepo=ubi-8-baseos-rpms -y \ + && rm -rf /var/cache/yum + + + + +COPY --from=base /data/mongodb-enterprise-operator /usr/local/bin/mongodb-enterprise-operator +COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json +COPY --from=base /data/licenses /licenses/ + +USER 2000 + + + +ENTRYPOINT exec /usr/local/bin/mongodb-enterprise-operator + + diff --git a/dockerfiles/mongodb-enterprise-operator/1.20.1/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-operator/1.20.1/ubi/Dockerfile new file mode 100644 index 0000000..29ea441 --- /dev/null +++ b/dockerfiles/mongodb-enterprise-operator/1.20.1/ubi/Dockerfile @@ -0,0 +1,39 @@ +# +# Base Template Dockerfile for Operator Image. +# + +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Operator" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="1.20.1" \ + release="1" \ + summary="MongoDB Enterprise Operator Image" \ + description="MongoDB Enterprise Operator Image" + + +# Building an UBI-based image: https://red.ht/3n6b9y0 +RUN microdnf update \ + --disableplugin=subscription-manager \ + --disablerepo=* --enablerepo=ubi-8-appstream-rpms --enablerepo=ubi-8-baseos-rpms -y \ + && rm -rf /var/cache/yum + + + + +COPY --from=base /data/mongodb-enterprise-operator /usr/local/bin/mongodb-enterprise-operator +COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json +COPY --from=base /data/licenses /licenses/ + +USER 2000 + + + +ENTRYPOINT exec /usr/local/bin/mongodb-enterprise-operator + + diff --git a/dockerfiles/mongodb-enterprise-ops-manager/5.0.21/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-ops-manager/5.0.21/ubi/Dockerfile new file mode 100644 index 0000000..28f5374 --- /dev/null +++ b/dockerfiles/mongodb-enterprise-ops-manager/5.0.21/ubi/Dockerfile @@ -0,0 +1,75 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Ops Manager" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="5.0.21" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Image" \ + description="MongoDB Enterprise Ops Manager" + + +ENV MMS_HOME /mongodb-ops-manager +ENV MMS_PROP_FILE ${MMS_HOME}/conf/conf-mms.properties +ENV MMS_CONF_FILE ${MMS_HOME}/conf/mms.conf +ENV MMS_LOG_DIR ${MMS_HOME}/logs +ENV MMS_TMP_DIR ${MMS_HOME}/tmp + +EXPOSE 8080 + +# OpsManager docker image needs to have the MongoDB dependencies because the +# backup daemon is running its database locally + +RUN microdnf install --disableplugin=subscription-manager -y \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + libpcap \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + tar \ + rpm-libs \ + net-tools \ + procps-ng \ + ncurses + + +COPY 
--from=base /data/licenses /licenses/ + + + +RUN curl --fail -L -o ops_manager.tar.gz https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-5.0.21.100.20230530T1455Z-1.x86_64.tar.gz \ + && tar -xzf ops_manager.tar.gz \ + && rm ops_manager.tar.gz \ + && mv mongodb-mms* "${MMS_HOME}" + + +# permissions +RUN chmod -R 0777 "${MMS_LOG_DIR}" \ + && chmod -R 0777 "${MMS_TMP_DIR}" \ + && chmod -R 0775 "${MMS_HOME}/conf" \ + && chmod -R 0775 "${MMS_HOME}/jdk" \ + && mkdir "${MMS_HOME}/mongodb-releases/" \ + && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ + && chmod -R 0777 "${MMS_CONF_FILE}" \ + && chmod -R 0777 "${MMS_PROP_FILE}" + +# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. +# For now we need to move into the templates directory. +RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" + +USER 2000 + +# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) +ENTRYPOINT [ "sleep infinity" ] + + diff --git a/dockerfiles/mongodb-enterprise-ops-manager/6.0.12/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-ops-manager/6.0.12/ubi/Dockerfile new file mode 100644 index 0000000..f1f54ca --- /dev/null +++ b/dockerfiles/mongodb-enterprise-ops-manager/6.0.12/ubi/Dockerfile @@ -0,0 +1,75 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Ops Manager" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="6.0.12" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Image" \ + description="MongoDB Enterprise Ops Manager" + + +ENV MMS_HOME /mongodb-ops-manager +ENV MMS_PROP_FILE ${MMS_HOME}/conf/conf-mms.properties +ENV MMS_CONF_FILE ${MMS_HOME}/conf/mms.conf +ENV MMS_LOG_DIR ${MMS_HOME}/logs +ENV MMS_TMP_DIR ${MMS_HOME}/tmp + +EXPOSE 8080 + +# OpsManager docker image needs to have the MongoDB dependencies because the +# backup daemon is running its database locally + +RUN microdnf install --disableplugin=subscription-manager -y \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + libpcap \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + tar \ + rpm-libs \ + net-tools \ + procps-ng \ + ncurses + + +COPY --from=base /data/licenses /licenses/ + + + +RUN curl --fail -L -o ops_manager.tar.gz https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-6.0.12.100.20230406T1854Z.tar.gz \ + && tar -xzf ops_manager.tar.gz \ + && rm ops_manager.tar.gz \ + && mv mongodb-mms* "${MMS_HOME}" + + +# permissions +RUN chmod -R 0777 "${MMS_LOG_DIR}" \ + && chmod -R 0777 "${MMS_TMP_DIR}" \ + && chmod -R 0775 "${MMS_HOME}/conf" \ + && chmod -R 0775 "${MMS_HOME}/jdk" \ + && mkdir "${MMS_HOME}/mongodb-releases/" \ + && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ + && chmod -R 0777 "${MMS_CONF_FILE}" \ + && chmod -R 0777 "${MMS_PROP_FILE}" + +# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. +# For now we need to move into the templates directory. 
+RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" + +USER 2000 + +# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) +ENTRYPOINT [ "sleep infinity" ] + + diff --git a/dockerfiles/mongodb-enterprise-ops-manager/6.0.13/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-ops-manager/6.0.13/ubi/Dockerfile new file mode 100644 index 0000000..770ac70 --- /dev/null +++ b/dockerfiles/mongodb-enterprise-ops-manager/6.0.13/ubi/Dockerfile @@ -0,0 +1,75 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Ops Manager" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="6.0.13" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Image" \ + description="MongoDB Enterprise Ops Manager" + + +ENV MMS_HOME /mongodb-ops-manager +ENV MMS_PROP_FILE ${MMS_HOME}/conf/conf-mms.properties +ENV MMS_CONF_FILE ${MMS_HOME}/conf/mms.conf +ENV MMS_LOG_DIR ${MMS_HOME}/logs +ENV MMS_TMP_DIR ${MMS_HOME}/tmp + +EXPOSE 8080 + +# OpsManager docker image needs to have the MongoDB dependencies because the +# backup daemon is running its database locally + +RUN microdnf install --disableplugin=subscription-manager -y \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + libpcap \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + tar \ + rpm-libs \ + net-tools \ + procps-ng \ + ncurses + + +COPY --from=base /data/licenses /licenses/ + + + +RUN curl --fail -L -o ops_manager.tar.gz https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-6.0.13.100.20230502T1610Z.tar.gz \ + && tar -xzf ops_manager.tar.gz \ + && rm ops_manager.tar.gz \ + && mv mongodb-mms* "${MMS_HOME}" + + +# permissions +RUN chmod -R 0777 "${MMS_LOG_DIR}" \ + && chmod -R 0777 "${MMS_TMP_DIR}" \ + && chmod -R 0775 "${MMS_HOME}/conf" \ + && chmod -R 0775 "${MMS_HOME}/jdk" \ + && mkdir "${MMS_HOME}/mongodb-releases/" \ + && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ + && chmod -R 0777 "${MMS_CONF_FILE}" \ + && chmod -R 0777 "${MMS_PROP_FILE}" + +# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. +# For now we need to move into the templates directory. 
+RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" + +USER 2000 + +# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) +ENTRYPOINT [ "sleep infinity" ] + + diff --git a/dockerfiles/mongodb-enterprise-ops-manager/6.0.14/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-ops-manager/6.0.14/ubi/Dockerfile new file mode 100644 index 0000000..3f0e79a --- /dev/null +++ b/dockerfiles/mongodb-enterprise-ops-manager/6.0.14/ubi/Dockerfile @@ -0,0 +1,75 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Ops Manager" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="6.0.14" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Image" \ + description="MongoDB Enterprise Ops Manager" + + +ENV MMS_HOME /mongodb-ops-manager +ENV MMS_PROP_FILE ${MMS_HOME}/conf/conf-mms.properties +ENV MMS_CONF_FILE ${MMS_HOME}/conf/mms.conf +ENV MMS_LOG_DIR ${MMS_HOME}/logs +ENV MMS_TMP_DIR ${MMS_HOME}/tmp + +EXPOSE 8080 + +# OpsManager docker image needs to have the MongoDB dependencies because the +# backup daemon is running its database locally + +RUN microdnf install --disableplugin=subscription-manager -y \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + libpcap \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + tar \ + rpm-libs \ + net-tools \ + procps-ng \ + ncurses + + +COPY --from=base /data/licenses /licenses/ + + + +RUN curl --fail -L -o ops_manager.tar.gz https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-6.0.14.100.20230530T1837Z.tar.gz \ + && tar -xzf ops_manager.tar.gz \ + && rm ops_manager.tar.gz \ + && mv mongodb-mms* "${MMS_HOME}" + + +# permissions +RUN chmod -R 0777 "${MMS_LOG_DIR}" \ + && chmod -R 0777 "${MMS_TMP_DIR}" \ + && chmod -R 0775 "${MMS_HOME}/conf" \ + && chmod -R 0775 "${MMS_HOME}/jdk" \ + && mkdir "${MMS_HOME}/mongodb-releases/" \ + && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ + && chmod -R 0777 "${MMS_CONF_FILE}" \ + && chmod -R 0777 "${MMS_PROP_FILE}" + +# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. +# For now we need to move into the templates directory. 
+RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" + +USER 2000 + +# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) +ENTRYPOINT [ "sleep infinity" ] + + diff --git a/mongodb-enterprise-openshift.yaml b/mongodb-enterprise-openshift.yaml index 54ecfae..775c3cd 100644 --- a/mongodb-enterprise-openshift.yaml +++ b/mongodb-enterprise-openshift.yaml @@ -216,7 +216,7 @@ spec: serviceAccountName: mongodb-enterprise-operator containers: - name: mongodb-enterprise-operator - image: "quay.io/mongodb/mongodb-enterprise-operator-ubi:1.19.1" + image: "quay.io/mongodb/mongodb-enterprise-operator-ubi:1.20.1" imagePullPolicy: Always args: - -watch-resource=mongodb @@ -254,7 +254,7 @@ spec: - name: INIT_DATABASE_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-init-database-ubi - name: INIT_DATABASE_VERSION - value: 1.0.15 + value: 1.0.17 - name: DATABASE_VERSION value: 2.0.2 # Ops Manager @@ -263,30 +263,32 @@ spec: - name: INIT_OPS_MANAGER_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-init-ops-manager-ubi - name: INIT_OPS_MANAGER_VERSION - value: 1.0.10 + value: 1.0.11 # AppDB - name: INIT_APPDB_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-init-appdb-ubi - name: INIT_APPDB_VERSION - value: 1.0.15 + value: 1.0.17 - name: OPS_MANAGER_IMAGE_PULL_POLICY value: Always - name: AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:12.0.15.7646-1" + value: "quay.io/mongodb/mongodb-agent-ubi:12.0.21.7698-1" - name: MONGODB_IMAGE - value: mongodb-enterprise-appdb-database-ubi + value: mongodb-enterprise-server - name: MONGODB_REPO_URL value: quay.io/mongodb + - name: MDB_IMAGE_TYPE + value: ubi8 - name: PERFORM_FAILOVER value: 'true' - name: RELATED_IMAGE_MONGODB_ENTERPRISE_DATABASE_IMAGE_2_0_2 value: "quay.io/mongodb/mongodb-enterprise-database-ubi:2.0.2" - - name: RELATED_IMAGE_INIT_DATABASE_IMAGE_REPOSITORY_1_0_15 - value: "quay.io/mongodb/mongodb-enterprise-init-database-ubi:1.0.15" - - name: RELATED_IMAGE_INIT_OPS_MANAGER_IMAGE_REPOSITORY_1_0_10 - value: "quay.io/mongodb/mongodb-enterprise-init-ops-manager-ubi:1.0.10" - - name: RELATED_IMAGE_INIT_APPDB_IMAGE_REPOSITORY_1_0_15 - value: "quay.io/mongodb/mongodb-enterprise-init-appdb-ubi:1.0.15" + - name: RELATED_IMAGE_INIT_DATABASE_IMAGE_REPOSITORY_1_0_17 + value: "quay.io/mongodb/mongodb-enterprise-init-database-ubi:1.0.17" + - name: RELATED_IMAGE_INIT_OPS_MANAGER_IMAGE_REPOSITORY_1_0_11 + value: "quay.io/mongodb/mongodb-enterprise-init-ops-manager-ubi:1.0.11" + - name: RELATED_IMAGE_INIT_APPDB_IMAGE_REPOSITORY_1_0_17 + value: "quay.io/mongodb/mongodb-enterprise-init-appdb-ubi:1.0.17" - name: RELATED_IMAGE_AGENT_IMAGE_11_0_5_6963_1 value: "quay.io/mongodb/mongodb-agent-ubi:11.0.5.6963-1" - name: RELATED_IMAGE_AGENT_IMAGE_11_12_0_7388_1 @@ -295,6 +297,10 @@ spec: value: "quay.io/mongodb/mongodb-agent-ubi:12.0.4.7554-1" - name: RELATED_IMAGE_AGENT_IMAGE_12_0_15_7646_1 value: "quay.io/mongodb/mongodb-agent-ubi:12.0.15.7646-1" + - name: RELATED_IMAGE_AGENT_IMAGE_12_0_20_7686_1 + value: "quay.io/mongodb/mongodb-agent-ubi:12.0.20.7686-1" + - name: RELATED_IMAGE_AGENT_IMAGE_12_0_21_7698_1 + value: "quay.io/mongodb/mongodb-agent-ubi:12.0.21.7698-1" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_5_0_0 value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:5.0.0" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_5_0_1 @@ -337,6 +343,8 @@ spec: value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:5.0.19" - name: 
RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_5_0_20 value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:5.0.20" + - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_5_0_21 + value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:5.0.21" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_0 value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.0" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_1 @@ -361,10 +369,114 @@ spec: value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.10" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_11 value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.11" + - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_12 + value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.12" + - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_13 + value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.13" + - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_14 + value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.14" + # since the official server images end with a different suffix we can re-use the same $mongodbImageEnv + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_0_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.0-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_1_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.1-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_2_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.2-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_3_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.3-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_4_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.4-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_5_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.5-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_6_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.6-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_7_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.7-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_8_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.8-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_9_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.9-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_10_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.10-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_11_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.11-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_12_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.12-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_13_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.13-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_14_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.14-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_15_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.15-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_16_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.16-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_17_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.17-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_18_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.18-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_19_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.19-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_20_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.20-ubi8" + - name: 
RELATED_IMAGE_MONGODB_IMAGE_4_4_21_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:4.4.21-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_0_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.0-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_1_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.1-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_2_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.2-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_3_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.3-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_4_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.4-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_5_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.5-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_6_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.6-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_7_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.7-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_8_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.8-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_9_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.9-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_10_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.10-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_11_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.11-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_12_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.12-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_13_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.13-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_14_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.14-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_15_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.15-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_16_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.16-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_17_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.17-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_18_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:5.0.18-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_6_0_0_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:6.0.0-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_6_0_1_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:6.0.1-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_6_0_2_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:6.0.2-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_6_0_3_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:6.0.3-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_6_0_4_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:6.0.4-ubi8" + - name: RELATED_IMAGE_MONGODB_IMAGE_6_0_5_ubi8 + value: "quay.io/mongodb/mongodb-enterprise-server:6.0.5-ubi8" + # mongodbLegacyAppDb will be deleted in 1.23 release - name: RELATED_IMAGE_MONGODB_IMAGE_4_2_11_ent value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:4.2.11-ent" - name: RELATED_IMAGE_MONGODB_IMAGE_4_2_2_ent value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:4.2.2-ent" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_2_24_ent + value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:4.2.24-ent" - name: RELATED_IMAGE_MONGODB_IMAGE_4_2_6_ent value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:4.2.6-ent" - name: RELATED_IMAGE_MONGODB_IMAGE_4_2_8_ent @@ -375,9 +487,17 @@ 
spec: value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:4.4.11-ent" - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_4_ent value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:4.4.4-ent" + - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_21_ent + value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:4.4.21-ent" - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_1_ent value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:5.0.1-ent" - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_5_ent value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:5.0.5-ent" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_6_ent + value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:5.0.6-ent" - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_7_ent value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:5.0.7-ent" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_14_ent + value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:5.0.14-ent" + - name: RELATED_IMAGE_MONGODB_IMAGE_5_0_18_ent + value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:5.0.18-ent" diff --git a/mongodb-enterprise.yaml b/mongodb-enterprise.yaml index e3b405d..4b59b8b 100644 --- a/mongodb-enterprise.yaml +++ b/mongodb-enterprise.yaml @@ -219,7 +219,7 @@ spec: runAsUser: 2000 containers: - name: mongodb-enterprise-operator - image: "quay.io/mongodb/mongodb-enterprise-operator:1.19.1" + image: "quay.io/mongodb/mongodb-enterprise-operator:1.20.1" imagePullPolicy: Always args: - -watch-resource=mongodb @@ -251,32 +251,34 @@ spec: value: Always # Database - name: MONGODB_ENTERPRISE_DATABASE_IMAGE - value: quay.io/mongodb/mongodb-enterprise-database + value: quay.io/mongodb/mongodb-enterprise-database-ubi - name: INIT_DATABASE_IMAGE_REPOSITORY - value: quay.io/mongodb/mongodb-enterprise-init-database + value: quay.io/mongodb/mongodb-enterprise-init-database-ubi - name: INIT_DATABASE_VERSION - value: 1.0.15 + value: 1.0.17 - name: DATABASE_VERSION value: 2.0.2 # Ops Manager - name: OPS_MANAGER_IMAGE_REPOSITORY - value: quay.io/mongodb/mongodb-enterprise-ops-manager + value: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi - name: INIT_OPS_MANAGER_IMAGE_REPOSITORY - value: quay.io/mongodb/mongodb-enterprise-init-ops-manager + value: quay.io/mongodb/mongodb-enterprise-init-ops-manager-ubi - name: INIT_OPS_MANAGER_VERSION - value: 1.0.10 + value: 1.0.11 # AppDB - name: INIT_APPDB_IMAGE_REPOSITORY - value: quay.io/mongodb/mongodb-enterprise-init-appdb + value: quay.io/mongodb/mongodb-enterprise-init-appdb-ubi - name: INIT_APPDB_VERSION - value: 1.0.15 + value: 1.0.17 - name: OPS_MANAGER_IMAGE_PULL_POLICY value: Always - name: AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent:12.0.15.7646-1" + value: "quay.io/mongodb/mongodb-agent-ubi:12.0.21.7698-1" - name: MONGODB_IMAGE - value: mongodb-enterprise-appdb-database + value: mongodb-enterprise-server - name: MONGODB_REPO_URL value: quay.io/mongodb + - name: MDB_IMAGE_TYPE + value: ubi8 - name: PERFORM_FAILOVER value: 'true' diff --git a/samples/ops-manager/ops-manager-backup.yaml b/samples/ops-manager/ops-manager-backup.yaml index c9558fd..42ba276 100644 --- a/samples/ops-manager/ops-manager-backup.yaml +++ b/samples/ops-manager/ops-manager-backup.yaml @@ -37,7 +37,7 @@ spec: name: admin-user # Configures the list of S3 Oplog Store Configs - s3OpLogStores: + s3OplogStores: - name: my-s3-oplog-store # the name of the secret which contains aws credentials s3SecretRef: diff --git a/tools/multicluster/Dockerfile b/tools/multicluster/Dockerfile index 22965e2..b611be5 100644 --- 
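Note: the sample fix above renames the Ops Manager backup oplog store key from s3OpLogStores to s3OplogStores. A minimal sketch of the corrected block in a MongoDBOpsManager resource, assuming the list sits under spec.backup and using a hypothetical credentials secret (other required S3 store fields omitted):

spec:
  backup:
    enabled: true                  # assumed; only the oplog store list appears in the diff
    s3OplogStores:                 # corrected key, per the sample change
      - name: my-s3-oplog-store
        s3SecretRef:
          name: my-s3-credentials  # hypothetical secret holding AWS credentials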
a/tools/multicluster/Dockerfile +++ b/tools/multicluster/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19 as builder +FROM golang:1.20 as builder WORKDIR /go/src ADD . . diff --git a/tools/multicluster/cmd/debug.go b/tools/multicluster/cmd/debug.go new file mode 100644 index 0000000..b0e48e9 --- /dev/null +++ b/tools/multicluster/cmd/debug.go @@ -0,0 +1,138 @@ +package cmd + +import ( + "fmt" + "os" + "strings" + + "k8s.io/client-go/tools/clientcmd" + + "github.com/10gen/ops-manager-kubernetes/multi/pkg/common" + "github.com/10gen/ops-manager-kubernetes/multi/pkg/debug" + "github.com/spf13/cobra" +) + +type Flags struct { + common.Flags + Anonymize bool + UseOwnerRef bool +} + +func (f *Flags) ParseDebugFlags() error { + if len(common.MemberClusters) > 0 { + f.MemberClusters = strings.Split(common.MemberClusters, ",") + } + + configFilePath := common.LoadKubeConfigFilePath() + kubeconfig, err := clientcmd.LoadFromFile(configFilePath) + if err != nil { + return fmt.Errorf("error loading kubeconfig file '%s': %s", configFilePath, err) + } + if len(f.CentralCluster) == 0 { + f.CentralCluster = kubeconfig.CurrentContext + f.CentralClusterNamespace = kubeconfig.Contexts[kubeconfig.CurrentContext].Namespace + } + + return nil +} + +var debugFlags = &Flags{} + +func init() { + rootCmd.AddCommand(debugCmd) + + debugCmd.Flags().StringVar(&common.MemberClusters, "member-clusters", "", "Comma separated list of member clusters. [optional]") + debugCmd.Flags().StringVar(&debugFlags.CentralCluster, "central-cluster", "", "The central cluster the operator will be deployed in. [optional]") + debugCmd.Flags().StringVar(&debugFlags.MemberClusterNamespace, "member-cluster-namespace", "", "The namespace the member cluster resources will be deployed to. [optional]") + debugCmd.Flags().StringVar(&debugFlags.CentralClusterNamespace, "central-cluster-namespace", "", "The namespace the Operator will be deployed to. [optional]") + debugCmd.Flags().StringVar(&common.MemberClustersApiServers, "member-clusters-api-servers", "", "Comma separated list of api servers addresses. [optional, default will take addresses from KUBECONFIG env var]") + debugCmd.Flags().BoolVar(&debugFlags.Anonymize, "anonymize", true, "True if anonymization should be turned on") + debugCmd.Flags().BoolVar(&debugFlags.UseOwnerRef, "ownerRef", false, "True if the collection should be made with owner references (consider turning it on after CLOUDP-176772 is fixed)") +} + +var debugCmd = &cobra.Command{ + Use: "debug", + Short: "Downloads all resources required for debugging and stores them into the disk", + Long: `'debug' downloads all resources required for debugging and stores them into the disk. 
+ +Example: + +kubectl-mongodb debug +kubectl-mongodb debug setup --central-cluster="operator-cluster" --member-clusters="cluster-1,cluster-2,cluster-3" --member-cluster-namespace=mongodb --central-cluster-namespace=mongodb + +`, + Run: func(cmd *cobra.Command, args []string) { + err := debugFlags.ParseDebugFlags() + if err != nil { + fmt.Printf("error parsing flags: %s\n", err) + os.Exit(1) + } + clientMap, err := common.CreateClientMap(debugFlags.MemberClusters, debugFlags.CentralCluster, common.LoadKubeConfigFilePath(), common.GetKubernetesClient) + if err != nil { + fmt.Printf("failed to create clientset map: %s", err) + os.Exit(1) + } + + var collectors []debug.Collector + collectors = append(collectors, &debug.StatefulSetCollector{}) + collectors = append(collectors, &debug.ConfigMapCollector{}) + collectors = append(collectors, &debug.SecretCollector{}) + collectors = append(collectors, &debug.ServiceAccountCollector{}) + collectors = append(collectors, &debug.RolesCollector{}) + collectors = append(collectors, &debug.RolesBindingsCollector{}) + collectors = append(collectors, &debug.MongoDBCollector{}) + collectors = append(collectors, &debug.MongoDBMultiClusterCollector{}) + collectors = append(collectors, &debug.MongoDBUserCollector{}) + collectors = append(collectors, &debug.OpsManagerCollector{}) + collectors = append(collectors, &debug.MongoDBCommunityCollector{}) + collectors = append(collectors, &debug.EventsCollector{}) + collectors = append(collectors, &debug.LogsCollector{}) + collectors = append(collectors, &debug.AgentHealthFileCollector{}) + + var anonymizer debug.Anonymizer + if debugFlags.Anonymize { + anonymizer = &debug.SensitiveDataAnonymizer{} + } else { + anonymizer = &debug.NoOpAnonymizer{} + } + + var filter debug.Filter + + if debugFlags.UseOwnerRef { + filter = &debug.WithOwningReference{} + } else { + filter = &debug.AcceptAllFilter{} + } + + var collectionResults []debug.CollectionResult + + collectionResults = append(collectionResults, debug.Collect(cmd.Context(), clientMap[debugFlags.CentralCluster], debugFlags.CentralCluster, debugFlags.CentralClusterNamespace, filter, collectors, anonymizer)) + + if len(debugFlags.MemberClusters) > 0 { + for i := range debugFlags.MemberClusters { + collectionResults = append(collectionResults, debug.Collect(cmd.Context(), clientMap[debugFlags.MemberClusters[i]], debugFlags.MemberClusters[i], debugFlags.MemberClusterNamespace, filter, collectors, anonymizer)) + } + } + + fmt.Printf("==== Report ====\n\n") + fmt.Printf("Anonymisation: %v\n", debugFlags.Anonymize) + fmt.Printf("Following owner refs: %v\n", debugFlags.UseOwnerRef) + fmt.Printf("Collected data from %d clusters\n", len(collectionResults)) + fmt.Printf("\n\n==== Collected Data ====\n\n") + + storeDirectory, err := debug.DebugDirectory() + if err != nil { + fmt.Printf("failed to obtain directory for collecting the results: %v", err) + os.Exit(1) + } + + if len(collectionResults) > 0 { + directoryName, compressedFileName, err := debug.WriteToFile(storeDirectory, collectionResults...) 
+ if err != nil { + panic(err) + } + fmt.Printf("Debug data file (compressed): %v\n", compressedFileName) + fmt.Printf("Debug data directory: %v\n", directoryName) + } + }, +} diff --git a/tools/multicluster/cmd/recover.go b/tools/multicluster/cmd/recover.go index 8eeffd0..4c98541 100644 --- a/tools/multicluster/cmd/recover.go +++ b/tools/multicluster/cmd/recover.go @@ -5,6 +5,8 @@ import ( "os" "strings" + "github.com/10gen/ops-manager-kubernetes/multi/pkg/common" + "github.com/spf13/cobra" "golang.org/x/xerrors" "k8s.io/client-go/tools/clientcmd" @@ -13,18 +15,18 @@ import ( func init() { multiclusterCmd.AddCommand(recoverCmd) - recoverCmd.Flags().StringVar(&memberClusters, "member-clusters", "", "Comma separated list of member clusters. [required]") - recoverCmd.Flags().StringVar(&recoverFlags.serviceAccount, "service-account", "mongodb-enterprise-operator-multi-cluster", "Name of the service account which should be used for the Operator to communicate with the member clusters. [optional, default: mongodb-enterprise-operator-multi-cluster]") - recoverCmd.Flags().StringVar(&recoverFlags.centralCluster, "central-cluster", "", "The central cluster the operator will be deployed in. [required]") - recoverCmd.Flags().StringVar(&recoverFlags.memberClusterNamespace, "member-cluster-namespace", "", "The namespace the member cluster resources will be deployed to. [required]") - recoverCmd.Flags().StringVar(&recoverFlags.centralClusterNamespace, "central-cluster-namespace", "", "The namespace the Operator will be deployed to. [required]") - recoverCmd.Flags().BoolVar(&recoverFlags.cleanup, "cleanup", false, "Delete all previously created resources except for namespaces. [optional default: false]") - recoverCmd.Flags().BoolVar(&recoverFlags.clusterScoped, "cluster-scoped", false, "Create ClusterRole and ClusterRoleBindings for member clusters. [optional default: false]") - recoverCmd.Flags().StringVar(&recoverFlags.operatorName, "operator-name", defaultOperatorName, "Name used to identify the deployment of the operator. [optional, default: mongodb-enterprise-operator]") - recoverCmd.Flags().BoolVar(&recoverFlags.installDatabaseRoles, "install-database-roles", false, "Install the ServiceAccounts and Roles required for running database workloads in the member clusters. [optional default: false]") - recoverCmd.Flags().StringVar(&recoverFlags.sourceCluster, "source-cluster", "", "The source cluster for recovery. This has to be one of the healthy member cluster that is the source of truth for new cluster configuration. [required]") - recoverCmd.Flags().BoolVar(&recoverFlags.createServiceAccountSecrets, "create-service-account-secrets", true, "Create service account token secrets. [optional default: true]") - recoverCmd.Flags().StringVar(&memberClustersApiServers, "member-clusters-api-servers", "", "Comma separated list of api servers addresses. [optional, default will take addresses from KUBECONFIG env var]") + recoverCmd.Flags().StringVar(&common.MemberClusters, "member-clusters", "", "Comma separated list of member clusters. [required]") + recoverCmd.Flags().StringVar(&RecoverFlags.ServiceAccount, "service-account", "mongodb-enterprise-operator-multi-cluster", "Name of the service account which should be used for the Operator to communicate with the member clusters. [optional, default: mongodb-enterprise-operator-multi-cluster]") + recoverCmd.Flags().StringVar(&RecoverFlags.CentralCluster, "central-cluster", "", "The central cluster the operator will be deployed in. 
[required]") + recoverCmd.Flags().StringVar(&RecoverFlags.MemberClusterNamespace, "member-cluster-namespace", "", "The namespace the member cluster resources will be deployed to. [required]") + recoverCmd.Flags().StringVar(&RecoverFlags.CentralClusterNamespace, "central-cluster-namespace", "", "The namespace the Operator will be deployed to. [required]") + recoverCmd.Flags().BoolVar(&RecoverFlags.Cleanup, "cleanup", false, "Delete all previously created resources except for namespaces. [optional default: false]") + recoverCmd.Flags().BoolVar(&RecoverFlags.ClusterScoped, "cluster-scoped", false, "Create ClusterRole and ClusterRoleBindings for member clusters. [optional default: false]") + recoverCmd.Flags().StringVar(&RecoverFlags.OperatorName, "operator-name", common.DefaultOperatorName, "Name used to identify the deployment of the operator. [optional, default: mongodb-enterprise-operator]") + recoverCmd.Flags().BoolVar(&RecoverFlags.InstallDatabaseRoles, "install-database-roles", false, "Install the ServiceAccounts and Roles required for running database workloads in the member clusters. [optional default: false]") + recoverCmd.Flags().StringVar(&RecoverFlags.SourceCluster, "source-cluster", "", "The source cluster for recovery. This has to be one of the healthy member cluster that is the source of truth for new cluster configuration. [required]") + recoverCmd.Flags().BoolVar(&RecoverFlags.CreateServiceAccountSecrets, "create-service-account-secrets", true, "Create service account token secrets. [optional default: true]") + recoverCmd.Flags().StringVar(&common.MemberClustersApiServers, "member-clusters-api-servers", "", "Comma separated list of api servers addresses. [optional, default will take addresses from KUBECONFIG env var]") } // recoverCmd represents the recover command @@ -45,18 +47,18 @@ kubectl-mongodb multicluster recover --central-cluster="operator-cluster" --memb os.Exit(1) } - clientMap, err := createClientMap(recoverFlags.memberClusters, recoverFlags.centralCluster, loadKubeConfigFilePath(), getKubernetesClient) + clientMap, err := common.CreateClientMap(RecoverFlags.MemberClusters, RecoverFlags.CentralCluster, common.LoadKubeConfigFilePath(), common.GetKubernetesClient) if err != nil { fmt.Printf("failed to create clientset map: %s", err) os.Exit(1) } - if err := ensureMultiClusterResources(cmd.Context(), recoverFlags, clientMap); err != nil { + if err := common.EnsureMultiClusterResources(cmd.Context(), RecoverFlags, clientMap); err != nil { fmt.Println(err) os.Exit(1) } - if err := replaceClusterMembersConfigMap(cmd.Context(), clientMap[recoverFlags.centralCluster], recoverFlags); err != nil { + if err := common.ReplaceClusterMembersConfigMap(cmd.Context(), clientMap[RecoverFlags.CentralCluster], RecoverFlags); err != nil { fmt.Println(err) os.Exit(1) } @@ -64,32 +66,32 @@ kubectl-mongodb multicluster recover --central-cluster="operator-cluster" --memb }, } -var recoverFlags = flags{} +var RecoverFlags = common.Flags{} func parseRecoverFlags(args []string) error { - if anyAreEmpty(memberClusters, recoverFlags.serviceAccount, recoverFlags.centralCluster, recoverFlags.memberClusterNamespace, recoverFlags.centralClusterNamespace, recoverFlags.sourceCluster) { + if common.AnyAreEmpty(common.MemberClusters, RecoverFlags.ServiceAccount, RecoverFlags.CentralCluster, RecoverFlags.MemberClusterNamespace, RecoverFlags.CentralClusterNamespace, RecoverFlags.SourceCluster) { return xerrors.Errorf("non empty values are required for [service-account, member-clusters, central-cluster, 
member-cluster-namespace, central-cluster-namespace, source-cluster]") } - recoverFlags.memberClusters = strings.Split(memberClusters, ",") - if !contains(recoverFlags.memberClusters, recoverFlags.sourceCluster) { - return xerrors.Errorf("source-cluster has to be one of the healthy member clusters: %s", memberClusters) + RecoverFlags.MemberClusters = strings.Split(common.MemberClusters, ",") + if !common.Contains(RecoverFlags.MemberClusters, RecoverFlags.SourceCluster) { + return xerrors.Errorf("source-cluster has to be one of the healthy member clusters: %s", common.MemberClusters) } - if strings.TrimSpace(memberClustersApiServers) != "" { - recoverFlags.memberClusterApiServerUrls = strings.Split(memberClustersApiServers, ",") - if len(recoverFlags.memberClusterApiServerUrls) != len(recoverFlags.memberClusters) { - return xerrors.Errorf("expected %d addresses in member-clusters-api-servers parameter but got %d", len(recoverFlags.memberClusters), len(recoverFlags.memberClusterApiServerUrls)) + if strings.TrimSpace(common.MemberClustersApiServers) != "" { + RecoverFlags.MemberClusterApiServerUrls = strings.Split(common.MemberClustersApiServers, ",") + if len(RecoverFlags.MemberClusterApiServerUrls) != len(RecoverFlags.MemberClusters) { + return xerrors.Errorf("expected %d addresses in member-clusters-api-servers parameter but got %d", len(RecoverFlags.MemberClusters), len(RecoverFlags.MemberClusterApiServerUrls)) } } - configFilePath := loadKubeConfigFilePath() + configFilePath := common.LoadKubeConfigFilePath() kubeconfig, err := clientcmd.LoadFromFile(configFilePath) if err != nil { return xerrors.Errorf("error loading kubeconfig file '%s': %w", configFilePath, err) } - if len(recoverFlags.memberClusterApiServerUrls) == 0 { - if recoverFlags.memberClusterApiServerUrls, err = getMemberClusterApiServerUrls(kubeconfig, recoverFlags.memberClusters); err != nil { + if len(RecoverFlags.MemberClusterApiServerUrls) == 0 { + if RecoverFlags.MemberClusterApiServerUrls, err = common.GetMemberClusterApiServerUrls(kubeconfig, RecoverFlags.MemberClusters); err != nil { return err } } diff --git a/tools/multicluster/cmd/setup.go b/tools/multicluster/cmd/setup.go index 0a24ee1..009b22e 100644 --- a/tools/multicluster/cmd/setup.go +++ b/tools/multicluster/cmd/setup.go @@ -5,6 +5,8 @@ import ( "os" "strings" + "github.com/10gen/ops-manager-kubernetes/multi/pkg/common" + "github.com/spf13/cobra" "golang.org/x/xerrors" "k8s.io/client-go/tools/clientcmd" @@ -13,16 +15,16 @@ import ( func init() { multiclusterCmd.AddCommand(setupCmd) - setupCmd.Flags().StringVar(&memberClusters, "member-clusters", "", "Comma separated list of member clusters. [required]") - setupCmd.Flags().StringVar(&setupFlags.serviceAccount, "service-account", "mongodb-enterprise-operator-multi-cluster", "Name of the service account which should be used for the Operator to communicate with the member clusters. [optional, default: mongodb-enterprise-operator-multi-cluster]") - setupCmd.Flags().StringVar(&setupFlags.centralCluster, "central-cluster", "", "The central cluster the operator will be deployed in. [required]") - setupCmd.Flags().StringVar(&setupFlags.memberClusterNamespace, "member-cluster-namespace", "", "The namespace the member cluster resources will be deployed to. [required]") - setupCmd.Flags().StringVar(&setupFlags.centralClusterNamespace, "central-cluster-namespace", "", "The namespace the Operator will be deployed to. 
[required]") - setupCmd.Flags().BoolVar(&setupFlags.cleanup, "cleanup", false, "Delete all previously created resources except for namespaces. [optional default: false]") - setupCmd.Flags().BoolVar(&setupFlags.clusterScoped, "cluster-scoped", false, "Create ClusterRole and ClusterRoleBindings for member clusters. [optional default: false]") - setupCmd.Flags().BoolVar(&setupFlags.installDatabaseRoles, "install-database-roles", false, "Install the ServiceAccounts and Roles required for running database workloads in the member clusters. [optional default: false]") - setupCmd.Flags().BoolVar(&setupFlags.createServiceAccountSecrets, "create-service-account-secrets", true, "Create service account token secrets. [optional default: true]") - setupCmd.Flags().StringVar(&memberClustersApiServers, "member-clusters-api-servers", "", "Comma separated list of api servers addresses. [optional, default will take addresses from KUBECONFIG env var]") + setupCmd.Flags().StringVar(&common.MemberClusters, "member-clusters", "", "Comma separated list of member clusters. [required]") + setupCmd.Flags().StringVar(&setupFlags.ServiceAccount, "service-account", "mongodb-enterprise-operator-multi-cluster", "Name of the service account which should be used for the Operator to communicate with the member clusters. [optional, default: mongodb-enterprise-operator-multi-cluster]") + setupCmd.Flags().StringVar(&setupFlags.CentralCluster, "central-cluster", "", "The central cluster the operator will be deployed in. [required]") + setupCmd.Flags().StringVar(&setupFlags.MemberClusterNamespace, "member-cluster-namespace", "", "The namespace the member cluster resources will be deployed to. [required]") + setupCmd.Flags().StringVar(&setupFlags.CentralClusterNamespace, "central-cluster-namespace", "", "The namespace the Operator will be deployed to. [required]") + setupCmd.Flags().BoolVar(&setupFlags.Cleanup, "cleanup", false, "Delete all previously created resources except for namespaces. [optional default: false]") + setupCmd.Flags().BoolVar(&setupFlags.ClusterScoped, "cluster-scoped", false, "Create ClusterRole and ClusterRoleBindings for member clusters. [optional default: false]") + setupCmd.Flags().BoolVar(&setupFlags.InstallDatabaseRoles, "install-database-roles", false, "Install the ServiceAccounts and Roles required for running database workloads in the member clusters. [optional default: false]") + setupCmd.Flags().BoolVar(&setupFlags.CreateServiceAccountSecrets, "create-service-account-secrets", true, "Create service account token secrets. [optional default: true]") + setupCmd.Flags().StringVar(&common.MemberClustersApiServers, "member-clusters-api-servers", "", "Comma separated list of api servers addresses. 
[optional, default will take addresses from KUBECONFIG env var]") } // setupCmd represents the setup command @@ -42,18 +44,18 @@ kubectl-mongodb multicluster setup --central-cluster="operator-cluster" --member os.Exit(1) } - clientMap, err := createClientMap(setupFlags.memberClusters, setupFlags.centralCluster, loadKubeConfigFilePath(), getKubernetesClient) + clientMap, err := common.CreateClientMap(setupFlags.MemberClusters, setupFlags.CentralCluster, common.LoadKubeConfigFilePath(), common.GetKubernetesClient) if err != nil { fmt.Printf("failed to create clientset map: %s", err) os.Exit(1) } - if err := ensureMultiClusterResources(cmd.Context(), setupFlags, clientMap); err != nil { + if err := common.EnsureMultiClusterResources(cmd.Context(), setupFlags, clientMap); err != nil { fmt.Println(err) os.Exit(1) } - if err := replaceClusterMembersConfigMap(cmd.Context(), clientMap[setupFlags.centralCluster], setupFlags); err != nil { + if err := common.ReplaceClusterMembersConfigMap(cmd.Context(), clientMap[setupFlags.CentralCluster], setupFlags); err != nil { fmt.Println(err) os.Exit(1) } @@ -61,30 +63,30 @@ kubectl-mongodb multicluster setup --central-cluster="operator-cluster" --member }, } -var setupFlags = flags{} +var setupFlags = common.Flags{} func parseSetupFlags(args []string) error { - if anyAreEmpty(memberClusters, setupFlags.serviceAccount, setupFlags.centralCluster, setupFlags.memberClusterNamespace, setupFlags.centralClusterNamespace) { + if common.AnyAreEmpty(common.MemberClusters, setupFlags.ServiceAccount, setupFlags.CentralCluster, setupFlags.MemberClusterNamespace, setupFlags.CentralClusterNamespace) { return xerrors.Errorf("non empty values are required for [service-account, member-clusters, central-cluster, member-cluster-namespace, central-cluster-namespace]") } - setupFlags.memberClusters = strings.Split(memberClusters, ",") + setupFlags.MemberClusters = strings.Split(common.MemberClusters, ",") - if strings.TrimSpace(memberClustersApiServers) != "" { - setupFlags.memberClusterApiServerUrls = strings.Split(memberClustersApiServers, ",") - if len(setupFlags.memberClusterApiServerUrls) != len(setupFlags.memberClusters) { - return xerrors.Errorf("expected %d addresses in member-clusters-api-servers parameter but got %d", len(setupFlags.memberClusters), len(setupFlags.memberClusterApiServerUrls)) + if strings.TrimSpace(common.MemberClustersApiServers) != "" { + setupFlags.MemberClusterApiServerUrls = strings.Split(common.MemberClustersApiServers, ",") + if len(setupFlags.MemberClusterApiServerUrls) != len(setupFlags.MemberClusters) { + return xerrors.Errorf("expected %d addresses in member-clusters-api-servers parameter but got %d", len(setupFlags.MemberClusters), len(setupFlags.MemberClusterApiServerUrls)) } } - configFilePath := loadKubeConfigFilePath() + configFilePath := common.LoadKubeConfigFilePath() kubeconfig, err := clientcmd.LoadFromFile(configFilePath) if err != nil { return xerrors.Errorf("error loading kubeconfig file '%s': %w", configFilePath, err) } - if len(setupFlags.memberClusterApiServerUrls) == 0 { - if setupFlags.memberClusterApiServerUrls, err = getMemberClusterApiServerUrls(kubeconfig, setupFlags.memberClusters); err != nil { + if len(setupFlags.MemberClusterApiServerUrls) == 0 { + if setupFlags.MemberClusterApiServerUrls, err = common.GetMemberClusterApiServerUrls(kubeconfig, setupFlags.MemberClusters); err != nil { return err } } diff --git a/tools/multicluster/go.mod b/tools/multicluster/go.mod index 6551325..d1d4d79 100644 --- 
a/tools/multicluster/go.mod +++ b/tools/multicluster/go.mod @@ -1,6 +1,6 @@ module github.com/10gen/ops-manager-kubernetes/multi -go 1.19 +go 1.20 require ( github.com/ghodss/yaml v1.0.0 @@ -10,6 +10,7 @@ require ( k8s.io/api v0.24.3 k8s.io/apimachinery v0.24.3 k8s.io/client-go v0.24.3 + k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 ) require ( @@ -32,6 +33,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.6 // indirect + github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -51,7 +53,6 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.60.1 // indirect k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect sigs.k8s.io/yaml v1.2.0 // indirect diff --git a/tools/multicluster/go.sum b/tools/multicluster/go.sum index 0bc79b0..4aa472a 100644 --- a/tools/multicluster/go.sum +++ b/tools/multicluster/go.sum @@ -51,6 +51,7 @@ github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tN github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -67,6 +68,7 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= @@ -206,6 +208,7 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod 
h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= diff --git a/tools/multicluster/pkg/common/common.go b/tools/multicluster/pkg/common/common.go new file mode 100644 index 0000000..0b02f67 --- /dev/null +++ b/tools/multicluster/pkg/common/common.go @@ -0,0 +1,981 @@ +package common + +import ( + "context" + "fmt" + "strings" + + "github.com/ghodss/yaml" + "golang.org/x/xerrors" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +type clusterType string + +// This tool handles the creation of ServiceAccounts and roles across multiple clusters. +// Service Accounts, Roles and RoleBindings are created in all the member clusters and the central cluster. +// The Service Account token secrets from the member clusters are merged into a KubeConfig file which is then +// created in the central cluster. + +var MemberClusters string +var MemberClustersApiServers string + +const ( + centralCluster clusterType = "CENTRAL" + memberCluster clusterType = "MEMBER" +) + +// Flags holds all the fields provided by the user. +type Flags struct { + MemberClusters []string + MemberClusterApiServerUrls []string + ServiceAccount string + CentralCluster string + MemberClusterNamespace string + CentralClusterNamespace string + Cleanup bool + ClusterScoped bool + InstallDatabaseRoles bool + OperatorName string + SourceCluster string + CreateServiceAccountSecrets bool +} + +const ( + KubeConfigSecretName = "mongodb-enterprise-operator-multi-cluster-kubeconfig" + KubeConfigSecretKey = "kubeconfig" + AppdbServiceAccount = "mongodb-enterprise-appdb" + DatabasePodsServiceAccount = "mongodb-enterprise-database-pods" + OpsManagerServiceAccount = "mongodb-enterprise-ops-manager" + AppdbRole = "mongodb-enterprise-appdb" + AppdbRoleBinding = "mongodb-enterprise-appdb" + DefaultOperatorName = "mongodb-enterprise-operator" + DefaultOperatorConfigMapName = DefaultOperatorName + "-member-list" +) + +// KubeConfigFile represents the contents of a KubeConfig file. +type KubeConfigFile struct { + ApiVersion string `json:"apiVersion"` + Kind string `json:"kind"` + Clusters []KubeConfigClusterItem `json:"clusters"` + Contexts []KubeConfigContextItem `json:"contexts"` + Users []KubeConfigUserItem `json:"users"` +} + +type KubeConfigClusterItem struct { + Name string `json:"name"` + Cluster KubeConfigCluster `json:"cluster"` +} + +type KubeConfigCluster struct { + CertificateAuthorityData []byte `json:"certificate-authority-data"` + Server string `json:"server"` +} + +type KubeConfigContextItem struct { + Name string `json:"name"` + Context KubeConfigContext `json:"context"` +} + +type KubeConfigContext struct { + Cluster string `json:"cluster"` + Namespace string `json:"namespace"` + User string `json:"user"` +} + +type KubeConfigUserItem struct { + Name string `json:"name"` + User KubeConfigUser `json:"user"` +} + +type KubeConfigUser struct { + Token string `json:"token"` +} + +// multiClusterLabels the labels that will be applied to every resource created by this tool. 
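+// Every resource created by the CLI carries this label; cleanupClusterResources later selects on the matching +// label selector (multi-cluster=true) so that only tool-managed resources are ever deleted.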
+func multiClusterLabels() map[string]string { + return map[string]string{ + "multi-cluster": "true", + } +} + +// performCleanup cleans up all of the resources that were created by this script in the past. +func performCleanup(ctx context.Context, clientMap map[string]KubeClient, flags Flags) error { + for _, cluster := range flags.MemberClusters { + c := clientMap[cluster] + if err := cleanupClusterResources(ctx, c, cluster, flags.MemberClusterNamespace); err != nil { + return xerrors.Errorf("failed cleaning up cluster %s namespace %s: %w", cluster, flags.MemberClusterNamespace, err) + } + } + c := clientMap[flags.CentralCluster] + if err := cleanupClusterResources(ctx, c, flags.CentralCluster, flags.CentralClusterNamespace); err != nil { + return xerrors.Errorf("failed cleaning up cluster %s namespace %s: %w", flags.CentralCluster, flags.CentralClusterNamespace, err) + } + return nil +} + +// cleanupClusterResources cleans up all the resources created by this tool in a given namespace. +func cleanupClusterResources(ctx context.Context, clientset KubeClient, clusterName, namespace string) error { + listOpts := metav1.ListOptions{ + LabelSelector: "multi-cluster=true", + } + + // clean up secrets + secretList, err := clientset.CoreV1().Secrets(namespace).List(ctx, listOpts) + + if err != nil { + return err + } + + if secretList != nil { + for _, s := range secretList.Items { + fmt.Printf("Deleting Secret: %s in cluster %s\n", s.Name, clusterName) + if err := clientset.CoreV1().Secrets(namespace).Delete(ctx, s.Name, metav1.DeleteOptions{}); err != nil { + return err + } + } + } + + // clean up service accounts + serviceAccountList, err := clientset.CoreV1().ServiceAccounts(namespace).List(ctx, listOpts) + + if err != nil { + return err + } + + if serviceAccountList != nil { + for _, sa := range serviceAccountList.Items { + fmt.Printf("Deleting ServiceAccount: %s in cluster %s\n", sa.Name, clusterName) + if err := clientset.CoreV1().ServiceAccounts(namespace).Delete(ctx, sa.Name, metav1.DeleteOptions{}); err != nil { + return err + } + } + } + + // clean up roles + roleList, err := clientset.RbacV1().Roles(namespace).List(ctx, listOpts) + if err != nil { + return err + } + + for _, r := range roleList.Items { + fmt.Printf("Deleting Role: %s in cluster %s\n", r.Name, clusterName) + if err := clientset.RbacV1().Roles(namespace).Delete(ctx, r.Name, metav1.DeleteOptions{}); err != nil { + return err + } + } + + // clean up role bindings + roleBindings, err := clientset.RbacV1().RoleBindings(namespace).List(ctx, listOpts) + if !errors.IsNotFound(err) && err != nil { + return err + } + + if roleBindings != nil { + for _, crb := range roleBindings.Items { + fmt.Printf("Deleting RoleBinding: %s in cluster %s\n", crb.Name, clusterName) + if err := clientset.RbacV1().RoleBindings(namespace).Delete(ctx, crb.Name, metav1.DeleteOptions{}); err != nil { + return err + } + } + } + + // clean up cluster role bindings + clusterRoleBindings, err := clientset.RbacV1().ClusterRoleBindings().List(ctx, listOpts) + if !errors.IsNotFound(err) && err != nil { + return err + } + + if clusterRoleBindings != nil { + for _, crb := range 
clusterRoleBindings.Items { + fmt.Printf("Deleting ClusterRoleBinding: %s in cluster %s\n", crb.Name, clusterName) + if err := clientset.RbacV1().ClusterRoleBindings().Delete(ctx, crb.Name, metav1.DeleteOptions{}); err != nil { + return err + } + } + } + + // clean up cluster roles + clusterRoles, err := clientset.RbacV1().ClusterRoles().List(ctx, listOpts) + if !errors.IsNotFound(err) && err != nil { + return err + } + + if clusterRoles != nil { + for _, cr := range clusterRoles.Items { + fmt.Printf("Deleting ClusterRole: %s in cluster %s\n", cr.Name, clusterName) + if err := clientset.RbacV1().ClusterRoles().Delete(ctx, cr.Name, metav1.DeleteOptions{}); err != nil { + return err + } + } + } + + return nil +} + +// ensureNamespace creates the namespace with the given clientset. +func ensureNamespace(ctx context.Context, clientSet KubeClient, nsName string) error { + ns := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: nsName, + Labels: multiClusterLabels(), + }, + } + _, err := clientSet.CoreV1().Namespaces().Create(ctx, &ns, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("failed to create namespace %s: %w", ns.Name, err) + } + + return nil +} + +// ensureAllClusterNamespacesExist makes sure the namespace we will be creating exists in all clusters. +func ensureAllClusterNamespacesExist(ctx context.Context, clientSets map[string]KubeClient, f Flags) error { + for _, clusterName := range f.MemberClusters { + if err := ensureNamespace(ctx, clientSets[clusterName], f.MemberClusterNamespace); err != nil { + return xerrors.Errorf("failed to ensure namespace %s in member cluster %s: %w", f.MemberClusterNamespace, clusterName, err) + } + } + if err := ensureNamespace(ctx, clientSets[f.CentralCluster], f.CentralClusterNamespace); err != nil { + return xerrors.Errorf("failed to ensure namespace %s in central cluster %s: %w", f.CentralClusterNamespace, f.CentralCluster, err) + } + return nil +} + +// EnsureMultiClusterResources copies the ServiceAccount Secret tokens from the specified +// member clusters, merges them into a KubeConfig file and creates a Secret in the central cluster +// with the contents. 
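+// The overall flow is: optional cleanup of previously labelled resources, ensuring the namespaces exist in every +// cluster, creating the ServiceAccounts and Roles (plus token Secrets when requested), collecting the per-cluster +// token Secrets, rendering them into a single kubeconfig that is stored as a Secret in the central cluster, and +// finally installing or copying the database roles depending on the flags.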
+func EnsureMultiClusterResources(ctx context.Context, flags Flags, clientMap map[string]KubeClient) error { + if flags.Cleanup { + if err := performCleanup(ctx, clientMap, flags); err != nil { + return xerrors.Errorf("failed performing Cleanup of resources: %w", err) + } + } + + if err := ensureAllClusterNamespacesExist(ctx, clientMap, flags); err != nil { + return xerrors.Errorf("failed ensuring namespaces: %w", err) + } + fmt.Println("Ensured namespaces exist in all clusters.") + + if err := createServiceAccountsAndRoles(ctx, clientMap, flags); err != nil { + return xerrors.Errorf("failed creating service accounts and roles in all clusters: %w", err) + } + fmt.Println("Ensured ServiceAccounts and Roles.") + + secrets, err := getAllWorkerClusterServiceAccountSecretTokens(ctx, clientMap, flags) + if err != nil { + return xerrors.Errorf("failed to get service account secret tokens: %w", err) + } + + if len(secrets) != len(flags.MemberClusters) { + return xerrors.Errorf("required %d serviceaccount tokens but found only %d\n", len(flags.MemberClusters), len(secrets)) + } + + kubeConfig, err := createKubeConfigFromServiceAccountTokens(secrets, flags) + if err != nil { + return xerrors.Errorf("failed to create kube config from service account tokens: %w", err) + } + + kubeConfigBytes, err := yaml.Marshal(kubeConfig) + if err != nil { + return xerrors.Errorf("failed to marshal kubeconfig: %w", err) + } + + centralClusterClient := clientMap[flags.CentralCluster] + + if err := createKubeConfigSecret(ctx, centralClusterClient, kubeConfigBytes, flags); err != nil { + return xerrors.Errorf("failed creating KubeConfig secret: %w", err) + } + + if flags.SourceCluster != "" { + if err := setupDatabaseRoles(ctx, clientMap, flags); err != nil { + return xerrors.Errorf("failed setting up database roles: %w", err) + } + fmt.Println("Ensured database Roles in member clusters.") + } else if flags.InstallDatabaseRoles { + if err := installDatabaseRoles(ctx, clientMap, flags); err != nil { + return xerrors.Errorf("failed installing database roles: %w", err) + } + fmt.Println("Ensured database Roles in member clusters.") + } + + return nil +} + +// createKubeConfigSecret creates the secret containing the KubeConfig file made from the various +// service account tokens in the member clusters. 
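+// The Secret is created as KubeConfigSecretName in the central cluster namespace with the rendered kubeconfig +// stored under the KubeConfigSecretKey key; if the Secret already exists it is updated in place.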
+func createKubeConfigSecret(ctx context.Context, centralClusterClient kubernetes.Interface, kubeConfigBytes []byte, flags Flags) error { + kubeConfigSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: KubeConfigSecretName, + Namespace: flags.CentralClusterNamespace, + Labels: multiClusterLabels(), + }, + Data: map[string][]byte{ + KubeConfigSecretKey: kubeConfigBytes, + }, + } + + fmt.Printf("Creating KubeConfig secret %s/%s in cluster %s\n", flags.CentralClusterNamespace, kubeConfigSecret.Name, flags.CentralCluster) + _, err := centralClusterClient.CoreV1().Secrets(flags.CentralClusterNamespace).Create(ctx, &kubeConfigSecret, metav1.CreateOptions{}) + + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("failed creating secret: %w", err) + } + + if errors.IsAlreadyExists(err) { + _, err = centralClusterClient.CoreV1().Secrets(flags.CentralClusterNamespace).Update(ctx, &kubeConfigSecret, metav1.UpdateOptions{}) + if err != nil { + return xerrors.Errorf("failed updating existing secret: %w", err) + } + } + + return nil +} + +func getCentralRules() []rbacv1.PolicyRule { + return []rbacv1.PolicyRule{ + { + Verbs: []string{"*"}, + Resources: []string{ + "mongodbmulticluster", "mongodbmulticluster/finalizers", "mongodbmulticluster/status", + "mongodbusers", "mongodbusers/status", + "opsmanagers", "opsmanagers/finalizers", "opsmanagers/status", + "mongodb", "mongodb/finalizers", "mongodb/status"}, + APIGroups: []string{"mongodb.com"}, + }, + } +} + +func buildCentralEntityRole(namespace string) rbacv1.Role { + rules := append(getCentralRules(), getMemberRules()...) + return rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mongodb-enterprise-operator-multi-role", + Namespace: namespace, + Labels: multiClusterLabels(), + }, + Rules: rules, + } +} + +func buildCentralEntityClusterRole() rbacv1.ClusterRole { + rules := append(getCentralRules(), getMemberRules()...) 
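+ // the cluster-scoped variant additionally needs to list and watch namespaces across the whole cluster: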
+ rules = append(rules, rbacv1.PolicyRule{ + Verbs: []string{"list", "watch"}, + Resources: []string{"namespaces"}, + APIGroups: []string{""}, + }) + + return rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mongodb-enterprise-operator-multi-cluster-role", + Labels: multiClusterLabels(), + }, + Rules: rules, + } +} +func getMemberRules() []rbacv1.PolicyRule { + return []rbacv1.PolicyRule{ + { + Verbs: []string{"get", "list", "create", "update", "delete", "watch", "deletecollection"}, + Resources: []string{"secrets", "configmaps", "services"}, + APIGroups: []string{""}, + }, + { + Verbs: []string{"get", "list", "create", "update", "delete", "watch", "deletecollection"}, + Resources: []string{"statefulsets"}, + APIGroups: []string{"apps"}, + }, + { + Verbs: []string{"get", "list", "watch"}, + Resources: []string{"pods"}, + APIGroups: []string{""}, + }, + } +} + +func buildMemberEntityRole(namespace string) rbacv1.Role { + return rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mongodb-enterprise-operator-multi-role", + Namespace: namespace, + Labels: multiClusterLabels(), + }, + Rules: getMemberRules(), + } +} + +func buildMemberEntityClusterRole() rbacv1.ClusterRole { + rules := append(getMemberRules(), rbacv1.PolicyRule{ + Verbs: []string{"list", "watch"}, + Resources: []string{"namespaces"}, + APIGroups: []string{""}, + }) + + return rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mongodb-enterprise-operator-multi-cluster-role", + Labels: multiClusterLabels(), + }, + Rules: rules, + } +} + +// buildRoleBinding creates the RoleBinding which binds the Role to the given ServiceAccount. +func buildRoleBinding(role rbacv1.Role, serviceAccount string) rbacv1.RoleBinding { + return rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mongodb-enterprise-operator-multi-role-binding", + Labels: multiClusterLabels(), + Namespace: role.Namespace, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: serviceAccount, + Namespace: role.Namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: role.Name, + APIGroup: "rbac.authorization.k8s.io", + }, + } +} + +// buildClusterRoleBinding creates the ClusterRoleBinding which binds the ClusterRole to the given ServiceAccount. +func buildClusterRoleBinding(clusterRole rbacv1.ClusterRole, sa corev1.ServiceAccount) rbacv1.ClusterRoleBinding { + return rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mongodb-enterprise-operator-multi-cluster-role-binding", + Labels: multiClusterLabels(), + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: sa.Name, + Namespace: sa.Namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: clusterRole.Name, + APIGroup: "rbac.authorization.k8s.io", + }, + } +} + +// createMemberServiceAccountAndRoles creates the ServiceAccount and Roles, RoleBindings, ClusterRoles and ClusterRoleBindings required +// for the member clusters. +func createMemberServiceAccountAndRoles(ctx context.Context, c kubernetes.Interface, f Flags) error { + return createServiceAccountAndRoles(ctx, c, f.ServiceAccount, f.MemberClusterNamespace, f.ClusterScoped, memberCluster) +} + +// createCentralClusterServiceAccountAndRoles creates the ServiceAccount and Roles, RoleBindings, ClusterRoles and ClusterRoleBindings required +// for the central cluster. +func createCentralClusterServiceAccountAndRoles(ctx context.Context, c kubernetes.Interface, f Flags) error { + // central cluster always uses Roles. Never Cluster Roles. 
+ return createServiceAccountAndRoles(ctx, c, f.ServiceAccount, f.CentralClusterNamespace, f.ClusterScoped, centralCluster) +} + +// createServiceAccountAndRoles creates the ServiceAccount and Roles, RoleBindings, ClusterRoles and ClusterRoleBindings required. +func createServiceAccountAndRoles(ctx context.Context, c kubernetes.Interface, serviceAccountName, namespace string, clusterScoped bool, clusterType clusterType) error { + sa := corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceAccountName, + Namespace: namespace, + Labels: multiClusterLabels(), + }, + ImagePullSecrets: []corev1.LocalObjectReference{ + {Name: "image-registries-secret"}, + }, + } + + _, err := c.CoreV1().ServiceAccounts(sa.Namespace).Create(ctx, &sa, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("error creating service account: %w", err) + } + + if !clusterScoped { + var role rbacv1.Role + if clusterType == centralCluster { + role = buildCentralEntityRole(sa.Namespace) + } else { + role = buildMemberEntityRole(sa.Namespace) + } + + _, err = c.RbacV1().Roles(sa.Namespace).Create(ctx, &role, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("error creating role: %w", err) + } + + roleBinding := buildRoleBinding(role, sa.Name) + _, err = c.RbacV1().RoleBindings(sa.Namespace).Create(ctx, &roleBinding, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("error creating role binding: %w", err) + } + return nil + } + + var clusterRole rbacv1.ClusterRole + if clusterType == centralCluster { + clusterRole = buildCentralEntityClusterRole() + } else { + clusterRole = buildMemberEntityClusterRole() + } + + _, err = c.RbacV1().ClusterRoles().Create(ctx, &clusterRole, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("error creating cluster role: %w", err) + } + fmt.Printf("created clusterrole: %s\n", clusterRole.Name) + + clusterRoleBinding := buildClusterRoleBinding(clusterRole, sa) + _, err = c.RbacV1().ClusterRoleBindings().Create(ctx, &clusterRoleBinding, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("error creating cluster role binding: %w", err) + } + fmt.Printf("created clusterrolebinding: %s\n", clusterRoleBinding.Name) + return nil +} + +// createServiceAccountsAndRoles creates the required ServiceAccounts in all member clusters. 
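+// A member cluster that is also the central cluster is skipped, because the central-cluster Role already includes +// the member rules; ServiceAccount token Secrets are only created when CreateServiceAccountSecrets is set.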
+func createServiceAccountsAndRoles(ctx context.Context, clientMap map[string]KubeClient, f Flags) error { + fmt.Printf("creating central cluster roles in cluster: %s\n", f.CentralCluster) + c := clientMap[f.CentralCluster] + if err := createCentralClusterServiceAccountAndRoles(ctx, c, f); err != nil { + return err + } + if f.CreateServiceAccountSecrets { + if err := createServiceAccountTokenSecret(ctx, c, f.CentralClusterNamespace, f.ServiceAccount); err != nil { + return err + } + } + + for _, memberCluster := range f.MemberClusters { + if memberCluster == f.CentralCluster { + fmt.Printf("skipping creation of member roles in cluster (it is also the central cluster): %s\n", memberCluster) + continue + } + fmt.Printf("creating member roles in cluster: %s\n", memberCluster) + c := clientMap[memberCluster] + if err := createMemberServiceAccountAndRoles(ctx, c, f); err != nil { + return err + } + if f.CreateServiceAccountSecrets { + if err := createServiceAccountTokenSecret(ctx, c, f.MemberClusterNamespace, f.ServiceAccount); err != nil { + return err + } + } + } + + return nil +} + +func createServiceAccountTokenSecret(ctx context.Context, c kubernetes.Interface, namespace string, serviceAccountName string) error { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-token-secret", serviceAccountName), + Namespace: namespace, + Annotations: map[string]string{ + "kubernetes.io/service-account.name": serviceAccountName, + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + } + + _, err := c.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("cannot create secret %+v: %w", *secret, err) + } + + return nil +} + +// createKubeConfigFromServiceAccountTokens builds up a KubeConfig from the ServiceAccount tokens provided. +func createKubeConfigFromServiceAccountTokens(serviceAccountTokens map[string]corev1.Secret, flags Flags) (KubeConfigFile, error) { + config := &KubeConfigFile{ + Kind: "Config", + ApiVersion: "v1", + } + + for i, clusterName := range flags.MemberClusters { + tokenSecret := serviceAccountTokens[clusterName] + ca, ok := tokenSecret.Data["ca.crt"] + if !ok { + return KubeConfigFile{}, xerrors.Errorf("key 'ca.crt' missing from token secret %s", tokenSecret.Name) + } + + token, ok := tokenSecret.Data["token"] + if !ok { + return KubeConfigFile{}, xerrors.Errorf("key 'token' missing from token secret %s", tokenSecret.Name) + } + + config.Clusters = append(config.Clusters, KubeConfigClusterItem{ + Name: clusterName, + Cluster: KubeConfigCluster{ + CertificateAuthorityData: ca, + Server: flags.MemberClusterApiServerUrls[i], + }, + }) + + ns := flags.MemberClusterNamespace + if flags.ClusterScoped { + ns = "" + } + + config.Contexts = append(config.Contexts, KubeConfigContextItem{ + Name: clusterName, + Context: KubeConfigContext{ + Cluster: clusterName, + Namespace: ns, + User: clusterName, + }, + }) + + config.Users = append(config.Users, KubeConfigUserItem{ + Name: clusterName, + User: KubeConfigUser{ + Token: string(token), + }, + }) + } + return *config, nil +} + +// getAllWorkerClusterServiceAccountSecretTokens returns a slice of secrets that should all be +// copied in the central cluster for the operator to use. 
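+// The result is keyed by member cluster name; for each cluster the token Secret is located by matching the +// "<service-account>-token" name prefix on Secrets in the member namespace.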
+func getAllWorkerClusterServiceAccountSecretTokens(ctx context.Context, clientSetMap map[string]KubeClient, flags Flags) (map[string]corev1.Secret, error) { + allSecrets := map[string]corev1.Secret{} + + for _, cluster := range flags.MemberClusters { + c := clientSetMap[cluster] + sas, err := getServiceAccounts(ctx, c, flags.MemberClusterNamespace) + if err != nil { + return nil, xerrors.Errorf("failed getting service accounts: %w", err) + } + + for _, sa := range sas { + if sa.Name == flags.ServiceAccount { + token, err := getServiceAccountToken(ctx, c, sa) + if err != nil { + return nil, xerrors.Errorf("failed getting service account token: %w", err) + } + allSecrets[cluster] = *token + } + } + } + return allSecrets, nil +} + +func getServiceAccounts(ctx context.Context, lister kubernetes.Interface, namespace string) ([]corev1.ServiceAccount, error) { + saList, err := lister.CoreV1().ServiceAccounts(namespace).List(ctx, metav1.ListOptions{}) + + if err != nil { + return nil, xerrors.Errorf("failed to list service accounts in member cluster namespace %s: %w", namespace, err) + } + return saList.Items, nil +} + +// getServiceAccountToken returns the Secret containing the ServiceAccount token +func getServiceAccountToken(ctx context.Context, secretLister KubeClient, sa corev1.ServiceAccount) (*corev1.Secret, error) { + secretList, err := secretLister.CoreV1().Secrets(sa.Namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, xerrors.Errorf("failed to list secrets in member cluster namespace %s: %w", sa.Namespace, err) + } + for _, secret := range secretList.Items { + // found the associated service account token. + if strings.HasPrefix(secret.Name, fmt.Sprintf("%s-token", sa.Name)) { + return &secret, nil + } + } + return nil, xerrors.Errorf("no service account token found for serviceaccount: %s", sa.Name) +} + +// copySecret copies a Secret from a source cluster to a target cluster +func copySecret(ctx context.Context, src, dst KubeClient, namespace, name string) error { + secret, err := src.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return xerrors.Errorf("failed retrieving secret: %s from source cluster: %w", name, err) + } + _, err = dst.CoreV1().Secrets(namespace).Create(ctx, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: secret.Labels, + }, + Data: secret.Data, + }, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return err + } + return nil +} + +func createServiceAccount(ctx context.Context, c KubeClient, serviceAccountName, namespace string) error { + sa := corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceAccountName, + Namespace: namespace, + Labels: multiClusterLabels(), + }, + } + + _, err := c.CoreV1().ServiceAccounts(sa.Namespace).Create(ctx, &sa, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("error creating service account: %w", err) + } + return nil +} + +func createDatabaseRole(ctx context.Context, c KubeClient, roleName, namespace string) error { + role := rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: namespace, + Labels: multiClusterLabels(), + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + Verbs: []string{"get"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"patch", "delete", "get"}, + }, + }, + } + roleBinding := rbacv1.RoleBinding{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: namespace, + Labels: multiClusterLabels(), + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: roleName, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: AppdbServiceAccount, + }, + }, + } + _, err := c.RbacV1().Roles(role.Namespace).Create(ctx, &role, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("error creating role: %w", err) + } + + _, err = c.RbacV1().RoleBindings(roleBinding.Namespace).Create(ctx, &roleBinding, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("error creating role binding: %w", err) + } + return nil +} + +// createDatabaseRoles creates the default ServiceAccounts, Roles and RoleBindings required for running database +// instances in a member cluster. +func createDatabaseRoles(ctx context.Context, client KubeClient, f Flags) error { + if err := createServiceAccount(ctx, client, AppdbServiceAccount, f.MemberClusterNamespace); err != nil { + return err + } + if err := createServiceAccount(ctx, client, DatabasePodsServiceAccount, f.MemberClusterNamespace); err != nil { + return err + } + if err := createServiceAccount(ctx, client, OpsManagerServiceAccount, f.MemberClusterNamespace); err != nil { + return err + } + if err := createDatabaseRole(ctx, client, AppdbRole, f.MemberClusterNamespace); err != nil { + return err + } + return nil +} + +// copyDatabaseRoles copies the ServiceAccounts, Roles and RoleBindings required for running database instances +// in a member cluster. This is used for adding new member clusters by copying over the configuration of a healthy +// source cluster. +func copyDatabaseRoles(ctx context.Context, src, dst KubeClient, namespace string) error { + appdbSA, err := src.CoreV1().ServiceAccounts(namespace).Get(ctx, AppdbServiceAccount, metav1.GetOptions{}) + if err != nil { + return xerrors.Errorf("failed retrieving service account %s from source cluster: %w", AppdbServiceAccount, err) + } + dbpodsSA, err := src.CoreV1().ServiceAccounts(namespace).Get(ctx, DatabasePodsServiceAccount, metav1.GetOptions{}) + if err != nil { + return xerrors.Errorf("failed retrieving service account %s from source cluster: %w", DatabasePodsServiceAccount, err) + } + opsManagerSA, err := src.CoreV1().ServiceAccounts(namespace).Get(ctx, OpsManagerServiceAccount, metav1.GetOptions{}) + if err != nil { + return xerrors.Errorf("failed retrieving service account %s from source cluster: %w", OpsManagerServiceAccount, err) + } + appdbR, err := src.RbacV1().Roles(namespace).Get(ctx, AppdbRole, metav1.GetOptions{}) + if err != nil { + return xerrors.Errorf("failed retrieving role %s from source cluster: %w", AppdbRole, err) + } + appdbRB, err := src.RbacV1().RoleBindings(namespace).Get(ctx, AppdbRoleBinding, metav1.GetOptions{}) + if err != nil { + return xerrors.Errorf("failed retrieving role binding %s from source cluster: %w", AppdbRoleBinding, err) + } + if len(appdbSA.ImagePullSecrets) > 0 { + if err := copySecret(ctx, src, dst, namespace, appdbSA.ImagePullSecrets[0].Name); err != nil { + fmt.Printf("failed creating image pull secret %s: %s\n", appdbSA.ImagePullSecrets[0].Name, err) + } + + } + if len(dbpodsSA.ImagePullSecrets) > 0 { + if err := copySecret(ctx, src, dst, namespace, dbpodsSA.ImagePullSecrets[0].Name); err != nil { + fmt.Printf("failed creating image pull secret %s: %s\n", dbpodsSA.ImagePullSecrets[0].Name, err) + } + } + if len(opsManagerSA.ImagePullSecrets) > 0 { + if 
err := copySecret(ctx, src, dst, namespace, opsManagerSA.ImagePullSecrets[0].Name); err != nil { + fmt.Printf("failed creating image pull secret %s: %s\n", opsManagerSA.ImagePullSecrets[0].Name, err) + } + } + _, err = dst.CoreV1().ServiceAccounts(namespace).Create(ctx, &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: appdbSA.Name, + Labels: appdbSA.Labels, + }, + ImagePullSecrets: appdbSA.DeepCopy().ImagePullSecrets, + }, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("error creating service account: %w", err) + } + _, err = dst.CoreV1().ServiceAccounts(namespace).Create(ctx, &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: dbpodsSA.Name, + Labels: dbpodsSA.Labels, + }, + ImagePullSecrets: dbpodsSA.DeepCopy().ImagePullSecrets, + }, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("error creating service account: %w", err) + + } + _, err = dst.CoreV1().ServiceAccounts(namespace).Create(ctx, &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: opsManagerSA.Name, + Labels: opsManagerSA.Labels, + }, + ImagePullSecrets: opsManagerSA.DeepCopy().ImagePullSecrets, + }, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("error creating service account: %w", err) + } + + _, err = dst.RbacV1().Roles(namespace).Create(ctx, &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: appdbR.Name, + Labels: appdbR.Labels, + }, + Rules: appdbR.DeepCopy().Rules, + }, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("error creating role: %w", err) + } + _, err = dst.RbacV1().RoleBindings(namespace).Create(ctx, &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: appdbRB.Name, + Labels: appdbRB.Labels, + }, + Subjects: appdbRB.DeepCopy().Subjects, + RoleRef: appdbRB.DeepCopy().RoleRef, + }, metav1.CreateOptions{}) + if !errors.IsAlreadyExists(err) && err != nil { + return xerrors.Errorf("error creating role binding: %w", err) + } + + return nil +} + +func installDatabaseRoles(ctx context.Context, clientSet map[string]KubeClient, f Flags) error { + for _, clusterName := range f.MemberClusters { + if err := createDatabaseRoles(ctx, clientSet[clusterName], f); err != nil { + return err + } + } + + return nil +} + +// setupDatabaseRoles installs the required database roles in the member clusters. +// The CommonFlags passed to the CLI must contain a healthy source member cluster which will be treated as +// the source of truth for all the member clusters. +func setupDatabaseRoles(ctx context.Context, clientSet map[string]KubeClient, f Flags) error { + for _, clusterName := range f.MemberClusters { + if clusterName != f.SourceCluster { + if err := copyDatabaseRoles(ctx, clientSet[f.SourceCluster], clientSet[clusterName], f.MemberClusterNamespace); err != nil { + return err + } + } + } + + return nil +} + +// ReplaceClusterMembersConfigMap creates the configmap used by the operator to know which clusters are members of the multi-cluster setup. +// This will replace the existing configmap. 
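+// The ConfigMap data is a set-like map with one empty-string entry per member cluster name (see addToSet below).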
+// NOTE: the configmap is hardcoded to be DefaultOperatorConfigMapName +func ReplaceClusterMembersConfigMap(ctx context.Context, centralClusterClient KubeClient, flags Flags) error { + members := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: DefaultOperatorConfigMapName, + Namespace: flags.CentralClusterNamespace, + Labels: multiClusterLabels(), + }, + Data: map[string]string{}, + } + + addToSet(flags.MemberClusters, &members) + + fmt.Printf("Creating Member list Configmap %s/%s in cluster %s\n", flags.CentralClusterNamespace, DefaultOperatorConfigMapName, flags.CentralCluster) + _, err := centralClusterClient.CoreV1().ConfigMaps(flags.CentralClusterNamespace).Create(ctx, &members, metav1.CreateOptions{}) + + if err != nil && !errors.IsAlreadyExists(err) { + return xerrors.Errorf("failed creating secret: %w", err) + } + + if errors.IsAlreadyExists(err) { + if _, err := centralClusterClient.CoreV1().ConfigMaps(flags.CentralClusterNamespace).Update(ctx, &members, metav1.UpdateOptions{}); err != nil { + return xerrors.Errorf("error creating configmap: %w", err) + } + } + + return nil +} + +func addToSet(memberClusters []string, into *corev1.ConfigMap) { + // override or add + for _, memberCluster := range memberClusters { + into.Data[memberCluster] = "" + } +} diff --git a/tools/multicluster/pkg/common/common_test.go b/tools/multicluster/pkg/common/common_test.go new file mode 100644 index 0000000..1f72bae --- /dev/null +++ b/tools/multicluster/pkg/common/common_test.go @@ -0,0 +1,887 @@ +package common + +import ( + "bytes" + "context" + "fmt" + "os" + "strings" + "testing" + + "github.com/ghodss/yaml" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/clientcmd" +) + +const testKubeconfig = `apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: ZHNqaA== + server: https://api.member-cluster-0 + name: member-cluster-0 +- cluster: + certificate-authority-data: ZHNqaA== + server: https://api.member-cluster-1 + name: member-cluster-1 +- cluster: + certificate-authority-data: ZHNqaA== + server: https://api.member-cluster-2 + name: member-cluster-2 +contexts: +- context: + cluster: member-cluster-0 + namespace: citi + user: member-cluster-0 + name: member-cluster-0 +- context: + cluster: member-cluster-1 + namespace: citi + user: member-cluster-1 + name: member-cluster-1 +- context: + cluster: member-cluster-2 + namespace: citi + user: member-cluster-2 + name: member-cluster-2 +current-context: member-cluster-0 +kind: Config +preferences: {} +users: +- name: member-cluster-0 + user: + client-certificate-data: ZHNqaA== + client-key-data: ZHNqaA== +` + +func testFlags(t *testing.T, cleanup bool) Flags { + memberClusters := []string{"member-cluster-0", "member-cluster-1", "member-cluster-2"} + kubeconfig, err := clientcmd.Load([]byte(testKubeconfig)) + assert.NoError(t, err) + + memberClusterApiServerUrls, err := GetMemberClusterApiServerUrls(kubeconfig, memberClusters) + assert.NoError(t, err) + + return Flags{ + MemberClusterApiServerUrls: memberClusterApiServerUrls, + MemberClusters: memberClusters, + ServiceAccount: "test-service-account", + CentralCluster: "central-cluster", + MemberClusterNamespace: "member-namespace", + CentralClusterNamespace: "central-namespace", + Cleanup: cleanup, + ClusterScoped: false, + OperatorName: 
"mongodb-enterprise-operator", + } + +} + +func TestNamespaces_GetsCreated_WhenTheyDoNotExit(t *testing.T) { + flags := testFlags(t, false) + clientMap := getClientResources(flags) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + assert.NoError(t, err) + + assertMemberClusterNamespacesExist(t, clientMap, flags) + assertCentralClusterNamespacesExist(t, clientMap, flags) +} + +func TestExistingNamespaces_DoNotCause_AlreadyExistsErrors(t *testing.T) { + flags := testFlags(t, false) + clientMap := getClientResources(flags, namespaceResourceType) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + assert.NoError(t, err) + + assertMemberClusterNamespacesExist(t, clientMap, flags) + assertCentralClusterNamespacesExist(t, clientMap, flags) +} + +func TestServiceAccount_GetsCreate_WhenTheyDoNotExit(t *testing.T) { + flags := testFlags(t, false) + clientMap := getClientResources(flags) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + assert.NoError(t, err) + assertServiceAccountsExist(t, clientMap, flags) +} + +func TestExistingServiceAccounts_DoNotCause_AlreadyExistsErrors(t *testing.T) { + flags := testFlags(t, false) + clientMap := getClientResources(flags, serviceAccountResourceType) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + assert.NoError(t, err) + assertServiceAccountsExist(t, clientMap, flags) +} + +func TestDatabaseRoles_GetCreated(t *testing.T) { + flags := testFlags(t, false) + flags.ClusterScoped = true + flags.InstallDatabaseRoles = true + + clientMap := getClientResources(flags) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + assert.NoError(t, err) + assertDatabaseRolesExist(t, clientMap, flags) +} + +func TestRoles_GetsCreated_WhenTheyDoesNotExit(t *testing.T) { + flags := testFlags(t, false) + clientMap := getClientResources(flags) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + assert.NoError(t, err) + assertMemberRolesExist(t, clientMap, flags) +} + +func TestExistingRoles_DoNotCause_AlreadyExistsErrors(t *testing.T) { + flags := testFlags(t, false) + clientMap := getClientResources(flags, roleResourceType) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + assert.NoError(t, err) + assertMemberRolesExist(t, clientMap, flags) +} + +func TestClusterRoles_DoNotGetCreated_WhenNotSpecified(t *testing.T) { + flags := testFlags(t, false) + flags.ClusterScoped = false + + clientMap := getClientResources(flags) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + assert.NoError(t, err) + assertMemberRolesExist(t, clientMap, flags) + assertCentralRolesExist(t, clientMap, flags) +} + +func TestClusterRoles_GetCreated_WhenSpecified(t *testing.T) { + flags := testFlags(t, false) + flags.ClusterScoped = true + + clientMap := getClientResources(flags) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + assert.NoError(t, err) + assertMemberRolesDoNotExist(t, clientMap, flags) + assertMemberClusterRolesExist(t, clientMap, flags) +} + +func TestCentralCluster_GetsRegularRoleCreated_WhenClusterScoped_IsSpecified(t *testing.T) { + flags := testFlags(t, false) + flags.ClusterScoped = true + + clientMap := getClientResources(flags) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + assert.NoError(t, err) +} + +func TestCentralCluster_GetsRegularRoleCreated_WhenNonClusterScoped_IsSpecified(t *testing.T) { + flags := testFlags(t, false) + 
flags.ClusterScoped = false + + clientMap := getClientResources(flags) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + assert.NoError(t, err) + assertCentralRolesExist(t, clientMap, flags) +} + +func TestPerformCleanup(t *testing.T) { + flags := testFlags(t, true) + flags.ClusterScoped = true + + clientMap := getClientResources(flags) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + assert.NoError(t, err) + + t.Run("Resources get created with labels", func(t *testing.T) { + assertMemberClusterRolesExist(t, clientMap, flags) + assertMemberClusterNamespacesExist(t, clientMap, flags) + assertCentralClusterNamespacesExist(t, clientMap, flags) + assertServiceAccountsExist(t, clientMap, flags) + }) + + err = performCleanup(context.TODO(), clientMap, flags) + assert.NoError(t, err) + + t.Run("Resources with labels are removed", func(t *testing.T) { + assertMemberRolesDoNotExist(t, clientMap, flags) + assertMemberClusterRolesDoNotExist(t, clientMap, flags) + assertCentralRolesDoNotExist(t, clientMap, flags) + }) + + t.Run("Namespaces are preserved", func(t *testing.T) { + assertMemberClusterNamespacesExist(t, clientMap, flags) + assertCentralClusterNamespacesExist(t, clientMap, flags) + }) + +} + +func TestCreateKubeConfig_IsComposedOf_ServiceAccountTokens_InAllClusters(t *testing.T) { + flags := testFlags(t, false) + clientMap := getClientResources(flags) + + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + assert.NoError(t, err) + + kubeConfig, err := readKubeConfig(clientMap[flags.CentralCluster], flags.CentralClusterNamespace) + assert.NoError(t, err) + + assert.Equal(t, "Config", kubeConfig.Kind) + assert.Equal(t, "v1", kubeConfig.ApiVersion) + assert.Len(t, kubeConfig.Contexts, len(flags.MemberClusters)) + assert.Len(t, kubeConfig.Clusters, len(flags.MemberClusters)) + + for i, kubeConfigCluster := range kubeConfig.Clusters { + assert.Equal(t, flags.MemberClusters[i], kubeConfigCluster.Name, "Name of cluster should be set to the member clusters.") + expectedCaBytes, err := readSecretKey(clientMap[flags.MemberClusters[i]], fmt.Sprintf("%s-token", flags.ServiceAccount), flags.MemberClusterNamespace, "ca.crt") + + assert.NoError(t, err) + assert.Contains(t, string(expectedCaBytes), flags.MemberClusters[i]) + assert.Equal(t, 0, bytes.Compare(expectedCaBytes, kubeConfigCluster.Cluster.CertificateAuthorityData), "CA should be read from Service Account token Secret.") + assert.Equal(t, fmt.Sprintf("https://api.%s", flags.MemberClusters[i]), kubeConfigCluster.Cluster.Server, "Server should be correctly configured based on cluster name.") + } + + for i, user := range kubeConfig.Users { + tokenBytes, err := readSecretKey(clientMap[flags.MemberClusters[i]], fmt.Sprintf("%s-token", flags.ServiceAccount), flags.MemberClusterNamespace, "token") + assert.NoError(t, err) + assert.Equal(t, flags.MemberClusters[i], user.Name, "User name should be the name of the cluster.") + assert.Equal(t, string(tokenBytes), user.User.Token, "Token from the service account secret should be set.") + } + +} + +func TestKubeConfigSecret_IsCreated_InCentralCluster(t *testing.T) { + flags := testFlags(t, false) + clientMap := getClientResources(flags) + + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + assert.NoError(t, err) + + centralClusterClient := clientMap[flags.CentralCluster] + kubeConfigSecret, err := centralClusterClient.CoreV1().Secrets(flags.CentralClusterNamespace).Get(context.TODO(), KubeConfigSecretName, 
metav1.GetOptions{}) + + assert.NoError(t, err) + assert.NotNil(t, kubeConfigSecret) +} + +func TestKubeConfigSecret_IsNotCreated_InMemberClusters(t *testing.T) { + flags := testFlags(t, false) + clientMap := getClientResources(flags) + + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + assert.NoError(t, err) + + for _, memberCluster := range flags.MemberClusters { + memberClient := clientMap[memberCluster] + kubeConfigSecret, err := memberClient.CoreV1().Secrets(flags.CentralClusterNamespace).Get(context.TODO(), KubeConfigSecretName, metav1.GetOptions{}) + assert.True(t, errors.IsNotFound(err)) + assert.Nil(t, kubeConfigSecret) + } +} + +func TestChangingOneServiceAccountToken_ChangesOnlyThatEntry_InKubeConfig(t *testing.T) { + flags := testFlags(t, false) + clientMap := getClientResources(flags) + + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + assert.NoError(t, err) + + kubeConfigBefore, err := readKubeConfig(clientMap[flags.CentralCluster], flags.CentralClusterNamespace) + assert.NoError(t, err) + + firstClusterClient := clientMap[flags.MemberClusters[0]] + + // simulate a service account token changing, re-running the script should leave the other clusters unchanged. + newServiceAccountToken := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-token", flags.ServiceAccount), + Namespace: flags.MemberClusterNamespace, + }, + Data: map[string][]byte{ + "token": []byte("new-token-data"), + "ca.crt": []byte("new-ca-crt"), + }, + } + + _, err = firstClusterClient.CoreV1().Secrets(flags.MemberClusterNamespace).Update(context.TODO(), &newServiceAccountToken, metav1.UpdateOptions{}) + assert.NoError(t, err) + + err = EnsureMultiClusterResources(context.TODO(), flags, clientMap) + assert.NoError(t, err) + + kubeConfigAfter, err := readKubeConfig(clientMap[flags.CentralCluster], flags.CentralClusterNamespace) + assert.NoError(t, err) + + assert.NotEqual(t, kubeConfigBefore.Users[0], kubeConfigAfter.Users[0], "Cluster 0 users should have been modified.") + assert.NotEqual(t, kubeConfigBefore.Clusters[0], kubeConfigAfter.Clusters[0], "Cluster 1 clusters should have been modified") + + assert.Equal(t, "new-token-data", kubeConfigAfter.Users[0].User.Token, "first user token should have been updated.") + assert.Equal(t, []byte("new-ca-crt"), kubeConfigAfter.Clusters[0].Cluster.CertificateAuthorityData, "CA for cluster 0 should have been updated.") + + assert.Equal(t, kubeConfigBefore.Users[1], kubeConfigAfter.Users[1], "Cluster 1 users should have remained unchanged") + assert.Equal(t, kubeConfigBefore.Clusters[1], kubeConfigAfter.Clusters[1], "Cluster 1 clusters should have remained unchanged") + + assert.Equal(t, kubeConfigBefore.Users[2], kubeConfigAfter.Users[2], "Cluster 2 users should have remained unchanged") + assert.Equal(t, kubeConfigBefore.Clusters[2], kubeConfigAfter.Clusters[2], "Cluster 2 clusters should have remained unchanged") +} + +func TestGetMemberClusterApiServerUrls(t *testing.T) { + t.Run("Test comma separated string returns correct values", func(t *testing.T) { + kubeconfig, err := clientcmd.Load([]byte(testKubeconfig)) + assert.NoError(t, err) + + apiUrls, err := GetMemberClusterApiServerUrls(kubeconfig, []string{"member-cluster-0", "member-cluster-1", "member-cluster-2"}) + assert.Nil(t, err) + assert.Len(t, apiUrls, 3) + assert.Equal(t, apiUrls[0], "https://api.member-cluster-0") + assert.Equal(t, apiUrls[1], "https://api.member-cluster-1") + assert.Equal(t, apiUrls[2], "https://api.member-cluster-2") + }) 
+
+	t.Run("Test missing cluster lookup returns error", func(t *testing.T) {
+		kubeconfig, err := clientcmd.Load([]byte(testKubeconfig))
+		assert.NoError(t, err)
+
+		_, err = GetMemberClusterApiServerUrls(kubeconfig, []string{"member-cluster-0", "member-cluster-1", "member-cluster-missing"})
+		assert.Error(t, err)
+	})
+}
+
+func TestMemberClusterUris(t *testing.T) {
+	t.Run("Uses server values set in CommonFlags", func(t *testing.T) {
+		flags := testFlags(t, false)
+		flags.MemberClusterApiServerUrls = []string{"cluster1-url", "cluster2-url", "cluster3-url"}
+		clientMap := getClientResources(flags)
+
+		err := EnsureMultiClusterResources(context.TODO(), flags, clientMap)
+		assert.NoError(t, err)
+
+		kubeConfig, err := readKubeConfig(clientMap[flags.CentralCluster], flags.CentralClusterNamespace)
+		assert.NoError(t, err)
+
+		for i, c := range kubeConfig.Clusters {
+			assert.Equal(t, flags.MemberClusterApiServerUrls[i], c.Cluster.Server)
+		}
+	})
+}
+
+func TestReplaceClusterMembersConfigMap(t *testing.T) {
+	flags := testFlags(t, false)
+
+	clientMap := getClientResources(flags)
+	client := clientMap[flags.CentralCluster]
+
+	{
+		flags.MemberClusters = []string{"member-1", "member-2", "member-3", "member-4"}
+		err := ReplaceClusterMembersConfigMap(context.Background(), client, flags)
+		assert.NoError(t, err)
+
+		cm, err := client.CoreV1().ConfigMaps(flags.CentralClusterNamespace).Get(context.Background(), DefaultOperatorConfigMapName, metav1.GetOptions{})
+		assert.NoError(t, err)
+
+		expected := map[string]string{}
+		for _, cluster := range flags.MemberClusters {
+			expected[cluster] = ""
+		}
+		assert.Equal(t, cm.Data, expected)
+	}
+
+	{
+		flags.MemberClusters = []string{"member-1", "member-2"}
+		err := ReplaceClusterMembersConfigMap(context.Background(), client, flags)
+		assert.NoError(t, err)
+
+		cm, err := client.CoreV1().ConfigMaps(flags.CentralClusterNamespace).Get(context.Background(), DefaultOperatorConfigMapName, metav1.GetOptions{})
+		assert.NoError(t, err)
+
+		expected := map[string]string{}
+		for _, cluster := range flags.MemberClusters {
+			expected[cluster] = ""
+		}
+
+		assert.Equal(t, cm.Data, expected)
+	}
+}
+
+// TestPrintingOutRolesServiceAccountsAndRoleBindings is not an ordinary test. It updates the RBAC samples in the
+// samples/multi-cluster-cli-gitops/resources/rbac directory. By default, this test is not executed. If you intend to
+// run it, set the EXPORT_RBAC_SAMPLES environment variable to "true".
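+//
+// A typical invocation (illustrative only; the package path is taken from this repository layout) run from the
+// tools/multicluster directory would be:
+//
+//	EXPORT_RBAC_SAMPLES=true go test ./pkg/common/... -run TestPrintingOutRolesServiceAccountsAndRoleBindings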
+func TestPrintingOutRolesServiceAccountsAndRoleBindings(t *testing.T) { + if os.Getenv("EXPORT_RBAC_SAMPLES") != "true" { + t.Skip("Skipping as EXPORT_RBAC_SAMPLES is false") + } + + flags := testFlags(t, false) + flags.ClusterScoped = true + flags.InstallDatabaseRoles = true + + { + sb := &strings.Builder{} + clientMap := getClientResources(flags) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + cr, err := clientMap[flags.CentralCluster].RbacV1().ClusterRoles().List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + crb, err := clientMap[flags.CentralCluster].RbacV1().ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + sa, err := clientMap[flags.CentralCluster].CoreV1().ServiceAccounts(flags.CentralClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + + sb = marshalToYaml(t, sb, "Central Cluster, cluster-scoped resources", "rbac.authorization.k8s.io/v1", "ClusterRole", cr.Items) + sb = marshalToYaml(t, sb, "Central Cluster, cluster-scoped resources", "rbac.authorization.k8s.io/v1", "ClusterRoleBinding", crb.Items) + sb = marshalToYaml(t, sb, "Central Cluster, cluster-scoped resources", "v1", "ServiceAccount", sa.Items) + + os.WriteFile("../../samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_central_cluster.yaml", []byte(sb.String()), os.ModePerm) + } + + { + sb := &strings.Builder{} + clientMap := getClientResources(flags) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + cr, err := clientMap[flags.MemberClusters[0]].RbacV1().ClusterRoles().List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + crb, err := clientMap[flags.MemberClusters[0]].RbacV1().ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + sa, err := clientMap[flags.MemberClusters[0]].CoreV1().ServiceAccounts(flags.MemberClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + + sb = marshalToYaml(t, sb, "Member Cluster, cluster-scoped resources", "rbac.authorization.k8s.io/v1", "ClusterRole", cr.Items) + sb = marshalToYaml(t, sb, "Member Cluster, cluster-scoped resources", "rbac.authorization.k8s.io/v1", "ClusterRoleBinding", crb.Items) + sb = marshalToYaml(t, sb, "Member Cluster, cluster-scoped resources", "v1", "ServiceAccount", sa.Items) + + os.WriteFile("../../samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_member_cluster.yaml", []byte(sb.String()), os.ModePerm) + } + + { + sb := &strings.Builder{} + flags.ClusterScoped = false + + clientMap := getClientResources(flags) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + r, err := clientMap[flags.CentralCluster].RbacV1().Roles(flags.CentralClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + rb, err := clientMap[flags.CentralCluster].RbacV1().RoleBindings(flags.CentralClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + sa, err := clientMap[flags.CentralCluster].CoreV1().ServiceAccounts(flags.CentralClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + + sb = marshalToYaml(t, sb, "Central Cluster, namespace-scoped resources", "rbac.authorization.k8s.io/v1", "Role", r.Items) + sb = marshalToYaml(t, sb, "Central Cluster, namespace-scoped resources", "rbac.authorization.k8s.io/v1", "RoleBinding", rb.Items) + sb = marshalToYaml(t, sb, "Central Cluster, namespace-scoped resources", "v1", "ServiceAccount", sa.Items) + + 
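+		// The rendered manifests are written straight into the gitops RBAC samples directory so that the
+		// committed samples stay in sync with what the CLI actually creates.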
os.WriteFile("../../samples/multi-cluster-cli-gitops/resources/rbac/namespace_scoped_central_cluster.yaml", []byte(sb.String()), os.ModePerm) + } + + { + sb := &strings.Builder{} + flags.ClusterScoped = false + + clientMap := getClientResources(flags) + err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + + r, err := clientMap[flags.MemberClusters[0]].RbacV1().Roles(flags.MemberClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + rb, err := clientMap[flags.MemberClusters[0]].RbacV1().RoleBindings(flags.MemberClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + assert.NoError(t, err) + sa, err := clientMap[flags.MemberClusters[0]].CoreV1().ServiceAccounts(flags.MemberClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + + sb = marshalToYaml(t, sb, "Member Cluster, namespace-scoped resources", "rbac.authorization.k8s.io/v1", "Role", r.Items) + sb = marshalToYaml(t, sb, "Member Cluster, namespace-scoped resources", "rbac.authorization.k8s.io/v1", "RoleBinding", rb.Items) + sb = marshalToYaml(t, sb, "Member Cluster, namespace-scoped resources", "v1", "ServiceAccount", sa.Items) + + os.WriteFile("../../samples/multi-cluster-cli-gitops/resources/rbac/namespace_scoped_member_cluster.yaml", []byte(sb.String()), os.ModePerm) + } +} + +func marshalToYaml[T interface{}](t *testing.T, sb *strings.Builder, comment string, apiVersion string, kind string, items []T) *strings.Builder { + sb.WriteString(fmt.Sprintf("# %s\n", comment)) + for _, cr := range items { + sb.WriteString(fmt.Sprintf("apiVersion: %s\n", apiVersion)) + sb.WriteString(fmt.Sprintf("kind: %s\n", kind)) + bytes, err := yaml.Marshal(cr) + assert.NoError(t, err) + sb.WriteString(string(bytes)) + sb.WriteString("\n---\n") + } + return sb +} + +func TestConvertToSet(t *testing.T) { + type args struct { + memberClusters []string + cm *corev1.ConfigMap + } + tests := []struct { + name string + args args + expected map[string]string + }{ + { + name: "new members", + args: args{ + memberClusters: []string{"kind-1", "kind-2", "kind-3"}, + cm: &corev1.ConfigMap{Data: map[string]string{}}, + }, + expected: map[string]string{"kind-1": "", "kind-2": "", "kind-3": ""}, + }, + { + name: "one override and one new", + args: args{ + memberClusters: []string{"kind-1", "kind-2", "kind-3"}, + cm: &corev1.ConfigMap{Data: map[string]string{"kind-1": "", "kind-0": ""}}, + }, + expected: map[string]string{"kind-1": "", "kind-2": "", "kind-3": "", "kind-0": ""}, + }, + { + name: "one new ones", + args: args{ + memberClusters: []string{}, + cm: &corev1.ConfigMap{Data: map[string]string{"kind-1": "", "kind-0": ""}}, + }, + expected: map[string]string{"kind-1": "", "kind-0": ""}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + addToSet(tt.args.memberClusters, tt.args.cm) + assert.Equal(t, tt.expected, tt.args.cm.Data) + }) + } +} + +// assertMemberClusterNamespacesExist asserts the Namespace in the member clusters exists. 
+func assertMemberClusterNamespacesExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) {
+	for _, clusterName := range flags.MemberClusters {
+		client := clientMap[clusterName]
+		ns, err := client.CoreV1().Namespaces().Get(context.TODO(), flags.MemberClusterNamespace, metav1.GetOptions{})
+		assert.NoError(t, err)
+		assert.NotNil(t, ns)
+		assert.Equal(t, flags.MemberClusterNamespace, ns.Name)
+		assert.Equal(t, ns.Labels, multiClusterLabels())
+	}
+}
+
+// assertCentralClusterNamespacesExist asserts the Namespace in the central cluster exists.
+func assertCentralClusterNamespacesExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) {
+	client := clientMap[flags.CentralCluster]
+	ns, err := client.CoreV1().Namespaces().Get(context.TODO(), flags.CentralClusterNamespace, metav1.GetOptions{})
+	assert.NoError(t, err)
+	assert.NotNil(t, ns)
+	assert.Equal(t, flags.CentralClusterNamespace, ns.Name)
+	assert.Equal(t, ns.Labels, multiClusterLabels())
+}
+
+// assertServiceAccountsExist asserts the ServiceAccounts are created as expected.
+func assertServiceAccountsExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) {
+	for _, clusterName := range flags.MemberClusters {
+		client := clientMap[clusterName]
+		sa, err := client.CoreV1().ServiceAccounts(flags.MemberClusterNamespace).Get(context.TODO(), flags.ServiceAccount, metav1.GetOptions{})
+		assert.NoError(t, err)
+		assert.NotNil(t, sa)
+		assert.Equal(t, flags.ServiceAccount, sa.Name)
+		assert.Equal(t, sa.Labels, multiClusterLabels())
+	}
+
+	client := clientMap[flags.CentralCluster]
+	sa, err := client.CoreV1().ServiceAccounts(flags.CentralClusterNamespace).Get(context.TODO(), flags.ServiceAccount, metav1.GetOptions{})
+	assert.NoError(t, err)
+	assert.NotNil(t, sa)
+	assert.Equal(t, flags.ServiceAccount, sa.Name)
+	assert.Equal(t, sa.Labels, multiClusterLabels())
+}
+
+// assertDatabaseRolesExist asserts the DatabaseRoles are created as expected.
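+// For every member cluster it checks the AppDB, database-pods and Ops Manager ServiceAccounts as well as the
+// AppDB Role and RoleBinding created by the CLI.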
+func assertDatabaseRolesExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { + for _, clusterName := range flags.MemberClusters { + client := clientMap[clusterName] + + // appDB service account + sa, err := client.CoreV1().ServiceAccounts(flags.MemberClusterNamespace).Get(context.TODO(), AppdbServiceAccount, metav1.GetOptions{}) + assert.NoError(t, err) + assert.NotNil(t, sa) + assert.Equal(t, sa.Labels, multiClusterLabels()) + + // database pods service account + sa, err = client.CoreV1().ServiceAccounts(flags.MemberClusterNamespace).Get(context.TODO(), DatabasePodsServiceAccount, metav1.GetOptions{}) + assert.NoError(t, err) + assert.NotNil(t, sa) + assert.Equal(t, sa.Labels, multiClusterLabels()) + + // ops manager service account + sa, err = client.CoreV1().ServiceAccounts(flags.MemberClusterNamespace).Get(context.TODO(), OpsManagerServiceAccount, metav1.GetOptions{}) + assert.NoError(t, err) + assert.NotNil(t, sa) + assert.Equal(t, sa.Labels, multiClusterLabels()) + + // appdb role + r, err := client.RbacV1().Roles(flags.MemberClusterNamespace).Get(context.TODO(), AppdbRole, metav1.GetOptions{}) + assert.NoError(t, err) + assert.NotNil(t, r) + assert.Equal(t, r.Labels, multiClusterLabels()) + assert.Equal(t, []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + Verbs: []string{"get"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"patch", "delete", "get"}, + }, + }, r.Rules) + + // appdb rolebinding + rb, err := client.RbacV1().RoleBindings(flags.MemberClusterNamespace).Get(context.TODO(), AppdbRoleBinding, metav1.GetOptions{}) + assert.NoError(t, err) + assert.NotNil(t, r) + assert.Equal(t, rb.Labels, multiClusterLabels()) + assert.Equal(t, []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: AppdbServiceAccount, + }, + }, rb.Subjects) + assert.Equal(t, rbacv1.RoleRef{ + Kind: "Role", + Name: AppdbRole, + }, rb.RoleRef) + } +} + +// assertMemberClusterRolesExist should be used when member cluster cluster roles should exist. +func assertMemberClusterRolesExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { + assertClusterRoles(t, clientMap, flags, true, memberCluster) +} + +// assertMemberClusterRolesDoNotExist should be used when member cluster cluster roles should not exist. +func assertMemberClusterRolesDoNotExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { + assertClusterRoles(t, clientMap, flags, false, centralCluster) +} + +// assertClusterRoles should be used to assert the existence of member cluster cluster roles. The boolean +// shouldExist should be true for roles existing, and false for cluster roles not existing. 
+func assertClusterRoles(t *testing.T, clientMap map[string]KubeClient, flags Flags, shouldExist bool, clusterType clusterType) { + var expectedClusterRole rbacv1.ClusterRole + if clusterType == centralCluster { + expectedClusterRole = buildCentralEntityClusterRole() + } else { + expectedClusterRole = buildMemberEntityClusterRole() + } + + for _, clusterName := range flags.MemberClusters { + client := clientMap[clusterName] + role, err := client.RbacV1().ClusterRoles().Get(context.TODO(), expectedClusterRole.Name, metav1.GetOptions{}) + if shouldExist { + assert.NoError(t, err) + assert.NotNil(t, role) + assert.Equal(t, expectedClusterRole, *role) + } else { + assert.Error(t, err) + assert.Nil(t, role) + } + } + + clusterRole, err := clientMap[flags.CentralCluster].RbacV1().ClusterRoles().Get(context.TODO(), expectedClusterRole.Name, metav1.GetOptions{}) + if shouldExist { + assert.Nil(t, err) + assert.NotNil(t, clusterRole) + } else { + assert.Error(t, err) + } +} + +// assertMemberRolesExist should be used when member cluster roles should exist. +func assertMemberRolesExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { + assertMemberRolesAreCorrect(t, clientMap, flags, true) +} + +// assertMemberRolesDoNotExist should be used when member cluster roles should not exist. +func assertMemberRolesDoNotExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { + assertMemberRolesAreCorrect(t, clientMap, flags, false) +} + +// assertMemberRolesAreCorrect should be used to assert the existence of member cluster roles. The boolean +// shouldExist should be true for roles existing, and false for roles not existing. +func assertMemberRolesAreCorrect(t *testing.T, clientMap map[string]KubeClient, flags Flags, shouldExist bool) { + expectedRole := buildMemberEntityRole(flags.MemberClusterNamespace) + + for _, clusterName := range flags.MemberClusters { + client := clientMap[clusterName] + role, err := client.RbacV1().Roles(flags.MemberClusterNamespace).Get(context.TODO(), expectedRole.Name, metav1.GetOptions{}) + if shouldExist { + assert.NoError(t, err) + assert.NotNil(t, role) + assert.Equal(t, expectedRole, *role) + } else { + assert.Error(t, err) + assert.Nil(t, role) + } + } +} + +// assertCentralRolesExist should be used when central cluster roles should exist. +func assertCentralRolesExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { + assertCentralRolesAreCorrect(t, clientMap, flags, true) +} + +// assertCentralRolesDoNotExist should be used when central cluster roles should not exist. +func assertCentralRolesDoNotExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { + assertCentralRolesAreCorrect(t, clientMap, flags, false) +} + +// assertCentralRolesAreCorrect should be used to assert the existence of central cluster roles. The boolean +// shouldExist should be true for roles existing, and false for roles not existing. 
+func assertCentralRolesAreCorrect(t *testing.T, clientMap map[string]KubeClient, flags Flags, shouldExist bool) { + client := clientMap[flags.CentralCluster] + + // should never have a cluster role + clusterRole := buildCentralEntityClusterRole() + cr, err := client.RbacV1().ClusterRoles().Get(context.TODO(), clusterRole.Name, metav1.GetOptions{}) + + assert.True(t, errors.IsNotFound(err)) + assert.Nil(t, cr) + + expectedRole := buildCentralEntityRole(flags.CentralClusterNamespace) + role, err := client.RbacV1().Roles(flags.CentralClusterNamespace).Get(context.TODO(), expectedRole.Name, metav1.GetOptions{}) + + if shouldExist { + assert.NoError(t, err, "should always create a role for central cluster") + assert.NotNil(t, role) + assert.Equal(t, expectedRole, *role) + } else { + assert.Error(t, err) + assert.Nil(t, role) + } +} + +// resourceType indicates a type of resource that is created during the tests. +type resourceType string + +var ( + serviceAccountResourceType resourceType = "ServiceAccount" + namespaceResourceType resourceType = "Namespace" + roleBindingResourceType resourceType = "RoleBinding" + roleResourceType resourceType = "Role" +) + +// createResourcesForCluster returns the resources specified based on the provided resourceTypes. +// this function is used to populate subsets of resources for the unit tests. +func createResourcesForCluster(centralCluster bool, flags Flags, clusterName string, resourceTypes ...resourceType) []runtime.Object { + var namespace = flags.MemberClusterNamespace + if centralCluster { + namespace = flags.CentralCluster + } + + resources := make([]runtime.Object, 0) + + // always create the service account token secret as this gets created by + // kubernetes, we can just assume it is always there for tests. + resources = append(resources, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-token", flags.ServiceAccount), + Namespace: namespace, + }, + Data: map[string][]byte{ + "ca.crt": []byte(fmt.Sprintf("ca-cert-data-%s", clusterName)), + "token": []byte(fmt.Sprintf("%s-token-data", clusterName)), + }, + }) + + if containsResourceType(resourceTypes, namespaceResourceType) { + resources = append(resources, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + Labels: multiClusterLabels(), + }, + }) + } + + if containsResourceType(resourceTypes, serviceAccountResourceType) { + resources = append(resources, &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: flags.ServiceAccount, + Labels: multiClusterLabels(), + }, + Secrets: []corev1.ObjectReference{ + { + Name: flags.ServiceAccount + "-token", + Namespace: namespace, + }, + }, + }) + } + + if containsResourceType(resourceTypes, roleResourceType) { + role := buildMemberEntityRole(namespace) + resources = append(resources, &role) + } + + if containsResourceType(resourceTypes, roleBindingResourceType) { + role := buildMemberEntityRole(namespace) + roleBinding := buildRoleBinding(role, namespace) + resources = append(resources, &roleBinding) + } + + return resources +} + +// getClientResources returns a map of cluster name to fake.Clientset +func getClientResources(flags Flags, resourceTypes ...resourceType) map[string]KubeClient { + clientMap := make(map[string]KubeClient) + + for _, clusterName := range flags.MemberClusters { + resources := createResourcesForCluster(false, flags, clusterName, resourceTypes...) 
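+		// Only the typed clientset is faked here; the REST config and dynamic client are not needed by these tests,
+		// so nil is passed for both.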
+ clientMap[clusterName] = NewKubeClientContainer(nil, fake.NewSimpleClientset(resources...), nil) + } + resources := createResourcesForCluster(true, flags, flags.CentralCluster, resourceTypes...) + clientMap[flags.CentralCluster] = NewKubeClientContainer(nil, fake.NewSimpleClientset(resources...), nil) + + return clientMap +} + +// containsResourceType returns true if r is in resourceTypes, otherwise false. +func containsResourceType(resourceTypes []resourceType, r resourceType) bool { + for _, rt := range resourceTypes { + if rt == r { + return true + } + } + return false +} + +// readSecretKey reads a key from a Secret in the given namespace with the given name. +func readSecretKey(client KubeClient, secretName, namespace, key string) ([]byte, error) { + tokenSecret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return tokenSecret.Data[key], nil +} + +// readKubeConfig reads the KubeConfig file from the secret in the given cluster and namespace. +func readKubeConfig(client KubeClient, namespace string) (KubeConfigFile, error) { + kubeConfigSecret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), KubeConfigSecretName, metav1.GetOptions{}) + if err != nil { + return KubeConfigFile{}, err + } + + kubeConfigBytes := kubeConfigSecret.Data[KubeConfigSecretKey] + result := KubeConfigFile{} + if err := yaml.Unmarshal(kubeConfigBytes, &result); err != nil { + return KubeConfigFile{}, err + } + + return result, nil +} diff --git a/tools/multicluster/pkg/common/kubeclientcontainer.go b/tools/multicluster/pkg/common/kubeclientcontainer.go new file mode 100644 index 0000000..a10c6f7 --- /dev/null +++ b/tools/multicluster/pkg/common/kubeclientcontainer.go @@ -0,0 +1,271 @@ +package common + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" + v1beta16 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" + "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" + v12 "k8s.io/client-go/kubernetes/typed/apps/v1" + v1beta17 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" + "k8s.io/client-go/kubernetes/typed/apps/v1beta2" + v17 "k8s.io/client-go/kubernetes/typed/authentication/v1" + v1beta18 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" + v18 "k8s.io/client-go/kubernetes/typed/authorization/v1" + v1beta19 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" + v19 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" + v2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2" + "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" + "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2" + v110 "k8s.io/client-go/kubernetes/typed/batch/v1" + "k8s.io/client-go/kubernetes/typed/batch/v1beta1" + v111 "k8s.io/client-go/kubernetes/typed/certificates/v1" + v1beta110 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + v116 "k8s.io/client-go/kubernetes/typed/coordination/v1" + v1beta111 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + v115 "k8s.io/client-go/kubernetes/typed/discovery/v1" + v1beta117 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1" + v114 "k8s.io/client-go/kubernetes/typed/events/v1" + v1beta116 "k8s.io/client-go/kubernetes/typed/events/v1beta1" + v1beta115 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + v1alpha16 
"k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" + v1beta114 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" + v1beta22 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" + v113 "k8s.io/client-go/kubernetes/typed/networking/v1" + v1beta113 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" + v112 "k8s.io/client-go/kubernetes/typed/node/v1" + v1alpha15 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" + v1beta15 "k8s.io/client-go/kubernetes/typed/node/v1beta1" + v16 "k8s.io/client-go/kubernetes/typed/policy/v1" + v1beta14 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" + v15 "k8s.io/client-go/kubernetes/typed/rbac/v1" + v1alpha13 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" + v1beta112 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" + v14 "k8s.io/client-go/kubernetes/typed/scheduling/v1" + v1alpha14 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" + v1beta13 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" + v13 "k8s.io/client-go/kubernetes/typed/storage/v1" + v1alpha12 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" + v1beta12 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" + "k8s.io/client-go/rest" +) + +// KubeClient is wrapper (decorator pattern) over the static and dynamic Kube Clients. +// It provides capabilities of both interfaces along with access to the initial REST configuration. +type KubeClient interface { + kubernetes.Interface + dynamic.Interface + GetRestConfig() *rest.Config +} + +var _ KubeClient = &KubeClientContainer{} + +type KubeClientContainer struct { + staticClient kubernetes.Interface + dynamicClient dynamic.Interface + restConfig *rest.Config +} + +func (k *KubeClientContainer) Discovery() discovery.DiscoveryInterface { + return k.staticClient.Discovery() +} + +func (k *KubeClientContainer) AdmissionregistrationV1() v1.AdmissionregistrationV1Interface { + return k.staticClient.AdmissionregistrationV1() +} + +func (k *KubeClientContainer) AdmissionregistrationV1beta1() v1beta16.AdmissionregistrationV1beta1Interface { + return k.staticClient.AdmissionregistrationV1beta1() +} + +func (k *KubeClientContainer) InternalV1alpha1() v1alpha1.InternalV1alpha1Interface { + return k.staticClient.InternalV1alpha1() +} + +func (k *KubeClientContainer) AppsV1() v12.AppsV1Interface { + return k.staticClient.AppsV1() +} + +func (k *KubeClientContainer) AppsV1beta1() v1beta17.AppsV1beta1Interface { + return k.staticClient.AppsV1beta1() +} + +func (k *KubeClientContainer) AppsV1beta2() v1beta2.AppsV1beta2Interface { + return k.staticClient.AppsV1beta2() +} + +func (k *KubeClientContainer) AuthenticationV1() v17.AuthenticationV1Interface { + return k.staticClient.AuthenticationV1() +} + +func (k *KubeClientContainer) AuthenticationV1beta1() v1beta18.AuthenticationV1beta1Interface { + return k.staticClient.AuthenticationV1beta1() +} + +func (k *KubeClientContainer) AuthorizationV1() v18.AuthorizationV1Interface { + return k.staticClient.AuthorizationV1() +} + +func (k *KubeClientContainer) AuthorizationV1beta1() v1beta19.AuthorizationV1beta1Interface { + return k.staticClient.AuthorizationV1beta1() +} + +func (k *KubeClientContainer) AutoscalingV1() v19.AutoscalingV1Interface { + return k.staticClient.AutoscalingV1() +} + +func (k *KubeClientContainer) AutoscalingV2() v2.AutoscalingV2Interface { + return k.staticClient.AutoscalingV2() +} + +func (k *KubeClientContainer) AutoscalingV2beta1() v2beta1.AutoscalingV2beta1Interface { + return k.staticClient.AutoscalingV2beta1() +} + +func (k *KubeClientContainer) AutoscalingV2beta2() 
v2beta2.AutoscalingV2beta2Interface { + return k.staticClient.AutoscalingV2beta2() +} + +func (k *KubeClientContainer) BatchV1() v110.BatchV1Interface { + return k.staticClient.BatchV1() +} + +func (k *KubeClientContainer) BatchV1beta1() v1beta1.BatchV1beta1Interface { + //TODO implement me + panic("implement me") +} + +func (k *KubeClientContainer) CertificatesV1() v111.CertificatesV1Interface { + return k.staticClient.CertificatesV1() +} + +func (k *KubeClientContainer) CertificatesV1beta1() v1beta110.CertificatesV1beta1Interface { + return k.staticClient.CertificatesV1beta1() +} + +func (k *KubeClientContainer) CoordinationV1beta1() v1beta111.CoordinationV1beta1Interface { + return k.staticClient.CoordinationV1beta1() +} + +func (k *KubeClientContainer) CoordinationV1() v116.CoordinationV1Interface { + return k.staticClient.CoordinationV1() +} + +func (k *KubeClientContainer) CoreV1() corev1client.CoreV1Interface { + return k.staticClient.CoreV1() +} + +func (k *KubeClientContainer) DiscoveryV1() v115.DiscoveryV1Interface { + return k.staticClient.DiscoveryV1() +} + +func (k *KubeClientContainer) DiscoveryV1beta1() v1beta117.DiscoveryV1beta1Interface { + return k.staticClient.DiscoveryV1beta1() +} + +func (k KubeClientContainer) EventsV1() v114.EventsV1Interface { + return k.staticClient.EventsV1() +} + +func (k *KubeClientContainer) EventsV1beta1() v1beta116.EventsV1beta1Interface { + return k.staticClient.EventsV1beta1() +} + +func (k *KubeClientContainer) ExtensionsV1beta1() v1beta115.ExtensionsV1beta1Interface { + return k.staticClient.ExtensionsV1beta1() +} + +func (k *KubeClientContainer) FlowcontrolV1alpha1() v1alpha16.FlowcontrolV1alpha1Interface { + return k.staticClient.FlowcontrolV1alpha1() +} + +func (k *KubeClientContainer) FlowcontrolV1beta1() v1beta114.FlowcontrolV1beta1Interface { + return k.staticClient.FlowcontrolV1beta1() +} + +func (k *KubeClientContainer) FlowcontrolV1beta2() v1beta22.FlowcontrolV1beta2Interface { + return k.staticClient.FlowcontrolV1beta2() +} + +func (k *KubeClientContainer) NetworkingV1() v113.NetworkingV1Interface { + return k.staticClient.NetworkingV1() +} + +func (k *KubeClientContainer) NetworkingV1beta1() v1beta113.NetworkingV1beta1Interface { + return k.staticClient.NetworkingV1beta1() +} + +func (k *KubeClientContainer) NodeV1() v112.NodeV1Interface { + return k.staticClient.NodeV1() +} + +func (k *KubeClientContainer) NodeV1alpha1() v1alpha15.NodeV1alpha1Interface { + return k.staticClient.NodeV1alpha1() +} + +func (k *KubeClientContainer) NodeV1beta1() v1beta15.NodeV1beta1Interface { + return k.staticClient.NodeV1beta1() +} + +func (k *KubeClientContainer) PolicyV1() v16.PolicyV1Interface { + return k.staticClient.PolicyV1() +} + +func (k *KubeClientContainer) PolicyV1beta1() v1beta14.PolicyV1beta1Interface { + return k.staticClient.PolicyV1beta1() +} + +func (k *KubeClientContainer) RbacV1() v15.RbacV1Interface { + return k.staticClient.RbacV1() +} + +func (k *KubeClientContainer) RbacV1beta1() v1beta112.RbacV1beta1Interface { + return k.staticClient.RbacV1beta1() +} + +func (k *KubeClientContainer) RbacV1alpha1() v1alpha13.RbacV1alpha1Interface { + return k.staticClient.RbacV1alpha1() +} + +func (k *KubeClientContainer) SchedulingV1alpha1() v1alpha14.SchedulingV1alpha1Interface { + return k.staticClient.SchedulingV1alpha1() +} + +func (k *KubeClientContainer) SchedulingV1beta1() v1beta13.SchedulingV1beta1Interface { + return k.staticClient.SchedulingV1beta1() +} + +func (k *KubeClientContainer) SchedulingV1() v14.SchedulingV1Interface 
{ + return k.staticClient.SchedulingV1() +} + +func (k *KubeClientContainer) StorageV1beta1() v1beta12.StorageV1beta1Interface { + return k.staticClient.StorageV1beta1() +} + +func (k *KubeClientContainer) StorageV1() v13.StorageV1Interface { + return k.staticClient.StorageV1() +} + +func (k *KubeClientContainer) StorageV1alpha1() v1alpha12.StorageV1alpha1Interface { + return k.staticClient.StorageV1alpha1() +} + +func (k *KubeClientContainer) Resource(resource schema.GroupVersionResource) dynamic.NamespaceableResourceInterface { + return k.dynamicClient.Resource(resource) +} + +func (k *KubeClientContainer) GetRestConfig() *rest.Config { + return k.restConfig +} + +func NewKubeClientContainer(restConfig *rest.Config, staticClient kubernetes.Interface, dynamicClient dynamic.Interface) *KubeClientContainer { + return &KubeClientContainer{ + staticClient: staticClient, + dynamicClient: dynamicClient, + restConfig: restConfig, + } +} diff --git a/tools/multicluster/pkg/common/kubeconfig.go b/tools/multicluster/pkg/common/kubeconfig.go new file mode 100644 index 0000000..260e48a --- /dev/null +++ b/tools/multicluster/pkg/common/kubeconfig.go @@ -0,0 +1,84 @@ +package common + +import ( + "os" + "path/filepath" + + "golang.org/x/xerrors" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/util/homedir" +) + +const ( + kubeConfigEnv = "KUBECONFIG" +) + +// LoadKubeConfigFilePath returns the path of the local KubeConfig file. +func LoadKubeConfigFilePath() string { + env := os.Getenv(kubeConfigEnv) + if env != "" { + return env + } + return filepath.Join(homedir.HomeDir(), ".kube", "config") +} + +// GetMemberClusterApiServerUrls returns the slice of member cluster api urls that should be used. +func GetMemberClusterApiServerUrls(kubeconfig *clientcmdapi.Config, clusterNames []string) ([]string, error) { + var urls []string + for _, name := range clusterNames { + if cluster := kubeconfig.Clusters[name]; cluster != nil { + urls = append(urls, cluster.Server) + } else { + return nil, xerrors.Errorf("cluster '%s' not found in kubeconfig", name) + } + } + return urls, nil +} + +// CreateClientMap crates a map of all MultiClusterClient for every member cluster, and the operator cluster. +func CreateClientMap(memberClusters []string, operatorCluster, kubeConfigPath string, getClient func(clusterName string, kubeConfigPath string) (KubeClient, error)) (map[string]KubeClient, error) { + clientMap := map[string]KubeClient{} + for _, c := range memberClusters { + clientset, err := getClient(c, kubeConfigPath) + if err != nil { + return nil, xerrors.Errorf("failed to create clientset map: %w", err) + } + clientMap[c] = clientset + } + + clientset, err := getClient(operatorCluster, kubeConfigPath) + if err != nil { + return nil, xerrors.Errorf("failed to create clientset map: %w", err) + } + clientMap[operatorCluster] = clientset + return clientMap, nil +} + +// GetKubernetesClient returns a kubernetes.Clientset using the given context from the +// specified KubeConfig filepath. 
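+//
+// Illustrative usage (the context name "member-cluster-1" and namespace "mongodb" are placeholders, not values
+// required by this package):
+//
+//	client, err := GetKubernetesClient("member-cluster-1", LoadKubeConfigFilePath())
+//	if err != nil {
+//		return err
+//	}
+//	pods, err := client.CoreV1().Pods("mongodb").List(ctx, metav1.ListOptions{})
+//
+// The same constructor matches the getClient argument of CreateClientMap, e.g.
+// CreateClientMap(memberClusters, centralCluster, LoadKubeConfigFilePath(), GetKubernetesClient).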
+func GetKubernetesClient(context, kubeConfigPath string) (KubeClient, error) { + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfigPath}, + &clientcmd.ConfigOverrides{ + CurrentContext: context, + }).ClientConfig() + + if err != nil { + return nil, xerrors.Errorf("failed to create client config: %w", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, xerrors.Errorf("failed to create kubernetes clientset: %w", err) + } + + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, xerrors.Errorf("failed to create dynamic kubernetes clientset: %w", err) + } + + return NewKubeClientContainer(config, clientset, dynamicClient), nil +} diff --git a/tools/multicluster/pkg/common/utils.go b/tools/multicluster/pkg/common/utils.go new file mode 100644 index 0000000..77980e1 --- /dev/null +++ b/tools/multicluster/pkg/common/utils.go @@ -0,0 +1,21 @@ +package common + +// Contains checks if a string is present in the provided slice. +func Contains(s []string, str string) bool { + for _, v := range s { + if v == str { + return true + } + } + return false +} + +// AnyAreEmpty returns true if any of the given strings have the zero value. +func AnyAreEmpty(values ...string) bool { + for _, v := range values { + if v == "" { + return true + } + } + return false +} diff --git a/tools/multicluster/pkg/debug/anonymize.go b/tools/multicluster/pkg/debug/anonymize.go new file mode 100644 index 0000000..afd182b --- /dev/null +++ b/tools/multicluster/pkg/debug/anonymize.go @@ -0,0 +1,30 @@ +package debug + +import v1 "k8s.io/api/core/v1" + +const ( + MASKED_TEXT = "***MASKED***" +) + +type Anonymizer interface { + AnonymizeSecret(secret *v1.Secret) *v1.Secret +} + +var _ Anonymizer = &NoOpAnonymizer{} + +type NoOpAnonymizer struct{} + +func (n *NoOpAnonymizer) AnonymizeSecret(secret *v1.Secret) *v1.Secret { + return secret +} + +var _ Anonymizer = &SensitiveDataAnonymizer{} + +type SensitiveDataAnonymizer struct{} + +func (n *SensitiveDataAnonymizer) AnonymizeSecret(secret *v1.Secret) *v1.Secret { + for key, _ := range secret.Data { + secret.Data[key] = []byte(MASKED_TEXT) + } + return secret +} diff --git a/tools/multicluster/pkg/debug/anonymize_test.go b/tools/multicluster/pkg/debug/anonymize_test.go new file mode 100644 index 0000000..5925f3d --- /dev/null +++ b/tools/multicluster/pkg/debug/anonymize_test.go @@ -0,0 +1,40 @@ +package debug + +import ( + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" +) + +func TestNoOpAnonymizer_AnonymizeSecret(t *testing.T) { + //given + text := "test" + anonymizer := NoOpAnonymizer{} + + //when + result := anonymizer.AnonymizeSecret(&v1.Secret{ + Data: map[string][]byte{ + text: []byte(text), + }, + }) + + //then + assert.Equal(t, text, string(result.Data[text])) +} + +func TestSensitiveDataAnonymizer_AnonymizeSecret(t *testing.T) { + //given + text := "test" + anonymizer := SensitiveDataAnonymizer{} + + //when + result := anonymizer.AnonymizeSecret(&v1.Secret{ + Data: map[string][]byte{ + text: []byte(text), + }, + }) + + //then + assert.Equal(t, MASKED_TEXT, string(result.Data[text])) +} diff --git a/tools/multicluster/pkg/debug/collectors.go b/tools/multicluster/pkg/debug/collectors.go new file mode 100644 index 0000000..4acc788 --- /dev/null +++ b/tools/multicluster/pkg/debug/collectors.go @@ -0,0 +1,357 @@ +package debug + +import ( + "bufio" + "bytes" + "context" + "fmt" + "strings" + + 
"k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" + + "github.com/10gen/ops-manager-kubernetes/multi/pkg/common" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/pointer" +) + +var ( + // TODO: Report a bug on inconsistent naming (plural vs singular). + MongoDBCommunityGVR = schema.GroupVersionResource{Group: "mongodbcommunity.mongodb.com", Version: "v1", Resource: "mongodbcommunity"} + MongoDBGVR = schema.GroupVersionResource{Group: "mongodb.com", Version: "v1", Resource: "mongodb"} + MongoDBMultiClusterGVR = schema.GroupVersionResource{Group: "mongodb.com", Version: "v1", Resource: "mongodbmulticlusters"} + MongoDBUsersGVR = schema.GroupVersionResource{Group: "mongodb.com", Version: "v1", Resource: "mongodbusers"} + OpsManagerSchemeGVR = schema.GroupVersionResource{Group: "mongodb.com", Version: "v1", Resource: "opsmanagers"} +) + +type Filter interface { + Accept(object runtime.Object) bool +} + +var _ Filter = &AcceptAllFilter{} + +type AcceptAllFilter struct{} + +func (a *AcceptAllFilter) Accept(_ runtime.Object) bool { + return true +} + +var _ Filter = &WithOwningReference{} + +type WithOwningReference struct{} + +func (a *WithOwningReference) Accept(object runtime.Object) bool { + typeAccessor, err := meta.Accessor(object) + if err != nil { + return true + } + + for _, or := range typeAccessor.GetOwnerReferences() { + if strings.Contains(strings.ToLower(or.Kind), "mongo") { + return true + } + } + return false +} + +type RawFile struct { + Name string + content []byte +} + +type Collector interface { + Collect(context.Context, common.KubeClient, string, Filter, Anonymizer) ([]runtime.Object, []RawFile, error) +} + +var _ Collector = &StatefulSetCollector{} + +type StatefulSetCollector struct{} + +func (s *StatefulSetCollector) Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, _ Anonymizer) ([]runtime.Object, []RawFile, error) { + return genericCollect(ctx, kubeClient, namespace, filter, func(ctx context.Context, kubeClient common.KubeClient, namespace string) (runtime.Object, error) { + return kubeClient.AppsV1().StatefulSets(namespace).List(ctx, v1.ListOptions{}) + }) +} + +var _ Collector = &ConfigMapCollector{} + +type ConfigMapCollector struct{} + +func (s *ConfigMapCollector) Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, _ Anonymizer) ([]runtime.Object, []RawFile, error) { + return genericCollect(ctx, kubeClient, namespace, filter, func(ctx context.Context, kubeClient common.KubeClient, namespace string) (runtime.Object, error) { + return kubeClient.CoreV1().ConfigMaps(namespace).List(ctx, v1.ListOptions{}) + }) +} + +var _ Collector = &SecretCollector{} + +type SecretCollector struct{} + +func (s *SecretCollector) Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, anonymizer Anonymizer) ([]runtime.Object, []RawFile, error) { + var ret []runtime.Object + secrets, err := kubeClient.CoreV1().Secrets(namespace).List(ctx, v1.ListOptions{}) + if err != nil { + return nil, nil, err + } + for i := range secrets.Items { + item := secrets.Items[i] + if filter.Accept(&item) { + ret = append(ret, anonymizer.AnonymizeSecret(&item)) + } + } + return ret, nil, nil +} + +var _ Collector = &ServiceAccountCollector{} + +type ServiceAccountCollector struct{} + +func (s *ServiceAccountCollector) 
Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, anonymizer Anonymizer) ([]runtime.Object, []RawFile, error) { + return genericCollect(ctx, kubeClient, namespace, filter, func(ctx context.Context, kubeClient common.KubeClient, namespace string) (runtime.Object, error) { + return kubeClient.CoreV1().ServiceAccounts(namespace).List(ctx, v1.ListOptions{}) + }) +} + +var _ Collector = &RolesCollector{} + +type RolesCollector struct{} + +func (s *RolesCollector) Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, anonymizer Anonymizer) ([]runtime.Object, []RawFile, error) { + return genericCollect(ctx, kubeClient, namespace, filter, func(ctx context.Context, kubeClient common.KubeClient, namespace string) (runtime.Object, error) { + return kubeClient.RbacV1().Roles(namespace).List(ctx, v1.ListOptions{}) + }) +} + +var _ Collector = &RolesBindingsCollector{} + +type RolesBindingsCollector struct{} + +func (s *RolesBindingsCollector) Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, anonymizer Anonymizer) ([]runtime.Object, []RawFile, error) { + return genericCollect(ctx, kubeClient, namespace, filter, func(ctx context.Context, kubeClient common.KubeClient, namespace string) (runtime.Object, error) { + return kubeClient.RbacV1().RoleBindings(namespace).List(ctx, v1.ListOptions{}) + }) +} + +var _ Collector = &MongoDBCollector{} + +type MongoDBCollector struct{} + +func (s *MongoDBCollector) Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, anonymizer Anonymizer) ([]runtime.Object, []RawFile, error) { + return genericCollect(ctx, kubeClient, namespace, filter, func(ctx context.Context, kubeClient common.KubeClient, namespace string) (runtime.Object, error) { + return kubeClient.Resource(MongoDBGVR).List(ctx, v1.ListOptions{}) + }) +} + +var _ Collector = &MongoDBMultiClusterCollector{} + +type MongoDBMultiClusterCollector struct{} + +func (s *MongoDBMultiClusterCollector) Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, anonymizer Anonymizer) ([]runtime.Object, []RawFile, error) { + return genericCollect(ctx, kubeClient, namespace, filter, func(ctx context.Context, kubeClient common.KubeClient, namespace string) (runtime.Object, error) { + return kubeClient.Resource(MongoDBMultiClusterGVR).List(ctx, v1.ListOptions{}) + }) +} + +var _ Collector = &MongoDBUserCollector{} + +type MongoDBUserCollector struct{} + +func (s *MongoDBUserCollector) Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, anonymizer Anonymizer) ([]runtime.Object, []RawFile, error) { + return genericCollect(ctx, kubeClient, namespace, filter, func(ctx context.Context, kubeClient common.KubeClient, namespace string) (runtime.Object, error) { + return kubeClient.Resource(MongoDBUsersGVR).List(ctx, v1.ListOptions{}) + }) +} + +var _ Collector = &OpsManagerCollector{} + +type OpsManagerCollector struct{} + +func (s *OpsManagerCollector) Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, anonymizer Anonymizer) ([]runtime.Object, []RawFile, error) { + return genericCollect(ctx, kubeClient, namespace, filter, func(ctx context.Context, kubeClient common.KubeClient, namespace string) (runtime.Object, error) { + return kubeClient.Resource(OpsManagerSchemeGVR).List(ctx, v1.ListOptions{}) + }) +} + +var _ Collector = &MongoDBCommunityCollector{} + +type 
MongoDBCommunityCollector struct{} + +func (s *MongoDBCommunityCollector) Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, anonymizer Anonymizer) ([]runtime.Object, []RawFile, error) { + return genericCollect(ctx, kubeClient, namespace, filter, func(ctx context.Context, kubeClient common.KubeClient, namespace string) (runtime.Object, error) { + return kubeClient.Resource(MongoDBCommunityGVR).List(ctx, v1.ListOptions{}) + }) +} + +var _ Collector = &EventsCollector{} + +type EventsCollector struct{} + +func (s *EventsCollector) Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, anonymizer Anonymizer) ([]runtime.Object, []RawFile, error) { + return genericCollect(ctx, kubeClient, namespace, filter, func(ctx context.Context, kubeClient common.KubeClient, namespace string) (runtime.Object, error) { + return kubeClient.EventsV1().Events(namespace).List(ctx, v1.ListOptions{}) + }) +} + +var _ Collector = &LogsCollector{} + +type LogsCollector struct{} + +func (s *LogsCollector) Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, anonymizer Anonymizer) ([]runtime.Object, []RawFile, error) { + pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{}) + if err != nil { + return nil, nil, err + } + var logsToCollect []RawFile + for i := range pods.Items { + logsToCollect = append(logsToCollect, RawFile{ + Name: pods.Items[i].Name, + }) + } + for i := range logsToCollect { + podName := logsToCollect[i].Name + PodLogsConnection := kubeClient.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{ + Follow: false, + TailLines: pointer.Int64(100), + }) + LogStream, _ := PodLogsConnection.Stream(ctx) + reader := bufio.NewScanner(LogStream) + var line string + for reader.Scan() { + line = fmt.Sprintf("%s\n", reader.Text()) + bytes := []byte(line) + logsToCollect[i].content = append(logsToCollect[i].content, bytes...) 
+ } + LogStream.Close() + } + return nil, logsToCollect, nil +} + +var _ Collector = &AgentHealthFileCollector{} + +type AgentHealthFileCollector struct{} + +func (s *AgentHealthFileCollector) Collect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, anonymizer Anonymizer) ([]runtime.Object, []RawFile, error) { + type AgentHealthFileToCollect struct { + podName string + RawFile rest.ContentConfig + agentFileName string + containerName string + } + + pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, v1.ListOptions{}) + if err != nil { + return nil, nil, err + } + var logsToCollect []AgentHealthFileToCollect + var collectedHealthFiles []RawFile + for i, pod := range pods.Items { + add := AgentHealthFileToCollect{ + podName: pods.Items[i].Name, + } + found := false + for _, c := range pod.Spec.Containers { + for _, e := range c.Env { + if "AGENT_STATUS_FILEPATH" == e.Name { + add.agentFileName = e.Value + found = true + break + } + } + if found { + add.containerName = c.Name + break + } + } + + if found { + logsToCollect = append(logsToCollect, add) + } + } + for _, l := range logsToCollect { + add := RawFile{ + Name: l.podName + "-agent-health", + } + content, err := getFileContent(kubeClient.GetRestConfig(), kubeClient, namespace, l.podName, l.containerName, l.agentFileName) + if err == nil { + add.content = content + collectedHealthFiles = append(collectedHealthFiles, add) + } + } + return nil, collectedHealthFiles, nil +} + +// Inspired by https://gist.github.com/kyroy/8453a0c4e075e91809db9749e0adcff2 +func getFileContent(config *rest.Config, clientset common.KubeClient, namespace, podName, containerName, path string) ([]byte, error) { + u := clientset.CoreV1().RESTClient().Post(). + Namespace(namespace). + Name(podName). + Resource("pods"). + SubResource("exec"). + Param("command", "/bin/cat"). + Param("command", path). + Param("container", containerName). + Param("stderr", "true"). 
+ Param("stdout", "true").URL() + + buf := &bytes.Buffer{} + errBuf := &bytes.Buffer{} + exec, err := remotecommand.NewSPDYExecutor(config, "POST", u) + err = exec.Stream(remotecommand.StreamOptions{ + Stdout: buf, + Stderr: errBuf, + }) + if err != nil { + return nil, fmt.Errorf("%w Failed obtaining file %s from %v/%v", err, path, namespace, podName) + } + + return buf.Bytes(), nil +} + +type genericLister func(ctx context.Context, kubeClient common.KubeClient, namespace string) (runtime.Object, error) + +func genericCollect(ctx context.Context, kubeClient common.KubeClient, namespace string, filter Filter, lister genericLister) ([]runtime.Object, []RawFile, error) { + var ret []runtime.Object + listAsObject, err := lister(ctx, kubeClient, namespace) + if err != nil { + return nil, nil, err + } + list, err := meta.ExtractList(listAsObject) + if err != nil { + return nil, nil, err + } + for i := range list { + item := list[i] + if filter.Accept(item) { + ret = append(ret, item) + } + } + return ret, nil, nil +} + +type CollectionResult struct { + kubeResources []runtime.Object + rawObjects []RawFile + errors []error + namespace string + context string +} + +func Collect(ctx context.Context, kubeClient common.KubeClient, context string, namespace string, filter Filter, collectors []Collector, anonymizer Anonymizer) CollectionResult { + result := CollectionResult{} + result.context = context + result.namespace = namespace + + for _, collector := range collectors { + collectedKubeObjects, collectedRawObjects, err := collector.Collect(ctx, kubeClient, namespace, filter, anonymizer) + result.kubeResources = append(result.kubeResources, collectedKubeObjects...) + result.rawObjects = append(result.rawObjects, collectedRawObjects...) + if err != nil { + result.errors = append(result.errors, err) + } + } + return result +} diff --git a/tools/multicluster/pkg/debug/collectors_test.go b/tools/multicluster/pkg/debug/collectors_test.go new file mode 100644 index 0000000..eeb78fe --- /dev/null +++ b/tools/multicluster/pkg/debug/collectors_test.go @@ -0,0 +1,172 @@ +package debug + +import ( + "context" + "testing" + + "github.com/10gen/ops-manager-kubernetes/multi/pkg/common" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/apps/v1" + v12 "k8s.io/api/core/v1" + v13 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + fake2 "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/kubernetes/fake" +) + +func TestCollectors(t *testing.T) { + //given + collectors := []Collector{ + &MongoDBCommunityCollector{}, + &MongoDBCollector{}, + &MongoDBMultiClusterCollector{}, + &MongoDBUserCollector{}, + &OpsManagerCollector{}, + &StatefulSetCollector{}, + &SecretCollector{}, + &ConfigMapCollector{}, + &RolesCollector{}, + &ServiceAccountCollector{}, + &RolesBindingsCollector{}, + &ServiceAccountCollector{}, + } + filter := &AcceptAllFilter{} + anonymizer := &NoOpAnonymizer{} + namespace := "test" + testObjectNames := "test" + + kubeClient := kubeClientWithTestingResources(namespace, testObjectNames) + + //when + for _, collector := range collectors { + kubeObjects, rawObjects, err := collector.Collect(context.TODO(), kubeClient, namespace, filter, anonymizer) + + //then + assert.NoError(t, err) + assert.Equal(t, 1, len(kubeObjects)) + assert.Equal(t, 0, len(rawObjects)) + } +} + +func kubeClientWithTestingResources(namespace, testObjectNames string) 
*common.KubeClientContainer { + resources := []runtime.Object{ + &v12.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: testObjectNames, + Namespace: namespace, + }, + }, + &v1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: testObjectNames, + Namespace: namespace, + }, + }, + &v12.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: testObjectNames, + Namespace: namespace, + }, + }, + &v12.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: testObjectNames, + Namespace: namespace, + }, + }, + &v13.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: testObjectNames, + Namespace: namespace, + }, + }, + &v13.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: testObjectNames, + Namespace: namespace, + }, + }, + &v12.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: testObjectNames, + Namespace: namespace, + }, + }, + } + + // Unfortunately most of the Kind and Resource parts are guessing and making fake.NewSimpleDynamicClientWithCustomListKinds + // happy. Sadly, it uses naming conventions (with List suffix) and tries to guess the plural names - mostly incorrectly. + + scheme := runtime.NewScheme() + MongoDBCommunityGVK := schema.GroupVersionKind{ + Group: MongoDBCommunityGVR.Group, + Version: MongoDBCommunityGVR.Version, + Kind: "MongoDBCommunity", + } + MongoDBGVK := schema.GroupVersionKind{ + Group: MongoDBGVR.Group, + Version: MongoDBGVR.Version, + Kind: "MongoDB", + } + MongoDBUserGVK := schema.GroupVersionKind{ + Group: MongoDBGVR.Group, + Version: MongoDBGVR.Version, + Kind: "MongoDBUser", + } + MongoDBMultiGVK := schema.GroupVersionKind{ + Group: MongoDBGVR.Group, + Version: MongoDBGVR.Version, + Kind: "MongoDBMulti", + } + OpsManagerGVK := schema.GroupVersionKind{ + Group: OpsManagerSchemeGVR.Group, + Version: OpsManagerSchemeGVR.Version, + Kind: "OpsManager", + } + + scheme.AddKnownTypeWithName(MongoDBCommunityGVK, &unstructured.Unstructured{}) + scheme.AddKnownTypeWithName(MongoDBGVK, &unstructured.Unstructured{}) + scheme.AddKnownTypeWithName(MongoDBMultiGVK, &unstructured.Unstructured{}) + scheme.AddKnownTypeWithName(MongoDBUserGVK, &unstructured.Unstructured{}) + + MongoDBCommunityResource := unstructured.Unstructured{} + MongoDBCommunityResource.SetGroupVersionKind(MongoDBCommunityGVK) + MongoDBCommunityResource.SetName(testObjectNames) + + MongoDBResource := unstructured.Unstructured{} + MongoDBResource.SetGroupVersionKind(MongoDBGVK) + MongoDBResource.SetName(testObjectNames) + + MongoDBUserResource := unstructured.Unstructured{} + MongoDBUserResource.SetGroupVersionKind(MongoDBUserGVK) + MongoDBUserResource.SetName(testObjectNames) + + MongoDBMultiClusterResource := unstructured.Unstructured{} + MongoDBMultiClusterResource.SetGroupVersionKind(MongoDBMultiGVK) + MongoDBMultiClusterResource.SetName(testObjectNames) + + OpsManagerResource := unstructured.Unstructured{} + OpsManagerResource.SetGroupVersionKind(OpsManagerGVK) + OpsManagerResource.SetName(testObjectNames) + + dynamicLists := map[schema.GroupVersionResource]string{ + MongoDBCommunityGVR: "MongoDBCommunityList", + MongoDBGVR: "MongoDBList", + MongoDBUsersGVR: "MongoDBUserList", + MongoDBMultiClusterGVR: "MongoDBMultiClusterList", + OpsManagerSchemeGVR: "OpsManagerList", + } + dynamicFake := fake2.NewSimpleDynamicClientWithCustomListKinds(scheme, dynamicLists) + + dynamicFake.Resource(MongoDBMultiClusterGVR).Create(context.TODO(), &MongoDBMultiClusterResource, metav1.CreateOptions{}) + dynamicFake.Resource(MongoDBCommunityGVR).Create(context.TODO(), &MongoDBCommunityResource, metav1.CreateOptions{}) + 
dynamicFake.Resource(MongoDBGVR).Create(context.TODO(), &MongoDBResource, metav1.CreateOptions{}) + dynamicFake.Resource(MongoDBUsersGVR).Create(context.TODO(), &MongoDBUserResource, metav1.CreateOptions{}) + dynamicFake.Resource(OpsManagerSchemeGVR).Create(context.TODO(), &OpsManagerResource, metav1.CreateOptions{}) + + kubeClient := common.NewKubeClientContainer(nil, fake.NewSimpleClientset(resources...), dynamicFake) + return kubeClient +} diff --git a/tools/multicluster/pkg/debug/writer.go b/tools/multicluster/pkg/debug/writer.go new file mode 100644 index 0000000..950043d --- /dev/null +++ b/tools/multicluster/pkg/debug/writer.go @@ -0,0 +1,128 @@ +package debug + +import ( + "archive/zip" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/ghodss/yaml" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + DefaultWritePath = ".mongodb/debug" +) + +func WriteToFile(path string, collectionResults ...CollectionResult) (string, string, error) { + err := os.MkdirAll(path, os.ModePerm) + if err != nil { + return "", "", err + } + for _, collectionResult := range collectionResults { + for _, obj := range collectionResult.kubeResources { + data, err := yaml.Marshal(obj) + if err != nil { + return "", "", err + } + meta, err := meta.Accessor(obj) + if err != nil { + return "", "", err + } + kubeType, err := getType(obj) + if err != nil { + return "", "", err + } + fileName := fmt.Sprintf("%s/%s-%s-%s-%s.yaml", path, collectionResult.context, collectionResult.namespace, kubeType, meta.GetName()) + err = os.WriteFile(fileName, data, os.ModePerm) + if err != nil { + return "", "", err + } + } + for _, obj := range collectionResult.rawObjects { + fileName := fmt.Sprintf("%s/%s-%s-%s-%s.txt", path, collectionResult.context, collectionResult.namespace, "txt", obj.Name) + err = os.WriteFile(fileName, obj.content, os.ModePerm) + if err != nil { + return "", "", err + } + } + } + compressedFile, err := compressDirectory(path) + if err != nil { + return "", "", err + } + return path, compressedFile, err +} + +// Inspired by https://stackoverflow.com/questions/37869793/how-do-i-zip-a-directory-containing-sub-directories-or-files-in-golang/63233911#63233911 +func compressDirectory(path string) (string, error) { + fileName := path + ".zip" + file, err := os.Create(fileName) + if err != nil { + return "", err + } + defer file.Close() + + w := zip.NewWriter(file) + defer w.Close() + + walker := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + // Ensure that `path` is not absolute; it should not start with "/". + // This snippet happens to work because I don't use + // absolute paths, but ensure your real-world code + // transforms path into a zip-root relative path. 
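+		// A more robust variant (sketch only, assuming the walked root directory is captured in a variable such as
+		// rootDir before filepath.Walk is called, since the walker's path parameter shadows the function argument):
+		//
+		//	rel, relErr := filepath.Rel(rootDir, path)
+		//	if relErr != nil {
+		//		return relErr
+		//	}
+		//	f, err := w.Create(rel)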
+ f, err := w.Create(path) + if err != nil { + return err + } + + _, err = io.Copy(f, file) + if err != nil { + return err + } + + return nil + } + err = filepath.Walk(path, walker) + if err != nil { + return "", err + } + return fileName, nil +} + +func DebugDirectory() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + time, err := time.Now().UTC().MarshalText() + if err != nil { + return "", err + } + return fmt.Sprintf("%s/%s/%s", home, DefaultWritePath, time), nil +} + +// This is a workaround for https://github.com/kubernetes/kubernetes/pull/63972 +func getType(obj runtime.Object) (string, error) { + v, err := conversion.EnforcePtr(obj) + if err != nil { + return "", err + } + return v.Type().String(), nil +} diff --git a/tools/multicluster/pkg/debug/writer_test.go b/tools/multicluster/pkg/debug/writer_test.go new file mode 100644 index 0000000..c792204 --- /dev/null +++ b/tools/multicluster/pkg/debug/writer_test.go @@ -0,0 +1,72 @@ +package debug + +import ( + "fmt" + "os" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestWriteToFile(t *testing.T) { + //setup + uniqueTempDir, err := os.MkdirTemp(os.TempDir(), "*-TestWriteToFile") + assert.NoError(t, err) + defer os.RemoveAll(uniqueTempDir) + + //given + testNamespace := "testNamespace" + testContext := "testContext" + testError := fmt.Errorf("test") + testSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: testNamespace, + }, + Data: map[string][]byte{ + "test": []byte("test"), + }, + } + testFile := RawFile{ + Name: "testFile", + content: []byte("test"), + } + collectionResult := CollectionResult{ + kubeResources: []runtime.Object{testSecret}, + rawObjects: []RawFile{testFile}, + errors: []error{testError}, + namespace: testNamespace, + context: testContext, + } + outputFiles := []string{"testContext-testNamespace-txt-testFile.txt", "testContext-testNamespace-v1.Secret-test-secret.yaml"} + + //when + path, compressedFile, err := WriteToFile(uniqueTempDir, collectionResult) + defer os.RemoveAll(path) // This is fine as in case of an empty path, this does nothing + defer os.RemoveAll(compressedFile) + + //then + assert.NoError(t, err) + assert.NotNil(t, path) + assert.NotNil(t, compressedFile) + + files, err := os.ReadDir(uniqueTempDir) + assert.NoError(t, err) + assert.Equal(t, len(outputFiles), len(files)) + for _, outputFile := range outputFiles { + found := false + for _, file := range files { + if strings.Contains(file.Name(), outputFile) { + found = true + break + } + } + assert.Truef(t, found, "File %s not found", outputFile) + } + _, err = os.Stat(compressedFile) + assert.NoError(t, err) +}