From 67b067ce56c903d478c617f8c2dc8561282576ca Mon Sep 17 00:00:00 2001 From: mms-build-account Date: Wed, 1 May 2024 04:49:14 -0400 Subject: [PATCH] Kubernetes Enterprise Operator Release 1.25.0 (#287) --- .evergreen.yml | 20 +- .../workflows/release-multicluster-cli.yaml | 28 + LICENSE | 2 +- README.md | 2 +- crds.yaml | 282 +++++++++- .../107.0.0.8502-1/ubi/Dockerfile | 53 ++ .../107.0.1.8507-1/ubi/Dockerfile | 53 ++ .../107.0.1.8507-1_1.25.0/ubi/Dockerfile | 57 ++ .../107.0.2.8531-1/ubi/Dockerfile | 53 ++ .../107.0.2.8531-1_1.25.0/ubi/Dockerfile | 57 ++ .../107.0.3.8550-1/ubi/Dockerfile | 53 ++ .../107.0.3.8550-1_1.25.0/ubi/Dockerfile | 57 ++ .../107.0.4.8567-1/ubi/Dockerfile | 53 ++ .../107.0.4.8567-1_1.25.0/ubi/Dockerfile | 57 ++ .../12.0.29.7785-1/ubi/Dockerfile | 53 +- .../12.0.29.7785-1_1.25.0/ubi/Dockerfile | 57 ++ .../12.0.30.7791-1/ubi/Dockerfile | 57 ++ .../12.0.30.7791-1_1.25.0/ubi/Dockerfile | 57 ++ .../12.0.31.7825-1/ubi/Dockerfile | 53 ++ .../12.0.31.7825-1_1.25.0/ubi/Dockerfile | 57 ++ .../13.10.0.8620-1/ubi/Dockerfile | 58 +++ .../13.15.0.8788-1_1.25.0/ubi/Dockerfile | 57 ++ .../1.25.0/ubi/Dockerfile | 86 +++ .../2.0.2/ubi/Dockerfile | 1 - .../1.25.0/ubi/Dockerfile | 35 ++ .../1.25.0/ubi/Dockerfile | 34 ++ .../1.25.0/ubi/Dockerfile | 26 + .../1.25.0/ubi/Dockerfile | 39 ++ .../6.0.22/ubi/Dockerfile | 75 +++ .../6.0.23/ubi/Dockerfile | 77 +++ .../7.0.0/ubi/Dockerfile | 75 +++ .../7.0.1/ubi/Dockerfile | 75 +++ .../7.0.2/ubi/Dockerfile | 75 +++ .../7.0.3/ubi/Dockerfile | 75 +++ .../7.0.4/ubi/Dockerfile | 77 +++ mongodb-enterprise-multi-cluster.yaml | 164 ++++-- mongodb-enterprise-openshift.yaml | 164 +++--- mongodb-enterprise.yaml | 36 +- .../install_istio_separate_network.sh | 221 ++++++++ .../0010_create_gke_cluster_0.sh | 5 + .../0010_create_gke_cluster_1.sh | 5 + .../0010_create_gke_cluster_2.sh | 5 + .../code_snippets/0020_get_gke_credentials.sh | 3 + .../0030_verify_access_to_clusters.sh | 6 + .../code_snippets/0040_install_istio.sh | 5 + .../0045_create_operator_namespace.sh | 8 + .../0045_create_ops_manager_namespace.sh | 8 + .../0046_create_image_pull_secrets.sh | 8 + ...check_cluster_connectivity_create_sts_0.sh | 22 + ...check_cluster_connectivity_create_sts_1.sh | 22 + ...check_cluster_connectivity_create_sts_2.sh | 22 + ...check_cluster_connectivity_wait_for_sts.sh | 3 + ...uster_connectivity_create_pod_service_0.sh | 13 + ...uster_connectivity_create_pod_service_1.sh | 13 + ...uster_connectivity_create_pod_service_2.sh | 13 + ...nnectivity_create_round_robin_service_0.sh | 13 + ...nnectivity_create_round_robin_service_1.sh | 13 + ...nnectivity_create_round_robin_service_2.sh | 13 + ...nectivity_verify_pod_0_0_from_cluster_1.sh | 8 + ...nectivity_verify_pod_1_0_from_cluster_0.sh | 8 + ...nectivity_verify_pod_1_0_from_cluster_2.sh | 8 + ...nectivity_verify_pod_2_0_from_cluster_0.sh | 8 + ...0100_check_cluster_connectivity_cleanup.sh | 9 + ...kubectl_mongodb_configure_multi_cluster.sh | 8 + .../0210_helm_install_operator.sh | 14 + .../0211_check_operator_deployment.sh | 5 + .../code_snippets/0250_generate_certs.sh | 87 ++++ .../code_snippets/0255_create_cert_secrets.sh | 10 + ...00_ops_manager_create_admin_credentials.sh | 5 + ...manager_deploy_on_single_member_cluster.sh | 29 ++ ...0311_ops_manager_wait_for_pending_state.sh | 2 + ...0312_ops_manager_wait_for_running_state.sh | 16 + .../0320_ops_manager_add_second_cluster.sh | 33 ++ ...0321_ops_manager_wait_for_pending_state.sh | 2 + ...0322_ops_manager_wait_for_running_state.sh | 10 + 
.../code_snippets/0400_install_minio_s3.sh | 11 + ...0_ops_manager_prepare_s3_backup_secrets.sh | 7 + .../0510_ops_manager_enable_s3_backup.sh | 71 +++ ...0522_ops_manager_wait_for_running_state.sh | 14 + .../code_snippets/9000_delete_namespaces.sh | 5 + .../code_snippets/9010_delete_gke_clusters.sh | 4 + .../env_variables.sh | 51 ++ .../output/0030_verify_access_to_clusters.out | 15 + ...ectivity_verify_pod_0_0_from_cluster_1.out | 2 + ...ectivity_verify_pod_1_0_from_cluster_0.out | 2 + ...ectivity_verify_pod_1_0_from_cluster_2.out | 2 + ...ectivity_verify_pod_2_0_from_cluster_0.out | 2 + ...ubectl_mongodb_configure_multi_cluster.out | 10 + .../output/0210_helm_install_operator.out | 249 +++++++++ .../output/0211_check_operator_deployment.out | 9 + ...311_ops_manager_wait_for_pending_state.out | 2 + ...312_ops_manager_wait_for_running_state.out | 26 + ...321_ops_manager_wait_for_pending_state.out | 2 + ...322_ops_manager_wait_for_running_state.out | 22 + ...522_ops_manager_wait_for_running_state.out | 29 ++ samples/ops-manager-multi-cluster/test.sh | 58 +++ .../ops-manager-multi-cluster/test_cleanup.sh | 8 + scripts/sample_test_runner.sh | 113 ++++ tools/multicluster/.goreleaser.yaml | 1 + tools/multicluster/cmd/root.go | 29 +- tools/multicluster/cmd/setup.go | 13 +- tools/multicluster/go.mod | 37 +- tools/multicluster/go.sum | 372 ++----------- .../install_istio_separate_network.sh | 26 +- tools/multicluster/main.go | 9 +- tools/multicluster/pkg/common/common.go | 212 +++++--- tools/multicluster/pkg/common/common_test.go | 493 +++++++++--------- .../pkg/common/kubeclientcontainer.go | 213 ++++---- tools/multicluster/pkg/debug/collectors.go | 5 +- .../multicluster/pkg/debug/collectors_test.go | 17 +- 110 files changed, 4239 insertions(+), 975 deletions(-) create mode 100644 .github/workflows/release-multicluster-cli.yaml create mode 100644 dockerfiles/mongodb-agent/107.0.0.8502-1/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/107.0.1.8507-1/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/107.0.1.8507-1_1.25.0/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/107.0.2.8531-1/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/107.0.2.8531-1_1.25.0/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/107.0.3.8550-1/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/107.0.3.8550-1_1.25.0/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/107.0.4.8567-1/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/107.0.4.8567-1_1.25.0/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/12.0.29.7785-1_1.25.0/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/12.0.30.7791-1/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/12.0.30.7791-1_1.25.0/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/12.0.31.7825-1/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/12.0.31.7825-1_1.25.0/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/13.10.0.8620-1/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-agent/13.15.0.8788-1_1.25.0/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-enterprise-database/1.25.0/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-enterprise-init-appdb/1.25.0/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-enterprise-init-database/1.25.0/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-enterprise-init-ops-manager/1.25.0/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-enterprise-operator/1.25.0/ubi/Dockerfile create mode 100644 
dockerfiles/mongodb-enterprise-ops-manager/6.0.22/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-enterprise-ops-manager/6.0.23/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-enterprise-ops-manager/7.0.0/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-enterprise-ops-manager/7.0.1/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-enterprise-ops-manager/7.0.2/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-enterprise-ops-manager/7.0.3/ubi/Dockerfile create mode 100644 dockerfiles/mongodb-enterprise-ops-manager/7.0.4/ubi/Dockerfile create mode 100755 samples/multi-cluster/install_istio_separate_network.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_0.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_1.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_2.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0020_get_gke_credentials.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0030_verify_access_to_clusters.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0040_install_istio.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0045_create_operator_namespace.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0045_create_ops_manager_namespace.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0046_create_image_pull_secrets.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0050_check_cluster_connectivity_create_sts_0.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0050_check_cluster_connectivity_create_sts_1.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0050_check_cluster_connectivity_create_sts_2.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0060_check_cluster_connectivity_wait_for_sts.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0070_check_cluster_connectivity_create_pod_service_0.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0070_check_cluster_connectivity_create_pod_service_1.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0070_check_cluster_connectivity_create_pod_service_2.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0080_check_cluster_connectivity_create_round_robin_service_0.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0080_check_cluster_connectivity_create_round_robin_service_1.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0080_check_cluster_connectivity_create_round_robin_service_2.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0100_check_cluster_connectivity_cleanup.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0200_kubectl_mongodb_configure_multi_cluster.sh create mode 100644 
samples/ops-manager-multi-cluster/code_snippets/0210_helm_install_operator.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0211_check_operator_deployment.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0250_generate_certs.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0255_create_cert_secrets.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0300_ops_manager_create_admin_credentials.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0310_ops_manager_deploy_on_single_member_cluster.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0311_ops_manager_wait_for_pending_state.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0312_ops_manager_wait_for_running_state.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0320_ops_manager_add_second_cluster.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0321_ops_manager_wait_for_pending_state.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0322_ops_manager_wait_for_running_state.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0400_install_minio_s3.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0500_ops_manager_prepare_s3_backup_secrets.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0510_ops_manager_enable_s3_backup.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/0522_ops_manager_wait_for_running_state.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/9000_delete_namespaces.sh create mode 100644 samples/ops-manager-multi-cluster/code_snippets/9010_delete_gke_clusters.sh create mode 100644 samples/ops-manager-multi-cluster/env_variables.sh create mode 100644 samples/ops-manager-multi-cluster/output/0030_verify_access_to_clusters.out create mode 100644 samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.out create mode 100644 samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.out create mode 100644 samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.out create mode 100644 samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.out create mode 100644 samples/ops-manager-multi-cluster/output/0200_kubectl_mongodb_configure_multi_cluster.out create mode 100644 samples/ops-manager-multi-cluster/output/0210_helm_install_operator.out create mode 100644 samples/ops-manager-multi-cluster/output/0211_check_operator_deployment.out create mode 100644 samples/ops-manager-multi-cluster/output/0311_ops_manager_wait_for_pending_state.out create mode 100644 samples/ops-manager-multi-cluster/output/0312_ops_manager_wait_for_running_state.out create mode 100644 samples/ops-manager-multi-cluster/output/0321_ops_manager_wait_for_pending_state.out create mode 100644 samples/ops-manager-multi-cluster/output/0322_ops_manager_wait_for_running_state.out create mode 100644 samples/ops-manager-multi-cluster/output/0522_ops_manager_wait_for_running_state.out create mode 100755 samples/ops-manager-multi-cluster/test.sh create mode 100755 samples/ops-manager-multi-cluster/test_cleanup.sh create mode 100644 scripts/sample_test_runner.sh mode change 100755 => 100644 tools/multicluster/install_istio_separate_network.sh diff --git a/.evergreen.yml b/.evergreen.yml index 
fbe9b09..31c022d 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -1,13 +1,10 @@ -# 15m timeout for all the tasks -exec_timeout_secs: 900 - variables: - &go_env XDG_CONFIG_HOME: ${go_base_path}${workdir} GO111MODULE: "on" GOROOT: "/opt/golang/go1.21" - functions: + "clone": - command: subprocess.exec type: setup @@ -17,10 +14,12 @@ functions: type: setup params: directory: src/github.com/mongodb/mongodb-enterprise-kubernetes - + "install goreleaser": - command: shell.exec type: setup + include_expansions_in_env: + - goreleaser_pro_tar_gz params: script: | set -Eeu pipefail @@ -43,6 +42,7 @@ functions: chmod 755 ./linux_amd64/macnotary "release": - command: shell.exec + type: setup params: working_dir: src/github.com/mongodb/mongodb-enterprise-kubernetes/tools/multicluster include_expansions_in_env: @@ -51,13 +51,6 @@ functions: - macos_notary_secret - workdir - triggered_by_git_tag - - GRS_USERNAME - - GRS_PASSWORD - - ARTIFACTORY_URL - - SIGNING_IMAGE_URI - - ARTIFACTORY_USERNAME - - ARTIFACTORY_PASSWORD - - PKCS11_URI env: <<: *go_env MACOS_NOTARY_KEY: ${macos_notary_keyid} @@ -68,8 +61,7 @@ functions: set -Eeu pipefail export PATH=$GOROOT/bin:$PATH - # Avoid race conditions on signing and notarizing with parallelism=1 - ${workdir}/goreleaser release --clean --timeout 300s --parallelism 1 + ${workdir}/goreleaser release --rm-dist tasks: - name: package_goreleaser diff --git a/.github/workflows/release-multicluster-cli.yaml b/.github/workflows/release-multicluster-cli.yaml new file mode 100644 index 0000000..2ce1684 --- /dev/null +++ b/.github/workflows/release-multicluster-cli.yaml @@ -0,0 +1,28 @@ +name: Release multicluster-cli binary +on: + push: + tags: + - '*' + workflow_dispatch: +jobs: + goreleaser: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: '1.21' + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v4 + with: + distribution: goreleaser + version: latest + args: release --rm-dist + workdir: ./tools/multicluster + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GORELEASER_CURRENT_TAG: ${{ github.ref_name }} diff --git a/LICENSE b/LICENSE index 23e0c88..d6af3a5 100644 --- a/LICENSE +++ b/LICENSE @@ -1,3 +1,3 @@ Usage of the MongoDB Enterprise Operator for Kubernetes indicates agreement with the MongoDB Customer Agreement. -https://www.mongodb.com/customer-agreement/ \ No newline at end of file +https://www.mongodb.com/customer-agreement/ diff --git a/README.md b/README.md index 598c2e6..7ce3137 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,7 @@ CRDs are defined cluster-wide, so to install them, you must have Cluster-level a #### Operator Installation -> In order to install the Operator in OpenShift, please follow [these](docs/openshift-marketplace.md) instructions instead. +> In order to install the Operator in OpenShift, please follow [these](openshift-install.md) instructions instead. 
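For reference, the YAML-based installation that the README introduces next amounts to applying this repository's published manifests with kubectl. A minimal sketch, assuming the raw GitHub URLs for the `crds.yaml` and `mongodb-enterprise.yaml` files shipped in this repo:

```sh
# Hedged sketch: apply the CRDs first, then the operator manifest.
# The raw.githubusercontent.com URLs are an assumption based on this repo's layout.
kubectl apply -f https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/master/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/master/mongodb-enterprise.yaml
```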
To install the Operator using yaml files, you may apply the config directly from github;
diff --git a/crds.yaml b/crds.yaml
index 39cc102..c5e3ffc 100644
--- a/crds.yaml
+++ b/crds.yaml
@@ -875,7 +875,7 @@ spec:
                 type: array
               statefulSet:
                 description: StatefulSetConfiguration provides the statefulset override
-                  for each of the cluster's statefulset if "StatefulSetConfiguration"
+                  for each of the cluster's statefulset if "StatefulSetConfiguration"
                   is specified at cluster level under "clusterSpecList" that takes
                   precedence over the global one
                 properties:
@@ -1603,7 +1603,7 @@ spec:
                     type: object
                   statefulSet:
                     description: StatefulSetConfiguration provides the statefulset override
-                      for each of the cluster's statefulset if "StatefulSetConfiguration"
+                      for each of the cluster's statefulset if "StatefulSetConfiguration"
                       is specified at cluster level under "clusterSpecList" that takes
                       precedence over the global one
                     properties:
@@ -2125,8 +2125,16 @@ spec:
                         - name
                       type: object
                     type: array
-                  required:
-                  - processes
+                  replicaSet:
+                    properties:
+                      settings:
+                        description: MapWrapper is a wrapper for a map to be used
+                          by other structs. The CRD generator does not support
+                          map[string]interface{} on the top level and hence we
+                          need to work around this with a wrapping struct.
+                        type: object
+                        x-kubernetes-preserve-unknown-fields: true
+                    type: object
                 type: object
               cloudManager:
                 properties:
@@ -3001,6 +3009,180 @@ spec:
                   which should be used instead'
                 format: hostname
                 type: string
+              clusterSpecList:
+                items:
+                  description: ClusterSpecOMItem defines member cluster details for
+                    Ops Manager multi-cluster deployment.
+                  properties:
+                    backup:
+                      description: Backup contains settings to override from top-level
+                        `spec.backup` for this member cluster. If the value is not
+                        set here, then the value is taken from `spec.backup`.
+                      properties:
+                        assignmentLabels:
+                          description: Assignment Labels set in the Ops Manager
+                          items:
+                            type: string
+                          type: array
+                        headDB:
+                          description: HeadDB specifies configuration options for
+                            the HeadDB
+                          properties:
+                            labelSelector:
+                              type: object
+                              x-kubernetes-preserve-unknown-fields: true
+                            storage:
+                              type: string
+                            storageClass:
+                              type: string
+                          type: object
+                        jvmParameters:
+                          items:
+                            type: string
+                          type: array
+                        members:
+                          description: Members indicates the number of backup daemon
+                            pods to create.
+                          minimum: 0
+                          type: integer
+                        statefulSet:
+                          description: StatefulSetConfiguration specifies optional
+                            overrides for the backup daemon statefulset.
+                          properties:
+                            metadata:
+                              description: StatefulSetMetadataWrapper is a wrapper
+                                around Labels and Annotations
+                              properties:
+                                annotations:
+                                  additionalProperties:
+                                    type: string
+                                  type: object
+                                labels:
+                                  additionalProperties:
+                                    type: string
+                                  type: object
+                              type: object
+                            spec:
+                              type: object
+                              x-kubernetes-preserve-unknown-fields: true
+                          required:
+                          - spec
+                          type: object
+                      type: object
+                    clusterDomain:
+                      description: Cluster domain to override the default *.svc.cluster.local
+                        if the default cluster domain has been changed on a cluster
+                        level.
+                      format: hostname
+                      type: string
+                    clusterName:
+                      description: ClusterName is the name of the cluster where the
+                        Ops Manager Statefulset will be scheduled. The operator uses
+                        ClusterName to find API credentials in `mongodb-enterprise-operator-member-list`
+                        config map to use for this member cluster. If the credentials
+                        are not found, then the member cluster is considered unreachable
+                        and ignored in the reconcile process.
+                      type: string
+                    configuration:
+                      additionalProperties:
+                        type: string
+                      description: The configuration properties passed to Ops Manager
+                        and Backup Daemon in this cluster. If specified (not empty)
+                        then this field overrides `spec.configuration` field entirely.
+                        If not specified, then `spec.configuration` field is used
+                        for the Ops Manager and Backup Daemon instances in this cluster.
+                      type: object
+                    externalConnectivity:
+                      description: MongoDBOpsManagerExternalConnectivity, if set, allows
+                        for the creation of a Service for accessing Ops Manager instances
+                        in this member cluster from outside the Kubernetes cluster.
+                        If specified (even if provided empty) then this field overrides
+                        `spec.externalConnectivity` field entirely. If not specified,
+                        then `spec.externalConnectivity` field is used for the Ops
+                        Manager and Backup Daemon instances in this cluster.
+                      properties:
+                        annotations:
+                          additionalProperties:
+                            type: string
+                          description: Annotations is a list of annotations to be
+                            directly passed to the Service object.
+                          type: object
+                        clusterIP:
+                          description: ClusterIP IP that will be assigned to this
+                            Service when creating a ClusterIP type Service
+                          type: string
+                        externalTrafficPolicy:
+                          description: ExternalTrafficPolicy mechanism to preserve
+                            the client source IP. Only supported on GCE and Google
+                            Kubernetes Engine.
+                          enum:
+                          - Cluster
+                          - Local
+                          type: string
+                        loadBalancerIP:
+                          description: LoadBalancerIP IP that will be assigned to
+                            this LoadBalancer.
+                          type: string
+                        port:
+                          description: Port on which this `Service` will listen;
+                            this applies to `NodePort`.
+                          format: int32
+                          type: integer
+                        type:
+                          description: Type of the `Service` to be created.
+                          enum:
+                          - LoadBalancer
+                          - NodePort
+                          - ClusterIP
+                          type: string
+                      required:
+                      - type
+                      type: object
+                    jvmParameters:
+                      description: JVM parameters to pass to Ops Manager and Backup
+                        Daemon instances in this member cluster. If specified (not
+                        empty) then this field overrides `spec.jvmParameters` field
+                        entirely. If not specified, then `spec.jvmParameters` field
+                        is used for the Ops Manager and Backup Daemon instances in
+                        this cluster.
+                      items:
+                        type: string
+                      type: array
+                    members:
+                      description: Number of Ops Manager instances in this member
+                        cluster.
+                      type: integer
+                    statefulSet:
+                      description: Custom StatefulSet configuration to override
+                        Ops Manager's statefulset in this member cluster. If specified
+                        (even if provided empty) then this field overrides the `spec.statefulSet`
+                        field entirely. If not specified, then the `spec.statefulSet`
+                        field is used for the Ops Manager and Backup Daemon instances
+                        in this cluster.
+                      properties:
+                        metadata:
+                          description: StatefulSetMetadataWrapper is a wrapper around
+                            Labels and Annotations
+                          properties:
+                            annotations:
+                              additionalProperties:
+                                type: string
+                              type: object
+                            labels:
+                              additionalProperties:
+                                type: string
+                              type: object
+                          type: object
+                        spec:
+                          type: object
+                          x-kubernetes-preserve-unknown-fields: true
+                      required:
+                      - spec
+                      type: object
+                  required:
+                  - members
+                  type: object
+                type: array
               configuration:
                 additionalProperties:
                   type: string
@@ -3018,6 +3200,50 @@ spec:
                     description: Annotations is a list of annotations to be directly
                       passed to the Service object.
                     type: object
+                  clusterIP:
+                    description: ClusterIP IP that will be assigned to this Service
+                      when creating a ClusterIP type Service
+                    type: string
+                  externalTrafficPolicy:
+                    description: ExternalTrafficPolicy mechanism to preserve the client
+                      source IP.
+                      Only supported on GCE and Google Kubernetes Engine.
+                    enum:
+                    - Cluster
+                    - Local
+                    type: string
+                  loadBalancerIP:
+                    description: LoadBalancerIP IP that will be assigned to this LoadBalancer.
+                    type: string
+                  port:
+                    description: Port on which this `Service` will listen; this
+                      applies to `NodePort`.
+                    format: int32
+                    type: integer
+                  type:
+                    description: Type of the `Service` to be created.
+                    enum:
+                    - LoadBalancer
+                    - NodePort
+                    - ClusterIP
+                    type: string
+                required:
+                - type
+                type: object
+              internalConnectivity:
+                description: InternalConnectivity, if set, allows overriding the
+                  settings of the default service used for internal connectivity to
+                  the Ops Manager servers.
+                properties:
+                  annotations:
+                    additionalProperties:
+                      type: string
+                    description: Annotations is a list of annotations to be directly
+                      passed to the Service object.
+                    type: object
+                  clusterIP:
+                    description: ClusterIP IP that will be assigned to this Service
+                      when creating a ClusterIP type Service
+                    type: string
                   externalTrafficPolicy:
                     description: ExternalTrafficPolicy mechanism to preserve the client
                       source IP. Only supported on GCE and Google Kubernetes Engine.
@@ -3038,6 +3264,7 @@ spec:
                     enum:
                     - LoadBalancer
                     - NodePort
+                    - ClusterIP
                     type: string
                   required:
                   - type
@@ -3047,6 +3274,17 @@ spec:
                 items:
                   type: string
                 type: array
+              opsManagerURL:
+                description: OpsManagerURL specifies the URL with which the operator
+                  and AppDB monitoring agent should access the Ops Manager instance (or
+                  instances). When not set, the operator uses the FQDN of Ops Manager's
+                  headless service `{name}-svc.{namespace}.svc.cluster.local` to connect
+                  to the instance. If that URL cannot be used, then the URL in this field
+                  should be provided for the operator to connect to Ops Manager instances.
+                type: string
               replicas:
                 minimum: 1
                 type: integer
@@ -3090,6 +3328,15 @@ spec:
                 required:
                 - spec
                 type: object
+              topology:
+                description: Topology sets the desired cluster topology of the Ops
+                  Manager deployment. It defaults to SingleCluster if not set. If
+                  MultiCluster is specified, then the clusterSpecList field is mandatory
+                  and at least one member cluster has to be specified.
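Taken together, the new `topology`, `clusterSpecList`, and `opsManagerURL` fields described above allow an Ops Manager deployment to be spread over several member clusters. A hedged sketch of such a resource, in the style of this release's `samples/ops-manager-multi-cluster` snippets (cluster names, member counts, and secret names below are hypothetical):

```sh
# Illustrative only: names and sizes below are invented for this sketch.
kubectl apply -f - <<EOF
apiVersion: mongodb.com/v1
kind: MongoDBOpsManager
metadata:
  name: om
  namespace: ops-manager
spec:
  version: 7.0.4
  adminCredentials: om-admin-user-credentials  # Secret holding the first admin user
  topology: MultiCluster
  clusterSpecList:
    - clusterName: gke-cluster-0  # must match the operator's member-list credentials
      members: 1
      backup:
        members: 1
    - clusterName: gke-cluster-1
      members: 1
  # opsManagerURL: https://om.example.com:8443  # only if the headless-service FQDN is unusable
  applicationDatabase:
    members: 3
    version: 6.0.5-ubi8
EOF
```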
+ enum: + - SingleCluster + - MultiCluster + type: string version: type: string required: @@ -3107,6 +3354,15 @@ spec: required: - statusName type: object + clusterStatusList: + items: + properties: + clusterName: + type: string + members: + type: integer + type: object + type: array configServerCount: type: integer lastTransition: @@ -3167,6 +3423,15 @@ spec: type: object backup: properties: + clusterStatusList: + items: + properties: + clusterName: + type: string + replicas: + type: integer + type: object + type: array lastTransition: type: string message: @@ -3214,6 +3479,15 @@ spec: type: object opsManager: properties: + clusterStatusList: + items: + properties: + clusterName: + type: string + replicas: + type: integer + type: object + type: array lastTransition: type: string message: diff --git a/dockerfiles/mongodb-agent/107.0.0.8502-1/ubi/Dockerfile b/dockerfiles/mongodb-agent/107.0.0.8502-1/ubi/Dockerfile new file mode 100644 index 0000000..f004b2b --- /dev/null +++ b/dockerfiles/mongodb-agent/107.0.0.8502-1/ubi/Dockerfile @@ -0,0 +1,53 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG agent_version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + +RUN mkdir -p /agent \ + && mkdir -p /var/lib/mongodb-mms-automation \ + && mkdir -p /var/log/mongodb-mms-automation/ \ + && chmod -R +wr /var/log/mongodb-mms-automation/ \ + # ensure that the agent user can write the logs in OpenShift + && touch /var/log/mongodb-mms-automation/readiness.log \ + && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log + + +COPY --from=base /data/mongodb-agent.tar.gz /agent +COPY --from=base /data/mongodb-tools.tgz /agent +COPY --from=base /data/LICENSE /licenses/LICENSE + +RUN tar xfz /agent/mongodb-agent.tar.gz \ + && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ + && chmod +x /agent/mongodb-agent \ + && mkdir -p /var/lib/automation/config \ + && chmod -R +r /var/lib/automation/config \ + && rm /agent/mongodb-agent.tar.gz \ + && rm -r mongodb-mms-automation-agent-* + +RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz + +USER 2000 +CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/107.0.1.8507-1/ubi/Dockerfile b/dockerfiles/mongodb-agent/107.0.1.8507-1/ubi/Dockerfile new file mode 100644 index 0000000..f004b2b --- /dev/null +++ b/dockerfiles/mongodb-agent/107.0.1.8507-1/ubi/Dockerfile @@ -0,0 +1,53 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM 
registry.access.redhat.com/ubi8/ubi-minimal + +ARG agent_version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + +RUN mkdir -p /agent \ + && mkdir -p /var/lib/mongodb-mms-automation \ + && mkdir -p /var/log/mongodb-mms-automation/ \ + && chmod -R +wr /var/log/mongodb-mms-automation/ \ + # ensure that the agent user can write the logs in OpenShift + && touch /var/log/mongodb-mms-automation/readiness.log \ + && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log + + +COPY --from=base /data/mongodb-agent.tar.gz /agent +COPY --from=base /data/mongodb-tools.tgz /agent +COPY --from=base /data/LICENSE /licenses/LICENSE + +RUN tar xfz /agent/mongodb-agent.tar.gz \ + && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ + && chmod +x /agent/mongodb-agent \ + && mkdir -p /var/lib/automation/config \ + && chmod -R +r /var/lib/automation/config \ + && rm /agent/mongodb-agent.tar.gz \ + && rm -r mongodb-mms-automation-agent-* + +RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz + +USER 2000 +CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/107.0.1.8507-1_1.25.0/ubi/Dockerfile b/dockerfiles/mongodb-agent/107.0.1.8507-1_1.25.0/ubi/Dockerfile new file mode 100644 index 0000000..c89002a --- /dev/null +++ b/dockerfiles/mongodb-agent/107.0.1.8507-1_1.25.0/ubi/Dockerfile @@ -0,0 +1,57 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/probe.sh /opt/scripts/probe.sh +COPY --from=base /data/readinessprobe /opt/scripts/readinessprobe +COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook +COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh +COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh +COPY --from=base /data/LICENSE /LICENSE + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y 
--disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +COPY --from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz + +RUN tar xfz /tools/mongodb_tools.tgz +RUN mv mongodb-database-tools-*/bin/* /tools +RUN chmod +x /tools/* +RUN rm /tools/mongodb_tools.tgz +RUN rm -rf /mongodb-database-tools-* + +RUN tar xfz /agent/mongodb_agent.tgz +RUN mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent +RUN chmod +x /agent/mongodb-agent +RUN rm /agent/mongodb_agent.tgz +RUN rm -rf mongodb-mms-automation-agent-* + +RUN mkdir -p /var/lib/automation/config +RUN chmod -R +r /var/lib/automation/config + +USER 2000 + +HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/107.0.2.8531-1/ubi/Dockerfile b/dockerfiles/mongodb-agent/107.0.2.8531-1/ubi/Dockerfile new file mode 100644 index 0000000..f004b2b --- /dev/null +++ b/dockerfiles/mongodb-agent/107.0.2.8531-1/ubi/Dockerfile @@ -0,0 +1,53 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG agent_version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + +RUN mkdir -p /agent \ + && mkdir -p /var/lib/mongodb-mms-automation \ + && mkdir -p /var/log/mongodb-mms-automation/ \ + && chmod -R +wr /var/log/mongodb-mms-automation/ \ + # ensure that the agent user can write the logs in OpenShift + && touch /var/log/mongodb-mms-automation/readiness.log \ + && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log + + +COPY --from=base /data/mongodb-agent.tar.gz /agent +COPY --from=base /data/mongodb-tools.tgz /agent +COPY --from=base /data/LICENSE /licenses/LICENSE + +RUN tar xfz /agent/mongodb-agent.tar.gz \ + && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ + && chmod +x /agent/mongodb-agent \ + && mkdir -p /var/lib/automation/config \ + && chmod -R +r /var/lib/automation/config \ + && rm /agent/mongodb-agent.tar.gz \ + && rm -r mongodb-mms-automation-agent-* + +RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz + +USER 2000 +CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/107.0.2.8531-1_1.25.0/ubi/Dockerfile 
b/dockerfiles/mongodb-agent/107.0.2.8531-1_1.25.0/ubi/Dockerfile new file mode 100644 index 0000000..c89002a --- /dev/null +++ b/dockerfiles/mongodb-agent/107.0.2.8531-1_1.25.0/ubi/Dockerfile @@ -0,0 +1,57 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/probe.sh /opt/scripts/probe.sh +COPY --from=base /data/readinessprobe /opt/scripts/readinessprobe +COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook +COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh +COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh +COPY --from=base /data/LICENSE /LICENSE + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +COPY --from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz + +RUN tar xfz /tools/mongodb_tools.tgz +RUN mv mongodb-database-tools-*/bin/* /tools +RUN chmod +x /tools/* +RUN rm /tools/mongodb_tools.tgz +RUN rm -rf /mongodb-database-tools-* + +RUN tar xfz /agent/mongodb_agent.tgz +RUN mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent +RUN chmod +x /agent/mongodb-agent +RUN rm /agent/mongodb_agent.tgz +RUN rm -rf mongodb-mms-automation-agent-* + +RUN mkdir -p /var/lib/automation/config +RUN chmod -R +r /var/lib/automation/config + +USER 2000 + +HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/107.0.3.8550-1/ubi/Dockerfile b/dockerfiles/mongodb-agent/107.0.3.8550-1/ubi/Dockerfile new file mode 100644 index 0000000..f004b2b --- /dev/null +++ b/dockerfiles/mongodb-agent/107.0.3.8550-1/ubi/Dockerfile @@ -0,0 +1,53 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG agent_version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install 
-y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + +RUN mkdir -p /agent \ + && mkdir -p /var/lib/mongodb-mms-automation \ + && mkdir -p /var/log/mongodb-mms-automation/ \ + && chmod -R +wr /var/log/mongodb-mms-automation/ \ + # ensure that the agent user can write the logs in OpenShift + && touch /var/log/mongodb-mms-automation/readiness.log \ + && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log + + +COPY --from=base /data/mongodb-agent.tar.gz /agent +COPY --from=base /data/mongodb-tools.tgz /agent +COPY --from=base /data/LICENSE /licenses/LICENSE + +RUN tar xfz /agent/mongodb-agent.tar.gz \ + && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ + && chmod +x /agent/mongodb-agent \ + && mkdir -p /var/lib/automation/config \ + && chmod -R +r /var/lib/automation/config \ + && rm /agent/mongodb-agent.tar.gz \ + && rm -r mongodb-mms-automation-agent-* + +RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz + +USER 2000 +CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/107.0.3.8550-1_1.25.0/ubi/Dockerfile b/dockerfiles/mongodb-agent/107.0.3.8550-1_1.25.0/ubi/Dockerfile new file mode 100644 index 0000000..c89002a --- /dev/null +++ b/dockerfiles/mongodb-agent/107.0.3.8550-1_1.25.0/ubi/Dockerfile @@ -0,0 +1,57 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/probe.sh /opt/scripts/probe.sh +COPY --from=base /data/readinessprobe /opt/scripts/readinessprobe +COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook +COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh +COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh +COPY --from=base /data/LICENSE /LICENSE + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +COPY --from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz + +RUN tar xfz /tools/mongodb_tools.tgz +RUN mv mongodb-database-tools-*/bin/* /tools +RUN chmod +x /tools/* +RUN rm /tools/mongodb_tools.tgz +RUN rm -rf /mongodb-database-tools-* + +RUN tar xfz /agent/mongodb_agent.tgz +RUN mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent +RUN chmod +x /agent/mongodb-agent +RUN rm /agent/mongodb_agent.tgz +RUN rm -rf mongodb-mms-automation-agent-* + +RUN mkdir -p 
/var/lib/automation/config +RUN chmod -R +r /var/lib/automation/config + +USER 2000 + +HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/107.0.4.8567-1/ubi/Dockerfile b/dockerfiles/mongodb-agent/107.0.4.8567-1/ubi/Dockerfile new file mode 100644 index 0000000..f004b2b --- /dev/null +++ b/dockerfiles/mongodb-agent/107.0.4.8567-1/ubi/Dockerfile @@ -0,0 +1,53 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG agent_version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + +RUN mkdir -p /agent \ + && mkdir -p /var/lib/mongodb-mms-automation \ + && mkdir -p /var/log/mongodb-mms-automation/ \ + && chmod -R +wr /var/log/mongodb-mms-automation/ \ + # ensure that the agent user can write the logs in OpenShift + && touch /var/log/mongodb-mms-automation/readiness.log \ + && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log + + +COPY --from=base /data/mongodb-agent.tar.gz /agent +COPY --from=base /data/mongodb-tools.tgz /agent +COPY --from=base /data/LICENSE /licenses/LICENSE + +RUN tar xfz /agent/mongodb-agent.tar.gz \ + && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ + && chmod +x /agent/mongodb-agent \ + && mkdir -p /var/lib/automation/config \ + && chmod -R +r /var/lib/automation/config \ + && rm /agent/mongodb-agent.tar.gz \ + && rm -r mongodb-mms-automation-agent-* + +RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz + +USER 2000 +CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/107.0.4.8567-1_1.25.0/ubi/Dockerfile b/dockerfiles/mongodb-agent/107.0.4.8567-1_1.25.0/ubi/Dockerfile new file mode 100644 index 0000000..c89002a --- /dev/null +++ b/dockerfiles/mongodb-agent/107.0.4.8567-1_1.25.0/ubi/Dockerfile @@ -0,0 +1,57 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/probe.sh /opt/scripts/probe.sh +COPY --from=base /data/readinessprobe /opt/scripts/readinessprobe +COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook +COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh +COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh +COPY --from=base 
/data/LICENSE /LICENSE + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +COPY --from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz + +RUN tar xfz /tools/mongodb_tools.tgz +RUN mv mongodb-database-tools-*/bin/* /tools +RUN chmod +x /tools/* +RUN rm /tools/mongodb_tools.tgz +RUN rm -rf /mongodb-database-tools-* + +RUN tar xfz /agent/mongodb_agent.tgz +RUN mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent +RUN chmod +x /agent/mongodb-agent +RUN rm /agent/mongodb_agent.tgz +RUN rm -rf mongodb-mms-automation-agent-* + +RUN mkdir -p /var/lib/automation/config +RUN chmod -R +r /var/lib/automation/config + +USER 2000 + +HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/12.0.29.7785-1/ubi/Dockerfile b/dockerfiles/mongodb-agent/12.0.29.7785-1/ubi/Dockerfile index 9577c0c..c89002a 100644 --- a/dockerfiles/mongodb-agent/12.0.29.7785-1/ubi/Dockerfile +++ b/dockerfiles/mongodb-agent/12.0.29.7785-1/ubi/Dockerfile @@ -3,7 +3,7 @@ FROM ${imagebase} as base FROM registry.access.redhat.com/ubi8/ubi-minimal -ARG agent_version +ARG version LABEL name="MongoDB Agent" \ version="${agent_version}" \ @@ -13,34 +13,45 @@ LABEL name="MongoDB Agent" \ release="1" \ maintainer="support@mongodb.com" +COPY --from=base /data/probe.sh /opt/scripts/probe.sh +COPY --from=base /data/readinessprobe /opt/scripts/readinessprobe +COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook +COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh +COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh +COPY --from=base /data/LICENSE /LICENSE + RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs RUN microdnf install -y --disableplugin=subscription-manager curl \ - hostname tar gzip procps\ + hostname tar gzip procps jq \ && microdnf upgrade -y \ && rm -rf /var/lib/apt/lists/* -RUN mkdir -p /agent \ - && mkdir -p /var/lib/mongodb-mms-automation \ - && mkdir -p /var/log/mongodb-mms-automation/ \ - && chmod -R +wr /var/log/mongodb-mms-automation/ \ - # ensure that the agent user can write the logs in OpenShift - && touch /var/log/mongodb-mms-automation/readiness.log \ - && chmod ugo+rw 
/var/log/mongodb-mms-automation/readiness.log +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +COPY --from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz -COPY --from=base /data/mongodb-agent.tar.gz /agent -COPY --from=base /data/mongodb-tools.tgz /agent -COPY --from=base /data/LICENSE /licenses/LICENSE +RUN tar xfz /tools/mongodb_tools.tgz +RUN mv mongodb-database-tools-*/bin/* /tools +RUN chmod +x /tools/* +RUN rm /tools/mongodb_tools.tgz +RUN rm -rf /mongodb-database-tools-* -RUN tar xfz /agent/mongodb-agent.tar.gz \ - && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ - && chmod +x /agent/mongodb-agent \ - && mkdir -p /var/lib/automation/config \ - && chmod -R +r /var/lib/automation/config \ - && rm /agent/mongodb-agent.tar.gz \ - && rm -r mongodb-mms-automation-agent-* +RUN tar xfz /agent/mongodb_agent.tgz +RUN mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent +RUN chmod +x /agent/mongodb-agent +RUN rm /agent/mongodb_agent.tgz +RUN rm -rf mongodb-mms-automation-agent-* -RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz +RUN mkdir -p /var/lib/automation/config +RUN chmod -R +r /var/lib/automation/config USER 2000 -CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] \ No newline at end of file + +HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/12.0.29.7785-1_1.25.0/ubi/Dockerfile b/dockerfiles/mongodb-agent/12.0.29.7785-1_1.25.0/ubi/Dockerfile new file mode 100644 index 0000000..c89002a --- /dev/null +++ b/dockerfiles/mongodb-agent/12.0.29.7785-1_1.25.0/ubi/Dockerfile @@ -0,0 +1,57 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/probe.sh /opt/scripts/probe.sh +COPY --from=base /data/readinessprobe /opt/scripts/readinessprobe +COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook +COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh +COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh +COPY --from=base /data/LICENSE /LICENSE + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +COPY --from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz + +RUN tar xfz /tools/mongodb_tools.tgz +RUN mv mongodb-database-tools-*/bin/* /tools +RUN chmod +x /tools/* +RUN rm /tools/mongodb_tools.tgz +RUN rm -rf 
/mongodb-database-tools-* + +RUN tar xfz /agent/mongodb_agent.tgz +RUN mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent +RUN chmod +x /agent/mongodb-agent +RUN rm /agent/mongodb_agent.tgz +RUN rm -rf mongodb-mms-automation-agent-* + +RUN mkdir -p /var/lib/automation/config +RUN chmod -R +r /var/lib/automation/config + +USER 2000 + +HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/12.0.30.7791-1/ubi/Dockerfile b/dockerfiles/mongodb-agent/12.0.30.7791-1/ubi/Dockerfile new file mode 100644 index 0000000..c89002a --- /dev/null +++ b/dockerfiles/mongodb-agent/12.0.30.7791-1/ubi/Dockerfile @@ -0,0 +1,57 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/probe.sh /opt/scripts/probe.sh +COPY --from=base /data/readinessprobe /opt/scripts/readinessprobe +COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook +COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh +COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh +COPY --from=base /data/LICENSE /LICENSE + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +COPY --from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz + +RUN tar xfz /tools/mongodb_tools.tgz +RUN mv mongodb-database-tools-*/bin/* /tools +RUN chmod +x /tools/* +RUN rm /tools/mongodb_tools.tgz +RUN rm -rf /mongodb-database-tools-* + +RUN tar xfz /agent/mongodb_agent.tgz +RUN mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent +RUN chmod +x /agent/mongodb-agent +RUN rm /agent/mongodb_agent.tgz +RUN rm -rf mongodb-mms-automation-agent-* + +RUN mkdir -p /var/lib/automation/config +RUN chmod -R +r /var/lib/automation/config + +USER 2000 + +HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/12.0.30.7791-1_1.25.0/ubi/Dockerfile b/dockerfiles/mongodb-agent/12.0.30.7791-1_1.25.0/ubi/Dockerfile new file mode 100644 index 0000000..c89002a --- /dev/null +++ b/dockerfiles/mongodb-agent/12.0.30.7791-1_1.25.0/ubi/Dockerfile @@ -0,0 +1,57 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/probe.sh 
/opt/scripts/probe.sh +COPY --from=base /data/readinessprobe /opt/scripts/readinessprobe +COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook +COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh +COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh +COPY --from=base /data/LICENSE /LICENSE + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +COPY --from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz + +RUN tar xfz /tools/mongodb_tools.tgz +RUN mv mongodb-database-tools-*/bin/* /tools +RUN chmod +x /tools/* +RUN rm /tools/mongodb_tools.tgz +RUN rm -rf /mongodb-database-tools-* + +RUN tar xfz /agent/mongodb_agent.tgz +RUN mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent +RUN chmod +x /agent/mongodb-agent +RUN rm /agent/mongodb_agent.tgz +RUN rm -rf mongodb-mms-automation-agent-* + +RUN mkdir -p /var/lib/automation/config +RUN chmod -R +r /var/lib/automation/config + +USER 2000 + +HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/12.0.31.7825-1/ubi/Dockerfile b/dockerfiles/mongodb-agent/12.0.31.7825-1/ubi/Dockerfile new file mode 100644 index 0000000..f004b2b --- /dev/null +++ b/dockerfiles/mongodb-agent/12.0.31.7825-1/ubi/Dockerfile @@ -0,0 +1,53 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG agent_version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + +RUN mkdir -p /agent \ + && mkdir -p /var/lib/mongodb-mms-automation \ + && mkdir -p /var/log/mongodb-mms-automation/ \ + && chmod -R +wr /var/log/mongodb-mms-automation/ \ + # ensure that the agent user can write the logs in OpenShift + && touch /var/log/mongodb-mms-automation/readiness.log \ + && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log + + +COPY --from=base /data/mongodb-agent.tar.gz 
/agent +COPY --from=base /data/mongodb-tools.tgz /agent +COPY --from=base /data/LICENSE /licenses/LICENSE + +RUN tar xfz /agent/mongodb-agent.tar.gz \ + && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ + && chmod +x /agent/mongodb-agent \ + && mkdir -p /var/lib/automation/config \ + && chmod -R +r /var/lib/automation/config \ + && rm /agent/mongodb-agent.tar.gz \ + && rm -r mongodb-mms-automation-agent-* + +RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz + +USER 2000 +CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/12.0.31.7825-1_1.25.0/ubi/Dockerfile b/dockerfiles/mongodb-agent/12.0.31.7825-1_1.25.0/ubi/Dockerfile new file mode 100644 index 0000000..c89002a --- /dev/null +++ b/dockerfiles/mongodb-agent/12.0.31.7825-1_1.25.0/ubi/Dockerfile @@ -0,0 +1,57 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/probe.sh /opt/scripts/probe.sh +COPY --from=base /data/readinessprobe /opt/scripts/readinessprobe +COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook +COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh +COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh +COPY --from=base /data/LICENSE /LICENSE + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +COPY --from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz + +RUN tar xfz /tools/mongodb_tools.tgz +RUN mv mongodb-database-tools-*/bin/* /tools +RUN chmod +x /tools/* +RUN rm /tools/mongodb_tools.tgz +RUN rm -rf /mongodb-database-tools-* + +RUN tar xfz /agent/mongodb_agent.tgz +RUN mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent +RUN chmod +x /agent/mongodb-agent +RUN rm /agent/mongodb_agent.tgz +RUN rm -rf mongodb-mms-automation-agent-* + +RUN mkdir -p /var/lib/automation/config +RUN chmod -R +r /var/lib/automation/config + +USER 2000 + +HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/13.10.0.8620-1/ubi/Dockerfile b/dockerfiles/mongodb-agent/13.10.0.8620-1/ubi/Dockerfile new file mode 100644 index 0000000..245bffd --- /dev/null +++ b/dockerfiles/mongodb-agent/13.10.0.8620-1/ubi/Dockerfile @@ -0,0 +1,58 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version + +LABEL 
name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/probe.sh /opt/scripts/probe.sh +COPY --from=base /data/readinessprobe /opt/scripts/readinessprobe +COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook +COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh +COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh +COPY --from=base /data/LICENSE /LICENSE + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +COPY --from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + + +RUN tar xfz /agent/mongodb_agent.tgz +RUN ls /agent +RUN mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent +RUN ls /agent +RUN chmod +x /agent/mongodb-agent +RUN mkdir -p /var/lib/automation/config +RUN ls /var/lib/automation/config +RUN chmod -R +r /var/lib/automation/config +RUN ls /var/lib/automation/config +RUN rm /agent/mongodb_agent.tgz +RUN rm -r mongodb-mms-automation-agent-* + +USER 2000 + +HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 \ No newline at end of file diff --git a/dockerfiles/mongodb-agent/13.15.0.8788-1_1.25.0/ubi/Dockerfile b/dockerfiles/mongodb-agent/13.15.0.8788-1_1.25.0/ubi/Dockerfile new file mode 100644 index 0000000..c89002a --- /dev/null +++ b/dockerfiles/mongodb-agent/13.15.0.8788-1_1.25.0/ubi/Dockerfile @@ -0,0 +1,57 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version + +LABEL name="MongoDB Agent" \ + version="${agent_version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/probe.sh /opt/scripts/probe.sh +COPY --from=base /data/readinessprobe /opt/scripts/readinessprobe +COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook +COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh +COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh +COPY --from=base /data/LICENSE /LICENSE + +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs libcurl openldap openssl xz-libs +# Dependencies for the Agent +RUN microdnf install -y 
--disableplugin=subscription-manager --setopt=install_weak_deps=0 \ + net-snmp \ + net-snmp-agent-libs +RUN microdnf install -y --disableplugin=subscription-manager curl \ + hostname tar gzip procps jq \ + && microdnf upgrade -y \ + && rm -rf /var/lib/apt/lists/* + + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +COPY --from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz + +RUN tar xfz /tools/mongodb_tools.tgz +RUN mv mongodb-database-tools-*/bin/* /tools +RUN chmod +x /tools/* +RUN rm /tools/mongodb_tools.tgz +RUN rm -rf /mongodb-database-tools-* + +RUN tar xfz /agent/mongodb_agent.tgz +RUN mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent +RUN chmod +x /agent/mongodb-agent +RUN rm /agent/mongodb_agent.tgz +RUN rm -rf mongodb-mms-automation-agent-* + +RUN mkdir -p /var/lib/automation/config +RUN chmod -R +r /var/lib/automation/config + +USER 2000 + +HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 \ No newline at end of file diff --git a/dockerfiles/mongodb-enterprise-database/1.25.0/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-database/1.25.0/ubi/Dockerfile new file mode 100644 index 0000000..95689ea --- /dev/null +++ b/dockerfiles/mongodb-enterprise-database/1.25.0/ubi/Dockerfile @@ -0,0 +1,86 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + + +LABEL name="MongoDB Enterprise Database" \ + version="1.25.0" \ + summary="MongoDB Enterprise Database Image" \ + description="MongoDB Enterprise Database Image" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + + + + + +ENV MMS_HOME /mongodb-automation +ENV MMS_LOG_DIR /var/log/mongodb-mms-automation + + + +RUN microdnf update -y && rm -rf /var/cache/yum + +# these are the packages needed for the agent +RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper +RUN microdnf install -y --disableplugin=subscription-manager \ + hostname \ + procps + + +# these are the packages needed for MongoDB +# (https://docs.mongodb.com/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ "RHEL/CentOS 8" tab) +RUN microdnf install -y --disableplugin=subscription-manager \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + jq \ + tar \ + findutils + + + +RUN ln -s /usr/lib64/libsasl2.so.3 /usr/lib64/libsasl2.so.2 + + +# Set the required perms +RUN mkdir -p "${MMS_LOG_DIR}" \ + && chmod 0775 "${MMS_LOG_DIR}" \ + && mkdir -p /var/lib/mongodb-mms-automation \ + && chmod 0775 /var/lib/mongodb-mms-automation \ + && mkdir -p /data \ + && chmod 0775 /data \ + && mkdir -p /journal \ + && chmod 0775 /journal \ + && mkdir -p "${MMS_HOME}" \ + && chmod -R 0775 "${MMS_HOME}" + + + + +# USER needs to be set for this image to pass RedHat verification. Some customers have these requirements as well +# It does not matter what number it is, as long as it is set to something. +# However, OpenShift will run the container as a random user, +# and the number in this configuration is not relevant. 
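+# Illustrative note (values below are examples, not checked at build time):
+# under OpenShift's restricted SCC the container is typically started as an
+# arbitrary UID in the root group, which is why the directories above are
+# created group-writable (0775), e.g.
+#   $ id
+#   uid=1000620000 gid=0(root) groups=0(root)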
+USER 2000 + + +# The docker image doesn't have any scripts so by default does nothing +# The script will be copied in runtime from init containers and the operator is expected +# to override the COMMAND +ENTRYPOINT ["sleep infinity"] + + +COPY --from=base /data/licenses/mongodb-enterprise-database /licenses/mongodb-enterprise-database + + diff --git a/dockerfiles/mongodb-enterprise-database/2.0.2/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-database/2.0.2/ubi/Dockerfile index 6318d3c..3454bdc 100644 --- a/dockerfiles/mongodb-enterprise-database/2.0.2/ubi/Dockerfile +++ b/dockerfiles/mongodb-enterprise-database/2.0.2/ubi/Dockerfile @@ -49,7 +49,6 @@ RUN microdnf install -y --disableplugin=subscription-manager \ findutils - RUN ln -s /usr/lib64/libsasl2.so.3 /usr/lib64/libsasl2.so.2 diff --git a/dockerfiles/mongodb-enterprise-init-appdb/1.25.0/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-init-appdb/1.25.0/ubi/Dockerfile new file mode 100644 index 0000000..68ae7cc --- /dev/null +++ b/dockerfiles/mongodb-enterprise-init-appdb/1.25.0/ubi/Dockerfile @@ -0,0 +1,35 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version +LABEL name="MongoDB Enterprise Init AppDB" \ + version="mongodb-enterprise-init-appdb-${version}" \ + summary="MongoDB Enterprise AppDB Init Image" \ + description="Startup Scripts for MongoDB Enterprise Application Database for Ops Manager" \ + release="1" \ + vendor="MongoDB" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/readinessprobe /probes/readinessprobe +COPY --from=base /data/probe.sh /probes/probe.sh +COPY --from=base /data/scripts/ /scripts/ +COPY --from=base /data/licenses /licenses/ +COPY --from=base /data/version-upgrade-hook /probes/version-upgrade-hook + + +RUN microdnf update --nodocs \ + && microdnf -y install --nodocs tar gzip \ + && microdnf clean all + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz + + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] + + diff --git a/dockerfiles/mongodb-enterprise-init-database/1.25.0/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-init-database/1.25.0/ubi/Dockerfile new file mode 100644 index 0000000..eb48104 --- /dev/null +++ b/dockerfiles/mongodb-enterprise-init-database/1.25.0/ubi/Dockerfile @@ -0,0 +1,34 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ARG version +LABEL name="MongoDB Enterprise Init Database" \ + version="mongodb-enterprise-init-database-${version}" \ + summary="MongoDB Enterprise Database Init Image" \ + description="Startup Scripts for MongoDB Enterprise Database" \ + release="1" \ + vendor="MongoDB" \ + maintainer="support@mongodb.com" + +COPY --from=base /data/readinessprobe /probes/readinessprobe +COPY --from=base /data/probe.sh /probes/probe.sh +COPY --from=base /data/scripts/ /scripts/ +COPY --from=base /data/licenses /licenses/ + + +RUN microdnf update --nodocs \ + && microdnf -y install --nodocs tar gzip \ + && microdnf clean all + +COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz + + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", 
"/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] + + diff --git a/dockerfiles/mongodb-enterprise-init-ops-manager/1.25.0/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-init-ops-manager/1.25.0/ubi/Dockerfile new file mode 100644 index 0000000..4892054 --- /dev/null +++ b/dockerfiles/mongodb-enterprise-init-ops-manager/1.25.0/ubi/Dockerfile @@ -0,0 +1,26 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +LABEL name="MongoDB Enterprise Ops Manager Init" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="mongodb-enterprise-init-ops-manager-1.25.0" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Init Image" \ + description="Startup Scripts for MongoDB Enterprise Ops Manager" + + +COPY --from=base /data/scripts /scripts +COPY --from=base /data/licenses /licenses + + +RUN microdnf update --nodocs \ + && microdnf clean all + + +USER 2000 +ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] + + diff --git a/dockerfiles/mongodb-enterprise-operator/1.25.0/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-operator/1.25.0/ubi/Dockerfile new file mode 100644 index 0000000..eaecb49 --- /dev/null +++ b/dockerfiles/mongodb-enterprise-operator/1.25.0/ubi/Dockerfile @@ -0,0 +1,39 @@ +# +# Base Template Dockerfile for Operator Image. +# + +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Operator" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="1.25.0" \ + release="1" \ + summary="MongoDB Enterprise Operator Image" \ + description="MongoDB Enterprise Operator Image" + + +# Building an UBI-based image: https://red.ht/3n6b9y0 +RUN microdnf update \ + --disableplugin=subscription-manager \ + --disablerepo=* --enablerepo=ubi-8-appstream-rpms --enablerepo=ubi-8-baseos-rpms -y \ + && rm -rf /var/cache/yum + + + + +COPY --from=base /data/mongodb-enterprise-operator /usr/local/bin/mongodb-enterprise-operator +COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json +COPY --from=base /data/licenses /licenses/ + +USER 2000 + + + +ENTRYPOINT exec /usr/local/bin/mongodb-enterprise-operator + + diff --git a/dockerfiles/mongodb-enterprise-ops-manager/6.0.22/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-ops-manager/6.0.22/ubi/Dockerfile new file mode 100644 index 0000000..334d38c --- /dev/null +++ b/dockerfiles/mongodb-enterprise-ops-manager/6.0.22/ubi/Dockerfile @@ -0,0 +1,75 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Ops Manager" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="6.0.22" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Image" \ + description="MongoDB Enterprise Ops Manager" + + +ENV MMS_HOME /mongodb-ops-manager +ENV MMS_PROP_FILE ${MMS_HOME}/conf/conf-mms.properties +ENV MMS_CONF_FILE ${MMS_HOME}/conf/mms.conf +ENV MMS_LOG_DIR ${MMS_HOME}/logs +ENV MMS_TMP_DIR ${MMS_HOME}/tmp + +EXPOSE 8080 + +# OpsManager docker image needs to have the MongoDB dependencies because the +# backup daemon is running its database locally + +RUN microdnf install --disableplugin=subscription-manager -y \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + libpcap \ + lm_sensors-libs \ + net-snmp \ + 
net-snmp-agent-libs \ + openldap \ + openssl \ + tar \ + rpm-libs \ + net-tools \ + procps-ng \ + ncurses + + +COPY --from=base /data/licenses /licenses/ + + + +RUN curl --fail -L -o ops_manager.tar.gz https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-6.0.22.100.20231229T1643Z.tar.gz \ + && tar -xzf ops_manager.tar.gz \ + && rm ops_manager.tar.gz \ + && mv mongodb-mms* "${MMS_HOME}" + + +# permissions +RUN chmod -R 0777 "${MMS_LOG_DIR}" \ + && chmod -R 0777 "${MMS_TMP_DIR}" \ + && chmod -R 0775 "${MMS_HOME}/conf" \ + && chmod -R 0775 "${MMS_HOME}/jdk" \ + && mkdir "${MMS_HOME}/mongodb-releases/" \ + && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ + && chmod -R 0777 "${MMS_CONF_FILE}" \ + && chmod -R 0777 "${MMS_PROP_FILE}" + +# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. +# For now we need to move into the templates directory. +RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" + +USER 2000 + +# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) +ENTRYPOINT [ "sleep infinity" ] + + diff --git a/dockerfiles/mongodb-enterprise-ops-manager/6.0.23/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-ops-manager/6.0.23/ubi/Dockerfile new file mode 100644 index 0000000..bc12a48 --- /dev/null +++ b/dockerfiles/mongodb-enterprise-ops-manager/6.0.23/ubi/Dockerfile @@ -0,0 +1,77 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Ops Manager" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="6.0.23" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Image" \ + description="MongoDB Enterprise Ops Manager" + + +ENV MMS_HOME /mongodb-ops-manager +ENV MMS_PROP_FILE ${MMS_HOME}/conf/conf-mms.properties +ENV MMS_CONF_FILE ${MMS_HOME}/conf/mms.conf +ENV MMS_LOG_DIR ${MMS_HOME}/logs +ENV MMS_TMP_DIR ${MMS_HOME}/tmp + +EXPOSE 8080 + +# OpsManager docker image needs to have the MongoDB dependencies because the +# backup daemon is running its database locally + +RUN microdnf install --disableplugin=subscription-manager -y \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + libpcap \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + tar \ + rpm-libs \ + net-tools \ + procps-ng \ + ncurses + + +COPY --from=base /data/licenses /licenses/ + +COPY --from=base /data/scripts /opt/scripts + + + +RUN curl --fail -L -o ops_manager.tar.gz https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-6.0.23.100.20240402T1837Z.tar.gz \ + && tar -xzf ops_manager.tar.gz \ + && rm ops_manager.tar.gz \ + && mv mongodb-mms* "${MMS_HOME}" + + +# permissions +RUN chmod -R 0777 "${MMS_LOG_DIR}" \ + && chmod -R 0777 "${MMS_TMP_DIR}" \ + && chmod -R 0775 "${MMS_HOME}/conf" \ + && chmod -R 0775 "${MMS_HOME}/jdk" \ + && mkdir "${MMS_HOME}/mongodb-releases/" \ + && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ + && chmod -R 0777 "${MMS_CONF_FILE}" \ + && chmod -R 0777 "${MMS_PROP_FILE}" + +# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. +# For now we need to move into the templates directory. 
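+# Sketch of what docker-entry-point.sh presumably does with this template copy
+# (an assumption for illustration; that script ships separately, not in this
+# image): re-seed an empty config volume from the preserved template, e.g.
+#   cp -a "${MMS_HOME}/conf-template/." "${MMS_HOME}/conf/"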
+RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" + +USER 2000 + +# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) +ENTRYPOINT [ "sleep infinity" ] + + diff --git a/dockerfiles/mongodb-enterprise-ops-manager/7.0.0/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-ops-manager/7.0.0/ubi/Dockerfile new file mode 100644 index 0000000..39c6f65 --- /dev/null +++ b/dockerfiles/mongodb-enterprise-ops-manager/7.0.0/ubi/Dockerfile @@ -0,0 +1,75 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Ops Manager" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="7.0.0" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Image" \ + description="MongoDB Enterprise Ops Manager" + + +ENV MMS_HOME /mongodb-ops-manager +ENV MMS_PROP_FILE ${MMS_HOME}/conf/conf-mms.properties +ENV MMS_CONF_FILE ${MMS_HOME}/conf/mms.conf +ENV MMS_LOG_DIR ${MMS_HOME}/logs +ENV MMS_TMP_DIR ${MMS_HOME}/tmp + +EXPOSE 8080 + +# OpsManager docker image needs to have the MongoDB dependencies because the +# backup daemon is running its database locally + +RUN microdnf install --disableplugin=subscription-manager -y \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + libpcap \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + tar \ + rpm-libs \ + net-tools \ + procps-ng \ + ncurses + + +COPY --from=base /data/licenses /licenses/ + + + +RUN curl --fail -L -o ops_manager.tar.gz https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-7.0.0.500.20231218T1955Z.tar.gz \ + && tar -xzf ops_manager.tar.gz \ + && rm ops_manager.tar.gz \ + && mv mongodb-mms* "${MMS_HOME}" + + +# permissions +RUN chmod -R 0777 "${MMS_LOG_DIR}" \ + && chmod -R 0777 "${MMS_TMP_DIR}" \ + && chmod -R 0775 "${MMS_HOME}/conf" \ + && chmod -R 0775 "${MMS_HOME}/jdk" \ + && mkdir "${MMS_HOME}/mongodb-releases/" \ + && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ + && chmod -R 0777 "${MMS_CONF_FILE}" \ + && chmod -R 0777 "${MMS_PROP_FILE}" + +# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. +# For now we need to move into the templates directory. 
+RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" + +USER 2000 + +# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) +ENTRYPOINT [ "sleep infinity" ] + + diff --git a/dockerfiles/mongodb-enterprise-ops-manager/7.0.1/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-ops-manager/7.0.1/ubi/Dockerfile new file mode 100644 index 0000000..554d567 --- /dev/null +++ b/dockerfiles/mongodb-enterprise-ops-manager/7.0.1/ubi/Dockerfile @@ -0,0 +1,75 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Ops Manager" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="7.0.1" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Image" \ + description="MongoDB Enterprise Ops Manager" + + +ENV MMS_HOME /mongodb-ops-manager +ENV MMS_PROP_FILE ${MMS_HOME}/conf/conf-mms.properties +ENV MMS_CONF_FILE ${MMS_HOME}/conf/mms.conf +ENV MMS_LOG_DIR ${MMS_HOME}/logs +ENV MMS_TMP_DIR ${MMS_HOME}/tmp + +EXPOSE 8080 + +# OpsManager docker image needs to have the MongoDB dependencies because the +# backup daemon is running its database locally + +RUN microdnf install --disableplugin=subscription-manager -y \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + libpcap \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + tar \ + rpm-libs \ + net-tools \ + procps-ng \ + ncurses + + +COPY --from=base /data/licenses /licenses/ + + + +RUN curl --fail -L -o ops_manager.tar.gz https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-7.0.1.500.20240105T0224Z.tar.gz \ + && tar -xzf ops_manager.tar.gz \ + && rm ops_manager.tar.gz \ + && mv mongodb-mms* "${MMS_HOME}" + + +# permissions +RUN chmod -R 0777 "${MMS_LOG_DIR}" \ + && chmod -R 0777 "${MMS_TMP_DIR}" \ + && chmod -R 0775 "${MMS_HOME}/conf" \ + && chmod -R 0775 "${MMS_HOME}/jdk" \ + && mkdir "${MMS_HOME}/mongodb-releases/" \ + && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ + && chmod -R 0777 "${MMS_CONF_FILE}" \ + && chmod -R 0777 "${MMS_PROP_FILE}" + +# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. +# For now we need to move into the templates directory. 
+RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" + +USER 2000 + +# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) +ENTRYPOINT [ "sleep infinity" ] + + diff --git a/dockerfiles/mongodb-enterprise-ops-manager/7.0.2/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-ops-manager/7.0.2/ubi/Dockerfile new file mode 100644 index 0000000..8f57967 --- /dev/null +++ b/dockerfiles/mongodb-enterprise-ops-manager/7.0.2/ubi/Dockerfile @@ -0,0 +1,75 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Ops Manager" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="7.0.2" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Image" \ + description="MongoDB Enterprise Ops Manager" + + +ENV MMS_HOME /mongodb-ops-manager +ENV MMS_PROP_FILE ${MMS_HOME}/conf/conf-mms.properties +ENV MMS_CONF_FILE ${MMS_HOME}/conf/mms.conf +ENV MMS_LOG_DIR ${MMS_HOME}/logs +ENV MMS_TMP_DIR ${MMS_HOME}/tmp + +EXPOSE 8080 + +# OpsManager docker image needs to have the MongoDB dependencies because the +# backup daemon is running its database locally + +RUN microdnf install --disableplugin=subscription-manager -y \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + libpcap \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + tar \ + rpm-libs \ + net-tools \ + procps-ng \ + ncurses + + +COPY --from=base /data/licenses /licenses/ + + + +RUN curl --fail -L -o ops_manager.tar.gz https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-7.0.2.500.20240130T2116Z.tar.gz \ + && tar -xzf ops_manager.tar.gz \ + && rm ops_manager.tar.gz \ + && mv mongodb-mms* "${MMS_HOME}" + + +# permissions +RUN chmod -R 0777 "${MMS_LOG_DIR}" \ + && chmod -R 0777 "${MMS_TMP_DIR}" \ + && chmod -R 0775 "${MMS_HOME}/conf" \ + && chmod -R 0775 "${MMS_HOME}/jdk" \ + && mkdir "${MMS_HOME}/mongodb-releases/" \ + && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ + && chmod -R 0777 "${MMS_CONF_FILE}" \ + && chmod -R 0777 "${MMS_PROP_FILE}" + +# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. +# For now we need to move into the templates directory. 
+RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" + +USER 2000 + +# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) +ENTRYPOINT [ "sleep infinity" ] + + diff --git a/dockerfiles/mongodb-enterprise-ops-manager/7.0.3/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-ops-manager/7.0.3/ubi/Dockerfile new file mode 100644 index 0000000..6c4361c --- /dev/null +++ b/dockerfiles/mongodb-enterprise-ops-manager/7.0.3/ubi/Dockerfile @@ -0,0 +1,75 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Ops Manager" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="7.0.3" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Image" \ + description="MongoDB Enterprise Ops Manager" + + +ENV MMS_HOME /mongodb-ops-manager +ENV MMS_PROP_FILE ${MMS_HOME}/conf/conf-mms.properties +ENV MMS_CONF_FILE ${MMS_HOME}/conf/mms.conf +ENV MMS_LOG_DIR ${MMS_HOME}/logs +ENV MMS_TMP_DIR ${MMS_HOME}/tmp + +EXPOSE 8080 + +# OpsManager docker image needs to have the MongoDB dependencies because the +# backup daemon is running its database locally + +RUN microdnf install --disableplugin=subscription-manager -y \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + libpcap \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + tar \ + rpm-libs \ + net-tools \ + procps-ng \ + ncurses + + +COPY --from=base /data/licenses /licenses/ + + + +RUN curl --fail -L -o ops_manager.tar.gz https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-7.0.3.500.20240305T1921Z.tar.gz \ + && tar -xzf ops_manager.tar.gz \ + && rm ops_manager.tar.gz \ + && mv mongodb-mms* "${MMS_HOME}" + + +# permissions +RUN chmod -R 0777 "${MMS_LOG_DIR}" \ + && chmod -R 0777 "${MMS_TMP_DIR}" \ + && chmod -R 0775 "${MMS_HOME}/conf" \ + && chmod -R 0775 "${MMS_HOME}/jdk" \ + && mkdir "${MMS_HOME}/mongodb-releases/" \ + && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ + && chmod -R 0777 "${MMS_CONF_FILE}" \ + && chmod -R 0777 "${MMS_PROP_FILE}" + +# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. +# For now we need to move into the templates directory. 
+RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" + +USER 2000 + +# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) +ENTRYPOINT [ "sleep infinity" ] + + diff --git a/dockerfiles/mongodb-enterprise-ops-manager/7.0.4/ubi/Dockerfile b/dockerfiles/mongodb-enterprise-ops-manager/7.0.4/ubi/Dockerfile new file mode 100644 index 0000000..dc59a3d --- /dev/null +++ b/dockerfiles/mongodb-enterprise-ops-manager/7.0.4/ubi/Dockerfile @@ -0,0 +1,77 @@ +ARG imagebase +FROM ${imagebase} as base + +FROM registry.access.redhat.com/ubi8/ubi-minimal + + +LABEL name="MongoDB Enterprise Ops Manager" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="7.0.4" \ + release="1" \ + summary="MongoDB Enterprise Ops Manager Image" \ + description="MongoDB Enterprise Ops Manager" + + +ENV MMS_HOME /mongodb-ops-manager +ENV MMS_PROP_FILE ${MMS_HOME}/conf/conf-mms.properties +ENV MMS_CONF_FILE ${MMS_HOME}/conf/mms.conf +ENV MMS_LOG_DIR ${MMS_HOME}/logs +ENV MMS_TMP_DIR ${MMS_HOME}/tmp + +EXPOSE 8080 + +# OpsManager docker image needs to have the MongoDB dependencies because the +# backup daemon is running its database locally + +RUN microdnf install --disableplugin=subscription-manager -y \ + cyrus-sasl \ + cyrus-sasl-gssapi \ + cyrus-sasl-plain \ + krb5-libs \ + libcurl \ + libpcap \ + lm_sensors-libs \ + net-snmp \ + net-snmp-agent-libs \ + openldap \ + openssl \ + tar \ + rpm-libs \ + net-tools \ + procps-ng \ + ncurses + + +COPY --from=base /data/licenses /licenses/ + +COPY --from=base /data/scripts /opt/scripts + + + +RUN curl --fail -L -o ops_manager.tar.gz https://downloads.mongodb.com/on-prem-mms/tar/mongodb-mms-7.0.4.500.20240405T1431Z.tar.gz \ + && tar -xzf ops_manager.tar.gz \ + && rm ops_manager.tar.gz \ + && mv mongodb-mms* "${MMS_HOME}" + + +# permissions +RUN chmod -R 0777 "${MMS_LOG_DIR}" \ + && chmod -R 0777 "${MMS_TMP_DIR}" \ + && chmod -R 0775 "${MMS_HOME}/conf" \ + && chmod -R 0775 "${MMS_HOME}/jdk" \ + && mkdir "${MMS_HOME}/mongodb-releases/" \ + && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ + && chmod -R 0777 "${MMS_CONF_FILE}" \ + && chmod -R 0777 "${MMS_PROP_FILE}" + +# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. +# For now we need to move into the templates directory. 
+RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" + +USER 2000 + +# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) +ENTRYPOINT [ "sleep infinity" ] + + diff --git a/mongodb-enterprise-multi-cluster.yaml b/mongodb-enterprise-multi-cluster.yaml index 60588af..4220886 100644 --- a/mongodb-enterprise-multi-cluster.yaml +++ b/mongodb-enterprise-multi-cluster.yaml @@ -1,38 +1,4 @@ --- -# Source: enterprise-operator/templates/database-roles.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: mongodb-enterprise-appdb - namespace: mongodb ---- -# Source: enterprise-operator/templates/database-roles.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: mongodb-enterprise-database-pods - namespace: mongodb ---- -# Source: enterprise-operator/templates/database-roles.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: mongodb-enterprise-ops-manager - namespace: mongodb ---- -# Source: enterprise-operator/templates/operator.yaml -apiVersion: v1 -kind: ConfigMap -data: - MDB_CLUSTER_1_FULL_NAME: "${MDB_CLUSTER_1_FULL_NAME}" - MDB_CLUSTER_2_FULL_NAME: "${MDB_CLUSTER_2_FULL_NAME}" - MDB_CLUSTER_3_FULL_NAME: "${MDB_CLUSTER_3_FULL_NAME}" -metadata: - namespace: mongodb - name: mongodb-enterprise-operator-member-list - labels: - multi-cluster: "true" ---- # Source: enterprise-operator/templates/operator-roles.yaml kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 @@ -74,6 +40,111 @@ subjects: name: mongodb-enterprise-operator-multi-cluster namespace: mongodb --- +# Source: enterprise-operator/templates/operator-roles.yaml +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-enterprise-operator-multi-cluster + namespace: mongodb +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - create + - update + - delete + - apiGroups: + - "" + resources: + - secrets + - configmaps + verbs: + - get + - list + - create + - update + - delete + - watch + - apiGroups: + - apps + resources: + - statefulsets + verbs: + - create + - get + - list + - watch + - delete + - update + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - delete + - deletecollection + - apiGroups: + - mongodb.com + verbs: + - "*" + resources: + - mongodb + - mongodb/finalizers + - mongodbusers + - mongodbusers/finalizers + - opsmanagers + - opsmanagers/finalizers + - mongodbmulticluster + - mongodbmulticluster/finalizers + - mongodb/status + - mongodbusers/status + - opsmanagers/status + - mongodbmulticluster/status +--- +# Source: enterprise-operator/templates/operator-roles.yaml +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-enterprise-operator-multi-cluster + namespace: mongodb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: mongodb-enterprise-operator-multi-cluster +subjects: + - kind: ServiceAccount + name: mongodb-enterprise-operator-multi-cluster + namespace: mongodb +--- +# Source: enterprise-operator/templates/database-roles.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: mongodb-enterprise-appdb + namespace: mongodb +--- +# Source: enterprise-operator/templates/database-roles.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: mongodb-enterprise-database-pods + namespace: mongodb +--- +# Source: enterprise-operator/templates/database-roles.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: mongodb-enterprise-ops-manager + namespace: mongodb 
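+# Illustrative verification (not rendered by the chart): after applying this
+# manifest, the namespaced permissions granted by the Role/RoleBinding above
+# can be checked by impersonating the operator's ServiceAccount, e.g.
+#   kubectl auth can-i create statefulsets -n mongodb \
+#     --as=system:serviceaccount:mongodb:mongodb-enterprise-operator-multi-cluster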
+--- # Source: enterprise-operator/templates/database-roles.yaml kind: Role apiVersion: rbac.authorization.k8s.io/v1 @@ -111,6 +182,13 @@ subjects: name: mongodb-enterprise-appdb namespace: mongodb --- +# Source: enterprise-operator/templates/operator-sa.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: mongodb-enterprise-operator-multi-cluster + namespace: mongodb +--- # Source: enterprise-operator/templates/operator.yaml apiVersion: apps/v1 kind: Deployment @@ -137,7 +215,7 @@ spec: runAsUser: 2000 containers: - name: mongodb-enterprise-operator-multi-cluster - image: "quay.io/mongodb/mongodb-enterprise-operator-ubi:1.24.0" + image: "quay.io/mongodb/mongodb-enterprise-operator-ubi:1.25.0" imagePullPolicy: Always args: - -watch-resource=mongodb @@ -159,6 +237,8 @@ spec: env: - name: OPERATOR_ENV value: prod + - name: MDB_DEFAULT_ARCHITECTURE + value: non-static - name: WATCH_NAMESPACE valueFrom: fieldRef: @@ -177,29 +257,33 @@ spec: - name: INIT_DATABASE_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-init-database-ubi - name: INIT_DATABASE_VERSION - value: 1.24.0 + value: 1.25.0 - name: DATABASE_VERSION - value: 1.24.0 + value: 1.25.0 # Ops Manager - name: OPS_MANAGER_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi - name: INIT_OPS_MANAGER_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-init-ops-manager-ubi - name: INIT_OPS_MANAGER_VERSION - value: 1.24.0 + value: 1.25.0 # AppDB - name: INIT_APPDB_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-init-appdb-ubi - name: INIT_APPDB_VERSION - value: 1.24.0 + value: 1.25.0 - name: OPS_MANAGER_IMAGE_PULL_POLICY value: Always - name: AGENT_IMAGE value: "quay.io/mongodb/mongodb-agent-ubi:12.0.29.7785-1" + - name: MDB_AGENT_IMAGE_REPOSITORY + value: "quay.io/mongodb/mongodb-agent-ubi" - name: MONGODB_IMAGE - value: mongodb-enterprise-appdb-database-ubi + value: mongodb-enterprise-server - name: MONGODB_REPO_URL value: quay.io/mongodb + - name: MDB_IMAGE_TYPE + value: ubi8 - name: PERFORM_FAILOVER value: "true" volumes: diff --git a/mongodb-enterprise-openshift.yaml b/mongodb-enterprise-openshift.yaml index fc9a153..52f9600 100644 --- a/mongodb-enterprise-openshift.yaml +++ b/mongodb-enterprise-openshift.yaml @@ -1,53 +1,5 @@ --- # Source: enterprise-operator/templates/operator-roles.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: mongodb-enterprise-operator - namespace: mongodb ---- -# Source: enterprise-operator/templates/operator-roles.yaml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: mongodb-enterprise-operator-mongodb-webhook -rules: - - apiGroups: - - "admissionregistration.k8s.io" - resources: - - validatingwebhookconfigurations - verbs: - - get - - create - - update - - delete - - apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - watch - - create - - update - - delete ---- -# Source: enterprise-operator/templates/operator-roles.yaml -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: mongodb-enterprise-operator-mongodb-webhook-binding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: mongodb-enterprise-operator-mongodb-webhook -subjects: - - kind: ServiceAccount - name: mongodb-enterprise-operator - namespace: mongodb ---- -# Source: enterprise-operator/templates/operator-roles.yaml kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -55,7 +7,7 @@ metadata: namespace: mongodb rules: - apiGroups: - - "" + - '' resources: - 
services verbs: @@ -66,7 +18,7 @@ rules: - update - delete - apiGroups: - - "" + - '' resources: - secrets - configmaps @@ -89,7 +41,7 @@ rules: - delete - update - apiGroups: - - "" + - '' resources: - pods verbs: @@ -101,11 +53,12 @@ rules: - apiGroups: - mongodb.com verbs: - - "*" + - '*' resources: - mongodb - mongodb/finalizers - mongodbusers + - mongodbusers/finalizers - opsmanagers - opsmanagers/finalizers - mongodbmulticluster @@ -129,11 +82,6 @@ subjects: - kind: ServiceAccount name: mongodb-enterprise-operator namespace: mongodb - -# This ClusterRoleBinding is necessary in order to use validating -# webhooks—these will prevent you from applying a variety of invalid resource -# definitions. The validating webhooks are optional so this can be removed if -# necessary. --- # Source: enterprise-operator/templates/database-roles.yaml apiVersion: v1 @@ -164,13 +112,13 @@ metadata: namespace: mongodb rules: - apiGroups: - - "" + - '' resources: - secrets verbs: - get - apiGroups: - - "" + - '' resources: - pods verbs: @@ -216,7 +164,7 @@ spec: serviceAccountName: mongodb-enterprise-operator containers: - name: mongodb-enterprise-operator - image: "quay.io/mongodb/mongodb-enterprise-operator-ubi:1.24.0" + image: "quay.io/mongodb/mongodb-enterprise-operator-ubi:1.25.0" imagePullPolicy: Always args: - -watch-resource=mongodb @@ -234,6 +182,8 @@ spec: env: - name: OPERATOR_ENV value: prod + - name: MDB_DEFAULT_ARCHITECTURE + value: non-static - name: WATCH_NAMESPACE valueFrom: fieldRef: @@ -243,7 +193,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: MANAGED_SECURITY_CONTEXT - value: "true" + value: 'true' - name: CLUSTER_CLIENT_TIMEOUT value: "10" - name: IMAGE_PULL_POLICY @@ -254,25 +204,27 @@ spec: - name: INIT_DATABASE_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-init-database-ubi - name: INIT_DATABASE_VERSION - value: 1.24.0 + value: 1.25.0 - name: DATABASE_VERSION - value: 1.24.0 + value: 1.25.0 # Ops Manager - name: OPS_MANAGER_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi - name: INIT_OPS_MANAGER_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-init-ops-manager-ubi - name: INIT_OPS_MANAGER_VERSION - value: 1.24.0 + value: 1.25.0 # AppDB - name: INIT_APPDB_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-init-appdb-ubi - name: INIT_APPDB_VERSION - value: 1.24.0 + value: 1.25.0 - name: OPS_MANAGER_IMAGE_PULL_POLICY value: Always - name: AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:12.0.29.7785-1" + value: "quay.io/mongodb/mongodb-agent-ubi:107.0.0.8502-1" + - name: MDB_AGENT_IMAGE_REPOSITORY + value: "quay.io/mongodb/mongodb-agent-ubi" - name: MONGODB_IMAGE value: mongodb-enterprise-server - name: MONGODB_REPO_URL @@ -280,19 +232,37 @@ spec: - name: MDB_IMAGE_TYPE value: ubi8 - name: PERFORM_FAILOVER - value: "true" - - name: RELATED_IMAGE_MONGODB_ENTERPRISE_DATABASE_IMAGE_1_24_0 - value: "quay.io/mongodb/mongodb-enterprise-database-ubi:1.24.0" - - name: RELATED_IMAGE_INIT_DATABASE_IMAGE_REPOSITORY_1_24_0 - value: "quay.io/mongodb/mongodb-enterprise-init-database-ubi:1.24.0" - - name: RELATED_IMAGE_INIT_OPS_MANAGER_IMAGE_REPOSITORY_1_24_0 - value: "quay.io/mongodb/mongodb-enterprise-init-ops-manager-ubi:1.24.0" - - name: RELATED_IMAGE_INIT_APPDB_IMAGE_REPOSITORY_1_24_0 - value: "quay.io/mongodb/mongodb-enterprise-init-appdb-ubi:1.24.0" - - name: RELATED_IMAGE_AGENT_IMAGE_11_12_0_7388_1 - value: "quay.io/mongodb/mongodb-agent-ubi:11.12.0.7388-1" - - name: RELATED_IMAGE_AGENT_IMAGE_12_0_4_7554_1 - value: 
"quay.io/mongodb/mongodb-agent-ubi:12.0.4.7554-1" + value: 'true' + - name: MDB_WEBHOOK_REGISTER_CONFIGURATION + value: "false" + - name: RELATED_IMAGE_MONGODB_ENTERPRISE_DATABASE_IMAGE_1_25_0 + value: "quay.io/mongodb/mongodb-enterprise-database-ubi:1.25.0" + - name: RELATED_IMAGE_INIT_DATABASE_IMAGE_REPOSITORY_1_25_0 + value: "quay.io/mongodb/mongodb-enterprise-init-database-ubi:1.25.0" + - name: RELATED_IMAGE_INIT_OPS_MANAGER_IMAGE_REPOSITORY_1_25_0 + value: "quay.io/mongodb/mongodb-enterprise-init-ops-manager-ubi:1.25.0" + - name: RELATED_IMAGE_INIT_APPDB_IMAGE_REPOSITORY_1_25_0 + value: "quay.io/mongodb/mongodb-enterprise-init-appdb-ubi:1.25.0" + - name: RELATED_IMAGE_AGENT_IMAGE_107_0_0_8465_1 + value: "quay.io/mongodb/mongodb-agent-ubi:107.0.0.8465-1" + - name: RELATED_IMAGE_AGENT_IMAGE_107_0_0_8502_1 + value: "quay.io/mongodb/mongodb-agent-ubi:107.0.0.8502-1" + - name: RELATED_IMAGE_AGENT_IMAGE_107_0_1_8507_1 + value: "quay.io/mongodb/mongodb-agent-ubi:107.0.1.8507-1" + - name: RELATED_IMAGE_AGENT_IMAGE_107_0_1_8507_1_1_25_0 + value: "quay.io/mongodb/mongodb-agent-ubi:107.0.1.8507-1_1.25.0" + - name: RELATED_IMAGE_AGENT_IMAGE_107_0_2_8531_1 + value: "quay.io/mongodb/mongodb-agent-ubi:107.0.2.8531-1" + - name: RELATED_IMAGE_AGENT_IMAGE_107_0_2_8531_1_1_25_0 + value: "quay.io/mongodb/mongodb-agent-ubi:107.0.2.8531-1_1.25.0" + - name: RELATED_IMAGE_AGENT_IMAGE_107_0_3_8550_1 + value: "quay.io/mongodb/mongodb-agent-ubi:107.0.3.8550-1" + - name: RELATED_IMAGE_AGENT_IMAGE_107_0_3_8550_1_1_25_0 + value: "quay.io/mongodb/mongodb-agent-ubi:107.0.3.8550-1_1.25.0" + - name: RELATED_IMAGE_AGENT_IMAGE_107_0_4_8567_1 + value: "quay.io/mongodb/mongodb-agent-ubi:107.0.4.8567-1" + - name: RELATED_IMAGE_AGENT_IMAGE_107_0_4_8567_1_1_25_0 + value: "quay.io/mongodb/mongodb-agent-ubi:107.0.4.8567-1_1.25.0" - name: RELATED_IMAGE_AGENT_IMAGE_12_0_15_7646_1 value: "quay.io/mongodb/mongodb-agent-ubi:12.0.15.7646-1" - name: RELATED_IMAGE_AGENT_IMAGE_12_0_21_7698_1 @@ -305,8 +275,22 @@ spec: value: "quay.io/mongodb/mongodb-agent-ubi:12.0.28.7763-1" - name: RELATED_IMAGE_AGENT_IMAGE_12_0_29_7785_1 value: "quay.io/mongodb/mongodb-agent-ubi:12.0.29.7785-1" - - name: RELATED_IMAGE_AGENT_IMAGE_107_0_0_8465_1 - value: "quay.io/mongodb/mongodb-agent-ubi:107.0.0.8465-1" + - name: RELATED_IMAGE_AGENT_IMAGE_12_0_29_7785_1_1_25_0 + value: "quay.io/mongodb/mongodb-agent-ubi:12.0.29.7785-1_1.25.0" + - name: RELATED_IMAGE_AGENT_IMAGE_12_0_30_7791_1 + value: "quay.io/mongodb/mongodb-agent-ubi:12.0.30.7791-1" + - name: RELATED_IMAGE_AGENT_IMAGE_12_0_30_7791_1_1_25_0 + value: "quay.io/mongodb/mongodb-agent-ubi:12.0.30.7791-1_1.25.0" + - name: RELATED_IMAGE_AGENT_IMAGE_12_0_31_7825_1 + value: "quay.io/mongodb/mongodb-agent-ubi:12.0.31.7825-1" + - name: RELATED_IMAGE_AGENT_IMAGE_12_0_31_7825_1_1_25_0 + value: "quay.io/mongodb/mongodb-agent-ubi:12.0.31.7825-1_1.25.0" + - name: RELATED_IMAGE_AGENT_IMAGE_12_0_4_7554_1 + value: "quay.io/mongodb/mongodb-agent-ubi:12.0.4.7554-1" + - name: RELATED_IMAGE_AGENT_IMAGE_13_10_0_8620_1 + value: "quay.io/mongodb/mongodb-agent-ubi:13.10.0.8620-1" + - name: RELATED_IMAGE_AGENT_IMAGE_13_15_0_8788_1_1_25_0 + value: "quay.io/mongodb/mongodb-agent-ubi:13.15.0.8788-1_1.25.0" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_0 value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.0" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_1 @@ -351,7 +335,21 @@ spec: value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.20" - name: 
RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_21 value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.21" - # since the official server images end with a different suffix we can re-use the same $mongodbImageEnv + - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_22 + value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.22" + - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_23 + value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.23" + - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_7_0_0 + value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:7.0.0" + - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_7_0_1 + value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:7.0.1" + - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_7_0_2 + value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:7.0.2" + - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_7_0_3 + value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:7.0.3" + - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_7_0_4 + value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:7.0.4" + # since the official server images end with a different suffix we can re-use the same $mongodbImageEnv - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_0_ubi8 value: "quay.io/mongodb/mongodb-enterprise-server:4.4.0-ubi8" - name: RELATED_IMAGE_MONGODB_IMAGE_4_4_1_ubi8 @@ -446,7 +444,7 @@ spec: value: "quay.io/mongodb/mongodb-enterprise-server:6.0.4-ubi8" - name: RELATED_IMAGE_MONGODB_IMAGE_6_0_5_ubi8 value: "quay.io/mongodb/mongodb-enterprise-server:6.0.5-ubi8" - # mongodbLegacyAppDb will be deleted in 1.23 release + # mongodbLegacyAppDb will be deleted in 1.23 release - name: RELATED_IMAGE_MONGODB_IMAGE_4_2_11_ent value: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:4.2.11-ent" - name: RELATED_IMAGE_MONGODB_IMAGE_4_2_2_ent diff --git a/mongodb-enterprise.yaml b/mongodb-enterprise.yaml index 1cb2efa..bbf3f81 100644 --- a/mongodb-enterprise.yaml +++ b/mongodb-enterprise.yaml @@ -1,12 +1,5 @@ --- # Source: enterprise-operator/templates/operator-roles.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: mongodb-enterprise-operator - namespace: mongodb ---- -# Source: enterprise-operator/templates/operator-roles.yaml kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -106,6 +99,7 @@ rules: - mongodb - mongodb/finalizers - mongodbusers + - mongodbusers/finalizers - opsmanagers - opsmanagers/finalizers - mongodbmulticluster @@ -129,11 +123,6 @@ subjects: - kind: ServiceAccount name: mongodb-enterprise-operator namespace: mongodb - -# This ClusterRoleBinding is necessary in order to use validating -# webhooks—these will prevent you from applying a variety of invalid resource -# definitions. The validating webhooks are optional so this can be removed if -# necessary. 
--- # Source: enterprise-operator/templates/database-roles.yaml apiVersion: v1 @@ -193,6 +182,13 @@ subjects: name: mongodb-enterprise-appdb namespace: mongodb --- +# Source: enterprise-operator/templates/operator-sa.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: mongodb-enterprise-operator + namespace: mongodb +--- # Source: enterprise-operator/templates/operator.yaml apiVersion: apps/v1 kind: Deployment @@ -219,7 +215,7 @@ spec: runAsUser: 2000 containers: - name: mongodb-enterprise-operator - image: "quay.io/mongodb/mongodb-enterprise-operator-ubi:1.24.0" + image: "quay.io/mongodb/mongodb-enterprise-operator-ubi:1.25.0" imagePullPolicy: Always args: - -watch-resource=mongodb @@ -237,6 +233,8 @@ spec: env: - name: OPERATOR_ENV value: prod + - name: MDB_DEFAULT_ARCHITECTURE + value: non-static - name: WATCH_NAMESPACE valueFrom: fieldRef: @@ -255,25 +253,27 @@ spec: - name: INIT_DATABASE_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-init-database-ubi - name: INIT_DATABASE_VERSION - value: 1.24.0 + value: 1.25.0 - name: DATABASE_VERSION - value: 1.24.0 + value: 1.25.0 # Ops Manager - name: OPS_MANAGER_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi - name: INIT_OPS_MANAGER_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-init-ops-manager-ubi - name: INIT_OPS_MANAGER_VERSION - value: 1.24.0 + value: 1.25.0 # AppDB - name: INIT_APPDB_IMAGE_REPOSITORY value: quay.io/mongodb/mongodb-enterprise-init-appdb-ubi - name: INIT_APPDB_VERSION - value: 1.24.0 + value: 1.25.0 - name: OPS_MANAGER_IMAGE_PULL_POLICY value: Always - name: AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:12.0.29.7785-1" + value: "quay.io/mongodb/mongodb-agent-ubi:107.0.0.8502-1" + - name: MDB_AGENT_IMAGE_REPOSITORY + value: "quay.io/mongodb/mongodb-agent-ubi" - name: MONGODB_IMAGE value: mongodb-enterprise-server - name: MONGODB_REPO_URL diff --git a/samples/multi-cluster/install_istio_separate_network.sh b/samples/multi-cluster/install_istio_separate_network.sh new file mode 100755 index 0000000..e84b91b --- /dev/null +++ b/samples/multi-cluster/install_istio_separate_network.sh @@ -0,0 +1,221 @@ +#!/bin/bash + +# This script is an adjusted version of the official Istio guide: +# https://istio.io/latest/docs/setup/install/multicluster/multi-primary_multi-network/ +# The script requires setting the following env variables: +# - CTX_CLUSTER1 +# - CTX_CLUSTER2 +# - CTX_CLUSTER3 + +set -eux + +export ISTIO_VERSION=${ISTIO_VERSION:-1.20.2} + +if [[ ! 
-d istio-${ISTIO_VERSION} ]]; then + # download Istio under the path + curl -L https://istio.io/downloadIstio | ISTIO_VERSION=${ISTIO_VERSION} sh - +fi + +# checks if external IP has been assigned to a service object, in our case we are interested in east-west gateway +function_check_external_ip_assigned() { + while : ; do + ip=$(kubectl --context="$1" get svc istio-eastwestgateway -n istio-system --output jsonpath='{.status.loadBalancer.ingress[0].ip}') + if [ -n "$ip" ] + then + echo "external ip assigned $ip" + break + else + echo "waiting for external ip to be assigned" + fi + sleep 1 +done +} + +cd istio-${ISTIO_VERSION} + +bin/istioctl uninstall --context="${CTX_CLUSTER1}" --purge -y +bin/istioctl uninstall --context="${CTX_CLUSTER2}" --purge -y +bin/istioctl uninstall --context="${CTX_CLUSTER3}" --purge -y + +kubectl --context="${CTX_CLUSTER1}" delete ns istio-system || true +kubectl --context="${CTX_CLUSTER2}" delete ns istio-system || true +kubectl --context="${CTX_CLUSTER3}" delete ns istio-system || true + +mkdir -p certs +pushd certs + +# create root trust for the clusters +make -f ../tools/certs/Makefile.selfsigned.mk root-ca +make -f ../tools/certs/Makefile.selfsigned.mk ${CTX_CLUSTER1}-cacerts +make -f ../tools/certs/Makefile.selfsigned.mk ${CTX_CLUSTER2}-cacerts +make -f ../tools/certs/Makefile.selfsigned.mk ${CTX_CLUSTER3}-cacerts + +kubectl --context="${CTX_CLUSTER1}" create ns istio-system +kubectl --context="${CTX_CLUSTER1}" create secret generic cacerts -n istio-system \ + --from-file=${CTX_CLUSTER1}/ca-cert.pem \ + --from-file=${CTX_CLUSTER1}/ca-key.pem \ + --from-file=${CTX_CLUSTER1}/root-cert.pem \ + --from-file=${CTX_CLUSTER1}/cert-chain.pem + +kubectl --context="${CTX_CLUSTER2}" create ns istio-system +kubectl --context="${CTX_CLUSTER2}" create secret generic cacerts -n istio-system \ + --from-file=${CTX_CLUSTER2}/ca-cert.pem \ + --from-file=${CTX_CLUSTER2}/ca-key.pem \ + --from-file=${CTX_CLUSTER2}/root-cert.pem \ + --from-file=${CTX_CLUSTER2}/cert-chain.pem + +kubectl --context="${CTX_CLUSTER3}" create ns istio-system +kubectl --context="${CTX_CLUSTER3}" create secret generic cacerts -n istio-system \ + --from-file=${CTX_CLUSTER3}/ca-cert.pem \ + --from-file=${CTX_CLUSTER3}/ca-key.pem \ + --from-file=${CTX_CLUSTER3}/root-cert.pem \ + --from-file=${CTX_CLUSTER3}/cert-chain.pem +popd + +# label namespace in cluster1 +kubectl --context="${CTX_CLUSTER1}" get namespace istio-system && \ + kubectl --context="${CTX_CLUSTER1}" label namespace istio-system topology.istio.io/network=network1 + +cat < cluster1.yaml +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +spec: + meshConfig: + defaultConfig: + terminationDrainDuration: 30s + proxyMetadata: + ISTIO_META_DNS_AUTO_ALLOCATE: "true" + ISTIO_META_DNS_CAPTURE: "true" + values: + global: + meshID: mesh1 + multiCluster: + clusterName: cluster1 + network: network1 +EOF +bin/istioctl install --context="${CTX_CLUSTER1}" -f cluster1.yaml -y +samples/multicluster/gen-eastwest-gateway.sh \ + --mesh mesh1 --cluster cluster1 --network network1 | \ + bin/istioctl --context="${CTX_CLUSTER1}" install -y -f - + + +# check if external IP is assigned to east-west gateway in cluster1 +function_check_external_ip_assigned "${CTX_CLUSTER1}" + + +# expose services in cluster1 +kubectl --context="${CTX_CLUSTER1}" apply -n istio-system -f \ + samples/multicluster/expose-services.yaml + + +kubectl --context="${CTX_CLUSTER2}" get namespace istio-system && \ + kubectl --context="${CTX_CLUSTER2}" label namespace istio-system 
+kubectl --context="${CTX_CLUSTER2}" get namespace istio-system && \
+  kubectl --context="${CTX_CLUSTER2}" label namespace istio-system topology.istio.io/network=network2
+
+cat <<EOF > cluster2.yaml
+apiVersion: install.istio.io/v1alpha1
+kind: IstioOperator
+spec:
+  meshConfig:
+    defaultConfig:
+      terminationDrainDuration: 30s
+      proxyMetadata:
+        ISTIO_META_DNS_AUTO_ALLOCATE: "true"
+        ISTIO_META_DNS_CAPTURE: "true"
+  values:
+    global:
+      meshID: mesh1
+      multiCluster:
+        clusterName: cluster2
+      network: network2
+EOF
+
+bin/istioctl install --context="${CTX_CLUSTER2}" -f cluster2.yaml -y
+
+samples/multicluster/gen-eastwest-gateway.sh \
+  --mesh mesh1 --cluster cluster2 --network network2 | \
+  bin/istioctl --context="${CTX_CLUSTER2}" install -y -f -
+
+# check if external IP is assigned to east-west gateway in cluster2
+function_check_external_ip_assigned "${CTX_CLUSTER2}"
+
+kubectl --context="${CTX_CLUSTER2}" apply -n istio-system -f \
+  samples/multicluster/expose-services.yaml
+
+# cluster3
+kubectl --context="${CTX_CLUSTER3}" get namespace istio-system && \
+  kubectl --context="${CTX_CLUSTER3}" label namespace istio-system topology.istio.io/network=network3
+
+cat <<EOF > cluster3.yaml
+apiVersion: install.istio.io/v1alpha1
+kind: IstioOperator
+spec:
+  meshConfig:
+    defaultConfig:
+      terminationDrainDuration: 30s
+      proxyMetadata:
+        ISTIO_META_DNS_AUTO_ALLOCATE: "true"
+        ISTIO_META_DNS_CAPTURE: "true"
+  values:
+    global:
+      meshID: mesh1
+      multiCluster:
+        clusterName: cluster3
+      network: network3
+EOF
+
+bin/istioctl install --context="${CTX_CLUSTER3}" -f cluster3.yaml -y
+
+samples/multicluster/gen-eastwest-gateway.sh \
+  --mesh mesh1 --cluster cluster3 --network network3 | \
+  bin/istioctl --context="${CTX_CLUSTER3}" install -y -f -
+
+# check if external IP is assigned to east-west gateway in cluster3
+function_check_external_ip_assigned "${CTX_CLUSTER3}"
+
+kubectl --context="${CTX_CLUSTER3}" apply -n istio-system -f \
+  samples/multicluster/expose-services.yaml
+
+# enable endpoint discovery
+bin/istioctl create-remote-secret \
+  --context="${CTX_CLUSTER1}" \
+  -n istio-system \
+  --name=cluster1 | \
+  kubectl apply -f - --context="${CTX_CLUSTER2}"
+
+bin/istioctl create-remote-secret \
+  --context="${CTX_CLUSTER1}" \
+  -n istio-system \
+  --name=cluster1 | \
+  kubectl apply -f - --context="${CTX_CLUSTER3}"
+
+bin/istioctl create-remote-secret \
+  --context="${CTX_CLUSTER2}" \
+  -n istio-system \
+  --name=cluster2 | \
+  kubectl apply -f - --context="${CTX_CLUSTER1}"
+
+bin/istioctl create-remote-secret \
+  --context="${CTX_CLUSTER2}" \
+  -n istio-system \
+  --name=cluster2 | \
+  kubectl apply -f - --context="${CTX_CLUSTER3}"
+
+bin/istioctl create-remote-secret \
+  --context="${CTX_CLUSTER3}" \
+  -n istio-system \
+  --name=cluster3 | \
+  kubectl apply -f - --context="${CTX_CLUSTER1}"
+
+bin/istioctl create-remote-secret \
+  --context="${CTX_CLUSTER3}" \
+  -n istio-system \
+  --name=cluster3 | \
+  kubectl apply -f - --context="${CTX_CLUSTER2}"
+
+# cleanup: optionally delete the istio repo and the generated IstioOperator configs
+cd ..
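+
+# optional sanity check (subcommand assumed to be available in this istioctl
+# version): list the remote clusters each istiod discovered via the secrets above:
+#   istio-${ISTIO_VERSION}/bin/istioctl remote-clusters --context="${CTX_CLUSTER1}"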
+#rm -r istio-${ISTIO_VERSION} +#rm -f cluster1.yaml cluster2.yaml cluster3.yaml diff --git a/samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_0.sh b/samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_0.sh new file mode 100644 index 0000000..f5c8f88 --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_0.sh @@ -0,0 +1,5 @@ +gcloud container clusters create "${K8S_CLUSTER_0}" \ + --zone="${K8S_CLUSTER_0_ZONE}" \ + --num-nodes="${K8S_CLUSTER_0_NUMBER_OF_NODES}" \ + --machine-type "${K8S_CLUSTER_0_MACHINE_TYPE}" \ + ${GKE_SPOT_INSTANCES_SWITCH:-""} diff --git a/samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_1.sh b/samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_1.sh new file mode 100644 index 0000000..a343271 --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_1.sh @@ -0,0 +1,5 @@ +gcloud container clusters create "${K8S_CLUSTER_1}" \ + --zone="${K8S_CLUSTER_1_ZONE}" \ + --num-nodes="${K8S_CLUSTER_1_NUMBER_OF_NODES}" \ + --machine-type "${K8S_CLUSTER_1_MACHINE_TYPE}" \ + ${GKE_SPOT_INSTANCES_SWITCH:-""} diff --git a/samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_2.sh b/samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_2.sh new file mode 100644 index 0000000..aebf13d --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_2.sh @@ -0,0 +1,5 @@ +gcloud container clusters create "${K8S_CLUSTER_2}" \ + --zone="${K8S_CLUSTER_2_ZONE}" \ + --num-nodes="${K8S_CLUSTER_2_NUMBER_OF_NODES}" \ + --machine-type "${K8S_CLUSTER_2_MACHINE_TYPE}" \ + ${GKE_SPOT_INSTANCES_SWITCH:-""} diff --git a/samples/ops-manager-multi-cluster/code_snippets/0020_get_gke_credentials.sh b/samples/ops-manager-multi-cluster/code_snippets/0020_get_gke_credentials.sh new file mode 100644 index 0000000..58dbe0e --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0020_get_gke_credentials.sh @@ -0,0 +1,3 @@ +gcloud container clusters get-credentials "${K8S_CLUSTER_0}" --zone="${K8S_CLUSTER_0_ZONE}" +gcloud container clusters get-credentials "${K8S_CLUSTER_1}" --zone="${K8S_CLUSTER_1_ZONE}" +gcloud container clusters get-credentials "${K8S_CLUSTER_2}" --zone="${K8S_CLUSTER_2_ZONE}" diff --git a/samples/ops-manager-multi-cluster/code_snippets/0030_verify_access_to_clusters.sh b/samples/ops-manager-multi-cluster/code_snippets/0030_verify_access_to_clusters.sh new file mode 100644 index 0000000..e8d4b58 --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0030_verify_access_to_clusters.sh @@ -0,0 +1,6 @@ +echo "Nodes in cluster ${K8S_CLUSTER_0_CONTEXT_NAME}" +kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" get nodes +echo; echo "Nodes in cluster ${K8S_CLUSTER_1_CONTEXT_NAME}" +kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" get nodes +echo; echo "Nodes in cluster ${K8S_CLUSTER_2_CONTEXT_NAME}" +kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" get nodes diff --git a/samples/ops-manager-multi-cluster/code_snippets/0040_install_istio.sh b/samples/ops-manager-multi-cluster/code_snippets/0040_install_istio.sh new file mode 100644 index 0000000..d6cc595 --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0040_install_istio.sh @@ -0,0 +1,5 @@ +CTX_CLUSTER1=${K8S_CLUSTER_0_CONTEXT_NAME} \ +CTX_CLUSTER2=${K8S_CLUSTER_1_CONTEXT_NAME} \ +CTX_CLUSTER3=${K8S_CLUSTER_2_CONTEXT_NAME} \ +ISTIO_VERSION="1.20.2" \ 
+../multi-cluster/install_istio_separate_network.sh diff --git a/samples/ops-manager-multi-cluster/code_snippets/0045_create_operator_namespace.sh b/samples/ops-manager-multi-cluster/code_snippets/0045_create_operator_namespace.sh new file mode 100644 index 0000000..8e205f9 --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0045_create_operator_namespace.sh @@ -0,0 +1,8 @@ +kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" +kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" label namespace "${OPERATOR_NAMESPACE}" istio-injection=enabled --overwrite + +kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" +kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" label namespace "${OPERATOR_NAMESPACE}" istio-injection=enabled --overwrite + +kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" +kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" label namespace "${OPERATOR_NAMESPACE}" istio-injection=enabled --overwrite diff --git a/samples/ops-manager-multi-cluster/code_snippets/0045_create_ops_manager_namespace.sh b/samples/ops-manager-multi-cluster/code_snippets/0045_create_ops_manager_namespace.sh new file mode 100644 index 0000000..f487538 --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0045_create_ops_manager_namespace.sh @@ -0,0 +1,8 @@ +kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "${NAMESPACE}" +kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" label namespace "${NAMESPACE}" istio-injection=enabled --overwrite + +kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace "${NAMESPACE}" +kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" label namespace "${NAMESPACE}" istio-injection=enabled --overwrite + +kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "${NAMESPACE}" +kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" label namespace "${NAMESPACE}" istio-injection=enabled --overwrite diff --git a/samples/ops-manager-multi-cluster/code_snippets/0046_create_image_pull_secrets.sh b/samples/ops-manager-multi-cluster/code_snippets/0046_create_image_pull_secrets.sh new file mode 100644 index 0000000..dffc5a7 --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0046_create_image_pull_secrets.sh @@ -0,0 +1,8 @@ +kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OPERATOR_NAMESPACE}" create secret generic "image-registries-secret" \ + --from-file=.dockerconfigjson="${HOME}/.docker/config.json" --type=kubernetes.io/dockerconfigjson +kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create secret generic "image-registries-secret" \ + --from-file=.dockerconfigjson="${HOME}/.docker/config.json" --type=kubernetes.io/dockerconfigjson +kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${NAMESPACE}" create secret generic "image-registries-secret" \ + --from-file=.dockerconfigjson="${HOME}/.docker/config.json" --type=kubernetes.io/dockerconfigjson +kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${NAMESPACE}" create secret generic "image-registries-secret" \ + --from-file=.dockerconfigjson="${HOME}/.docker/config.json" --type=kubernetes.io/dockerconfigjson diff --git a/samples/ops-manager-multi-cluster/code_snippets/0050_check_cluster_connectivity_create_sts_0.sh b/samples/ops-manager-multi-cluster/code_snippets/0050_check_cluster_connectivity_create_sts_0.sh new file mode 100644 index 0000000..b577496 --- /dev/null +++ 
b/samples/ops-manager-multi-cluster/code_snippets/0050_check_cluster_connectivity_create_sts_0.sh @@ -0,0 +1,22 @@ +kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" -f - <&1); +grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" && echo "SUCCESS" || (echo "ERROR: ${out}" && return 1) diff --git a/samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.sh b/samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.sh new file mode 100644 index 0000000..a05b5dc --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.sh @@ -0,0 +1,8 @@ +source_cluster=${K8S_CLUSTER_0_CONTEXT_NAME} +target_pod="echoserver1-0" +source_pod="echoserver0-0" +target_url="http://${target_pod}.${NAMESPACE}.svc.cluster.local:8080" +echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" +out=$(kubectl exec --context "${source_cluster}" -n "${NAMESPACE}" "${source_pod}" -- \ + /bin/bash -c "curl -v ${target_url}" 2>&1); +grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" && echo "SUCCESS" || (echo "ERROR: ${out}" && return 1) diff --git a/samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.sh b/samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.sh new file mode 100644 index 0000000..618c357 --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.sh @@ -0,0 +1,8 @@ +source_cluster=${K8S_CLUSTER_2_CONTEXT_NAME} +target_pod="echoserver1-0" +source_pod="echoserver2-0" +target_url="http://${target_pod}.${NAMESPACE}.svc.cluster.local:8080" +echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" +out=$(kubectl exec --context "${source_cluster}" -n "${NAMESPACE}" "${source_pod}" -- \ + /bin/bash -c "curl -v ${target_url}" 2>&1); +grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" && echo "SUCCESS" || (echo "ERROR: ${out}" && return 1) diff --git a/samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.sh b/samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.sh new file mode 100644 index 0000000..8651c40 --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.sh @@ -0,0 +1,8 @@ +source_cluster=${K8S_CLUSTER_0_CONTEXT_NAME} +target_pod="echoserver2-0" +source_pod="echoserver0-0" +target_url="http://${target_pod}.${NAMESPACE}.svc.cluster.local:8080" +echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" +out=$(kubectl exec --context "${source_cluster}" -n "${NAMESPACE}" "${source_pod}" -- \ + /bin/bash -c "curl -v ${target_url}" 2>&1); +grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" && echo "SUCCESS" || (echo "ERROR: ${out}" && return 1) diff --git a/samples/ops-manager-multi-cluster/code_snippets/0100_check_cluster_connectivity_cleanup.sh b/samples/ops-manager-multi-cluster/code_snippets/0100_check_cluster_connectivity_cleanup.sh new file mode 100644 index 0000000..cae6b06 --- /dev/null +++ 
b/samples/ops-manager-multi-cluster/code_snippets/0100_check_cluster_connectivity_cleanup.sh @@ -0,0 +1,9 @@ +kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" delete statefulset echoserver0 +kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${NAMESPACE}" delete statefulset echoserver1 +kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${NAMESPACE}" delete statefulset echoserver2 +kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" delete service echoserver +kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${NAMESPACE}" delete service echoserver +kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${NAMESPACE}" delete service echoserver +kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" delete service echoserver0-0 +kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${NAMESPACE}" delete service echoserver1-0 +kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${NAMESPACE}" delete service echoserver2-0 diff --git a/samples/ops-manager-multi-cluster/code_snippets/0200_kubectl_mongodb_configure_multi_cluster.sh b/samples/ops-manager-multi-cluster/code_snippets/0200_kubectl_mongodb_configure_multi_cluster.sh new file mode 100644 index 0000000..757f0cd --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0200_kubectl_mongodb_configure_multi_cluster.sh @@ -0,0 +1,8 @@ +kubectl mongodb multicluster setup \ + --central-cluster="${K8S_CLUSTER_0_CONTEXT_NAME}" \ + --member-clusters="${K8S_CLUSTER_0_CONTEXT_NAME},${K8S_CLUSTER_1_CONTEXT_NAME},${K8S_CLUSTER_2_CONTEXT_NAME}" \ + --member-cluster-namespace="${NAMESPACE}" \ + --central-cluster-namespace="${OPERATOR_NAMESPACE}" \ + --create-service-account-secrets \ + --install-database-roles=true \ + --image-pull-secrets=image-registries-secret diff --git a/samples/ops-manager-multi-cluster/code_snippets/0210_helm_install_operator.sh b/samples/ops-manager-multi-cluster/code_snippets/0210_helm_install_operator.sh new file mode 100644 index 0000000..8718b6b --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0210_helm_install_operator.sh @@ -0,0 +1,14 @@ +helm upgrade --install \ + --debug \ + --kube-context "${K8S_CLUSTER_0_CONTEXT_NAME}" \ + mongodb-enterprise-operator-multi-cluster \ + "${OPERATOR_HELM_CHART}" \ + --namespace="${OPERATOR_NAMESPACE}" \ + --set namespace="${OPERATOR_NAMESPACE}" \ + --set operator.namespace="${OPERATOR_NAMESPACE}" \ + --set operator.watchNamespace="${NAMESPACE}" \ + --set operator.name=mongodb-enterprise-operator-multi-cluster \ + --set operator.createOperatorServiceAccount=false \ + --set operator.createResourcesServiceAccountsAndRoles=false \ + --set "multiCluster.clusters={${K8S_CLUSTER_0_CONTEXT_NAME},${K8S_CLUSTER_1_CONTEXT_NAME},${K8S_CLUSTER_2_CONTEXT_NAME}}" \ + --set "${OPERATOR_ADDITIONAL_HELM_VALUES:-"dummy=value"}" diff --git a/samples/ops-manager-multi-cluster/code_snippets/0211_check_operator_deployment.sh b/samples/ops-manager-multi-cluster/code_snippets/0211_check_operator_deployment.sh new file mode 100644 index 0000000..553aabf --- /dev/null +++ b/samples/ops-manager-multi-cluster/code_snippets/0211_check_operator_deployment.sh @@ -0,0 +1,5 @@ +kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OPERATOR_NAMESPACE}" rollout status deployment/mongodb-enterprise-operator-multi-cluster +echo "Operator deployment in ${OPERATOR_NAMESPACE} namespace" +kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OPERATOR_NAMESPACE}" get deployments +echo; echo "Operator pod in ${OPERATOR_NAMESPACE} namespace" 
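+# the operator pod reports 2/2 containers: ${OPERATOR_NAMESPACE} is labeled for
+# istio-injection, so an Envoy sidecar runs next to the operator (compare the
+# recorded output in output/0211_check_operator_deployment.out)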
+kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OPERATOR_NAMESPACE}" get pods
diff --git a/samples/ops-manager-multi-cluster/code_snippets/0250_generate_certs.sh b/samples/ops-manager-multi-cluster/code_snippets/0250_generate_certs.sh
new file mode 100644
index 0000000..1717b46
--- /dev/null
+++ b/samples/ops-manager-multi-cluster/code_snippets/0250_generate_certs.sh
@@ -0,0 +1,87 @@
+mkdir certs || true
+
+cat <<EOF >certs/ca.cnf
+[ req ]
+default_bits = 2048
+prompt = no
+default_md = sha256
+distinguished_name = dn
+
+[ dn ]
+C=US
+ST=New York
+L=New York
+O=Example Company
+OU=IT Department
+CN=exampleCA
+EOF
+
+cat <<EOF >certs/om.cnf
+[ req ]
+default_bits = 2048
+prompt = no
+default_md = sha256
+distinguished_name = dn
+req_extensions = req_ext
+
+[ dn ]
+C=US
+ST=New York
+L=New York
+O=Example Company
+OU=IT Department
+CN=${OPS_MANAGER_EXTERNAL_DOMAIN}
+
+[ req_ext ]
+subjectAltName = @alt_names
+keyUsage = critical, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth, clientAuth
+
+[ alt_names ]
+DNS.1 = ${OPS_MANAGER_EXTERNAL_DOMAIN}
+DNS.2 = om-svc.${NAMESPACE}.svc.cluster.local
+EOF
+
+cat <<EOF >certs/appdb.cnf
+[ req ]
+default_bits = 2048
+prompt = no
+default_md = sha256
+distinguished_name = dn
+req_extensions = req_ext
+
+[ dn ]
+C=US
+ST=New York
+L=New York
+O=Example Company
+OU=IT Department
+CN=AppDB
+
+[ req_ext ]
+subjectAltName = @alt_names
+keyUsage = critical, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth, clientAuth
+
+[ alt_names ]
+# multi-cluster mongod hostnames from service for each pod
+DNS.1 = *.${NAMESPACE}.svc.cluster.local
+# single-cluster mongod hostnames from headless service
+DNS.2 = *.om-db-svc.${NAMESPACE}.svc.cluster.local
+EOF
+
+# generate CA keypair and certificate
+openssl genrsa -out certs/ca.key 2048
+openssl req -x509 -new -nodes -key certs/ca.key -days 1024 -out certs/ca.crt -config certs/ca.cnf
+
+# generate Ops Manager's keypair and CSR
+openssl genrsa -out certs/om.key 2048
+openssl req -new -key certs/om.key -out certs/om.csr -config certs/om.cnf
+
+# generate AppDB's keypair and CSR
+openssl genrsa -out certs/appdb.key 2048
+openssl req -new -key certs/appdb.key -out certs/appdb.csr -config certs/appdb.cnf
+
+# generate CA-signed certificates for Ops Manager and AppDB
+openssl x509 -req -in certs/om.csr -CA certs/ca.crt -CAkey certs/ca.key -CAcreateserial -out certs/om.crt -days 365 -sha256 -extfile certs/om.cnf -extensions req_ext
+openssl x509 -req -in certs/appdb.csr -CA certs/ca.crt -CAkey certs/ca.key -CAcreateserial -out certs/appdb.crt -days 365 -sha256 -extfile certs/appdb.cnf -extensions req_ext
diff --git a/samples/ops-manager-multi-cluster/code_snippets/0255_create_cert_secrets.sh b/samples/ops-manager-multi-cluster/code_snippets/0255_create_cert_secrets.sh
new file mode 100644
index 0000000..4f16626
--- /dev/null
+++ b/samples/ops-manager-multi-cluster/code_snippets/0255_create_cert_secrets.sh
@@ -0,0 +1,10 @@
+kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create secret tls cert-prefix-om-cert \
+  --cert=certs/om.crt \
+  --key=certs/om.key
+
+kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create secret tls cert-prefix-om-db-cert \
+  --cert=certs/appdb.crt \
+  --key=certs/appdb.key
+
+kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create configmap om-cert-ca --from-file="mms-ca.crt=certs/ca.crt"
+kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create configmap appdb-cert-ca --from-file="ca-pem=certs/ca.crt"
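Before Ops Manager is deployed it is worth confirming that the generated certificates really carry the hostnames the operator and the agents will dial. A minimal sanity check with openssl, assuming the certs/ directory produced by the snippet above (the -ext flag needs OpenSSL 1.1.1 or newer):

    openssl x509 -in certs/om.crt -noout -subject -ext subjectAltName
    openssl x509 -in certs/appdb.crt -noout -subject -ext subjectAltName
    # both leaf certificates must verify against the self-signed CA
    openssl verify -CAfile certs/ca.crt certs/om.crt certs/appdb.crt

The om certificate should list ${OPS_MANAGER_EXTERNAL_DOMAIN} and om-svc.${NAMESPACE}.svc.cluster.local, and the appdb certificate the two wildcard entries from appdb.cnf.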
diff --git a/samples/ops-manager-multi-cluster/code_snippets/0300_ops_manager_create_admin_credentials.sh b/samples/ops-manager-multi-cluster/code_snippets/0300_ops_manager_create_admin_credentials.sh
new file mode 100644
index 0000000..1b2af4f
--- /dev/null
+++ b/samples/ops-manager-multi-cluster/code_snippets/0300_ops_manager_create_admin_credentials.sh
@@ -0,0 +1,5 @@
+kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" --namespace "${NAMESPACE}" create secret generic om-admin-user-credentials \
+  --from-literal=Username="admin" \
+  --from-literal=Password="Passw0rd@" \
+  --from-literal=FirstName="Jane" \
+  --from-literal=LastName="Doe"
diff --git a/samples/ops-manager-multi-cluster/code_snippets/0310_ops_manager_deploy_on_single_member_cluster.sh b/samples/ops-manager-multi-cluster/code_snippets/0310_ops_manager_deploy_on_single_member_cluster.sh
new file mode 100644
index 0000000..818acfa
--- /dev/null
+++ b/samples/ops-manager-multi-cluster/code_snippets/0310_ops_manager_deploy_on_single_member_cluster.sh
@@ -0,0 +1,29 @@
+kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" -f - < 65s v1.28.7-gke.1026000
+gke-k8s-mdb-0-default-pool-d0f98a43-q9sf Ready <none> 65s v1.28.7-gke.1026000
+gke-k8s-mdb-0-default-pool-d0f98a43-zn8x Ready <none> 64s v1.28.7-gke.1026000
+
+Nodes in cluster gke_scratch-kubernetes-team_europe-central2-b_k8s-mdb-1
+NAME STATUS ROLES AGE VERSION
+gke-k8s-mdb-1-default-pool-37ea602a-0qgw Ready <none> 111s v1.28.7-gke.1026000
+gke-k8s-mdb-1-default-pool-37ea602a-k4qk Ready <none> 114s v1.28.7-gke.1026000
+gke-k8s-mdb-1-default-pool-37ea602a-p2g7 Ready <none> 113s v1.28.7-gke.1026000
+
+Nodes in cluster gke_scratch-kubernetes-team_europe-central2-c_k8s-mdb-2
+NAME STATUS ROLES AGE VERSION
+gke-k8s-mdb-2-default-pool-4b459a09-t1v9 Ready <none> 29s v1.28.7-gke.1026000
diff --git a/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.out b/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.out
new file mode 100644
index 0000000..0cfb188
--- /dev/null
+++ b/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.out
@@ -0,0 +1,2 @@
+Checking cross-cluster DNS resolution and connectivity from echoserver1-0 in gke_scratch-kubernetes-team_europe-central2-b_k8s-mdb-1 to echoserver0-0
+SUCCESS
diff --git a/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.out b/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.out
new file mode 100644
index 0000000..d812e71
--- /dev/null
+++ b/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.out
@@ -0,0 +1,2 @@
+Checking cross-cluster DNS resolution and connectivity from echoserver0-0 in gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 to echoserver1-0
+SUCCESS
diff --git a/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.out b/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.out
new file mode 100644
index 0000000..2a673be
--- /dev/null
+++ b/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.out
@@ -0,0 +1,2 @@
+Checking cross-cluster DNS resolution and connectivity from echoserver2-0 in gke_scratch-kubernetes-team_europe-central2-c_k8s-mdb-2 to echoserver1-0
+SUCCESS diff --git a/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.out b/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.out new file mode 100644 index 0000000..d843897 --- /dev/null +++ b/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.out @@ -0,0 +1,2 @@ +Checking cross-cluster DNS resolution and connectivity from echoserver0-0 in gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 to echoserver2-0 +SUCCESS diff --git a/samples/ops-manager-multi-cluster/output/0200_kubectl_mongodb_configure_multi_cluster.out b/samples/ops-manager-multi-cluster/output/0200_kubectl_mongodb_configure_multi_cluster.out new file mode 100644 index 0000000..0055981 --- /dev/null +++ b/samples/ops-manager-multi-cluster/output/0200_kubectl_mongodb_configure_multi_cluster.out @@ -0,0 +1,10 @@ + +Build: 1f23ae48c41d208f14c860356e483ba386a3aab8, 2024-04-26T12:19:36Z +Ensured namespaces exist in all clusters. +creating central cluster roles in cluster: gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 +creating member roles in cluster: gke_scratch-kubernetes-team_europe-central2-b_k8s-mdb-1 +creating member roles in cluster: gke_scratch-kubernetes-team_europe-central2-c_k8s-mdb-2 +Ensured ServiceAccounts and Roles. +Creating KubeConfig secret mongodb-operator/mongodb-enterprise-operator-multi-cluster-kubeconfig in cluster gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 +Ensured database Roles in member clusters. +Creating Member list Configmap mongodb-operator/mongodb-enterprise-operator-member-list in cluster gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 diff --git a/samples/ops-manager-multi-cluster/output/0210_helm_install_operator.out b/samples/ops-manager-multi-cluster/output/0210_helm_install_operator.out new file mode 100644 index 0000000..6a1db66 --- /dev/null +++ b/samples/ops-manager-multi-cluster/output/0210_helm_install_operator.out @@ -0,0 +1,249 @@ +Release "mongodb-enterprise-operator-multi-cluster" does not exist. Installing it now. 
+NAME: mongodb-enterprise-operator-multi-cluster +LAST DEPLOYED: Tue Apr 30 19:40:26 2024 +NAMESPACE: mongodb-operator +STATUS: deployed +REVISION: 1 +TEST SUITE: None +USER-SUPPLIED VALUES: +dummy: value +multiCluster: + clusters: + - gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 + - gke_scratch-kubernetes-team_europe-central2-b_k8s-mdb-1 + - gke_scratch-kubernetes-team_europe-central2-c_k8s-mdb-2 +namespace: mongodb-operator +operator: + createOperatorServiceAccount: false + createResourcesServiceAccountsAndRoles: false + name: mongodb-enterprise-operator-multi-cluster + namespace: mongodb-operator + watchNamespace: mongodb + +COMPUTED VALUES: +agent: + name: mongodb-agent-ubi + version: 107.0.0.8502-1 +database: + name: mongodb-enterprise-database-ubi + version: 1.25.0 +dummy: value +initAppDb: + name: mongodb-enterprise-init-appdb-ubi + version: 1.25.0 +initDatabase: + name: mongodb-enterprise-init-database-ubi + version: 1.25.0 +initOpsManager: + name: mongodb-enterprise-init-ops-manager-ubi + version: 1.25.0 +managedSecurityContext: false +mongodb: + appdbAssumeOldFormat: false + imageType: ubi8 + name: mongodb-enterprise-server + repo: quay.io/mongodb +mongodbLegacyAppDb: + name: mongodb-enterprise-appdb-database-ubi + repo: quay.io/mongodb +multiCluster: + clusterClientTimeout: 10 + clusters: + - gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 + - gke_scratch-kubernetes-team_europe-central2-b_k8s-mdb-1 + - gke_scratch-kubernetes-team_europe-central2-c_k8s-mdb-2 + kubeConfigSecretName: mongodb-enterprise-operator-multi-cluster-kubeconfig + performFailOver: true +namespace: mongodb-operator +operator: + additionalArguments: [] + affinity: {} + createOperatorServiceAccount: false + createResourcesServiceAccountsAndRoles: false + deployment_name: mongodb-enterprise-operator + env: prod + mdbDefaultArchitecture: non-static + name: mongodb-enterprise-operator-multi-cluster + namespace: mongodb-operator + nodeSelector: {} + operator_image_name: mongodb-enterprise-operator-ubi + replicas: 1 + resources: + limits: + cpu: 1100m + memory: 1Gi + requests: + cpu: 500m + memory: 200Mi + tolerations: [] + vaultSecretBackend: + enabled: false + tlsSecretRef: "" + version: 1.25.0 + watchNamespace: mongodb + watchedResources: + - mongodb + - opsmanagers + - mongodbusers + webhook: + registerConfiguration: true +opsManager: + name: mongodb-enterprise-ops-manager-ubi +registry: + agent: quay.io/mongodb + appDb: quay.io/mongodb + database: quay.io/mongodb + imagePullSecrets: null + initAppDb: quay.io/mongodb + initDatabase: quay.io/mongodb + initOpsManager: quay.io/mongodb + operator: quay.io/mongodb + opsManager: quay.io/mongodb + pullPolicy: Always +subresourceEnabled: true + +HOOKS: +MANIFEST: +--- +# Source: enterprise-operator/templates/operator-roles.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-enterprise-operator-mongodb-webhook +rules: + - apiGroups: + - "admissionregistration.k8s.io" + resources: + - validatingwebhookconfigurations + verbs: + - get + - create + - update + - delete + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - create + - update + - delete +--- +# Source: enterprise-operator/templates/operator-roles.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-enterprise-operator-multi-cluster-mongodb-operator-webhook-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
mongodb-enterprise-operator-mongodb-webhook +subjects: + - kind: ServiceAccount + name: mongodb-enterprise-operator-multi-cluster + namespace: mongodb-operator +--- +# Source: enterprise-operator/templates/operator.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mongodb-enterprise-operator-multi-cluster + namespace: mongodb-operator +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/name: mongodb-enterprise-operator-multi-cluster + app.kubernetes.io/instance: mongodb-enterprise-operator-multi-cluster + template: + metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/name: mongodb-enterprise-operator-multi-cluster + app.kubernetes.io/instance: mongodb-enterprise-operator-multi-cluster + spec: + serviceAccountName: mongodb-enterprise-operator-multi-cluster + securityContext: + runAsNonRoot: true + runAsUser: 2000 + containers: + - name: mongodb-enterprise-operator-multi-cluster + image: "quay.io/mongodb/mongodb-enterprise-operator-ubi:1.25.0" + imagePullPolicy: Always + args: + - -watch-resource=mongodb + - -watch-resource=opsmanagers + - -watch-resource=mongodbusers + - -watch-resource=mongodbmulticluster + command: + - /usr/local/bin/mongodb-enterprise-operator + volumeMounts: + - mountPath: /etc/config/kubeconfig + name: kube-config-volume + resources: + limits: + cpu: 1100m + memory: 1Gi + requests: + cpu: 500m + memory: 200Mi + env: + - name: OPERATOR_ENV + value: prod + - name: MDB_DEFAULT_ARCHITECTURE + value: non-static + - name: WATCH_NAMESPACE + value: "mongodb" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CLUSTER_CLIENT_TIMEOUT + value: "10" + - name: IMAGE_PULL_POLICY + value: Always + # Database + - name: MONGODB_ENTERPRISE_DATABASE_IMAGE + value: quay.io/mongodb/mongodb-enterprise-database-ubi + - name: INIT_DATABASE_IMAGE_REPOSITORY + value: quay.io/mongodb/mongodb-enterprise-init-database-ubi + - name: INIT_DATABASE_VERSION + value: 1.25.0 + - name: DATABASE_VERSION + value: 1.25.0 + # Ops Manager + - name: OPS_MANAGER_IMAGE_REPOSITORY + value: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi + - name: INIT_OPS_MANAGER_IMAGE_REPOSITORY + value: quay.io/mongodb/mongodb-enterprise-init-ops-manager-ubi + - name: INIT_OPS_MANAGER_VERSION + value: 1.25.0 + # AppDB + - name: INIT_APPDB_IMAGE_REPOSITORY + value: quay.io/mongodb/mongodb-enterprise-init-appdb-ubi + - name: INIT_APPDB_VERSION + value: 1.25.0 + - name: OPS_MANAGER_IMAGE_PULL_POLICY + value: Always + - name: AGENT_IMAGE + value: "quay.io/mongodb/mongodb-agent-ubi:107.0.0.8502-1" + - name: MDB_AGENT_IMAGE_REPOSITORY + value: "quay.io/mongodb/mongodb-agent-ubi" + - name: MONGODB_IMAGE + value: mongodb-enterprise-server + - name: MONGODB_REPO_URL + value: quay.io/mongodb + - name: MDB_IMAGE_TYPE + value: ubi8 + - name: PERFORM_FAILOVER + value: 'true' + volumes: + - name: kube-config-volume + secret: + defaultMode: 420 + secretName: mongodb-enterprise-operator-multi-cluster-kubeconfig + diff --git a/samples/ops-manager-multi-cluster/output/0211_check_operator_deployment.out b/samples/ops-manager-multi-cluster/output/0211_check_operator_deployment.out new file mode 100644 index 0000000..990010b --- /dev/null +++ b/samples/ops-manager-multi-cluster/output/0211_check_operator_deployment.out @@ -0,0 +1,9 @@ +Waiting for deployment "mongodb-enterprise-operator-multi-cluster" rollout to finish: 0 of 1 updated replicas are available... 
+deployment "mongodb-enterprise-operator-multi-cluster" successfully rolled out +Operator deployment in mongodb-operator namespace +NAME READY UP-TO-DATE AVAILABLE AGE +mongodb-enterprise-operator-multi-cluster 1/1 1 1 12s + +Operator pod in mongodb-operator namespace +NAME READY STATUS RESTARTS AGE +mongodb-enterprise-operator-multi-cluster-78cc97547d-nlgds 2/2 Running 1 (3s ago) 12s diff --git a/samples/ops-manager-multi-cluster/output/0311_ops_manager_wait_for_pending_state.out b/samples/ops-manager-multi-cluster/output/0311_ops_manager_wait_for_pending_state.out new file mode 100644 index 0000000..598d414 --- /dev/null +++ b/samples/ops-manager-multi-cluster/output/0311_ops_manager_wait_for_pending_state.out @@ -0,0 +1,2 @@ +Waiting for Application Database to reach Pending phase... +mongodbopsmanager.mongodb.com/om condition met diff --git a/samples/ops-manager-multi-cluster/output/0312_ops_manager_wait_for_running_state.out b/samples/ops-manager-multi-cluster/output/0312_ops_manager_wait_for_running_state.out new file mode 100644 index 0000000..7106622 --- /dev/null +++ b/samples/ops-manager-multi-cluster/output/0312_ops_manager_wait_for_running_state.out @@ -0,0 +1,26 @@ +Waiting for Application Database to reach Running phase... +mongodbopsmanager.mongodb.com/om condition met + +Waiting for Ops Manager to reach Running phase... +mongodbopsmanager.mongodb.com/om condition met + +Waiting for Application Database to reach Pending phase (enabling monitoring)... +mongodbopsmanager.mongodb.com/om condition met +Waiting for Application Database to reach Running phase... +mongodbopsmanager.mongodb.com/om condition met + +Waiting for Ops Manager to reach Running phase... +mongodbopsmanager.mongodb.com/om condition met + +MongoDBOpsManager resource +NAME REPLICAS VERSION STATE (OPSMANAGER) STATE (APPDB) STATE (BACKUP) AGE WARNINGS +om 7.0.4 Running Running Disabled 11m + +Pods running in cluster gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 +NAME READY STATUS RESTARTS AGE +om-0-0 2/2 Running 0 8m39s +om-db-0-0 4/4 Running 0 44s +om-db-0-1 4/4 Running 0 2m6s +om-db-0-2 4/4 Running 0 3m19s + +Pods running in cluster gke_scratch-kubernetes-team_europe-central2-b_k8s-mdb-1 diff --git a/samples/ops-manager-multi-cluster/output/0321_ops_manager_wait_for_pending_state.out b/samples/ops-manager-multi-cluster/output/0321_ops_manager_wait_for_pending_state.out new file mode 100644 index 0000000..598d414 --- /dev/null +++ b/samples/ops-manager-multi-cluster/output/0321_ops_manager_wait_for_pending_state.out @@ -0,0 +1,2 @@ +Waiting for Application Database to reach Pending phase... +mongodbopsmanager.mongodb.com/om condition met diff --git a/samples/ops-manager-multi-cluster/output/0322_ops_manager_wait_for_running_state.out b/samples/ops-manager-multi-cluster/output/0322_ops_manager_wait_for_running_state.out new file mode 100644 index 0000000..faa04bd --- /dev/null +++ b/samples/ops-manager-multi-cluster/output/0322_ops_manager_wait_for_running_state.out @@ -0,0 +1,22 @@ +Waiting for Application Database to reach Running phase... +mongodbopsmanager.mongodb.com/om condition met + +Waiting for Ops Manager to reach Running phase... 
+mongodbopsmanager.mongodb.com/om condition met + +MongoDBOpsManager resource +NAME REPLICAS VERSION STATE (OPSMANAGER) STATE (APPDB) STATE (BACKUP) AGE WARNINGS +om 7.0.4 Pending Running Disabled 14m + +Pods running in cluster gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 +NAME READY STATUS RESTARTS AGE +om-0-0 2/2 Terminating 0 12m +om-db-0-0 4/4 Running 0 4m12s +om-db-0-1 4/4 Running 0 5m34s +om-db-0-2 4/4 Running 0 6m47s + +Pods running in cluster gke_scratch-kubernetes-team_europe-central2-b_k8s-mdb-1 +NAME READY STATUS RESTARTS AGE +om-1-0 0/2 Init:0/2 0 0s +om-db-1-0 4/4 Running 0 3m24s +om-db-1-1 4/4 Running 0 104s diff --git a/samples/ops-manager-multi-cluster/output/0522_ops_manager_wait_for_running_state.out b/samples/ops-manager-multi-cluster/output/0522_ops_manager_wait_for_running_state.out new file mode 100644 index 0000000..21aee40 --- /dev/null +++ b/samples/ops-manager-multi-cluster/output/0522_ops_manager_wait_for_running_state.out @@ -0,0 +1,29 @@ + +Waiting for Backup to reach Running phase... +mongodbopsmanager.mongodb.com/om condition met +Waiting for Application Database to reach Running phase... +mongodbopsmanager.mongodb.com/om condition met + +Waiting for Ops Manager to reach Running phase... +mongodbopsmanager.mongodb.com/om condition met + +MongoDBOpsManager resource +NAME REPLICAS VERSION STATE (OPSMANAGER) STATE (APPDB) STATE (BACKUP) AGE WARNINGS +om 7.0.4 Running Running Running 22m + +Pods running in cluster gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 +NAME READY STATUS RESTARTS AGE +om-0-0 2/2 Running 0 7m10s +om-db-0-0 4/4 Running 0 11m +om-db-0-1 4/4 Running 0 13m +om-db-0-2 4/4 Running 0 14m + +Pods running in cluster gke_scratch-kubernetes-team_europe-central2-b_k8s-mdb-1 +NAME READY STATUS RESTARTS AGE +om-1-0 2/2 Running 0 4m8s +om-db-1-0 4/4 Running 0 11m +om-db-1-1 4/4 Running 0 9m25s + +Pods running in cluster gke_scratch-kubernetes-team_europe-central2-c_k8s-mdb-2 +NAME READY STATUS RESTARTS AGE +om-2-backup-daemon-0 2/2 Running 0 2m5s diff --git a/samples/ops-manager-multi-cluster/test.sh b/samples/ops-manager-multi-cluster/test.sh new file mode 100755 index 0000000..7df7ab4 --- /dev/null +++ b/samples/ops-manager-multi-cluster/test.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +set -eou pipefail + +source env_variables.sh +source ../../scripts/sample_test_runner.sh + +prepare_snippets + +run 0010_create_gke_cluster_0.sh & +run 0010_create_gke_cluster_1.sh & +run 0010_create_gke_cluster_2.sh & +wait +run 0020_get_gke_credentials.sh +run_for_output 0030_verify_access_to_clusters.sh + +run 0040_install_istio.sh + +run 0045_create_operator_namespace.sh +run 0045_create_ops_manager_namespace.sh + +run 0046_create_image_pull_secrets.sh + +run 0050_check_cluster_connectivity_create_sts_0.sh +run 0050_check_cluster_connectivity_create_sts_1.sh +run 0050_check_cluster_connectivity_create_sts_2.sh +run 0060_check_cluster_connectivity_wait_for_sts.sh +run 0070_check_cluster_connectivity_create_pod_service_0.sh +run 0070_check_cluster_connectivity_create_pod_service_1.sh +run 0070_check_cluster_connectivity_create_pod_service_2.sh +run 0080_check_cluster_connectivity_create_round_robin_service_0.sh +run 0080_check_cluster_connectivity_create_round_robin_service_1.sh +run 0080_check_cluster_connectivity_create_round_robin_service_2.sh +run_for_output 0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.sh +run_for_output 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.sh +run_for_output 
0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.sh
+run_for_output 0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.sh
+run 0100_check_cluster_connectivity_cleanup.sh
+
+run_for_output 0200_kubectl_mongodb_configure_multi_cluster.sh
+run_for_output 0210_helm_install_operator.sh
+run_for_output 0211_check_operator_deployment.sh
+
+run 0250_generate_certs.sh
+run 0255_create_cert_secrets.sh
+
+run 0300_ops_manager_create_admin_credentials.sh
+run 0310_ops_manager_deploy_on_single_member_cluster.sh
+run_for_output 0311_ops_manager_wait_for_pending_state.sh
+run_for_output 0312_ops_manager_wait_for_running_state.sh
+run 0320_ops_manager_add_second_cluster.sh
+run_for_output 0321_ops_manager_wait_for_pending_state.sh
+run_for_output 0322_ops_manager_wait_for_running_state.sh
+
+run 0400_install_minio_s3.sh
+run 0500_ops_manager_prepare_s3_backup_secrets.sh
+run 0510_ops_manager_enable_s3_backup.sh
+run_for_output 0522_ops_manager_wait_for_running_state.sh
diff --git a/samples/ops-manager-multi-cluster/test_cleanup.sh b/samples/ops-manager-multi-cluster/test_cleanup.sh
new file mode 100755
index 0000000..13580c4
--- /dev/null
+++ b/samples/ops-manager-multi-cluster/test_cleanup.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -eou pipefail
+
+source env_variables.sh
+source ../../scripts/sample_test_runner.sh
+
+run_cleanup "test.sh"
diff --git a/scripts/sample_test_runner.sh b/scripts/sample_test_runner.sh
new file mode 100644
index 0000000..ef4a6f9
--- /dev/null
+++ b/scripts/sample_test_runner.sh
@@ -0,0 +1,113 @@
+#!/usr/bin/env bash
+
+set -eou pipefail
+
+log_file="$0.run.log"
+snippets_src_dir="code_snippets"
+snippets_run_dir=".generated"
+
+DEBUG=${DEBUG:-"false"}
+
+function snippets_list() {
+  src_dir=$1
+  # shellcheck disable=SC2012
+  ls -1 "${src_dir}" | sort -t '_' -k1,1n -k2,2
+}
+
+function run_cleanup() {
+  script_file=$1
+  rm -rf "${snippets_run_dir}" 2>/dev/null || true
+  rm -rf "log" 2>/dev/null || true
+  rm -rf "output" 2>/dev/null || true
+  rm -rf "${script_file}.run.log" 2>/dev/null || true
+}
+
+function prepare_snippets() {
+  echo "Generating code snippets in ${snippets_run_dir}..."
+
+  touch "${log_file}"
+  mkdir log 2>/dev/null || true
+  mkdir output 2>/dev/null || true
+
+  rm -rf "${snippets_run_dir}" 2>/dev/null || true
+  mkdir "${snippets_run_dir}" 2>/dev/null || true
+
+  file_list=$(snippets_list "${snippets_src_dir}")
+  while IFS= read -r file_name; do
+    file_path="${snippets_run_dir}/${file_name}"
+    (
+      echo "# This file is generated automatically from ${snippets_src_dir}/${file_name}"
+      echo "# DO NOT EDIT"
+      echo "function ${file_name%.sh}() {"
+      cat "${snippets_src_dir}/${file_name}"
+      echo "}"
+    ) > "${file_path}"
+  done <<< "${file_list}"
+}
+
+function run() {
+  # shellcheck disable=SC1090
+  source "${snippets_run_dir}/$1"
+  cmd=${1%.sh}
+
+  if grep -q "^${cmd}$" "${log_file}"; then
+    echo "Skipping ${cmd} as it has already been executed."
+    return 0
+  fi
+
+  echo "$(date +"%Y-%m-%d %H:%M:%S") Executing ${cmd}"
+
+  stdout_file="log/${cmd}.stdout.log"
+  stderr_file="log/${cmd}.stderr.log"
+  set +e
+  (set -e; set -x; "${cmd}" >"${stdout_file}" 2>"${stderr_file}")
+  ret=$?
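+  # the snippet ran in a subshell with -x tracing so a failure there cannot
+  # abort the runner itself; ${ret} carries its exit status into the bookkeeping below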
+  set -e
+  if [[ ${ret} == 0 ]]; then
+    echo "${cmd}" >> "${log_file}"
+  else
+    echo "Error running: ${cmd}"
+  fi
+
+  if [[ ${DEBUG} == "true" || ${ret} != 0 ]]; then
+    cat "${stdout_file}"
+    cat "${stderr_file}"
+  fi
+
+  return ${ret}
+}
+
+function run_for_output() {
+  # shellcheck disable=SC1090
+  source "${snippets_run_dir}/$1"
+  cmd=${1%.sh}
+
+  if grep -q "^${cmd}$" "${log_file}"; then
+    echo "Skipping ${cmd} as it has already been executed."
+    return 0
+  fi
+
+  echo "$(date +"%Y-%m-%d %H:%M:%S") Executing ${cmd}"
+  stdout_file="log/${cmd}.stdout.log"
+  stderr_file="log/${cmd}.stderr.log"
+  set +e
+  (set -e; set -x; "${cmd}" >"${stdout_file}" 2>"${stderr_file}")
+  ret=$?
+  set -e
+  if [[ ${ret} == 0 ]]; then
+    tee "output/${cmd}.out" < "${stdout_file}"
+  else
+    echo "Error running: ${cmd}"
+  fi
+
+  if [[ ${ret} == 0 ]]; then
+    echo "${cmd}" >> "${log_file}"
+  fi
+
+  if [[ ${DEBUG} == "true" || ${ret} != 0 ]]; then
+    cat "${stdout_file}"
+    cat "${stderr_file}"
+  fi
+
+  return ${ret}
+}
diff --git a/tools/multicluster/.goreleaser.yaml b/tools/multicluster/.goreleaser.yaml
index 5458d41..d3cca7b 100644
--- a/tools/multicluster/.goreleaser.yaml
+++ b/tools/multicluster/.goreleaser.yaml
@@ -14,6 +14,7 @@ builds:
       - amd64
       - arm64
     hooks:
+      # This will notarize Apple binaries and replace goreleaser bins with the notarized ones
       post:
         - cmd: ./kubectl_mac_notarize.sh
           output: true
diff --git a/tools/multicluster/cmd/root.go b/tools/multicluster/cmd/root.go
index 83d444d..17c99aa 100644
--- a/tools/multicluster/cmd/root.go
+++ b/tools/multicluster/cmd/root.go
@@ -2,8 +2,10 @@ package cmd

 import (
 	"context"
+	"fmt"
 	"os"
 	"os/signal"
+	"runtime/debug"
 	"syscall"

 	"github.com/spf13/cobra"
@@ -14,13 +16,14 @@ var rootCmd = &cobra.Command{
 	Use:   "kubectl-mongodb",
 	Short: "Manage and configure MongoDB resources on k8s",
 	Long: `This application is a tool to simplify maintenance tasks
-of MongoDB resources in your kubernetes cluster.`,
+of MongoDB resources in your kubernetes cluster.
+	`,
 }

 // Execute adds all child commands to the root command and sets flags appropriately.
 // This is called by main.main(). It only needs to happen once to the rootCmd.
-func Execute() {
-	ctx, cancel := context.WithCancel(context.Background())
+func Execute(ctx context.Context) {
+	ctx, cancel := context.WithCancel(ctx)

 	signalChan := make(chan os.Signal, 1)
 	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
@@ -29,8 +32,28 @@ func Execute() {
 		<-signalChan
 		cancel()
 	}()
+	buildInfo, ok := debug.ReadBuildInfo()
+	if ok {
+		rootCmd.Long += getBuildInfoString(buildInfo)
+	}

 	err := rootCmd.ExecuteContext(ctx)
 	if err != nil {
 		os.Exit(1)
 	}
 }
+
+func getBuildInfoString(buildInfo *debug.BuildInfo) string {
+	var vcsHash string
+	var vcsTime string
+	for _, setting := range buildInfo.Settings {
+		if setting.Key == "vcs.revision" {
+			vcsHash = setting.Value
+		}
+		if setting.Key == "vcs.time" {
+			vcsTime = setting.Value
+		}
+	}
+
+	buildInfoStr := fmt.Sprintf("\nBuild: %s, %s", vcsHash, vcsTime)
+	return buildInfoStr
+}
diff --git a/tools/multicluster/cmd/setup.go b/tools/multicluster/cmd/setup.go
index 009b22e..70d7fe4 100644
--- a/tools/multicluster/cmd/setup.go
+++ b/tools/multicluster/cmd/setup.go
@@ -3,6 +3,7 @@ package cmd
 import (
 	"fmt"
 	"os"
+	"runtime/debug"
 	"strings"

 	"github.com/10gen/ops-manager-kubernetes/multi/pkg/common"
@@ -24,6 +25,7 @@ func init() {
 	setupCmd.Flags().BoolVar(&setupFlags.ClusterScoped, "cluster-scoped", false, "Create ClusterRole and ClusterRoleBindings for member clusters. 
[optional default: false]") setupCmd.Flags().BoolVar(&setupFlags.InstallDatabaseRoles, "install-database-roles", false, "Install the ServiceAccounts and Roles required for running database workloads in the member clusters. [optional default: false]") setupCmd.Flags().BoolVar(&setupFlags.CreateServiceAccountSecrets, "create-service-account-secrets", true, "Create service account token secrets. [optional default: true]") + setupCmd.Flags().StringVar(&setupFlags.ImagePullSecrets, "image-pull-secrets", "", "Name of the secret for imagePullSecrets to set in created service accounts") setupCmd.Flags().StringVar(&common.MemberClustersApiServers, "member-clusters-api-servers", "", "Comma separated list of api servers addresses. [optional, default will take addresses from KUBECONFIG env var]") } @@ -38,12 +40,17 @@ Example: kubectl-mongodb multicluster setup --central-cluster="operator-cluster" --member-clusters="cluster-1,cluster-2,cluster-3" --member-cluster-namespace=mongodb --central-cluster-namespace=mongodb --create-service-account-secrets --install-database-roles `, - Run: func(cmd *cobra.Command, args []string) { - if err := parseSetupFlags(args); err != nil { + Run: func(cmd *cobra.Command, _ []string) { + if err := parseSetupFlags(); err != nil { fmt.Printf("error parsing flags: %s\n", err) os.Exit(1) } + buildInfo, ok := debug.ReadBuildInfo() + if ok { + fmt.Println(getBuildInfoString(buildInfo)) + } + clientMap, err := common.CreateClientMap(setupFlags.MemberClusters, setupFlags.CentralCluster, common.LoadKubeConfigFilePath(), common.GetKubernetesClient) if err != nil { fmt.Printf("failed to create clientset map: %s", err) @@ -65,7 +72,7 @@ kubectl-mongodb multicluster setup --central-cluster="operator-cluster" --member var setupFlags = common.Flags{} -func parseSetupFlags(args []string) error { +func parseSetupFlags() error { if common.AnyAreEmpty(common.MemberClusters, setupFlags.ServiceAccount, setupFlags.CentralCluster, setupFlags.MemberClusterNamespace, setupFlags.CentralClusterNamespace) { return xerrors.Errorf("non empty values are required for [service-account, member-clusters, central-cluster, member-cluster-namespace, central-cluster-namespace]") diff --git a/tools/multicluster/go.mod b/tools/multicluster/go.mod index c5ab788..925e926 100644 --- a/tools/multicluster/go.mod +++ b/tools/multicluster/go.mod @@ -5,32 +5,36 @@ go 1.21 require ( github.com/ghodss/yaml v1.0.0 github.com/spf13/cobra v1.6.1 - github.com/stretchr/testify v1.8.0 - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 - k8s.io/api v0.26.10 - k8s.io/apimachinery v0.26.10 - k8s.io/client-go v0.26.10 - k8s.io/utils v0.0.0-20221107191617-1a15be271d1d + github.com/stretchr/testify v1.8.1 + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 + k8s.io/api v0.27.12 + k8s.io/apimachinery v0.27.12 + k8s.io/client-go v0.27.12 + k8s.io/utils v0.0.0-20240310230437-4693a0247e57 ) +// force pin, until we update the direct dependencies +require google.golang.org/protobuf v1.33.0 // indirect + require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-logr/logr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // 
indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.1.0 // indirect + github.com/google/uuid v1.3.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -39,19 +43,18 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect golang.org/x/net v0.23.0 // indirect - golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect + golang.org/x/oauth2 v0.7.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect + golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.28.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + k8s.io/klog/v2 v2.90.1 // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/tools/multicluster/go.sum b/tools/multicluster/go.sum index 25e04aa..b7438c1 100644 --- a/tools/multicluster/go.sum +++ b/tools/multicluster/go.sum @@ -1,140 +1,68 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= 
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= 
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= @@ -143,20 +71,18 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -166,18 +92,17 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= -github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= -github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= +github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= +github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= +github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod 
h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= @@ -186,282 +111,101 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp 
v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -473,29 +217,21 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.10 
h1:skTnrDR0r8dg4MMLf6YZIzugxNM0BjFsWKPkNc5kOvk= -k8s.io/api v0.26.10/go.mod h1:ou/H3yviqrHtP/DSPVTfsc7qNfmU06OhajytJfYXkXw= -k8s.io/apimachinery v0.26.10 h1:aE+J2KIbjctFqPp3Y0q4Wh2PD+l1p2g3Zp4UYjSvtGU= -k8s.io/apimachinery v0.26.10/go.mod h1:iT1ZP4JBP34wwM+ZQ8ByPEQ81u043iqAcsJYftX9amM= -k8s.io/client-go v0.26.10 h1:4mDzl+1IrfRxh4Ro0s65JRGJp14w77gSMUTjACYWVRo= -k8s.io/client-go v0.26.10/go.mod h1:sh74ig838gCckU4ElYclWb24lTesPdEDPnlyg5vcbkA= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +k8s.io/api v0.27.12 h1:Qprj/nuFj4xjbsAuJ05F1sCHs1d0x33n/Ni0oAVEFDo= +k8s.io/api v0.27.12/go.mod h1:PNRL63V26JzKe2++ho6W/YRp3k9XG7nirN4J7WRy5gY= +k8s.io/apimachinery v0.27.12 h1:Nt20vwaAHcZsM4WdkOtLaDeBJHg9QJW8JyOOEn6xzRA= +k8s.io/apimachinery v0.27.12/go.mod h1:5/SjQaDYQgZOv8kuzNMzmNGrqh4/iyknC5yWjxU9ll8= +k8s.io/client-go v0.27.12 h1:ouIB3ZitBjmBWh/9auP4erVl8AXkheqcmbH7FSFa7DI= +k8s.io/client-go v0.27.12/go.mod h1:h3X7RGr5s9Wm4NtI06Bzt3am4Kj6aXuZQcP7OD+48Sk= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/tools/multicluster/install_istio_separate_network.sh b/tools/multicluster/install_istio_separate_network.sh old mode 100755 new mode 100644 index 4e879a8..3769362 --- a/tools/multicluster/install_istio_separate_network.sh +++ b/tools/multicluster/install_istio_separate_network.sh @@ -2,30 +2,30 @@ set -eux -# change the clusternames as per your need -export CTX_CLUSTER1=gke_k8s-rdas_us-east1-b_member-1a -export CTX_CLUSTER2=gke_k8s-rdas_us-east1-c_member-2a -export CTX_CLUSTER3=gke_k8s-rdas_us-west1-a_member-3a -export VERSION=1.10.3 +# define here or provide the cluster 
names externally
+export CTX_CLUSTER1=${CTX_CLUSTER1}
+export CTX_CLUSTER2=${CTX_CLUSTER2}
+export CTX_CLUSTER3=${CTX_CLUSTER3}
+export ISTIO_VERSION=${ISTIO_VERSION}
 
-# download Istio 1.10.3 under the path
-curl -L https://istio.io/downloadIstio | ISTIO_VERSION=${VERSION} sh -
+# download the Istio release specified by ISTIO_VERSION into the working directory
+curl -L https://istio.io/downloadIstio | sh -
 
 # checks if external IP has been assigned to a service object, in our case we are interested in east-west gateway
 function_check_external_ip_assigned() {
   while : ; do
-    ip=$(kubectl --context="$1" get svc istio-eastwestgateway -n istio-system --output jsonpath='{.status.loadBalancer.ingress[0].ip}')
+    ip=$(kubectl --context="$1" get svc istio-eastwestgateway -n istio-system --output jsonpath='{.status.loadBalancer.ingress[0].ip}')
     if [ -n "$ip" ]
-    then
+    then
       echo "external ip assigned $ip"
       break
-    else
+    else
       echo "waiting for external ip to be assigned"
     fi
   done
 }
 
-cd istio-${VERSION}
+cd istio-${ISTIO_VERSION}
 mkdir -p certs
 pushd certs
@@ -184,5 +184,5 @@ bin/istioctl x create-remote-secret \
 
 # cleanup: delete the istio repo at the end
 cd ..
-rm -r istio-${VERSION}
-rm -f cluster1.yaml cluster2.yaml cluster3.yaml
+rm -r istio-${ISTIO_VERSION}
+rm -f cluster1.yaml cluster2.yaml cluster3.yaml
diff --git a/tools/multicluster/main.go b/tools/multicluster/main.go
index 765a125..3fe6b98 100644
--- a/tools/multicluster/main.go
+++ b/tools/multicluster/main.go
@@ -1,7 +1,12 @@
 package main
 
-import "github.com/10gen/ops-manager-kubernetes/multi/cmd"
+import (
+	"context"
+
+	"github.com/10gen/ops-manager-kubernetes/multi/cmd"
+)
 
 func main() {
-	cmd.Execute()
+	ctx := context.Background()
+	cmd.Execute(ctx)
 }
diff --git a/tools/multicluster/pkg/common/common.go b/tools/multicluster/pkg/common/common.go
index 9b19672..3d05734 100644
--- a/tools/multicluster/pkg/common/common.go
+++ b/tools/multicluster/pkg/common/common.go
@@ -4,6 +4,9 @@ import (
 	"context"
 	"fmt"
 	"strings"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/wait"
 
 	"github.com/ghodss/yaml"
 	"golang.org/x/xerrors"
@@ -25,9 +28,12 @@ type clusterType string
 
 var MemberClusters string
 var MemberClustersApiServers string
+var PollingInterval = time.Millisecond * 100
+var PollingTimeout = time.Second * 5
+
 const (
-	centralCluster clusterType = "CENTRAL"
-	memberCluster  clusterType = "MEMBER"
+	clusterTypeCentral clusterType = "CENTRAL"
+	clusterTypeMember  clusterType = "MEMBER"
 )
 
 // Flags holds all the fields provided by the user.
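The PollingInterval and PollingTimeout defaults above feed the retry loops that common.go builds on the newly imported k8s.io/apimachinery/pkg/util/wait package. Below is a minimal sketch of that polling pattern against apimachinery v0.27, the line this release pins; the helper name waitForSecretToken and its token-readiness condition are illustrative assumptions, not the CLI's actual code:

```go
package common

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForSecretToken polls until the named Secret exists and carries a token,
// checking every interval and giving up once the timeout elapses.
func waitForSecretToken(ctx context.Context, c kubernetes.Interface, namespace, name string, interval, timeout time.Duration) error {
	return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
		secret, err := c.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // not there yet; keep polling until the timeout fires
		}
		_, hasToken := secret.Data["token"]
		return hasToken, nil
	})
}
```

Returning (false, nil) from the condition treats a failed lookup as "not ready yet", so the loop keeps retrying every PollingInterval and only errors out when PollingTimeout elapses.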
@@ -44,6 +50,7 @@ type Flags struct { OperatorName string SourceCluster string CreateServiceAccountSecrets bool + ImagePullSecrets string } const ( @@ -255,6 +262,11 @@ func ensureAllClusterNamespacesExist(ctx context.Context, clientSets map[string] if err := ensureNamespace(ctx, clientSets[clusterName], f.MemberClusterNamespace); err != nil { return xerrors.Errorf("failed to ensure namespace %s in member cluster %s: %w", f.MemberClusterNamespace, clusterName, err) } + if f.CentralClusterNamespace != f.MemberClusterNamespace { + if err := ensureNamespace(ctx, clientSets[clusterName], f.CentralClusterNamespace); err != nil { + return xerrors.Errorf("failed to ensure namespace %s in member cluster %s: %w", f.CentralClusterNamespace, clusterName, err) + } + } } if err := ensureNamespace(ctx, clientSets[f.CentralCluster], f.CentralClusterNamespace); err != nil { return xerrors.Errorf("failed to ensure namespace %s in central cluster %s: %w", f.CentralClusterNamespace, f.CentralCluster, err) @@ -277,12 +289,12 @@ func EnsureMultiClusterResources(ctx context.Context, flags Flags, clientMap map } fmt.Println("Ensured namespaces exist in all clusters.") - if err := createServiceAccountsAndRoles(ctx, clientMap, flags); err != nil { + if err := createOperatorServiceAccountsAndRoles(ctx, clientMap, flags); err != nil { return xerrors.Errorf("failed creating service accounts and roles in all clusters: %w", err) } fmt.Println("Ensured ServiceAccounts and Roles.") - secrets, err := getAllWorkerClusterServiceAccountSecretTokens(ctx, clientMap, flags) + secrets, err := getAllMemberClusterServiceAccountSecretTokens(ctx, clientMap, flags) if err != nil { return xerrors.Errorf("failed to get service account secret tokens: %w", err) } @@ -446,7 +458,7 @@ func buildMemberEntityClusterRole() rbacv1.ClusterRole { } // buildRoleBinding creates the RoleBinding which binds the Role to the given ServiceAccount. -func buildRoleBinding(role rbacv1.Role, serviceAccount string) rbacv1.RoleBinding { +func buildRoleBinding(role rbacv1.Role, serviceAccount string, serviceAccountNamespace string) rbacv1.RoleBinding { return rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: "mongodb-enterprise-operator-multi-role-binding", @@ -457,7 +469,7 @@ func buildRoleBinding(role rbacv1.Role, serviceAccount string) rbacv1.RoleBindin { Kind: "ServiceAccount", Name: serviceAccount, - Namespace: role.Namespace, + Namespace: serviceAccountNamespace, }, }, RoleRef: rbacv1.RoleRef{ @@ -469,7 +481,7 @@ func buildRoleBinding(role rbacv1.Role, serviceAccount string) rbacv1.RoleBindin } // buildClusterRoleBinding creates the ClusterRoleBinding which binds the ClusterRole to the given ServiceAccount. 
-func buildClusterRoleBinding(clusterRole rbacv1.ClusterRole, sa corev1.ServiceAccount) rbacv1.ClusterRoleBinding {
+func buildClusterRoleBinding(clusterRole rbacv1.ClusterRole, serviceAccountName, serviceAccountNamespace string) rbacv1.ClusterRoleBinding {
 	return rbacv1.ClusterRoleBinding{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:   "mongodb-enterprise-operator-multi-cluster-role-binding",
@@ -478,8 +490,8 @@ func buildClusterRoleBinding(clusterRole rbacv1.ClusterRole, sa corev1.ServiceAc
 		Subjects: []rbacv1.Subject{
 			{
 				Kind:      "ServiceAccount",
-				Name:      sa.Name,
-				Namespace: sa.Namespace,
+				Name:      serviceAccountName,
+				Namespace: serviceAccountNamespace,
 			},
 		},
 		RoleRef: rbacv1.RoleRef{
@@ -490,60 +502,45 @@ func buildClusterRoleBinding(clusterRole rbacv1.ClusterRole, sa corev1.ServiceAc
 	}
 }
 
-// createMemberServiceAccountAndRoles creates the ServiceAccount and Roles, RoleBindings, ClusterRoles and ClusterRoleBindings required
-// for the member clusters.
-func createMemberServiceAccountAndRoles(ctx context.Context, c kubernetes.Interface, f Flags) error {
-	return createServiceAccountAndRoles(ctx, c, f.ServiceAccount, f.MemberClusterNamespace, f.ClusterScoped, memberCluster)
-}
-
-// createCentralClusterServiceAccountAndRoles creates the ServiceAccount and Roles, RoleBindings, ClusterRoles and ClusterRoleBindings required
-// for the central cluster.
-func createCentralClusterServiceAccountAndRoles(ctx context.Context, c kubernetes.Interface, f Flags) error {
-	// central cluster always uses Roles. Never Cluster Roles.
-	return createServiceAccountAndRoles(ctx, c, f.ServiceAccount, f.CentralClusterNamespace, f.ClusterScoped, centralCluster)
-}
-
-// createServiceAccountAndRoles creates the ServiceAccount and Roles, RoleBindings, ClusterRoles and ClusterRoleBindings required.
-func createServiceAccountAndRoles(ctx context.Context, c kubernetes.Interface, serviceAccountName, namespace string, clusterScoped bool, clusterType clusterType) error {
-	sa := corev1.ServiceAccount{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      serviceAccountName,
-			Namespace: namespace,
-			Labels:    multiClusterLabels(),
-		},
-		ImagePullSecrets: []corev1.LocalObjectReference{
-			{Name: "image-registries-secret"},
-		},
-	}
-
-	_, err := c.CoreV1().ServiceAccounts(sa.Namespace).Create(ctx, &sa, metav1.CreateOptions{})
-	if !errors.IsAlreadyExists(err) && err != nil {
-		return xerrors.Errorf("error creating service account: %w", err)
-	}
-
+// createRoles creates the Roles, RoleBindings, ClusterRoles and ClusterRoleBindings required; the
+// ServiceAccount itself is created separately by createServiceAccount.
+func createRoles(ctx context.Context, c KubeClient, serviceAccountName, serviceAccountNamespace, namespace string, clusterScoped bool, clusterType clusterType) error {
+	var err error
 	if !clusterScoped {
 		var role rbacv1.Role
-		if clusterType == centralCluster {
-			role = buildCentralEntityRole(sa.Namespace)
+		if clusterType == clusterTypeCentral {
+			role = buildCentralEntityRole(namespace)
 		} else {
-			role = buildMemberEntityRole(sa.Namespace)
+			role = buildMemberEntityRole(namespace)
 		}
-		_, err = c.RbacV1().Roles(sa.Namespace).Create(ctx, &role, metav1.CreateOptions{})
-		if !errors.IsAlreadyExists(err) && err != nil {
-			return xerrors.Errorf("error creating role: %w", err)
+		_, err = c.RbacV1().Roles(namespace).Create(ctx, &role, metav1.CreateOptions{})
+		if err != nil {
+			if errors.IsAlreadyExists(err) {
+				if _, err := c.RbacV1().Roles(namespace).Update(ctx, &role, metav1.UpdateOptions{}); err != nil {
+					return xerrors.Errorf("error updating role: %w", err)
+				}
+			} else {
+				return xerrors.Errorf("error creating role: %w", err)
+			}
 		}
 
-		roleBinding := buildRoleBinding(role, sa.Name)
-		_, err = c.RbacV1().RoleBindings(sa.Namespace).Create(ctx, &roleBinding, metav1.CreateOptions{})
-		if !errors.IsAlreadyExists(err) && err != nil {
-			return xerrors.Errorf("error creating role binding: %w", err)
+		roleBinding := buildRoleBinding(role, serviceAccountName, serviceAccountNamespace)
+		_, err = c.RbacV1().RoleBindings(namespace).Create(ctx, &roleBinding, metav1.CreateOptions{})
+		if err != nil {
+			if errors.IsAlreadyExists(err) {
+				if _, err := c.RbacV1().RoleBindings(namespace).Update(ctx, &roleBinding, metav1.UpdateOptions{}); err != nil {
+					return xerrors.Errorf("error updating role binding: %w", err)
+				}
+			} else {
+				return xerrors.Errorf("error creating role binding: %w", err)
+			}
 		}
+		return nil
 	}
 
 	var clusterRole rbacv1.ClusterRole
-	if clusterType == centralCluster {
+	if clusterType == clusterTypeCentral {
 		clusterRole = buildCentralEntityClusterRole()
 	} else {
 		clusterRole = buildMemberEntityClusterRole()
@@ -555,7 +552,7 @@ func createServiceAccountAndRoles(ctx context.Context, c kubernetes.Interface, s
 	}
 	fmt.Printf("created clusterrole: %s\n", clusterRole.Name)
 
-	clusterRoleBinding := buildClusterRoleBinding(clusterRole, sa)
+	clusterRoleBinding := buildClusterRoleBinding(clusterRole, serviceAccountName, serviceAccountNamespace)
 	_, err = c.RbacV1().ClusterRoleBindings().Create(ctx, &clusterRoleBinding, metav1.CreateOptions{})
 	if !errors.IsAlreadyExists(err) && err != nil {
 		return xerrors.Errorf("error creating cluster role binding: %w", err)
@@ -564,34 +561,56 @@ func createServiceAccountAndRoles(ctx context.Context, c kubernetes.Interface, s
 	return nil
 }
 
-// createServiceAccountsAndRoles creates the required ServiceAccounts in all member clusters.
-func createServiceAccountsAndRoles(ctx context.Context, clientMap map[string]KubeClient, f Flags) error {
+// createOperatorServiceAccountsAndRoles creates the operator ServiceAccount and its Roles in the central cluster and in all member clusters.
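+// The ServiceAccount lives in the operator (central cluster) namespace on every cluster; Roles and RoleBindings are
+// created in both the operator namespace and the member namespace so the operator can manage resources in each.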
+func createOperatorServiceAccountsAndRoles(ctx context.Context, clientMap map[string]KubeClient, f Flags) error {
 	fmt.Printf("creating central cluster roles in cluster: %s\n", f.CentralCluster)
-	c := clientMap[f.CentralCluster]
-	if err := createCentralClusterServiceAccountAndRoles(ctx, c, f); err != nil {
-		return err
+	centralClusterClient := clientMap[f.CentralCluster]
+	_, err := createServiceAccount(ctx, centralClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.ImagePullSecrets)
+	if err != nil {
+		return xerrors.Errorf("error creating service account: %w", err)
 	}
 
 	if f.CreateServiceAccountSecrets {
-		if err := createServiceAccountTokenSecret(ctx, c, f.CentralClusterNamespace, f.ServiceAccount); err != nil {
+		if err := createServiceAccountTokenSecret(ctx, centralClusterClient, f.CentralClusterNamespace, f.ServiceAccount); err != nil {
+			return err
+		}
+	}
+
+	if err := createRoles(ctx, centralClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.CentralClusterNamespace, f.ClusterScoped, clusterTypeCentral); err != nil {
+		return err
+	}
+
+	// if the operator namespace (CentralClusterNamespace) differs from the member cluster namespace, we also need
+	// to create Roles and RoleBindings for the operator's ServiceAccount in the member namespace
+	if f.CentralClusterNamespace != f.MemberClusterNamespace {
+		if err := createRoles(ctx, centralClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.MemberClusterNamespace, f.ClusterScoped, clusterTypeCentral); err != nil {
 			return err
 		}
 	}
 
 	for _, memberCluster := range f.MemberClusters {
 		if memberCluster == f.CentralCluster {
-			fmt.Printf("skipping creation of member roles in cluster (it is also the central cluster): %s\n", memberCluster)
+			// the ServiceAccount and roles were already created for the central cluster above
 			continue
 		}
 		fmt.Printf("creating member roles in cluster: %s\n", memberCluster)
-		c := clientMap[memberCluster]
-		if err := createMemberServiceAccountAndRoles(ctx, c, f); err != nil {
-			return err
+		memberClusterClient := clientMap[memberCluster]
+		_, err := createServiceAccount(ctx, memberClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.ImagePullSecrets)
+		if err != nil {
+			return xerrors.Errorf("error creating service account: %w", err)
 		}
+
 		if f.CreateServiceAccountSecrets {
-			if err := createServiceAccountTokenSecret(ctx, c, f.MemberClusterNamespace, f.ServiceAccount); err != nil {
+			if err := createServiceAccountTokenSecret(ctx, memberClusterClient, f.CentralClusterNamespace, f.ServiceAccount); err != nil {
 				return err
 			}
 		}
+
+		if err := createRoles(ctx, memberClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.MemberClusterNamespace, f.ClusterScoped, clusterTypeMember); err != nil {
+			return err
+		}
+		if err := createRoles(ctx, memberClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.CentralClusterNamespace, f.ClusterScoped, clusterTypeMember); err != nil {
+			return err
+		}
 	}
 
 	return nil
@@ -668,38 +687,54 @@ func createKubeConfigFromServiceAccountTokens(serviceAccountTokens map[string]co
 	return *config, nil
 }
 
-// getAllWorkerClusterServiceAccountSecretTokens returns a slice of secrets that should all be
+// getAllMemberClusterServiceAccountSecretTokens returns a slice of secrets that should all be
 // copied in the central cluster for the operator to use.
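+// It polls each member cluster until Kubernetes has populated the token secret with both the "ca.crt" and "token" keys.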
-func getAllWorkerClusterServiceAccountSecretTokens(ctx context.Context, clientSetMap map[string]KubeClient, flags Flags) (map[string]corev1.Secret, error) { +func getAllMemberClusterServiceAccountSecretTokens(ctx context.Context, clientSetMap map[string]KubeClient, flags Flags) (map[string]corev1.Secret, error) { allSecrets := map[string]corev1.Secret{} for _, cluster := range flags.MemberClusters { c := clientSetMap[cluster] - sas, err := getServiceAccounts(ctx, c, flags.MemberClusterNamespace) + serviceAccountNamespace := flags.CentralClusterNamespace + sa, err := getServiceAccount(ctx, c, serviceAccountNamespace, flags.ServiceAccount, cluster) if err != nil { - return nil, xerrors.Errorf("failed getting service accounts: %w", err) + return nil, xerrors.Errorf("failed getting service account: %w", err) } - for _, sa := range sas { - if sa.Name == flags.ServiceAccount { - token, err := getServiceAccountToken(ctx, c, sa) - if err != nil { - return nil, xerrors.Errorf("failed getting service account token: %w", err) + // Wait for the token secret to be created and populated with service account token data + var tokenSecret *corev1.Secret + if err := wait.PollWithContext(ctx, PollingInterval, PollingTimeout, func(ctx context.Context) (done bool, err error) { + tokenSecret, err = getServiceAccountToken(ctx, c, *sa) + if err != nil { + if errors.IsNotFound(err) { + return false, nil + } else { + return true, err } - allSecrets[cluster] = *token } + + if _, ok := tokenSecret.Data["ca.crt"]; !ok { + return false, nil + } + if _, ok := tokenSecret.Data["token"]; !ok { + return false, nil + } + + return true, nil + }); err != nil { + return nil, xerrors.Errorf("failed getting service account token secret: %w", err) } + + allSecrets[cluster] = *tokenSecret } return allSecrets, nil } -func getServiceAccounts(ctx context.Context, lister kubernetes.Interface, namespace string) ([]corev1.ServiceAccount, error) { - saList, err := lister.CoreV1().ServiceAccounts(namespace).List(ctx, metav1.ListOptions{}) - +func getServiceAccount(ctx context.Context, lister kubernetes.Interface, namespace string, name string, memberClusterName string) (*corev1.ServiceAccount, error) { + sa, err := lister.CoreV1().ServiceAccounts(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { - return nil, xerrors.Errorf("failed to list service accounts in member cluster namespace %s: %w", namespace, err) + return nil, xerrors.Errorf("failed to get service account %s/%s in member cluster %s: %w", namespace, name, memberClusterName, err) } - return saList.Items, nil + return sa, nil } // getServiceAccountToken returns the Secret containing the ServiceAccount token @@ -737,7 +772,7 @@ func copySecret(ctx context.Context, src, dst KubeClient, namespace, name string return nil } -func createServiceAccount(ctx context.Context, c KubeClient, serviceAccountName, namespace string) error { +func createServiceAccount(ctx context.Context, c KubeClient, serviceAccountName, namespace string, imagePullSecrets string) (corev1.ServiceAccount, error) { sa := corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: serviceAccountName, @@ -746,11 +781,20 @@ func createServiceAccount(ctx context.Context, c KubeClient, serviceAccountName, }, } + if imagePullSecrets != "" { + sa.ImagePullSecrets = []corev1.LocalObjectReference{ + {Name: "image-registries-secret"}, + } + } + _, err := c.CoreV1().ServiceAccounts(sa.Namespace).Create(ctx, &sa, metav1.CreateOptions{}) - if !errors.IsAlreadyExists(err) && err != nil { - return 
xerrors.Errorf("error creating service account: %w", err) + if errors.IsAlreadyExists(err) { + _, err = c.CoreV1().ServiceAccounts(sa.Namespace).Update(ctx, &sa, metav1.UpdateOptions{}) } - return nil + if err != nil { + return corev1.ServiceAccount{}, xerrors.Errorf("error creating/updating service account: %w", err) + } + return sa, nil } func createDatabaseRole(ctx context.Context, c KubeClient, roleName, namespace string) error { @@ -805,13 +849,13 @@ func createDatabaseRole(ctx context.Context, c KubeClient, roleName, namespace s // createDatabaseRoles creates the default ServiceAccounts, Roles and RoleBindings required for running database // instances in a member cluster. func createDatabaseRoles(ctx context.Context, client KubeClient, f Flags) error { - if err := createServiceAccount(ctx, client, AppdbServiceAccount, f.MemberClusterNamespace); err != nil { + if _, err := createServiceAccount(ctx, client, AppdbServiceAccount, f.MemberClusterNamespace, f.ImagePullSecrets); err != nil { return err } - if err := createServiceAccount(ctx, client, DatabasePodsServiceAccount, f.MemberClusterNamespace); err != nil { + if _, err := createServiceAccount(ctx, client, DatabasePodsServiceAccount, f.MemberClusterNamespace, f.ImagePullSecrets); err != nil { return err } - if err := createServiceAccount(ctx, client, OpsManagerServiceAccount, f.MemberClusterNamespace); err != nil { + if _, err := createServiceAccount(ctx, client, OpsManagerServiceAccount, f.MemberClusterNamespace, f.ImagePullSecrets); err != nil { return err } if err := createDatabaseRole(ctx, client, AppdbRole, f.MemberClusterNamespace); err != nil { diff --git a/tools/multicluster/pkg/common/common_test.go b/tools/multicluster/pkg/common/common_test.go index 1f72bae..ed0d1c0 100644 --- a/tools/multicluster/pkg/common/common_test.go +++ b/tools/multicluster/pkg/common/common_test.go @@ -4,9 +4,15 @@ import ( "bytes" "context" "fmt" + "math/rand" "os" "strings" "testing" + "time" + + "github.com/stretchr/testify/require" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" "github.com/ghodss/yaml" "github.com/stretchr/testify/assert" @@ -59,6 +65,12 @@ users: client-key-data: ZHNqaA== ` +func init() { + // we lower this to not make unit tests fast + PollingInterval = time.Millisecond + PollingTimeout = time.Second * 1 +} + func testFlags(t *testing.T, cleanup bool) Flags { memberClusters := []string{"member-cluster-0", "member-cluster-1", "member-cluster-2"} kubeconfig, err := clientcmd.Load([]byte(testKubeconfig)) @@ -68,173 +80,187 @@ func testFlags(t *testing.T, cleanup bool) Flags { assert.NoError(t, err) return Flags{ - MemberClusterApiServerUrls: memberClusterApiServerUrls, - MemberClusters: memberClusters, - ServiceAccount: "test-service-account", - CentralCluster: "central-cluster", - MemberClusterNamespace: "member-namespace", - CentralClusterNamespace: "central-namespace", - Cleanup: cleanup, - ClusterScoped: false, - OperatorName: "mongodb-enterprise-operator", + MemberClusterApiServerUrls: memberClusterApiServerUrls, + MemberClusters: memberClusters, + ServiceAccount: "test-service-account", + CentralCluster: "central-cluster", + MemberClusterNamespace: "member-namespace", + CentralClusterNamespace: "central-namespace", + Cleanup: cleanup, + ClusterScoped: false, + OperatorName: "mongodb-enterprise-operator", + CreateServiceAccountSecrets: true, } } func TestNamespaces_GetsCreated_WhenTheyDoNotExit(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) - clientMap := 
getClientResources(flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags) + err := EnsureMultiClusterResources(ctx, flags, clientMap) assert.NoError(t, err) - assertMemberClusterNamespacesExist(t, clientMap, flags) - assertCentralClusterNamespacesExist(t, clientMap, flags) + assertMemberClusterNamespacesExist(t, ctx, clientMap, flags) + assertCentralClusterNamespacesExist(t, ctx, clientMap, flags) } func TestExistingNamespaces_DoNotCause_AlreadyExistsErrors(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) - clientMap := getClientResources(flags, namespaceResourceType) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags, namespaceResourceType) + err := EnsureMultiClusterResources(ctx, flags, clientMap) assert.NoError(t, err) - assertMemberClusterNamespacesExist(t, clientMap, flags) - assertCentralClusterNamespacesExist(t, clientMap, flags) + assertMemberClusterNamespacesExist(t, ctx, clientMap, flags) + assertCentralClusterNamespacesExist(t, ctx, clientMap, flags) } func TestServiceAccount_GetsCreate_WhenTheyDoNotExit(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) - clientMap := getClientResources(flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags) + err := EnsureMultiClusterResources(ctx, flags, clientMap) - assert.NoError(t, err) - assertServiceAccountsExist(t, clientMap, flags) + require.NoError(t, err) + assertServiceAccountsExist(t, ctx, clientMap, flags) } func TestExistingServiceAccounts_DoNotCause_AlreadyExistsErrors(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) - clientMap := getClientResources(flags, serviceAccountResourceType) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags, serviceAccountResourceType) + err := EnsureMultiClusterResources(ctx, flags, clientMap) - assert.NoError(t, err) - assertServiceAccountsExist(t, clientMap, flags) + require.NoError(t, err) + assertServiceAccountsExist(t, ctx, clientMap, flags) } func TestDatabaseRoles_GetCreated(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) flags.ClusterScoped = true flags.InstallDatabaseRoles = true - clientMap := getClientResources(flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags) + err := EnsureMultiClusterResources(ctx, flags, clientMap) - assert.NoError(t, err) - assertDatabaseRolesExist(t, clientMap, flags) + require.NoError(t, err) + assertDatabaseRolesExist(t, ctx, clientMap, flags) } func TestRoles_GetsCreated_WhenTheyDoesNotExit(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) - clientMap := getClientResources(flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags) + err := EnsureMultiClusterResources(ctx, flags, clientMap) - assert.NoError(t, err) - assertMemberRolesExist(t, clientMap, flags) + require.NoError(t, err) + assertMemberRolesExist(t, ctx, clientMap, flags) } func TestExistingRoles_DoNotCause_AlreadyExistsErrors(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) - clientMap := getClientResources(flags, roleResourceType) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags, 
roleResourceType) + err := EnsureMultiClusterResources(ctx, flags, clientMap) - assert.NoError(t, err) - assertMemberRolesExist(t, clientMap, flags) + require.NoError(t, err) + assertMemberRolesExist(t, ctx, clientMap, flags) } func TestClusterRoles_DoNotGetCreated_WhenNotSpecified(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) flags.ClusterScoped = false - clientMap := getClientResources(flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags) + err := EnsureMultiClusterResources(ctx, flags, clientMap) - assert.NoError(t, err) - assertMemberRolesExist(t, clientMap, flags) - assertCentralRolesExist(t, clientMap, flags) + require.NoError(t, err) + assertMemberRolesExist(t, ctx, clientMap, flags) + assertCentralRolesExist(t, ctx, clientMap, flags) } func TestClusterRoles_GetCreated_WhenSpecified(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) flags.ClusterScoped = true - clientMap := getClientResources(flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags) + err := EnsureMultiClusterResources(ctx, flags, clientMap) - assert.NoError(t, err) - assertMemberRolesDoNotExist(t, clientMap, flags) - assertMemberClusterRolesExist(t, clientMap, flags) + require.NoError(t, err) + assertMemberRolesDoNotExist(t, ctx, clientMap, flags) + assertMemberClusterRolesExist(t, ctx, clientMap, flags) } func TestCentralCluster_GetsRegularRoleCreated_WhenClusterScoped_IsSpecified(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) flags.ClusterScoped = true - clientMap := getClientResources(flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags) + err := EnsureMultiClusterResources(ctx, flags, clientMap) assert.NoError(t, err) } func TestCentralCluster_GetsRegularRoleCreated_WhenNonClusterScoped_IsSpecified(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) flags.ClusterScoped = false - clientMap := getClientResources(flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags) + err := EnsureMultiClusterResources(ctx, flags, clientMap) - assert.NoError(t, err) - assertCentralRolesExist(t, clientMap, flags) + require.NoError(t, err) + assertCentralRolesExist(t, ctx, clientMap, flags) } func TestPerformCleanup(t *testing.T) { + ctx := context.Background() flags := testFlags(t, true) flags.ClusterScoped = true - clientMap := getClientResources(flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) - assert.NoError(t, err) + clientMap := getClientResources(ctx, flags) + err := EnsureMultiClusterResources(ctx, flags, clientMap) + require.NoError(t, err) t.Run("Resources get created with labels", func(t *testing.T) { - assertMemberClusterRolesExist(t, clientMap, flags) - assertMemberClusterNamespacesExist(t, clientMap, flags) - assertCentralClusterNamespacesExist(t, clientMap, flags) - assertServiceAccountsExist(t, clientMap, flags) + assertMemberClusterRolesExist(t, ctx, clientMap, flags) + assertMemberClusterNamespacesExist(t, ctx, clientMap, flags) + assertCentralClusterNamespacesExist(t, ctx, clientMap, flags) + assertServiceAccountsExist(t, ctx, clientMap, flags) }) - err = performCleanup(context.TODO(), clientMap, flags) - assert.NoError(t, err) + err = performCleanup(ctx, clientMap, flags) + require.NoError(t, err) 
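+	// performCleanup removes only resources carrying the multi-cluster labels; the subtests below
+	// verify the labelled RBAC objects are gone while the namespaces survive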
t.Run("Resources with labels are removed", func(t *testing.T) { - assertMemberRolesDoNotExist(t, clientMap, flags) - assertMemberClusterRolesDoNotExist(t, clientMap, flags) - assertCentralRolesDoNotExist(t, clientMap, flags) + assertMemberRolesDoNotExist(t, ctx, clientMap, flags) + assertMemberClusterRolesDoNotExist(t, ctx, clientMap, flags) + assertCentralRolesDoNotExist(t, ctx, clientMap, flags) }) t.Run("Namespaces are preserved", func(t *testing.T) { - assertMemberClusterNamespacesExist(t, clientMap, flags) - assertCentralClusterNamespacesExist(t, clientMap, flags) + assertMemberClusterNamespacesExist(t, ctx, clientMap, flags) + assertCentralClusterNamespacesExist(t, ctx, clientMap, flags) }) } func TestCreateKubeConfig_IsComposedOf_ServiceAccountTokens_InAllClusters(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) - clientMap := getClientResources(flags) + clientMap := getClientResources(ctx, flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) - assert.NoError(t, err) + err := EnsureMultiClusterResources(ctx, flags, clientMap) + require.NoError(t, err) - kubeConfig, err := readKubeConfig(clientMap[flags.CentralCluster], flags.CentralClusterNamespace) + kubeConfig, err := readKubeConfig(ctx, clientMap[flags.CentralCluster], flags.CentralClusterNamespace) assert.NoError(t, err) assert.Equal(t, "Config", kubeConfig.Kind) @@ -244,7 +270,7 @@ func TestCreateKubeConfig_IsComposedOf_ServiceAccountTokens_InAllClusters(t *tes for i, kubeConfigCluster := range kubeConfig.Clusters { assert.Equal(t, flags.MemberClusters[i], kubeConfigCluster.Name, "Name of cluster should be set to the member clusters.") - expectedCaBytes, err := readSecretKey(clientMap[flags.MemberClusters[i]], fmt.Sprintf("%s-token", flags.ServiceAccount), flags.MemberClusterNamespace, "ca.crt") + expectedCaBytes, err := readSecretKey(ctx, clientMap[flags.MemberClusters[i]], fmt.Sprintf("%s-token-secret", flags.ServiceAccount), flags.CentralClusterNamespace, "ca.crt") assert.NoError(t, err) assert.Contains(t, string(expectedCaBytes), flags.MemberClusters[i]) @@ -253,7 +279,7 @@ func TestCreateKubeConfig_IsComposedOf_ServiceAccountTokens_InAllClusters(t *tes } for i, user := range kubeConfig.Users { - tokenBytes, err := readSecretKey(clientMap[flags.MemberClusters[i]], fmt.Sprintf("%s-token", flags.ServiceAccount), flags.MemberClusterNamespace, "token") + tokenBytes, err := readSecretKey(ctx, clientMap[flags.MemberClusters[i]], fmt.Sprintf("%s-token-secret", flags.ServiceAccount), flags.CentralClusterNamespace, "token") assert.NoError(t, err) assert.Equal(t, flags.MemberClusters[i], user.Name, "User name should be the name of the cluster.") assert.Equal(t, string(tokenBytes), user.User.Token, "Token from the service account secret should be set.") @@ -262,42 +288,45 @@ func TestCreateKubeConfig_IsComposedOf_ServiceAccountTokens_InAllClusters(t *tes } func TestKubeConfigSecret_IsCreated_InCentralCluster(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) - clientMap := getClientResources(flags) + clientMap := getClientResources(ctx, flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) - assert.NoError(t, err) + err := EnsureMultiClusterResources(ctx, flags, clientMap) + require.NoError(t, err) centralClusterClient := clientMap[flags.CentralCluster] - kubeConfigSecret, err := centralClusterClient.CoreV1().Secrets(flags.CentralClusterNamespace).Get(context.TODO(), KubeConfigSecretName, metav1.GetOptions{}) + kubeConfigSecret, err := 
centralClusterClient.CoreV1().Secrets(flags.CentralClusterNamespace).Get(ctx, KubeConfigSecretName, metav1.GetOptions{}) assert.NoError(t, err) assert.NotNil(t, kubeConfigSecret) } func TestKubeConfigSecret_IsNotCreated_InMemberClusters(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) - clientMap := getClientResources(flags) + clientMap := getClientResources(ctx, flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) - assert.NoError(t, err) + err := EnsureMultiClusterResources(ctx, flags, clientMap) + require.NoError(t, err) for _, memberCluster := range flags.MemberClusters { memberClient := clientMap[memberCluster] - kubeConfigSecret, err := memberClient.CoreV1().Secrets(flags.CentralClusterNamespace).Get(context.TODO(), KubeConfigSecretName, metav1.GetOptions{}) + kubeConfigSecret, err := memberClient.CoreV1().Secrets(flags.CentralClusterNamespace).Get(ctx, KubeConfigSecretName, metav1.GetOptions{}) assert.True(t, errors.IsNotFound(err)) assert.Nil(t, kubeConfigSecret) } } func TestChangingOneServiceAccountToken_ChangesOnlyThatEntry_InKubeConfig(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) - clientMap := getClientResources(flags) + clientMap := getClientResources(ctx, flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) - assert.NoError(t, err) + err := EnsureMultiClusterResources(ctx, flags, clientMap) + require.NoError(t, err) - kubeConfigBefore, err := readKubeConfig(clientMap[flags.CentralCluster], flags.CentralClusterNamespace) + kubeConfigBefore, err := readKubeConfig(ctx, clientMap[flags.CentralCluster], flags.CentralClusterNamespace) assert.NoError(t, err) firstClusterClient := clientMap[flags.MemberClusters[0]] @@ -305,8 +334,8 @@ func TestChangingOneServiceAccountToken_ChangesOnlyThatEntry_InKubeConfig(t *tes // simulate a service account token changing, re-running the script should leave the other clusters unchanged. 
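+	// the token secret now uses the "<ServiceAccount>-token-secret" naming and lives in the operator
+	// (central cluster) namespace, matching the secrets created earlier in the test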
newServiceAccountToken := corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-token", flags.ServiceAccount), - Namespace: flags.MemberClusterNamespace, + Name: fmt.Sprintf("%s-token-secret", flags.ServiceAccount), + Namespace: flags.CentralClusterNamespace, }, Data: map[string][]byte{ "token": []byte("new-token-data"), @@ -314,13 +343,14 @@ func TestChangingOneServiceAccountToken_ChangesOnlyThatEntry_InKubeConfig(t *tes }, } - _, err = firstClusterClient.CoreV1().Secrets(flags.MemberClusterNamespace).Update(context.TODO(), &newServiceAccountToken, metav1.UpdateOptions{}) + _, err = firstClusterClient.CoreV1().Secrets(flags.CentralClusterNamespace).Update(ctx, &newServiceAccountToken, metav1.UpdateOptions{}) assert.NoError(t, err) - err = EnsureMultiClusterResources(context.TODO(), flags, clientMap) - assert.NoError(t, err) + flags.CreateServiceAccountSecrets = false + err = EnsureMultiClusterResources(ctx, flags, clientMap) + require.NoError(t, err) - kubeConfigAfter, err := readKubeConfig(clientMap[flags.CentralCluster], flags.CentralClusterNamespace) + kubeConfigAfter, err := readKubeConfig(ctx, clientMap[flags.CentralCluster], flags.CentralClusterNamespace) assert.NoError(t, err) assert.NotEqual(t, kubeConfigBefore.Users[0], kubeConfigAfter.Users[0], "Cluster 0 users should have been modified.") @@ -359,15 +389,18 @@ func TestGetMemberClusterApiServerUrls(t *testing.T) { } func TestMemberClusterUris(t *testing.T) { + ctx := context.Background() t.Run("Uses server values set in CommonFlags", func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() flags := testFlags(t, false) flags.MemberClusterApiServerUrls = []string{"cluster1-url", "cluster2-url", "cluster3-url"} - clientMap := getClientResources(flags) + clientMap := getClientResources(ctx, flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) - assert.NoError(t, err) + err := EnsureMultiClusterResources(ctx, flags, clientMap) + require.NoError(t, err) - kubeConfig, err := readKubeConfig(clientMap[flags.CentralCluster], flags.CentralClusterNamespace) + kubeConfig, err := readKubeConfig(ctx, clientMap[flags.CentralCluster], flags.CentralClusterNamespace) assert.NoError(t, err) for i, c := range kubeConfig.Clusters { @@ -379,17 +412,18 @@ func TestMemberClusterUris(t *testing.T) { } func TestReplaceClusterMembersConfigMap(t *testing.T) { + ctx := context.Background() flags := testFlags(t, false) - clientMap := getClientResources(flags) + clientMap := getClientResources(ctx, flags) client := clientMap[flags.CentralCluster] { flags.MemberClusters = []string{"member-1", "member-2", "member-3", "member-4"} - err := ReplaceClusterMembersConfigMap(context.Background(), client, flags) + err := ReplaceClusterMembersConfigMap(ctx, client, flags) assert.NoError(t, err) - cm, err := client.CoreV1().ConfigMaps(flags.CentralClusterNamespace).Get(context.Background(), DefaultOperatorConfigMapName, metav1.GetOptions{}) + cm, err := client.CoreV1().ConfigMaps(flags.CentralClusterNamespace).Get(ctx, DefaultOperatorConfigMapName, metav1.GetOptions{}) assert.NoError(t, err) expected := map[string]string{} @@ -401,8 +435,8 @@ func TestReplaceClusterMembersConfigMap(t *testing.T) { { flags.MemberClusters = []string{"member-1", "member-2"} - err := ReplaceClusterMembersConfigMap(context.Background(), client, flags) - cm, err := client.CoreV1().ConfigMaps(flags.CentralClusterNamespace).Get(context.Background(), DefaultOperatorConfigMapName, metav1.GetOptions{}) + err := 
ReplaceClusterMembersConfigMap(ctx, client, flags) + cm, err := client.CoreV1().ConfigMaps(flags.CentralClusterNamespace).Get(ctx, DefaultOperatorConfigMapName, metav1.GetOptions{}) assert.NoError(t, err) expected := map[string]string{} @@ -419,6 +453,7 @@ func TestReplaceClusterMembersConfigMap(t *testing.T) { // samples/multi-cluster-cli-gitops/resources/rbac directory. By default, this test is not executed. If you indent to run // it, please set EXPORT_RBAC_SAMPLES variable to "true". func TestPrintingOutRolesServiceAccountsAndRoleBindings(t *testing.T) { + ctx := context.Background() if os.Getenv("EXPORT_RBAC_SAMPLES") != "true" { t.Skip("Skipping as EXPORT_RBAC_SAMPLES is false") } @@ -429,78 +464,78 @@ func TestPrintingOutRolesServiceAccountsAndRoleBindings(t *testing.T) { { sb := &strings.Builder{} - clientMap := getClientResources(flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags) + err := EnsureMultiClusterResources(ctx, flags, clientMap) - cr, err := clientMap[flags.CentralCluster].RbacV1().ClusterRoles().List(context.TODO(), metav1.ListOptions{}) + cr, err := clientMap[flags.CentralCluster].RbacV1().ClusterRoles().List(ctx, metav1.ListOptions{}) assert.NoError(t, err) - crb, err := clientMap[flags.CentralCluster].RbacV1().ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{}) + crb, err := clientMap[flags.CentralCluster].RbacV1().ClusterRoleBindings().List(ctx, metav1.ListOptions{}) assert.NoError(t, err) - sa, err := clientMap[flags.CentralCluster].CoreV1().ServiceAccounts(flags.CentralClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + sa, err := clientMap[flags.CentralCluster].CoreV1().ServiceAccounts(flags.CentralClusterNamespace).List(ctx, metav1.ListOptions{}) sb = marshalToYaml(t, sb, "Central Cluster, cluster-scoped resources", "rbac.authorization.k8s.io/v1", "ClusterRole", cr.Items) sb = marshalToYaml(t, sb, "Central Cluster, cluster-scoped resources", "rbac.authorization.k8s.io/v1", "ClusterRoleBinding", crb.Items) sb = marshalToYaml(t, sb, "Central Cluster, cluster-scoped resources", "v1", "ServiceAccount", sa.Items) - os.WriteFile("../../samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_central_cluster.yaml", []byte(sb.String()), os.ModePerm) + _ = os.WriteFile("../../samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_central_cluster.yaml", []byte(sb.String()), os.ModePerm) } { sb := &strings.Builder{} - clientMap := getClientResources(flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags) + err := EnsureMultiClusterResources(ctx, flags, clientMap) - cr, err := clientMap[flags.MemberClusters[0]].RbacV1().ClusterRoles().List(context.TODO(), metav1.ListOptions{}) + cr, err := clientMap[flags.MemberClusters[0]].RbacV1().ClusterRoles().List(ctx, metav1.ListOptions{}) assert.NoError(t, err) - crb, err := clientMap[flags.MemberClusters[0]].RbacV1().ClusterRoleBindings().List(context.TODO(), metav1.ListOptions{}) + crb, err := clientMap[flags.MemberClusters[0]].RbacV1().ClusterRoleBindings().List(ctx, metav1.ListOptions{}) assert.NoError(t, err) - sa, err := clientMap[flags.MemberClusters[0]].CoreV1().ServiceAccounts(flags.MemberClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + sa, err := clientMap[flags.MemberClusters[0]].CoreV1().ServiceAccounts(flags.MemberClusterNamespace).List(ctx, metav1.ListOptions{}) sb = marshalToYaml(t, sb, "Member Cluster, cluster-scoped resources", 
"rbac.authorization.k8s.io/v1", "ClusterRole", cr.Items) sb = marshalToYaml(t, sb, "Member Cluster, cluster-scoped resources", "rbac.authorization.k8s.io/v1", "ClusterRoleBinding", crb.Items) sb = marshalToYaml(t, sb, "Member Cluster, cluster-scoped resources", "v1", "ServiceAccount", sa.Items) - os.WriteFile("../../samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_member_cluster.yaml", []byte(sb.String()), os.ModePerm) + _ = os.WriteFile("../../samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_member_cluster.yaml", []byte(sb.String()), os.ModePerm) } { sb := &strings.Builder{} flags.ClusterScoped = false - clientMap := getClientResources(flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags) + err := EnsureMultiClusterResources(ctx, flags, clientMap) - r, err := clientMap[flags.CentralCluster].RbacV1().Roles(flags.CentralClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + r, err := clientMap[flags.CentralCluster].RbacV1().Roles(flags.CentralClusterNamespace).List(ctx, metav1.ListOptions{}) assert.NoError(t, err) - rb, err := clientMap[flags.CentralCluster].RbacV1().RoleBindings(flags.CentralClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + rb, err := clientMap[flags.CentralCluster].RbacV1().RoleBindings(flags.CentralClusterNamespace).List(ctx, metav1.ListOptions{}) assert.NoError(t, err) - sa, err := clientMap[flags.CentralCluster].CoreV1().ServiceAccounts(flags.CentralClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + sa, err := clientMap[flags.CentralCluster].CoreV1().ServiceAccounts(flags.CentralClusterNamespace).List(ctx, metav1.ListOptions{}) sb = marshalToYaml(t, sb, "Central Cluster, namespace-scoped resources", "rbac.authorization.k8s.io/v1", "Role", r.Items) sb = marshalToYaml(t, sb, "Central Cluster, namespace-scoped resources", "rbac.authorization.k8s.io/v1", "RoleBinding", rb.Items) sb = marshalToYaml(t, sb, "Central Cluster, namespace-scoped resources", "v1", "ServiceAccount", sa.Items) - os.WriteFile("../../samples/multi-cluster-cli-gitops/resources/rbac/namespace_scoped_central_cluster.yaml", []byte(sb.String()), os.ModePerm) + _ = os.WriteFile("../../samples/multi-cluster-cli-gitops/resources/rbac/namespace_scoped_central_cluster.yaml", []byte(sb.String()), os.ModePerm) } { sb := &strings.Builder{} flags.ClusterScoped = false - clientMap := getClientResources(flags) - err := EnsureMultiClusterResources(context.TODO(), flags, clientMap) + clientMap := getClientResources(ctx, flags) + err := EnsureMultiClusterResources(ctx, flags, clientMap) - r, err := clientMap[flags.MemberClusters[0]].RbacV1().Roles(flags.MemberClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + r, err := clientMap[flags.MemberClusters[0]].RbacV1().Roles(flags.MemberClusterNamespace).List(ctx, metav1.ListOptions{}) assert.NoError(t, err) - rb, err := clientMap[flags.MemberClusters[0]].RbacV1().RoleBindings(flags.MemberClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + rb, err := clientMap[flags.MemberClusters[0]].RbacV1().RoleBindings(flags.MemberClusterNamespace).List(ctx, metav1.ListOptions{}) assert.NoError(t, err) - sa, err := clientMap[flags.MemberClusters[0]].CoreV1().ServiceAccounts(flags.MemberClusterNamespace).List(context.TODO(), metav1.ListOptions{}) + sa, err := clientMap[flags.MemberClusters[0]].CoreV1().ServiceAccounts(flags.MemberClusterNamespace).List(ctx, metav1.ListOptions{}) sb = marshalToYaml(t, sb, "Member Cluster, namespace-scoped 
resources", "rbac.authorization.k8s.io/v1", "Role", r.Items) sb = marshalToYaml(t, sb, "Member Cluster, namespace-scoped resources", "rbac.authorization.k8s.io/v1", "RoleBinding", rb.Items) sb = marshalToYaml(t, sb, "Member Cluster, namespace-scoped resources", "v1", "ServiceAccount", sa.Items) - os.WriteFile("../../samples/multi-cluster-cli-gitops/resources/rbac/namespace_scoped_member_cluster.yaml", []byte(sb.String()), os.ModePerm) + _ = os.WriteFile("../../samples/multi-cluster-cli-gitops/resources/rbac/namespace_scoped_member_cluster.yaml", []byte(sb.String()), os.ModePerm) } } @@ -509,9 +544,9 @@ func marshalToYaml[T interface{}](t *testing.T, sb *strings.Builder, comment str for _, cr := range items { sb.WriteString(fmt.Sprintf("apiVersion: %s\n", apiVersion)) sb.WriteString(fmt.Sprintf("kind: %s\n", kind)) - bytes, err := yaml.Marshal(cr) + marshalledBytes, err := yaml.Marshal(cr) assert.NoError(t, err) - sb.WriteString(string(bytes)) + sb.WriteString(string(marshalledBytes)) sb.WriteString("\n---\n") } return sb @@ -561,10 +596,10 @@ func TestConvertToSet(t *testing.T) { } // assertMemberClusterNamespacesExist asserts the Namespace in the member clusters exists. -func assertMemberClusterNamespacesExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { +func assertMemberClusterNamespacesExist(t *testing.T, ctx context.Context, clientMap map[string]KubeClient, flags Flags) { for _, clusterName := range flags.MemberClusters { client := clientMap[clusterName] - ns, err := client.CoreV1().Namespaces().Get(context.TODO(), flags.MemberClusterNamespace, metav1.GetOptions{}) + ns, err := client.CoreV1().Namespaces().Get(ctx, flags.MemberClusterNamespace, metav1.GetOptions{}) assert.NoError(t, err) assert.NotNil(t, ns) assert.Equal(t, flags.MemberClusterNamespace, ns.Name) @@ -572,62 +607,62 @@ func assertMemberClusterNamespacesExist(t *testing.T, clientMap map[string]KubeC } } -// assertCentralClusterNamespacesExist asserts the Namespace in the central cluster exists.. -func assertCentralClusterNamespacesExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { +// assertCentralClusterNamespacesExist asserts the Namespace in the central cluster exists. +func assertCentralClusterNamespacesExist(t *testing.T, ctx context.Context, clientMap map[string]KubeClient, flags Flags) { client := clientMap[flags.CentralCluster] - ns, err := client.CoreV1().Namespaces().Get(context.TODO(), flags.CentralClusterNamespace, metav1.GetOptions{}) - assert.NoError(t, err) + ns, err := client.CoreV1().Namespaces().Get(ctx, flags.CentralClusterNamespace, metav1.GetOptions{}) + require.NoError(t, err) assert.NotNil(t, ns) assert.Equal(t, flags.CentralClusterNamespace, ns.Name) assert.Equal(t, ns.Labels, multiClusterLabels()) } // assertServiceAccountsAreCorrect asserts the ServiceAccounts are created as expected. 
-func assertServiceAccountsExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { +func assertServiceAccountsExist(t *testing.T, ctx context.Context, clientMap map[string]KubeClient, flags Flags) { for _, clusterName := range flags.MemberClusters { client := clientMap[clusterName] - sa, err := client.CoreV1().ServiceAccounts(flags.MemberClusterNamespace).Get(context.TODO(), flags.ServiceAccount, metav1.GetOptions{}) - assert.NoError(t, err) + sa, err := client.CoreV1().ServiceAccounts(flags.CentralClusterNamespace).Get(ctx, flags.ServiceAccount, metav1.GetOptions{}) + require.NoError(t, err) assert.NotNil(t, sa) assert.Equal(t, flags.ServiceAccount, sa.Name) assert.Equal(t, sa.Labels, multiClusterLabels()) } client := clientMap[flags.CentralCluster] - sa, err := client.CoreV1().ServiceAccounts(flags.CentralClusterNamespace).Get(context.TODO(), flags.ServiceAccount, metav1.GetOptions{}) - assert.NoError(t, err) + sa, err := client.CoreV1().ServiceAccounts(flags.CentralClusterNamespace).Get(ctx, flags.ServiceAccount, metav1.GetOptions{}) + require.NoError(t, err) assert.NotNil(t, sa) assert.Equal(t, flags.ServiceAccount, sa.Name) assert.Equal(t, sa.Labels, multiClusterLabels()) } // assertDatabaseRolesExist asserts the DatabaseRoles are created as expected. -func assertDatabaseRolesExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { +func assertDatabaseRolesExist(t *testing.T, ctx context.Context, clientMap map[string]KubeClient, flags Flags) { for _, clusterName := range flags.MemberClusters { client := clientMap[clusterName] // appDB service account - sa, err := client.CoreV1().ServiceAccounts(flags.MemberClusterNamespace).Get(context.TODO(), AppdbServiceAccount, metav1.GetOptions{}) - assert.NoError(t, err) - assert.NotNil(t, sa) + sa, err := client.CoreV1().ServiceAccounts(flags.MemberClusterNamespace).Get(ctx, AppdbServiceAccount, metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, sa) assert.Equal(t, sa.Labels, multiClusterLabels()) // database pods service account - sa, err = client.CoreV1().ServiceAccounts(flags.MemberClusterNamespace).Get(context.TODO(), DatabasePodsServiceAccount, metav1.GetOptions{}) - assert.NoError(t, err) - assert.NotNil(t, sa) + sa, err = client.CoreV1().ServiceAccounts(flags.MemberClusterNamespace).Get(ctx, DatabasePodsServiceAccount, metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, sa) assert.Equal(t, sa.Labels, multiClusterLabels()) // ops manager service account - sa, err = client.CoreV1().ServiceAccounts(flags.MemberClusterNamespace).Get(context.TODO(), OpsManagerServiceAccount, metav1.GetOptions{}) - assert.NoError(t, err) - assert.NotNil(t, sa) + sa, err = client.CoreV1().ServiceAccounts(flags.MemberClusterNamespace).Get(ctx, OpsManagerServiceAccount, metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, sa) assert.Equal(t, sa.Labels, multiClusterLabels()) // appdb role - r, err := client.RbacV1().Roles(flags.MemberClusterNamespace).Get(context.TODO(), AppdbRole, metav1.GetOptions{}) - assert.NoError(t, err) - assert.NotNil(t, r) + r, err := client.RbacV1().Roles(flags.MemberClusterNamespace).Get(ctx, AppdbRole, metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, r) assert.Equal(t, r.Labels, multiClusterLabels()) assert.Equal(t, []rbacv1.PolicyRule{ { @@ -643,9 +678,9 @@ func assertDatabaseRolesExist(t *testing.T, clientMap map[string]KubeClient, fla }, r.Rules) // appdb rolebinding - rb, err := 
client.RbacV1().RoleBindings(flags.MemberClusterNamespace).Get(context.TODO(), AppdbRoleBinding, metav1.GetOptions{}) - assert.NoError(t, err) - assert.NotNil(t, r) + rb, err := client.RbacV1().RoleBindings(flags.MemberClusterNamespace).Get(ctx, AppdbRoleBinding, metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, r) assert.Equal(t, rb.Labels, multiClusterLabels()) assert.Equal(t, []rbacv1.Subject{ { @@ -661,20 +696,20 @@ func assertDatabaseRolesExist(t *testing.T, clientMap map[string]KubeClient, fla } // assertMemberClusterRolesExist should be used when member cluster cluster roles should exist. -func assertMemberClusterRolesExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { - assertClusterRoles(t, clientMap, flags, true, memberCluster) +func assertMemberClusterRolesExist(t *testing.T, ctx context.Context, clientMap map[string]KubeClient, flags Flags) { + assertClusterRoles(t, ctx, clientMap, flags, true, clusterTypeMember) } // assertMemberClusterRolesDoNotExist should be used when member cluster cluster roles should not exist. -func assertMemberClusterRolesDoNotExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { - assertClusterRoles(t, clientMap, flags, false, centralCluster) +func assertMemberClusterRolesDoNotExist(t *testing.T, ctx context.Context, clientMap map[string]KubeClient, flags Flags) { + assertClusterRoles(t, ctx, clientMap, flags, false, clusterTypeCentral) } // assertClusterRoles should be used to assert the existence of member cluster cluster roles. The boolean // shouldExist should be true for roles existing, and false for cluster roles not existing. -func assertClusterRoles(t *testing.T, clientMap map[string]KubeClient, flags Flags, shouldExist bool, clusterType clusterType) { +func assertClusterRoles(t *testing.T, ctx context.Context, clientMap map[string]KubeClient, flags Flags, shouldExist bool, clusterType clusterType) { var expectedClusterRole rbacv1.ClusterRole - if clusterType == centralCluster { + if clusterType == clusterTypeCentral { expectedClusterRole = buildCentralEntityClusterRole() } else { expectedClusterRole = buildMemberEntityClusterRole() @@ -682,7 +717,7 @@ func assertClusterRoles(t *testing.T, clientMap map[string]KubeClient, flags Fla for _, clusterName := range flags.MemberClusters { client := clientMap[clusterName] - role, err := client.RbacV1().ClusterRoles().Get(context.TODO(), expectedClusterRole.Name, metav1.GetOptions{}) + role, err := client.RbacV1().ClusterRoles().Get(ctx, expectedClusterRole.Name, metav1.GetOptions{}) if shouldExist { assert.NoError(t, err) assert.NotNil(t, role) @@ -693,7 +728,7 @@ func assertClusterRoles(t *testing.T, clientMap map[string]KubeClient, flags Fla } } - clusterRole, err := clientMap[flags.CentralCluster].RbacV1().ClusterRoles().Get(context.TODO(), expectedClusterRole.Name, metav1.GetOptions{}) + clusterRole, err := clientMap[flags.CentralCluster].RbacV1().ClusterRoles().Get(ctx, expectedClusterRole.Name, metav1.GetOptions{}) if shouldExist { assert.Nil(t, err) assert.NotNil(t, clusterRole) @@ -703,23 +738,23 @@ func assertClusterRoles(t *testing.T, clientMap map[string]KubeClient, flags Fla } // assertMemberRolesExist should be used when member cluster roles should exist. 
-func assertMemberRolesExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { - assertMemberRolesAreCorrect(t, clientMap, flags, true) +func assertMemberRolesExist(t *testing.T, ctx context.Context, clientMap map[string]KubeClient, flags Flags) { + assertMemberRolesAreCorrect(t, ctx, clientMap, flags, true) } // assertMemberRolesDoNotExist should be used when member cluster roles should not exist. -func assertMemberRolesDoNotExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { - assertMemberRolesAreCorrect(t, clientMap, flags, false) +func assertMemberRolesDoNotExist(t *testing.T, ctx context.Context, clientMap map[string]KubeClient, flags Flags) { + assertMemberRolesAreCorrect(t, ctx, clientMap, flags, false) } // assertMemberRolesAreCorrect should be used to assert the existence of member cluster roles. The boolean // shouldExist should be true for roles existing, and false for roles not existing. -func assertMemberRolesAreCorrect(t *testing.T, clientMap map[string]KubeClient, flags Flags, shouldExist bool) { +func assertMemberRolesAreCorrect(t *testing.T, ctx context.Context, clientMap map[string]KubeClient, flags Flags, shouldExist bool) { expectedRole := buildMemberEntityRole(flags.MemberClusterNamespace) for _, clusterName := range flags.MemberClusters { client := clientMap[clusterName] - role, err := client.RbacV1().Roles(flags.MemberClusterNamespace).Get(context.TODO(), expectedRole.Name, metav1.GetOptions{}) + role, err := client.RbacV1().Roles(flags.MemberClusterNamespace).Get(ctx, expectedRole.Name, metav1.GetOptions{}) if shouldExist { assert.NoError(t, err) assert.NotNil(t, role) @@ -732,29 +767,29 @@ func assertMemberRolesAreCorrect(t *testing.T, clientMap map[string]KubeClient, } // assertCentralRolesExist should be used when central cluster roles should exist. -func assertCentralRolesExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { - assertCentralRolesAreCorrect(t, clientMap, flags, true) +func assertCentralRolesExist(t *testing.T, ctx context.Context, clientMap map[string]KubeClient, flags Flags) { + assertCentralRolesAreCorrect(t, ctx, clientMap, flags, true) } // assertCentralRolesDoNotExist should be used when central cluster roles should not exist. -func assertCentralRolesDoNotExist(t *testing.T, clientMap map[string]KubeClient, flags Flags) { - assertCentralRolesAreCorrect(t, clientMap, flags, false) +func assertCentralRolesDoNotExist(t *testing.T, ctx context.Context, clientMap map[string]KubeClient, flags Flags) { + assertCentralRolesAreCorrect(t, ctx, clientMap, flags, false) } // assertCentralRolesAreCorrect should be used to assert the existence of central cluster roles. The boolean // shouldExist should be true for roles existing, and false for roles not existing. 
-func assertCentralRolesAreCorrect(t *testing.T, clientMap map[string]KubeClient, flags Flags, shouldExist bool) { +func assertCentralRolesAreCorrect(t *testing.T, ctx context.Context, clientMap map[string]KubeClient, flags Flags, shouldExist bool) { client := clientMap[flags.CentralCluster] // should never have a cluster role clusterRole := buildCentralEntityClusterRole() - cr, err := client.RbacV1().ClusterRoles().Get(context.TODO(), clusterRole.Name, metav1.GetOptions{}) + cr, err := client.RbacV1().ClusterRoles().Get(ctx, clusterRole.Name, metav1.GetOptions{}) assert.True(t, errors.IsNotFound(err)) assert.Nil(t, cr) expectedRole := buildCentralEntityRole(flags.CentralClusterNamespace) - role, err := client.RbacV1().Roles(flags.CentralClusterNamespace).Get(context.TODO(), expectedRole.Name, metav1.GetOptions{}) + role, err := client.RbacV1().Roles(flags.CentralClusterNamespace).Get(ctx, expectedRole.Name, metav1.GetOptions{}) if shouldExist { assert.NoError(t, err, "should always create a role for central cluster") @@ -776,79 +811,59 @@ var ( roleResourceType resourceType = "Role" ) -// createResourcesForCluster returns the resources specified based on the provided resourceTypes. -// this function is used to populate subsets of resources for the unit tests. -func createResourcesForCluster(centralCluster bool, flags Flags, clusterName string, resourceTypes ...resourceType) []runtime.Object { - var namespace = flags.MemberClusterNamespace - if centralCluster { - namespace = flags.CentralCluster +// getClientResources returns a map of cluster name to fake.Clientset +func getClientResources(ctx context.Context, flags Flags, resourceTypes ...resourceType) map[string]KubeClient { + clientMap := make(map[string]KubeClient) + + for _, clusterName := range flags.MemberClusters { + if clusterName == flags.CentralCluster { + continue + } + clientMap[clusterName] = NewKubeClientContainer(nil, newFakeClientset(ctx, clusterName, nil), nil) } + clientMap[flags.CentralCluster] = NewKubeClientContainer(nil, newFakeClientset(ctx, flags.CentralCluster, nil), nil) - resources := make([]runtime.Object, 0) + return clientMap +} - // always create the service account token secret as this gets created by - // kubernetes, we can just assume it is always there for tests. - resources = append(resources, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-token", flags.ServiceAccount), - Namespace: namespace, - }, - Data: map[string][]byte{ - "ca.crt": []byte(fmt.Sprintf("ca-cert-data-%s", clusterName)), - "token": []byte(fmt.Sprintf("%s-token-data", clusterName)), +func newFakeClientset(ctx context.Context, clusterName string, resources []runtime.Object) *fake.Clientset { + clientset := fake.NewSimpleClientset(resources...) 
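+	// register a Secret informer so the fake clientset can mimic the control plane asynchronously
+	// populating ServiceAccount token secrets after they are created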
+ informerFactory := informers.NewSharedInformerFactory(clientset, time.Second) + secretInformer := informerFactory.Core().V1().Secrets().Informer() + _, err := secretInformer.AddEventHandler(&cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + s := obj.(*corev1.Secret).DeepCopy() + // simulate populating the service account secret token data into the secret + // it's done automatically by k8s + onSecretCreate(s, clusterName, clientset, ctx) }, }) - if containsResourceType(resourceTypes, namespaceResourceType) { - resources = append(resources, &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Labels: multiClusterLabels(), - }, - }) - } + informerFactory.Start(ctx.Done()) + informerFactory.WaitForCacheSync(ctx.Done()) - if containsResourceType(resourceTypes, serviceAccountResourceType) { - resources = append(resources, &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: flags.ServiceAccount, - Labels: multiClusterLabels(), - }, - Secrets: []corev1.ObjectReference{ - { - Name: flags.ServiceAccount + "-token", - Namespace: namespace, - }, - }, - }) - } - - if containsResourceType(resourceTypes, roleResourceType) { - role := buildMemberEntityRole(namespace) - resources = append(resources, &role) - } - - if containsResourceType(resourceTypes, roleBindingResourceType) { - role := buildMemberEntityRole(namespace) - roleBinding := buildRoleBinding(role, namespace) - resources = append(resources, &roleBinding) + if err != nil { + panic(fmt.Errorf("%w", err)) } - return resources + return clientset } -// getClientResources returns a map of cluster name to fake.Clientset -func getClientResources(flags Flags, resourceTypes ...resourceType) map[string]KubeClient { - clientMap := make(map[string]KubeClient) - - for _, clusterName := range flags.MemberClusters { - resources := createResourcesForCluster(false, flags, clusterName, resourceTypes...) - clientMap[clusterName] = NewKubeClientContainer(nil, fake.NewSimpleClientset(resources...), nil) +func onSecretCreate(s *corev1.Secret, clusterName string, clientset *fake.Clientset, ctx context.Context) { + // simulate populating the service account secret token data into the secret + // it's done automatically by k8s + if s.Type == corev1.SecretTypeServiceAccountToken { + // random delay to ensure the code is polling for the data set by k8s + time.Sleep(time.Millisecond * time.Duration(1+rand.Intn(5))) + if s.Data == nil { + s.Data = map[string][]byte{} + } + s.Data["ca.crt"] = []byte(fmt.Sprintf("ca.crt: %s", clusterName)) + s.Data["token"] = []byte(fmt.Sprintf("token: %s", clusterName)) + if _, err := clientset.CoreV1().Secrets(s.Namespace).Update(ctx, s, metav1.UpdateOptions{}); err != nil { + panic(err) + } } - resources := createResourcesForCluster(true, flags, flags.CentralCluster, resourceTypes...) - clientMap[flags.CentralCluster] = NewKubeClientContainer(nil, fake.NewSimpleClientset(resources...), nil) - - return clientMap } // containsResourceType returns true if r is in resourceTypes, otherwise false. @@ -862,8 +877,8 @@ func containsResourceType(resourceTypes []resourceType, r resourceType) bool { } // readSecretKey reads a key from a Secret in the given namespace with the given name. 
-func readSecretKey(client KubeClient, secretName, namespace, key string) ([]byte, error) { - tokenSecret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{}) +func readSecretKey(ctx context.Context, client KubeClient, secretName, namespace, key string) ([]byte, error) { + tokenSecret, err := client.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{}) if err != nil { return nil, err } @@ -871,8 +886,8 @@ func readSecretKey(client KubeClient, secretName, namespace, key string) ([]byte } // readKubeConfig reads the KubeConfig file from the secret in the given cluster and namespace. -func readKubeConfig(client KubeClient, namespace string) (KubeConfigFile, error) { - kubeConfigSecret, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), KubeConfigSecretName, metav1.GetOptions{}) +func readKubeConfig(ctx context.Context, client KubeClient, namespace string) (KubeConfigFile, error) { + kubeConfigSecret, err := client.CoreV1().Secrets(namespace).Get(ctx, KubeConfigSecretName, metav1.GetOptions{}) if err != nil { return KubeConfigFile{}, err } diff --git a/tools/multicluster/pkg/common/kubeclientcontainer.go b/tools/multicluster/pkg/common/kubeclientcontainer.go index 6a5cbe3..1fbb085 100644 --- a/tools/multicluster/pkg/common/kubeclientcontainer.go +++ b/tools/multicluster/pkg/common/kubeclientcontainer.go @@ -5,56 +5,57 @@ import ( "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" - v1alpha19 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" - v1beta16 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" - "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" - v12 "k8s.io/client-go/kubernetes/typed/apps/v1" - v1beta17 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" - "k8s.io/client-go/kubernetes/typed/apps/v1beta2" - v17 "k8s.io/client-go/kubernetes/typed/authentication/v1" - v1alpha20 "k8s.io/client-go/kubernetes/typed/authentication/v1alpha1" - v1beta18 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" - v18 "k8s.io/client-go/kubernetes/typed/authorization/v1" - v1beta19 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" - v19 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" - v2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2" - "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" - "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2" - v110 "k8s.io/client-go/kubernetes/typed/batch/v1" - "k8s.io/client-go/kubernetes/typed/batch/v1beta1" - v111 "k8s.io/client-go/kubernetes/typed/certificates/v1" - v1beta110 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" - v116 "k8s.io/client-go/kubernetes/typed/coordination/v1" - v1beta111 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - v115 "k8s.io/client-go/kubernetes/typed/discovery/v1" - v1beta117 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1" - v114 "k8s.io/client-go/kubernetes/typed/events/v1" - v1beta116 "k8s.io/client-go/kubernetes/typed/events/v1beta1" - v1beta115 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - v1alpha16 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" - v1beta114 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" - v1beta22 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" - "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" - v113 "k8s.io/client-go/kubernetes/typed/networking/v1" 
- v1alpha18 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1" - v1beta113 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" - v112 "k8s.io/client-go/kubernetes/typed/node/v1" - v1alpha15 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" - v1beta15 "k8s.io/client-go/kubernetes/typed/node/v1beta1" - v16 "k8s.io/client-go/kubernetes/typed/policy/v1" - v1beta14 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" - v15 "k8s.io/client-go/kubernetes/typed/rbac/v1" - v1alpha13 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" - v1beta112 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - v1alpha17 "k8s.io/client-go/kubernetes/typed/resource/v1alpha1" - v14 "k8s.io/client-go/kubernetes/typed/scheduling/v1" - v1alpha14 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" - v1beta13 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" - v13 "k8s.io/client-go/kubernetes/typed/storage/v1" - v1alpha12 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" - v1beta12 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" + admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" + admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" + admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" + apiserverinternalv1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" + appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" + appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" + authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" + authenticationv1alpha1 "k8s.io/client-go/kubernetes/typed/authentication/v1alpha1" + authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" + authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" + autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" + autoscalingv2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2" + autoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" + autoscalingv2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2" + batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" + batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" + certificatesv1 "k8s.io/client-go/kubernetes/typed/certificates/v1" + certificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1" + certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" + coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + discoveryv1 "k8s.io/client-go/kubernetes/typed/discovery/v1" + discoveryv1beta1 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1" + eventsv1 "k8s.io/client-go/kubernetes/typed/events/v1" + eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" + extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" + flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" + flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" + flowcontrolv1beta3 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" + networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" + networkingv1alpha1 
"k8s.io/client-go/kubernetes/typed/networking/v1alpha1" + networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" + nodev1 "k8s.io/client-go/kubernetes/typed/node/v1" + nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" + nodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" + policyv1 "k8s.io/client-go/kubernetes/typed/policy/v1" + policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" + rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" + rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" + rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" + resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" + schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" + schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" + schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" + storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" + storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" + storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" "k8s.io/client-go/rest" ) @@ -74,208 +75,214 @@ type KubeClientContainer struct { restConfig *rest.Config } -func (k *KubeClientContainer) AdmissionregistrationV1alpha1() v1alpha19.AdmissionregistrationV1alpha1Interface { +func (k *KubeClientContainer) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface { + //TODO implement me + panic("implement me") +} + +func (k *KubeClientContainer) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface { + //TODO implement me + panic("implement me") +} + +func (k *KubeClientContainer) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { return k.staticClient.AdmissionregistrationV1alpha1() } -func (k *KubeClientContainer) AuthenticationV1alpha1() v1alpha20.AuthenticationV1alpha1Interface { +func (k *KubeClientContainer) AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface { return k.staticClient.AuthenticationV1alpha1() } -func (k *KubeClientContainer) FlowcontrolV1beta3() v1beta3.FlowcontrolV1beta3Interface { +func (k *KubeClientContainer) FlowcontrolV1beta3() flowcontrolv1beta3.FlowcontrolV1beta3Interface { return k.staticClient.FlowcontrolV1beta3() } -func (k *KubeClientContainer) NetworkingV1alpha1() v1alpha18.NetworkingV1alpha1Interface { +func (k *KubeClientContainer) NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface { return k.staticClient.NetworkingV1alpha1() } -func (k *KubeClientContainer) ResourceV1alpha1() v1alpha17.ResourceV1alpha1Interface { - return k.staticClient.ResourceV1alpha1() -} - func (k *KubeClientContainer) Discovery() discovery.DiscoveryInterface { return k.staticClient.Discovery() } -func (k *KubeClientContainer) AdmissionregistrationV1() v1.AdmissionregistrationV1Interface { +func (k *KubeClientContainer) AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface { return k.staticClient.AdmissionregistrationV1() } -func (k *KubeClientContainer) AdmissionregistrationV1beta1() v1beta16.AdmissionregistrationV1beta1Interface { +func (k *KubeClientContainer) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { return k.staticClient.AdmissionregistrationV1beta1() } -func (k *KubeClientContainer) InternalV1alpha1() v1alpha1.InternalV1alpha1Interface { +func (k *KubeClientContainer) InternalV1alpha1() apiserverinternalv1alpha1.InternalV1alpha1Interface { 
-func (k *KubeClientContainer) InternalV1alpha1() v1alpha1.InternalV1alpha1Interface {
+func (k *KubeClientContainer) InternalV1alpha1() apiserverinternalv1alpha1.InternalV1alpha1Interface {
 	return k.staticClient.InternalV1alpha1()
 }

-func (k *KubeClientContainer) AppsV1() v12.AppsV1Interface {
+func (k *KubeClientContainer) AppsV1() appsv1.AppsV1Interface {
 	return k.staticClient.AppsV1()
 }

-func (k *KubeClientContainer) AppsV1beta1() v1beta17.AppsV1beta1Interface {
+func (k *KubeClientContainer) AppsV1beta1() appsv1beta1.AppsV1beta1Interface {
 	return k.staticClient.AppsV1beta1()
 }

-func (k *KubeClientContainer) AppsV1beta2() v1beta2.AppsV1beta2Interface {
+func (k *KubeClientContainer) AppsV1beta2() appsv1beta2.AppsV1beta2Interface {
 	return k.staticClient.AppsV1beta2()
 }

-func (k *KubeClientContainer) AuthenticationV1() v17.AuthenticationV1Interface {
+func (k *KubeClientContainer) AuthenticationV1() authenticationv1.AuthenticationV1Interface {
 	return k.staticClient.AuthenticationV1()
 }

-func (k *KubeClientContainer) AuthenticationV1beta1() v1beta18.AuthenticationV1beta1Interface {
+func (k *KubeClientContainer) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface {
 	return k.staticClient.AuthenticationV1beta1()
 }

-func (k *KubeClientContainer) AuthorizationV1() v18.AuthorizationV1Interface {
+func (k *KubeClientContainer) AuthorizationV1() authorizationv1.AuthorizationV1Interface {
 	return k.staticClient.AuthorizationV1()
 }

-func (k *KubeClientContainer) AuthorizationV1beta1() v1beta19.AuthorizationV1beta1Interface {
+func (k *KubeClientContainer) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface {
 	return k.staticClient.AuthorizationV1beta1()
 }

-func (k *KubeClientContainer) AutoscalingV1() v19.AutoscalingV1Interface {
+func (k *KubeClientContainer) AutoscalingV1() autoscalingv1.AutoscalingV1Interface {
 	return k.staticClient.AutoscalingV1()
 }

-func (k *KubeClientContainer) AutoscalingV2() v2.AutoscalingV2Interface {
+func (k *KubeClientContainer) AutoscalingV2() autoscalingv2.AutoscalingV2Interface {
 	return k.staticClient.AutoscalingV2()
 }

-func (k *KubeClientContainer) AutoscalingV2beta1() v2beta1.AutoscalingV2beta1Interface {
+func (k *KubeClientContainer) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface {
 	return k.staticClient.AutoscalingV2beta1()
 }

-func (k *KubeClientContainer) AutoscalingV2beta2() v2beta2.AutoscalingV2beta2Interface {
+func (k *KubeClientContainer) AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface {
 	return k.staticClient.AutoscalingV2beta2()
 }

-func (k *KubeClientContainer) BatchV1() v110.BatchV1Interface {
+func (k *KubeClientContainer) BatchV1() batchv1.BatchV1Interface {
 	return k.staticClient.BatchV1()
 }

-func (k *KubeClientContainer) BatchV1beta1() v1beta1.BatchV1beta1Interface {
+func (k *KubeClientContainer) BatchV1beta1() batchv1beta1.BatchV1beta1Interface {
 	//TODO implement me
 	panic("implement me")
 }

-func (k *KubeClientContainer) CertificatesV1() v111.CertificatesV1Interface {
+func (k *KubeClientContainer) CertificatesV1() certificatesv1.CertificatesV1Interface {
 	return k.staticClient.CertificatesV1()
 }

-func (k *KubeClientContainer) CertificatesV1beta1() v1beta110.CertificatesV1beta1Interface {
+func (k *KubeClientContainer) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface {
 	return k.staticClient.CertificatesV1beta1()
 }

-func (k *KubeClientContainer) CoordinationV1beta1() v1beta111.CoordinationV1beta1Interface {
+func (k *KubeClientContainer) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface {
 	return k.staticClient.CoordinationV1beta1()
 }
-func (k *KubeClientContainer) CoordinationV1() v116.CoordinationV1Interface {
+func (k *KubeClientContainer) CoordinationV1() coordinationv1.CoordinationV1Interface {
 	return k.staticClient.CoordinationV1()
 }

-func (k *KubeClientContainer) CoreV1() corev1client.CoreV1Interface {
+func (k *KubeClientContainer) CoreV1() corev1.CoreV1Interface {
 	return k.staticClient.CoreV1()
 }

-func (k *KubeClientContainer) DiscoveryV1() v115.DiscoveryV1Interface {
+func (k *KubeClientContainer) DiscoveryV1() discoveryv1.DiscoveryV1Interface {
 	return k.staticClient.DiscoveryV1()
 }

-func (k *KubeClientContainer) DiscoveryV1beta1() v1beta117.DiscoveryV1beta1Interface {
+func (k *KubeClientContainer) DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface {
 	return k.staticClient.DiscoveryV1beta1()
 }

-func (k KubeClientContainer) EventsV1() v114.EventsV1Interface {
+func (k KubeClientContainer) EventsV1() eventsv1.EventsV1Interface {
 	return k.staticClient.EventsV1()
 }

-func (k *KubeClientContainer) EventsV1beta1() v1beta116.EventsV1beta1Interface {
+func (k *KubeClientContainer) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface {
 	return k.staticClient.EventsV1beta1()
 }

-func (k *KubeClientContainer) ExtensionsV1beta1() v1beta115.ExtensionsV1beta1Interface {
+func (k *KubeClientContainer) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface {
 	return k.staticClient.ExtensionsV1beta1()
 }

-func (k *KubeClientContainer) FlowcontrolV1alpha1() v1alpha16.FlowcontrolV1alpha1Interface {
+func (k *KubeClientContainer) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface {
 	return k.staticClient.FlowcontrolV1alpha1()
 }

-func (k *KubeClientContainer) FlowcontrolV1beta1() v1beta114.FlowcontrolV1beta1Interface {
+func (k *KubeClientContainer) FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface {
 	return k.staticClient.FlowcontrolV1beta1()
 }

-func (k *KubeClientContainer) FlowcontrolV1beta2() v1beta22.FlowcontrolV1beta2Interface {
+func (k *KubeClientContainer) FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface {
 	return k.staticClient.FlowcontrolV1beta2()
 }

-func (k *KubeClientContainer) NetworkingV1() v113.NetworkingV1Interface {
+func (k *KubeClientContainer) NetworkingV1() networkingv1.NetworkingV1Interface {
 	return k.staticClient.NetworkingV1()
 }

-func (k *KubeClientContainer) NetworkingV1beta1() v1beta113.NetworkingV1beta1Interface {
+func (k *KubeClientContainer) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface {
 	return k.staticClient.NetworkingV1beta1()
 }

-func (k *KubeClientContainer) NodeV1() v112.NodeV1Interface {
+func (k *KubeClientContainer) NodeV1() nodev1.NodeV1Interface {
 	return k.staticClient.NodeV1()
 }

-func (k *KubeClientContainer) NodeV1alpha1() v1alpha15.NodeV1alpha1Interface {
+func (k *KubeClientContainer) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface {
 	return k.staticClient.NodeV1alpha1()
 }

-func (k *KubeClientContainer) NodeV1beta1() v1beta15.NodeV1beta1Interface {
+func (k *KubeClientContainer) NodeV1beta1() nodev1beta1.NodeV1beta1Interface {
 	return k.staticClient.NodeV1beta1()
 }

-func (k *KubeClientContainer) PolicyV1() v16.PolicyV1Interface {
+func (k *KubeClientContainer) PolicyV1() policyv1.PolicyV1Interface {
 	return k.staticClient.PolicyV1()
 }

-func (k *KubeClientContainer) PolicyV1beta1() v1beta14.PolicyV1beta1Interface {
+func (k *KubeClientContainer) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface {
 	return k.staticClient.PolicyV1beta1()
 }
-func (k *KubeClientContainer) RbacV1() v15.RbacV1Interface {
+func (k *KubeClientContainer) RbacV1() rbacv1.RbacV1Interface {
 	return k.staticClient.RbacV1()
 }

-func (k *KubeClientContainer) RbacV1beta1() v1beta112.RbacV1beta1Interface {
+func (k *KubeClientContainer) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface {
 	return k.staticClient.RbacV1beta1()
 }

-func (k *KubeClientContainer) RbacV1alpha1() v1alpha13.RbacV1alpha1Interface {
+func (k *KubeClientContainer) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface {
 	return k.staticClient.RbacV1alpha1()
 }

-func (k *KubeClientContainer) SchedulingV1alpha1() v1alpha14.SchedulingV1alpha1Interface {
+func (k *KubeClientContainer) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface {
 	return k.staticClient.SchedulingV1alpha1()
 }

-func (k *KubeClientContainer) SchedulingV1beta1() v1beta13.SchedulingV1beta1Interface {
+func (k *KubeClientContainer) SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface {
 	return k.staticClient.SchedulingV1beta1()
 }

-func (k *KubeClientContainer) SchedulingV1() v14.SchedulingV1Interface {
+func (k *KubeClientContainer) SchedulingV1() schedulingv1.SchedulingV1Interface {
 	return k.staticClient.SchedulingV1()
 }

-func (k *KubeClientContainer) StorageV1beta1() v1beta12.StorageV1beta1Interface {
+func (k *KubeClientContainer) StorageV1beta1() storagev1beta1.StorageV1beta1Interface {
 	return k.staticClient.StorageV1beta1()
 }

-func (k *KubeClientContainer) StorageV1() v13.StorageV1Interface {
+func (k *KubeClientContainer) StorageV1() storagev1.StorageV1Interface {
 	return k.staticClient.StorageV1()
 }

-func (k *KubeClientContainer) StorageV1alpha1() v1alpha12.StorageV1alpha1Interface {
+func (k *KubeClientContainer) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface {
 	return k.staticClient.StorageV1alpha1()
 }
diff --git a/tools/multicluster/pkg/debug/collectors.go b/tools/multicluster/pkg/debug/collectors.go
index a5fbe4c..8428bb8 100644
--- a/tools/multicluster/pkg/debug/collectors.go
+++ b/tools/multicluster/pkg/debug/collectors.go
@@ -7,6 +7,8 @@ import (
 	"fmt"
 	"strings"

+	"k8s.io/utils/ptr"
+
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/remotecommand"

@@ -16,7 +18,6 @@ import (
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/utils/pointer"
 )

 var (
@@ -224,7 +225,7 @@ func (s *LogsCollector) Collect(ctx context.Context, kubeClient common.KubeClien
 		podName := logsToCollect[i].Name
 		PodLogsConnection := kubeClient.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{
 			Follow:    false,
-			TailLines: pointer.Int64(100),
+			TailLines: ptr.To(int64(100)),
 			Container: logsToCollect[i].ContainerName,
 		})
 		LogStream, err := PodLogsConnection.Stream(ctx)
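In collectors.go the deprecated `k8s.io/utils/pointer` helpers give way to the generics-based `k8s.io/utils/ptr`: a single `ptr.To` function covers every type, which is exactly the `pointer.Int64(100)` → `ptr.To(int64(100))` change above. Illustration only; the field values here are arbitrary:

```go
import "k8s.io/utils/ptr"

// One generic helper replaces the whole pointer.Int64/Bool/String family.
var (
	tailLines = ptr.To(int64(100)) // *int64
	follow    = ptr.To(false)      // *bool
	container = ptr.To("agent")    // *string
)
```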
diff --git a/tools/multicluster/pkg/debug/collectors_test.go b/tools/multicluster/pkg/debug/collectors_test.go
index eeb78fe..651db87 100644
--- a/tools/multicluster/pkg/debug/collectors_test.go
+++ b/tools/multicluster/pkg/debug/collectors_test.go
@@ -18,6 +18,7 @@ import (
 )

 func TestCollectors(t *testing.T) {
+	ctx := context.Background()
 	//given
 	collectors := []Collector{
 		&MongoDBCommunityCollector{},
@@ -38,11 +39,11 @@ func TestCollectors(t *testing.T) {
 	namespace := "test"
 	testObjectNames := "test"

-	kubeClient := kubeClientWithTestingResources(namespace, testObjectNames)
+	kubeClient := kubeClientWithTestingResources(ctx, namespace, testObjectNames)

 	//when
 	for _, collector := range collectors {
-		kubeObjects, rawObjects, err := collector.Collect(context.TODO(), kubeClient, namespace, filter, anonymizer)
+		kubeObjects, rawObjects, err := collector.Collect(ctx, kubeClient, namespace, filter, anonymizer)

 		//then
 		assert.NoError(t, err)
@@ -51,7 +52,7 @@ func TestCollectors(t *testing.T) {
 	}
 }

-func kubeClientWithTestingResources(namespace, testObjectNames string) *common.KubeClientContainer {
+func kubeClientWithTestingResources(ctx context.Context, namespace, testObjectNames string) *common.KubeClientContainer {
 	resources := []runtime.Object{
 		&v12.Pod{
 			ObjectMeta: metav1.ObjectMeta{
@@ -161,11 +162,11 @@ func kubeClientWithTestingResources(namespace, testObjectNames string) *common.K
 	}

 	dynamicFake := fake2.NewSimpleDynamicClientWithCustomListKinds(scheme, dynamicLists)
-	dynamicFake.Resource(MongoDBMultiClusterGVR).Create(context.TODO(), &MongoDBMultiClusterResource, metav1.CreateOptions{})
-	dynamicFake.Resource(MongoDBCommunityGVR).Create(context.TODO(), &MongoDBCommunityResource, metav1.CreateOptions{})
-	dynamicFake.Resource(MongoDBGVR).Create(context.TODO(), &MongoDBResource, metav1.CreateOptions{})
-	dynamicFake.Resource(MongoDBUsersGVR).Create(context.TODO(), &MongoDBUserResource, metav1.CreateOptions{})
-	dynamicFake.Resource(OpsManagerSchemeGVR).Create(context.TODO(), &OpsManagerResource, metav1.CreateOptions{})
+	dynamicFake.Resource(MongoDBMultiClusterGVR).Create(ctx, &MongoDBMultiClusterResource, metav1.CreateOptions{})
+	dynamicFake.Resource(MongoDBCommunityGVR).Create(ctx, &MongoDBCommunityResource, metav1.CreateOptions{})
+	dynamicFake.Resource(MongoDBGVR).Create(ctx, &MongoDBResource, metav1.CreateOptions{})
+	dynamicFake.Resource(MongoDBUsersGVR).Create(ctx, &MongoDBUserResource, metav1.CreateOptions{})
+	dynamicFake.Resource(OpsManagerSchemeGVR).Create(ctx, &OpsManagerResource, metav1.CreateOptions{})

 	kubeClient := common.NewKubeClientContainer(nil, fake.NewSimpleClientset(resources...), dynamicFake)
 	return kubeClient
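The test now creates a single `ctx := context.Background()` up front and hands it both to the collectors under test and to the fake dynamic client it seeds with custom resources, mirroring the production-code change. A condensed, self-contained sketch of that seeding pattern, with a placeholder GVR and object rather than the test's real fixtures:

```go
package debug_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	dynamicfake "k8s.io/client-go/dynamic/fake"
)

func TestSeedFakeDynamicClient(t *testing.T) {
	ctx := context.Background()
	gvr := schema.GroupVersionResource{Group: "mongodb.com", Version: "v1", Resource: "mongodb"}

	// Register the list kind for the custom resource, as the test above does.
	fakeClient := dynamicfake.NewSimpleDynamicClientWithCustomListKinds(
		runtime.NewScheme(),
		map[schema.GroupVersionResource]string{gvr: "MongoDBList"},
	)

	obj := &unstructured.Unstructured{}
	obj.SetGroupVersionKind(gvr.GroupVersion().WithKind("MongoDB"))
	obj.SetNamespace("test")
	obj.SetName("my-replica-set")

	// The fixture above drops this error; checking it keeps a broken seed
	// from surfacing later as a confusing empty-collection failure.
	if _, err := fakeClient.Resource(gvr).Namespace("test").Create(ctx, obj, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}
}
```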