Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: remove arc wiring and drop --arc-cluster flag from webhook #723

Merged
merged 1 commit into from
Jan 27, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 0 additions & 7 deletions .pipelines/nightly.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -89,10 +89,6 @@ jobs:
CLUSTER_NAME: "azwi-aks-win-containerd"
soak_aks_linux:
CLUSTER_NAME: "azwi-aks-linux"
soak_arc:
ARC_CLUSTER: "true"
CLUSTER_NAME: "azwi-aks-arc"
GINKGO_SKIP: \[Exclude:Arc\]
steps:
- script: make test-e2e
displayName: Webhook E2E test suite
Expand All @@ -117,9 +113,6 @@ jobs:
GINKGO_SKIP: \[AKSSoakOnly\]
upgrade_aks_linux:
GINKGO_SKIP: \[AKSSoakOnly\]
upgrade_arc:
ARC_CLUSTER: "true"
GINKGO_SKIP: \[AKSSoakOnly\]
- job:
timeoutInMinutes: 60
dependsOn:
Expand Down
4 changes: 0 additions & 4 deletions .pipelines/pr.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -87,10 +87,6 @@ jobs:
aks_linux:
REGISTRY: upstream.azurecr.io/azure-workload-identity
GINKGO_SKIP: \[AKSSoakOnly\]
arc:
REGISTRY: upstream.azurecr.io/azure-workload-identity
ARC_CLUSTER: "true"
GINKGO_SKIP: \[AKSSoakOnly\]
kind_v1_23_6:
KIND_NODE_VERSION: v1.23.6
LOCAL_ONLY: "true"
Expand Down
2 changes: 0 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,6 @@ run: generate fmt vet manifests
go run ./cmd/webhook/main.go

# Deploy controller in the configured Kubernetes cluster in ~/.kube/config
ARC_CLUSTER ?= false
DEPLOYMENT_YAML ?= false

.PHONY: deploy
Expand Down Expand Up @@ -321,7 +320,6 @@ GINKGO_ARGS ?= -focus="$(GINKGO_FOCUS)" -skip="$(GINKGO_SKIP)" -nodes=$(GINKGO_N
# E2E configurations
KUBECONFIG ?= $(HOME)/.kube/config
E2E_ARGS := -kubeconfig=$(KUBECONFIG) -report-dir=$(PWD)/_artifacts \
-e2e.arc-cluster=$(ARC_CLUSTER) \
-e2e.token-exchange-image=$(MSAL_GO_E2E_IMAGE)
E2E_EXTRA_ARGS ?=

Expand Down
6 changes: 1 addition & 5 deletions cmd/webhook/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@ const (
)

var (
arcCluster bool
audience string
webhookCertDir string
tlsMinVersion string
Expand Down Expand Up @@ -72,9 +71,6 @@ func main() {
func mainErr() error {
defer mlog.Setup()()

// TODO (aramase) once webhook is added as an arc extension, use extension
// util to check if running in arc cluster.
flag.BoolVar(&arcCluster, "arc-cluster", false, "Running on arc cluster")
flag.StringVar(&audience, "audience", "", "Audience for service account token")
flag.StringVar(&webhookCertDir, "webhook-cert-dir", "/certs", "Webhook certificates dir to use. Defaults to /certs")
flag.BoolVar(&disableCertRotation, "disable-cert-rotation", false, "disable automatic generation and rotation of webhook TLS certificates/keys")
Expand Down Expand Up @@ -166,7 +162,7 @@ func setupWebhook(mgr manager.Manager, setupFinished chan struct{}) {

// setup webhooks
entryLog.Info("registering webhook to the webhook server")
podMutator, err := wh.NewPodMutator(mgr.GetClient(), mgr.GetAPIReader(), arcCluster, audience)
podMutator, err := wh.NewPodMutator(mgr.GetClient(), mgr.GetAPIReader(), audience)
if err != nil {
panic(fmt.Errorf("unable to set up pod mutator: %w", err))
}
Expand Down
2 changes: 0 additions & 2 deletions config/manager/manager.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -28,8 +28,6 @@ spec:
containers:
- command:
- /manager
args:
- --arc-cluster=${ARC_CLUSTER:-false}
image: manager:latest
imagePullPolicy: IfNotPresent
name: manager
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@ helm upgrade -n azure-workload-identity-system [RELEASE_NAME] azure-workload-ide
| image.release | The image release tag to use | Current release version: `v0.15.0` |
| imagePullSecrets | Image pull secrets to use for retrieving images from private registries | `[]` |
| nodeSelector | The node selector to use for pod scheduling | `kubernetes.io/os: linux` |
| arcCluster | Specify if it runs on Arc cluster | `false` |
| resources | The resource request/limits for the container image | limits: 100m CPU, 30Mi, requests: 100m CPU, 20Mi |
| affinity | The node affinity to use for pod scheduling | `{}` |
| tolerations | The tolerations to use for pod scheduling | `[]` |
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@ spec:
{{- toYaml .Values.affinity | nindent 8 }}
containers:
- args:
- --arc-cluster={{ .Values.arcCluster }}
- --log-level={{ .Values.logLevel }}
- --metrics-addr={{ .Values.metricsAddr }}
- --metrics-backend={{ .Values.metricsBackend }}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ image:
imagePullSecrets: []
nodeSelector:
kubernetes.io/os: linux
arcCluster: false
resources:
limits:
cpu: 100m
Expand Down
4 changes: 1 addition & 3 deletions manifest_staging/deploy/azure-wi-webhook.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -155,9 +155,7 @@ spec:
azure-workload-identity.io/system: "true"
spec:
containers:
- args:
- --arc-cluster=${ARC_CLUSTER:-false}
command:
- command:
- /manager
env:
- name: POD_NAMESPACE
Expand Down
75 changes: 5 additions & 70 deletions pkg/webhook/webhook.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,15 +46,14 @@ type podMutator struct {
// This should be used sparingly and only when the client does not fit the use case.
reader client.Reader
config *config.Config
isARCCluster bool
decoder *admission.Decoder
audience string
azureAuthorityHost string
reporter StatsReporter
}

// NewPodMutator returns a pod mutation handler
func NewPodMutator(client client.Client, reader client.Reader, arcCluster bool, audience string) (admission.Handler, error) {
func NewPodMutator(client client.Client, reader client.Reader, audience string) (admission.Handler, error) {
c, err := config.ParseConfig()
if err != nil {
return nil, err
Expand All @@ -73,7 +72,6 @@ func NewPodMutator(client client.Client, reader client.Reader, arcCluster bool,
client: client,
reader: reader,
config: c,
isARCCluster: arcCluster,
audience: audience,
azureAuthorityHost: azureAuthorityHost,
reporter: newStatsReporter(),
Expand Down Expand Up @@ -154,22 +152,10 @@ func (m *podMutator) Handle(ctx context.Context, req admission.Request) (respons
pod.Spec.InitContainers = m.mutateContainers(pod.Spec.InitContainers, clientID, tenantID, skipContainers)
pod.Spec.Containers = m.mutateContainers(pod.Spec.Containers, clientID, tenantID, skipContainers)

if !m.isARCCluster {
// add the projected service account token volume to the pod if not exists
if err = addProjectedServiceAccountTokenVolume(pod, serviceAccountTokenExpiration, m.audience); err != nil {
logger.Error(err, "failed to add projected service account volume")
return admission.Errored(http.StatusBadRequest, err)
}
} else {
// Kubernetes secret created by the arc jwt issuer will follow the naming convention
// localtoken-<service account name>
secretName := fmt.Sprintf("localtoken-%s", serviceAccount.Name)
// add the secret volume for arc scenarios
// TODO (aramase) Do we need to validate the k8s secret exists before adding the volume?
if err = addProjectedSecretVolume(pod, m.config, secretName); err != nil {
logger.Error(err, "failed to add projected secret volume")
return admission.Errored(http.StatusBadRequest, err)
}
// add the projected service account token volume to the pod if not exists
if err = addProjectedServiceAccountTokenVolume(pod, serviceAccountTokenExpiration, m.audience); err != nil {
logger.Error(err, "failed to add projected service account volume")
return admission.Errored(http.StatusBadRequest, err)
}

marshaledPod, err := json.Marshal(pod)
Expand Down Expand Up @@ -437,57 +423,6 @@ func addProjectedServiceAccountTokenVolume(pod *corev1.Pod, serviceAccountTokenE
return nil
}

// addProjectedSecretVolume adds a projected volume with kubernetes secret as source for
// arc clusters. The controller will generate the JWT token and create kubernetes secret for
// the service account.
func addProjectedSecretVolume(pod *corev1.Pod, config *config.Config, secretName string) error {
// add the projected secret volume to the pod if not exists
for _, volume := range pod.Spec.Volumes {
if volume.Projected == nil {
continue
}
for _, pvs := range volume.Projected.Sources {
if pvs.Secret == nil || pvs.Secret.Name != secretName {
continue
}
for _, item := range pvs.Secret.Items {
if item.Path == TokenFilePathName {
return nil
}
}
}
}

// add the projected secret volume
// the path for this volume will always be set to "azure-identity-token"
pod.Spec.Volumes = append(
pod.Spec.Volumes,
corev1.Volume{
Name: TokenFilePathName,
VolumeSource: corev1.VolumeSource{
Projected: &corev1.ProjectedVolumeSource{
Sources: []corev1.VolumeProjection{
{
Secret: &corev1.SecretProjection{
LocalObjectReference: corev1.LocalObjectReference{
Name: secretName,
},
Items: []corev1.KeyToPath{
{
Key: "token",
Path: TokenFilePathName,
},
},
},
},
},
},
},
})

return nil
}

// getAzureAuthorityHost returns the active directory endpoint to use for requesting
// tokens based on the azure environment the webhook is configured with.
func getAzureAuthorityHost(c *config.Config) (string, error) {
Expand Down
Loading