From f60ccc6fdde70d96c667bed933305998def86983 Mon Sep 17 00:00:00 2001 From: Jont828 Date: Mon, 16 Dec 2024 20:30:28 -0500 Subject: [PATCH] Set up ci-entrypoint to work with AKS management cluster and add custom builds perf testing template --- Makefile | 31 +- hack/create-dev-cluster.sh | 6 +- scripts/aks-as-mgmt.sh | 88 +- scripts/ci-build-azure-ccm.sh | 2 +- scripts/ci-build-kubernetes.sh | 2 +- scripts/ci-entrypoint.sh | 15 +- scripts/peer-vnets.sh | 59 + templates/cluster-template-aad.yaml | 12 +- templates/cluster-template-azure-bastion.yaml | 12 +- templates/cluster-template-azure-cni-v1.yaml | 12 +- templates/cluster-template-dual-stack.yaml | 11 +- templates/cluster-template-edgezone.yaml | 12 +- templates/cluster-template-ephemeral.yaml | 12 +- templates/cluster-template-flatcar.yaml | 12 +- templates/cluster-template-ipv6.yaml | 11 +- templates/cluster-template-nvidia-gpu.yaml | 12 +- templates/cluster-template-windows.yaml | 12 +- templates/cluster-template.yaml | 12 +- templates/flavors/aad/kustomization.yaml | 2 +- .../flavors/azure-bastion/kustomization.yaml | 2 +- .../flavors/azure-cni-v1/kustomization.yaml | 2 +- templates/flavors/default/kustomization.yaml | 2 +- .../flavors/dual-stack/kustomization.yaml | 2 +- templates/flavors/edgezone/kustomization.yaml | 2 +- .../flavors/ephemeral/kustomization.yaml | 2 +- templates/flavors/flatcar/kustomization.yaml | 2 +- templates/flavors/ipv6/kustomization.yaml | 2 +- .../flavors/nvidia-gpu/kustomization.yaml | 2 +- ...azure-cluster-cidrs-and-frontend-ips.yaml} | 14 + .../cluster-template-prow-azure-cni-v1.yaml | 12 +- ...r-template-prow-ci-version-dual-stack.yaml | 10 + ...cluster-template-prow-ci-version-ipv6.yaml | 10 + .../ci/cluster-template-prow-ci-version.yaml | 20 +- .../ci/cluster-template-prow-custom-vnet.yaml | 4 + .../ci/cluster-template-prow-dual-stack.yaml | 11 +- .../ci/cluster-template-prow-edgezone.yaml | 12 +- .../ci/cluster-template-prow-flatcar.yaml | 12 +- .../test/ci/cluster-template-prow-ipv6.yaml | 11 +- .../test/ci/cluster-template-prow-load.yaml | 22 +- .../ci/cluster-template-prow-nvidia-gpu.yaml | 12 +- .../test/ci/cluster-template-prow-spot.yaml | 12 +- templates/test/ci/cluster-template-prow.yaml | 22 +- templates/test/ci/prow/kustomization.yaml | 2 + .../cluster-template-custom-builds-load.yaml | 1426 +++++++++++++++++ .../dev/cluster-template-custom-builds.yaml | 20 +- .../dev/custom-builds-load/kustomization.yaml | 22 + .../patches/cluster-label-azuredisk.yaml | 6 + .../patches/cluster-label-storageclass.yaml | 7 + .../patches/kcp-scheduler.yaml | 12 + .../storageclass-resource-set.yaml | 14 + 50 files changed, 1948 insertions(+), 98 deletions(-) create mode 100755 scripts/peer-vnets.sh rename templates/internal-load-balancer/{azure-cluster-frontend-ip.yaml => azure-cluster-cidrs-and-frontend-ips.yaml} (57%) create mode 100644 templates/test/dev/cluster-template-custom-builds-load.yaml create mode 100644 templates/test/dev/custom-builds-load/kustomization.yaml create mode 100644 templates/test/dev/custom-builds-load/patches/cluster-label-azuredisk.yaml create mode 100644 templates/test/dev/custom-builds-load/patches/cluster-label-storageclass.yaml create mode 100644 templates/test/dev/custom-builds-load/patches/kcp-scheduler.yaml create mode 100644 templates/test/dev/custom-builds-load/storageclass-resource-set.yaml diff --git a/Makefile b/Makefile index 7f56e4e63d6..fc3e5cb6208 100644 --- a/Makefile +++ b/Makefile @@ -310,12 +310,16 @@ verify-codespell: codespell ## Verify codespell. 
##@ Development:

.PHONY: install-tools # populate hack/tools/bin
-install-tools: $(ENVSUBST) $(KUSTOMIZE) $(KUBECTL) $(HELM) $(GINKGO) $(KIND) $(AZWI)
+install-tools: $(ENVSUBST) $(KUSTOMIZE) $(KUBECTL) $(HELM) $(GINKGO) $(KIND) $(AZWI) $(YQ)

.PHONY: create-management-cluster
create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) $(KUBECTL) $(KIND) ## Create a management cluster.
-	# Create kind management cluster.
-	$(MAKE) kind-create
+	# Create management cluster.
+	if [ -z "${USE_AKS_MANAGEMENT_CLUSTER}" ]; then \
+		$(MAKE) kind-create ; \
+	else \
+		$(MAKE) aks-create ; \
+	fi

 	# Install cert manager and wait for availability
 	./hack/install-cert-manager.sh
@@ -331,7 +335,9 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) $(KUBECTL) $(KIND) ## Create
 	timeout --foreground 300 bash -c "until curl --retry $(CURL_RETRIES) -sSL https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/releases/download/v0.2.5/addon-components.yaml | $(ENVSUBST) | $(KUBECTL) apply -f -; do sleep 5; done"

 	# Deploy CAPZ
-	$(KIND) load docker-image $(CONTROLLER_IMG)-$(ARCH):$(TAG) --name=$(KIND_CLUSTER_NAME)
+	@if [ -z "${USE_AKS_MANAGEMENT_CLUSTER}" ]; then \
+		$(KIND) load docker-image $(CONTROLLER_IMG)-$(ARCH):$(TAG) --name=$(KIND_CLUSTER_NAME) ; \
+	fi
 	timeout --foreground 300 bash -c "until $(KUSTOMIZE) build config/default | $(ENVSUBST) | $(KUBECTL) apply -f - --server-side=true; do sleep 5; done"

 	# Wait for CAPI deployments
@@ -361,16 +367,16 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) $(KUBECTL) $(KIND) ## Create

.PHONY: create-workload-cluster
create-workload-cluster: $(ENVSUBST) $(KUBECTL) ## Create a workload cluster.
-	# Create workload Cluster.
+	# TODO: change this so it doesn't source aks-mgmt-vars.env when it is using a kind cluster
 	@if [ -z "${AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY}" ]; then \
 		export AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY=$(shell cat $(AZURE_IDENTITY_ID_FILEPATH)); \
 	fi; \
 	if [ -f "$(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE)" ]; then \
-		timeout --foreground 300 bash -c "until $(ENVSUBST) < $(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE) | $(KUBECTL) apply -f -; do sleep 5; done"; \
+		timeout --foreground 300 bash -c "if [ -f aks-mgmt-vars.env ]; then source aks-mgmt-vars.env; fi && env && until $(ENVSUBST) < $(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE) | $(KUBECTL) apply -f -; do sleep 5; done"; \
 	elif [ -f "$(CLUSTER_TEMPLATE)" ]; then \
-		timeout --foreground 300 bash -c "until $(ENVSUBST) < "$(CLUSTER_TEMPLATE)" | $(KUBECTL) apply -f -; do sleep 5; done"; \
+		timeout --foreground 300 bash -c "if [ -f aks-mgmt-vars.env ]; then source aks-mgmt-vars.env; fi && env && until $(ENVSUBST) < "$(CLUSTER_TEMPLATE)" | $(KUBECTL) apply -f -; do sleep 5; done"; \
 	else \
-		timeout --foreground 300 bash -c "until curl --retry "$(CURL_RETRIES)" "$(CLUSTER_TEMPLATE)" | "$(ENVSUBST)" | $(KUBECTL) apply -f -; do sleep 5; done"; \
+		timeout --foreground 300 bash -c "if [ -f aks-mgmt-vars.env ]; then source aks-mgmt-vars.env; fi && env && until curl --retry "$(CURL_RETRIES)" "$(CLUSTER_TEMPLATE)" | "$(ENVSUBST)" | $(KUBECTL) apply -f -; do sleep 5; done"; \
 	fi

 	# Wait for the kubeconfig to become available.
@@ -379,6 +385,8 @@ create-workload-cluster: $(ENVSUBST) $(KUBECTL) ## Create a workload cluster.
 	$(KUBECTL) get secret/$(CLUSTER_NAME)-kubeconfig -n default -o json | jq -r .data.value | base64 --decode > ./kubeconfig
 	$(KUBECTL) -n default wait --for=condition=Ready --timeout=10m cluster "$(CLUSTER_NAME)"

+	@if [ -f aks-mgmt-vars.env ]; then ./scripts/peer-vnets.sh; fi
+
 	@echo 'run "$(KUBECTL) --kubeconfig=./kubeconfig ..."
to work with the new target cluster'

.PHONY: create-cluster
@@ -748,6 +756,13 @@ aks-create: $(KUBECTL) ## Create aks cluster as mgmt cluster.

.PHONY: tilt-up
tilt-up: install-tools ## Start tilt and build kind cluster if needed.
+	# Create management cluster.
+	if [ -z "${USE_AKS_MANAGEMENT_CLUSTER}" ]; then \
+		$(MAKE) kind-create ; \
+	else \
+		$(MAKE) aks-create ; \
+	fi
+
 	@if [ -z "${AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY}" ]; then \
 		export AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY=$(shell cat $(AZURE_IDENTITY_ID_FILEPATH)); \
 	fi; \
diff --git a/hack/create-dev-cluster.sh b/hack/create-dev-cluster.sh
index 43b44f68c09..6f1b85ac015 100755
--- a/hack/create-dev-cluster.sh
+++ b/hack/create-dev-cluster.sh
@@ -64,7 +64,11 @@ export ASO_CREDENTIAL_SECRET_NAME=${ASO_CREDENTIAL_SECRET_NAME:="aso-credentials
 capz::util::generate_ssh_key

 echo "================ DOCKER BUILD ==============="
-PULL_POLICY=IfNotPresent make modules docker-build
+PULL_POLICY=IfNotPresent make modules docker-build docker-push
+# TODO: add a check for AKS vs kind
+if [ -n "${USE_AKS_MANAGEMENT_CLUSTER:-}" ]; then
+  PULL_POLICY=IfNotPresent make docker-push
+fi

 setup() {
   echo "================ MAKE CLEAN ==============="
diff --git a/scripts/aks-as-mgmt.sh b/scripts/aks-as-mgmt.sh
index 1d97708c420..8fea39b7ecd 100755
--- a/scripts/aks-as-mgmt.sh
+++ b/scripts/aks-as-mgmt.sh
@@ -18,6 +18,8 @@ set -o nounset # exit when script tries to use undeclared variables.
 set -o pipefail # make the pipeline fail if any command in it fails.

 REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+# shellcheck source=hack/common-vars.sh
+source "${REPO_ROOT}/hack/common-vars.sh"
 # shellcheck source=hack/ensure-azcli.sh
 source "${REPO_ROOT}/hack/ensure-azcli.sh" # install az cli and login using WI
 # shellcheck source=hack/ensure-tags.sh
@@ -30,7 +32,7 @@ make --directory="${REPO_ROOT}" "${KUBECTL##*/}" "${AZWI##*/}"
 export MGMT_CLUSTER_NAME="${MGMT_CLUSTER_NAME:-aks-mgmt-capz-${RANDOM_SUFFIX}}" # management cluster name
 export AKS_RESOURCE_GROUP="${AKS_RESOURCE_GROUP:-aks-mgmt-capz-${RANDOM_SUFFIX}}" # resource group name
 export AKS_NODE_RESOURCE_GROUP="node-${AKS_RESOURCE_GROUP}"
-export KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.30.2}"
+export AKS_MGMT_KUBERNETES_VERSION="${AKS_MGMT_KUBERNETES_VERSION:-v1.30.2}"
 export AZURE_LOCATION="${AZURE_LOCATION:-westus2}"
 export AKS_NODE_VM_SIZE="${AKS_NODE_VM_SIZE:-"Standard_B2s"}"
 export AKS_NODE_COUNT="${AKS_NODE_COUNT:-1}"
@@ -42,6 +44,12 @@ export AZWI_STORAGE_CONTAINER="\$web"
 export SERVICE_ACCOUNT_SIGNING_PUB_FILEPATH="${SERVICE_ACCOUNT_SIGNING_PUB_FILEPATH:-}"
 export SERVICE_ACCOUNT_SIGNING_KEY_FILEPATH="${SERVICE_ACCOUNT_SIGNING_KEY_FILEPATH:-}"
 export REGISTRY="${REGISTRY:-}"
+export AKS_MGMT_VNET_NAME="${AKS_MGMT_VNET_NAME:-"aks-mgmt-vnet-${RANDOM_SUFFIX}"}"
+export AKS_MGMT_VNET_CIDR="${AKS_MGMT_VNET_CIDR:-"20.255.0.0/16"}"
+export AKS_MGMT_SERVICE_CIDR="${AKS_MGMT_SERVICE_CIDR:-"20.255.254.0/24"}"
+export AKS_MGMT_DNS_SERVICE_IP="${AKS_MGMT_DNS_SERVICE_IP:-"20.255.254.100"}"
+export AKS_MGMT_SUBNET_NAME="${AKS_MGMT_SUBNET_NAME:-"aks-mgmt-subnet-${RANDOM_SUFFIX}"}"
+export AKS_MGMT_SUBNET_CIDR="${AKS_MGMT_SUBNET_CIDR:-"20.255.0.0/24"}"

 export AZURE_SUBSCRIPTION_ID="${AZURE_SUBSCRIPTION_ID:-}"
 export AZURE_CLIENT_ID="${AZURE_CLIENT_ID:-}"
@@ -63,7 +71,7 @@ main() {
   echo "MGMT_CLUSTER_NAME: $MGMT_CLUSTER_NAME"
   echo "AKS_RESOURCE_GROUP: $AKS_RESOURCE_GROUP"
   echo "AKS_NODE_RESOURCE_GROUP: $AKS_NODE_RESOURCE_GROUP"
-  echo "KUBERNETES_VERSION: $KUBERNETES_VERSION"
+  echo
"AKS_MGMT_KUBERNETES_VERSION: $AKS_MGMT_KUBERNETES_VERSION" echo "AZURE_LOCATION: $AZURE_LOCATION" echo "AKS_NODE_VM_SIZE: $AKS_NODE_VM_SIZE" echo "AZURE_NODE_MACHINE_TYPE: $AZURE_NODE_MACHINE_TYPE" @@ -76,6 +84,12 @@ main() { echo "SERVICE_ACCOUNT_SIGNING_KEY_FILEPATH: $SERVICE_ACCOUNT_SIGNING_KEY_FILEPATH" echo "REGISTRY: $REGISTRY" echo "APISERVER_LB_DNS_SUFFIX: $APISERVER_LB_DNS_SUFFIX" + echo "AKS_MGMT_VNET_NAME: $AKS_MGMT_VNET_NAME" + echo "AKS_MGMT_VNET_CIDR: $AKS_MGMT_VNET_CIDR" + echo "AKS_MGMT_SERVICE_CIDR: $AKS_MGMT_SERVICE_CIDR" + echo "AKS_MGMT_DNS_SERVICE_IP: $AKS_MGMT_DNS_SERVICE_IP" + echo "AKS_MGMT_SUBNET_NAME: $AKS_MGMT_SUBNET_NAME" + echo "AKS_MGMT_SUBNET_CIDR: $AKS_MGMT_SUBNET_CIDR" echo "AZURE_SUBSCRIPTION_ID: $AZURE_SUBSCRIPTION_ID" echo "AZURE_CLIENT_ID: $AZURE_CLIENT_ID" @@ -102,6 +116,17 @@ create_aks_cluster() { --location "${AZURE_LOCATION}" \ --output none --only-show-errors \ --tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}" + + + echo "creating vnet for the resource group ${AKS_RESOURCE_GROUP}" + az network vnet create \ + --resource-group "${AKS_RESOURCE_GROUP}"\ + --name "${AKS_MGMT_VNET_NAME}" \ + --address-prefix "${AKS_MGMT_VNET_CIDR}" \ + --subnet-name "${AKS_MGMT_SUBNET_NAME}" \ + --subnet-prefix "${AKS_MGMT_SUBNET_CIDR}" \ + --output none --only-show-errors \ + --tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}" fi aks_exists=$(az aks show --name "${MGMT_CLUSTER_NAME}" --resource-group "${AKS_RESOURCE_GROUP}" 2>&1 || true) # true because we want to continue if the command fails @@ -110,13 +135,16 @@ create_aks_cluster() { az aks create --name "${MGMT_CLUSTER_NAME}" \ --resource-group "${AKS_RESOURCE_GROUP}" \ --location "${AZURE_LOCATION}" \ - --kubernetes-version "${KUBERNETES_VERSION}" \ + --kubernetes-version "${AKS_MGMT_KUBERNETES_VERSION}" \ --node-count "${AKS_NODE_COUNT}" \ --node-vm-size "${AKS_NODE_VM_SIZE}" \ --node-resource-group "${AKS_NODE_RESOURCE_GROUP}" \ --vm-set-type VirtualMachineScaleSets \ --generate-ssh-keys \ --network-plugin azure \ + --vnet-subnet-id "/subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${AKS_RESOURCE_GROUP}/providers/Microsoft.Network/virtualNetworks/${AKS_MGMT_VNET_NAME}/subnets/${AKS_MGMT_SUBNET_NAME}" \ + --service-cidr "${AKS_MGMT_SERVICE_CIDR}" \ + --dns-service-ip "${AKS_MGMT_DNS_SERVICE_IP}" \ --tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}" \ --output none --only-show-errors; elif echo "$aks_exists" | grep -q "${MGMT_CLUSTER_NAME}"; then @@ -127,6 +155,7 @@ create_aks_cluster() { fi # check and save kubeconfig + echo -e "\n" echo "saving credentials of cluster ${MGMT_CLUSTER_NAME} in ${REPO_ROOT}/${MGMT_CLUSTER_KUBECONFIG}" az aks get-credentials --name "${MGMT_CLUSTER_NAME}" --resource-group "${AKS_RESOURCE_GROUP}" \ --file "${REPO_ROOT}/${MGMT_CLUSTER_KUBECONFIG}" --only-show-errors @@ -172,11 +201,50 @@ create_aks_cluster() { sleep 5 done + # If storage account var is set: + if [ -n "${AZURE_STORAGE_ACCOUNT}" ]; then + echo "assigning storage blob data reader role to the service principal" + until az role assignment create --assignee-object-id "${AKS_MI_OBJECT_ID}" --role "Storage Blob Data Reader" \ + --scope "/subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${AZURE_STORAGE_ACCOUNT_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${AZURE_STORAGE_ACCOUNT}/blobServices/default/containers/${AZURE_BLOB_CONTAINER_NAME}" \ + 
--assignee-principal-type ServicePrincipal; do
+      echo "retrying to assign storage blob data reader role to the service principal"
+      sleep 5
+    done
+  fi
+
   echo "using ASO_CREDENTIAL_SECRET_MODE as podidentity"
   ASO_CREDENTIAL_SECRET_MODE="podidentity"
 }

 set_env_varaibles(){
+rm aks-mgmt-vars.env || true
+cat <<EOF > aks-mgmt-vars.env
+export MGMT_CLUSTER_NAME="${MGMT_CLUSTER_NAME}"
+export AKS_RESOURCE_GROUP="${AKS_RESOURCE_GROUP}"
+export AKS_NODE_RESOURCE_GROUP="${AKS_NODE_RESOURCE_GROUP}"
+export MGMT_CLUSTER_KUBECONFIG="${MGMT_CLUSTER_KUBECONFIG}"
+export AKS_MI_CLIENT_ID="${AKS_MI_CLIENT_ID}"
+export AZURE_CLIENT_ID="${AKS_MI_CLIENT_ID}"
+export AKS_MI_OBJECT_ID="${AKS_MI_OBJECT_ID}"
+export AKS_MI_RESOURCE_ID="${AKS_MI_RESOURCE_ID}"
+export MANAGED_IDENTITY_NAME="${MANAGED_IDENTITY_NAME}"
+export MANAGED_IDENTITY_RG="${MANAGED_IDENTITY_RG}"
+export AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY="${AKS_MI_CLIENT_ID}"
+export CI_RG="${MANAGED_IDENTITY_RG}"
+export USER_IDENTITY="${MANAGED_IDENTITY_NAME}"
+export CLUSTER_IDENTITY_TYPE="UserAssignedMSI"
+export ASO_CREDENTIAL_SECRET_MODE="${ASO_CREDENTIAL_SECRET_MODE}"
+export REGISTRY="${REGISTRY}"
+export APISERVER_LB_DNS_SUFFIX="${APISERVER_LB_DNS_SUFFIX}"
+export AZURE_LOCATION="${AZURE_LOCATION}"
+export AKS_MGMT_VNET_NAME="${AKS_MGMT_VNET_NAME}"
+export AKS_MGMT_VNET_CIDR="${AKS_MGMT_VNET_CIDR}"
+export AKS_MGMT_SERVICE_CIDR="${AKS_MGMT_SERVICE_CIDR}"
+export AKS_MGMT_DNS_SERVICE_IP="${AKS_MGMT_DNS_SERVICE_IP}"
+export AKS_MGMT_SUBNET_NAME="${AKS_MGMT_SUBNET_NAME}"
+export AKS_MGMT_SUBNET_CIDR="${AKS_MGMT_SUBNET_CIDR}"
+EOF
+
 cat <<EOF > tilt-settings-temp.yaml
 kustomize_substitutions:
   MGMT_CLUSTER_NAME: "${MGMT_CLUSTER_NAME}"
@@ -210,28 +278,28 @@ else
 fi

 # copy over the existing allowed_contexts to tilt-settings.yaml if it does not exist
-allowed_contexts_exists=$(yq eval '.allowed_contexts' tilt-settings.yaml)
+allowed_contexts_exists=$(${YQ} eval '.allowed_contexts' tilt-settings.yaml)
 if [ "$allowed_contexts_exists" == "null" ]; then
-  yq eval '.allowed_contexts = load("tilt-settings-temp.yaml") | .allowed_contexts' tilt-settings-temp.yaml > tilt-settings.yaml
+  ${YQ} eval '.allowed_contexts = load("tilt-settings-temp.yaml") | .allowed_contexts' tilt-settings-temp.yaml > tilt-settings.yaml
 fi

 # extract allowed_contexts from tilt-settings.yaml
-current_contexts=$(yq eval '.allowed_contexts' tilt-settings.yaml | sort -u)
+current_contexts=$(${YQ} eval '.allowed_contexts' tilt-settings.yaml | sort -u)

 # extract allowed_contexts from tilt-settings-new.yaml
-new_contexts=$(yq eval '.allowed_contexts' tilt-settings-temp.yaml | sort -u)
+new_contexts=$(${YQ} eval '.allowed_contexts' tilt-settings-temp.yaml | sort -u)

 # combine current and new contexts, keeping the union of both
 combined_contexts=$(echo "$current_contexts"$'\n'"$new_contexts" | sort -u)

-# create a temporary file since env($combined_contexts) is not supported in yq
+# create a temporary file since env($combined_contexts) is not supported in ${YQ}
 echo "$combined_contexts" > combined_contexts.yaml

 # update allowed_contexts in tilt-settings.yaml with the combined contexts
-yq eval --inplace ".allowed_contexts = load(\"combined_contexts.yaml\")" tilt-settings.yaml
+${YQ} eval --inplace ".allowed_contexts = load(\"combined_contexts.yaml\")" tilt-settings.yaml

 # merge the updated kustomize_substitution and azure_location with the existing one in tilt-settings.yaml
-yq eval-all 'select(fileIndex == 0) *+ {"kustomize_substitutions": select(fileIndex == 1).kustomize_substitutions,
"azure_location": select(fileIndex == 1).azure_location}' tilt-settings.yaml tilt-settings-temp.yaml > tilt-settings-new.yaml +${YQ} eval-all 'select(fileIndex == 0) *+ {"kustomize_substitutions": select(fileIndex == 1).kustomize_substitutions, "azure_location": select(fileIndex == 1).azure_location}' tilt-settings.yaml tilt-settings-temp.yaml > tilt-settings-new.yaml mv tilt-settings-new.yaml tilt-settings.yaml rm -r combined_contexts.yaml diff --git a/scripts/ci-build-azure-ccm.sh b/scripts/ci-build-azure-ccm.sh index 586c99d38a3..e90ae2717d0 100755 --- a/scripts/ci-build-azure-ccm.sh +++ b/scripts/ci-build-azure-ccm.sh @@ -70,7 +70,7 @@ main() { echo "Creating ${AZURE_BLOB_CONTAINER_NAME} storage container" az storage container create --name "${AZURE_BLOB_CONTAINER_NAME}" --auth-mode login > /dev/null # if the storage account has public access disabled at the account level this will return 404 - AZURE_STORAGE_AUTH_MODE=login az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null + # AZURE_STORAGE_AUTH_MODE=login az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null fi az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider" --auth-mode login diff --git a/scripts/ci-build-kubernetes.sh b/scripts/ci-build-kubernetes.sh index c7d71c4339b..42b672afdd1 100755 --- a/scripts/ci-build-kubernetes.sh +++ b/scripts/ci-build-kubernetes.sh @@ -80,7 +80,7 @@ main() { if [[ "$(az storage container exists --name "${AZURE_BLOB_CONTAINER_NAME}" --query exists --output tsv --auth-mode login)" == "false" ]]; then echo "Creating ${AZURE_BLOB_CONTAINER_NAME} storage container" az storage container create --name "${AZURE_BLOB_CONTAINER_NAME}" --auth-mode login > /dev/null - az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --auth-mode login --public-access container > /dev/null + # az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --auth-mode login --public-access container > /dev/null fi if [[ "${KUBE_BUILD_CONFORMANCE:-}" =~ [yY] ]]; then diff --git a/scripts/ci-entrypoint.sh b/scripts/ci-entrypoint.sh index 0d0cfc5ab00..aecea537abb 100755 --- a/scripts/ci-entrypoint.sh +++ b/scripts/ci-entrypoint.sh @@ -133,11 +133,16 @@ select_cluster_template() { create_cluster() { "${REPO_ROOT}/hack/create-dev-cluster.sh" - if [ ! -f "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" ]; then - echo "Unable to find kubeconfig for kind mgmt cluster ${KIND_CLUSTER_NAME}" - exit 1 - fi - "${KUBECTL}" --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" get clusters -A + if [ -z "${USE_AKS_MANAGEMENT_CLUSTER}" ]; then + if [ ! 
-f "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" ]; then + echo "Unable to find kubeconfig for kind mgmt cluster ${KIND_CLUSTER_NAME}" + exit 1 + fi + "${KUBECTL}" --kubeconfig "${REPO_ROOT}/${KIND_CLUSTER_NAME}.kubeconfig" get clusters -A + else + "${KUBECTL}" get clusters -A + fi; + # set the SSH bastion and user that can be used to SSH into nodes KUBE_SSH_BASTION=$(${KUBECTL} get azurecluster -o json | jq '.items[0].spec.networkSpec.apiServerLB.frontendIPs[0].publicIP.dnsName' | tr -d \"):22 diff --git a/scripts/peer-vnets.sh b/scripts/peer-vnets.sh new file mode 100755 index 00000000000..651b0379e01 --- /dev/null +++ b/scripts/peer-vnets.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +# TODO: check for az cli to be installed in local +# wait for AKS VNet to be in the state created + +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +# shellcheck source=hack/common-vars.sh +source "${REPO_ROOT}/hack/common-vars.sh" + +source "${REPO_ROOT}/aks-mgmt-vars.env" + +echo "--------Peering VNETs--------" +az network vnet wait --resource-group ${AKS_RESOURCE_GROUP} --name ${AKS_MGMT_VNET_NAME} --created --timeout 180 +export MGMT_VNET_ID=$(az network vnet show --resource-group ${AKS_RESOURCE_GROUP} --name ${AKS_MGMT_VNET_NAME} --query id --output tsv) +echo " 1/8 ${AKS_MGMT_VNET_NAME} found " + +# wait for workload VNet to be created +az network vnet wait --resource-group ${CLUSTER_NAME} --name ${CLUSTER_NAME}-vnet --created --timeout 180 +export WORKLOAD_VNET_ID=$(az network vnet show --resource-group ${CLUSTER_NAME} --name ${CLUSTER_NAME}-vnet --query id --output tsv) +echo " 2/8 ${CLUSTER_NAME}-vnet found with ID: ${WORKLOAD_VNET_ID} " + +# peer mgmt vnet +# TODO: check if exists before attempting to create +az network vnet peering create --name mgmt-to-${CLUSTER_NAME} --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME} --remote-vnet ${WORKLOAD_VNET_ID} --allow-vnet-access true --allow-forwarded-traffic true --only-show-errors --output none +az network vnet peering wait --name mgmt-to-${CLUSTER_NAME} --resource-group ${AKS_RESOURCE_GROUP} --vnet-name ${AKS_MGMT_VNET_NAME} --created --timeout 300 --only-show-errors --output none +echo " 3/8 mgmt-to-${CLUSTER_NAME} peering created in ${AKS_MGMT_VNET_NAME}" + +# peer workload vnet +# TODO: check if exists before attempting to create +az network vnet peering create --name ${CLUSTER_NAME}-to-mgmt --resource-group ${CLUSTER_NAME} --vnet-name ${CLUSTER_NAME}-vnet --remote-vnet ${MGMT_VNET_ID} --allow-vnet-access true --allow-forwarded-traffic true --only-show-errors --output none +az network vnet peering wait --name ${CLUSTER_NAME}-to-mgmt --resource-group ${CLUSTER_NAME} --vnet-name ${CLUSTER_NAME}-vnet --created --timeout 300 --only-show-errors --output none +echo " 4/8 ${CLUSTER_NAME}-to-mgmt peering created in ${CLUSTER_NAME}-vnet" + +# create private DNS zone +# TODO: check if exists before attempting to create +az network private-dns zone create --resource-group ${CLUSTER_NAME} --name ${AZURE_LOCATION}.cloudapp.azure.com --only-show-errors --output none +az network private-dns zone wait --resource-group ${CLUSTER_NAME} --name ${AZURE_LOCATION}.cloudapp.azure.com --created --timeout 300 --only-show-errors --output none +echo " 5/8 ${AZURE_LOCATION}.cloudapp.azure.com private DNS zone created in ${CLUSTER_NAME}" + +# link private DNS Zone to workload vnet +# TODO: check if exists before attempting to create +az network private-dns link vnet create --resource-group ${CLUSTER_NAME} 
--zone-name ${AZURE_LOCATION}.cloudapp.azure.com --name ${CLUSTER_NAME}-to-mgmt --virtual-network ${WORKLOAD_VNET_ID} --registration-enabled false --only-show-errors --output none
+az network private-dns link vnet wait --resource-group ${CLUSTER_NAME} --zone-name ${AZURE_LOCATION}.cloudapp.azure.com --name ${CLUSTER_NAME}-to-mgmt --created --timeout 300 --only-show-errors --output none
+echo " 6/8 workload cluster vnet ${CLUSTER_NAME}-vnet linked with private DNS zone"
+
+# link private DNS Zone to mgmt vnet
+# TODO: check if exists before attempting to create
+az network private-dns link vnet create --resource-group ${CLUSTER_NAME} --zone-name ${AZURE_LOCATION}.cloudapp.azure.com --name mgmt-to-${CLUSTER_NAME} --virtual-network ${MGMT_VNET_ID} --registration-enabled false --only-show-errors --output none
+az network private-dns link vnet wait --resource-group ${CLUSTER_NAME} --zone-name ${AZURE_LOCATION}.cloudapp.azure.com --name mgmt-to-${CLUSTER_NAME} --created --timeout 300 --only-show-errors --output none
+echo " 7/8 management cluster vnet ${AKS_MGMT_VNET_NAME} linked with private DNS zone"
+
+# create private DNS zone record
+# TODO: 10.0.0.100 should be customizable
+az network private-dns record-set a add-record --resource-group ${CLUSTER_NAME} --zone-name ${AZURE_LOCATION}.cloudapp.azure.com --record-set-name ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX} --ipv4-address 10.0.0.100 --only-show-errors --output none
+echo -e " 8/8 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX} private DNS zone record created\n"
\ No newline at end of file
diff --git a/templates/cluster-template-aad.yaml b/templates/cluster-template-aad.yaml
index 44a5d2b4455..226aa117a1b 100644
--- a/templates/cluster-template-aad.yaml
+++ b/templates/cluster-template-aad.yaml
@@ -35,12 +35,20 @@ spec:
         publicIP:
           dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com
           name: ${CLUSTER_NAME}-api-lb
+      - name: ${CLUSTER_NAME}-internal-lb-private-ip
+        privateIP: 10.0.0.100
     subnets:
-    - name: control-plane-subnet
+    - cidrBlocks:
+      - 10.0.0.0/24
+      name: control-plane-subnet
       role: control-plane
-    - name: node-subnet
+    - cidrBlocks:
+      - 10.0.1.0/24
+      name: node-subnet
       role: node
     vnet:
+      cidrBlocks:
+      - 10.0.0.0/16
       name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet}
       resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}}
       subscriptionID: ${AZURE_SUBSCRIPTION_ID}
diff --git a/templates/cluster-template-azure-bastion.yaml b/templates/cluster-template-azure-bastion.yaml
index ba29dfeb37a..e8112be96d7 100644
--- a/templates/cluster-template-azure-bastion.yaml
+++ b/templates/cluster-template-azure-bastion.yaml
@@ -37,12 +37,20 @@ spec:
         publicIP:
           dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com
           name: ${CLUSTER_NAME}-api-lb
+      - name: ${CLUSTER_NAME}-internal-lb-private-ip
+        privateIP: 10.0.0.100
     subnets:
-    - name: control-plane-subnet
+    - cidrBlocks:
+      - 10.0.0.0/24
+      name: control-plane-subnet
       role: control-plane
-    - name: node-subnet
+    - cidrBlocks:
+      - 10.0.1.0/24
+      name: node-subnet
       role: node
     vnet:
+      cidrBlocks:
+      - 10.0.0.0/16
       name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet}
       resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}}
       subscriptionID: ${AZURE_SUBSCRIPTION_ID}
diff --git a/templates/cluster-template-azure-cni-v1.yaml b/templates/cluster-template-azure-cni-v1.yaml
index 7704dcfc083..80046ef75f6 100644
--- a/templates/cluster-template-azure-cni-v1.yaml
+++ b/templates/cluster-template-azure-cni-v1.yaml
@@ -35,12 +35,20 @@ spec:
         publicIP:
           dnsName:
${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/cluster-template-dual-stack.yaml b/templates/cluster-template-dual-stack.yaml index fc890e60836..0dee4a74759 100644 --- a/templates/cluster-template-dual-stack.yaml +++ b/templates/cluster-template-dual-stack.yaml @@ -42,21 +42,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - cidrBlocks: - - 10.0.0.0/16 - - 2001:1234:5678:9abc::/64 + - 10.0.0.0/24 name: control-plane-subnet role: control-plane - cidrBlocks: - - 10.1.0.0/16 - - 2001:1234:5678:9abd::/64 + - 10.0.1.0/24 name: node-subnet role: node vnet: cidrBlocks: - - 10.0.0.0/8 - - 2001:1234:5678:9a00::/56 + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/cluster-template-edgezone.yaml b/templates/cluster-template-edgezone.yaml index fe5b94184d0..15ba1b59e45 100644 --- a/templates/cluster-template-edgezone.yaml +++ b/templates/cluster-template-edgezone.yaml @@ -38,12 +38,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/cluster-template-ephemeral.yaml b/templates/cluster-template-ephemeral.yaml index 8415a8f31b7..928bc86c44f 100644 --- a/templates/cluster-template-ephemeral.yaml +++ b/templates/cluster-template-ephemeral.yaml @@ -35,12 +35,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/cluster-template-flatcar.yaml b/templates/cluster-template-flatcar.yaml index 84750e8fe3a..c281345054d 100644 --- a/templates/cluster-template-flatcar.yaml +++ b/templates/cluster-template-flatcar.yaml @@ -35,12 +35,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: 
${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/cluster-template-ipv6.yaml b/templates/cluster-template-ipv6.yaml index d0bb0e43a47..e913499aca3 100644 --- a/templates/cluster-template-ipv6.yaml +++ b/templates/cluster-template-ipv6.yaml @@ -38,21 +38,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - cidrBlocks: - - 10.0.0.0/16 - - 2001:1234:5678:9abc::/64 + - 10.0.0.0/24 name: control-plane-subnet role: control-plane - cidrBlocks: - - 10.1.0.0/16 - - 2001:1234:5678:9abd::/64 + - 10.0.1.0/24 name: node-subnet role: node vnet: cidrBlocks: - - 10.0.0.0/8 - - 2001:1234:5678:9a00::/56 + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/cluster-template-nvidia-gpu.yaml b/templates/cluster-template-nvidia-gpu.yaml index 5cb94eeb512..cb6a35f0443 100644 --- a/templates/cluster-template-nvidia-gpu.yaml +++ b/templates/cluster-template-nvidia-gpu.yaml @@ -35,12 +35,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/cluster-template-windows.yaml b/templates/cluster-template-windows.yaml index ecee4ae0eaf..1c43121db34 100644 --- a/templates/cluster-template-windows.yaml +++ b/templates/cluster-template-windows.yaml @@ -39,12 +39,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml index a18b181f968..bf92bcd3be5 100644 --- a/templates/cluster-template.yaml +++ b/templates/cluster-template.yaml @@ -35,12 +35,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 
10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/flavors/aad/kustomization.yaml b/templates/flavors/aad/kustomization.yaml index de60f8706b0..a41af911025 100644 --- a/templates/flavors/aad/kustomization.yaml +++ b/templates/flavors/aad/kustomization.yaml @@ -8,7 +8,7 @@ resources: patches: - path: patches/kubeadm-controlplane.yaml - path: ../../azure-cluster-identity/azurecluster-identity-ref.yaml -- path: ../../internal-load-balancer/azure-cluster-frontend-ip.yaml +- path: ../../internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml - path: ../../internal-load-balancer/kubeadm-config-template-worker-node.yaml sortOptions: diff --git a/templates/flavors/azure-bastion/kustomization.yaml b/templates/flavors/azure-bastion/kustomization.yaml index f39b9e3e32c..71bfa436428 100644 --- a/templates/flavors/azure-bastion/kustomization.yaml +++ b/templates/flavors/azure-bastion/kustomization.yaml @@ -8,7 +8,7 @@ resources: patches: - path: ../../azure-cluster-identity/azurecluster-identity-ref.yaml - path: patches/azure-cluster.yaml -- path: ../../internal-load-balancer/azure-cluster-frontend-ip.yaml +- path: ../../internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml - path: ../../internal-load-balancer/kubeadm-config-template-worker-node.yaml sortOptions: diff --git a/templates/flavors/azure-cni-v1/kustomization.yaml b/templates/flavors/azure-cni-v1/kustomization.yaml index 43779e89117..5e3ca7e97e3 100644 --- a/templates/flavors/azure-cni-v1/kustomization.yaml +++ b/templates/flavors/azure-cni-v1/kustomization.yaml @@ -11,7 +11,7 @@ patches: - path: patches/azure-machine-template.yaml - path: patches/kubeadm-control-plane.yaml - path: patches/kubeadm-worker-node.yaml -- path: ../../internal-load-balancer/azure-cluster-frontend-ip.yaml +- path: ../../internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml - path: ../../internal-load-balancer/kubeadm-config-template-worker-node.yaml sortOptions: diff --git a/templates/flavors/default/kustomization.yaml b/templates/flavors/default/kustomization.yaml index eb0e4743560..333ecdd55d6 100644 --- a/templates/flavors/default/kustomization.yaml +++ b/templates/flavors/default/kustomization.yaml @@ -8,7 +8,7 @@ resources: patches: - path: ../../azure-cluster-identity/azurecluster-identity-ref.yaml -- path: ../../internal-load-balancer/azure-cluster-frontend-ip.yaml +- path: ../../internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml - path: ../../internal-load-balancer/kubeadm-config-template-worker-node.yaml sortOptions: diff --git a/templates/flavors/dual-stack/kustomization.yaml b/templates/flavors/dual-stack/kustomization.yaml index f8c00538723..922d9c71db3 100644 --- a/templates/flavors/dual-stack/kustomization.yaml +++ b/templates/flavors/dual-stack/kustomization.yaml @@ -10,7 +10,7 @@ patches: - path: patches/kubeadm-controlplane.yaml - path: patches/controlplane-azuremachinetemplate.yaml - path: ../../azure-cluster-identity/azurecluster-identity-ref.yaml -- path: ../../internal-load-balancer/azure-cluster-frontend-ip.yaml +- path: ../../internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml - path: ../../internal-load-balancer/kubeadm-config-template-worker-node.yaml sortOptions: diff --git 
a/templates/flavors/edgezone/kustomization.yaml b/templates/flavors/edgezone/kustomization.yaml index 5e44b5bc717..d659a2e6c2e 100644 --- a/templates/flavors/edgezone/kustomization.yaml +++ b/templates/flavors/edgezone/kustomization.yaml @@ -10,7 +10,7 @@ patches: - path: ../../azure-cluster-identity/azurecluster-identity-ref.yaml - path: patches/azure-extendedlocation.yaml - path: patches/azure-remove-natgateway.yaml -- path: ../../internal-load-balancer/azure-cluster-frontend-ip.yaml +- path: ../../internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml - path: ../../internal-load-balancer/kubeadm-config-template-worker-node.yaml sortOptions: diff --git a/templates/flavors/ephemeral/kustomization.yaml b/templates/flavors/ephemeral/kustomization.yaml index b66c5670a19..08f0f1e6a52 100644 --- a/templates/flavors/ephemeral/kustomization.yaml +++ b/templates/flavors/ephemeral/kustomization.yaml @@ -20,7 +20,7 @@ patches: kind: AzureMachineTemplate name: .*-control-plane version: v1beta1 -- path: ../../internal-load-balancer/azure-cluster-frontend-ip.yaml +- path: ../../internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml - path: ../../internal-load-balancer/kubeadm-config-template-worker-node.yaml sortOptions: diff --git a/templates/flavors/flatcar/kustomization.yaml b/templates/flavors/flatcar/kustomization.yaml index 826dc2b11c5..3f84701d996 100644 --- a/templates/flavors/flatcar/kustomization.yaml +++ b/templates/flavors/flatcar/kustomization.yaml @@ -9,7 +9,7 @@ resources: patches: - path: patches/kubeadm-controlplane.yaml - path: ../../azure-cluster-identity/azurecluster-identity-ref.yaml -- path: ../../internal-load-balancer/azure-cluster-frontend-ip.yaml +- path: ../../internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml - target: kind: KubeadmConfigTemplate patch: |- diff --git a/templates/flavors/ipv6/kustomization.yaml b/templates/flavors/ipv6/kustomization.yaml index d663897e134..587c58a2fb3 100644 --- a/templates/flavors/ipv6/kustomization.yaml +++ b/templates/flavors/ipv6/kustomization.yaml @@ -11,7 +11,7 @@ patches: - path: patches/ipv6.yaml - path: patches/kubeadm-controlplane.yaml - path: patches/controlplane-azuremachinetemplate.yaml -- path: ../../internal-load-balancer/azure-cluster-frontend-ip.yaml +- path: ../../internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml - path: ../../internal-load-balancer/kubeadm-config-template-worker-node.yaml sortOptions: diff --git a/templates/flavors/nvidia-gpu/kustomization.yaml b/templates/flavors/nvidia-gpu/kustomization.yaml index f8ebc7a9157..4f373f98792 100644 --- a/templates/flavors/nvidia-gpu/kustomization.yaml +++ b/templates/flavors/nvidia-gpu/kustomization.yaml @@ -16,7 +16,7 @@ generatorOptions: patches: - path: ../../azure-cluster-identity/azurecluster-identity-ref.yaml -- path: ../../internal-load-balancer/azure-cluster-frontend-ip.yaml +- path: ../../internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml - path: ../../internal-load-balancer/kubeadm-config-template-worker-node.yaml sortOptions: diff --git a/templates/internal-load-balancer/azure-cluster-frontend-ip.yaml b/templates/internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml similarity index 57% rename from templates/internal-load-balancer/azure-cluster-frontend-ip.yaml rename to templates/internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml index 45a584e4a02..35f59b81665 100644 --- a/templates/internal-load-balancer/azure-cluster-frontend-ip.yaml +++ 
b/templates/internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml @@ -12,3 +12,17 @@ spec: publicIP: name: ${CLUSTER_NAME}-api-lb dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 + vnet: + cidrBlocks: + - 10.0.0.0/16 + subnets: + - name: control-plane-subnet + role: control-plane + cidrBlocks: + - 10.0.0.0/24 + - name: node-subnet + role: node + cidrBlocks: + - 10.0.1.0/24 diff --git a/templates/test/ci/cluster-template-prow-azure-cni-v1.yaml b/templates/test/ci/cluster-template-prow-azure-cni-v1.yaml index 760210ba5a5..773db5da995 100644 --- a/templates/test/ci/cluster-template-prow-azure-cni-v1.yaml +++ b/templates/test/ci/cluster-template-prow-azure-cni-v1.yaml @@ -41,12 +41,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/test/ci/cluster-template-prow-ci-version-dual-stack.yaml b/templates/test/ci/cluster-template-prow-ci-version-dual-stack.yaml index 0a3d54c620f..cac09a3254f 100644 --- a/templates/test/ci/cluster-template-prow-ci-version-dual-stack.yaml +++ b/templates/test/ci/cluster-template-prow-ci-version-dual-stack.yaml @@ -45,6 +45,14 @@ spec: name: ${CLUSTER_IDENTITY_NAME} location: ${AZURE_LOCATION} networkSpec: + apiServerLB: + frontendIPs: + - name: ${CLUSTER_NAME}-api-lb + publicIP: + dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - cidrBlocks: - 10.0.0.0/16 @@ -446,6 +454,8 @@ spec: image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml name: '{{ ds.meta_data["local_hostname"] }}' preKubeadmCommands: + - echo '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts - bash -c /tmp/oot-cred-provider.sh - bash -c /tmp/kubeadm-bootstrap.sh verbosity: 5 diff --git a/templates/test/ci/cluster-template-prow-ci-version-ipv6.yaml b/templates/test/ci/cluster-template-prow-ci-version-ipv6.yaml index cbf94be6a1b..5081f051a55 100644 --- a/templates/test/ci/cluster-template-prow-ci-version-ipv6.yaml +++ b/templates/test/ci/cluster-template-prow-ci-version-ipv6.yaml @@ -43,6 +43,14 @@ spec: name: ${CLUSTER_IDENTITY_NAME} location: ${AZURE_LOCATION} networkSpec: + apiServerLB: + frontendIPs: + - name: ${CLUSTER_NAME}-api-lb + publicIP: + dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - cidrBlocks: - 10.0.0.0/16 @@ -464,6 +472,8 @@ spec: image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml name: '{{ ds.meta_data["local_hostname"] }}' preKubeadmCommands: + - echo '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts - bash -c 
/tmp/oot-cred-provider.sh - bash -c /tmp/kubeadm-bootstrap.sh verbosity: 5 diff --git a/templates/test/ci/cluster-template-prow-ci-version.yaml b/templates/test/ci/cluster-template-prow-ci-version.yaml index 8b64532ba96..ae18c33a258 100644 --- a/templates/test/ci/cluster-template-prow-ci-version.yaml +++ b/templates/test/ci/cluster-template-prow-ci-version.yaml @@ -40,12 +40,26 @@ spec: name: ${CLUSTER_IDENTITY_NAME} location: ${AZURE_LOCATION} networkSpec: + apiServerLB: + frontendIPs: + - name: ${CLUSTER_NAME}-api-lb + publicIP: + dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} @@ -423,6 +437,8 @@ spec: image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml name: '{{ ds.meta_data["local_hostname"] }}' preKubeadmCommands: + - echo '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts - bash -c /tmp/oot-cred-provider.sh - bash -c /tmp/kubeadm-bootstrap.sh verbosity: 5 diff --git a/templates/test/ci/cluster-template-prow-custom-vnet.yaml b/templates/test/ci/cluster-template-prow-custom-vnet.yaml index e9b090f4ca5..5892fa793c1 100644 --- a/templates/test/ci/cluster-template-prow-custom-vnet.yaml +++ b/templates/test/ci/cluster-template-prow-custom-vnet.yaml @@ -42,6 +42,8 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - name: ${AZURE_CUSTOM_VNET_NAME}-controlplane-subnet role: control-plane @@ -54,6 +56,8 @@ spec: securityGroup: name: node-nsg vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_CUSTOM_VNET_NAME} resourceGroup: ${AZURE_CUSTOM_VNET_RESOURCE_GROUP} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} diff --git a/templates/test/ci/cluster-template-prow-dual-stack.yaml b/templates/test/ci/cluster-template-prow-dual-stack.yaml index b5ca8f3689f..362c4c540ea 100644 --- a/templates/test/ci/cluster-template-prow-dual-stack.yaml +++ b/templates/test/ci/cluster-template-prow-dual-stack.yaml @@ -47,21 +47,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - cidrBlocks: - - 10.0.0.0/16 - - 2001:1234:5678:9abc::/64 + - 10.0.0.0/24 name: control-plane-subnet role: control-plane - cidrBlocks: - - 10.1.0.0/16 - - 2001:1234:5678:9abd::/64 + - 10.0.1.0/24 name: node-subnet role: node vnet: cidrBlocks: - - 10.0.0.0/8 - - 2001:1234:5678:9a00::/56 + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/test/ci/cluster-template-prow-edgezone.yaml b/templates/test/ci/cluster-template-prow-edgezone.yaml index ef1b007edf2..56504a9607e 100644 --- a/templates/test/ci/cluster-template-prow-edgezone.yaml +++ 
b/templates/test/ci/cluster-template-prow-edgezone.yaml @@ -45,12 +45,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/test/ci/cluster-template-prow-flatcar.yaml b/templates/test/ci/cluster-template-prow-flatcar.yaml index bffae1d36f2..d191fa1aabf 100644 --- a/templates/test/ci/cluster-template-prow-flatcar.yaml +++ b/templates/test/ci/cluster-template-prow-flatcar.yaml @@ -42,12 +42,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/test/ci/cluster-template-prow-ipv6.yaml b/templates/test/ci/cluster-template-prow-ipv6.yaml index 6cd89fc25f0..94bc6ba4471 100644 --- a/templates/test/ci/cluster-template-prow-ipv6.yaml +++ b/templates/test/ci/cluster-template-prow-ipv6.yaml @@ -45,21 +45,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - cidrBlocks: - - 10.0.0.0/16 - - 2001:1234:5678:9abc::/64 + - 10.0.0.0/24 name: control-plane-subnet role: control-plane - cidrBlocks: - - 10.1.0.0/16 - - 2001:1234:5678:9abd::/64 + - 10.0.1.0/24 name: node-subnet role: node vnet: cidrBlocks: - - 10.0.0.0/8 - - 2001:1234:5678:9a00::/56 + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/test/ci/cluster-template-prow-load.yaml b/templates/test/ci/cluster-template-prow-load.yaml index 328e38b83e1..0d9160ae129 100644 --- a/templates/test/ci/cluster-template-prow-load.yaml +++ b/templates/test/ci/cluster-template-prow-load.yaml @@ -41,12 +41,26 @@ spec: name: ${CLUSTER_IDENTITY_NAME} location: ${AZURE_LOCATION} networkSpec: + apiServerLB: + frontendIPs: + - name: ${CLUSTER_NAME}-api-lb + publicIP: + dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} @@ -221,7 +235,9 @@ spec: 
kubeletExtraArgs: cloud-provider: external name: '{{ ds.meta_data["local_hostname"] }}' - preKubeadmCommands: [] + preKubeadmCommands: + - echo '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts --- apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment diff --git a/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml b/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml index 0b7fde5f1b1..69ac9a7c025 100644 --- a/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml +++ b/templates/test/ci/cluster-template-prow-nvidia-gpu.yaml @@ -42,12 +42,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/test/ci/cluster-template-prow-spot.yaml b/templates/test/ci/cluster-template-prow-spot.yaml index 5dc7f601945..044ac4378b1 100644 --- a/templates/test/ci/cluster-template-prow-spot.yaml +++ b/templates/test/ci/cluster-template-prow-spot.yaml @@ -42,12 +42,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} diff --git a/templates/test/ci/cluster-template-prow.yaml b/templates/test/ci/cluster-template-prow.yaml index d65a2895f7c..c1deebc6eb3 100644 --- a/templates/test/ci/cluster-template-prow.yaml +++ b/templates/test/ci/cluster-template-prow.yaml @@ -39,12 +39,26 @@ spec: name: ${CLUSTER_IDENTITY_NAME} location: ${AZURE_LOCATION} networkSpec: + apiServerLB: + frontendIPs: + - name: ${CLUSTER_NAME}-api-lb + publicIP: + dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} @@ -215,7 +229,9 @@ spec: kubeletExtraArgs: cloud-provider: external name: '{{ ds.meta_data["local_hostname"] }}' - preKubeadmCommands: [] + preKubeadmCommands: + - echo '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts --- apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment diff --git a/templates/test/ci/prow/kustomization.yaml b/templates/test/ci/prow/kustomization.yaml index 
37621a2333b..6535ed8e0c9 100644 --- a/templates/test/ci/prow/kustomization.yaml +++ b/templates/test/ci/prow/kustomization.yaml @@ -48,6 +48,8 @@ patches: - path: ../patches/cluster-label-calico.yaml - path: ../patches/cluster-label-cloud-provider-azure.yaml - path: ../patches/uami-md-0.yaml +- path: ../../../internal-load-balancer/azure-cluster-cidrs-and-frontend-ips.yaml +- path: ../../../internal-load-balancer/kubeadm-config-template-worker-node.yaml configMapGenerator: - files: - windows-cni=../../../addons/windows/calico/calico.yaml diff --git a/templates/test/dev/cluster-template-custom-builds-load.yaml b/templates/test/dev/cluster-template-custom-builds-load.yaml new file mode 100644 index 00000000000..68b893e9c9a --- /dev/null +++ b/templates/test/dev/cluster-template-custom-builds-load.yaml @@ -0,0 +1,1426 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + azuredisk-csi: "true" + cloud-provider: ${CLOUD_PROVIDER_AZURE_LABEL:=azure} + cni: calico + cni-windows: ${CLUSTER_NAME}-calico + containerd-logger: enabled + csi-proxy: enabled + metrics-server: enabled + storageclass: "true" + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureCluster + name: ${CLUSTER_NAME} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureCluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + additionalTags: + buildProvenance: ${BUILD_PROVENANCE} + creationTimestamp: ${TIMESTAMP} + jobName: ${JOB_NAME} + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + location: ${AZURE_LOCATION} + networkSpec: + apiServerLB: + frontendIPs: + - name: ${CLUSTER_NAME}-api-lb + publicIP: + dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 + subnets: + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet + role: control-plane + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet + role: node + vnet: + cidrBlocks: + - 10.0.0.0/16 + name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} + resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} + subscriptionID: ${AZURE_SUBSCRIPTION_ID} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + annotations: + controlplane.cluster.x-k8s.io/skip-kube-proxy: "true" + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + feature-gates: ${K8S_FEATURE_GATES:-""} + timeoutForControlPlane: 20m + controllerManager: + extraArgs: + allocate-node-cidrs: "false" + cloud-provider: external + cluster-name: ${CLUSTER_NAME} + v: "4" + etcd: + local: + dataDir: /var/lib/etcddisk/etcd + extraArgs: + quota-backend-bytes: "8589934592" + kubernetesVersion: ci/${CI_VERSION} + scheduler: + extraArgs: + authorization-always-allow-paths: /healthz,/readyz,/livez,/metrics + bind-address: 0.0.0.0 + diskSetup: + filesystems: + - device: /dev/disk/azure/scsi1/lun0 + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: etcd_disk + - device: ephemeral0.1 + filesystem: ext4 + 
label: ephemeral0 + replaceFS: ntfs + partitions: + - device: /dev/disk/azure/scsi1/lun0 + layout: true + overwrite: false + tableType: gpt + files: + - contentFrom: + secret: + key: control-plane-azure.json + name: ${CLUSTER_NAME}-control-plane-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # Run the az login command with managed identity + if az login --identity > /dev/null 2>&1; then + echo "Logged in Azure with managed identity" + echo "Use OOT credential provider" + mkdir -p /var/lib/kubelet/credential-provider + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider" -f /var/lib/kubelet/credential-provider/acr-credential-provider --auth-mode login + chmod 755 /var/lib/kubelet/credential-provider/acr-credential-provider + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml" -f /var/lib/kubelet/credential-provider-config.yaml --auth-mode login + chmod 644 /var/lib/kubelet/credential-provider-config.yaml + else + echo "Using curl to download the OOT credential provider" + mkdir -p /var/lib/kubelet/credential-provider + curl --retry 10 --retry-delay 5 -w "response status code is %{http_code}" -Lo /var/lib/kubelet/credential-provider/acr-credential-provider "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider" + chmod 755 /var/lib/kubelet/credential-provider/acr-credential-provider + curl --retry 10 --retry-delay 5 -w "response status code is %{http_code}" -Lo /var/lib/kubelet/credential-provider-config.yaml "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml" + chmod 644 /var/lib/kubelet/credential-provider-config.yaml + fi + owner: root:root + path: /tmp/oot-cred-provider.sh + permissions: "0744" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + systemctl stop kubelet + declare -a BINARIES=("kubeadm" "kubectl" "kubelet") + az login --identity + for BINARY in "$${BINARIES[@]}"; do + echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" -f "/usr/bin/$${BINARY}" --auth-mode login + done + systemctl restart kubelet + + # prepull images from gcr.io/k8s-staging-ci-images and retag it to + # registry.k8s.io so kubeadm can fetch correct images no matter what + declare -a IMAGES=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + IMAGE_REGISTRY_PREFIX=registry.k8s.io + for IMAGE in "$${IMAGES[@]}"; do + $${SUDO} ctr -n k8s.io images tag $$IMAGE_REGISTRY_PREFIX/$$IMAGE-amd64:"${CI_VERSION//+/_}" $$IMAGE_REGISTRY_PREFIX/$$IMAGE:"${CI_VERSION//+/_}" + $${SUDO} ctr -n k8s.io images tag $$IMAGE_REGISTRY_PREFIX/$$IMAGE-amd64:"${CI_VERSION//+/_}" gcr.io/k8s-staging-ci-images/$$IMAGE:"${CI_VERSION//+/_}" + done + + echo "kubeadm version: $(kubeadm version -o=short)" + echo "kubectl 
version: $(kubectl version --client=true)" + echo "kubelet version: $(kubelet --version)" + owner: root:root + path: /tmp/replace-k8s-binaries.sh + permissions: "0744" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + curl -L --retry 10 --retry-delay 5 https://github.com/mikefarah/yq/releases/download/v4.6.1/yq_linux_amd64.tar.gz --output /tmp/yq_linux_amd64.tar.gz + tar -xzvf /tmp/yq_linux_amd64.tar.gz -C /tmp && mv /tmp/yq_linux_amd64 /usr/bin/yq + rm /tmp/yq_linux_amd64.tar.gz + + export KUBECONFIG=/etc/kubernetes/admin.conf + kubectl -n kube-system set image daemonset/kube-proxy kube-proxy="${REGISTRY}/kube-proxy:${KUBE_IMAGE_TAG}" + systemctl stop kubelet + yq e '.spec.containers[0].image = "${REGISTRY}/kube-apiserver:${KUBE_IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-apiserver.yaml + yq e '.spec.containers[0].image = "${REGISTRY}/kube-controller-manager:${KUBE_IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-controller-manager.yaml + yq e '.spec.containers[0].image = "${REGISTRY}/kube-scheduler:${KUBE_IMAGE_TAG}"' -i /etc/kubernetes/manifests/kube-scheduler.yaml + systemctl restart kubelet + owner: root:root + path: /tmp/replace-k8s-components.sh + permissions: "0744" + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + image-credential-provider-bin-dir: /var/lib/kubelet/credential-provider + image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml + name: '{{ ds.meta_data["local_hostname"] }}' + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + image-credential-provider-bin-dir: /var/lib/kubelet/credential-provider + image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml + name: '{{ ds.meta_data["local_hostname"] }}' + mounts: + - - LABEL=etcd_disk + - /var/lib/etcddisk + postKubeadmCommands: + - bash -c /tmp/replace-k8s-components.sh + preKubeadmCommands: + - bash -c /tmp/oot-cred-provider.sh + - bash -c /tmp/replace-k8s-binaries.sh + verbosity: 5 + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-control-plane + replicas: ${CONTROL_PLANE_MACHINE_COUNT:=1} + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + dataDisks: + - diskSizeGB: 256 + lun: 0 + nameSuffix: etcddisk + identity: UserAssigned + image: + marketplace: + offer: capi + publisher: cncf-upstream + sku: ubuntu-2204-gen1 + version: latest + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY} + vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT:=2} + selector: {} + template: + metadata: + labels: + nodepool: pool1 + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-md-0 + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: 
${CLUSTER_NAME}-md-0 + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + identity: UserAssigned + image: + marketplace: + offer: capi + publisher: cncf-upstream + sku: ubuntu-2204-gen1 + version: latest + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY} + vmExtensions: + - name: CustomScript + protectedSettings: + commandToExecute: | + #!/bin/sh + echo "This script is a no-op used for extension testing purposes ..." + touch test_file + publisher: Microsoft.Azure.Extensions + version: "2.1" + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-0-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # Run the az login command with managed identity + if az login --identity > /dev/null 2>&1; then + echo "Logged in Azure with managed identity" + echo "Use OOT credential provider" + mkdir -p /var/lib/kubelet/credential-provider + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider" -f /var/lib/kubelet/credential-provider/acr-credential-provider --auth-mode login + chmod 755 /var/lib/kubelet/credential-provider/acr-credential-provider + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml" -f /var/lib/kubelet/credential-provider-config.yaml --auth-mode login + chmod 644 /var/lib/kubelet/credential-provider-config.yaml + else + echo "Use OOT credential provider" + mkdir -p /var/lib/kubelet/credential-provider + curl --retry 10 --retry-delay 5 -w "response status code is %{http_code}" -Lo /var/lib/kubelet/credential-provider/acr-credential-provider "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider" + chmod 755 /var/lib/kubelet/credential-provider/acr-credential-provider + curl --retry 10 --retry-delay 5 -w "response status code is %{http_code}" -Lo /var/lib/kubelet/credential-provider-config.yaml "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml" + chmod 644 /var/lib/kubelet/credential-provider-config.yaml + fi + owner: root:root + path: /tmp/oot-cred-provider.sh + permissions: "0744" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + systemctl stop kubelet + declare -a BINARIES=("kubeadm" "kubectl" "kubelet") + az login --identity + for BINARY in "$${BINARIES[@]}"; do + echo "* installing package: $${BINARY} ${KUBE_GIT_VERSION}" + az storage blob download --blob-url 
"https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/linux/amd64/$${BINARY}" -f "/usr/bin/$${BINARY}" --auth-mode login + done + systemctl restart kubelet + + echo "kubeadm version: $(kubeadm version -o=short)" + echo "kubectl version: $(kubectl version --client=true)" + echo "kubelet version: $(kubelet --version)" + owner: root:root + path: /tmp/replace-k8s-binaries.sh + permissions: "0744" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + image-credential-provider-bin-dir: /var/lib/kubelet/credential-provider + image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml + name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: + - echo '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts + - bash -c /tmp/oot-cred-provider.sh + - bash -c /tmp/replace-k8s-binaries.sh +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-md-win + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WINDOWS_WORKER_MACHINE_COUNT:-0} + selector: {} + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-md-win + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-md-win + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + annotations: + runtime: containerd + name: ${CLUSTER_NAME}-md-win + namespace: default +spec: + template: + metadata: + annotations: + runtime: containerd + windowsServerVersion: ${WINDOWS_SERVER_VERSION:=""} + spec: + identity: UserAssigned + image: + marketplace: + offer: capi-windows + publisher: cncf-upstream + sku: ${WINDOWS_SERVER_VERSION:=windows-2019}-containerd-gen1 + version: latest + osDisk: + diskSizeGB: 128 + managedDisk: + storageAccountType: Premium_LRS + osType: Windows + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY} + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-md-win + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-win-azure-json + owner: root:root + path: c:/k/azure.json + permissions: "0644" + - content: |- + Add-MpPreference -ExclusionProcess C:/opt/cni/bin/calico.exe + Add-MpPreference -ExclusionProcess C:/opt/cni/bin/calico-ipam.exe + path: C:/defender-exclude-calico.ps1 + permissions: "0744" + - content: | + # /tmp is assumed created and required for upstream e2e tests to pass + New-Item -ItemType Directory -Force -Path C:\tmp\ + path: C:/create-temp-folder.ps1 + permissions: "0744" + - content: | + $ErrorActionPreference = 'Stop' + + $$CONTAINERD_URL="${WINDOWS_CONTAINERD_URL}" + if($$CONTAINERD_URL -ne ""){ + # Kubelet service depends on contianerd service so make a best effort attempt to stop it + Stop-Service kubelet -Force -ErrorAction SilentlyContinue + Stop-Service containerd -Force + echo "downloading containerd: $$CONTAINERD_URL" + curl.exe --retry 10 --retry-delay 
5 -L "$$CONTAINERD_URL" --output "c:/k/containerd.tar.gz" + tar.exe -zxvf c:/k/containerd.tar.gz -C "c:/Program Files/containerd" --strip-components 1 + + Start-Service containerd + } + + containerd.exe --version + containerd-shim-runhcs-v1.exe --version + path: C:/replace-containerd.ps1 + permissions: "0744" + - content: | + mkdir -Force c:/localdumps + reg.exe add "HKLM\Software\Microsoft\Windows\Windows Error Reporting\LocalDumps" /V DumpCount /t REG_DWORD /d 50 /f + reg.exe add "HKLM\Software\Microsoft\Windows\Windows Error Reporting\LocalDumps" /V DumpType /t REG_DWORD /d 2 /f + reg.exe add "HKLM\Software\Microsoft\Windows\Windows Error Reporting\LocalDumps" /V DumpFolder /t REG_EXPAND_SZ /d "c:/LocalDumps" /f + # Enable sftp so we can copy crash dump files during log collection of stfp + $sshd_config = "$env:ProgramData\ssh\sshd_config" + if (-not (Test-Path $sshd_config)) { mkdir -Force $sshd_config } + Add-Content -Path $sshd_config "Subsystem sftp sftp-server.exe" + sc.exe stop sshd + sc.exe start sshd + path: C:/collect-hns-crashes.ps1 + permissions: "0744" + - content: | + $ErrorActionPreference = 'Stop' + + Write-Host "Attempting to log in to Azure with managed identity" + az login --identity > $null 2>&1 + if ($LASTEXITCODE -eq 0) { + Write-Host "Logged in Azure with managed identity" + Write-Host "Use OOT credential provider" + mkdir C:\var\lib\kubelet\credential-provider + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider.exe" -f C:\var\lib\kubelet\credential-provider\acr-credential-provider --auth-mode login + cp C:\var\lib\kubelet\credential-provider\acr-credential-provider C:\var\lib\kubelet\credential-provider\acr-credential-provider.exe + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml" -f C:\var\lib\kubelet\credential-provider-config.yaml --auth-mode login + } else { + Write-Host "Using curl to download the OOT credential provider" + mkdir C:\var\lib\kubelet\credential-provider + curl.exe --retry 10 --retry-delay 5 -L "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider.exe" --output C:\var\lib\kubelet\credential-provider\acr-credential-provider + cp C:\var\lib\kubelet\credential-provider\acr-credential-provider C:\var\lib\kubelet\credential-provider\acr-credential-provider.exe + curl.exe --retry 10 --retry-delay 5 -L "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml" --output C:\var\lib\kubelet\credential-provider-config.yaml + } + path: C:/oot-cred-provider.ps1 + permissions: "0744" + - content: | + Write-Host "Installing Azure CLI" + $ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://azcliprod.blob.core.windows.net/msi/azure-cli-2.53.0.msi -OutFile .\AzureCLI.msi; Start-Process msiexec.exe -Wait -ArgumentList '/I AzureCLI.msi /quiet'; Remove-Item .\AzureCLI.msi + # Need to add manually AZ to PATH as it is not added without a reset + Write-Host "Adding AZ to path:" + $env:PATH +=";C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\wbin" + Write-Host "Version Azure CLI:" + az version + path: C:/install-az-cli.ps1 + permissions: "0744" + - content: | + $ErrorActionPreference 
= 'Stop' + + Stop-Service kubelet -Force + + $$KUBE_GIT_VERSION="${KUBE_GIT_VERSION}" + if($$KUBE_GIT_VERSION -ne "") + { + # Need to manually add AZ to PATH as it is not added without a reset + Write-Host "Adding AZ to path:" + $env:PATH +=";C:\Program Files (x86)\Microsoft SDKs\Azure\CLI2\wbin" + # Install Binaries + $$binaries=@("kubeadm", "kubectl", "kubelet", "kube-proxy") + $$ci_url="https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${KUBE_GIT_VERSION}/bin/windows/amd64" + echo "running az login via identity" + az login --identity + foreach ( $$binary in $$binaries ) + { + echo "downloading binary: $$ci_url/$$binary.exe" + az storage blob download --blob-url "$$ci_url/$$binary.exe" --file "c:/k/$$binary.exe" --auth-mode login + } + } + + # Tag it to the ci version. The image knows how to use the local copy with the configmap + # that is applied at this stage (windows-kubeproxy-ci.yaml) + ctr.exe -n k8s.io images pull docker.io/sigwindowstools/kube-proxy:v1.23.1-calico-hostprocess + ctr.exe -n k8s.io images tag docker.io/sigwindowstools/kube-proxy:v1.23.1-calico-hostprocess "docker.io/sigwindowstools/kube-proxy:${CI_VERSION/+/_}-calico-hostprocess" + + kubeadm.exe version -o=short + kubectl.exe version --client=true + kubelet.exe --version + kube-proxy.exe --version + path: C:/replace-pr-binaries.ps1 + permissions: "0744" + joinConfiguration: + nodeRegistration: + criSocket: npipe:////./pipe/containerd-containerd + kubeletExtraArgs: + cloud-provider: external + feature-gates: ${NODE_FEATURE_GATES:-""} + image-credential-provider-bin-dir: /var/lib/kubelet/credential-provider + image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml + v: "2" + windows-priorityclass: ABOVE_NORMAL_PRIORITY_CLASS + name: '{{ ds.meta_data["local_hostname"] }}' + postKubeadmCommands: + - nssm set kubelet start SERVICE_AUTO_START + - powershell C:/defender-exclude-calico.ps1 + preKubeadmCommands: + - powershell C:/create-temp-folder.ps1 + - powershell C:/replace-containerd.ps1 + - powershell C:/collect-hns-crashes.ps1 + - powershell C:/oot-cred-provider.ps1 + - powershell C:/install-az-cli.ps1 + - powershell C:/replace-pr-binaries.ps1 + users: + - groups: Administrators + name: capi + sshAuthorizedKeys: + - ${AZURE_SSH_PUBLIC_KEY:=""} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineHealthCheck +metadata: + name: ${CLUSTER_NAME}-mhc-0 + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + maxUnhealthy: 100% + selector: + matchLabels: + nodepool: pool1 + unhealthyConditions: + - status: "True" + timeout: 30s + type: E2ENodeUnhealthy +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: ${CLUSTER_NAME}-calico-windows + namespace: default +spec: + clusterSelector: + matchLabels: + cni-windows: ${CLUSTER_NAME}-calico + resources: + - kind: ConfigMap + name: cni-${CLUSTER_NAME}-calico-windows + strategy: ApplyOnce +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterIdentity +metadata: + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: ${CLUSTER_IDENTITY_NAME} + namespace: default +spec: + allowedNamespaces: {} + clientID: ${AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY} + tenantID: ${AZURE_TENANT_ID} + type: ${CLUSTER_IDENTITY_TYPE:=WorkloadIdentity} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: csi-proxy + namespace: default +spec: + clusterSelector: + matchLabels: + csi-proxy: enabled + resources: + - kind:
ConfigMap + name: csi-proxy-addon + strategy: ApplyOnce +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: containerd-logger-${CLUSTER_NAME} + namespace: default +spec: + clusterSelector: + matchLabels: + containerd-logger: enabled + resources: + - kind: ConfigMap + name: containerd-logger-${CLUSTER_NAME} + strategy: ApplyOnce +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: calico + namespace: default +spec: + chartName: tigera-operator + clusterSelector: + matchLabels: + cni: calico + namespace: tigera-operator + releaseName: projectcalico + repoURL: https://docs.tigera.io/calico/charts + valuesTemplate: |- + installation: + cni: + type: Calico + calicoNetwork: + bgp: Disabled + mtu: 1350 + ipPools: + ipPools:{{range $i, $cidr := .Cluster.spec.clusterNetwork.pods.cidrBlocks }} + - cidr: {{ $cidr }} + encapsulation: VXLAN{{end}} + registry: mcr.microsoft.com/oss + # Image and registry configuration for the tigera/operator pod. + tigeraOperator: + image: tigera/operator + registry: mcr.microsoft.com/oss + calicoctl: + image: mcr.microsoft.com/oss/calico/ctl + version: ${CALICO_VERSION} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart + namespace: default +spec: + chartName: cloud-provider-azure + clusterSelector: + matchLabels: + cloud-provider: azure + releaseName: cloud-provider-azure-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + logVerbosity: 4 +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart-ci + namespace: default +spec: + chartName: cloud-provider-azure + clusterSelector: + matchLabels: + cloud-provider: azure-ci + releaseName: cloud-provider-azure-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + cloudConfig: ${CLOUD_CONFIG:-"/etc/kubernetes/azure.json"} + cloudConfigSecretName: ${CONFIG_SECRET_NAME:-""} + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + imageName: "${CCM_IMAGE_NAME:-""}" + imageRepository: "${IMAGE_REGISTRY:-""}" + imageTag: "${IMAGE_TAG_CCM:-""}" + logVerbosity: ${CCM_LOG_VERBOSITY:-4} + replicas: ${CCM_COUNT:-1} + enableDynamicReloading: ${ENABLE_DYNAMIC_RELOADING:-false} + cloudNodeManager: + imageName: "${CNM_IMAGE_NAME:-""}" + imageRepository: "${IMAGE_REGISTRY:-""}" + imageTag: "${IMAGE_TAG_CNM:-""}" +--- +apiVersion: v1 +data: + kube-proxy-patch: |- + apiVersion: v1 + kind: ConfigMap + metadata: + name: windows-kubeproxy-ci + namespace: kube-system + data: + 
KUBEPROXY_PATH: "c:/k/kube-proxy.exe" + proxy: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy-windows + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: kube-proxy-windows + template: + metadata: + labels: + k8s-app: kube-proxy-windows + spec: + serviceAccountName: kube-proxy + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\system" + hostNetwork: true + containers: + - image: sigwindowstools/kube-proxy:${KUBERNETES_VERSION/+/_}-calico-hostprocess + args: ["$env:CONTAINER_SANDBOX_MOUNT_POINT/kube-proxy/start.ps1"] + workingDir: "$env:CONTAINER_SANDBOX_MOUNT_POINT/kube-proxy/" + name: kube-proxy + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: KUBEPROXY_PATH + valueFrom: + configMapKeyRef: + name: windows-kubeproxy-ci + key: KUBEPROXY_PATH + optional: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + nodeSelector: + kubernetes.io/os: windows + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - configMap: + name: kube-proxy + name: kube-proxy + updateStrategy: + type: RollingUpdate + windows-cni: "# strictAffinity required for windows\napiVersion: crd.projectcalico.org/v1\nkind: + IPAMConfig\nmetadata:\n name: default\nspec:\n autoAllocateBlocks: true\n strictAffinity: + true\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: calico-static-rules\n + \ namespace: calico-system\n labels:\n tier: node\n app: calico\ndata:\n + \ static-rules.json: |\n {\n \"Provider\": \"azure\",\n \"Version\": + \"0.1\",\n \"Rules\": [\n {\n \"Name\": \"EndpointPolicy\",\n + \ \"Rule\": {\n \"Id\": \"wireserver\",\n \"Type\": + \"ACL\",\n \"Protocol\": 6,\n \"Action\": \"Block\",\n + \ \"Direction\": \"Out\",\n \"RemoteAddresses\": \"168.63.129.16/32\",\n + \ \"RemotePorts\": \"80\",\n \"Priority\": 200,\n \"RuleType\": + \"Switch\"\n }\n }\n ]\n } \n---\nkind: ConfigMap\napiVersion: + v1\nmetadata:\n name: calico-config-windows\n namespace: calico-system\n labels:\n + \ tier: node\n app: calico\ndata:\n veth_mtu: \"1350\"\n \n cni_network_config: + |\n {\n \"name\": \"Calico\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": + [\n {\n \"windows_use_single_network\": true,\n \"type\": + \"calico\",\n \"mode\": \"vxlan\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n + \ \"nodename_file_optional\": true,\n \"log_file_path\": \"c:/cni.log\",\n + \ \"log_level\": \"debug\",\n\n \"vxlan_mac_prefix\": \"0E-2A\",\n + \ \"vxlan_vni\": 4096,\n \"mtu\": __CNI_MTU__,\n \"policy\": + {\n \"type\": \"k8s\"\n },\n\n \"log_level\": \"info\",\n\n + \ \"capabilities\": {\"dns\": true},\n \"DNS\": {\n \"Search\": + \ [\n \"svc.cluster.local\"\n ]\n },\n\n \"datastore_type\": + \"kubernetes\",\n\n \"kubernetes\": {\n \"kubeconfig\": \"__KUBECONFIG_FILEPATH__\"\n + \ },\n\n \"ipam\": {\n \"type\": \"calico-ipam\",\n + \ \"subnet\": \"usePodCidr\"\n },\n\n \"policies\": + \ [\n {\n \"Name\": \"EndpointPolicy\",\n \"Value\": + \ {\n \"Type\": \"OutBoundNAT\",\n \"ExceptionList\": + \ [\n \"__K8S_SERVICE_CIDR__\"\n ]\n }\n + \ },\n {\n \"Name\": \"EndpointPolicy\",\n + \ \"Value\": {\n \"Type\": \"SDNROUTE\",\n \"DestinationPrefix\": + \ \"__K8S_SERVICE_CIDR__\",\n \"NeedEncap\": true\n }\n + \ }\n ]\n }\n ]\n\n }\n---\napiVersion: apps/v1\nkind: + DaemonSet\nmetadata:\n name: calico-node-windows\n labels:\n tier: node\n 
+ \ app: calico\n namespace: calico-system\nspec:\n selector:\n matchLabels:\n + \ app: calico\n template:\n metadata:\n labels:\n tier: node\n + \ app: calico\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n + \ nodeSelectorTerms:\n - matchExpressions:\n - + key: kubernetes.io/os\n operator: In\n values:\n + \ - windows\n - key: kubernetes.io/arch\n + \ operator: In\n values:\n - + amd64\n securityContext:\n windowsOptions:\n hostProcess: + true\n runAsUserName: \"NT AUTHORITY\\\\system\"\n hostNetwork: + true\n serviceAccountName: calico-node\n tolerations:\n - operator: + Exists\n effect: NoSchedule\n # Mark the pod as a critical add-on + for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n + \ - effect: NoExecute\n operator: Exists\n initContainers:\n # + This container installs the CNI binaries\n # and CNI network config file + on each node.\n - name: install-cni\n image: sigwindowstools/calico-install:v3.26.1-hostprocess\n + \ args: [\"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/install.ps1\"]\n + \ imagePullPolicy: Always\n env:\n # Name of the CNI + config file to create.\n - name: CNI_CONF_NAME\n value: + \"10-calico.conflist\"\n # The CNI network config to install on each + node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n + \ name: calico-config-windows\n key: cni_network_config\n + \ # Set the hostname based on the k8s node name.\n - name: + KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: + spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n + \ valueFrom:\n configMapKeyRef:\n name: + calico-config-windows\n key: veth_mtu\n # Prevents + the container from sleeping forever.\n - name: SLEEP\n value: + \"false\"\n - name: K8S_SERVICE_CIDR\n value: \"10.96.0.0/12\"\n + \ volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: + cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: + cni-net-dir\n - name: kubeadm-config\n mountPath: /etc/kubeadm-config/\n + \ securityContext:\n windowsOptions:\n hostProcess: + true\n runAsUserName: \"NT AUTHORITY\\\\system\"\n containers:\n + \ - name: calico-node-startup\n image: sigwindowstools/calico-node:v3.26.1-hostprocess\n + \ args: [\"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/node-service.ps1\"]\n + \ workingDir: \"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/\"\n imagePullPolicy: + Always\n volumeMounts:\n - name: calico-config-windows\n mountPath: + /etc/kube-calico-windows/\n env:\n - name: POD_NAME\n valueFrom:\n + \ fieldRef:\n apiVersion: v1\n fieldPath: + metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n + \ apiVersion: v1\n fieldPath: metadata.namespace\n - + name: CNI_IPAM_TYPE\n value: \"calico-ipam\"\n - name: CALICO_NETWORKING_BACKEND\n + \ value: \"vxlan\"\n - name: KUBECONFIG\n value: \"C:/etc/cni/net.d/calico-kubeconfig\"\n + \ - name: VXLAN_VNI\n value: \"4096\"\n - name: calico-node-felix\n + \ image: sigwindowstools/calico-node:v3.26.1-hostprocess\n args: + [\"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/felix-service.ps1\"]\n imagePullPolicy: + Always\n workingDir: \"$env:CONTAINER_SANDBOX_MOUNT_POINT/calico/\"\n volumeMounts:\n + \ - name: calico-config-windows\n mountPath: /etc/kube-calico-windows/\n + \ - name: calico-static-rules\n mountPath: /calico/static-rules.json\n + \ subPath: static-rules.json\n env:\n - name: POD_NAME\n + \ valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: + metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n + \ apiVersion: v1\n fieldPath: metadata.namespace\n - + name: 
VXLAN_VNI\n value: \"4096\"\n - name: KUBECONFIG\n value: + \"C:/etc/cni/net.d/calico-kubeconfig\"\n volumes:\n - name: calico-config-windows\n + \ configMap:\n name: calico-config-windows\n - name: calico-static-rules\n + \ configMap:\n name: calico-static-rules\n # Used to install + CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n + \ - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n + \ - name: kubeadm-config\n configMap:\n name: kubeadm-config\n---\napiVersion: + apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n + \ group: crd.projectcalico.org\n names:\n kind: IPAMConfig\n listKind: + IPAMConfigList\n plural: ipamconfigs\n singular: ipamconfig\n preserveUnknownFields: + false\n scope: Cluster\n versions:\n - name: v1\n schema:\n openAPIV3Schema:\n + \ properties:\n apiVersion:\n description: 'APIVersion + defines the versioned schema of this representation\n of an object. + Servers should convert recognized schemas to the latest\n internal + value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'\n + \ type: string\n kind:\n description: 'Kind is a + string value representing the REST resource this\n object represents. + Servers may infer this from the endpoint the client\n submits requests + to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'\n + \ type: string\n metadata:\n type: object\n spec:\n + \ description: IPAMConfigSpec contains the specification for an IPAMConfig\n + \ resource.\n properties:\n autoAllocateBlocks:\n + \ type: boolean\n maxBlocksPerHost:\n description: + MaxBlocksPerHost, if non-zero, is the max number of blocks\n that + can be affine to each host.\n maximum: 2147483647\n minimum: + 0\n type: integer\n strictAffinity:\n type: + boolean\n required:\n - autoAllocateBlocks\n - + strictAffinity\n type: object\n type: object\n served: true\n + \ storage: true\nstatus:\n acceptedNames:\n kind: \"\"\n plural: \"\"\n + \ conditions: []\n storedVersions: []\n" +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: cni-${CLUSTER_NAME}-calico-windows + namespace: default +--- +apiVersion: v1 +data: + csi-proxy: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: csi-proxy + name: csi-proxy + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: csi-proxy + template: + metadata: + labels: + k8s-app: csi-proxy + spec: + nodeSelector: + "kubernetes.io/os": windows + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + containers: + - name: csi-proxy + image: ghcr.io/kubernetes-sigs/sig-windows/csi-proxy:v1.0.2 +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: csi-proxy-addon + namespace: default +--- +apiVersion: v1 +data: + containerd-windows-logger: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: containerd-logger + name: containerd-logger + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: containerd-logger + template: + metadata: + labels: + k8s-app: containerd-logger + spec: + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\system" + hostNetwork: true + containers: + - image: 
ghcr.io/kubernetes-sigs/sig-windows/eventflow-logger:v0.1.0 + args: [ "config.json" ] + name: containerd-logger + imagePullPolicy: Always + volumeMounts: + - name: containerd-logger-config + mountPath: /config.json + subPath: config.json + nodeSelector: + kubernetes.io/os: windows + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - configMap: + name: containerd-logger-config + name: containerd-logger-config + updateStrategy: + type: RollingUpdate + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: containerd-logger-config + namespace: kube-system + data: + config.json: | + { + "inputs": [ + { + "type": "ETW", + "sessionNamePrefix": "containerd", + "cleanupOldSessions": true, + "reuseExistingSession": true, + "providers": [ + { + "providerName": "Microsoft.Virtualization.RunHCS", + "providerGuid": "0B52781F-B24D-5685-DDF6-69830ED40EC3", + "level": "Verbose" + }, + { + "providerName": "ContainerD", + "providerGuid": "2acb92c0-eb9b-571a-69cf-8f3410f383ad", + "level": "Verbose" + } + ] + } + ], + "filters": [ + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == Stats && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == hcsshim::LayerID && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == hcsshim::NameToGuid && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == containerd.task.v2.Task.Stats && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == containerd.task.v2.Task.State && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == HcsGetProcessProperties && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == HcsGetComputeSystemProperties && hasnoproperty error" + } + ], + "outputs": [ + { + "type": "StdOutput" + } + ], + "schemaVersion": "2016-08-11" + } +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: containerd-logger-${CLUSTER_NAME} + namespace: default +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: metrics-server-${CLUSTER_NAME} + namespace: default +spec: + clusterSelector: + matchLabels: + metrics-server: enabled + resources: + - kind: ConfigMap + name: metrics-server-${CLUSTER_NAME} + strategy: ApplyOnce +--- +apiVersion: v1 +data: + metrics-server: | + apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + k8s-app: metrics-server + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:aggregated-metrics-reader + rules: + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server + rules: + - apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get + - apiGroups: + - "" + resources: + - pods + - nodes + 
verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + k8s-app: metrics-server + name: metrics-server-auth-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + k8s-app: metrics-server + name: metrics-server:system:auth-delegator + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator + subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:metrics-server + subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system + --- + apiVersion: v1 + kind: Service + metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system + spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + k8s-app: metrics-server + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: metrics-server + strategy: + rollingUpdate: + maxUnavailable: 0 + template: + metadata: + labels: + k8s-app: metrics-server + spec: + containers: + - args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + - --kubelet-insecure-tls + image: registry.k8s.io/metrics-server/metrics-server:v0.6.3 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + name: metrics-server + ports: + - containerPort: 4443 + name: https + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + resources: + requests: + cpu: 100m + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /tmp + name: tmp-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: metrics-server + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - emptyDir: {} + name: tmp-dir + --- + apiVersion: apiregistration.k8s.io/v1 + kind: APIService + metadata: + labels: + k8s-app: metrics-server + name: v1beta1.metrics.k8s.io + spec: + group: metrics.k8s.io + groupPriorityMinimum: 100 + insecureSkipTLSVerify: true + service: + name: metrics-server + namespace: kube-system + version: v1beta1 + versionPriority: 100 +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: metrics-server-${CLUSTER_NAME} + namespace: default +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: ${CLUSTER_NAME}-storageclass + namespace: default +spec: + 
clusterSelector: + matchLabels: + storageclass: "true" + resources: + - kind: ConfigMap + name: storageclass-${CLUSTER_NAME} + strategy: ApplyOnce +--- +apiVersion: v1 +data: + storageclass: | + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + name: default + annotations: + storageclass.beta.kubernetes.io/is-default-class: "true" + labels: + kubernetes.io/cluster-service: "true" + provisioner: kubernetes.io/azure-disk + parameters: + kind: Managed + storageaccounttype: Standard_LRS + cachingmode: ReadOnly + volumeBindingMode: WaitForFirstConsumer + --- + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + name: managed-premium + annotations: + labels: + kubernetes.io/cluster-service: "true" + provisioner: kubernetes.io/azure-disk + parameters: + kind: Managed + storageaccounttype: Premium_LRS + cachingmode: ReadOnly + volumeBindingMode: WaitForFirstConsumer + --- + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + name: managed-standard + annotations: + labels: + kubernetes.io/cluster-service: "true" + provisioner: kubernetes.io/azure-disk + parameters: + kind: Managed + storageaccounttype: Standard_LRS + cachingmode: ReadOnly + volumeBindingMode: WaitForFirstConsumer +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: storageclass-${CLUSTER_NAME} + namespace: default diff --git a/templates/test/dev/cluster-template-custom-builds.yaml b/templates/test/dev/cluster-template-custom-builds.yaml index 7c07739985e..eca5a0ea304 100644 --- a/templates/test/dev/cluster-template-custom-builds.yaml +++ b/templates/test/dev/cluster-template-custom-builds.yaml @@ -40,12 +40,26 @@ spec: name: ${CLUSTER_IDENTITY_NAME} location: ${AZURE_LOCATION} networkSpec: + apiServerLB: + frontendIPs: + - name: ${CLUSTER_NAME}-api-lb + publicIP: + dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 10.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 10.0.0.0/24 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 10.0.1.0/24 + name: node-subnet role: node vnet: + cidrBlocks: + - 10.0.0.0/16 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} @@ -373,6 +387,8 @@ spec: image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml name: '{{ ds.meta_data["local_hostname"] }}' preKubeadmCommands: + - echo '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts + - bash -c /tmp/oot-cred-provider.sh + - bash -c /tmp/replace-k8s-binaries.sh --- diff --git a/templates/test/dev/custom-builds-load/kustomization.yaml b/templates/test/dev/custom-builds-load/kustomization.yaml new file mode 100644 index 00000000000..be9e5ffad81 --- /dev/null +++ b/templates/test/dev/custom-builds-load/kustomization.yaml @@ -0,0 +1,22 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: +- ../custom-builds +- storageclass-resource-set.yaml +patches: +- path: patches/cluster-label-storageclass.yaml +- path: patches/cluster-label-azuredisk.yaml +- path: patches/kcp-scheduler.yaml +generatorOptions: + annotations: + note: generated + disableNameSuffixHash: true + labels: + type: generated +configMapGenerator: +- files: + -
storageclass=../../../addons/storageclass-azure-disk.yaml + name: storageclass-${CLUSTER_NAME} +sortOptions: + order: fifo \ No newline at end of file diff --git a/templates/test/dev/custom-builds-load/patches/cluster-label-azuredisk.yaml b/templates/test/dev/custom-builds-load/patches/cluster-label-azuredisk.yaml new file mode 100644 index 00000000000..e1fab4ee278 --- /dev/null +++ b/templates/test/dev/custom-builds-load/patches/cluster-label-azuredisk.yaml @@ -0,0 +1,6 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} + labels: + azuredisk-csi: "true" diff --git a/templates/test/dev/custom-builds-load/patches/cluster-label-storageclass.yaml b/templates/test/dev/custom-builds-load/patches/cluster-label-storageclass.yaml new file mode 100644 index 00000000000..3928f7d694f --- /dev/null +++ b/templates/test/dev/custom-builds-load/patches/cluster-label-storageclass.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} + labels: + storageclass: "true" diff --git a/templates/test/dev/custom-builds-load/patches/kcp-scheduler.yaml b/templates/test/dev/custom-builds-load/patches/kcp-scheduler.yaml new file mode 100644 index 00000000000..6e9d49617fe --- /dev/null +++ b/templates/test/dev/custom-builds-load/patches/kcp-scheduler.yaml @@ -0,0 +1,12 @@ +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + kubeadmConfigSpec: + clusterConfiguration: + scheduler: + extraArgs: + authorization-always-allow-paths: /healthz,/readyz,/livez,/metrics + bind-address: 0.0.0.0 diff --git a/templates/test/dev/custom-builds-load/storageclass-resource-set.yaml b/templates/test/dev/custom-builds-load/storageclass-resource-set.yaml new file mode 100644 index 00000000000..00c100e5057 --- /dev/null +++ b/templates/test/dev/custom-builds-load/storageclass-resource-set.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: ${CLUSTER_NAME}-storageclass + namespace: default +spec: + clusterSelector: + matchLabels: + storageclass: "true" + resources: + - kind: ConfigMap + name: storageclass-${CLUSTER_NAME} + strategy: ApplyOnce
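
A note on the internal load balancer wiring above: both custom-builds templates pin a second API server frontend to the fixed private IP 10.0.0.100 and add a matching /etc/hosts entry on worker nodes via preKubeadmCommands, so the API server FQDN resolves to the internal LB from inside the VNet. A minimal smoke test from a Linux worker node, assuming the same CLUSTER_NAME, APISERVER_LB_DNS_SUFFIX, and AZURE_LOCATION values that were substituted into the template:

  # Sketch: verify the pinned /etc/hosts entry and private API server reachability.
  APISERVER_FQDN="${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com"
  getent hosts "${APISERVER_FQDN}"   # expect 10.0.0.100, from the preKubeadmCommands echo
  # /healthz is readable by unauthenticated clients under the default RBAC bindings;
  # -k because we are only probing reachability, not validating the serving cert.
  curl -k "https://${APISERVER_FQDN}:6443/healthz"   # expect "ok"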
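
Similarly, /tmp/replace-k8s-components.sh swaps the control-plane static pod manifests in place with yq and relies on kubelet restarting the mirror pods. A sketch for confirming the swap took effect on a control plane node, using the same yq v4 syntax the script itself uses; REGISTRY and KUBE_IMAGE_TAG here stand for the values substituted at render time:

  # Sketch: confirm the custom-build images landed in the static pod manifests.
  for component in kube-apiserver kube-controller-manager kube-scheduler; do
    # expect ${REGISTRY}/<component>:${KUBE_IMAGE_TAG}
    yq e '.spec.containers[0].image' "/etc/kubernetes/manifests/${component}.yaml"
  done
  # kube-proxy is updated through its daemonset rather than a static manifest:
  KUBECONFIG=/etc/kubernetes/admin.conf kubectl -n kube-system get daemonset kube-proxy \
    -o jsonpath='{.spec.template.spec.containers[0].image}'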
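
The checked-in cluster-template-custom-builds-load.yaml is the rendered output of the custom-builds-load kustomization, which layers the storageclass ClusterResourceSet, the two cluster labels, and the kcp-scheduler patch on top of ../custom-builds. A rough sketch of regenerating it by hand, assuming kustomize v4+; the load restrictor has to be relaxed because the configMapGenerator reaches outside the directory to ../../../addons:

  # Sketch: re-render the flavor after changing the kustomization or its patches.
  kustomize build --load-restrictor LoadRestrictionsNone templates/test/dev/custom-builds-load \
    > templates/test/dev/cluster-template-custom-builds-load.yaml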
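
Because the storageclass ClusterResourceSet only matches clusters labeled storageclass: "true" (added by cluster-label-storageclass.yaml) and applies the generated storageclass-${CLUSTER_NAME} ConfigMap with an ApplyOnce strategy, any name or label mismatch fails silently. A quick check from the management cluster, assuming the workload kubeconfig was written to ./kubeconfig:

  # Sketch: confirm the CRS matched the cluster and the StorageClasses were applied.
  kubectl get cluster "${CLUSTER_NAME}" -o jsonpath='{.metadata.labels.storageclass}'   # expect: true
  kubectl get clusterresourcesetbinding "${CLUSTER_NAME}" -o yaml                       # lists the applied ConfigMaps
  kubectl --kubeconfig=./kubeconfig get storageclass   # expect default, managed-premium, managed-standard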
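
Finally, the kcp-scheduler patch binds kube-scheduler to 0.0.0.0 and exempts /metrics and the health endpoints from authorization, which is what allows a perf-testing harness to scrape scheduler metrics directly. A sketch of a manual scrape; the node IP 10.0.0.4 is an assumption (a typical first VM address in the 10.0.0.0/24 control-plane subnet, since Azure reserves .1 through .3), and 10259 is the scheduler's default secure port:

  # Sketch: scrape scheduler metrics over the exposed secure port.
  # Anonymous requests are authenticated as system:anonymous and then allowed by
  # authorization-always-allow-paths, so no token is needed for /metrics.
  curl -sk https://10.0.0.4:10259/metrics | head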