From c5fe4e213971bfc1e64b72a759396a45da079fd2 Mon Sep 17 00:00:00 2001 From: badra001 Date: Mon, 24 Nov 2025 14:01:44 -0500 Subject: [PATCH] * 2.6.0 -- 2025-11-24 - create new 1.32, update charts and images --- CHANGELOG.md | 3 + .../full-cluster-tf-upgrade/1.31-1.32.diffs | 140 ++++++ .../full-cluster-tf-upgrade/1.32/.gitignore | 5 + .../full-cluster-tf-upgrade/1.32/.tf-control | 20 + .../1.32/.tf-control.tfrc | 24 + .../full-cluster-tf-upgrade/1.32/README.md | 468 +++++++++++++++++ .../full-cluster-tf-upgrade/1.32/ROLES.md | 119 +++++ .../1.32/addons/.tf-control | 20 + .../1.32/addons/.tf-control.tfrc | 24 + .../1.32/addons/README.addons.md | 3 + .../1.32/addons/README.ebs.md | 75 +++ .../1.32/addons/README.md | 122 +++++ .../1.32/addons/addon_cloudwatch.tf | 66 +++ .../1.32/addons/addon_coredns.tf | 11 + .../1.32/addons/addon_ebs-csi.tf | 127 +++++ .../1.32/addons/addon_kube-proxy.tf | 13 + .../1.32/addons/addon_vpc-cni.tf | 99 ++++ .../1.32/addons/addons.tf | 5 + .../1.32/addons/data.eks-subdirectory.tf | 1 + .../addons/kubeconfig.eks-subdirectory.tf | 1 + .../1.32/addons/locals.tf | 17 + .../1.32/addons/parent_rs.tf | 1 + .../1.32/addons/prefixes.tf | 1 + .../1.32/addons/providers.tf | 1 + .../1.32/addons/region.tf | 4 + .../1.32/addons/role.tf.off | 101 ++++ .../1.32/addons/tf-run.data | 31 ++ .../1.32/addons/tf-run.destroy.data | 6 + .../1.32/addons/variables.addons.tf | 1 + .../1.32/addons/variables.eks.tf | 1 + .../1.32/addons/version.tf | 1 + .../1.32/applications/tf-run.data | 31 ++ .../1.32/applications/tf-run.destroy.data | 6 + .../1.32/aws-auth/.tf-control | 20 + .../1.32/aws-auth/.tf-control.tfrc | 24 + .../1.32/aws-auth/README.md | 68 +++ .../1.32/aws-auth/aws-auth.auto.tfvars | 28 ++ .../aws-auth/config_map.aws-auth.yaml.tpl | 17 + .../1.32/aws-auth/data.eks-subdirectory.tf | 1 + .../aws-auth/kubeconfig.eks-subdirectory.tf | 1 + .../1.32/aws-auth/patch-aws-auth.tf | 135 +++++ .../1.32/aws-auth/prefixes.tf | 1 + .../1.32/aws-auth/providers.tf | 1 + .../1.32/aws-auth/region.tf | 4 + .../1.32/aws-auth/settings.aws-auth.tf | 11 + .../1.32/aws-auth/tf-run.data | 14 + .../1.32/aws-auth/tf-run.destroy.data | 9 + .../1.32/aws-auth/variables.aws-auth.tf | 23 + .../1.32/aws-auth/variables.eks.tf | 1 + .../1.32/aws-auth/version.tf | 1 + .../1.32/aws-auth/versions.tf | 1 + .../1.32/bin/copy_image.sh | 326 ++++++++++++ .../1.32/bin/fix-terminating-namespace.sh | 29 ++ .../1.32/bin/remove-ecr.sh | 77 +++ .../1.32/bin/show-k8s-things.sh | 7 + .../1.32/charts-images.tf | 8 + .../full-cluster-tf-upgrade/1.32/charts.yml | 24 + .../1.32/cluster-roles/.tf-control | 20 + .../1.32/cluster-roles/.tf-control.tfrc | 24 + .../1.32/cluster-roles/README.md | 238 +++++++++ .../1.32/cluster-roles/RESULTS.md | 41 ++ .../1.32/cluster-roles/cm.tf.off | 6 + .../cluster-roles/data.eks-subdirectory.tf | 1 + .../1.32/cluster-roles/dba-clusterrole.tf | 24 + .../1.32/cluster-roles/dba-rolebinding.tf | 40 ++ .../1.32/cluster-roles/dba.iam.tf | 117 +++++ .../cluster-roles/deployer-clusterrole.tf | 67 +++ .../cluster-roles/deployer-rolebinding.tf | 91 ++++ .../1.32/cluster-roles/deployer.iam.tf | 167 ++++++ .../kubeconfig.eks-subdirectory.tf | 1 + .../1.32/cluster-roles/locals.tf | 11 + .../1.32/cluster-roles/main.tf | 30 ++ .../1.32/cluster-roles/prefixes.tf | 1 + .../1.32/cluster-roles/providers.tf | 1 + .../1.32/cluster-roles/region.tf | 4 + .../1.32/cluster-roles/tf-run.data | 18 + .../1.32/cluster-roles/variables.auto.tfvars | 16 + .../1.32/cluster-roles/variables.eks.tf | 1 + 
.../1.32/cluster-roles/variables.tf | 83 +++ .../1.32/cluster-roles/version.tf | 1 + .../1.32/cluster-roles/versions.tf | 1 + .../1.32/common-services/.gitignore | 1 + .../1.32/common-services/.tf-control | 20 + .../1.32/common-services/.tf-control.tfrc | 24 + .../1.32/common-services/README.md | 66 +++ .../1.32/common-services/README.output.md | 84 ++++ .../common-services/cert-manager-issuer.tf | 15 + .../charts/cluster-autoscaler/.helmignore | 23 + .../charts/cluster-autoscaler/Chart.yaml | 20 + .../charts/cluster-autoscaler/README.md | 5 + .../cluster-autoscaler/templates/NOTES.txt | 18 + .../cluster-autoscaler/templates/_helpers.tpl | 117 +++++ .../templates/clusterrole.yaml | 163 ++++++ .../templates/clusterrolebinding.yaml | 16 + .../templates/deployment.yaml | 291 +++++++++++ .../cluster-autoscaler/templates/pdb.yaml | 16 + .../templates/podsecuritypolicy.yaml | 46 ++ .../priority-expander-configmap.yaml | 22 + .../templates/prometheusrule.yaml | 15 + .../cluster-autoscaler/templates/role.yaml | 78 +++ .../templates/rolebinding.yaml | 17 + .../cluster-autoscaler/templates/secret.yaml | 21 + .../cluster-autoscaler/templates/service.yaml | 37 ++ .../templates/serviceaccount.yaml | 13 + .../templates/servicemonitor.yaml | 24 + .../charts/cluster-autoscaler/values.yaml | 378 ++++++++++++++ .../.helmignore | 23 + .../Chart.yaml | 24 + .../templates/_helpers.tpl | 62 +++ .../templates/ca-key-pair.yaml | 8 + .../templates/clusterissuer.yaml | 7 + .../values.yaml | 6 + .../charts/istio-operator/Chart.yaml | 12 + .../istio-operator/crds/crd-operator.yaml | 48 ++ .../istio-operator/files/gen-operator.yaml | 220 ++++++++ .../istio-operator/templates/clusterrole.yaml | 115 +++++ .../templates/clusterrole_binding.yaml | 13 + .../charts/istio-operator/templates/crds.yaml | 6 + .../istio-operator/templates/deployment.yaml | 51 ++ .../istio-operator/templates/namespace.yaml | 8 + .../istio-operator/templates/service.yaml | 16 + .../templates/service_account.yaml | 12 + .../charts/istio-operator/values.yaml | 29 ++ .../istio-peerauthentication/.helmignore | 23 + .../istio-peerauthentication/Chart.yaml | 24 + .../templates/_helpers.tpl | 62 +++ .../templates/peerauthentication.yaml | 9 + .../istio-peerauthentication/values.yaml | 0 .../charts/istio-profile/.helmignore | 23 + .../charts/istio-profile/Chart.yaml | 6 + .../istio-profile/templates/_helpers.tpl | 62 +++ .../templates/istiooperator.yaml | 175 +++++++ .../charts/istio-profile/values.yaml | 44 ++ .../.helmignore | 23 + .../self-signed-certificate-issuer/Chart.yaml | 6 + .../templates/_helpers.tpl | 62 +++ .../templates/ca-issuer.yaml | 8 + .../templates/selfsigned-ca.yaml | 17 + .../templates/selfsigned-clusterissuer.yaml | 7 + .../values.yaml | 0 .../vault-certificate-issuer/.helmignore | 23 + .../vault-certificate-issuer/Chart.yaml | 24 + .../templates/_helpers.tpl | 62 +++ .../templates/app-role-issuer.yaml | 18 + .../templates/app-role-secret.yaml | 10 + .../templates/service-account-issuer.yaml | 20 + .../templates/token-issuer.yaml | 15 + .../templates/token-secret.yaml | 10 + .../vault-certificate-issuer/values.yaml | 47 ++ .../cluster-autoscaler/.tf-control | 20 + .../cluster-autoscaler/.tf-control.tfrc | 24 + .../cluster-autoscaler/cluster-autoscaler.tf | 92 ++++ .../cluster-autoscaler/locals.tf | 17 + .../cluster-autoscaler/region.tf | 3 + .../test-cluster-autoscaling.json | 24 + .../cluster-autoscaler/tf-run.data | 44 ++ .../cluster-autoscaler/tf-run.destroy.data | 6 + .../variables.cluster-autoscaler.auto.tfvars | 21 + 
.../variables.cluster-autoscaler.tf | 40 ++ .../common-services.auto.tfvars | 2 + .../common-services/data.eks-subdirectory.tf | 1 + .../1.32/common-services/dns.tf | 40 ++ .../1.32/common-services/images.tf | 77 +++ .../kubeconfig.eks-subdirectory.tf | 1 + .../1.32/common-services/locals.tf | 17 + .../1.32/common-services/main.tf | 475 ++++++++++++++++++ .../1.32/common-services/parent_rs.tf | 1 + .../1.32/common-services/prefixes.tf | 1 + .../1.32/common-services/providers.tf | 1 + .../1.32/common-services/region.tf | 4 + .../1.32/common-services/tags.md | 20 + .../1.32/common-services/tf-run.data | 72 +++ .../1.32/common-services/tf-run.destroy.data | 9 + .../variables.common-services.auto.tfvars | 21 + .../variables.common-services.tf | 208 ++++++++ .../1.32/common-services/variables.eks.tf | 1 + .../variables.images.auto.tfvars | 163 ++++++ .../1.32/common-services/variables.images.tf | 38 ++ .../1.32/common-services/version.tf | 1 + .../1.32/common-services/versions.tf | 1 + .../1.32/create-iam-config.sh | 63 +++ .../1.32/data.eks-main.tf | 18 + .../1.32/dns-zone.route53-profile.tf | 34 ++ .../full-cluster-tf-upgrade/1.32/dns-zone.tf | 237 +++++++++ .../1.32/dns-zone.tf.dmz | 179 +++++++ .../1.32/ebs-encryption.tf | 108 ++++ .../1.32/efs/.tf-control | 20 + .../1.32/efs/.tf-control.tfrc | 24 + .../1.32/efs/README.efs.md | 81 +++ .../1.32/efs/README.md | 164 ++++++ .../full-cluster-tf-upgrade/1.32/efs/addon.tf | 15 + .../1.32/efs/data.eks-subdirectory.tf | 1 + .../full-cluster-tf-upgrade/1.32/efs/ecr.tf | 70 +++ .../full-cluster-tf-upgrade/1.32/efs/efs.tf | 27 + .../1.32/efs/kubeconfig.eks-subdirectory.tf | 1 + .../1.32/efs/locals.tf | 17 + .../1.32/efs/parent_rs.tf | 1 + .../1.32/efs/persistent-volume.tf | 19 + .../1.32/efs/policy.tf | 87 ++++ .../1.32/efs/prefixes.tf | 1 + .../1.32/efs/providers.tf | 1 + .../1.32/efs/region.tf | 4 + .../full-cluster-tf-upgrade/1.32/efs/role.tf | 53 ++ .../1.32/efs/storage-class.tf | 17 + .../1.32/efs/tf-run.data | 31 ++ .../1.32/efs/tf-run.destroy.data | 6 + .../1.32/efs/variables.efs.tf | 37 ++ .../1.32/efs/variables.eks.tf | 1 + .../1.32/efs/version.tf | 1 + .../1.32/efs/versions.tf | 1 + .../1.32/eks-console-access.tf | 71 +++ .../full-cluster-tf-upgrade/1.32/group.tf | 13 + .../full-cluster-tf-upgrade/1.32/images.yml | 133 +++++ .../1.32/import.tf.off | 4 + .../1.32/includes.d/README.md | 30 ++ .../1.32/includes.d/data.eks-main.tf | 18 + .../1.32/includes.d/data.eks-subdirectory.tf | 15 + .../1.32/includes.d/kubeconfig.eks-main.tf | 29 ++ .../includes.d/kubeconfig.eks-subdirectory.tf | 29 ++ .../1.32/includes.d/parent_rs.tf | 4 + .../1.32/irsa-roles/.tf-control | 20 + .../1.32/irsa-roles/.tf-control.tfrc | 24 + .../1.32/irsa-roles/README.md | 64 +++ .../1.32/irsa-roles/data.eks-subdirectory.tf | 1 + .../1.32/irsa-roles/parent_rs.tf | 1 + .../1.32/irsa-roles/prefixes.tf | 1 + .../1.32/irsa-roles/providers.tf | 1 + .../1.32/irsa-roles/region.tf | 4 + .../1.32/irsa-roles/tf-run.data | 13 + .../1.32/irsa-roles/tf-run.destroy.data | 9 + .../1.32/irsa-roles/variables.eks.tf | 1 + .../irsa-roles/variables.irsa.auto.tfvars | 3 + .../1.32/irsa-roles/variables.irsa.tf | 14 + .../1.32/irsa-roles/variables.tags.tf | 1 + .../1.32/irsa-roles/version.tf | 1 + .../1.32/irsa-roles/versions.tf | 1 + .../1.32/kubeconfig.eks-main.tf | 29 ++ .../full-cluster-tf-upgrade/1.32/locals.tf | 7 + examples/full-cluster-tf-upgrade/1.32/main.tf | 246 +++++++++ examples/full-cluster-tf-upgrade/1.32/oidc.tf | 32 ++ .../full-cluster-tf-upgrade/1.32/outputs.tf | 63 +++ 
.../full-cluster-tf-upgrade/1.32/policy.tf | 185 +++++++ .../full-cluster-tf-upgrade/1.32/prefixes.tf | 34 ++ .../full-cluster-tf-upgrade/1.32/providers.tf | 25 + .../full-cluster-tf-upgrade/1.32/region.tf | 4 + examples/full-cluster-tf-upgrade/1.32/role.tf | 175 +++++++ examples/full-cluster-tf-upgrade/1.32/saml.tf | 26 + .../1.32/securitygroup.ports.tf | 128 +++++ .../1.32/securitygroup.tf | 184 +++++++ .../1.32/settings.auto.tfvars.example | 16 + .../1.32/setup-directory.tf | 9 + .../1.32/templates/node-private-userdata.tmpl | 9 + .../full-cluster-tf-upgrade/1.32/tf-run.data | 79 +++ .../1.32/tf-run.destroy.data | 37 ++ .../1.32/variables.addons.tf | 120 +++++ .../1.32/variables.dns.auto.tfvars | 28 ++ .../1.32/variables.dns.tf | 21 + .../1.32/variables.eks.tf | 80 +++ .../1.32/variables.route53.tf | 24 + .../1.32/variables.route53.tf.lab | 13 + .../1.32/variables.tags.tf | 9 + .../1.32/variables.username.tf | 5 + .../full-cluster-tf-upgrade/1.32/version.tf | 4 + .../full-cluster-tf-upgrade/1.32/versions.tf | 41 ++ 264 files changed, 11255 insertions(+) create mode 100644 examples/full-cluster-tf-upgrade/1.31-1.32.diffs create mode 100644 examples/full-cluster-tf-upgrade/1.32/.gitignore create mode 100644 examples/full-cluster-tf-upgrade/1.32/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.32/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.32/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/ROLES.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/README.addons.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/README.ebs.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/addon_cloudwatch.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/addon_coredns.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/addon_ebs-csi.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/addon_kube-proxy.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/addon_vpc-cni.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/addons.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/addons/data.eks-subdirectory.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/addons/kubeconfig.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/locals.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/addons/parent_rs.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/addons/prefixes.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/addons/providers.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/role.tf.off create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.32/addons/tf-run.destroy.data create mode 120000 examples/full-cluster-tf-upgrade/1.32/addons/variables.addons.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/addons/variables.eks.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/addons/version.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/applications/tf-run.data create mode 100644 
examples/full-cluster-tf-upgrade/1.32/applications/tf-run.destroy.data create mode 100644 examples/full-cluster-tf-upgrade/1.32/aws-auth/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.32/aws-auth/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.32/aws-auth/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/aws-auth/aws-auth.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.32/aws-auth/config_map.aws-auth.yaml.tpl create mode 120000 examples/full-cluster-tf-upgrade/1.32/aws-auth/data.eks-subdirectory.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/aws-auth/kubeconfig.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/aws-auth/patch-aws-auth.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/aws-auth/prefixes.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/aws-auth/providers.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/aws-auth/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/aws-auth/settings.aws-auth.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/aws-auth/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.32/aws-auth/tf-run.destroy.data create mode 100644 examples/full-cluster-tf-upgrade/1.32/aws-auth/variables.aws-auth.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/aws-auth/variables.eks.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/aws-auth/version.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/aws-auth/versions.tf create mode 100755 examples/full-cluster-tf-upgrade/1.32/bin/copy_image.sh create mode 100755 examples/full-cluster-tf-upgrade/1.32/bin/fix-terminating-namespace.sh create mode 100755 examples/full-cluster-tf-upgrade/1.32/bin/remove-ecr.sh create mode 100755 examples/full-cluster-tf-upgrade/1.32/bin/show-k8s-things.sh create mode 100644 examples/full-cluster-tf-upgrade/1.32/charts-images.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/charts.yml create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/RESULTS.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/cm.tf.off create mode 120000 examples/full-cluster-tf-upgrade/1.32/cluster-roles/data.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/dba-clusterrole.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/dba-rolebinding.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/dba.iam.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/deployer-clusterrole.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/deployer-rolebinding.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/deployer.iam.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/cluster-roles/kubeconfig.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/locals.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/main.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/cluster-roles/prefixes.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/cluster-roles/providers.tf create mode 100644 
examples/full-cluster-tf-upgrade/1.32/cluster-roles/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/variables.auto.tfvars create mode 120000 examples/full-cluster-tf-upgrade/1.32/cluster-roles/variables.eks.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/cluster-roles/variables.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/cluster-roles/version.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/cluster-roles/versions.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/.gitignore create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/README.output.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/cert-manager-issuer.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/.helmignore create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/Chart.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/NOTES.txt create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/_helpers.tpl create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/deployment.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/pdb.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/role.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/secret.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/service.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/serviceaccount.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/values.yaml create mode 100644 
examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/.helmignore create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/Chart.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/values.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/Chart.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/crds/crd-operator.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/files/gen-operator.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/clusterrole.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/clusterrole_binding.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/crds.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/deployment.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/namespace.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/service.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/service_account.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/values.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/.helmignore create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/Chart.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/templates/_helpers.tpl create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/values.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/.helmignore create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/Chart.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/templates/_helpers.tpl create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/templates/istiooperator.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/values.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/.helmignore create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/Chart.yaml create mode 100644 
examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/values.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/.helmignore create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/Chart.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/values.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/cluster-autoscaler.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/locals.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/test-cluster-autoscaling.json create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/tf-run.destroy.data create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/variables.cluster-autoscaler.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/variables.cluster-autoscaler.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/common-services.auto.tfvars create mode 120000 examples/full-cluster-tf-upgrade/1.32/common-services/data.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/dns.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/images.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/common-services/kubeconfig.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/locals.tf create mode 100644 
examples/full-cluster-tf-upgrade/1.32/common-services/main.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/common-services/parent_rs.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/common-services/prefixes.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/common-services/providers.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/tags.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/tf-run.destroy.data create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/variables.common-services.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/variables.common-services.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/common-services/variables.eks.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/variables.images.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.32/common-services/variables.images.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/common-services/version.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/common-services/versions.tf create mode 100755 examples/full-cluster-tf-upgrade/1.32/create-iam-config.sh create mode 100644 examples/full-cluster-tf-upgrade/1.32/data.eks-main.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/dns-zone.route53-profile.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/dns-zone.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/dns-zone.tf.dmz create mode 100644 examples/full-cluster-tf-upgrade/1.32/ebs-encryption.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/README.efs.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/addon.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/efs/data.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/ecr.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/efs.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/efs/kubeconfig.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/locals.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/efs/parent_rs.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/persistent-volume.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/policy.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/efs/prefixes.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/efs/providers.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/role.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/storage-class.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/tf-run.destroy.data create mode 100644 examples/full-cluster-tf-upgrade/1.32/efs/variables.efs.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/efs/variables.eks.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/efs/version.tf create mode 120000 
examples/full-cluster-tf-upgrade/1.32/efs/versions.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/eks-console-access.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/group.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/images.yml create mode 100644 examples/full-cluster-tf-upgrade/1.32/import.tf.off create mode 100644 examples/full-cluster-tf-upgrade/1.32/includes.d/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.32/includes.d/data.eks-main.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/includes.d/data.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/includes.d/kubeconfig.eks-main.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/includes.d/kubeconfig.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/includes.d/parent_rs.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/irsa-roles/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.32/irsa-roles/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.32/irsa-roles/README.md create mode 120000 examples/full-cluster-tf-upgrade/1.32/irsa-roles/data.eks-subdirectory.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/irsa-roles/parent_rs.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/irsa-roles/prefixes.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/irsa-roles/providers.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/irsa-roles/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/irsa-roles/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.32/irsa-roles/tf-run.destroy.data create mode 120000 examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.eks.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.irsa.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.irsa.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.tags.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/irsa-roles/version.tf create mode 120000 examples/full-cluster-tf-upgrade/1.32/irsa-roles/versions.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/kubeconfig.eks-main.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/locals.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/main.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/oidc.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/outputs.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/policy.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/prefixes.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/providers.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/role.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/saml.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/securitygroup.ports.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/securitygroup.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/settings.auto.tfvars.example create mode 100644 examples/full-cluster-tf-upgrade/1.32/setup-directory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/templates/node-private-userdata.tmpl create mode 100644 examples/full-cluster-tf-upgrade/1.32/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.32/tf-run.destroy.data create mode 100644 
examples/full-cluster-tf-upgrade/1.32/variables.addons.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/variables.dns.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.32/variables.dns.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/variables.eks.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/variables.route53.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/variables.route53.tf.lab create mode 100644 examples/full-cluster-tf-upgrade/1.32/variables.tags.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/variables.username.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/version.tf create mode 100644 examples/full-cluster-tf-upgrade/1.32/versions.tf diff --git a/CHANGELOG.md b/CHANGELOG.md index 3debee9..0578c51 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -92,3 +92,6 @@ * 2.5.0 -- 2025-05-21 - istio update to 1.26.0 + +* 2.6.0 -- 2025-11-24 + - create new 1.32, update charts and images diff --git a/examples/full-cluster-tf-upgrade/1.31-1.32.diffs b/examples/full-cluster-tf-upgrade/1.31-1.32.diffs new file mode 100644 index 0000000..6990b8d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.31-1.32.diffs @@ -0,0 +1,140 @@ +diff -Nuar 1.31/addons/variables.addons.tf 1.32/addons/variables.addons.tf +--- 1.31/addons/variables.addons.tf 2025-01-02 10:50:40.202398021 -0500 ++++ 1.32/addons/variables.addons.tf 2025-11-24 13:33:33.102689768 -0500 +@@ -83,6 +83,27 @@ + "amazon-cloudwatch-observability" = "v2.6.0-eksbuild.1" + "eks-pod-identity-agent" = "v1.3.4-eksbuild.1" + } ++ "1.32" = { ++ "coredns" = "v1.11.4-eksbuild.24" ++ "kube-proxy" = "v1.32.9-eksbuild.2" ++ "vpc-cni" = "v1.20.5-eksbuild.1" ++ "aws-ebs-csi-driver" = "v1.53.0-eksbuild.1" ++ "aws-efs-csi-driver" = "v2.1.15-eksbuild.1" ++ "adot" = "v0.131.0-eksbuild.1" ++ "snapshot-controller" = "v8.3.0-eksbuild.1" ++ "amazon-cloudwatch-observability" = "v4.7.0-eksbuild.1" ++ "eks-pod-identity-agent" = "v1.3.10-eksbuild.1" ++ # "external-dns" = "v0.20.0-eksbuild.1" ++ # "prometheus-node-exporter" = "v1.10.2-eksbuild.4" ++ # "cert-manager" = "v1.19.1-eksbuild.2" ++ # "aws-fsx-csi-driver" = "v1.6.0-eksbuild.1" ++ # "aws-mountpoint-s3-csi-driver" = "v2.2.0-eksbuild.1" ++ # "aws-secrets-store-csi-driver-provider" = "v2.1.1-eksbuild.1" ++ # "eks-node-monitoring-agent" = "v1.4.2-eksbuild.1" ++ # "fluent-bit" = "v4.2.0-eksbuild.1" ++ # "aws-guardduty-agent" = "v1.12.1-eksbuild.2" ++ # "kube-state-metrics" = "v2.17.0-eksbuild.4" ++ } + } + } + +diff -Nuar 1.31/logs/fmt.20251124.1764009193.log 1.32/logs/fmt.20251124.1764009193.log +--- 1.31/logs/fmt.20251124.1764009193.log 1969-12-31 19:00:00.000000000 -0500 ++++ 1.32/logs/fmt.20251124.1764009193.log 2025-11-24 13:33:13.246629814 -0500 +@@ -0,0 +1,15 @@ ++# starting v1.11.0 action fmt file logs/fmt.20251124.1764009193.log stamp 20251124.1764009193 time 1764009193 ++# current_directory=/home/b/badra001/terraform-modules/aws-eks/examples/full-cluster-tf-upgrade/1.32 ++# git_repository=git@github.e.it.census.gov:terraform-modules/aws-eks.git ++# git_current_branch=tf-upgrade ++# terraform_version=Terraform v1.13.5 ++# TFCONTROL=/home/b/badra001/terraform-modules/aws-eks/examples/full-cluster-tf-upgrade/1.32/.tf-control ++# TF_CLI_CONFIG_FILE=/home/b/badra001/terraform-modules/aws-eks/examples/full-cluster-tf-upgrade/1.32/.tf-control.tfrc ++# TFARGS="" TFNOCLOR= TFNOLOG= TFNOPROXY= ++# env TF_VAR_ variables ++# 
TF_VAR_os_environment={"pwd":"/home/b/badra001/terraform-modules/aws-eks/examples/full-cluster-tf-upgrade/1.32"} ++# TF_VAR_os_username=badra001 ++ ++variables.addons.tf ++# ending v1.11.0 action fmt file logs/fmt.20251124.1764009193.log stamp 20251124.1764009193 start 1764009193 end 1764009193 elapsed 0 ++ +diff -Nuar 1.31/logs/fmt.20251124.1764009212.log 1.32/logs/fmt.20251124.1764009212.log +--- 1.31/logs/fmt.20251124.1764009212.log 1969-12-31 19:00:00.000000000 -0500 ++++ 1.32/logs/fmt.20251124.1764009212.log 2025-11-24 13:33:33.115691116 -0500 +@@ -0,0 +1,15 @@ ++# starting v1.11.0 action fmt file logs/fmt.20251124.1764009212.log stamp 20251124.1764009212 time 1764009212 ++# current_directory=/home/b/badra001/terraform-modules/aws-eks/examples/full-cluster-tf-upgrade/1.32 ++# git_repository=git@github.e.it.census.gov:terraform-modules/aws-eks.git ++# git_current_branch=tf-upgrade ++# terraform_version=Terraform v1.13.5 ++# TFCONTROL=/home/b/badra001/terraform-modules/aws-eks/examples/full-cluster-tf-upgrade/1.32/.tf-control ++# TF_CLI_CONFIG_FILE=/home/b/badra001/terraform-modules/aws-eks/examples/full-cluster-tf-upgrade/1.32/.tf-control.tfrc ++# TFARGS="" TFNOCLOR= TFNOLOG= TFNOPROXY= ++# env TF_VAR_ variables ++# TF_VAR_os_environment={"pwd":"/home/b/badra001/terraform-modules/aws-eks/examples/full-cluster-tf-upgrade/1.32"} ++# TF_VAR_os_username=badra001 ++ ++variables.addons.tf ++# ending v1.11.0 action fmt file logs/fmt.20251124.1764009212.log stamp 20251124.1764009212 start 1764009212 end 1764009213 elapsed 1 ++ +diff -Nuar 1.31/README.md 1.32/README.md +--- 1.31/README.md 2025-01-03 12:10:42.242314393 -0500 ++++ 1.32/README.md 2025-11-24 13:25:32.187807183 -0500 +@@ -1,6 +1,6 @@ +-# EKS Full Cluster Example 1.31 ++# EKS Full Cluster Example 1.32 + +-This is for deploying an EKS cluster with 1.31. ++This is for deploying an EKS cluster with 1.32. 
+ + ## About + +@@ -75,7 +75,7 @@ + + ```hcl + cluster_name = "org-project-env" +-cluster_version = "1.31" ++cluster_version = "1.32" + region = "us-gov-east-1" + domain = "org-project-env.env.domain.census.gov" + contact_email = "org-project-env-group-mailing-list@census.gov" +@@ -463,3 +463,6 @@ + - remove old certificate stuff + - updated to 1.31 + - still needs some updates ++ ++- 1.2.0 -- 2025-11-24 ++ - update to 1.32 +diff -Nuar 1.31/settings.auto.tfvars.example 1.32/settings.auto.tfvars.example +--- 1.31/settings.auto.tfvars.example 2025-01-17 11:49:06.912602345 -0500 ++++ 1.32/settings.auto.tfvars.example 2025-11-24 13:25:37.999409250 -0500 +@@ -3,7 +3,7 @@ + # domain_name is removed + + cluster_name = "{org}-{project}-{env}" +-cluster_version = "1.31" ++cluster_version = "1.32" + region = "us-gov-east-1" ## set to proper region where this will be deployed + contact_email = "" ## enter valid @census.gov email for the customer's group contact list + domain = "NAME" ## set to correct domain if using a shared vpc +diff -Nuar 1.31/variables.addons.tf 1.32/variables.addons.tf +--- 1.31/variables.addons.tf 2025-01-02 10:50:40.202398021 -0500 ++++ 1.32/variables.addons.tf 2025-11-24 13:33:33.102689768 -0500 +@@ -83,6 +83,27 @@ + "amazon-cloudwatch-observability" = "v2.6.0-eksbuild.1" + "eks-pod-identity-agent" = "v1.3.4-eksbuild.1" + } ++ "1.32" = { ++ "coredns" = "v1.11.4-eksbuild.24" ++ "kube-proxy" = "v1.32.9-eksbuild.2" ++ "vpc-cni" = "v1.20.5-eksbuild.1" ++ "aws-ebs-csi-driver" = "v1.53.0-eksbuild.1" ++ "aws-efs-csi-driver" = "v2.1.15-eksbuild.1" ++ "adot" = "v0.131.0-eksbuild.1" ++ "snapshot-controller" = "v8.3.0-eksbuild.1" ++ "amazon-cloudwatch-observability" = "v4.7.0-eksbuild.1" ++ "eks-pod-identity-agent" = "v1.3.10-eksbuild.1" ++ # "external-dns" = "v0.20.0-eksbuild.1" ++ # "prometheus-node-exporter" = "v1.10.2-eksbuild.4" ++ # "cert-manager" = "v1.19.1-eksbuild.2" ++ # "aws-fsx-csi-driver" = "v1.6.0-eksbuild.1" ++ # "aws-mountpoint-s3-csi-driver" = "v2.2.0-eksbuild.1" ++ # "aws-secrets-store-csi-driver-provider" = "v2.1.1-eksbuild.1" ++ # "eks-node-monitoring-agent" = "v1.4.2-eksbuild.1" ++ # "fluent-bit" = "v4.2.0-eksbuild.1" ++ # "aws-guardduty-agent" = "v1.12.1-eksbuild.2" ++ # "kube-state-metrics" = "v2.17.0-eksbuild.4" ++ } + } + } + diff --git a/examples/full-cluster-tf-upgrade/1.32/.gitignore b/examples/full-cluster-tf-upgrade/1.32/.gitignore new file mode 100644 index 0000000..010b80b --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/.gitignore @@ -0,0 +1,5 @@ +kube.config +ecr-login.txt +setup/ec2-ssh-eks-* +!setup/ec2-ssh-eks-*.pub +logs diff --git a/examples/full-cluster-tf-upgrade/1.32/.tf-control b/examples/full-cluster-tf-upgrade/1.32/.tf-control new file mode 100644 index 0000000..280f449 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/.tf-control @@ -0,0 +1,20 @@ +# .tf-control +# allows for setting a specific command to be used for tf-* commands under this git repo +# see tf-control.sh help for more info + +TFCONTROL_VERSION="1.0.5" + +TFCOMMAND="terraform_latest" +# TF_CLI_CONFIG_FILE=PATH-TO-FILE/.tf-control.tfrc +# TFARGS="" +# TFNOLOG="" +# TFNOCOLOR="" + +# use the following to force a specific version. An upgrade of an existing 0.12.31 to 1.x +# needs you to cycle through 0.13.17, 0.14.11, and then latest (0.15.5 not needed). Other +# steps in between. 
See https://github.e.it.census.gov/terraform/support/tree/master/docs/how-to/terraform-upgrade for details
+#
+#TFCOMMAND="terraform_0.12.31"
+#TFCOMMAND="terraform_0.13.7"
+#TFCOMMAND="terraform_0.14.11"
+#TFCOMMAND="terraform_0.15.5"
diff --git a/examples/full-cluster-tf-upgrade/1.32/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.32/.tf-control.tfrc
new file mode 100644
index 0000000..7425488
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/.tf-control.tfrc
@@ -0,0 +1,24 @@
+TFCONTROL_VERSION="1.0.5"
+
+# https://www.terraform.io/docs/cli/config/config-file.html
+plugin_cache_dir = "/data/terraform/terraform.d/plugin-cache"
+#disable_checkpoint = true
+
+provider_installation {
+#  filesystem_mirror {
+#    path = "/apps/terraform/terraform.d/providers"
+#    include = [ "*/*/*" ]
+#  }
+  filesystem_mirror {
+    path = "/data/terraform/terraform.d/providers"
+    include = [ "*/*/*" ]
+  }
+#  filesystem_mirror {
+#    path = "/apps/terraform/terraform.d/providers"
+#    include = [ "external.terraform.census.gov/*/*" ]
+#  }
+  direct {
+    include = [ "*/*/*" ]
+  }
+}
+
diff --git a/examples/full-cluster-tf-upgrade/1.32/README.md b/examples/full-cluster-tf-upgrade/1.32/README.md
new file mode 100644
index 0000000..2b0685e
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/README.md
@@ -0,0 +1,468 @@
+# EKS Full Cluster Example 1.32
+
+This is for deploying an EKS cluster with 1.32.
+
+## About
+
+There are a number of steps to end up with a cluster.
+
+1. From the main repository, in the same `vpc/{region}/vpc{number}` directory
+    1. [Tag subnets](#subnet-tagging) in the main repository (before creating the nodegroup)
+1. In the submodule repository, in the `vpc/{region}/vpc{number}/apps/{clustername}` directory
+    1. Update `settings.auto.tfvars`
+    1. Update `includes.d/parent_rs.tf`
+1. Terraform [Automated Setup](#terraform-automated-setup)
+1. Optionally, follow the Terraform step-by-step setup, which is essentially the same as following the automated tf-run.data
+    1. Initialize [Cluster Main](#initialize-cluster-main) directory
+    1. Create [policies](#policies)
+    1. Create [EC2 Keypair](#keypair-creation)
+    1. Finish [cluster setup](#cluster-creation)
+    1. Setup [aws-auth](#setup-aws-auth)
+    1. Setup [EFS](#setup-efs)
+    1. Setup [Common Services](#common-services)
+1. [Access to the cluster](#access-to-the-cluster)
+1. [Cluster Setup](#cluster-setup)
+
+## Post-Setup Tasks
+
+1. Connect DNS zone from on-prem to Route53 Resolvers with a forwarder
+
+## Subnet Tagging
+
+A tag needs to be added to the subnet(s) where the cluster will run. We haven't yet figured out how to make
+this more automatic.
+
+The file to update is `variables.subnets.auto.tfvars`, in this case `vpc/east/vpc3/variables.subnets.auto.tfvars`:
+
+```hcl
+private_subnets = [
+  { base_cidr = "10.188.18.0/23", label = "private-lb", bits = 2, private = true,
+    tags = { "kubernetes.io/role/internal-elb" = 1 }
+  },
+  { base_cidr = "10.188.17.0/24", label = "endpoints", bits = 2, private = true, tags = {} },
+  { base_cidr = "10.188.20.0/23", label = "db", bits = 2, private = true, tags = {} },
+  { base_cidr = "10.188.22.0/23", label = "apps", bits = 2, private = true, tags = {} },
+  { base_cidr = "10.188.24.0/21", label = "container", bits = 2, private = true,
+    tags = {
+      "kubernetes.io/cluster/org-project-env" = "shared"
+    },
+  }
+# space all used up
+]
+```
+
+We add the tag `"kubernetes.io/cluster/{cluster_name}" = "shared"` in order for the node groups to pick up the
+cluster subnets. This is on the new `container` subnet.
+
+For creating a service which uses load balancers (ELB, ALB, or NLB), the tag
+`"kubernetes.io/role/internal-elb" = 1` shown above is needed. This is only one tag for all EKS, not one per cluster, and it should apply
+to the subnet(s) used for load balancing. A separate set of subnets exists for load balancing, with a name including `private-lb`.
+
+## Update the settings.auto.tfvars file
+
+Set the appropriate values in the `settings.auto.tfvars` file. An example starter file is at `settings.auto.tfvars.example`.
+If you are deploying into an account using a shared VPC, you **must** define the domain name. Please be sure the domain
+name exists. To do so, check the output of `dig`. It should come back with a value with `awsdns` in the response.
+
+```console
+% dig +short in soa myenvironment.mydomain.csp1.census.gov
+% dig +short in soa dev.geo.csp1.census.gov
+ns-0.awsdns-us-gov-00.com. awsdns-hostmaster.amazon.com. 1 7200 900 1209600 86400
+```
+
+Here is a sample file:
+
+```hcl
+cluster_name = "org-project-env"
+cluster_version = "1.32"
+region = "us-gov-east-1"
+domain = "org-project-env.env.domain.census.gov"
+contact_email = "org-project-env-group-mailing-list@census.gov"
+eks_instance_disk_size = 40
+eks_vpc_name = "vpc_full_name"
+eks_instance_type = "t3.xlarge"
+eks_ng_desire_size = 3
+eks_ng_max_size = 15
+eks_ng_min_size = 3
+subnets_name = "*-subnet_label-*"
+```
+
+You need to change these values:
+
+* cluster_name: put in the proper org, project, and environment. Cluster names should not be replicated across the environment.
+These are tracked in the repo [cloud-information/aws/documentation/containers/](https://github.e.it.census.gov/terraform/cloud-information/blob/master/documentation/dns.md).
+* region: include the correct region. This really is a duplicate of the `region` variable, so it may be removed in the future.
+* domain: this is the domain name of the cluster, consisting of the cluster name and the proper domain name for the environment/VPC.
+* contact_email: put in an email address of a group responsible for this cluster.
+* eks_instance_disk_size: this should default to 40GB for most use-cases; only change this if you have a special requirement and have exception approval.
+* eks_vpc_name: replace *vpc_full_name* with the appropriate vpc full name. This is used to find the vpc ID.
+* subnets_name: replace *subnet_label* with the label of the subnets allocated to providing ENIs for the cluster node group and containers; often `container` or `task`
+
+All the others are subject to your configuration. They are a good starting point, but can vary.
+
+## Update the includes.d/parent_rs.tf file
+
+```hcl
+locals {
+  parent_rs = data.terraform_remote_state.vpc_{region}_vpc{number}_apps_eks-{cluster-name}.outputs
+}
+```
+
+* region: west or east, dependent on which region the VPC is in
+* number: incremental VPC number
+* cluster-name: cluster name, the same as used in the `settings.auto.tfvars` file above
+
+# Terraform Automated Setup
+
+A `tf-run.data` file exists here, so the simplest way to implement is with the `tf-run.sh` script.
+
+* copy the `remote_state.yml` from the parent and update `directory` to be the current directory
+* run the tf-run.sh
+
+```console
+% tf-run.sh apply
+```
+
+* example of the `tf-run.sh` steps
+
+This is part of a larger cluster configuration, so at the end of the run it indicates another directory
+to visit when done.
+
+```console
+% tf-run.sh list
+* running action=plan
+* START: tf-run.sh v1.1.2 start=1636562594 end= logfile=logs/run.plan.20211110.1636562594.log (not-created)
+* reading from tf-run.data
+* read 22 entries from tf-run.data
+> list
+** START: start=1636562594
+* 1 COMMENT> make sure the private-lb subnet and container subnets are tagged properly (see README.md)
+* 2 STOP> then continue with at step 3
+* 3 COMMAND> tf-directory-setup.py -l none -f
+* 4 COMMAND> setup-new-directory.sh
+* 5 COMMAND> tf-init -upgrade
+* 6 POLICY> (*.tf) aws_iam_policy.nlb-policy aws_iam_policy.cloudwatch-policy aws_iam_policy.cluster-admin-policy aws_iam_policy.cluster-admin_assume_policy
+* 6 tf-plan -target=aws_iam_policy.nlb-policy -target=aws_iam_policy.cloudwatch-policy -target=aws_iam_policy.cluster-admin-policy -target=aws_iam_policy.cluster-admin_assume_policy
+* 7 COMMENT> EC2 key pairs
+* 8 tf-plan -target=null_resource.generate_keypair
+* 9 tf-plan -target=aws_key_pair.cluster_keypair
+* 10 COMMAND> tf-directory-setup.py -l s3
+* 11 COMMENT> be sure to add the setup/ec2-ssh-eks-{cluster} to git-secret, git-secret hide, add the setup/*secret and setup/*pub got git, and commit the entirety of the change
+* 12 tf-plan
+* 13 COMMENT> setup the includes.d/parent_rs.tf according to the includes.d/README
+* 14 STOP>
+* 15 COMMENT> cd aws-auth and tf-run.sh apply
+* 16 STOP>
+* 17 COMMENT> cd efs and tf-run.sh apply
+* 18 STOP>
+* 19 COMMENT> cd irsa-roles and tf-run.sh apply
+* 20 STOP>
+* 21 COMMENT> cd common-services and tf-run.sh apply
+* 22 STOP>
+** END: start=1636562594 end=1636562594 elapsed=0 logfile=logs/run.plan.20211110.1636562594.log (not-created)
+```
+
+It is highly recommended to use the `tf-run.sh` approach. This has a number of stopping points along the way with comments telling you what to do.
+It also directs you to the subdirectories to visit to complete the setup.
+
+# Terraform Manual Setup
+
+## Initialize Cluster Main
+
+We need to set up the main directory for the cluster. Be sure `remote_state.yml` is correct. Then:
+
+```shell
+tf-directory-setup.py -l none
+tf-init
+```
+
+## Policies
+
+First, we have to create the two policies. The roles will not get created until they do.
+
+```shell
+TFTARGET=$(grep ^res.*iam_policy *.tf |awk '{print "-target=" $2 "." $3}'|sed -e 's/"//g')
+tf-plan $TFTARGET
+tf-apply $TFTARGET
+unset TFTARGET
+```
+
+## Keypair Creation
+
+We need to create the SSH key, which then allows for the public key to be uploaded.
+
+```shell
+tf-plan -target=null_resource.generate_keypair
+tf-apply -target=null_resource.generate_keypair
+
+tf-plan -target=aws_key_pair.cluster_keypair
+tf-apply -target=aws_key_pair.cluster_keypair
+```
+
+## Cluster Creation
+
+Once created, we can run the rest of the code:
+
+```shell
+tf-plan
+tf-apply
+```
+
+Finalize by linking to the remote state file:
+
+```shell
+tf-directory-setup.py -l s3
+```
+
+## Setup aws-auth
+
+Be sure `remote_state.yml` is correct. Examine the `settings.aws-auth.tfvars` and replace any remote state references with the proper
+objects. There is at least one, a `rolearn`. You can get the remote state path with
+
+```shell
+grep ^data remote_state.*{clustername}.tf | awk '{print $1 "." $2 "." $3}' |sed -e 's/"//g'
+```
+
+Next, we set up the remote state files, link to the parent remote state, and initialize terraform.
+
+```shell
+tf-directory-setup.py -l none
+# should only be one file here
+ln -s ../remote_state.applications_apps-adsd-eks_vpc_east_vpc2_apps_*.tf .
+setup-new-directory.sh
+tf-init
+```
+
+Then, we can plan, apply, and finalize:
+
+```shell
+tf-plan
+tf-apply
+tf-directory-setup.py -l s3
+```
+
+## Setup EFS
+
+Be sure `remote_state.yml` is correct. Examine the `main.tf` and replace any remote state references with the proper
+objects. You can find where they are used:
+
+```console
+% grep data.terraform_remote_state *.tf
+main.tf: vpc_id = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_vpc_id
+main.tf: subnet_ids = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_subnet_ids
+main.tf: cluster_worker_sg_id = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_worker_sg_id
+main.tf: oidc_provider_url = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.oidc_provider_url
+main.tf: oidc_provider_arn = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.oidc_provider_arn
+```
+
+Find the value to replace these with:
+
+```shell
+grep ^data remote_state.*{clustername}.tf | awk '{print $1 "." $2 "." $3}' |sed -e 's/"//g'
+```
+
+Next, we set up the remote state files, link to the parent remote state, and initialize terraform.
+
+```shell
+tf-directory-setup.py -l none
+# should only be one file here
+ln -s ../remote_state.applications_apps-adsd-eks_vpc_east_vpc2_apps_*.tf .
+setup-new-directory.sh
+```
+
+Then, we have to create the policies. The roles will not get created until they do.
+
+```shell
+TFTARGET=$(grep ^res.*iam_policy *.tf |awk '{print "-target=" $2 "." $3}'|sed -e 's/"//g')
+tf-plan $TFTARGET
+tf-apply $TFTARGET
+unset TFTARGET
+```
+
+Finally, you can apply the rest:
+
+```shell
+tf-plan
+tf-apply
+```
+
+## Common Services
+
+### Certificate Authority
+
+This is now handled by the `acmpca-eks-cert-manager` submodule of `aws-certificate`, which uses the ACM PCA to obtain
+a signed certificate. No actions are needed to get a CSR signed by TCO through Remedy.
+
+### Rest of Setup
+
+```shell
+tf-plan
+tf-apply
+tf-directory-setup.py -l s3
+```
+
+## Access to the cluster
+
+There are two ways to access the cluster. One is from the AWS Console and the other is via the IAM account or role.
+
+The cluster access via the console is found in the EKS section, under *clusters*.
+
+For IAM access, one must have IAM account credentials configured in `$HOME/.aws/credentials` and `$HOME/.aws/config`. [Here](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html)
+are the docs, and we have an example below. Region is important, otherwise it defaults to `us-gov-west-1` and the STS call will fail.
+
+```script
+# $HOME/.aws/credentials
+[252960665057-ma6-gov]
+aws_access_key_id = ABCD1234...
+aws_secret_access_key = abcd5678...
+
+# $HOME/.aws/config
+[profile 252960665057-ma6-gov-eks-org-project-env]
+source_profile = 252960665057-ma6-gov
+region = us-gov-east-1
+role_arn = arn:aws-us-gov:iam::252960665057:role/r-eks-org-project-env-cluster-admin
+role_session_name = badra001
+```
+
+With this configuration, using the profile `252960665057-ma6-gov` gives you the normal IAM access:
+
+```console
+% aws --profile 252960665057-ma6-gov sts get-caller-identity
+{
+    "UserId": "AIDATVZNBNXQ5UPHMBGPY",
+    "Account": "252960665057",
+    "Arn": "arn:aws-us-gov:iam::252960665057:user/a-badra001"
+}
+```
+
+Using the other profile will use the source profile (which has to have permission to assume the role), the role arn, and a session
+name mapping it back to your Census username (JBID).
+
+```console
+% aws --profile 252960665057-ma6-gov-eks-org-project-env sts get-caller-identity
+{
+    "UserId": "AROATVZNBNXQ7AV7W2ISZ:badra001",
+    "Account": "252960665057",
+    "Arn": "arn:aws-us-gov:sts::252960665057:assumed-role/r-eks-org-project-env-cluster-admin/badra001"
+}
+```
+
+# Cluster Setup
+
+## Download Configuration
+
+Now that the cluster is created, we need the `kubectl` command, and we need to download the configuration.
+
+* get [kubectl](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html)
+
+```console
+% aws eks --profile $(get-profile) --region $(get-region) update-kubeconfig --name test2 --kubeconfig ./test2.kube.config
+Added new context arn:aws:eks:us-east-1:079788916859:cluster/test2 to /data/git-repos/terraform/079788916859-do2-cat_apps-adsd-eks/vpc/east-1/vpc4/apps/eks-test2/test2.kube.config
+% export KUBECONFIG=$(pwd)/test2.kube.config
+% kubectl get nodes
+NAME                            STATUS   ROLES    AGE   VERSION
+ip-10-194-24-49.ec2.internal    Ready    <none>   24m   v1.20.4-eks-6b7464
+ip-10-194-24-90.ec2.internal    Ready    <none>   24m   v1.20.4-eks-6b7464
+ip-10-194-25-120.ec2.internal   Ready    <none>   24m   v1.20.4-eks-6b7464
+ip-10-194-26-252.ec2.internal   Ready    <none>   24m   v1.20.4-eks-6b7464
+```
+
+## Authentication
+
+### Automated
+
+This is in the `aws-auth` subdirectory.
+
+```shell
+cd aws-auth
+tf-init
+tf-plan
+tf-apply
+```
+
+### Manual
+
+To allow users and roles to manipulate the cluster, we add to the `mapRoles` or `mapUsers` sections.
+
+```shell
+kubectl edit -n kube-system configmap/aws-auth
+```
+
+Add sections for `mapRoles`:
+
+```yaml
+  mapRoles: |
+    - rolearn: arn:aws:iam::079788916859:role/r-inf-cloud-admin
+      username: system:node:{{EC2PrivateDNSName}}
+      groups:
+        - system:bootstrappers
+        - system:nodes
+        - eks-console-dashboard-full-access-group
+```
+
+Add sections for `mapUsers`:
+
+```yaml
+  mapUsers: |
+    - userarn: arn:aws:iam::079788916859:user/u-zawac002
+      username: admin
+      groups:
+        - system:masters
+```
+
+We will likely want to do this through templating (see the sketch after the following lists).
+
+* users
+  * arn:aws:iam::079788916859:user/u-badra001
+  * arn:aws:iam::079788916859:user/u-ashle001
+  * arn:aws:iam::079788916859:user/u-mcgin314
+  * arn:aws:iam::079788916859:user/u-sall0002
+  * arn:aws:iam::079788916859:user/u-zawac002
+* roles
+  * arn:aws:iam::079788916859:role/r-inf-cloud-admin
+  * arn:aws:iam::079788916859:role/r-adsd-cumulus
+  * arn:aws:iam::079788916859:role/r-adsd-eks
+  * arn:aws:iam::079788916859:role/r-adsd-tools
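+
+As a rough illustration of the templating idea, here is a minimal, hypothetical Terraform sketch (not the
+actual implementation in the `aws-auth` subdirectory) that patches the ConfigMap with the `kubernetes`
+provider; the ARNs are the example values from above:
+
+```hcl
+# Hypothetical sketch only: patch the aws-auth ConfigMap from terraform.
+resource "kubernetes_config_map_v1_data" "aws_auth" {
+  metadata {
+    name      = "aws-auth"
+    namespace = "kube-system"
+  }
+
+  force = true # take over fields that were created outside of terraform
+
+  data = {
+    mapUsers = yamlencode([
+      {
+        userarn  = "arn:aws:iam::079788916859:user/u-zawac002"
+        username = "admin"
+        groups   = ["system:masters"]
+      },
+    ])
+  }
+}
+```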
+
+# $HOME/.aws/config
+[profile 252960665057-ma6-gov-eks-org-project-env]
+source_profile = 252960665057-ma6-gov
+region = us-gov-east-1
+role_arn = arn:aws-us-gov:iam::252960665057:role/r-eks-org-project-env-cluster-admin
+role_session_name = badra001
+```
+
+With this configuration, using the profile `252960665057-ma6-gov` gives you the normal IAM access:
+
+```console
+% aws --profile 252960665057-ma6-gov sts get-caller-identity
+{
+    "UserId": "AIDATVZNBNXQ5UPHMBGPY",
+    "Account": "252960665057",
+    "Arn": "arn:aws-us-gov:iam::252960665057:user/a-badra001"
+}
+```
+
+Using the other profile will use the source profile (which has to have permission to assume the role), the role arn, and a session
+name mapping it back to your Census username (JBID).
+
+```console
+% aws --profile 252960665057-ma6-gov-eks-org-project-env sts get-caller-identity
+{
+    "UserId": "AROATVZNBNXQ7AV7W2ISZ:badra001",
+    "Account": "252960665057",
+    "Arn": "arn:aws-us-gov:sts::252960665057:assumed-role/r-eks-org-project-env-cluster-admin/badra001"
+}
+```
+
+# Cluster Setup
+
+## Download Configuration
+
+Now that the cluster is created, we need the `kubectl` command and to download the configuration.
+
+* get [kubectl](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html)
+
+```console
+% aws eks --profile $(get-profile) --region $(get-region) update-kubeconfig --name test2 --kubeconfig ./test2.kube.config
+Added new context arn:aws:eks:us-east-1:079788916859:cluster/test2 to /data/git-repos/terraform/079788916859-do2-cat_apps-adsd-eks/vpc/east-1/vpc4/apps/eks-test2/test2.kube.config
+% export KUBECONFIG=$(pwd)/test2.kube.config
+% kubectl get nodes
+NAME                            STATUS   ROLES    AGE   VERSION
+ip-10-194-24-49.ec2.internal    Ready    <none>   24m   v1.20.4-eks-6b7464
+ip-10-194-24-90.ec2.internal    Ready    <none>   24m   v1.20.4-eks-6b7464
+ip-10-194-25-120.ec2.internal   Ready    <none>   24m   v1.20.4-eks-6b7464
+ip-10-194-26-252.ec2.internal   Ready    <none>   24m   v1.20.4-eks-6b7464
+```
+
+## Authentication
+
+### Automated
+
+This is in the `aws-auth` subdirectory.
+
+```shell
+cd aws-auth
+tf-init
+tf-plan
+tf-apply
+```
+
+### Manual
+
+To allow users and roles to manipulate the cluster, we add to the `mapRoles` or `mapUsers` sections.
+
+```shell
+kubectl edit -n kube-system configmap/aws-auth
+```
+
+Add sections for `mapRoles`:
+
+```yaml
+  mapRoles: |
+    - rolearn: arn:aws:iam::079788916859:role/r-inf-cloud-admin
+      username: system:node:{{EC2PrivateDNSName}}
+      groups:
+        - system:bootstrappers
+        - system:nodes
+        - eks-console-dashboard-full-access-group
+```
+
+Add sections for `mapUsers`:
+
+```yaml
+  mapUsers: |
+    - userarn: arn:aws:iam::079788916859:user/u-zawac002
+      username: admin
+      groups:
+        - system:masters
+```
+
+We will likely want to do this through templating.
+
+* users
+  * arn:aws:iam::079788916859:user/u-badra001
+  * arn:aws:iam::079788916859:user/u-ashle001
+  * arn:aws:iam::079788916859:user/u-mcgin314
+  * arn:aws:iam::079788916859:user/u-sall0002
+  * arn:aws:iam::079788916859:user/u-zawac002
+* roles
+  * arn:aws:iam::079788916859:role/r-inf-cloud-admin
+  * arn:aws:iam::079788916859:role/r-adsd-cumulus
+  * arn:aws:iam::079788916859:role/r-adsd-eks
+  * arn:aws:iam::079788916859:role/r-adsd-tools
+
+## Adding Cluster Roles for AWS Console
+
+To allow [console access](https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml), we need these steps.
+
+It requires the cluster to be up and the `{clustername}.kube.config` file to exist along with the environment variable pointing to it.
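+
+As a quick sanity check before applying the roles (a sketch; the kube.config path follows the naming convention above):
+
+```shell
+export KUBECONFIG=$PWD/{clustername}.kube.config
+# confirm the API is reachable and the current identity is cluster-admin
+kubectl get nodes
+kubectl auth can-i '*' '*' --all-namespaces
+```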
+
+### Automated
+
+This applies just the full-access cluster role, as the restricted one needs additional configuration.
+
+```shell
+tf-apply -target=null_resource.apply_cluster_roles
+```
+
+### Manual
+
+```shell
+curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml
+curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml
+```
+
+For full console access, we'll use the first one.
+
+```console
+% kubectl apply -f eks-console-full-access.yaml
+clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole created
+clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding created
+```
+
+# CHANGELOG
+
+- 1.0.0 -- 2023-10-27
+  - setup for 1.28, ready for edits
+
+- 1.1.0 -- 2025-01-03
+  - remove old certificate stuff
+  - updated to 1.31
+  - still needs some updates
+
+- 1.2.0 -- 2025-11-24
+  - update to 1.32
diff --git a/examples/full-cluster-tf-upgrade/1.32/ROLES.md b/examples/full-cluster-tf-upgrade/1.32/ROLES.md
new file mode 100644
index 0000000..3880590
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/ROLES.md
@@ -0,0 +1,119 @@
+# Roles
+
+There are several types of roles we handle within the EKS cluster.
+
+1. IAM Role for Service Account (IRSA)
+These roles involve an IAM role with a formatted name of r-eks-{cluster}-irsa__{k8snamespace}__{k8suser}. This will
+grant appropriate IAM permissions to a pod. It includes specific conditions for the local OIDC provider mapping to
+system:serviceaccount:{k8snamespace}:{k8suser}. This matters because, without IRSA, a pod inherits the permissions
+of the node group, which grants far too much access to the running pods. These are not mapped into the ConfigMap aws-auth.
+
+A default:default role will exist which grants little to no AWS permissions.
+
+1. Cluster Admin Role
+This role is used for cluster administration. It is of the form r-eks-{cluster}-cluster-admin. It has read access to the
+[EKS Console](https://console.amazonaws-us-gov.com/eks/home). It has:
+* access to read and write ECR for the specific repositories used for the cluster at /eks/{clustername}
+* access to the EKS API for the cluster
+* can download the kube.config file
+* is mapped with the ConfigMap aws-auth into k8suser admin and k8sgroup system:masters
+* permissions to update the node groups (via cli)
+* others as discovered
+
+Users will use this role through the use of STS:AssumeRole either with the console or CLI.
+
+1. Additional Application Roles
+These will be for granting access to clusterroles via namespace and k8suser to IAM or SAML users. They will take the form
+r-eks-{cluster}-{name}, where {name} should reflect some portion of the namespace and purpose, and must not collide with an
+existing role. These will typically not need any AWS access beyond update-kubeconfig or get-token to obtain
+the configuration file. These will require a clusterrole and clusterrolebinding, and will need a username to go along with them.
+See [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) and [here](https://aws.amazon.com/premiumsupport/knowledge-center/eks-iam-permissions-namespaces/)
+for details about this. The configuration file to create this (yaml) will be stored in GitHub, and ideally, it will be created through the use of Terraform to be able
+to easily add these as needed.
+
+Users will use this role through the use of STS:AssumeRole either with the console or CLI.
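+
+For example, from the CLI (a sketch; the role name, account, and session name are placeholders following the conventions above):
+
+```shell
+aws sts assume-role \
+  --role-arn arn:aws:iam::{account}:role/r-eks-{cluster}-{name} \
+  --role-session-name {jbid}
+```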
+
+## IRSA Roles
+
+```hcl
+    condition {
+      test     = "StringEquals"
+      variable = "${local.oidc_provider_url}:sub"
+      values   = ["system:serviceaccount:${local.app2_namespace}:${local.app2_name}"]
+    }
+```
+
+* irsa-roles.aws-cli.tf
+* irsa-roles.cumulus.tf
+* irsa-roles.jenkins.tf
+
+
+## Cluster Admin Role
+
+## Additional Application Roles
+
+## cumulus-dba
+## cumulus-deployer
+## cicd-deployer
+
+## jenkins
+
+* Tool: Jenkins
+* Purpose: Used for CICD Pipeline
+  * build images
+  * copy images
+  * deploy pods
+  * deploy services
+  * other things as necessary
+* Source System: VM on-prem
+* AWS Access
+  * IAM Service account tied to the cluster name
+    * s-eks-{cluster}-cicd
+    * permissions to read and write ECR, but NOT eks/{clustername}
+    * permission to eks get-token
+    * permission to eks update-kubeconfig (get the kubeconfig)
+* Kubernetes Access
+  * Username
+    * recommend the same pattern: eks-{cluster}-cicd
+  * Group
+    * group names needed
+  * Permissions
+    * defined in K8S RBAC (clusterrole and clusterrolebinding, TBD)
+  * Files for configuration of K8S
+    * yml:
+    * tf:
+
+# AWS Commands
+
+```shell
+aws eks get-token --cluster-name {cluster}
+aws eks update-kubeconfig --name {cluster}
+```
+
+## CICD
+
+There are a number of ways to handle the CICD pipeline. The approach depends in part on whether it runs outside or inside the cluster. Options include:
+
+* service account for CICD (say, s-adsd-cicd-deployer) with full permissions to ECR and to get the eks config and token, along with k8s permissions through
+ConfigMap aws-auth.
+* role for CICD per cluster, say r-eks-{cluster}-cicd-deployer, with the same permissions as above.
+* These are all account specific, so running CICD across multiple accounts will need multiple IAM accounts and roles.
+* consider some central way of doing this so a CICD can deploy to any cluster in any account in any region.
+* perhaps start with a smaller per-cluster user/role and work towards a better solution later
+
+# TBD
+
+1. Determine how to create a default:default IRSA role which grants little to no AWS permissions (maybe sts get-caller-identity).
+1. Create a module for IRSA
+1. Explore the use of the OIDC integration with Access Manager
+1. Develop a strategy for CICD access
+
+# Links
+
+* [AWS RBAC](https://aws.amazon.com/premiumsupport/knowledge-center/eks-iam-permissions-namespaces/)
+* [K8S RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
+* [Add User Role](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)
+* [OIDC Identity Provider](https://docs.aws.amazon.com/eks/latest/userguide/authenticate-oidc-identity-provider.html)
+* [OIDC with MicroFocus](https://community.microfocus.com/cyberres/accessmanager/w/access_manager_tips/27815/access-amazon-web-services-using-amazon-cognito-for-mobile-applications-and-netiq-access-manager-4-1)
+
+
diff --git a/examples/full-cluster-tf-upgrade/1.32/addons/.tf-control b/examples/full-cluster-tf-upgrade/1.32/addons/.tf-control
new file mode 100644
index 0000000..280f449
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/addons/.tf-control
@@ -0,0 +1,20 @@
+# .tf-control
+# allows for setting a specific command to be used for tf-* commands under this git repo
+# see tf-control.sh help for more info
+
+TFCONTROL_VERSION="1.0.5"
+
+TFCOMMAND="terraform_latest"
+# TF_CLI_CONFIG_FILE=PATH-TO-FILE/.tf-control.tfrc
+# TFARGS=""
+# TFNOLOG=""
+# TFNOCOLOR=""
+
+# use the following to force a specific version. An upgrade of an existing 0.12.31 to 1.x
+# needs you to cycle through 0.13.7, 0.14.11, and then latest (0.15.5 not needed). Other
+# steps in between.
See https://github.e.it.census.gov/terraform/support/tree/master/docs/how-to/terraform-upgrade for details
+#
+#TFCOMMAND="terraform_0.12.31"
+#TFCOMMAND="terraform_0.13.7"
+#TFCOMMAND="terraform_0.14.11"
+#TFCOMMAND="terraform_0.15.5"
diff --git a/examples/full-cluster-tf-upgrade/1.32/addons/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.32/addons/.tf-control.tfrc
new file mode 100644
index 0000000..7425488
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/addons/.tf-control.tfrc
@@ -0,0 +1,24 @@
+TFCONTROL_VERSION="1.0.5"
+
+# https://www.terraform.io/docs/cli/config/config-file.html
+plugin_cache_dir = "/data/terraform/terraform.d/plugin-cache"
+#disable_checkpoint = true
+
+provider_installation {
+# filesystem_mirror {
+#   path    = "/apps/terraform/terraform.d/providers"
+#   include = [ "*/*/*" ]
+# }
+  filesystem_mirror {
+    path    = "/data/terraform/terraform.d/providers"
+    include = [ "*/*/*" ]
+  }
+# filesystem_mirror {
+#   path    = "/apps/terraform/terraform.d/providers"
+#   include = [ "external.terraform.census.gov/*/*" ]
+# }
+  direct {
+    include = [ "*/*/*" ]
+  }
+}
+
diff --git a/examples/full-cluster-tf-upgrade/1.32/addons/README.addons.md b/examples/full-cluster-tf-upgrade/1.32/addons/README.addons.md
new file mode 100644
index 0000000..8c1e730
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/addons/README.addons.md
@@ -0,0 +1,3 @@
+tf-aws eks describe-addon-versions --kubernetes-version 1.28
+
+
diff --git a/examples/full-cluster-tf-upgrade/1.32/addons/README.ebs.md b/examples/full-cluster-tf-upgrade/1.32/addons/README.ebs.md
new file mode 100644
index 0000000..de10f70
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/addons/README.ebs.md
@@ -0,0 +1,75 @@
+# eks-ebs
+
+With EKS based upon Kubernetes 1.23 or higher, the default gp2 storage class will no longer auto-provision persistent volumes.
+While an EFS-based auto-provisioner which supports all types of persistent volumes has been installed, it does not perform as well as a gp2/gp3 based persistent volume.
+The eks-ebs module installs an ebs-provisioner in the cluster with a storage class of `gp3-encrypted`, deletes the pre-existing `gp2` storage class, and makes `gp3-encrypted` the default storage class for the cluster.
+
+## Parameters
+
+| Name | Description |
+| ---- | ----------- |
+| region | The AWS region in which the EKS cluster is located. |
+| cluster_name | The name of the cluster in which ebs-provisioner will be installed. |
+| aws_ebs_csi_driver_version | Which version of the aws-ebs-csi-driver helm chart to use. Currently defaults to 2.14.1. |
+
+## Updating the aws-ebs-csi-driver chart
+
+When using a private VPC, the helm chart cannot be downloaded from "https://kubernetes-sigs.github.io/aws-ebs-csi-driver/" during installation.
+A local copy of the chart is maintained within the terraform script.
+The latest version of the helm chart can be found by looking at https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/charts/aws-ebs-csi-driver/Chart.yaml and checking the `version:` tag (not the `appVersion` tag).
+To update this helm chart to the latest version, the procedure is to:
+
+```script
+cd charts
+helm repo add aws-ebs-csi-driver https://kubernetes-sigs.github.io/aws-ebs-csi-driver/
+helm repo update
+rm -fr aws-ebs-csi-driver
+helm pull aws-ebs-csi-driver/aws-ebs-csi-driver --untar
+```
+
+After completing these steps, be sure to examine aws-ebs-csi-driver/values.yaml and confirm that the tags listed for the sidecar images match the tags assigned by default in input.tf.
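+
+One way to compare them quickly (a sketch; assumes the chart was pulled into charts/ as above, and that input.tf sits alongside, so adjust the paths to your layout):
+
+```shell
+# the sidecar image tags in the freshly pulled chart
+grep -E 'repository:|tag:' aws-ebs-csi-driver/values.yaml
+# the defaults currently baked into the terraform variables
+grep -A 3 '_tag"' ../input.tf
+```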
+
+For example, the values.yaml file:
+
+```yaml
+sidecars:
+  livenessProbe:
+    image:
+      repository: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe
+      tag: v2.2.0-eks-1-18-2
+      pullPolicy: IfNotPresent
+    resources: {}
+  nodeDriverRegistrar:
+    image:
+      repository: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar
+      tag: v2.1.0-eks-1-18-2
+      pullPolicy: IfNotPresent
+    resources: {}
+  csiProvisioner:
+    image:
+      repository: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner
+      tag: v2.1.1-eks-1-18-2
+      pullPolicy: IfNotPresent
+    resources: {}
+```
+
+Entries in input.tf:
+
+```hcl
+variable "livenessprobe_tag" {
+  description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe to use."
+  default     = "v2.2.0-eks-1-18-2"
+}
+
+variable "node_driver_registrar_tag" {
+  description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar to use."
+  default     = "v2.1.0-eks-1-18-2"
+}
+
+variable "external_provisioner_tag" {
+  description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner to use."
+  default     = "v2.1.1-eks-1-18-2"
+}
+```
diff --git a/examples/full-cluster-tf-upgrade/1.32/addons/README.md b/examples/full-cluster-tf-upgrade/1.32/addons/README.md
new file mode 100644
index 0000000..37bb3c8
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/addons/README.md
@@ -0,0 +1,122 @@
+# EBS
+
+Starting with EKS based upon Kubernetes 1.23, the gp2 storage class no longer supports auto-provisioning.
+This module sets up the needed resources to provision EBS-based gp3 persistent volumes. See [this](README.ebs.md) for more details.
+
+## Links
+
+* https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html
+* https://github.com/kubernetes-sigs/aws-ebs-csi-driver
+* https://github.com/kubernetes-sigs/aws-ebs-csi-driver/issues/722
+* https://github.com/kubernetes-sigs/aws-ebs-csi-driver/issues/1086
+
+## Initialize
+
+* Proxy setup
+
+A proxy may be needed because the system may not have direct access to the `registry.terraform.io` site,
+and if access is indirect, it may not be able to handle a proxy redirect. You may not need this, but if you get
+errors from `tf-init`, this is the first thing to set up.
+
+```shell
+export HTTP_PROXY=http://proxy.tco.census.gov:3128
+export HTTPS_PROXY=http://proxy.tco.census.gov:3128
+```
+
+## Terraform Automated
+
+A `tf-run.data` file exists here, so the simplest way to implement this is with the `tf-run.sh` script.
+
+* copy the `remote_state.yml` from the parent and update `directory` to be the current directory
+* run the tf-run.sh
+
+```console
+% tf-run.sh apply
+```
+
+* example of the `tf-run.sh` steps
+
+This is part of a larger cluster configuration, so at the end of the run it indicates another directory
+to visit when done.
+
+```console
+<<>>
+```
+
+It is highly recommended to use the `tf-run.sh` approach.
+
+## Terraform Manual
+
+```shell
+tf-directory-setup.py -l none
+setup-new-directory.sh
+tf-init
+```
+
+* Apply the rest
+
+This must be done from a system with the skopeo command, so RHEL8+.
+
+To use the local install, the `ebs/charts/` directory
+must be populated with the expected code (see [README.md](README.md)) outside of terraform,
+much like the .tf files are created. Currently, as the box we run this from has internet access,
+we can deploy by pulling the Helm charts from the internet.
+
+```shell
+tf-apply
+tf-directory-setup.py -l s3
+```
+
+## Post Setup Examination
+
+This shows what was set up (look at the ebs-csi-* pods).
+Your `kubectl` configuration file
+needs to be set up (one is extracted in `setup/kube.config` as part of this configuration).
+
+```console
+% kubectl --kubeconfig setup/kube.config get pods -n kube-system
+<<>>
+```
+
+* Create PVC Automated
+
+Use the `persistent-volume.tf`, which is set up by default; this should happen as part of the final apply above.
+
+* Create PVC Manually
+
+```yaml
+# pvc.yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: ebs-test3-claim
+spec:
+  accessModes:
+    - ReadWriteOnce
+  volumeMode: Filesystem
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: gp3-encrypted
+```
+
+* Examine the PV and PVC
+
+```console
+% kubectl get pv
+No resources found
+% kubectl get pvc
+No resources found in default namespace.
+% kubectl apply -f pvc.yaml
+persistentvolumeclaim/ebs-test3-claim created
+% kubectl get pvc
+NAME              STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS    AGE
+ebs-test3-claim   Pending                                      gp3-encrypted   39s
+```
+
+* Describing the PVC
+
+```shell
+kubectl --kubeconfig setup/kube.config describe pvc ebs-test3-claim
+```
+
diff --git a/examples/full-cluster-tf-upgrade/1.32/addons/addon_cloudwatch.tf b/examples/full-cluster-tf-upgrade/1.32/addons/addon_cloudwatch.tf
new file mode 100644
index 0000000..f3b483e
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/addons/addon_cloudwatch.tf
@@ -0,0 +1,66 @@
+# https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html
+# amazon-cloudwatch-observability
+
+locals {
+  cloudwatch_managed_policies        = ["AWSXrayWriteOnlyAccess", "CloudWatchAgentServerPolicy"]
+  cloudwatch_observability_name      = "cloudwatch-agent"
+  cloudwatch_observability_namespace = "amazon-cloudwatch"
+}
+
+data "aws_iam_policy" "cloudwatch-observability-policies" {
+  for_each = toset(local.cloudwatch_managed_policies)
+  name     = each.key
+}
+
+resource "aws_eks_addon" "amazon-cloudwatch-observability" {
+  count = lookup(local.addon_versions, "amazon-cloudwatch-observability", null) != null ?
1 : 0 + + cluster_name = var.cluster_name + addon_name = "amazon-cloudwatch-observability" + addon_version = lookup(local.addon_versions, "amazon-cloudwatch-observability") + service_account_role_arn = module.role_cloudwatch-observability.iam_role_arn + configuration_values = null + # resolve_conflicts = "OVERWRITE" + # note OVERWRITE resets to eks addon defaults, PRESERVE uses any values set here + resolve_conflicts_on_create = "OVERWRITE" + resolve_conflicts_on_update = "OVERWRITE" + + depends_on = [aws_cloudwatch_log_group.cloudwatch-observability] +} + +module "role_cloudwatch-observability" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + + role_description = "EKS IAM Role for ${var.cluster_name} for service account ${local.cloudwatch_observability_namespace}:${local.cloudwatch_observability_name}" + role_name = format("%v%v-irsa__%v", local._prefixes["eks-role"], var.cluster_name, local.cloudwatch_observability_name) + role_policy_arns = { for k, v in data.aws_iam_policy.cloudwatch-observability-policies : k => v.arn } + + oidc_providers = { + main = { + provider_arn = local.oidc_provider_arn + namespace_service_accounts = [format("%v:%v", local.cloudwatch_observability_namespace, local.cloudwatch_observability_name)] + } + } + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + { + "eks:namespace" = local.cloudwatch_observability_namespace + "eks:user" = local.cloudwatch_observability_name + } + ) +} + +resource "aws_cloudwatch_log_group" "cloudwatch-observability" { + for_each = toset(var.cloudwatch-observability_log_names) + name = format("/aws/containerinsights/%v/%v", var.cluster_name, each.key) + retention_in_days = var.cloudwatch-observability_log_retention_days + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) +} diff --git a/examples/full-cluster-tf-upgrade/1.32/addons/addon_coredns.tf b/examples/full-cluster-tf-upgrade/1.32/addons/addon_coredns.tf new file mode 100644 index 0000000..8c12156 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/addons/addon_coredns.tf @@ -0,0 +1,11 @@ +resource "aws_eks_addon" "coredns" { + count = lookup(local.addon_versions, "coredns", null) != null ? 1 : 0 + + cluster_name = var.cluster_name + addon_name = "coredns" + addon_version = lookup(local.addon_versions, "coredns") + # resolve_conflicts = "OVERWRITE" + # note OVERWRITE resets to eks addon defaults, PRESERVE uses any values set here + resolve_conflicts_on_create = "OVERWRITE" + resolve_conflicts_on_update = "OVERWRITE" +} diff --git a/examples/full-cluster-tf-upgrade/1.32/addons/addon_ebs-csi.tf b/examples/full-cluster-tf-upgrade/1.32/addons/addon_ebs-csi.tf new file mode 100644 index 0000000..2ff0e33 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/addons/addon_ebs-csi.tf @@ -0,0 +1,127 @@ +## resource "aws_iam_role" "cluster_ebs_role" { +## name = "${var.cluster_name}_ebs_driver_role" +## assume_role_policy = < list +** START: start=1636558903 +* 1 COMMAND> tf-directory-setup.py -l none -f +* 2 COMMAND> setup-new-directory.sh +* 3 COMMAND> tf-init -upgrade +* 4 tf-plan +* 5 COMMAND> tf-directory-setup.py -l s3 +* 6 STOP> cd ../efs and tf-run.sh apply +** END: start=1636558903 end=1636558903 elapsed=0 logfile=logs/run.plan.20211110.1636558903.log (not-created) +``` + +It is highly recommended to use the `tf-run.sh` approach. + +## Terraform Manual + +First, copy the `remote_state.yml` from the parent and update `directory` to be the current directory. 
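+
+A minimal sketch of that edit (assumes `remote_state.yml` carries a top-level `directory:` key, as in the parent copy; adjust the value to however the parent file expresses paths):
+
+```shell
+cp ../remote_state.yml .
+sed -i "s|^directory:.*|directory: $(basename "$PWD")|" remote_state.yml
+```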
+ +```shell +tf-directory-setup.py -l none +setup-new-directory.sh +tf-init +```` + +* Apply the rest + +```shell +tf-apply +tf-directory-setup.py -l s3 +``` + +## Post Setup Examination + +Your `kubectl` configuration file needs to be setup (one is extracted in `setup/kube.config` as part of this configuration). + +```console +% kubectl --kubeconfig setup/kube.config get configmap -n kube-system aws-auth +NAME DATA AGE +aws-auth 2 44d +``` diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/aws-auth.auto.tfvars b/examples/full-cluster-tf-upgrade/1.32/aws-auth/aws-auth.auto.tfvars new file mode 100644 index 0000000..4114446 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/aws-auth.auto.tfvars @@ -0,0 +1,28 @@ +aws_auth_users = [ + # { + # userarn = "" + # aws_username = "a-ashle001" + # username = "admin" + # groups = ["system:masters", "eks-console-dashboard-full-access-group"] + # }, + { + userarn = "" + aws_username = "a-badra001" + username = "admin" + groups = ["system:masters", "eks-console-dashboard-full-access-group"] + }, +] +aws_auth_roles = [ + { + rolearn = "" + aws_rolename = "r-inf-cloud-admin" + username = "admin" + groups = ["system:masters", "eks-console-dashboard-full-access-group"] + }, + { + rolearn = "" + aws_rolename = "r-inf-terraform" + username = "admin" + groups = ["system:masters", "eks-console-dashboard-full-access-group"] + }, +] diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/config_map.aws-auth.yaml.tpl b/examples/full-cluster-tf-upgrade/1.32/aws-auth/config_map.aws-auth.yaml.tpl new file mode 100644 index 0000000..7c58ada --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/config_map.aws-auth.yaml.tpl @@ -0,0 +1,17 @@ +data: +%{ if length(roles) > 0 } + mapRoles: | + %{ for k, v in roles ~} + - rolearn: ${v.rolearn} + username: ${v.username} + groups: ${v.groups} + %{ endfor ~} +%{ endif } +%{ if length(users) > 0 } + mapUsers: | + %{ for k, v in users ~} + - userarn: ${v.userarn} + username: ${v.username} + groups: ${v.groups} + %{ endfor ~} +%{ endif } diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.32/aws-auth/data.eks-subdirectory.tf new file mode 120000 index 0000000..43b5430 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/data.eks-subdirectory.tf @@ -0,0 +1 @@ +../includes.d/data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.32/aws-auth/kubeconfig.eks-subdirectory.tf new file mode 120000 index 0000000..e3750a4 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/kubeconfig.eks-subdirectory.tf @@ -0,0 +1 @@ +../includes.d/kubeconfig.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/patch-aws-auth.tf b/examples/full-cluster-tf-upgrade/1.32/aws-auth/patch-aws-auth.tf new file mode 100644 index 0000000..88e0bbe --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/patch-aws-auth.tf @@ -0,0 +1,135 @@ +data "kubernetes_config_map" "aws-auth" { + metadata { + namespace = "kube-system" + name = "aws-auth" + } +} + +data "aws_iam_user" "auth_users" { + for_each = toset([for u in local.joined_auth_users : u.aws_username]) + user_name = each.key +} + +data "aws_iam_role" "auth_roles" { + for_each = toset([for r in local.joined_auth_roles : r.aws_rolename]) + name = each.key +} + + +locals { + 
existing_roles_string = lookup(data.kubernetes_config_map.aws-auth.data, "mapRoles", "") + existing_users_string = lookup(data.kubernetes_config_map.aws-auth.data, "mapUsers", "") + + existing_roles = local.existing_roles_string != "" ? yamldecode(local.existing_roles_string) : [] + existing_users = local.existing_users_string != "" ? yamldecode(local.existing_users_string) : [] + + joined_auth_users = concat(local.aws_auth_users, var.aws_auth_users) + joined_auth_roles = concat(local.aws_auth_roles, var.aws_auth_roles) + + mapped_auth_users = [for u in local.joined_auth_users : { + userarn = data.aws_iam_user.auth_users[u.aws_username].arn + aws_username = u.aws_username + username = u.username + groups = u.groups + }] + mapped_auth_roles = [for u in local.joined_auth_roles : { + rolearn = data.aws_iam_role.auth_roles[u.aws_rolename].arn + aws_rolename = u.aws_rolename + username = u.username + groups = u.groups + }] + + merged_users = merge( + { for user in local.existing_users : user.userarn => user }, + # { for user in local.aws_auth_users : user.userarn => user }, + # { for user in var.aws_auth_users : user.userarn => user } + { for user in local.mapped_auth_users : user.userarn => user }, + ) + + merged_roles = merge( + { for role in local.existing_roles : role.rolearn => role }, + # { for role in local.aws_auth_roles : role.rolearn => role }, + # { for role in var.aws_auth_roles : role.rolearn => role } + { for role in local.mapped_auth_roles : role.rolearn => role }, + ) + + # patch = yamlencode({ + # "data" = { + # "mapUsers" = values(local.merged_users) + # "mapRoles" = values(local.merged_roles) + # } + # }) + patch = < 0~} + mapRoles: | +%{for k, v in local.merged_roles~} + - rolearn: ${v.rolearn} + username: ${v.username} + groups: +%{for g in v.groups~} + - ${g} +%{endfor~} +%{endfor~} +%{endif~} +%{if length(local.merged_users) > 0~} + mapUsers: | +%{for k, v in local.merged_users~} + - userarn: ${v.userarn} + username: ${v.username} + groups: +%{for g in v.groups~} + - ${g} +%{endfor~} +%{endfor~} +%{endif~} +EOM + + # patch_t = templatefile("${path.root}/config_map.aws-auth.yaml.tpl",{ + # users = values(local.merged_users) + # roles = values(local.merged_roles) + # }) +} + +resource "null_resource" "patch-aws-auth" { + triggers = { + users = join(",", sort(keys(local.merged_users))) + roles = join(",", sort(keys(local.merged_roles))) + } + depends_on = [null_resource.kubeconfig] + # provisioner "local-exec" { + # command = "if [ -z $KUBECONFIG ]; then 'echo missing KUBECONFIG'; exit 1; else exit 0; fi" + # } + # provisioner "local-exec" { + # command = "if [ ! -r $KUBECONFIG ]; then 'echo unreadable KUBECONFIG'; exit 1; else exit 0; fi" + # } + # provisioner "local-exec" { + # command = "which kubectl > /dev/null 2>&1; if [ $? 
!= 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" + # } + provisioner "local-exec" { + command = "test -d setup || mkdir setup" + } + provisioner "local-exec" { + command = "echo '${local.patch}' > setup/config_map.patch.yaml" + } + # provisioner "local-exec" { + # command = "echo '${local.patch_t}' > config_map.patch_t.yaml" + # } + provisioner "local-exec" { + # command = "kubectl patch --type merge -n kube-system configmap/aws-auth -p '${local.patch}'" + command = "kubectl --kubeconfig ${path.root}/setup/kube.config patch --type merge -n kube-system configmap/aws-auth --patch-file setup/config_map.patch.yaml" + } +} + +# output "map" { +# value = data.kubernetes_config_map.aws-auth +# } +# output "map_output" { +# value = { +# "object" = data.kubernetes_config_map.aws-auth +# "existing_users" = local.existing_users +# "existing_roles" = local.existing_roles +# "patch" = local.patch +# "patch_text" = local.patch_t +# } +# } diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/prefixes.tf b/examples/full-cluster-tf-upgrade/1.32/aws-auth/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/providers.tf b/examples/full-cluster-tf-upgrade/1.32/aws-auth/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/region.tf b/examples/full-cluster-tf-upgrade/1.32/aws-auth/region.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/region.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/settings.aws-auth.tf b/examples/full-cluster-tf-upgrade/1.32/aws-auth/settings.aws-auth.tf new file mode 100644 index 0000000..4d3259d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/settings.aws-auth.tf @@ -0,0 +1,11 @@ +locals { + aws_auth_users = [] + aws_auth_roles = [ + { + rolearn : "" + aws_rolename : format("%v%v-cluster-admin", local._prefixes["eks-role"], var.cluster_name) + username : "admin" + groups = ["system:masters", "eks-console-dashboard-full-access-group"] + }, + ] +} diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/tf-run.data b/examples/full-cluster-tf-upgrade/1.32/aws-auth/tf-run.data new file mode 100644 index 0000000..8afedd9 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/tf-run.data @@ -0,0 +1,14 @@ +VERSION 1.3.0 +REMOTE-STATE +COMMAND tf-directory-setup.py -l none -f +COMMAND setup-new-directory.sh +COMMAND tf-init -upgrade + +LINKTOP init +LINK versions.tf +LINK settings.auto.tfvars +LINK variables.application_tags.auto.tfvars + +ALL +COMMAND tf-directory-setup.py -l s3 +STOP cd ../efs and tf-run.sh apply diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/tf-run.destroy.data b/examples/full-cluster-tf-upgrade/1.32/aws-auth/tf-run.destroy.data new file mode 100644 index 0000000..fcf987a --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/tf-run.destroy.data @@ -0,0 +1,9 @@ +VERSION 1.0.1 +BACKUP-STATE +COMMAND tf-init +COMMAND tf-state list + +COMMENT We do not want to remove anything here, because once you do, you will not be able to access the cluster for the destroy step. 
+
+COMMENT Destroying the cluster will take care of this directory.
+
+STOP
diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/variables.aws-auth.tf b/examples/full-cluster-tf-upgrade/1.32/aws-auth/variables.aws-auth.tf
new file mode 100644
index 0000000..05708d5
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/variables.aws-auth.tf
@@ -0,0 +1,23 @@
+# maybe just ignore the ARN entirely and force a read
+
+variable "aws_auth_users" {
+  description = "A list of objects where each object has userarn, aws_username, username, and groups, where groups is a list of groups to associate with the user. Leaving userarn as an empty string will pull the user ARN from AWS."
+  type = list(object({
+    userarn      = string
+    aws_username = string
+    username     = string
+    groups       = list(string)
+  }))
+  default = []
+}
+
+variable "aws_auth_roles" {
+  description = "A list of objects where each object has rolearn, aws_rolename, username, and groups, where groups is a list of groups to associate with the role. Leaving rolearn as an empty string will pull the role ARN from AWS."
+  type = list(object({
+    rolearn      = string
+    aws_rolename = string
+    username     = string
+    groups       = list(string)
+  }))
+  default = []
+}
diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.32/aws-auth/variables.eks.tf
new file mode 120000
index 0000000..7dd95db
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/variables.eks.tf
@@ -0,0 +1 @@
+../variables.eks.tf
\ No newline at end of file
diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/version.tf b/examples/full-cluster-tf-upgrade/1.32/aws-auth/version.tf
new file mode 120000
index 0000000..061373c
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/version.tf
@@ -0,0 +1 @@
+../version.tf
\ No newline at end of file
diff --git a/examples/full-cluster-tf-upgrade/1.32/aws-auth/versions.tf b/examples/full-cluster-tf-upgrade/1.32/aws-auth/versions.tf
new file mode 120000
index 0000000..8bd0ff1
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/aws-auth/versions.tf
@@ -0,0 +1 @@
+../versions.tf
\ No newline at end of file
diff --git a/examples/full-cluster-tf-upgrade/1.32/bin/copy_image.sh b/examples/full-cluster-tf-upgrade/1.32/bin/copy_image.sh
new file mode 100755
index 0000000..60fad27
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/bin/copy_image.sh
@@ -0,0 +1,326 @@
+#!/bin/bash
+
+###############################################################################
+# This script uses skopeo to copy a docker image from one repository to
+# another. The primary intent is to copy the image from a public repository
+# to a private repository.
+###############################################################################
+# Expected environment variables:
+#
+# SOURCE_IMAGE - The image to copy to another location. Example:
+#     paradyme-docker-local.jfrog.io/appetizer:dev
+# SOURCE_INSECURE - Set this to 1 if the source repository is in an insecure
+#     docker registry. Set it to 0 or leave it unset if the
+#     docker registry is secure.
+#
+# DESTINATION_IMAGE - The image to copy to another location. Example:
+#     paradyme-docker-local.jfrog.io/appetizer:dev
+# DESTINATION_INSECURE - Set this to 1 if the destination repository is in
+#     an insecure docker registry. Set it to 0 or leave it unset
+#     if the docker registry is secure.
+#
+# When the source repository requires authentication to access, configure
+# these values. Otherwise do not set them.
+#
+# SOURCE_USERNAME - The username to supply for credentialed access to the
+#     repository. `anthony-zawacki` is an example.
+# SOURCE_PASSWORD - The password to supply for credentialed access to the
+#     repository. An artifactory API_KEY for example.
+#
+# When the destination repository requires authentication to access, configure
+# these values. Otherwise do not set them.
+#
+# DESTINATION_USERNAME - The username to supply for credentialed access to the
+#     repository. `anthony-zawacki` is an example.
+# DESTINATION_PASSWORD - The password to supply for credentialed access to the
+#     repository. The output of:
+#     `aws ecr get-login-password --region us-east-2` for example.
+#
+# If the destination repository does not exist, the copy_image.sh script will
+# create the repository automatically. In cases where the newly created
+# repository should have a mutable image (perhaps always pushing to a `latest`
+# tag in a development environment), it is possible to configure the
+# repository to allow mutability by setting the DESTINATION_MUTABLE
+# environment variable (see -dest-mutable below). Otherwise, do not set it.
+#
+###############################################################################
+
+ensure_skopeo() {
+  skopeo=$(command -v skopeo)
+  if [[ "$skopeo" == "" ]]; then
+    echo "The required executable, skopeo, was not found."
+    echo "Please install it and ensure it is in the path."
+    return 1
+  fi
+
+  return 0
+}
+
+usage() {
+  local msg="${1}"; shift;
+
+  cat <<EOF
+${msg}
+
+Usage: copy_image.sh [options]
+
+  -src-image (SOURCE_IMAGE) The name of the image to copy to another
+      registry.
+  -src-username (SOURCE_USERNAME) Optional parameter in cases where
+      the source registry requires authentication. Use this username for the
+      credentials.
+  -src-password (SOURCE_PASSWORD) Optional parameter in cases where
+      the source registry requires authentication. Use this password for the
+      credentials.
+  -src-insecure (SOURCE_INSECURE=1) Optional parameter indicates that the
+      source registry is not a secured registry and that tls validation
+      should be disabled for the processing of the image. The default is
+      to assume that the source registry is secured.
+  +src-insecure (SOURCE_INSECURE=0) Optional parameter explicitly indicating
+      that the source registry is secure and TLS must be used to access the
+      registry.
+
+  -dest-image (DESTINATION_IMAGE) The name of the image to use in the
+      destination registry.
+  -dest-username (DESTINATION_USERNAME) Optional parameter in cases
+      where the destination registry requires authentication. Use this
+      username for the credentials.
+  -dest-password (DESTINATION_PASSWORD) Optional parameter in cases
+      where the destination registry requires authentication. Use this
+      password for the credentials.
+  -dest-insecure (DESTINATION_INSECURE=1) Optional parameter indicates that the
+      destination registry is not a secured registry and that tls validation
+      should be disabled for the processing of the image. The default is
+      to assume that the destination registry is secured.
+  +dest-insecure (DESTINATION_INSECURE=0) Optional parameter explicitly
+      indicating that the destination registry is secure and TLS must be
+      used to access the registry.
+  -dest-mutable (DESTINATION_MUTABLE=1) Optional parameter indicates that if
+      creating the ECR repository is required, create it allowing mutable
+      images.
+  +dest-mutable (DESTINATION_MUTABLE=0) Optional parameter explicitly
+      indicating that if creating the ECR repository is required, create it
+      with immutable images.
+
+EOF
+
+  exit 1
+}
+
+parse_commandline() {
+  local key
+  local positional=()
+
+  while [[ $# -gt 0 ]]; do
+    key="$1"; shift
+
+    case "$key" in
+      -src-image)
+        SOURCE_IMAGE="$1"; shift
+        ;;
+      -src-username)
+        SOURCE_USERNAME="$1"; shift
+        ;;
+      -src-password)
+        SOURCE_PASSWORD="$1"; shift
+        ;;
+      -src-insecure)
+        SOURCE_INSECURE=1
+        ;;
+      +src-insecure)
+        SOURCE_INSECURE=0
+        ;;
+      -dest-image)
+        DESTINATION_IMAGE="$1"; shift
+        ;;
+      -dest-username)
+        DESTINATION_USERNAME="$1"; shift
+        ;;
+      -dest-password)
+        DESTINATION_PASSWORD="$1"; shift
+        ;;
+      -dest-insecure)
+        DESTINATION_INSECURE=1
+        ;;
+      +dest-insecure)
+        DESTINATION_INSECURE=0
+        ;;
+      -dest-mutable)
+        DESTINATION_MUTABLE=1
+        ;;
+      +dest-mutable)
+        DESTINATION_MUTABLE=0
+        ;;
+      *)
+        positional+=("$key")
+        ;;
+    esac
+  done
+
+  if [[ ${#positional[@]} -gt 0 ]]; then
+    usage "Unrecognized parameters: ${positional[*]}"
+  fi
+}
+
+ensure_parameters() {
+  if [[ "$SOURCE_IMAGE" == "" ]]; then
+    usage "Must specify SOURCE_IMAGE"
+  fi
+
+  if [[ "$DESTINATION_IMAGE" == "" ]]; then
+    usage "Must specify DESTINATION_IMAGE"
+  fi
+
+  if [[ "$SOURCE_USERNAME" != "" || "$SOURCE_PASSWORD" != "" ]]; then
+    if [[ "$SOURCE_USERNAME" == "" || "$SOURCE_PASSWORD" == "" ]]; then
+      usage "Must specify both the SOURCE_USERNAME and SOURCE_PASSWORD."
+    fi
+  fi
+
+  if [[ "$DESTINATION_USERNAME" != "" || "$DESTINATION_PASSWORD" != "" ]]; then
+    if [[ "$DESTINATION_USERNAME" == "" || "$DESTINATION_PASSWORD" == "" ]]; then
+      usage "Must specify both the DESTINATION_USERNAME and DESTINATION_PASSWORD."
+    fi
+  fi
+
+  return 0
+}
+
+image_exists() {
+  declare src_creds="$SOURCE_USERNAME:$SOURCE_PASSWORD"
+  declare command=(skopeo inspect --insecure-policy)
+
+  if [[ "$SOURCE_USERNAME" != "" ]]; then
+#    command+=(--src-creds "$src_creds")
+    command+=(--creds "$src_creds")
+  else
+#    command+=(--src-no-creds)
+    command+=(--no-creds)
+  fi
+
+#  if [[ "$SOURCE_INSECURE" == "1" ]]; then
+#    command+=(--src-tls-verify=false)
+#  else
+#    command+=(--src-tls-verify=true)
+#  fi
+
+  command+=("docker://$SOURCE_IMAGE")
+
+  ${command[@]} > /dev/null 2>&1
+  status=$?
+  echo "* source_image_exists() status=$status"
+  # return 0 if it does, 1 if not (return the skopeo status, not the echo status)
+  return $status
+}
+
+destination_image_exists() {
+  declare dst_creds="$DESTINATION_USERNAME:$DESTINATION_PASSWORD"
+  declare command=(skopeo inspect --insecure-policy)
+
+  if [[ "$DESTINATION_USERNAME" != "" ]]; then
+#    command+=(--dest-creds "$dst_creds")
+    command+=(--creds "$dst_creds")
+  else
+#    command+=(--dest-no-creds)
+    command+=(--no-creds)
+  fi
+
+#  if [[ "$DESTINATION_INSECURE" == "1" ]]; then
+#    command+=(--dest-tls-verify=false)
+#  else
+#    command+=(--dest-tls-verify=true)
+#  fi
+
+  command+=("docker://$DESTINATION_IMAGE")
+
+  ${command[@]} > /dev/null 2>&1
+  status=$?
+  echo "* destination_image_exists() status=$status"
+  # return 0 if it does, 1 if not (return the skopeo status, not the echo status)
+  return $status
+} + +copy_image() { + declare src_creds="$SOURCE_USERNAME:$SOURCE_PASSWORD" + declare dest_creds="$DESTINATION_USERNAME:$DESTINATION_PASSWORD" + declare command=(skopeo copy --insecure-policy) + + if [[ "$SOURCE_USERNAME" != "" ]]; then + command+=(--src-creds "$src_creds") + else + command+=(--src-no-creds) + fi + + if [[ "$SOURCE_INSECURE" == "1" ]]; then + command+=(--src-tls-verify=false) + else + command+=(--src-tls-verify=true) + fi + + if [[ "$DESTINATION_USERNAME" != "" ]]; then + command+=(--dest-creds "$dest_creds") + else + command+=(--dest-no-creds) + fi + + if [[ "$DESTINATION_INSECURE" == "1" ]]; then + command+=(--dest-tls-verify=false) + else + command+=(--dest-tls-verify=true) + fi + + command+=("docker://$SOURCE_IMAGE" "docker://$DESTINATION_IMAGE") + + if [[ "$DESTINATION_IMAGE" == *.dkr.ecr.*.amazonaws.com/* ]]; then + echo "ECR registry detected, ensuring repository." + declare repository="${DESTINATION_IMAGE##*.amazonaws.com/}" + repository="${repository%%:*}" + declare region="${DESTINATION_IMAGE%%.amazonaws.com/*}" + region="${region##*.}" + export AWS_PAGER="" + if ! aws ecr describe-repositories \ + --region "$region" \ + --output "json" \ + --repository-names "$repository" \ + > /dev/null 2>&1; then + local mutability="IMMUTABLE" + if [ "$DESTINATION_MUTABLE" == "1" ]; then + mutability="MUTABLE" + fi + echo "creating repository $repository." + aws ecr create-repository \ + --image-tag-mutability "$mutability" \ + --image-scanning-configuration "scanOnPush=true" \ + --encryption-configuration "encryptionType=KMS" \ + --repository-name "$repository" \ + --region "$region" \ + > /dev/null 2>&1 || return $? + else + echo "repository $repository exists." + fi + fi + + echo "Copying $SOURCE_IMAGE" + echo "to $DESTINATION_IMAGE" + + while ! ${command[@]}; do + echo "Retrying uploading image..." + done +} + + +ensure_image() { + ( image_exists && ! destination_image_exists ) || copy_image +} + +main() { + ensure_skopeo && \ + parse_commandline "$@" && \ + ensure_parameters && \ + ensure_image && \ + echo "Done" +} + +return 0 > /dev/null 2>&1 || main "$@" + diff --git a/examples/full-cluster-tf-upgrade/1.32/bin/fix-terminating-namespace.sh b/examples/full-cluster-tf-upgrade/1.32/bin/fix-terminating-namespace.sh new file mode 100755 index 0000000..7282e79 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/bin/fix-terminating-namespace.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +# fix_terminating_namespace() { +# local -r namespace="${1}"; shift; +# +# kubectl get ns "$namespace" 2>&1 | grep -q Terminating +# +# if [ $? -eq 0 ]; then +# kubectl get namespace "$namespace" -o json | \ +# grep -v '^ "kubernetes"$' | \ +# kubectl replace --raw "/api/v1/namespaces/$namespace/finalize" -f - +# else +# echo "Namespace $namespace not found or not stuck in terminating state." +# fi +# } +# } + +namespace="${1}" +shift; + +kubectl get ns "$namespace" 2>&1 | grep -q Terminating +if [ $? -eq 0 ] +then + kubectl get namespace "$namespace" -o json |\ + grep -v '^ "kubernetes"$' |\ + kubectl replace --raw "/api/v1/namespaces/$namespace/finalize" -f - +else + echo "Namespace $namespace not found or not stuck in terminating state." 
+fi
diff --git a/examples/full-cluster-tf-upgrade/1.32/bin/remove-ecr.sh b/examples/full-cluster-tf-upgrade/1.32/bin/remove-ecr.sh
new file mode 100755
index 0000000..06c7975
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/bin/remove-ecr.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+test -r /apps/terraform/etc/aws-functions.sh && source /apps/terraform/etc/aws-functions.sh
+
+REPO=$1
+
+if [ -z "$REPO" ]
+then
+    echo "missing repository, exiting"
+    exit 1
+fi
+
+echo "* listing repositories for $REPO"
+#tf-aws ecr describe-repositories --query 'repositories[*].{a1:repositoryName,a2:repositoryArn}' --output text
+REPOLIST=$(tf-aws ecr describe-repositories --query 'repositories[*].{a1:repositoryName}' --output text | grep ^$REPO)
+for f in $REPOLIST
+do
+    echo "  $f"
+done
+echo ""
+
+echo "* listing images for each repo in $REPO"
+for f in $REPOLIST
+do
+    echo "> $f"
+    tf-aws ecr list-images --repository-name $f --query 'imageIds[*].{a1:imageDigest,a2:imageTag}' --output text
+done
+echo ""
+
+rc=1
+cc=0
+echo "* removing images for each repo in $REPO"
+for f in $REPOLIST
+do
+    echo "> $f"
+    c=1
+    # note: c and cc are updated inside a pipeline subshell, so the totals echoed below may undercount
+    tf-aws ecr list-images --repository-name $f --query 'imageIds[*].{a1:imageDigest,a2:imageTag}' --output text | while read digest tag
+    do
+        echo "[$rc.$c] rm $digest tag $tag"
+        # the leading 'true' makes this a dry run; remove it to actually delete the image
+        true tf-aws ecr batch-delete-image --repository-name $f --image-ids imageDigest=$digest
+        status=$?
+        if [ $status == 0 ]
+        then
+            cc=$(( $cc + 1 ))
+        fi
+        c=$(( $c + 1 ))
+    done
+    echo "= removed $c images from $f"
+    rc=$(( $rc + 1 ))
+done
+echo ""
+echo "= $rc repos, removed $cc images"
+
+echo "* deleting the repo for each repo in $REPO"
+for f in $REPOLIST
+do
+    echo "> $f"
+    tf-aws ecr delete-repository --repository-name $f
+done
+echo ""
+
+
+## 2827 2023-05-11 10:20:37 tf-aws ecr describe-repositories -- query 'repositories[*].{a1.repositoryName,a2:respositoryArn}' --output text
+## 2828 2023-05-11 10:20:49 tf-aws ecr describe-repositories --query 'repositories[*].{a1.repositoryName,a2:respositoryArn}' --output text
+## 2829 2023-05-11 10:20:57 tf-aws ecr describe-repositories --query 'repositories[*].{a1:repositoryName,a2:respositoryArn}' --output text
+## 2830 2023-05-11 10:21:20 tf-aws ecr describe-repositories --query 'repositories[*].{a1:repositoryName,a2:repositoryArn}' --output text
+## 2831 2023-05-11 10:21:32 tf-aws ecr describe-repositories --query 'repositories[*].{a1:repositoryName,a2:repositoryArn}' --output text|grep eks/ditd-partnerportal-test/
+## 2833 2023-05-11 10:21:51 tf-aws ecr help|grep image
+## 2834 2023-05-11 10:22:14 tf-aws ecr list-images
+## 2835 2023-05-11 10:22:26 tf-aws ecr list-images --repository-name eks/ditd-partnerportal-test/csi-snapshotter
+## 2836 2023-05-11 10:22:45 tf-aws ecr batch-delete-image
+## 2837 2023-05-11 10:22:57 tf-aws ecr batch-delete-image --repository-name eks/ditd-partnerportal-test/csi-snapshotter
+## 2838 2023-05-11 10:23:05 tf-aws ecr batch-delete-image --repository-name eks/ditd-partnerportal-test/csi-snapshotter --image-ids --help
+## 2839 2023-05-11 10:23:11 tf-aws ecr batch-delete-image --repository-name eks/ditd-partnerportal-test/csi-snapshotter --image-ids
+## 2840 2023-05-11 10:23:19 tf-aws ecr batch-delete-image help # --repository-name eks/ditd-partnerportal-test/csi-snapshotter --image-ids
+## 2841 2023-05-11 10:23:34 tf-aws ecr list-images --repository-name eks/ditd-partnerportal-test/csi-snapshotter
+## 2842 2023-05-11 10:23:58 history > remove-ecr.sh
diff --git a/examples/full-cluster-tf-upgrade/1.32/bin/show-k8s-things.sh
b/examples/full-cluster-tf-upgrade/1.32/bin/show-k8s-things.sh
new file mode 100755
index 0000000..c5f6290
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/bin/show-k8s-things.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+for f in all clusterrolebindings clusterroles nodes pods pvc pv rolebindings roles sc secrets services
+do
+    echo "kubectl --kubeconfig setup/kube.config get $f --all-namespaces -o wide > OUT.get-$f.txt"
+    kubectl --kubeconfig setup/kube.config get $f --all-namespaces -o wide > OUT.get-$f.txt
+done
diff --git a/examples/full-cluster-tf-upgrade/1.32/charts-images.tf b/examples/full-cluster-tf-upgrade/1.32/charts-images.tf
new file mode 100644
index 0000000..2dc118b
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/charts-images.tf
@@ -0,0 +1,8 @@
+locals {
+  chart_settings  = yamldecode(file("${path.root}/charts.yml"))
+  images_settings = yamldecode(file("${path.root}/images.yml"))
+}
+
+# locations
+## ./common-services/cluster-autoscaler/cluster-autoscaler.tf
+## ./common-services/images.tf
diff --git a/examples/full-cluster-tf-upgrade/1.32/charts.yml b/examples/full-cluster-tf-upgrade/1.32/charts.yml
new file mode 100644
index 0000000..11557ce
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/charts.yml
@@ -0,0 +1,24 @@
+cluster-autoscaler:
+  documentation: "https://artifacthub.io/packages/helm/cluster-autoscaler/cluster-autoscaler"
+  name: "cluster-autoscaler"
+  repository: "https://kubernetes.github.io/autoscaler"
+  version: "9.52.1"
+  use_remote: true
+cert-manager:
+  documentation: "https://artifacthub.io/packages/helm/cert-manager/cert-manager"
+  name: "cert-manager"
+  repository: "https://charts.jetstack.io"
+  version: "1.19.1"
+  use_remote: true
+metrics-server:
+  documentation: "https://artifacthub.io/packages/helm/bitnami/metrics-server"
+  name: "metrics-server"
+  repository: "https://charts.bitnami.com/bitnami"
+  version: "7.2.14"
+  use_remote: true
+# new one, does not work yet
+#  repository: "oci://registry-1.docker.io/bitnamicharts"
+#  version: "7.3.0"
+# metrics-server
+# https://artifacthub.io/packages/helm/metrics-server/metrics-server
+# chart 3.13.0, app version 0.8.0
diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/.tf-control b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/.tf-control
new file mode 100644
index 0000000..280f449
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/.tf-control
@@ -0,0 +1,20 @@
+# .tf-control
+# allows for setting a specific command to be used for tf-* commands under this git repo
+# see tf-control.sh help for more info
+
+TFCONTROL_VERSION="1.0.5"
+
+TFCOMMAND="terraform_latest"
+# TF_CLI_CONFIG_FILE=PATH-TO-FILE/.tf-control.tfrc
+# TFARGS=""
+# TFNOLOG=""
+# TFNOCOLOR=""
+
+# use the following to force a specific version. An upgrade of an existing 0.12.31 to 1.x
+# needs you to cycle through 0.13.7, 0.14.11, and then latest (0.15.5 not needed). Other
+# steps in between.
See https://github.e.it.census.gov/terraform/support/tree/master/docs/how-to/terraform-upgrade for details
+#
+#TFCOMMAND="terraform_0.12.31"
+#TFCOMMAND="terraform_0.13.7"
+#TFCOMMAND="terraform_0.14.11"
+#TFCOMMAND="terraform_0.15.5"
diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/.tf-control.tfrc
new file mode 100644
index 0000000..7425488
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/.tf-control.tfrc
@@ -0,0 +1,24 @@
+TFCONTROL_VERSION="1.0.5"
+
+# https://www.terraform.io/docs/cli/config/config-file.html
+plugin_cache_dir = "/data/terraform/terraform.d/plugin-cache"
+#disable_checkpoint = true
+
+provider_installation {
+# filesystem_mirror {
+#   path    = "/apps/terraform/terraform.d/providers"
+#   include = [ "*/*/*" ]
+# }
+  filesystem_mirror {
+    path    = "/data/terraform/terraform.d/providers"
+    include = [ "*/*/*" ]
+  }
+# filesystem_mirror {
+#   path    = "/apps/terraform/terraform.d/providers"
+#   include = [ "external.terraform.census.gov/*/*" ]
+# }
+  direct {
+    include = [ "*/*/*" ]
+  }
+}
+
diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/README.md b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/README.md
new file mode 100644
index 0000000..eae6d1d
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/README.md
@@ -0,0 +1,238 @@
+# About cluster-roles
+
+This directory constructs the resources for roles, permissions, and Kubernetes resources
+for the EKS cluster adsd-cumulus-dev.
+
+# Application Information
+
+* Application: EKS adsd-cumulus-dev
+* Organization: ADSD
+* Project: DICE-dev
+* Point of Contact(s): badra001,
+* Creation Date: 2021-10-08
+* References:
+  * Requirements: {url}
+  * Remedy Ticket: {number}
+  * Other: {url}
+* Related Configurations:
+  * {directory-path}
+
+# Application Requirements: EKS Cluster RBAC
+
+To let the CICD pipeline and the DBAs manage the applications and databases that Cumulus needs, three cluster roles need to be created:
+
+1. Deployer Application Role
+2. Deployer Istio System Role
+3. DBA Administrator Role
+
+The CICD deployer will be bound to the Deployer roles in the namespaces that CICD manages. Likewise, the DBA Admin user only has admin roles in the namespaces they manage.
+
+## Deployer Application Role
+
+This role defines the k8s resources that the CICD pipeline needs to create for application deployment.
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: deployer-role
+aggregationRule:
+  clusterRoleSelectors:
+    - matchLabels:
+        rbac.authorization.k8s.io/aggregate-to-edit: "true"
+rules:
+  - apiGroups:
+      - cert-manager.io
+      - acme.cert-manager.io
+    resources:
+      - "*"
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - update
+      - patch
+      - delete
+  - apiGroups:
+      - networking.istio.io
+      - security.istio.io
+    resources:
+      - virtualservices
+      - authorizationpolicies
+      - destinationrules
+      - peerauthentications
+      - requestauthentications
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - delete
+      - patch
+```
+
+## Deployer Istio System Role
+
+This role lets the deployer create gateways and certificates in the istio-system namespace; Istio requires the TLS certificate to stay in the same
+namespace as the istio-ingressgateway.
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: deployer-istiosystem-role
+rules:
+  - apiGroups:
+      - cert-manager.io
+      - acme.cert-manager.io
+    resources:
+      - "*"
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - update
+      - patch
+  - apiGroups:
+      - networking.istio.io
+    resources:
+      - gateways
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - delete
+      - patch
+```
+
+## DBA Administrator Role
+This is the admin role for the particular namespace or namespaces that the DBAs need in order to access and manage the databases.
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: dba-admin-role
+aggregationRule:
+  clusterRoleSelectors:
+    - matchLabels:
+        rbac.authorization.k8s.io/aggregate-to-admin: "true"
+rules:
+  - apiGroups:
+      - cert-manager.io
+      - acme.cert-manager.io
+    resources:
+      - "*"
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - update
+      - patch
+      - delete
+  - apiGroups:
+      - networking.istio.io
+      - security.istio.io
+    resources:
+      - virtualservices
+      - authorizationpolicies
+      - destinationrules
+      - peerauthentications
+      - requestauthentications
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - delete
+      - patch
+```
+
+# Terraform Directions
+
+
+
+
+# Details
+
+
+account_alias = ""
+account_id = ""
+application_tags = {}
+aws_environment = ""
+census_private_cidr = [
+  "148.129.0.0/16",
+  "172.16.0.0/12",
+  "192.168.0.0/16"
+]
+census_public_cidr = [
+  "148.129.0.0/16"
+]
+cicd_k8s_group_name = "s-eks-adsd-cumulus-dev-cicd-deployer"
+cicd_k8s_user_name = "cicd-deployer"
+cicd_managed_namespaces = [
+  "adsd-cumulus-dev-apps",
+  "adsd-cumulus-dev-addressupdate",
+  "adsd-cumulus-dev-adminmatchrecord",
+  "adsd-cumulus-dev-cbs-apps",
+  "adsd-cumulus-dev-collectionevent",
+  "adsd-cumulus-dev-collectionintervention",
+  "adsd-cumulus-dev-collectionoperation",
+  "adsd-cumulus-dev-collectionresponse",
+  "adsd-cumulus-dev-common",
+  "adsd-cumulus-dev-mft",
+  "adsd-cumulus-dev-monitoring"
+]
+cluster_name = ""
+cluster_version = "1.20"
+dba_admin_rolebinding_name = "dba-admin-rolebinding"
+dba_administrator_role_name = "dba-admin-role"
+dba_k8s_group_name = "s-eks-adsd-cumulus-dev-dba-admin"
+dba_k8s_user_name = "dba-admin"
+dba_managed_namespaces = [
+  "adsd-cumulus-dev-db"
+]
+deployer_application_role_name = "deployer-application-role"
+deployer_application_rolebinding_name = "deployer-application-rolebinding"
+deployer_istiosystem_role_name = "deployer-istiosystem-role"
+domain = ""
+eks_instance_disk_size = 40
+eks_instance_type = "t3.xlarge"
+eks_ng_desire_size = 4
+eks_ng_max_size = 16
+eks_ng_min_size = 4
+eks_vpc_name = "*vpc4*"
+istio_installed_namespace = "istio-system"
+kms_tfstate_key = "k-kms-inf-tfstate"
+profile = ""
+region = ""
+region_map = {}
+regions = []
+subnets_name = "*-apps-*"
+tag_costallocation = "csvd:infrastructure"
+tag_creator = ""
+tfstate_bucket = "inf-tfstate-252960665057"
+tfstate_bucket_prefix = "inf-tfstate"
+tfstate_key_prefix = "ma6-gov"
+tfstate_key_suffix = "terraform.tfstate"
+tfstate_region = "us-gov-east-1"
+tfstate_table = "tf_remote_state"
+vpc_dns_servers = [
+  "148.129.127.22",
+  "148.129.191.22"
+]
+vpc_domain_name = "dice.census.gov"
+vpc_full_name = ""
+vpc_ntp_servers = [
+  "148.129.127.23",
+  "148.129.191.23"
+]
+
+
+
diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/RESULTS.md b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/RESULTS.md
new file mode 100644
index 0000000..5d31a20
--- /dev/null
+++
b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/RESULTS.md @@ -0,0 +1,41 @@ +## Cluster Roles + +```console +% kubectl --kubeconfig setup/kube.config get clusterrole -o wide |grep -iE "dba|deployer" +cumulus-dba-role 2021-10-07T14:36:45Z +dba-admin-role 2021-10-13T12:12:33Z +deployer-application-role 2021-10-13T12:12:33Z +deployer-istiosystem-role 2021-10-13T12:12:33Z +deployer-role 2021-10-07T16:37:43Z +``` + +## Role Binding + +```console +% kubectl --kubeconfig setup/kube.config get rolebinding -o wide --all-namespaces |grep -iE "deployer|dba" +adsd-cumulus-dev-addressupdate deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-addressupdate deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-adminmatchrecord deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-adminmatchrecord deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-apps deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-apps deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-cbs-apps deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-cbs-apps deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-collectionevent deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-collectionevent deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-collectionintervention deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-collectionintervention deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-collectionoperation deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-collectionoperation deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-collectionresponse deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-collectionresponse deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-common deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-common deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-db cumulus-dba-rolebinding ClusterRole/cumulus-dba-role 5d22h dba-admin cumulus-dba kube-system/dba +adsd-cumulus-dev-db dba-admin-rolebinding ClusterRole/dba-admin-role 56m dba-admin s-eks-adsd-cumulus-dev-dba-admin +adsd-cumulus-dev-mft deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-mft deployer-rolebinding ClusterRole/deployer-role 5d20h 
cumulus-deployer kube-system/deployer +adsd-cumulus-dev-monitoring deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-monitoring deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +istio-system deployer_istiosystem_role_binding ClusterRole/deployer-istiosystem-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +``` diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/cm.tf.off b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/cm.tf.off new file mode 100644 index 0000000..f84cb4b --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/cm.tf.off @@ -0,0 +1,6 @@ +data "kubernetes_config_map" "awsauth" { + metadata { + name = "aws-auth" + namespace = "kube-system" + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/data.eks-subdirectory.tf new file mode 120000 index 0000000..43b5430 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/data.eks-subdirectory.tf @@ -0,0 +1 @@ +../includes.d/data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/dba-clusterrole.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/dba-clusterrole.tf new file mode 100644 index 0000000..e60e7b5 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/dba-clusterrole.tf @@ -0,0 +1,24 @@ +resource "kubernetes_cluster_role" "dba_administrator_cluster_role" { + metadata { + name = var.dba_administrator_role_name + } + aggregation_rule { + cluster_role_selectors { + match_labels = { + "rbac.authorization.k8s.io/aggregate-to-admin" = "true" + } + } + } + + rule { + api_groups = ["cert-manager.io", "acme.cert-manager.io"] + resources = ["certificates", "challenges", "orders", "certificaterequests", "issuers"] + verbs = ["get", "list", "watch", "create", "update", "patch"] + } + + rule { + verbs = ["get", "list", "watch", "create", "update", "patch"] + api_groups = ["networking.istio.io", "security.istio.io"] + resources = ["virtualservices", "authorizationpolicies", "destinationrules", "peerauthentications", "requestauthentications"] + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/dba-rolebinding.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/dba-rolebinding.tf new file mode 100644 index 0000000..e7d48aa --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/dba-rolebinding.tf @@ -0,0 +1,40 @@ +locals { + dba_managed_namespaces = formatlist("%v-%v", var.cluster_name, var.dba_managed_namespaces) + dba_k8s_group_name = format("%v%v-%v", local._prefixes["eks-user"], var.cluster_name, var.dba_k8s_group_name) +} + +resource "kubernetes_namespace" "dba_managed_namespaces" { + for_each = toset(local.dba_managed_namespaces) + metadata { + name = each.key + labels = { + istio-injection = "enabled" + } + } +} + +resource "kubernetes_role_binding" "dba_admin_rolebinding" { + # for_each = toset(local.dba_managed_namespaces) + for_each = kubernetes_namespace.dba_managed_namespaces + + metadata { + name = var.dba_admin_rolebinding_name + namespace = each.key + } + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "ClusterRole" + name = var.dba_administrator_role_name + } + subject { + kind = "User" + name = var.dba_k8s_user_name + api_group = "rbac.authorization.k8s.io" + } + subject { + kind = "Group" + name 
= local.dba_k8s_group_name
+    api_group = "rbac.authorization.k8s.io"
+  }
+  # depends_on = [kubernetes_namespace.dba_managed_namespaces]
+}
diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/dba.iam.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/dba.iam.tf
new file mode 100644
index 0000000..3ef0a8a
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/dba.iam.tf
@@ -0,0 +1,117 @@
+locals {
+  policy_dba_k8s_group_name = replace(local.dba_k8s_group_name, local._prefixes["eks-user"], local._prefixes["eks-policy"])
+  role_dba_k8s_group_name   = format("%v%v-%v", local._prefixes["eks"], var.cluster_name, var.dba_k8s_group_name)
+}
+
+module "role_dba_administrator" {
+  source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade"
+
+  role_name              = local.role_dba_k8s_group_name
+  role_description       = "Role for EKS cluster ${var.cluster_name} for access by ${var.dba_k8s_group_name}"
+  enable_ldap_creation   = false
+  assume_policy_document = data.aws_iam_policy_document.dba_administrator_allow_sts.json
+  attached_policies      = [aws_iam_policy.dba_administrator.arn]
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.application_tags,
+  )
+}
+
+resource "aws_iam_policy" "dba_administrator" {
+  name        = local.policy_dba_k8s_group_name
+  path        = "/"
+  description = "Policy for EKS ${var.cluster_name} IAM access ${var.dba_k8s_group_name}"
+  policy      = data.aws_iam_policy_document.dba_administrator.json
+}
+
+locals {
+  dba_administrator_policy_statements = {
+    ECRRead = {
+      actions = [
+        "ecr:Describe*",
+        "ecr:Get*",
+        "ecr:ListImages",
+        "ecr:BatchGetImage",
+        "ecr:BatchCheckLayerAvailability",
+        "ecr:GetDownloadUrlForLayer",
+      ]
+      resources = ["*"]
+    }
+    EKSRead = {
+      actions = [
+        "eks:ListClusters",
+      ]
+      resources = ["*"]
+    }
+    EKSReadMyClusters = {
+      actions = [
+        "eks:DescribeCluster",
+        "eks:AccessKubernetesApi",
+      ]
+      resources = [format(local.common_arn, "eks", format("%v/%v", "cluster", var.cluster_name))]
+    }
+    STSAssumeRole = {
+      actions   = ["sts:AssumeRole"]
+      resources = [module.role_dba_administrator.role_arn]
+    }
+  }
+}
+
+data "aws_iam_policy_document" "dba_administrator" {
+  dynamic "statement" {
+    for_each = local.dba_administrator_policy_statements
+    iterator = s
+    content {
+      sid           = format("%v%vAccess", lookup(s.value, "effect", "Allow"), s.key)
+      effect        = lookup(s.value, "effect", "Allow")
+      actions       = lookup(s.value, "actions", [])
+      resources     = lookup(s.value, "resources", [])
+      not_resources = lookup(s.value, "not_resources", [])
+    }
+  }
+}
+
+# allow anyone in this account to assume the role, if they have the permission to do so
+data "aws_iam_policy_document" "dba_administrator_allow_sts" {
+  statement {
+    sid     = "AllowSTSAssume"
+    effect  = "Allow"
+    actions = ["sts:AssumeRole"]
+    principals {
+      type = "AWS"
+      identifiers = [
+        format(local.iam_arn, "root"),
+      ]
+    }
+  }
+}
+
+# output "role_dba_administrator_arn" {
+#   description = "DBA Administrator role ARN"
+#   value       = module.role_dba_administrator.role_arn
+# }
+
+module "group_dba_administrator" {
+  source = "git@github.e.it.census.gov:terraform-modules/aws-iam-group.git"
+
+  group_name        = local.role_dba_k8s_group_name
+  attached_policies = [aws_iam_policy.dba_administrator.arn]
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.application_tags,
+  )
+}
+
+output "info_dba_administrator" {
+  description = "DBA Administrator IAM details"
+  value = {
+    role_name  = module.role_dba_administrator.role_name
+    role_arn   = module.role_dba_administrator.role_arn
+    group_name = module.group_dba_administrator.group_name
+    group_arn  = module.group_dba_administrator.group_arn
+  }
+}
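+
+# Note: main.tf maps module.role_dba_administrator's ARN into the cluster's
+# aws-auth ConfigMap (aws_auth_roles), which is what lets members of this IAM
+# role reach the Kubernetes API as user "dba-admin" in the DBA group.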
diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/deployer-clusterrole.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/deployer-clusterrole.tf
new file mode 100644
index 0000000..7cede6e
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/deployer-clusterrole.tf
@@ -0,0 +1,67 @@
+resource "kubernetes_cluster_role" "cicd_deployer_istiosystem_cluster_role" {
+  metadata {
+    name = var.deployer_istiosystem_role_name
+  }
+
+  rule {
+    api_groups = ["acme.cert-manager.io"]
+    resources  = ["challenges", "orders", "certificaterequests"]
+    verbs      = ["create", "delete", "deletecollection", "get", "list", "patch", "update"]
+  }
+
+  rule {
+    api_groups = ["cert-manager.io"]
+    resources  = ["certificates"]
+    verbs      = ["create", "delete", "deletecollection", "get", "list", "patch", "update"]
+  }
+
+  rule {
+    verbs      = ["create", "delete", "deletecollection", "get", "list", "patch", "update"]
+    api_groups = ["networking.istio.io"]
+    resources  = ["gateways"]
+  }
+}
+
+resource "kubernetes_cluster_role" "cicd_deployer_istio_cluster_role" {
+  metadata {
+    name = var.deployer_application_istio_role_name
+  }
+  rule {
+    api_groups = ["security.istio.io"]
+    verbs      = ["create", "delete", "deletecollection", "get", "list", "patch", "update"]
+    resources  = ["requestauthentications", "authorizationpolicies", "peerauthentications"]
+  }
+
+  rule {
+    verbs      = ["create", "delete", "deletecollection", "get", "list", "patch", "update"]
+    api_groups = ["networking.istio.io"]
+    resources  = ["virtualservices", "destinationrules", "gateways"]
+  }
+}
+
+resource "kubernetes_cluster_role" "cicd_deployer_application_cluster_role" {
+  metadata {
+    name = var.deployer_application_role_name
+  }
+  aggregation_rule {
+    cluster_role_selectors {
+      match_labels = {
+        "rbac.authorization.k8s.io/aggregate-to-edit" = "true"
+      }
+    }
+  }
+
+  rule {
+    api_groups = ["acme.cert-manager.io"]
+    resources  = ["challenges", "orders", "certificaterequests"]
+    verbs      = ["create", "delete", "deletecollection", "get", "list", "patch", "update"]
+  }
+
+  rule {
+    api_groups = ["cert-manager.io"]
+    resources  = ["certificates"]
+    verbs      = ["create", "delete", "deletecollection", "get", "list", "patch", "update"]
+  }
+}
diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/deployer-rolebinding.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/deployer-rolebinding.tf
new file mode 100644
index 0000000..3b90b7b
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/deployer-rolebinding.tf
@@ -0,0 +1,91 @@
+resource "kubernetes_role_binding" "deployer_istio_role_binding" {
+  metadata {
+    name      = "deployer_istiosystem_role_binding"
+    namespace = var.istio_installed_namespace
+  }
+  role_ref {
+    api_group = "rbac.authorization.k8s.io"
+    kind      = "ClusterRole"
+    name      = var.deployer_istiosystem_role_name
+  }
+  subject {
+    kind      = "User"
+    name      = var.cicd_k8s_user_name
+    api_group = "rbac.authorization.k8s.io"
+  }
+  subject {
+    kind = "Group"
+    # name = format("%v%v-%v", local._prefixes["eks-user"], var.cluster_name, var.cicd_k8s_group_name)
+    name      = local.cicd_k8s_iam_username
+    api_group = "rbac.authorization.k8s.io"
+  }
+}
+
+locals {
+  cicd_managed_namespaces = formatlist("%v-%v", var.cluster_name, var.cicd_managed_namespaces)
+  cicd_k8s_iam_username   = format("%v%v-%v",
local._prefixes["eks-user"], var.cluster_name, var.cicd_k8s_group_name) + cicd_k8s_group_name = format("%v%v-%v", local._prefixes["eks"], var.cluster_name, var.cicd_k8s_group_name) +} + +resource "kubernetes_namespace" "cicd_managed_namespaces" { + for_each = toset(local.cicd_managed_namespaces) + metadata { + name = each.key + labels = { + istio-injection = "enabled" + } + } +} + + +resource "kubernetes_role_binding" "deployer_application_istio_rolebinding" { + # for_each = toset(local.cicd_managed_namespaces) + for_each = kubernetes_namespace.cicd_managed_namespaces + + metadata { + name = var.deployer_application_istio_rolebinding_name + namespace = each.key + } + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "ClusterRole" + name = var.deployer_application_istio_role_name + } + subject { + kind = "User" + name = var.cicd_k8s_user_name + api_group = "rbac.authorization.k8s.io" + } + subject { + kind = "Group" + name = local.cicd_k8s_iam_username + api_group = "rbac.authorization.k8s.io" + } + # depends_on = [kubernetes_namespace.cicd_managed_namespaces] +} + +resource "kubernetes_role_binding" "deployer_application_rolebinding" { + # for_each = toset(local.cicd_managed_namespaces) + for_each = kubernetes_namespace.cicd_managed_namespaces + + metadata { + name = var.deployer_application_rolebinding_name + namespace = each.key + } + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "ClusterRole" + name = var.deployer_application_role_name + } + subject { + kind = "User" + name = var.cicd_k8s_user_name + api_group = "rbac.authorization.k8s.io" + } + subject { + kind = "Group" + name = local.cicd_k8s_iam_username + api_group = "rbac.authorization.k8s.io" + } + # depends_on = [kubernetes_namespace.cicd_managed_namespaces] +} diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/deployer.iam.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/deployer.iam.tf new file mode 100644 index 0000000..7d76a89 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/deployer.iam.tf @@ -0,0 +1,167 @@ +locals { + policy_cicd_k8s_group_name = replace(local.cicd_k8s_iam_username, local._prefixes["eks-user"], local._prefixes["eks-policy"]) + role_cicd_k8s_group_name = replace(local.cicd_k8s_iam_username, local._prefixes["eks-user"], "") + iam_policies_cicd = ["p-inf-manage-access-keys"] +} + +data "aws_iam_policy" "cicd_deployer_policies" { + for_each = toset(local.iam_policies_cicd) + name = each.key +} + +module "service_cicd_deployer" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-user.git" + + iam_username = local.cicd_k8s_iam_username + username = "" + email_address = "" + groups = ["g-inf-ip-restriction"] + generate_password = false + service_account = true + enable_sending_mail = false + create_access_keys = false + profile = var.profile + pgp_key_file = "./init/tf-gpg-key.b64" + + attached_policies = flatten(concat([for k, v in data.aws_iam_policy.cicd_deployer_policies : v.arn], [aws_iam_policy.cicd_deployer.arn])) + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) +} +module "role_cicd_deployer" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade" + + role_name = local.role_cicd_k8s_group_name + role_description = "Role for EKS cluster ${var.cluster_name} for access by ${var.cicd_k8s_group_name}" + enable_ldap_creation = false + assume_policy_document = data.aws_iam_policy_document.cicd_deployer_allow_sts.json + # attached_policies = 
flatten(concat([for k, v in data.aws_iam_policy.cicd_deployer_policies : v.arn], [aws_iam_policy.cicd_deployer.arn]))
+  attached_policies      = [aws_iam_policy.cicd_deployer.arn]
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.application_tags,
+  )
+}
+
+resource "aws_iam_policy" "cicd_deployer" {
+  name        = local.policy_cicd_k8s_group_name
+  path        = "/"
+  description = "Policy for EKS ${var.cluster_name} IAM access ${var.cicd_k8s_group_name}"
+  policy      = data.aws_iam_policy_document.cicd_deployer.json
+}
+
+locals {
+  cicd_deployer_policy_statements = {
+    ECRRead = {
+      actions = [
+        "ecr:Describe*",
+        "ecr:Get*",
+        "ecr:ListImages",
+        "ecr:BatchGetImage",
+        "ecr:BatchCheckLayerAvailability",
+        "ecr:GetDownloadUrlForLayer",
+      ]
+      resources = ["*"]
+    }
+    ECRWrite = {
+      # effect = "Deny"
+      actions = [
+        "ecr:BatchDeleteImage",
+        "ecr:CompleteLayerUpload",
+        "ecr:CreateRepository",
+        "ecr:DeleteRepository",
+        "ecr:InitiateLayerUpload",
+        "ecr:PutImage",
+        "ecr:UploadLayerPart"
+      ]
+      # not_resources = [format(local.common_arn, "ecr", format("repository/eks/%v/*", var.cluster_name))]
+      not_resources = [format(local.common_arn, "ecr", "repository/eks/*")]
+    }
+    EKSRead = {
+      actions = [
+        "eks:ListClusters",
+      ]
+      resources = ["*"]
+    }
+    EKSReadMyClusters = {
+      actions = [
+        "eks:AccessKubernetesApi",
+        "eks:DescribeCluster",
+      ]
+      resources = [format(local.common_arn, "eks", format("%v/%v", "cluster", var.cluster_name))]
+    }
+    # IAMRead = {
+    #   actions = [
+    #     "iam:ListRoles",
+    #   ]
+    #   resources = ["*"]
+    # }
+  }
+}
+
+data "aws_iam_policy_document" "cicd_deployer" {
+  dynamic "statement" {
+    for_each = local.cicd_deployer_policy_statements
+    iterator = s
+    content {
+      sid           = format("%v%vAccess", lookup(s.value, "effect", "Allow"), s.key)
+      effect        = lookup(s.value, "effect", "Allow")
+      actions       = lookup(s.value, "actions", [])
+      resources     = lookup(s.value, "resources", [])
+      not_resources = lookup(s.value, "not_resources", [])
+    }
+  }
+}
+
+# allow anyone in this account to assume the role, if they have the permission to do so
+data "aws_iam_policy_document" "cicd_deployer_allow_sts" {
+  statement {
+    sid     = "AllowSTSAssume"
+    effect  = "Allow"
+    actions = ["sts:AssumeRole"]
+    principals {
+      type = "AWS"
+      identifiers = [
+        format(local.iam_arn, "root"),
+      ]
+    }
+  }
+}
+
+# output "service_cicd_deployer_arn" {
+#   description = "CICD Deployer user ARN"
+#   value       = module.service_cicd_deployer.user_arn
+# }
+#
+# output "service_cicd_deployer_username" {
+#   description = "CICD Deployer username"
+#   value       = module.service_cicd_deployer.user_name
+# }
+
+module "group_cicd_deployer" {
+  source = "git@github.e.it.census.gov:terraform-modules/aws-iam-group.git"
+
+  group_name        = local.cicd_k8s_group_name
+  attached_policies = flatten(concat([for k, v in data.aws_iam_policy.cicd_deployer_policies : v.arn], [aws_iam_policy.cicd_deployer.arn]))
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.application_tags,
+  )
+}
+
+output "info_cicd_deployer" {
+  description = "CICD Deployer IAM details"
+  value = {
+    user_name  = module.service_cicd_deployer.user_name
+    user_arn   = module.service_cicd_deployer.user_arn
+    group_name = module.group_cicd_deployer.group_name
+    group_arn  = module.group_cicd_deployer.group_arn
+  }
+}
diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/kubeconfig.eks-subdirectory.tf
new file mode 120000
index 0000000..e3750a4
--- /dev/null
+++
b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/kubeconfig.eks-subdirectory.tf @@ -0,0 +1 @@ +../includes.d/kubeconfig.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/locals.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/locals.tf new file mode 100644 index 0000000..92d0613 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/locals.tf @@ -0,0 +1,11 @@ +locals { + base_arn = format("arn:%v:%%v:%v:%v:%%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id) + iam_arn = format("arn:%v:iam::%v:%%v", data.aws_arn.current.partition, data.aws_caller_identity.current.account_id) + common_arn = format("arn:%v:%%v:%v:%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id) + + base_tags = { + "eks-cluster-name" = var.cluster_name + "boc:tf_module_version" = local._module_version + "boc:created_by" = "terraform" + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/main.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/main.tf new file mode 100644 index 0000000..ef02738 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/main.tf @@ -0,0 +1,30 @@ +locals { + aws_auth_users = [ + { + userarn = module.service_cicd_deployer.user_arn + aws_username = "" + username = var.cicd_k8s_user_name + groups = [local.cicd_k8s_group_name] + }, + ] + aws_auth_roles = [ + { + rolearn : module.role_dba_administrator.role_arn + aws_rolename : "" + username : var.dba_k8s_user_name + groups = [local.dba_k8s_group_name] + }, + ] +} + +module "awsauth_cluster-roles" { + source = "git@github.e.it.census.gov:terraform-modules/aws-eks.git//patch-aws-auth" + + region = local.region + profile = var.profile + cluster_name = var.cluster_name + aws_auth_users = local.aws_auth_users + aws_auth_roles = local.aws_auth_roles + + keep_temporary_files = false +} diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/prefixes.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/providers.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/region.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/region.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/region.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/tf-run.data b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/tf-run.data new file mode 100644 index 0000000..1d1a079 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/tf-run.data @@ -0,0 +1,18 @@ +VERSION 1.3.0 +REMOTE-STATE +STOP only run this after the cluster roles represented here have been setup in K8S +COMMAND tf-directory-setup.py -l none -f +COMMAND setup-new-directory.sh +COMMAND tf-init -upgrade +LINKTOP init +LINKTOP 
provider_configs.d/provider.ldap_new.auto.tfvars
+LINKTOP provider_configs.d/provider.ldap_new.tf
+LINKTOP provider_configs.d/provider.ldap_new.variables.tf
+LINK versions.tf
+LINK settings.auto.tfvars
+LINK variables.application_tags.auto.tfvars
+POLICY
+ALL
+COMMAND tf-directory-setup.py -l s3
+
+COMMENT cd ../ and continue
diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/variables.auto.tfvars b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/variables.auto.tfvars
new file mode 100644
index 0000000..974aef0
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/variables.auto.tfvars
@@ -0,0 +1,16 @@
+istio_installed_namespace = "istio-system"
+# enable only for cicd needs
+cicd_k8s_group_name = "cicd-deployer"
+cicd_k8s_user_name = "cicd-deployer"
+cicd_managed_namespaces = []
+deployer_application_istio_role_name = "deployer-application-istio-role"
+deployer_application_istio_rolebinding_name = "deployer-application-istio-rolebinding"
+deployer_application_role_name = "deployer-application-role"
+deployer_application_rolebinding_name = "deployer-application-rolebinding"
+deployer_istiosystem_role_name = "deployer-istiosystem-role"
+# enable only for dba account needs (most likely not needed)
+dba_admin_rolebinding_name = "dba-admin-rolebinding"
+dba_administrator_role_name = "dba-admin-role"
+dba_k8s_group_name = "dba-admin"
+dba_k8s_user_name = "dba-admin"
+dba_managed_namespaces = []
diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/variables.eks.tf
new file mode 120000
index 0000000..7dd95db
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/variables.eks.tf
@@ -0,0 +1 @@
+../variables.eks.tf
\ No newline at end of file
diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/variables.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/variables.tf
new file mode 100644
index 0000000..6ec43a6
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/variables.tf
@@ -0,0 +1,83 @@
+variable "deployer_istiosystem_role_name" {
+  description = "The Kubernetes cluster role name of the CICD Deployer for the istio-system namespace"
+  type        = string
+  default     = "deployer-istiosystem-role"
+}
+
+variable "deployer_application_role_name" {
+  description = "The Kubernetes cluster role name of the CICD Deployer"
+  type        = string
+  default     = "deployer-application-role"
+}
+
+variable "deployer_application_istio_role_name" {
+  description = "The Kubernetes cluster role name of the CICD Deployer for Istio resources"
+  type        = string
+  default     = "deployer-application-istio-role"
+}
+
+variable "dba_administrator_role_name" {
+  description = "The Kubernetes cluster role name of the DBA Administrator"
+  type        = string
+  default     = "dba-admin-role"
+}
+
+variable "istio_installed_namespace" {
+  description = "Namespace where Istio is installed"
+  type        = string
+  default     = "istio-system"
+}
+
+variable "cicd_k8s_user_name" {
+  description = "The user name of the CICD Deployer"
+  type        = string
+  default     = "cicd-deployer"
+}
+variable "cicd_k8s_group_name" {
+  description = "The group name the CICD Deployer belongs to (excluding the prefix for service account and cluster)"
+  type        = string
+  default     = "cicd-deployer"
+}
+
+variable "dba_k8s_user_name" {
+  description = "The user name of the DBA Administrator"
+  type        = string
+  default     = "dba-admin"
+}
+variable "dba_k8s_group_name" {
+  description = "The group name the dba-admin user belongs to (excluding the prefix for service account and cluster)"
+  type        = string
+  default     = "dba-admin"
+}
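+
+# For example, with cluster_name = "adsd-cumulus-dev" the effective group name
+# becomes "s-eks-adsd-cumulus-dev-dba-admin" once the service-account and cluster
+# prefixes are applied (see the locals in dba-rolebinding.tf).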
"dba-admin" +} + +variable "deployer_application_rolebinding_name" { + description = "Role binding name of deployer that binding to role deployer_application_cluster_role" + type = string + default = "deployer-application-rolebinding" +} + +variable "deployer_application_istio_rolebinding_name" { + description = "Role binding name of deployer that binding to role deployer_application_cluster_role" + type = string + default = "deployer-application-istio-rolebinding" +} + +variable "dba_admin_rolebinding_name" { + description = "Role binding name of deployer that binding to role deployer_application_cluster_role" + type = string + default = "dba-admin-rolebinding" +} + +variable "cicd_managed_namespaces" { + description = "Deployer managed namespaces that deploy can create resources in (excluding cluster name prefix)" + type = list(any) + default = [] +} + +variable "dba_managed_namespaces" { + description = "DBA admin managed namespaces (excluding cluster name prefix)" + type = list(any) + default = [] +} diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/version.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/cluster-roles/versions.tf b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/versions.tf new file mode 120000 index 0000000..8bd0ff1 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/cluster-roles/versions.tf @@ -0,0 +1 @@ +../versions.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/.gitignore b/examples/full-cluster-tf-upgrade/1.32/common-services/.gitignore new file mode 100644 index 0000000..1ae9a3f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/.gitignore @@ -0,0 +1 @@ +certs/*.key diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/.tf-control b/examples/full-cluster-tf-upgrade/1.32/common-services/.tf-control new file mode 100644 index 0000000..280f449 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/.tf-control @@ -0,0 +1,20 @@ +# .tf-control +# allows for setting a specific command to be used for tf-* commands under this git repo +# see tf-control.sh help for more info + +TFCONTROL_VERSION="1.0.5" + +TFCOMMAND="terraform_latest" +# TF_CLI_CONFIG_FILE=PATH-TO-FILE/.tf-control.tfrc +# TFARGS="" +# TFNOLOG="" +# TFNOCOLOR="" + +# use the following to force a specific version. An upgrade of an existing 0.12.31 to 1.x +# needs you to cycle through 0.13.17, 0.14.11, and then latest (0.15.5 not needed). Other +# steps in between. 
+#
+#TFCOMMAND="terraform_0.12.31"
+#TFCOMMAND="terraform_0.13.7"
+#TFCOMMAND="terraform_0.14.11"
+#TFCOMMAND="terraform_0.15.5"
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.32/common-services/.tf-control.tfrc
new file mode 100644
index 0000000..7425488
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/common-services/.tf-control.tfrc
@@ -0,0 +1,24 @@
+TFCONTROL_VERSION="1.0.5"
+
+# https://www.terraform.io/docs/cli/config/config-file.html
+plugin_cache_dir = "/data/terraform/terraform.d/plugin-cache"
+#disable_checkpoint = true
+
+provider_installation {
+#  filesystem_mirror {
+#    path    = "/apps/terraform/terraform.d/providers"
+#    include = [ "*/*/*" ]
+#  }
+  filesystem_mirror {
+    path    = "/data/terraform/terraform.d/providers"
+    include = [ "*/*/*" ]
+  }
+#  filesystem_mirror {
+#    path    = "/apps/terraform/terraform.d/providers"
+#    include = [ "external.terraform.census.gov/*/*" ]
+#  }
+  direct {
+    include = [ "*/*/*" ]
+  }
+}
+
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/README.md b/examples/full-cluster-tf-upgrade/1.32/common-services/README.md
new file mode 100644
index 0000000..f8b7f53
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/common-services/README.md
@@ -0,0 +1,66 @@
+# common-services
+
+This is the directory where the common services are set up:
+
+* cert-manager
+* istio service mesh
+* metrics-server
+
+## Setup Steps
+
+First, copy the `remote_state.yml` from the parent and update `directory` to be the current directory.
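+
+For example (a minimal sketch of that step):
+
+```console
+% cp ../remote_state.yml .
+% vi remote_state.yml    # set directory to common-services
+```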
+
+## Terraform Automated
+
+A `tf-run.data` file exists here, so the simplest way to implement it is with the `tf-run.sh` script.
+
+* copy the `remote_state.yml` from the parent and update `directory` to be the current directory
+* run `tf-run.sh`
+
+```console
+% tf-run.sh apply
+```
+
+* an example of the `tf-run.sh` steps
+
+This is part of a larger cluster configuration, so at the end of the run it indicates another directory
+to visit when done.
+
+```console
+% tf-run.sh list
+* running action=plan
+* START: tf-run.sh v1.1.2 start=1636563207 end= logfile=logs/run.plan.20211110.1636563207.log (not-created)
+* reading from tf-run.data
+* read 23 entries from tf-run.data
+> list
+** START: start=1636563207
+* 1 COMMAND> tf-directory-setup.py -l none -f
+* 2 COMMAND> setup-new-directory.sh
+* 3 COMMAND> tf-init -upgrade
+* 4 tf-plan -target=tls_private_key.ca
+* 5 tf-plan -target=tls_cert_request.ca
+* 6 tf-plan -target=null_resource.ca_root_cert
+* 7 tf-plan -target=null_resource.ca_files
+* 8 tf-plan -target=null_resource.ca_cert
+* 9 tf-plan -target=local_file.ca_bundle_cert
+* 10 COMMAND> tf-directory-setup.py -l s3
+* 11 COMMENT> submit certs/*csr using command output listed in apply to TCO for signing
+* 12 STOP> once that is available, change cert_download to true
+* 13 COMMAND> terraform taint null_resource.ca_cert
+* 14 tf-plan -target=null_resource.ca_root_cert
+* 15 tf-plan -target=null_resource.ca_files
+* 16 tf-plan -target=null_resource.ca_cert
+* 17 COMMENT> second run is to complete the steps
+* 18 tf-plan -target=null_resource.ca_root_cert
+* 19 tf-plan -target=null_resource.ca_files
+* 20 tf-plan -target=null_resource.ca_cert
+* 21 tf-plan
+* 22 COMMENT> run: git-secret add certs/*.key; git-secret hide
+* 23 COMMENT> be sure to add all files to git, and be sure to commit -a to get .gitsecret/ changes
+** END: start=1636563207 end=1636563207 elapsed=0 logfile=logs/run.plan.20211110.1636563207.log (not-created)
+```
+
+It is highly recommended to use the `tf-run.sh` approach.
+
+## Terraform Manual
+
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/README.output.md b/examples/full-cluster-tf-upgrade/1.32/common-services/README.output.md
new file mode 100644
index 0000000..089cab7
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/common-services/README.output.md
@@ -0,0 +1,84 @@
+```console
+% kubectl -n kube-system get pods -o wide
+NAME                                            READY   STATUS    RESTARTS   AGE     IP              NODE                            NOMINATED NODE   READINESS GATES
+aws-load-balancer-controller-54fdf64896-jzwsr   1/1     Running   0          23h     10.194.26.74    ip-10-194-26-252.ec2.internal
+aws-load-balancer-controller-54fdf64896-qqt6d   1/1     Running   0          23h     10.194.24.242   ip-10-194-24-49.ec2.internal
+aws-node-29kmc                                  1/1     Running   0          7d1h    10.194.24.90    ip-10-194-24-90.ec2.internal
+aws-node-6d8ls                                  1/1     Running   1          7d1h    10.194.25.120   ip-10-194-25-120.ec2.internal
+aws-node-6vrbg                                  1/1     Running   1          7d1h    10.194.26.252   ip-10-194-26-252.ec2.internal
+aws-node-ldgxc                                  1/1     Running   1          7d1h    10.194.24.49    ip-10-194-24-49.ec2.internal
+coredns-65bfc5645f-g86rx                        1/1     Running   0          7d2h    10.194.24.207   ip-10-194-24-90.ec2.internal
+coredns-65bfc5645f-xj9rl                        1/1     Running   0          7d2h    10.194.24.69    ip-10-194-24-90.ec2.internal
+efs-csi-controller-65fb886fd4-7slw6             3/3     Running   0          2d21h   10.194.24.90    ip-10-194-24-90.ec2.internal
+efs-csi-controller-65fb886fd4-vcf9l             3/3     Running   0          2d21h   10.194.25.120   ip-10-194-25-120.ec2.internal
+efs-csi-node-6t6v6                              3/3     Running   0          2d21h   10.194.25.120   ip-10-194-25-120.ec2.internal
+efs-csi-node-kxqfb                              3/3     Running   0          2d21h   10.194.24.49    ip-10-194-24-49.ec2.internal
+efs-csi-node-p8hzn                              3/3     Running   0          2d21h   10.194.26.252   ip-10-194-26-252.ec2.internal
+efs-csi-node-xxq9h                              3/3     Running   0          2d21h   10.194.24.90    ip-10-194-24-90.ec2.internal
+kube-proxy-78n7f                                1/1     Running   0          7d1h    10.194.24.90    ip-10-194-24-90.ec2.internal
+kube-proxy-cms7c                                1/1     Running   0          7d1h    10.194.24.49    ip-10-194-24-49.ec2.internal
+kube-proxy-h2t6n                                1/1     Running   0          7d1h    10.194.26.252   ip-10-194-26-252.ec2.internal
+kube-proxy-jkxnz                                1/1     Running   0          7d1h
10.194.25.120 ip-10-194-25-120.ec2.internal +``` + +```console +% kubectl get pods --all-namespaces -o wide +NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +cert-manager cert-manager-7fcbc79fc5-xwt4s 1/1 Running 0 51m 10.194.24.138 ip-10-194-24-49.ec2.internal +cert-manager cert-manager-cainjector-6b7f4575f4-xpgnc 1/1 Running 0 51m 10.194.24.56 ip-10-194-24-49.ec2.internal +cert-manager cert-manager-webhook-6cd54b96fc-rvld4 1/1 Running 0 51m 10.194.24.170 ip-10-194-24-90.ec2.internal +istio-system istio-egressgateway-7fcc58ddf7-dtx25 1/1 Running 0 95m 10.194.26.120 ip-10-194-26-252.ec2.internal +istio-system istio-ingressgateway-75f76c546b-vx2v6 1/1 Running 0 95m 10.194.24.8 ip-10-194-24-90.ec2.internal +istio-system istiod-85b6f86f94-vqfj2 1/1 Running 0 95m 10.194.25.155 ip-10-194-25-120.ec2.internal +kube-system aws-load-balancer-controller-54fdf64896-jzwsr 1/1 Running 0 23h 10.194.26.74 ip-10-194-26-252.ec2.internal +kube-system aws-load-balancer-controller-54fdf64896-qqt6d 1/1 Running 0 23h 10.194.24.242 ip-10-194-24-49.ec2.internal +kube-system aws-node-29kmc 1/1 Running 0 7d1h 10.194.24.90 ip-10-194-24-90.ec2.internal +kube-system aws-node-6d8ls 1/1 Running 1 7d1h 10.194.25.120 ip-10-194-25-120.ec2.internal +kube-system aws-node-6vrbg 1/1 Running 1 7d1h 10.194.26.252 ip-10-194-26-252.ec2.internal +kube-system aws-node-ldgxc 1/1 Running 1 7d1h 10.194.24.49 ip-10-194-24-49.ec2.internal +kube-system coredns-65bfc5645f-g86rx 1/1 Running 0 7d2h 10.194.24.207 ip-10-194-24-90.ec2.internal +kube-system coredns-65bfc5645f-xj9rl 1/1 Running 0 7d2h 10.194.24.69 ip-10-194-24-90.ec2.internal +kube-system efs-csi-controller-65fb886fd4-7slw6 3/3 Running 0 2d21h 10.194.24.90 ip-10-194-24-90.ec2.internal +kube-system efs-csi-controller-65fb886fd4-vcf9l 3/3 Running 0 2d21h 10.194.25.120 ip-10-194-25-120.ec2.internal +kube-system efs-csi-node-6t6v6 3/3 Running 0 2d21h 10.194.25.120 ip-10-194-25-120.ec2.internal +kube-system efs-csi-node-kxqfb 3/3 Running 0 2d21h 10.194.24.49 ip-10-194-24-49.ec2.internal +kube-system efs-csi-node-p8hzn 3/3 Running 0 2d21h 10.194.26.252 ip-10-194-26-252.ec2.internal +kube-system efs-csi-node-xxq9h 3/3 Running 0 2d21h 10.194.24.90 ip-10-194-24-90.ec2.internal +kube-system kube-proxy-78n7f 1/1 Running 0 7d1h 10.194.24.90 ip-10-194-24-90.ec2.internal +kube-system kube-proxy-cms7c 1/1 Running 0 7d1h 10.194.24.49 ip-10-194-24-49.ec2.internal +kube-system kube-proxy-h2t6n 1/1 Running 0 7d1h 10.194.26.252 ip-10-194-26-252.ec2.internal +kube-system kube-proxy-jkxnz 1/1 Running 0 7d1h 10.194.25.120 ip-10-194-25-120.ec2.internal +operators istio-operator-7cc8974d48-f2j2m 1/1 Running 0 14h 10.194.26.211 ip-10-194-26-252.ec2.internal +sample-alb sample-alb-8744f54f9-7w4cj 1/1 Running 0 23h 10.194.25.67 ip-10-194-25-120.ec2.internal +sample-alb sample-alb-8744f54f9-gs8f5 1/1 Running 0 23h 10.194.24.147 ip-10-194-24-49.ec2.internal +sample-alb sample-alb-8744f54f9-v6kgr 1/1 Running 0 23h 10.194.26.168 ip-10-194-26-252.ec2.internal +sample-elb sample-elb-69786b5f7d-d7nb4 1/1 Running 0 2d21h 10.194.26.178 ip-10-194-26-252.ec2.internal +sample-elb sample-elb-69786b5f7d-mw7jb 1/1 Running 0 2d21h 10.194.24.193 ip-10-194-24-49.ec2.internal +sample-elb sample-elb-69786b5f7d-tqz2s 1/1 Running 0 2d21h 10.194.25.96 ip-10-194-25-120.ec2.internal +sample-nlb sample-nlb-6cd5769dfb-n8dmd 1/1 Running 0 2d21h 10.194.25.198 ip-10-194-25-120.ec2.internal +sample-nlb sample-nlb-6cd5769dfb-qw8n4 1/1 Running 0 2d21h 10.194.24.132 ip-10-194-24-49.ec2.internal +sample-nlb 
sample-nlb-6cd5769dfb-t2nhp                      1/1     Running   0          2d21h   10.194.26.18    ip-10-194-26-252.ec2.internal
+```
+
+```console
+% kubectl -n istio-system get secret | grep -iE "ca-secret|tls"
+istio-ca-secret   istio.io/ca-root    5   7d2h
+nginx-cert        kubernetes.io/tls   3   6d20h
+root-secret       kubernetes.io/tls   3   7d14h
+```
+
+```console
+% kubectl get pods --all-namespaces -o wide | grep -i cert
+cert-manager   cert-manager-7fcbc79fc5-xwt4s              1/1   Running   0   7d22h   10.194.24.138   ip-10-194-24-49.ec2.internal
+cert-manager   cert-manager-cainjector-6b7f4575f4-xpgnc   1/1   Running   0   7d22h   10.194.24.56    ip-10-194-24-49.ec2.internal
+cert-manager   cert-manager-webhook-6cd54b96fc-rvld4      1/1   Running   0   7d22h   10.194.24.170   ip-10-194-24-90.ec2.internal
+```
+
+```console
+% kubectl -n cert-manager get secrets
+NAME          TYPE     DATA   AGE
+ca-key-pair   Opaque   2      5m2s
+...
+% kubectl get clusterissuer
+NAME            READY   AGE
+clusterissuer   True    5m36s
+```
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/cert-manager-issuer.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/cert-manager-issuer.tf
new file mode 100644
index 0000000..77ccec0
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/common-services/cert-manager-issuer.tf
@@ -0,0 +1,15 @@
+module "subordinate_ca" {
+  source = "git@github.e.it.census.gov:terraform-modules/aws-certificates//acmpca-eks-cert-manager"
+
+  cluster_name  = var.cluster_name
+  contact_email = var.contact_email
+  import_to_acm = true
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.account_tags,
+    var.infrastructure_tags,
+    var.application_tags,
+  )
+}
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/.helmignore b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/Chart.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/Chart.yaml
new file mode 100644
index 0000000..87b5611
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/Chart.yaml
@@ -0,0 +1,20 @@
+apiVersion: v2
+appVersion: 1.28.2
+description: Scales Kubernetes worker nodes within autoscaling groups.
+engine: gotpl +home: https://github.com/kubernetes/autoscaler +icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png +maintainers: + - email: e.bailey@sportradar.com + name: yurrriq + - email: mgoodness@gmail.com + name: mgoodness + - email: guyjtempleton@googlemail.com + name: gjtempleton + - email: scott.crooks@gmail.com + name: sc250024 +name: cluster-autoscaler +sources: + - https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler +type: application +version: 9.34.0 diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/README.md b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/README.md new file mode 100644 index 0000000..59a6e0c --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/README.md @@ -0,0 +1,5 @@ +# cluster-autoscaler + +Scales Kubernetes worker nodes within autoscaling groups. + +Refer to following location for more info: https://artifacthub.io/packages/helm/cluster-autoscaler/cluster-autoscaler diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/NOTES.txt b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/NOTES.txt new file mode 100644 index 0000000..94e211e --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/NOTES.txt @@ -0,0 +1,18 @@ +{{- if or .Values.autoDiscovery.clusterName .Values.autoscalingGroups -}} + +To verify that cluster-autoscaler has started, run: + + kubectl --namespace={{ .Release.Namespace }} get pods -l "app.kubernetes.io/name={{ template "cluster-autoscaler.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" + +{{- else -}} + +############################################################################## +#### ERROR: You must specify values for either #### +#### autoDiscovery.clusterName or autoscalingGroups[] #### +############################################################################## + +The deployment and pod will not be created and the installation is not functional +See README: + open https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler + +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/_helpers.tpl new file mode 100644 index 0000000..726086e --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/_helpers.tpl @@ -0,0 +1,117 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cluster-autoscaler.name" -}} +{{- default (printf "%s-%s" .Values.cloudProvider .Chart.Name) .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "cluster-autoscaler.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default (printf "%s-%s" .Values.cloudProvider .Chart.Name) .Values.nameOverride -}} +{{- if ne $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cluster-autoscaler.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return instance and name labels. +*/}} +{{- define "cluster-autoscaler.instance-name" -}} +app.kubernetes.io/instance: {{ .Release.Name | quote }} +app.kubernetes.io/name: {{ include "cluster-autoscaler.name" . | quote }} +{{- end -}} + + +{{/* +Return labels, including instance and name. +*/}} +{{- define "cluster-autoscaler.labels" -}} +{{ include "cluster-autoscaler.instance-name" . }} +app.kubernetes.io/managed-by: {{ .Release.Service | quote }} +helm.sh/chart: {{ include "cluster-autoscaler.chart" . | quote }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "deployment.apiVersion" -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "<1.9-0" $kubeTargetVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. +*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "<1.10-0" $kubeTargetVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the service account name used by the pod. +*/}} +{{- define "cluster-autoscaler.serviceAccountName" -}} +{{- if .Values.rbac.serviceAccount.create -}} + {{ default (include "cluster-autoscaler.fullname" .) .Values.rbac.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.rbac.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return true if the priority expander is enabled +*/}} +{{- define "cluster-autoscaler.priorityExpanderEnabled" -}} +{{- $expanders := splitList "," (default "" .Values.extraArgs.expander) -}} +{{- if has "priority" $expanders -}} +{{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the autodiscoveryparameters for clusterapi. 
+*/}} +{{- define "cluster-autoscaler.capiAutodiscoveryConfig" -}} +{{- if .Values.autoDiscovery.clusterName -}} +{{- print "clusterName=" -}}{{ .Values.autoDiscovery.clusterName }} +{{- end -}} +{{- if and .Values.autoDiscovery.clusterName .Values.autoDiscovery.labels -}} +{{- print "," -}} +{{- end -}} +{{- if .Values.autoDiscovery.labels -}} +{{- range $i, $el := .Values.autoDiscovery.labels -}} +{{- if $i -}}{{- print "," -}}{{- end -}} +{{- range $key, $val := $el -}} +{{- $key -}}{{- print "=" -}}{{- $val -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml new file mode 100644 index 0000000..e3d3655 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml @@ -0,0 +1,163 @@ +{{- if and .Values.rbac.create .Values.rbac.clusterScoped -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - events + - endpoints + verbs: + - create + - patch + - apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create + - apiGroups: + - "" + resources: + - pods/status + verbs: + - update + - apiGroups: + - "" + resources: + - endpoints + resourceNames: + - cluster-autoscaler + verbs: + - get + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - watch + - list + - get + - update + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - replicationcontrollers + - persistentvolumeclaims + - persistentvolumes + verbs: + - watch + - list + - get + - apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - watch + - list + - get + - apiGroups: + - batch + - extensions + resources: + - jobs + verbs: + - get + - list + - patch + - watch + - apiGroups: + - extensions + resources: + - replicasets + - daemonsets + verbs: + - watch + - list + - get + - apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - watch + - list + - apiGroups: + - apps + resources: + - daemonsets + - replicasets + - statefulsets + verbs: + - watch + - list + - get + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + - csidrivers + - csistoragecapacities + verbs: + - watch + - list + - get + - apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - coordination.k8s.io + resourceNames: + - cluster-autoscaler + resources: + - leases + verbs: + - get + - update +{{- if .Values.rbac.pspEnabled }} + - apiGroups: + - extensions + - policy + resources: + - podsecuritypolicies + resourceNames: + - {{ template "cluster-autoscaler.fullname" . 
}} + verbs: + - use +{{- end -}} +{{- if and ( and ( eq .Values.cloudProvider "clusterapi" ) ( .Values.rbac.clusterScoped ) ( or ( eq .Values.clusterAPIMode "incluster-incluster" ) ( eq .Values.clusterAPIMode "kubeconfig-incluster" ) ))}} + - apiGroups: + - cluster.x-k8s.io + resources: + - machinedeployments + - machinedeployments/scale + - machines + - machinesets + verbs: + - get + - list + - update + - watch +{{- end }} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..d2384dc --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rbac.create .Values.rbac.clusterScoped -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cluster-autoscaler.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "cluster-autoscaler.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/deployment.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/deployment.yaml new file mode 100644 index 0000000..e8edc7f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/deployment.yaml @@ -0,0 +1,291 @@ +{{- if or ( or .Values.autoDiscovery.clusterName .Values.autoDiscovery.labels ) .Values.autoscalingGroups }} +{{/* one of the above is required */}} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: Deployment +metadata: + annotations: +{{ toYaml .Values.deployment.annotations | indent 4 }} + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: +{{ include "cluster-autoscaler.instance-name" . | indent 6 }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 6 }} + {{- end }} +{{- if .Values.updateStrategy }} + strategy: + {{ toYaml .Values.updateStrategy | nindent 4 | trim }} +{{- end }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + labels: +{{ include "cluster-autoscaler.instance-name" . | indent 8 }} + {{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | indent 8 }} + {{- end }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: "{{ .Values.dnsPolicy }}" + {{- end }} + containers: + - name: {{ template "cluster-autoscaler.name" . 
}} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + command: + - ./cluster-autoscaler + - --cloud-provider={{ .Values.cloudProvider }} + {{- if and (eq .Values.cloudProvider "clusterapi") (eq .Values.clusterAPIMode "kubeconfig-incluster") }} + - --namespace={{ .Values.clusterAPIConfigMapsNamespace | default "kube-system" }} + {{- else }} + - --namespace={{ .Release.Namespace }} + {{- end }} + {{- if .Values.autoscalingGroups }} + {{- range .Values.autoscalingGroups }} + - --nodes={{ .minSize }}:{{ .maxSize }}:{{ .name }} + {{- end }} + {{- end }} + {{- if eq .Values.cloudProvider "aws" }} + {{- if .Values.autoDiscovery.clusterName }} + - --node-group-auto-discovery=asg:tag={{ tpl (join "," .Values.autoDiscovery.tags) . }} + {{- end }} + {{- else if eq .Values.cloudProvider "gce" }} + {{- if .Values.autoscalingGroupsnamePrefix }} + {{- range .Values.autoscalingGroupsnamePrefix }} + - --node-group-auto-discovery=mig:namePrefix={{ .name }},min={{ .minSize }},max={{ .maxSize }} + {{- end }} + {{- end }} + {{- else if eq .Values.cloudProvider "magnum" }} + {{- if .Values.autoDiscovery.clusterName }} + - --cluster-name={{ .Values.autoDiscovery.clusterName }} + - --node-group-auto-discovery=magnum:role={{ tpl (join "," .Values.autoDiscovery.roles) . }} + {{- else }} + - --cluster-name={{ .Values.magnumClusterName }} + {{- end }} + {{- else if eq .Values.cloudProvider "clusterapi" }} + {{- if or .Values.autoDiscovery.clusterName .Values.autoDiscovery.labels }} + - --node-group-auto-discovery=clusterapi:{{ template "cluster-autoscaler.capiAutodiscoveryConfig" . }} + {{- end }} + {{- if eq .Values.clusterAPIMode "incluster-kubeconfig"}} + - --cloud-config={{ .Values.clusterAPICloudConfigPath }} + {{- else if eq .Values.clusterAPIMode "kubeconfig-incluster"}} + - --kubeconfig={{ .Values.clusterAPIWorkloadKubeconfigPath }} + - --clusterapi-cloud-config-authoritative + {{- else if eq .Values.clusterAPIMode "kubeconfig-kubeconfig"}} + - --kubeconfig={{ .Values.clusterAPIWorkloadKubeconfigPath }} + - --cloud-config={{ .Values.clusterAPICloudConfigPath }} + {{- else if eq .Values.clusterAPIMode "single-kubeconfig"}} + - --kubeconfig={{ .Values.clusterAPIWorkloadKubeconfigPath }} + {{- end }} + {{- end }} + {{- if eq .Values.cloudProvider "magnum" }} + - --cloud-config={{ .Values.cloudConfigPath }} + {{- end }} + {{- range $key, $value := .Values.extraArgs }} + {{- if not (kindIs "invalid" $value) }} + - --{{ $key | mustRegexFind "^[^_]+" }}={{ $value }} + {{- else }} + - --{{ $key | mustRegexFind "^[^_]+" }} + {{- end }} + {{- end }} + env: + {{- if and (eq .Values.cloudProvider "aws") (ne .Values.awsRegion "") }} + - name: AWS_REGION + value: "{{ .Values.awsRegion }}" + {{- if .Values.awsAccessKeyID }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: AwsAccessKeyId + name: {{ template "cluster-autoscaler.fullname" . }} + {{- end }} + {{- if .Values.awsSecretAccessKey }} + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AwsSecretAccessKey + name: {{ template "cluster-autoscaler.fullname" . }} + {{- end }} + {{- else if eq .Values.cloudProvider "azure" }} + - name: ARM_SUBSCRIPTION_ID + valueFrom: + secretKeyRef: + key: SubscriptionID + name: {{ template "cluster-autoscaler.fullname" . }} + - name: ARM_RESOURCE_GROUP + valueFrom: + secretKeyRef: + key: ResourceGroup + name: {{ template "cluster-autoscaler.fullname" . 
}} + - name: ARM_VM_TYPE + valueFrom: + secretKeyRef: + key: VMType + name: {{ template "cluster-autoscaler.fullname" . }} + - name: AZURE_CLUSTER_NAME + valueFrom: + secretKeyRef: + key: ClusterName + name: {{ template "cluster-autoscaler.fullname" . }} + {{- if .Values.azureUseManagedIdentityExtension }} + - name: ARM_USE_MANAGED_IDENTITY_EXTENSION + value: "true" + {{- else }} + - name: ARM_TENANT_ID + valueFrom: + secretKeyRef: + key: TenantID + name: {{ template "cluster-autoscaler.fullname" . }} + - name: ARM_CLIENT_ID + valueFrom: + secretKeyRef: + key: ClientID + name: {{ template "cluster-autoscaler.fullname" . }} + - name: ARM_CLIENT_SECRET + valueFrom: + secretKeyRef: + key: ClientSecret + name: {{ template "cluster-autoscaler.fullname" . }} + - name: AZURE_NODE_RESOURCE_GROUP + valueFrom: + secretKeyRef: + key: NodeResourceGroup + name: {{ template "cluster-autoscaler.fullname" . }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.extraEnv }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + {{- range $key, $value := .Values.extraEnvConfigMaps }} + - name: {{ $key }} + valueFrom: + configMapKeyRef: + name: {{ default (include "cluster-autoscaler.fullname" $) $value.name }} + key: {{ required "Must specify key!" $value.key }} + {{- end }} + {{- range $key, $value := .Values.extraEnvSecrets }} + - name: {{ $key }} + valueFrom: + secretKeyRef: + name: {{ default (include "cluster-autoscaler.fullname" $) $value.name }} + key: {{ required "Must specify key!" $value.key }} + {{- end }} + {{- if or .Values.envFromSecret .Values.envFromConfigMap }} + envFrom: + {{- if .Values.envFromSecret }} + - secretRef: + name: {{ .Values.envFromSecret }} + {{- end }} + {{- if .Values.envFromConfigMap }} + - configMapRef: + name: {{ .Values.envFromConfigMap }} + {{- end }} + {{- end }} + livenessProbe: + httpGet: + path: /health-check + port: 8085 + ports: + - containerPort: 8085 + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- if .Values.containerSecurityContext }} + securityContext: + {{ toYaml .Values.containerSecurityContext | nindent 12 | trim }} + {{- end }} + {{- if or (eq .Values.cloudProvider "magnum") .Values.extraVolumeSecrets .Values.extraVolumeMounts .Values.clusterAPIKubeconfigSecret }} + volumeMounts: + {{- if eq .Values.cloudProvider "magnum" }} + - name: cloudconfig + mountPath: {{ .Values.cloudConfigPath }} + readOnly: true + {{- end }} + {{- if and (eq .Values.cloudProvider "magnum") (.Values.magnumCABundlePath) }} + - name: ca-bundle + mountPath: {{ .Values.magnumCABundlePath }} + readOnly: true + {{- end }} + {{- range $key, $value := .Values.extraVolumeSecrets }} + - name: {{ $key }} + mountPath: {{ required "Must specify mountPath!" $value.mountPath }} + readOnly: true + {{- end }} + {{- if .Values.clusterAPIKubeconfigSecret }} + - name: cluster-api-kubeconfig + mountPath: {{ .Values.clusterAPIWorkloadKubeconfigPath | trimSuffix "/value" }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + serviceAccountName: {{ template "cluster-autoscaler.serviceAccountName" . 
}} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.securityContext }} + securityContext: + {{ toYaml .Values.securityContext | nindent 8 | trim }} + {{- end }} + {{- if or (eq .Values.cloudProvider "magnum") .Values.extraVolumeSecrets .Values.extraVolumes .Values.clusterAPIKubeconfigSecret }} + volumes: + {{- if eq .Values.cloudProvider "magnum" }} + - name: cloudconfig + hostPath: + path: {{ .Values.cloudConfigPath }} + {{- end }} + {{- if and (eq .Values.cloudProvider "magnum") (.Values.magnumCABundlePath) }} + - name: ca-bundle + hostPath: + path: {{ .Values.magnumCABundlePath }} + {{- end }} + {{- range $key, $value := .Values.extraVolumeSecrets }} + - name: {{ $key }} + secret: + secretName: {{ default (include "cluster-autoscaler.fullname" $) $value.name }} + {{- if $value.items }} + items: + {{- toYaml $value.items | nindent 14 }} + {{- end }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if .Values.clusterAPIKubeconfigSecret }} + - name: cluster-api-kubeconfig + secret: + secretName: {{ .Values.clusterAPIKubeconfigSecret }} + {{- end }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/pdb.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/pdb.yaml new file mode 100644 index 0000000..19a7d01 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/pdb.yaml @@ -0,0 +1,16 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: +{{ include "cluster-autoscaler.instance-name" . | indent 6 }} +{{- if .Values.podDisruptionBudget }} + {{ toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..28369bf --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml @@ -0,0 +1,46 @@ +{{- if .Values.rbac.pspEnabled }} +apiVersion: {{ template "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "cluster-autoscaler.fullname" . }} + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} +spec: + # Prevents running in privileged mode + privileged: false + # Required to prevent escalations to root. 
+ allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'secret' + - 'hostPath' + - 'emptyDir' + - 'projected' + - 'downwardAPI' +{{- if eq .Values.cloudProvider "gce" }} + allowedHostPaths: + - pathPrefix: {{ .Values.cloudConfigPath }} +{{- end }} + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml new file mode 100644 index 0000000..1e5b895 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml @@ -0,0 +1,22 @@ +{{- if hasKey .Values.extraArgs "expander" }} +{{- if and (.Values.expanderPriorities) (include "cluster-autoscaler.priorityExpanderEnabled" .) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-autoscaler-priority-expander + namespace: {{ .Release.Namespace }} + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + {{- if .Values.priorityConfigMapAnnotations }} + annotations: +{{ toYaml .Values.priorityConfigMapAnnotations | indent 4 }} + {{- end }} +data: + priorities: |- +{{- if kindIs "string" .Values.expanderPriorities }} +{{ .Values.expanderPriorities | indent 4 }} +{{- else }} +{{ toYaml .Values.expanderPriorities | indent 4 }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml new file mode 100644 index 0000000..097c969 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml @@ -0,0 +1,15 @@ +{{- if .Values.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "cluster-autoscaler.fullname" . }} + {{- if .Values.prometheusRule.namespace }} + namespace: {{ .Values.prometheusRule.namespace }} + {{- end }} + labels: {{- toYaml .Values.prometheusRule.additionalLabels | nindent 4 }} +spec: + groups: + - name: {{ include "cluster-autoscaler.fullname" . }} + interval: {{ .Values.prometheusRule.interval }} + rules: {{- tpl (toYaml .Values.prometheusRule.rules) . | nindent 8 }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/role.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/role.yaml new file mode 100644 index 0000000..b22fb58 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/role.yaml @@ -0,0 +1,78 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . 
}} + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create +{{- if (include "cluster-autoscaler.priorityExpanderEnabled" .) }} + - list + - watch +{{- end }} + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - cluster-autoscaler-status +{{- if (include "cluster-autoscaler.priorityExpanderEnabled" .) }} + - cluster-autoscaler-priority-expander +{{- end }} + verbs: + - delete + - get + - update +{{- if (include "cluster-autoscaler.priorityExpanderEnabled" .) }} + - watch +{{- end }} +{{- if eq (default "" (index .Values.extraArgs "leader-elect-resource-lock")) "configmaps" }} + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - cluster-autoscaler + verbs: + - get + - update +{{- end }} +{{- if and ( and ( eq .Values.cloudProvider "clusterapi" ) ( not .Values.rbac.clusterScoped ) ( or ( eq .Values.clusterAPIMode "incluster-incluster" ) ( eq .Values.clusterAPIMode "kubeconfig-incluster" ) ))}} + - apiGroups: + - cluster.x-k8s.io + resources: + - machinedeployments + - machinedeployments/scale + - machines + - machinesets + verbs: + - get + - list + - update + - watch +{{- end }} +{{- if ( not .Values.rbac.clusterScoped ) }} + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - coordination.k8s.io + resourceNames: + - cluster-autoscaler + resources: + - leases + verbs: + - get + - update +{{- end }} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml new file mode 100644 index 0000000..ba5f037 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "cluster-autoscaler.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "cluster-autoscaler.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/secret.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/secret.yaml new file mode 100644 index 0000000..9c58d0f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/secret.yaml @@ -0,0 +1,21 @@ +{{- if or (eq .Values.cloudProvider "azure") (and (eq .Values.cloudProvider "aws") (not (has "" (list .Values.awsAccessKeyID .Values.awsSecretAccessKey)))) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cluster-autoscaler.fullname" . 
}} + namespace: {{ .Release.Namespace }} +data: +{{- if eq .Values.cloudProvider "azure" }} + ClientID: "{{ .Values.azureClientID | b64enc }}" + ClientSecret: "{{ .Values.azureClientSecret | b64enc }}" + ResourceGroup: "{{ .Values.azureResourceGroup | b64enc }}" + SubscriptionID: "{{ .Values.azureSubscriptionID | b64enc }}" + TenantID: "{{ .Values.azureTenantID | b64enc }}" + VMType: "{{ .Values.azureVMType | b64enc }}" + ClusterName: "{{ .Values.azureClusterName | b64enc }}" + NodeResourceGroup: "{{ .Values.azureNodeResourceGroup | b64enc }}" +{{- else if eq .Values.cloudProvider "aws" }} + AwsAccessKeyId: "{{ .Values.awsAccessKeyID | b64enc }}" + AwsSecretAccessKey: "{{ .Values.awsSecretAccessKey | b64enc }}" +{{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/service.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/service.yaml new file mode 100644 index 0000000..d630512 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/service.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} + name: {{ template "cluster-autoscaler.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: +{{- if .Values.service.clusterIP }} + clusterIP: "{{ .Values.service.clusterIP }}" +{{- end }} +{{- if .Values.service.externalIPs }} + externalIPs: +{{ toYaml .Values.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - port: {{ .Values.service.servicePort }} + protocol: TCP + targetPort: 8085 + name: {{ .Values.service.portName }} + selector: +{{ include "cluster-autoscaler.instance-name" . | indent 4 }} + type: "{{ .Values.service.type }}" diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/serviceaccount.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/serviceaccount.yaml new file mode 100644 index 0000000..29c2580 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.rbac.create .Values.rbac.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- if .Values.rbac.serviceAccount.annotations }} + annotations: {{ toYaml .Values.rbac.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.rbac.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml new file mode 100644 index 0000000..be37239 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml @@ -0,0 +1,24 @@ +{{ if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cluster-autoscaler.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- end }} + labels: + {{- range $key, $value := .Values.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + selector: + matchLabels: +{{ include "cluster-autoscaler.instance-name" . | indent 6 }} + endpoints: + - port: {{ .Values.service.portName }} + interval: {{ .Values.serviceMonitor.interval }} + path: {{ .Values.serviceMonitor.path }} + namespaceSelector: + matchNames: + - {{.Release.Namespace}} +{{ end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/values.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/values.yaml new file mode 100644 index 0000000..bdaaf63 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/cluster-autoscaler/values.yaml @@ -0,0 +1,378 @@ +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +# affinity -- Affinity for pod assignment +affinity: {} + +autoDiscovery: + # cloudProviders `aws`, `gce`, `magnum` and `clusterapi` are supported by auto-discovery at this time + # AWS: Set tags as described in https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#auto-discovery-setup + + # autoDiscovery.clusterName -- Enable autodiscovery for `cloudProvider=aws`, for groups matching `autoDiscovery.tags`. + # Enable autodiscovery for `cloudProvider=clusterapi`, for groups matching `autoDiscovery.labels`. + # Enable autodiscovery for `cloudProvider=gce`, but no MIG tagging required. + # Enable autodiscovery for `cloudProvider=magnum`, for groups matching `autoDiscovery.roles`. + clusterName: "adsd-cumulus-dev" + + # autoDiscovery.tags -- ASG tags to match, run through `tpl`. + tags: + - k8s.io/cluster-autoscaler/enabled + - k8s.io/cluster-autoscaler/{{ .Values.autoDiscovery.clusterName }} + # - kubernetes.io/cluster/{{ .Values.autoDiscovery.clusterName }} + + # autoDiscovery.roles -- Magnum node group roles to match. + roles: + - worker + + # autoDiscovery.labels -- Cluster-API labels to match https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#configuring-node-group-auto-discovery + labels: [] + # - color: green + # - shape: circle +# autoscalingGroups -- For AWS, Azure AKS or Magnum. At least one element is required if not using `autoDiscovery`. For example: +#
+# - name: asg1
+# maxSize: 2
+# minSize: 1 +#
+autoscalingGroups: [] +# - name: asg1 +# maxSize: 2 +# minSize: 1 +# - name: asg2 +# maxSize: 2 +# minSize: 1 + +# autoscalingGroupsnamePrefix -- For GCE. At least one element is required if not using `autoDiscovery`. For example: +#
+# - name: ig01
+# maxSize: 10
+# minSize: 0 +#
+autoscalingGroupsnamePrefix: [] +# - name: ig01 +# maxSize: 10 +# minSize: 0 +# - name: ig02 +# maxSize: 10 +# minSize: 0 + +# awsAccessKeyID -- AWS access key ID ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) +awsAccessKeyID: "" + +# awsRegion -- AWS region (required if `cloudProvider=aws`) +awsRegion: us-gov-east-1 + +# awsSecretAccessKey -- AWS access secret key ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) +awsSecretAccessKey: "" + +# azureClientID -- Service Principal ClientID with contributor permission to Cluster and Node ResourceGroup. +# Required if `cloudProvider=azure` +azureClientID: "" + +# azureClientSecret -- Service Principal ClientSecret with contributor permission to Cluster and Node ResourceGroup. +# Required if `cloudProvider=azure` +azureClientSecret: "" + +# azureResourceGroup -- Azure resource group that the cluster is located. +# Required if `cloudProvider=azure` +azureResourceGroup: "" + +# azureSubscriptionID -- Azure subscription where the resources are located. +# Required if `cloudProvider=azure` +azureSubscriptionID: "" + +# azureTenantID -- Azure tenant where the resources are located. +# Required if `cloudProvider=azure` +azureTenantID: "" + +# azureVMType -- Azure VM type. +azureVMType: "AKS" + +# azureClusterName -- Azure AKS cluster name. +# Required if `cloudProvider=azure` +azureClusterName: "" + +# azureNodeResourceGroup -- Azure resource group where the cluster's nodes are located, typically set as `MC___`. +# Required if `cloudProvider=azure` +azureNodeResourceGroup: "" + +# azureUseManagedIdentityExtension -- Whether to use Azure's managed identity extension for credentials. If using MSI, ensure subscription ID, resource group, and azure AKS cluster name are set. +azureUseManagedIdentityExtension: false + +# magnumClusterName -- Cluster name or ID in Magnum. +# Required if `cloudProvider=magnum` and not setting `autoDiscovery.clusterName`. +magnumClusterName: "" + +# magnumCABundlePath -- Path to the host's CA bundle, from `ca-file` in the cloud-config file. 
+magnumCABundlePath: "/etc/kubernetes/ca-bundle.crt" + +# clusterAPIMode -- Cluster API mode, see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#connecting-cluster-autoscaler-to-cluster-api-management-and-workload-clusters +# Syntax: workloadClusterMode-ManagementClusterMode +# for `kubeconfig-kubeconfig`, `incluster-kubeconfig` and `single-kubeconfig` you always must mount the external kubeconfig using either `extraVolumeSecrets` or `extraMounts` and `extraVolumes` +# if you dont set `clusterAPIKubeconfigSecret`and thus use an in-cluster config or want to use a non capi generated kubeconfig you must do so for the workload kubeconfig as well +clusterAPIMode: incluster-incluster # incluster-incluster, incluster-kubeconfig, kubeconfig-incluster, kubeconfig-kubeconfig, single-kubeconfig + +# clusterAPIKubeconfigSecret -- Secret containing kubeconfig for connecting to Cluster API managed workloadcluster +# Required if `cloudProvider=clusterapi` and `clusterAPIMode=kubeconfig-kubeconfig,kubeconfig-incluster or incluster-kubeconfig` +clusterAPIKubeconfigSecret: "" + +# clusterAPIWorkloadKubeconfigPath -- Path to kubeconfig for connecting to Cluster API managed workloadcluster, only used if `clusterAPIMode=kubeconfig-kubeconfig or kubeconfig-incluster` +clusterAPIWorkloadKubeconfigPath: /etc/kubernetes/value + +# clusterAPICloudConfigPath -- Path to kubeconfig for connecting to Cluster API Management Cluster, only used if `clusterAPIMode=kubeconfig-kubeconfig or incluster-kubeconfig` +clusterAPICloudConfigPath: /etc/kubernetes/mgmt-kubeconfig + +# clusterAPIConfigMapsNamespace -- Namespace on the workload cluster to store Leader election and status configmaps +clusterAPIConfigMapsNamespace: "" + +# cloudConfigPath -- Configuration file for cloud provider. +cloudConfigPath: /etc/gce.conf + +# cloudProvider -- The cloud provider where the autoscaler runs. +# Currently only `gce`, `aws`, `azure`, `magnum` and `clusterapi` are supported. +# `aws` supported for AWS. `gce` for GCE. `azure` for Azure AKS. +# `magnum` for OpenStack Magnum, `clusterapi` for Cluster API. +cloudProvider: aws + +# containerSecurityContext -- [Security context for container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) +containerSecurityContext: {} + # capabilities: + # drop: + # - ALL + +deployment: + # deployment.annotations -- Annotations to add to the Deployment object. + annotations: {} + +# dnsPolicy -- Defaults to `ClusterFirst`. Valid values are: +# `ClusterFirstWithHostNet`, `ClusterFirst`, `Default` or `None`. +# If autoscaler does not depend on cluster DNS, recommended to set this to `Default`. +dnsPolicy: ClusterFirst + +## Priorities Expander +# expanderPriorities -- The expanderPriorities is used if `extraArgs.expander` contains `priority` and expanderPriorities is also set with the priorities. +# If `extraArgs.expander` contains `priority`, then expanderPriorities is used to define cluster-autoscaler-priority-expander priorities. +# See: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/expander/priority/readme.md +expanderPriorities: {} + +# priorityConfigMapAnnotations -- Annotations to add to `cluster-autoscaler-priority-expander` ConfigMap. +priorityConfigMapAnnotations: {} + # key1: "value1" + # key2: "value2" + +# extraArgs -- Additional container arguments. 
+# Refer to https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca for the full list of cluster autoscaler +# parameters and their default values. +# Everything after the first _ will be ignored allowing the use of multi-string arguments. +extraArgs: + logtostderr: true + stderrthreshold: info + v: 4 + # write-status-configmap: true + # status-config-map-name: cluster-autoscaler-status + # leader-elect: true + # leader-elect-resource-lock: endpoints + skip-nodes-with-local-storage: true + expander: least-waste + # scale-down-enabled: true + balance-similar-node-groups: true + aws-use-static-instance-list: true + # min-replica-count: 0 + # scale-down-utilization-threshold: 0.5 + # scale-down-non-empty-candidates-count: 30 + # max-node-provision-time: 15m0s + # scan-interval: 10s + # scale-down-delay-after-add: 10m + # scale-down-delay-after-delete: 0s + # scale-down-delay-after-failure: 3m + # scale-down-unneeded-time: 10m + skip-nodes-with-system-pods: false + # balancing-ignore-label_1: first-label-to-ignore + # balancing-ignore-label_2: second-label-to-ignore + +# extraEnv -- Additional container environment variables. +extraEnv: {} + +# extraEnvConfigMaps -- Additional container environment variables from ConfigMaps. +extraEnvConfigMaps: {} + +# extraEnvSecrets -- Additional container environment variables from Secrets. +extraEnvSecrets: {} + +# envFromConfigMap -- ConfigMap name to use as envFrom. +envFromConfigMap: "" + +# envFromSecret -- Secret name to use as envFrom. +envFromSecret: "" + +# extraVolumeSecrets -- Additional volumes to mount from Secrets. +extraVolumeSecrets: {} + # autoscaler-vol: + # mountPath: /data/autoscaler/ + # custom-vol: + # name: custom-secret + # mountPath: /data/custom/ + # items: + # - key: subkey + # path: mypath + +# extraVolumes -- Additional volumes. +extraVolumes: [] + # - name: ssl-certs + # hostPath: + # path: /etc/ssl/certs/ca-bundle.crt + +# extraVolumeMounts -- Additional volumes to mount. +extraVolumeMounts: [] + # - name: ssl-certs + # mountPath: /etc/ssl/certs/ca-certificates.crt + # readOnly: true + +# fullnameOverride -- String to fully override `cluster-autoscaler.fullname` template. +fullnameOverride: "" + +image: + # image.repository -- Image repository + repository: 252960665057.dkr.ecr.us-gov-east-1.amazonaws.com/eks/adsd-cumulus-dev/cluster-autoscaler + # image.tag -- Image tag + tag: v1.24.0 + # image.pullPolicy -- Image pull policy + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # image.pullSecrets -- Image pull secrets + pullSecrets: [] + # - myRegistrKeySecretName + +# kubeTargetVersionOverride -- Allow overriding the `.Capabilities.KubeVersion.GitVersion` check. Useful for `helm template` commands. +kubeTargetVersionOverride: "" + +# nameOverride -- String to partially override `cluster-autoscaler.fullname` template (will maintain the release name) +nameOverride: "" + +# nodeSelector -- Node labels for pod assignment. Ref: https://kubernetes.io/docs/user-guide/node-selection/. +nodeSelector: {} + +# podAnnotations -- Annotations to add to each pod. +podAnnotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + +# podDisruptionBudget -- Pod disruption budget. +podDisruptionBudget: + maxUnavailable: 1 + # minAvailable: 2 + +# podLabels -- Labels to add to each pod. 
+podLabels: {} + +# additionalLabels -- Labels to add to each object of the chart. +additionalLabels: {} + +# priorityClassName -- priorityClassName +priorityClassName: "system-cluster-critical" + +rbac: + # rbac.create -- If `true`, create and use RBAC resources. + create: true + # rbac.pspEnabled -- If `true`, creates and uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. + # Must be used with `rbac.create` set to `true`. + pspEnabled: false + # rbac.clusterScoped -- if set to false will only provision RBAC to alter resources in the current namespace. Most useful for Cluster-API + clusterScoped: true + serviceAccount: + # rbac.serviceAccount.annotations -- Additional Service Account annotations. + annotations: + eks.amazonaws.com/role-arn: "arn:aws:iam::252960665057:role/eks-adsd-cumulus-dev-irsa-kube-system-cluster-autoscaler" + # rbac.serviceAccount.create -- If `true` and `rbac.create` is also true, a Service Account will be created. + create: true + # rbac.serviceAccount.name -- The name of the ServiceAccount to use. If not set and create is `true`, a name is generated using the fullname template. + name: "cluster-autoscaler" + # rbac.serviceAccount.automountServiceAccountToken -- Automount API credentials for a Service Account. + automountServiceAccountToken: true + +# replicaCount -- Desired number of pods +replicaCount: 1 + +# resources -- Pod resource requests and limits. +resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 100m + memory: 300Mi + +# securityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) +securityContext: {} + # runAsNonRoot: true + # runAsUser: 1001 + # runAsGroup: 1001 + +service: + # service.annotations -- Annotations to add to service + annotations: {} + # service.labels -- Labels to add to service + labels: {} + # service.externalIPs -- List of IP addresses at which the service is available. Ref: https://kubernetes.io/docs/user-guide/services/#external-ips. + externalIPs: [] + + # service.loadBalancerIP -- IP address to assign to load balancer (if supported). + loadBalancerIP: "" + # service.loadBalancerSourceRanges -- List of IP CIDRs allowed access to load balancer (if supported). + loadBalancerSourceRanges: [] + # service.servicePort -- Service port to expose. + servicePort: 8085 + # service.portName -- Name for service port. + portName: http + # service.type -- Type of service to create. + type: ClusterIP + +## Are you using Prometheus Operator? +serviceMonitor: + # serviceMonitor.enabled -- If true, creates a Prometheus Operator ServiceMonitor. + enabled: false + # serviceMonitor.interval -- Interval that Prometheus scrapes Cluster Autoscaler metrics. + interval: 10s + # serviceMonitor.namespace -- Namespace which Prometheus is running in. + namespace: monitoring + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + # serviceMonitor.selector -- Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install. 
+ selector: + release: prometheus-operator + # serviceMonitor.path -- The path to scrape for metrics; autoscaler exposes `/metrics` (this is standard) + path: /metrics + +## Custom PrometheusRule to be defined +## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart +## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions +prometheusRule: + # prometheusRule.enabled -- If true, creates a Prometheus Operator PrometheusRule. + enabled: false + # prometheusRule.additionalLabels -- Additional labels to be set in metadata. + additionalLabels: {} + # prometheusRule.namespace -- Namespace which Prometheus is running in. + namespace: monitoring + # prometheusRule.interval -- How often rules in the group are evaluated (falls back to `global.evaluation_interval` if not set). + interval: null + # prometheusRule.rules -- Rules spec template (see https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule). + rules: [] + +# tolerations -- List of node taints to tolerate (requires Kubernetes >= 1.6). +tolerations: [] + +# topologySpreadConstraints -- You can use topology spread constraints to control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains. (requires Kubernetes >= 1.19). +topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: cluster-autoscaler + +# updateStrategy -- [Deployment update strategy](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) +updateStrategy: {} + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 0 + # type: RollingUpdate diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/.helmignore b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/Chart.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/Chart.yaml new file mode 100644 index 0000000..f428bb8 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: intermediate-certificate-issuer +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. 
Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl new file mode 100644 index 0000000..5f6c44f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "intermediate-certificate-issuer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "intermediate-certificate-issuer.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "intermediate-certificate-issuer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "intermediate-certificate-issuer.labels" -}} +helm.sh/chart: {{ include "intermediate-certificate-issuer.chart" . }} +{{ include "intermediate-certificate-issuer.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "intermediate-certificate-issuer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "intermediate-certificate-issuer.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "intermediate-certificate-issuer.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "intermediate-certificate-issuer.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml new file mode 100644 index 0000000..ad99f63 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ca-key-pair + namespace: {{ .Release.Namespace }} +data: + tls.crt: {{ .Values.tls.crt }} + tls.key: {{ .Values.tls.key }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml new file mode 100644 index 0000000..76a3874 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml @@ -0,0 +1,7 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: clusterissuer +spec: + ca: + secretName: ca-key-pair diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/values.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/values.yaml new file mode 100644 index 0000000..50dfd22 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/intermediate-certificate-issuer/values.yaml @@ -0,0 +1,6 @@ +tls: + # tls.crt contains the issuers full chain in the correct order: + # issuer -> intermediate(s) -> root. + crt: + # tls.key contains the base64 encoded signing key. 
+ key: diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/Chart.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/Chart.yaml new file mode 100644 index 0000000..ff4a032 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +name: istio-operator +version: 1.20.0 +tillerVersion: ">=2.7.2" +description: Helm chart for deploying Istio operator +keywords: + - istio + - operator +sources: + - https://github.com/istio/istio/tree/master/operator +engine: gotpl +icon: https://istio.io/latest/favicons/android-192x192.png diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/crds/crd-operator.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/crds/crd-operator.yaml new file mode 100644 index 0000000..93ac1de --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/crds/crd-operator.yaml @@ -0,0 +1,48 @@ +# SYNC WITH manifests/charts/base/files +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: istiooperators.install.istio.io + labels: + release: istio +spec: + conversion: + strategy: None + group: install.istio.io + names: + kind: IstioOperator + listKind: IstioOperatorList + plural: istiooperators + singular: istiooperator + shortNames: + - iop + - io + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Istio control plane revision + jsonPath: .spec.revision + name: Revision + type: string + - description: IOP current state + jsonPath: .status.status + name: Status + type: string + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true +--- diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/files/gen-operator.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/files/gen-operator.yaml new file mode 100644 index 0000000..e77d5aa --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/files/gen-operator.yaml @@ -0,0 +1,220 @@ +--- +# Source: istio-operator/templates/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: istio-operator + labels: + istio-operator-managed: Reconcile + istio-injection: disabled +--- +# Source: istio-operator/templates/service_account.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: istio-operator + name: istio-operator +--- +# Source: istio-operator/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: istio-operator +rules: +# istio groups +- apiGroups: + - authentication.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - config.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - install.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - security.istio.io + resources: + - '*' + verbs: + - '*' +# k8s groups +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions.apiextensions.k8s.io + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - apps + - extensions + resources: + - daemonsets + - deployments + - deployments/finalizers + - replicasets + verbs: + - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - update +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - roles + - rolebindings + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - create + - update +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - namespaces + - pods + - pods/proxy + - persistentvolumeclaims + - secrets + - services + - serviceaccounts + verbs: + - '*' +--- +# Source: istio-operator/templates/clusterrole_binding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-operator +subjects: +- kind: ServiceAccount + name: istio-operator + namespace: istio-operator +roleRef: + kind: ClusterRole + name: istio-operator + apiGroup: rbac.authorization.k8s.io +--- +# Source: istio-operator/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + namespace: istio-operator + labels: + name: istio-operator + name: istio-operator +spec: + ports: + - name: http-metrics + port: 8383 + targetPort: 8383 + protocol: TCP + selector: + name: istio-operator +--- +# Source: istio-operator/templates/deployment.yaml +apiVersion: 
apps/v1 +kind: Deployment +metadata: + namespace: istio-operator + name: istio-operator +spec: + replicas: 1 + selector: + matchLabels: + name: istio-operator + template: + metadata: + labels: + name: istio-operator + spec: + serviceAccountName: istio-operator + containers: + - name: istio-operator + image: gcr.io/istio-testing/operator:1.10-dev + command: + - operator + - server + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsGroup: 1337 + runAsUser: 1337 + runAsNonRoot: true + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 50m + memory: 128Mi + env: + - name: WATCH_NAMESPACE + value: "istio-system" + - name: LEADER_ELECTION_NAMESPACE + value: "istio-operator" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "istio-operator" + - name: WAIT_FOR_RESOURCES_TIMEOUT + value: "300s" + - name: REVISION + value: "" diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/clusterrole.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/clusterrole.yaml new file mode 100644 index 0000000..4e6bd74 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/clusterrole.yaml @@ -0,0 +1,115 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} +rules: +# istio groups +- apiGroups: + - authentication.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - config.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - install.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - security.istio.io + resources: + - '*' + verbs: + - '*' +# k8s groups +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions.apiextensions.k8s.io + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - apps + - extensions + resources: + - daemonsets + - deployments + - deployments/finalizers + - replicasets + verbs: + - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - update +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - roles + - rolebindings + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - create + - update +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - namespaces + - pods + - pods/proxy + - persistentvolumeclaims + - secrets + - services + - serviceaccounts + verbs: + - '*' +--- diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/clusterrole_binding.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/clusterrole_binding.yaml new file mode 100644 index 0000000..9b9df7d --- /dev/null +++ 
b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/clusterrole_binding.yaml @@ -0,0 +1,13 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} +subjects: +- kind: ServiceAccount + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} + namespace: {{.Values.operatorNamespace}} +roleRef: + kind: ClusterRole + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} + apiGroup: rbac.authorization.k8s.io +--- diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/crds.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/crds.yaml new file mode 100644 index 0000000..a370365 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/crds.yaml @@ -0,0 +1,6 @@ +{{- if .Values.enableCRDTemplates -}} +{{- range $path, $bytes := .Files.Glob "crds/*.yaml" -}} +--- +{{ $.Files.Get $path }} +{{- end -}} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/deployment.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/deployment.yaml new file mode 100644 index 0000000..1baaa8d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/deployment.yaml @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: {{.Values.operatorNamespace}} + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} +spec: + replicas: 1 + selector: + matchLabels: + name: istio-operator + template: + metadata: + labels: + name: istio-operator + spec: + serviceAccountName: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} + containers: + - name: istio-operator + image: {{.Values.hub}}/operator:{{.Values.tag}} + command: + - operator + - server + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsGroup: 1337 + runAsUser: 1337 + runAsNonRoot: true + imagePullPolicy: IfNotPresent + resources: +{{ toYaml .Values.operator.resources | trim | indent 12 }} + env: + - name: WATCH_NAMESPACE + value: {{.Values.watchedNamespaces | quote}} + - name: LEADER_ELECTION_NAMESPACE + value: {{.Values.operatorNamespace | quote}} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: {{.Values.operatorNamespace | quote}} + - name: WAIT_FOR_RESOURCES_TIMEOUT + value: {{.Values.waitForResourcesTimeout | quote}} + - name: REVISION + value: {{.Values.revision | quote}} +--- diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/namespace.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/namespace.yaml new file mode 100644 index 0000000..31dc5aa --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/namespace.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: {{.Values.operatorNamespace}} + labels: + istio-operator-managed: Reconcile + istio-injection: disabled +--- diff --git 
a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/service.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/service.yaml new file mode 100644 index 0000000..ab3ed57 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: {{.Values.operatorNamespace}} + labels: + name: istio-operator + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} +spec: + ports: + - name: http-metrics + port: 8383 + targetPort: 8383 + protocol: TCP + selector: + name: istio-operator +--- diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/service_account.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/service_account.yaml new file mode 100644 index 0000000..03e9377 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/templates/service_account.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: {{.Values.operatorNamespace}} + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} +- name: {{ . }} +{{- end }} +{{- end }} +--- diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/values.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/values.yaml new file mode 100644 index 0000000..39a5bd2 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-operator/values.yaml @@ -0,0 +1,29 @@ +hub: docker.io/istio +tag: 1.10.1 + +# ImagePullSecrets for operator ServiceAccount, list of secrets in the same namespace +# used to pull operator image. Must be set for any cluster configured with private docker registry. +imagePullSecrets: [] + +operatorNamespace: istio-operator + +# Used to replace istioNamespace to support operator watch multiple namespaces. +watchedNamespaces: istio-system +waitForResourcesTimeout: 300s + +# Used for helm2 to add the CRDs to templates. +enableCRDTemplates: false + +# revision for the operator resources +revision: "" + +# Operator resource defaults +operator: + resources: + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 50m + memory: 128Mi + diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/.helmignore b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/Chart.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/Chart.yaml new file mode 100644 index 0000000..cf94e4f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: istio-peerauthentication +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.20.0" diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/templates/_helpers.tpl new file mode 100644 index 0000000..94c398d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "istio-peerauthentication.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "istio-peerauthentication.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "istio-peerauthentication.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "istio-peerauthentication.labels" -}} +helm.sh/chart: {{ include "istio-peerauthentication.chart" . }} +{{ include "istio-peerauthentication.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "istio-peerauthentication.selectorLabels" -}} +app.kubernetes.io/name: {{ include "istio-peerauthentication.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "istio-peerauthentication.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "istio-peerauthentication.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml new file mode 100644 index 0000000..3238311 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml @@ -0,0 +1,9 @@ +{{ if .Values.requireMutualTLS }} +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: "default" +spec: + mtls: + mode: STRICT +{{ end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/values.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-peerauthentication/values.yaml new file mode 100644 index 0000000..e69de29 diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/.helmignore b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/Chart.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/Chart.yaml new file mode 100644 index 0000000..aaf3983 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: istio-profile +description: Configuration for istio to be picked up by istio's operator. +type: application +version: 0.1.2 +appVersion: "1.20.0" diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/templates/_helpers.tpl new file mode 100644 index 0000000..8a02937 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "istio-profile.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "istio-profile.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "istio-profile.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "istio-profile.labels" -}} +helm.sh/chart: {{ include "istio-profile.chart" . }} +{{ include "istio-profile.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "istio-profile.selectorLabels" -}} +app.kubernetes.io/name: {{ include "istio-profile.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "istio-profile.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "istio-profile.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/templates/istiooperator.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/templates/istiooperator.yaml new file mode 100644 index 0000000..618fbdd --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/templates/istiooperator.yaml @@ -0,0 +1,175 @@ +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +metadata: + name: istio-profile +spec: + hub: {{ .Values.hub | default "docker.io/istio" }} + tag: {{ .Values.tag | default "1.16.1" }} + + meshConfig: +{{- if .Values.envoy.accessLog.enabled }} + accessLogFile: /dev/stdout +{{- end }} +{{- if and .Values.envoy.accessLog.enabled .Values.envoy.accessLog.format }} + accessLogFormat: {{ .Values.envoy.accessLog.format }} +{{- end }} +{{- if and .Values.envoy.accessLog.enabled .Values.envoy.accessLog.encoding }} + accessLogEncoding: {{ .Values.envoy.accessLog.encoding }} +{{- end }} + defaultConfig: + proxyMetadata: {} + enablePrometheusMerge: true + + components: + base: + enabled: true + pilot: + enabled: true + + ingressGateways: + - name: istio-ingressgateway + enabled: true + k8s: + serviceAnnotations: + "service.beta.kubernetes.io/aws-load-balancer-internal": "true" + "service.beta.kubernetes.io/aws-load-balancer-type": "nlb" + + egressGateways: + - name: istio-egressgateway + enabled: {{ .Values.egressGateways.enabled }} + + cni: + enabled: false + + istiodRemote: + enabled: false + + values: + global: + istioNamespace: {{ .Values.namespace }} + istiod: + enableAnalysis: false + logging: + level: "default:info" + logAsJson: false + pilotCertProvider: istiod + jwtPolicy: third-party-jwt + proxy: + image: proxyv2 + clusterDomain: "cluster.local" + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 2000m + 
memory: 1024Mi + logLevel: warning + componentLogLevel: "misc:error" + privileged: false + enableCoreDump: false + statusPort: 15020 + readinessInitialDelaySeconds: 1 + readinessPeriodSeconds: 2 + readinessFailureThreshold: 30 + includeIPRanges: "*" + excludeIPRanges: {{ default "" .Values.apiserver | quote }} + excludeOutboundPorts: "" + excludeInboundPorts: "" + autoInject: enabled + tracer: "zipkin" + proxy_init: + image: proxyv2 + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 10m + memory: 10Mi + # Specify image pull policy if default behavior isn't desired. + # Default behavior: latest images will be Always else IfNotPresent. + imagePullPolicy: "" + operatorManageWebhooks: false + tracer: + lightstep: {} + zipkin: {} + datadog: {} + stackdriver: {} + imagePullSecrets: [] + defaultNodeSelector: {} + configValidation: true + multiCluster: + enabled: false + clusterName: "" + omitSidecarInjectorConfigMap: false + network: "" + defaultResources: + requests: + cpu: 10m + defaultPodDisruptionBudget: + enabled: true + priorityClassName: "" + sds: + token: + aud: istio-ca + sts: + servicePort: 0 + meshNetworks: {} + mountMtlsCerts: false + base: + enableCRDTemplates: false + validationURL: "" + pilot: + autoscaleEnabled: true + autoscaleMin: 1 + autoscaleMax: 5 + replicaCount: 1 + image: pilot + traceSampling: 1.0 + env: {} + cpu: + targetAverageUtilization: 80 + nodeSelector: {} + keepaliveMaxServerConnectionAge: 30m + deploymentLabels: + configMap: true + + telemetry: + enabled: {{ .Values.telemetry.enabled }} + v2: + enabled: true + prometheus: + enabled: true + stackdriver: + enabled: false + + istiodRemote: + injectionURL: "" + + gateways: + istio-egressgateway: + env: {} + autoscaleEnabled: true + type: ClusterIP + name: istio-egressgateway + secretVolumes: + - name: egressgateway-certs + secretName: istio-egressgateway-certs + mountPath: /etc/istio/egressgateway-certs + - name: egressgateway-ca-certs + secretName: istio-egressgateway-ca-certs + mountPath: /etc/istio/egressgateway-ca-certs + + istio-ingressgateway: + autoscaleEnabled: true + type: LoadBalancer + name: istio-ingressgateway + env: {} + secretVolumes: + - name: ingressgateway-certs + secretName: istio-ingressgateway-certs + mountPath: /etc/istio/ingressgateway-certs + - name: ingressgateway-ca-certs + secretName: istio-ingressgateway-ca-certs + mountPath: /etc/istio/ingressgateway-ca-certs
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/values.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/values.yaml new file mode 100644 index 0000000..9b43fab --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/istio-profile/values.yaml @@ -0,0 +1,44 @@ + +namespace: istio-system +requireMutualTLS: true +hub: docker.io/istio +tag: 1.10.1 +apiserver: "" + +############################################################################## +# Observability options: +############################################################################## + +# Controls settings for the envoy proxy that is added as a sidecar +envoy: + # Controls settings related to access logging for the service. + accessLog: + # When enabled, envoy is configured to log to stdout. + enabled: true + # Format for the proxy access log. Default value is envoy's format. + # Controls accessLogFormat istio configuration. + format: + # Encoding for the proxy access log (text or json). Default value is text. + # Controls accessLogEncoding istio configuration.
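+    # A hedged example (an assumption based on the istiooperator.yaml template in this
+    # chart, which passes this value straight through to Istio's meshConfig.accessLogEncoding,
+    # whose accepted values are TEXT and JSON): + #   encoding: JSON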
+    encoding: + +# When set to true, istio provides telemetry data to prometheus. +# False disables collecting telemetry data. +telemetry: + enabled: true + +# When set to true, enables tracking of a request through the mesh as it is +# distributed across multiple services. +tracing: + enabled: true + +############################################################################## +# Traffic Management options: +############################################################################## + +# Egress gateways allow you to apply Istio features, for example, monitoring +# and route rules, to traffic exiting the mesh. +# When set to true, the egress gateway is created. +egressGateways: + enabled: true +
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/.helmignore b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/Chart.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/Chart.yaml new file mode 100644 index 0000000..9cfc3c1 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: self-signed-certificate-issuer +description: A Helm chart for Kubernetes +type: application +version: 0.1.0 +appVersion: "1.0.0"
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl new file mode 100644 index 0000000..e62a63b --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "self-signed-certificate-issuer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "self-signed-certificate-issuer.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label.
+*/}} +{{- define "self-signed-certificate-issuer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "self-signed-certificate-issuer.labels" -}} +helm.sh/chart: {{ include "self-signed-certificate-issuer.chart" . }} +{{ include "self-signed-certificate-issuer.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "self-signed-certificate-issuer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "self-signed-certificate-issuer.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "self-signed-certificate-issuer.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "self-signed-certificate-issuer.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml new file mode 100644 index 0000000..ab1ee31 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml @@ -0,0 +1,8 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: clusterissuer +spec: + ca: + secretName: root-secret + diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml new file mode 100644 index 0000000..84e895d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml @@ -0,0 +1,17 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: selfsigned-ca + namespace: {{ .Release.Namespace }} +spec: + isCA: true + commonName: selfsigned-ca + secretName: root-secret + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: selfsigned-issuer + kind: ClusterIssuer + group: cert-manager.io + diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml new file mode 100644 index 0000000..81660bd --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml @@ -0,0 +1,7 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: selfsigned-issuer +spec: + selfSigned: {} + diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/values.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/self-signed-certificate-issuer/values.yaml new file mode 100644 index 0000000..e69de29 diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/.helmignore 
b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/Chart.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/Chart.yaml new file mode 100644 index 0000000..e179122 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: vault-certificate-issuer +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl new file mode 100644 index 0000000..a9a1425 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "vault-certificate-issuer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "vault-certificate-issuer.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "vault-certificate-issuer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "vault-certificate-issuer.labels" -}} +helm.sh/chart: {{ include "vault-certificate-issuer.chart" . }} +{{ include "vault-certificate-issuer.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "vault-certificate-issuer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "vault-certificate-issuer.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "vault-certificate-issuer.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "vault-certificate-issuer.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml new file mode 100644 index 0000000..8880f1c --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml @@ -0,0 +1,18 @@ +{{ if eq .Values.vault.authentication_type "AppRole" }} +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: clusterissuer +spec: + vault: + path: {{ .Values.vault.path }} + server: {{ .Values.vault.url }} + caBundle: {{ .Values.vault.ca_bundle }} + auth: + appRole: + path: {{ .Values.approle.role_path }} + roleId: {{ .Values.approle.role_id }} + secretRef: + name: cert-manager-vault-approle + key: secretId +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml new file mode 100644 index 0000000..23d58e1 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml @@ -0,0 +1,10 @@ +{{ if eq .Values.vault.authentication_type "AppRole" }} +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: cert-manager-vault-approle + namespace: {{ .Release.Namespace }} +data: + secretId: {{ .Values.approle.secret_id }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml new file mode 100644 index 0000000..f964aed --- /dev/null +++ 
b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml @@ -0,0 +1,20 @@ +{{ if eq .Values.vault.authentication_type "ServiceAccount" }} +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: clusterissuer +spec: + vault: + path: {{ .Values.vault.path }} + server: {{ .Values.vault.url }} + caBundle: {{ .Values.vault.ca_bundle }} + auth: + kubernetes: + role: {{ .Values.serviceAccount.role }} +{{- if .Values.serviceAccount.mountPath }} + path: {{ .Values.serviceAccount.mountPath }} +{{- end }} + secretRef: + name: {{ .Values.serviceAccount.secret }} + key: token +{{- end }}
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml new file mode 100644 index 0000000..0410d30 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml @@ -0,0 +1,15 @@ +{{ if eq .Values.vault.authentication_type "Token" }} +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: clusterissuer +spec: + vault: + path: {{ .Values.vault.path }} + server: {{ .Values.vault.url }} + caBundle: {{ .Values.vault.ca_bundle }} + auth: + tokenSecretRef: + name: cert-manager-vault-token + key: token +{{- end }}
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml new file mode 100644 index 0000000..35bb13d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml @@ -0,0 +1,10 @@ +{{ if eq .Values.vault.authentication_type "Token" }} +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: cert-manager-vault-token + namespace: {{ .Release.Namespace }} +data: + token: {{ .Values.token.token }} +{{- end }}
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/values.yaml b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/values.yaml new file mode 100644 index 0000000..4cac439 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/charts/vault-certificate-issuer/values.yaml @@ -0,0 +1,47 @@ + +# Common settings for all types of authentication +vault: + # the URL at which Vault is reachable. + url: + # the Vault path that will be used for signing. Note that the path + # must use the sign endpoint. + path: + # an optional field containing a base64 encoded string of the + # Certificate Authority to trust the Vault connection. This is + # typically required when using an https URL. + ca_bundle: + # the type of authentication to use; must be one of: + # - AppRole + # - Token + # - ServiceAccount + authentication_type: + +# AppRole authentication type: +approle: + # the SecretID used to authenticate + secret_id: + # RoleID of the role to assume + role_id: + # the app role path + role_path: + +# Token authentication type: +token: + # a token string that has been generated from one of the many + # authentication backends that Vault supports. These tokens have + # an expiry and so need to be periodically refreshed. cert-manager + # does not refresh these tokens automatically, so another process + # must be put in place to do this. The token is stored in the + # cert-manager-vault-token secret in the cert-manager namespace. + token: + +# ServiceAccount authentication type: +serviceAccount: + # the name of the secret associated with the service account in the + # cert-manager namespace, used to authenticate with vault + secret: + # the Vault role that the Service Account is to assume + role: + # optional value which is the authentication mount path, defaulting + # to kubernetes. + mountPath:
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/.tf-control b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/.tf-control new file mode 100644 index 0000000..280f449 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/.tf-control @@ -0,0 +1,20 @@ +# .tf-control +# allows for setting a specific command to be used for tf-* commands under this git repo +# see tf-control.sh help for more info + +TFCONTROL_VERSION="1.0.5" + +TFCOMMAND="terraform_latest" +# TF_CLI_CONFIG_FILE=PATH-TO-FILE/.tf-control.tfrc +# TFARGS="" +# TFNOLOG="" +# TFNOCOLOR="" + +# use the following to force a specific version. An upgrade of an existing 0.12.31 to 1.x +# needs you to cycle through 0.13.7, 0.14.11, and then latest (0.15.5 not needed), with other +# steps in between. See https://github.e.it.census.gov/terraform/support/tree/master/docs/how-to/terraform-upgrade for details +# +#TFCOMMAND="terraform_0.12.31" +#TFCOMMAND="terraform_0.13.7" +#TFCOMMAND="terraform_0.14.11" +#TFCOMMAND="terraform_0.15.5"
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/.tf-control.tfrc new file mode 100644 index 0000000..7425488 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/.tf-control.tfrc @@ -0,0 +1,24 @@ +TFCONTROL_VERSION="1.0.5" + +# https://www.terraform.io/docs/cli/config/config-file.html +plugin_cache_dir = "/data/terraform/terraform.d/plugin-cache" +#disable_checkpoint = true + +provider_installation { +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "*/*/*" ] +# } + filesystem_mirror { + path = "/data/terraform/terraform.d/providers" + include = [ "*/*/*" ] + } +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "external.terraform.census.gov/*/*" ] +# } + direct { + include = [ "*/*/*" ] + } +} +
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/cluster-autoscaler.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/cluster-autoscaler.tf new file mode 100644 index 0000000..7116997 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/cluster-autoscaler.tf @@ -0,0 +1,92 @@ +module "role_cluster-autoscaler" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + + role_description = "EKS IAM Role for ${var.cluster_name} for service account ${var.cluster_autoscaler_namespace}:${var.cluster_autoscaler_name}" + role_name = format("%v%v-irsa__%v", local._prefixes["eks-role"], var.cluster_name, "cluster-autoscaler") + + attach_cluster_autoscaler_policy = true + cluster_autoscaler_cluster_names = [var.cluster_name] + + oidc_providers = { + main = { + provider_arn = local.oidc_provider_arn + namespace_service_accounts = [ + format("%v:%v", var.cluster_autoscaler_namespace, var.cluster_autoscaler_name),
+ format("%v:%v", var.cluster_autoscaler_namespace, format("%v-aws-%v", var.cluster_autoscaler_name, var.cluster_autoscaler_name)), + ] + } + } + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + { + "eks:namespace" = var.cluster_autoscaler_namespace + "eks:user" = var.cluster_autoscaler_name + } + ) +} + +locals { + cluster_autoscaler_image_output = { for k, v in module.images_cluster_autoscaler.images : v.name => v } +} + +module "images_cluster_autoscaler" { + source = "git@github.e.it.census.gov:terraform-modules/aws-ecr-copy-images.git?ref=tf-upgrade" + + profile = var.profile + application_list = [] + application_name = format("eks/%v", var.cluster_name) + image_config = [for k, v in local.images_settings : v if(v.enabled && k == "cluster-autoscaler")] + force_delete = true + + enable_lifecycle_policy = true + enable_lifecycle_policy_image_config = true + lifecycle_policy_all = true + lifecycle_policy_keep_count = 3 + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) +} + +resource "helm_release" "cluster-autoscaler" { + chart = "cluster-autoscaler" + name = "cluster-autoscaler" + namespace = "kube-system" + repository = var.cluster_autoscaler_charts["cluster-autoscaler"].use_remote ? var.cluster_autoscaler_charts["cluster-autoscaler"].repository : "${path.module}/charts" + version = var.cluster_autoscaler_charts["cluster-autoscaler"].use_remote ? var.cluster_autoscaler_charts["cluster-autoscaler"].version : null + + depends_on = [module.images_cluster_autoscaler] + set { + name = "image.repository" + value = split(":", local.cluster_autoscaler_image_output["cluster-autoscaler"].dest_full_path)[0] + } + set { + name = "image.tag" + value = local.cluster_autoscaler_image_output["cluster-autoscaler"].tag + } + set { + name = "autoDiscovery.clusterName" + value = var.cluster_name + } + set { + name = "awsRegion" + value = local.region + } + set { + name = "rbac.serviceAccount.create" + value = "true" + } + set { + name = "rbac.serviceAccount.name" + value = var.cluster_autoscaler_name + } + set { + name = "rbac.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" + value = module.role_cluster-autoscaler.iam_role_arn + } +}
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/locals.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/locals.tf new file mode 100644 index 0000000..4b9ae5a --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/locals.tf @@ -0,0 +1,17 @@ +locals { + base_tags = { + "eks-cluster-name" = var.cluster_name + "boc:tf_module_version" = local._module_version + "boc:created_by" = "terraform" + } +} + +# parent_rs carries the TF remote state from the parent directory; replace it accordingly, and be sure to make the link +locals { + vpc_id = local.parent_rs.cluster_vpc_id + subnet_ids = local.parent_rs.cluster_subnet_ids + cluster_worker_sg_id = local.parent_rs.cluster_worker_sg_id + + oidc_provider_url = local.parent_rs.oidc_provider_url + oidc_provider_arn = local.parent_rs.oidc_provider_arn +}
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/region.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/region.tf new file mode 100644 index 0000000..f617506 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/region.tf @@ -0,0 +1,3 @@ +locals { + region = var.region +}
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/test-cluster-autoscaling.json b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/test-cluster-autoscaling.json new file mode 100644 index 0000000..ab00596 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/test-cluster-autoscaling.json @@ -0,0 +1,25 @@ +# NOTE: despite the .json extension, this test manifest is YAML; kubectl apply -f parses it by content, not extension. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 4 # tells the deployment to run 4 pods matching the template + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + resources: + requests: + cpu: 3 + limits: + cpu: 3 + image: "252960665057.dkr.ecr.us-gov-east-1.amazonaws.com/eks/adsd-cumulus-dev/nginx:1.21" + ports: + - containerPort: 80
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/tf-run.data b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/tf-run.data new file mode 100644 index 0000000..aefb514 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/tf-run.data @@ -0,0 +1,44 @@ +VERSION 2.0.0 +TAG setup +REMOTE-STATE +COMMAND tf-directory-setup.py -l none -f +COMMAND setup-new-directory.sh + +TAG links +LINKTOP init +LINKTOP includes.d/variables.account_tags.tf +LINKTOP includes.d/variables.account_tags.auto.tfvars +LINKTOP includes.d/variables.infrastructure_tags.tf +LINKTOP includes.d/variables.infrastructure_tags.auto.tfvars +LINKTOP includes.d/variables.application_tags.tf +# LINKTOP includes.d/variables.application_tags.auto.tfvars +LINK variables.application_tags.auto.tfvars +LINKTOP provider_configs.d/provider.ldap_new.auto.tfvars +LINKTOP provider_configs.d/provider.ldap_new.tf +LINKTOP provider_configs.d/provider.ldap_new.variables.tf +LINK settings.auto.tfvars +LINK includes.d/parent_rs.tf +LINK includes.d/data.eks-subdirectory.tf +LINK includes.d/kubeconfig.eks-subdirectory.tf +LINK variables.eks.tf +LINK prefixes.tf +LINK providers.tf +LINK variables.addons.tf +LINK versions.tf +LINK version.tf +LINK variables.vpc.tf +LINK variables.vpc.auto.tfvars +# links for images, charts +LINK images.yml +LINK charts.yml +LINK charts-images.tf + +TAG init +COMMAND tf-init + +TAG start +ALL + +TAG state-link +COMMAND tf-directory-setup.py -l s3 +
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/tf-run.destroy.data b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/tf-run.destroy.data new file mode 100644 index 0000000..7a82c9f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/tf-run.destroy.data @@ -0,0 +1,6 @@ +VERSION 1.0.1 +BACKUP-STATE +COMMAND tf-init +COMMAND tf-state list + +ALL
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/variables.cluster-autoscaler.auto.tfvars b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/variables.cluster-autoscaler.auto.tfvars new file mode 100644 index 0000000..d28733d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/variables.cluster-autoscaler.auto.tfvars @@ -0,0 +1,21 @@ +cluster_autoscaler_charts = { + "cluster-autoscaler" = { + name = "cluster-autoscaler" + repository = "https://kubernetes.github.io/autoscaler" + version = "9.35.0" + use_remote = true + } +} + +cluster_autoscaler_images = { + "cluster-autoscaler" = {
"cluster-autoscaler" + image = "registry.k8s.io/autoscaling/cluster-autoscaler" + dest_path = null + source_registry = "registry.k8s.io" + source_image = "autoscaling/cluster-autoscaler" + source_tag = null + tag = "v1.29.0" + enabled = true + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/variables.cluster-autoscaler.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/variables.cluster-autoscaler.tf new file mode 100644 index 0000000..2c502ce --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/cluster-autoscaler/variables.cluster-autoscaler.tf @@ -0,0 +1,40 @@ +variable "cluster_autoscaler_namespace" { + description = "Cluster Autoscaler namespace" + type = string + default = "kube-system" +} + +variable "cluster_autoscaler_name" { + description = "Cluster Autoscaler service account name" + type = string + # default = "cluster-autoscaler" + default = "cluster-autoscaler-aws-cluster-autoscaler" +} + +variable "cluster_autoscaler_charts" { + description = "Cluster Autoscaler Map of object with details about remote charts" + type = map(object( + { + name = string + documentation = optional(string, null) + repository = string + version = string + use_remote = bool + })) + default = {} +} + +variable "cluster_autoscaler_images" { + description = "Cluster Autoscaler List of image configuration objects to copy from SOURCE to DESTINATION" + type = map(object({ + name = string, + documentation = optional(string, null) + tag = string, + dest_path = string, + source_registry = string, + source_image = string, + source_tag = string, + enabled = bool, + })) + default = {} +} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/common-services.auto.tfvars b/examples/full-cluster-tf-upgrade/1.32/common-services/common-services.auto.tfvars new file mode 100644 index 0000000..8198041 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/common-services.auto.tfvars @@ -0,0 +1,2 @@ +#tls_crt_file = "certs/pki.test4.sandbox.csp2.census.gov.bundle.crt" +#tls_key_file = "certs/pki.test4.sandbox.csp2.census.gov.key" diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/data.eks-subdirectory.tf new file mode 120000 index 0000000..43b5430 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/data.eks-subdirectory.tf @@ -0,0 +1 @@ +../includes.d/data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/dns.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/dns.tf new file mode 100644 index 0000000..dd28da1 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/dns.tf @@ -0,0 +1,40 @@ +data "kubernetes_service" "istio-ingressgateway" { + metadata { + name = "istio-ingressgateway" + namespace = "istio-system" + } +} + +locals { + is_gateway_active = data.kubernetes_service.istio-ingressgateway.status != null +} + +data "aws_lb" "lb" { + count = local.is_gateway_active ? 1 : 0 + name = split("-", data.kubernetes_service.istio-ingressgateway.status.0.load_balancer.0.ingress.0.hostname)[0] +} + +# resource "aws_route53_record" "istio-ingress" { +# count = local.is_gateway_active ? 
1 : 0 +# name = format("*.%v", local.parent_rs.cluster_domain_name) +# type = "CNAME" +# ttl = 900 +# zone_id = local.parent_rs.cluster_domain_id +# +# records = [data.aws_lb.lb[0].dns_name] +# } + +module "istio-ingress" { + count = local.is_gateway_active ? 1 : 0 + source = "git@github.e.it.census.gov:terraform-modules/aws-dns//cname" + + name = format("*.%v", local.parent_rs.cluster_domain_name) + zone = local.parent_rs.cluster_domain_name + values = [data.aws_lb.lb[0].dns_name] + enable_heritage = false +} + +moved { + from = aws_route53_record.istio-ingress[0] + to = module.istio-ingress[0].aws_route53_record.entry +} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/images.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/images.tf new file mode 100644 index 0000000..ba913f0 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/images.tf @@ -0,0 +1,77 @@ +locals { + image_config = [for k, v in local.images_settings : v if(v.enabled && k != "cluster-autoscaler")] + image_output = { for k, v in module.images.images : v.name => v } + + charts = { for k, v in local.chart_settings : k => v if try(v.enabled, true) } +} + +module "images" { + source = "git@github.e.it.census.gov:terraform-modules/aws-ecr-copy-images.git?ref=tf-upgrade" + + profile = var.profile + application_list = [] + application_name = format("eks/%v", var.cluster_name) + image_config = local.image_config + force_delete = true + + enable_lifecycle_policy = true + enable_lifecycle_policy_image_config = true + lifecycle_policy_all = true + lifecycle_policy_keep_count = 3 + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) + + ### optional + ## account_alias = "" + ## account_id = "" + ## destination_password = "" + ## destination_username = "" + ## override_prefixes = {} + ## region = "" + ## source_password = "" + ## source_username = "" +} + + +## image_info = { +## "elastic/elasticsearch#7.14.0" = { +## "dest_full_path" = "817869416306.dkr.ecr.us-gov-east-1.amazonaws.com/eks/test-cluster-name/elastic/elasticsearch:7.14.0" +## "dest_registry" = "817869416306.dkr.ecr.us-gov-east-1.amazonaws.com" +## "dest_repository" = "eks/test-cluster-name/elastic/elasticsearch" +## "enabled" = true +## "key" = "elastic/elasticsearch#7.14.0" +## "name" = "elastic/elasticsearch" +## "source_full_path" = "docker.elastic.co/elasticsearch/elasticsearch:7.14.0" +## "source_image" = "elasticsearch/elasticsearch" +## "source_registry" = "docker.elastic.co" +## "tag" = "7.14.0" +## } +## "elastic/kibana#7.14.0" = { +## "dest_full_path" = "817869416306.dkr.ecr.us-gov-east-1.amazonaws.com/eks/test-cluster-name/elastic/kibana:7.14.0" +## "dest_registry" = "817869416306.dkr.ecr.us-gov-east-1.amazonaws.com" +## "dest_repository" = "eks/test-cluster-name/elastic/kibana" +## "enabled" = true +## "key" = "elastic/kibana#7.14.0" +## "name" = "elastic/kibana" +## "source_full_path" = "docker.elastic.co/kibana/kibana:7.14.0" +## "source_image" = "kibana/kibana" +## "source_registry" = "docker.elastic.co" +## "tag" = "7.14.0" +## } +## "fluent/fluentd-kubernetes-daemonset#v1.13.3-debian-elasticsearch7-1.2" = { +## "dest_full_path" = "817869416306.dkr.ecr.us-gov-east-1.amazonaws.com/eks/test-cluster-name/fluent/fluentd-kubernetes-daemonset:v1.13.3-debian-elasticsearch7-1.2" +## "dest_registry" = "817869416306.dkr.ecr.us-gov-east-1.amazonaws.com" +## "dest_repository" = "eks/test-cluster-name/fluent/fluentd-kubernetes-daemonset" +## "enabled" = true +## "key" = 
"fluent/fluentd-kubernetes-daemonset#v1.13.3-debian-elasticsearch7-1.2" +## "name" = "fluent/fluentd-kubernetes-daemonset" +## "source_full_path" = "docker.io/fluent/fluentd-kubernetes-daemonset:v1.13.3-debian-elasticsearch7-1.2" +## "source_image" = "fluent/fluentd-kubernetes-daemonset" +## "source_registry" = "docker.io" +## "tag" = "v1.13.3-debian-elasticsearch7-1.2" +## } +## } diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/kubeconfig.eks-subdirectory.tf new file mode 120000 index 0000000..e3750a4 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/kubeconfig.eks-subdirectory.tf @@ -0,0 +1 @@ +../includes.d/kubeconfig.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/locals.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/locals.tf new file mode 100644 index 0000000..4b9ae5a --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/locals.tf @@ -0,0 +1,17 @@ +locals { + base_tags = { + "eks-cluster-name" = var.cluster_name + "boc:tf_module_version" = local._module_version + "boc:created_by" = "terraform" + } +} + +# replace TF remote state accordingly in parent_rs with that from the parent directory, and be sure to make the link +locals { + vpc_id = local.parent_rs.cluster_vpc_id + subnet_ids = local.parent_rs.cluster_subnet_ids + cluster_worker_sg_id = local.parent_rs.cluster_worker_sg_id + + oidc_provider_url = local.parent_rs.oidc_provider_url + oidc_provider_arn = local.parent_rs.oidc_provider_arn +} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/main.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/main.tf new file mode 100644 index 0000000..085983a --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/main.tf @@ -0,0 +1,475 @@ +resource "kubernetes_namespace" "cert-manager" { + metadata { + name = "cert-manager" + } +} + +resource "kubernetes_namespace" "istio-system" { + metadata { + name = "istio-system" + } +} + +# Install Metrics-Server +resource "helm_release" "metrics-server" { + chart = "metrics-server" + name = "metrics-server" + namespace = "kube-system" + repository = local.charts["metrics-server"].use_remote ? local.charts["metrics-server"].repository : "${path.module}/charts" + version = local.charts["metrics-server"].use_remote ? 
local.charts["metrics-server"].version : null + + # depends_on = [null_resource.copy_images] + depends_on = [module.images] + set { + name = "extraArgs[0]" + value = "--kubelet-preferred-address-types=InternalIP" + } + set { + name = "apiService.create" + value = "true" + } + set { + name = "extraArgs[1]" + value = "--cert-dir=/tmp" + } + set { + name = "extraArgs[2]" + value = "--kubelet-use-node-status-port" + } + set { + name = "extraArgs[3]" + value = "--metric-resolution=15s" + } + # set { + # name = "extraArgs[4]" + # value = "--kubelet-insecure-tls=true" + # } + set { + name = "containerPorts.https" + value = 10250 + } + set { + name = "image.registry" + # value = local.account_ecr_registry + value = local.image_output["metrics-server"].dest_registry + } + set { + name = "image.repository" + # value = format("%v/%v", local.repo_parent_name, local.images["metric-server"].name) + # value = local.image_map["metrics-server"].repository + value = local.image_output["metrics-server"].dest_repository + } + + set { + name = "image.tag" + # value = var.metrics_server_tag + value = local.image_output["metrics-server"].tag + } + + timeout = 300 +} + +##-- +## move to cluster-autoscaler.tf +##-- +## resource "helm_release" "cluster-autoscaler" { +## chart = "cluster-autoscaler" +## name = "cluster-autoscaler" +## namespace = "kube-system" +## # repository = "${path.module}/charts/" +## repository = local.charts["cluster-autoscaler"].use_remote ? local.charts["cluster-autoscaler"].repository : "${path.module}/charts" +## version = local.charts["cluster-autoscaler"].use_remote ? local.charts["cluster-autoscaler"].version : null +## # depends_on = [null_resource.copy_images] +## +## depends_on = [module.images] +## set { +## name = "image.repository" +## # value = local.image_repos["cluster-autoscaler"] +## value = split(":", local.image_output["cluster-autoscaler"].dest_full_path)[0] +## } +## set { +## name = "image.tag" +## # value = var.cluster_autoscaler_tag +## value = local.image_output["cluster-autoscaler"].tag +## } +## set { +## name = "autoDiscovery.clusterName" +## value = var.cluster_name +## } +## set { +## name = "awsRegion" +## value = local.region +## } +## set { +## name = "rbac.serviceAccount.create" +## value = "false" +## } +## } + +# Install cert-manager +# https://cert-manager.io/docs/installation/helm/ +# https://artifacthub.io/packages/helm/cert-manager/cert-manager +resource "helm_release" "cert-manager" { + chart = "cert-manager" + name = "cert-manager" + namespace = kubernetes_namespace.cert-manager.metadata[0].name + repository = local.charts["cert-manager"].use_remote ? local.charts["cert-manager"].repository : "${path.module}/charts" + version = local.charts["cert-manager"].use_remote ? 
local.charts["cert-manager"].version : null + + # depends_on = [null_resource.copy_images] + depends_on = [module.images] + + set { + name = "installCRDs" + value = "true" + } + set { + name = "extraArgs" + value = "{--enable-certificate-owner-ref=true}" + } + + set { + name = "image.repository" + # value = local.image_repos["cert-manager-controller"] + value = split(":", local.image_output["cert-manager-controller"].dest_full_path)[0] + } + set { + name = "image.tag" + # value = var.cert_manager_controller_tag + value = local.image_output["cert-manager-controller"].tag + } + + set { + name = "cainjector.image.repository" + # value = local.image_repos["cert-manager-cainjector"] + value = split(":", local.image_output["cert-manager-cainjector"].dest_full_path)[0] + } + set { + name = "cainjector.image.tag" + # value = var.cert_manager_cainjector_tag + value = local.image_output["cert-manager-cainjector"].tag + } + + set { + name = "webhook.image.repository" + # value = local.image_repos["cert-manager-webhook"] + value = split(":", local.image_output["cert-manager-webhook"].dest_full_path)[0] + } + set { + name = "webhook.image.tag" + # value = var.cert_manager_webhook_tag + value = local.image_output["cert-manager-webhook"].tag + } + # set { + # name = "startupapicheck.enabled" + # value = "false" + # } + set { + name = "startupapicheck.image.repository" + value = split(":", local.image_output["cert-manager-startupapicheck"].dest_full_path)[0] + } + set { + name = "startupapicheck.image.tag" + value = local.image_output["cert-manager-startupapicheck"].tag + } + + # timeout = 180 + timeout = 600 +} + +# cert-manager reports ready before the cert-manager-webhook pod +# has completely started and is ready to process requests. This sleep +# is set for a completely arbitrary time to allow cert-manager-webhook +# to finish starting. On slow systems, this may not be long enough, +# but on t3.xlarge, it works fine. +resource "time_sleep" "let_cert-manager-webhook_boot" { + depends_on = [helm_release.cert-manager] + + create_duration = "19s" +} + +## strip out all code for various certificate options and use only the subordinate_ca module (intermediate-certificate-issuer) +## locals { +## tls_crt_file = length(var.tls_crt_file) > 0 ? var.tls_crt_file : "certs/${local.ca_dns_name}.bundle.crt" +## tls_crt_contents = (length(local.tls_crt_file) > 0 && fileexists(local.tls_crt_file)) ? file(local.tls_crt_file) : var.tls_crt_contents +## tls_crt_b64 = length(local.tls_crt_contents) > 0 ? base64encode(local.tls_crt_contents) : var.tls_crt_b64 +## +## tls_key_file = length(var.tls_key_file) > 0 ? var.tls_key_file : "certs/${local.ca_dns_name}.key" +## tls_key_contents = (length(local.tls_key_file) > 0 && fileexists(local.tls_key_file)) ? file(local.tls_key_file) : var.tls_key_contents +## tls_key_b64 = length(local.tls_key_contents) > 0 ? base64encode(local.tls_key_contents) : var.tls_key_b64 +## +## intermediate_ca = (length(local.tls_crt_b64) > 0) && (length(local.tls_key_b64) > 0) +## +## vault_ca_bundle_pem_file = var.vault_ca_bundle_pem_file +## vault_ca_bundle_pem = ((length(local.vault_ca_bundle_pem_file) > 0) ? +## file(local.vault_ca_bundle_pem_file) +## : var.vault_ca_bundle_pem) +## vault_ca_bundle_pem_b64 = ((length(local.vault_ca_bundle_pem) > 0) ? 
+## base64encode(local.vault_ca_bundle_pem) +## : var.vault_ca_bundle_pem_b64) +## +## vault_ca = !local.intermediate_ca && length(var.vault_url) > 0 +## +## self_signed_ca = !local.intermediate_ca && !local.vault_ca +## +## defined_ca = (local.self_signed_ca ? 1 : 0) + (local.intermediate_ca ? 1 : 0) + (local.vault_ca ? 1 : 0) +## } +## +# configure the certificate issuer. +## # when self-signed certs are requested +## resource "helm_release" "self-signed-certificate-issuer" { +## count = local.self_signed_ca == true ? 1 : 0 +## +## chart = "self-signed-certificate-issuer" +## name = "certificate-issuer" +## namespace = kubernetes_namespace.cert-manager.metadata[0].name +## repository = "${path.module}/charts/" +## +## depends_on = [time_sleep.let_cert-manager-webhook_boot] +## +## # Required because the chart creates "non-standard" kubernetes resources +## # that use the cert-manager CRDs. +## disable_openapi_validation = true +## } + +# when an intermediate CA is requested +resource "helm_release" "intermediate-certificate-issuer" { + ## count = local.intermediate_ca == true ? 1 : 0 + + chart = "intermediate-certificate-issuer" + name = "certificate-issuer" + namespace = kubernetes_namespace.cert-manager.metadata[0].name + repository = "${path.module}/charts/" + + depends_on = [time_sleep.let_cert-manager-webhook_boot, module.subordinate_ca] + + # Required because the chart creates "non-standard" kubernetes resources + # that use the cert-manager CRDs. + disable_openapi_validation = true + + set { + name = "tls.crt" + value = module.subordinate_ca.certificate_tls_crt + } + set { + name = "tls.key" + value = module.subordinate_ca.certificate_tls_key + } +} + + +## # when using vault as a CA is requested +## resource "helm_release" "vault-certificate-issuer" { +## count = local.vault_ca == true ? 1 : 0 +## +## chart = "vault-certificate-issuer" +## name = "certificate-issuer" +## namespace = kubernetes_namespace.cert-manager.metadata[0].name +## repository = "${path.module}/charts/" +## +## depends_on = [time_sleep.let_cert-manager-webhook_boot] +## +## # Required because the chart creates "non-standard" kubernetes resources +## # that use the cert-manager CRDs. +## disable_openapi_validation = true +## +## set { +## name = "vault.url" +## value = var.vault_url +## } +## set { +## name = "vault.path" +## value = var.vault_path +## } +## set { +## name = "vault.ca_bundle" +## value = local.vault_ca_bundle_pem_b64 +## } +## set { +## name = "vault.authentication_type" +## value = var.vault_authentication +## } +## +## set { +## name = "approle.secret_id" +## value = var.vault_approle_secret_id +## } +## set { +## name = "approle.role_id" +## value = var.vault_approle_role_id +## } +## set { +## name = "approle.role_path" +## value = var.vault_approle_role_path +## } +## +## set { +## name = "token.token" +## value = var.vault_token +## } +## +## set { +## name = "serviceAccount.secret" +## value = var.vault_serviceaccount_sa +## } +## +## set { +## name = "serviceAccount.role" +## value = var.vault_serviceaccount_role +## } +## set { +## name = "serviceAccount.mountPath" +## value = var.vault_serviceaccount_mountpath +## } +## } + +# installs the istio-operator that will listen for profile configurations to +# install, configure, and modify the istio components.
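+# A hedged usage sketch (an assumption based on the IstioOperator CRD shipped in
+# this chart and the profile name set by the istio-profile chart): once this
+# release and the istio-profile release below are applied, the reconciled
+# profile can be inspected with:
+#   kubectl -n istio-system get istiooperators.install.istio.io istio-profile -o yaml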
+resource "helm_release" "istio-operator" { + chart = "istio-operator" + name = "istio-operator" + namespace = kubernetes_namespace.istio-system.metadata[0].name + repository = "${path.module}/charts/" + + depends_on = [helm_release.cert-manager] + + set { + name = "hub" + # value = format("%v/%v", local.account_ecr, "istio") + value = format("%v/eks/%v/%v", local.image_output["istio/operator"].dest_registry, var.cluster_name, "istio") + } + set { + name = "tag" + # value = var.istio_tag + value = local.image_output["istio/operator"].tag + } + set { + name = "operatorNamespace" + value = "operators" + } + set { + name = "watchedNamespaces" + value = kubernetes_namespace.istio-system.metadata[0].name + } + + timeout = 180 +} + +# Need to access the IP address of the apiserver for the next step. +data "kubernetes_service" "apiserver" { + metadata { + name = "kubernetes" + } +} + +# sets up service mesh +resource "helm_release" "istio-profile" { + chart = "istio-profile" + name = "istio-profile" + namespace = kubernetes_namespace.istio-system.metadata[0].name + repository = "${path.module}/charts/" + + # depends_on = [helm_release.istio-operator, null_resource.certificate-issuers] + depends_on = [helm_release.istio-operator] + + set { + name = "hub" + # value = format("%v/%v", local.account_ecr, "istio") + value = format("%v/eks/%v/%v", local.image_output["istio/operator"].dest_registry, var.cluster_name, "istio") + } + set { + name = "tag" + value = var.istio_tag + } + # Passes in the API server so it can be excluded from requiring mTLS from + # pods that are protected by istio. It already implements SSL. + set { + name = "apiserver" + value = "${data.kubernetes_service.apiserver.spec[0].cluster_ip}/32" + } +} + +# Creating the istio profile is very quick. Time is needed to allow +# istio-operator to install the CRDs and deploy istio. +resource "time_sleep" "let_istio-operator_install_istio" { + depends_on = [helm_release.istio-profile] + + create_duration = "19s" +} + +# Require all pods in the service mesh to use mTLS +resource "helm_release" "istio-peer-authentication" { + chart = "istio-peerauthentication" + name = "istio-peerauthentication" + namespace = kubernetes_namespace.istio-system.metadata[0].name + repository = "${path.module}/charts/" + + depends_on = [time_sleep.let_istio-operator_install_istio] +} + +## resource "null_resource" "certificate-issuers" { +## triggers = { +## self_signed_ca = join(",", helm_release.self-signed-certificate-issuer[*].id) +## intermediate_ca = join(",", helm_release.intermediate-certificate-issuer[*].id) +## vault_ca = join(",", helm_release.vault-certificate-issuer[*].id) +## } +## provisioner "local-exec" { +## command = "if [ ${local.defined_ca} == 0 ]; then echo 'no-certificate-issuer defined'; exit 1; fi" +## } +## } + +## +## name = "cert-manager" +## name = "metrics-server" +## name = "cert-manager-controller" +## name = "cert-manager-cainjector" +## name = "cert-manager-webhook" +## name = "cluster-autoscaler" +## name = "metrics-server" +## name = "istio/operator" +## name = "istio/pilot" +## name = "istio/proxyv2" +## +## +## local.image_output[name]. 
+## +## ## "fluent/fluentd-kubernetes-daemonset#v1.13.3-debian-elasticsearch7-1.2" = { +## ## "dest_full_path" = "817869416306.dkr.ecr.us-gov-east-1.amazonaws.com/eks/test-cluster-name/fluent/fluentd-kubernetes-daemonset:v1.13.3-debian-elasticsearch7-1.2" +## ## "dest_registry" = "817869416306.dkr.ecr.us-gov-east-1.amazonaws.com" +## ## "dest_repository" = "eks/test-cluster-name/fluent/fluentd-kubernetes-daemonset" +## ## "enabled" = true +## ## "key" = "fluent/fluentd-kubernetes-daemonset#v1.13.3-debian-elasticsearch7-1.2" +## ## "name" = "fluent/fluentd-kubernetes-daemonset" +## ## "source_full_path" = "docker.io/fluent/fluentd-kubernetes-daemonset:v1.13.3-debian-elasticsearch7-1.2" +## ## "source_image" = "fluent/fluentd-kubernetes-daemonset" +## ## "source_registry" = "docker.io" +## ## "tag" = "v1.13.3-debian-elasticsearch7-1.2" +## ## } +## +## +## +## > local.image_map +## { +## "cert-manager-cainjector" = { +## "enabled" = true +## "full_path" = "247901282001.dkr.ecr.us-gov-west-1.amazonaws.com/eks/ditd-gppsys-ite/cert-manager-cainjector" +## "image" = "quay.io/jetstack/cert-manager-cainjector" +## "name" = "cert-manager-cainjector" +## "registry" = "247901282001.dkr.ecr.us-gov-west-1.amazonaws.com" +## "repository" = "eks/ditd-gppsys-ite/cert-manager-cainjector" +## "tag" = "v1.4.3" +## } +## +## +## > local.image_repos +## { +## "cert-manager-cainjector" = "247901282001.dkr.ecr.us-gov-west-1.amazonaws.com/eks/ditd-gppsys-ite/cert-manager-cainjector" +## "cert-manager-controller" = "247901282001.dkr.ecr.us-gov-west-1.amazonaws.com/eks/ditd-gppsys-ite/cert-manager-controller" +## "cert-manager-webhook" = "247901282001.dkr.ecr.us-gov-west-1.amazonaws.com/eks/ditd-gppsys-ite/cert-manager-webhook" +## "cluster-autoscaler" = "247901282001.dkr.ecr.us-gov-west-1.amazonaws.com/eks/ditd-gppsys-ite/cluster-autoscaler" +## "istio/operator" = "247901282001.dkr.ecr.us-gov-west-1.amazonaws.com/eks/ditd-gppsys-ite/istio/operator" +## "istio/pilot" = "247901282001.dkr.ecr.us-gov-west-1.amazonaws.com/eks/ditd-gppsys-ite/istio/pilot" +## "istio/proxyv2" = "247901282001.dkr.ecr.us-gov-west-1.amazonaws.com/eks/ditd-gppsys-ite/istio/proxyv2" +## "metrics-server" = "247901282001.dkr.ecr.us-gov-west-1.amazonaws.com/eks/ditd-gppsys-ite/metrics-server" +## } +## diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/parent_rs.tf new file mode 120000 index 0000000..d85ece6 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/parent_rs.tf @@ -0,0 +1 @@ +../includes.d/parent_rs.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/prefixes.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/providers.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/region.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/region.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ 
b/examples/full-cluster-tf-upgrade/1.32/common-services/region.tf
@@ -0,0 +1,4 @@
+locals {
+  region = var.region
+}
+
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/tags.md b/examples/full-cluster-tf-upgrade/1.32/common-services/tags.md
new file mode 100644
index 0000000..ab5b05e
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/common-services/tags.md
@@ -0,0 +1,20 @@
+# Tagging
+
+## Istio
+
+For the [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/guide/service/annotations/#load-balancer-attributes), we want
+to enable the following:
+
+* S3 Access Logs
+```
+service.beta.kubernetes.io/aws-load-balancer-attributes: access_logs.s3.enabled=true,access_logs.s3.bucket=my-access-log-bucket,access_logs.s3.prefix=my-app
+```
+* Disable IP address persistence (needed for Cumulus, may not be needed for others; will be made a variable)
+```
+#service.beta.kubernetes.io/aws-load-balancer-target-group-attributes: stickiness.enabled=true,stickiness.type=source_ip
+service.beta.kubernetes.io/aws-load-balancer-target-group-attributes: stickiness.enabled=false
+```
+* Pass additional tags (from `var.application_tags.auto.tfvars`)
+```
+service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: key=value,key=value
+```
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/tf-run.data b/examples/full-cluster-tf-upgrade/1.32/common-services/tf-run.data
new file mode 100644
index 0000000..bd539f5
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/common-services/tf-run.data
@@ -0,0 +1,72 @@
+VERSION 2.1.1
+REMOTE-STATE
+COMMAND tf-directory-setup.py -l none -f
+COMMAND setup-new-directory.sh
+
+TAG links
+LINKTOP init
+LINKTOP includes.d/variables.account_tags.tf
+LINKTOP includes.d/variables.account_tags.auto.tfvars
+LINKTOP includes.d/variables.infrastructure_tags.tf
+LINKTOP includes.d/variables.infrastructure_tags.auto.tfvars
+LINKTOP includes.d/variables.application_tags.tf
+# LINKTOP includes.d/variables.application_tags.auto.tfvars
+LINK variables.application_tags.auto.tfvars
+LINKTOP provider_configs.d/provider.ldap_new.auto.tfvars
+LINKTOP provider_configs.d/provider.ldap_new.tf
+LINKTOP provider_configs.d/provider.ldap_new.variables.tf
+LINK settings.auto.tfvars
+LINK includes.d/parent_rs.tf
+LINK includes.d/data.eks-subdirectory.tf
+LINK includes.d/kubeconfig.eks-subdirectory.tf
+LINK variables.eks.tf
+LINK prefixes.tf
+LINK providers.tf
+LINK variables.addons.tf
+LINK versions.tf
+LINK version.tf
+LINK variables.vpc.tf
+LINK variables.vpc.auto.tfvars
+# links for images, charts
+LINK images.yml
+LINK charts.yml
+LINK charts-images.tf
+
+TAG init
+COMMAND tf-init
+
+TAG start
+module.images
+
+TAG state-link
+COMMAND tf-directory-setup.py -l s3
+
+## certificates replaced with the new subordinate_ca module using acmpca
+TAG start-certificate
+module.subordinate_ca
+
+## COMMENT Adding key to git-secret, hiding, and adding to git. Manually commit afterwards.
+## COMMAND git-secret add certs/*.key
+## COMMAND git-secret hide -m
+## COMMAND git add certs/*.key.secret
+## COMMENT execute: git commit -m add-pki-key -a
+##
+## COMMENT Submit certs/*csr using the command output listed in apply to TCO for signing
+## COMMENT When submitting the form to request TCO to provision the certificate, in the Additional Information field, enter "requesting sub-CA certificate".
+## COMMENT Then contact the TCO team to inform them of the ticket number from the form submission, to raise their awareness of the sub-CA certificate type.
+## COMMENT Also request the TCO team to provide the Trust Chain along with the sub-CA certificate.
+## COMMENT Once the sub-CA certificate and Trust Chain files are available, put the sub-CA certificate file under the certs folder and the Trust Chain under certs/root.
+## STOP Wait for certificate to be signed, then continue with %%NEXT%%.
+##
+## TAG have-certificate
+## module.cert
+## module.cert
+
+TAG continue
+kubernetes_namespace.cert-manager kubernetes_namespace.istio-system
+helm_release.metrics-server helm_release.cert-manager helm_release.intermediate-certificate-issuer helm_release.istio-operator helm_release.istio-profile helm_release.istio-peer-authentication
+ALL
+
+## COMMENT Manually append the Trust Chain to the generated certificate bundle
+COMMENT cd cluster-autoscaler and tf-run.sh apply
+COMMENT come back to this directory
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/tf-run.destroy.data b/examples/full-cluster-tf-upgrade/1.32/common-services/tf-run.destroy.data
new file mode 100644
index 0000000..0d44f6e
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/common-services/tf-run.destroy.data
@@ -0,0 +1,9 @@
+VERSION 2.0.0
+BACKUP-STATE
+COMMAND tf-init
+COMMAND tf-state list
+
+## module.cert
+## COMMENT git-secret remove -c */*.key
+
+ALL
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/variables.common-services.auto.tfvars b/examples/full-cluster-tf-upgrade/1.32/common-services/variables.common-services.auto.tfvars
new file mode 100644
index 0000000..da9513b
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/common-services/variables.common-services.auto.tfvars
@@ -0,0 +1,21 @@
+#istio_tag = "1.23.4"
+istio_tag = "1.26.0"
+tls_crt_b64 = ""
+tls_crt_contents = ""
+tls_crt_file = ""
+tls_key_b64 = ""
+tls_key_contents = ""
+tls_key_file = ""
+vault_approle_role_id = ""
+vault_approle_role_path = ""
+vault_approle_secret_id = ""
+vault_authentication = ""
+vault_ca_bundle_pem = ""
+vault_ca_bundle_pem_b64 = ""
+vault_ca_bundle_pem_file = ""
+vault_path = ""
+vault_serviceaccount_mountpath = ""
+vault_serviceaccount_role = ""
+vault_serviceaccount_sa = ""
+vault_token = ""
+vault_url = ""
diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/variables.common-services.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/variables.common-services.tf
new file mode 100644
index 0000000..f25682b
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/common-services/variables.common-services.tf
@@ -0,0 +1,208 @@
+#############################################################################
+# Options for configuring cert-manager to generate certificates for https
+# termination in the cluster:
+#
+# - By not configuring any other method, cert-manager is configured to
+#   generate a private key and a self-signed CA, which will be stored in the
+#   root-secret secret in the cert-manager namespace. Certificates are then
+#   signed using this internal CA.
+# - tls_cert / tls_key - intermediate CA - By configuring a tls_cert and
+#   tls_key (as a file, raw contents, or base64 encoded data; see below),
+#   cert-manager will be configured to create certificates based upon the
+#   intermediate certificate provided.
+# - vault - By configuring information about the vault, cert-manager will be
+#   configured to interact with the vault to create certificates.
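+#
+# Example (hypothetical file names): setting tls_crt_file =
+# "certs/my-ca.bundle.crt" and tls_key_file = "certs/my-ca.key" selects the
+# intermediate-CA mode; leaving all tls_* and vault_* variables blank falls
+# back to the self-signed CA.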
+#
+# For fields that ultimately need to be base64 encoded, there are
+# typically three input variables for each field.
+# 1. variable with a path to a file that holds the unencoded data, which
+#    will be read by terraform and encoded into a base64 string to be used
+#    as needed. This field has the highest precedence of the three fields.
+# 2. variable with the raw unencoded data, which will be encoded into a
+#    base64 string to be used as needed. This field has the second highest
+#    precedence of the three fields.
+# 3. variable with the base64 encoded data ready for use. This field has
+#    the lowest precedence of the three fields.
+#############################################################################
+
+#############################################################################
+# vault
+#
+# To use Vault as the certificate authority for cert-manager, first supply
+# the common configuration elements. Once complete, configure the selected
+# authentication method and fill in the details for that authentication type.
+#############################################################################
+variable "vault_url" {
+  description = "URL to the vault server."
+  type        = string
+  default     = ""
+}
+
+variable "vault_path" {
+  description = "Path is the Vault path that will be used for signing. Note that the path must use the sign endpoint."
+  type        = string
+  default     = ""
+}
+
+variable "vault_ca_bundle_pem_file" {
+  description = "Path to the pem file that holds the CA bundle containing the Certificate Authority to trust the Vault connection. This is typically always required when using an https URL."
+  type        = string
+  default     = ""
+}
+
+variable "vault_ca_bundle_pem" {
+  description = "Contents of the pem file holding the CA bundle containing the Certificate Authority to trust the Vault connection. This is typically always required when using an https URL."
+  type        = string
+  default     = ""
+}
+
+variable "vault_ca_bundle_pem_b64" {
+  description = "Base64 encoded contents of the pem file holding the CA bundle containing the Certificate Authority to trust the Vault connection. This is typically always required when using an https URL."
+  type        = string
+  default     = ""
+}
+
+variable "vault_authentication" {
+  description = "How to authenticate with the vault. This value must be blank when not using Vault, or one of 'AppRole', 'Token', or 'ServiceAccount'."
+  type        = string
+  default     = ""
+}
+
+#############################################################################
+# for AppRole authentication
+variable "vault_approle_secret_id" {
+  description = "The vault SecretID for the AppRole. This is stored in the vault secret in the cert-manager namespace."
+  type        = string
+  default     = ""
+  # sensitive = true
+}
+
+variable "vault_approle_role_id" {
+  description = "The vault RoleID for cert-manager to assume."
+  type        = string
+  default     = ""
+}
+
+variable "vault_approle_role_path" {
+  description = "The vault app role path for the role for cert-manager to assume."
+  type        = string
+  default     = ""
+}
+
+#############################################################################
+# for Token authentication
+variable "vault_token" {
+  description = "The vault token that cert-manager should use to authenticate with vault. Note that tokens expire, and the token must be refreshed manually. This token is stored in the vault secret in the cert-manager namespace."
+  type    = string
+  default = ""
+  # sensitive = true
+}
+
+#############################################################################
+# for ServiceAccount authentication
+variable "vault_serviceaccount_sa" {
+  description = "The name of the service account in the cert-manager namespace to use to access the token to communicate with vault."
+  type        = string
+  default     = ""
+}
+
+variable "vault_serviceaccount_role" {
+  description = "The role cert-manager is to assume."
+  type        = string
+  default     = ""
+}
+
+variable "vault_serviceaccount_mountpath" {
+  description = "The location to mount the secret into the filesystem. Defaults to 'kubernetes'."
+  type        = string
+  default     = ""
+}
+
+#############################################################################
+# tls_cert / tls_key - intermediate CA
+#
+# To use an intermediate CA, configure one tls_crt_* and one tls_key_* field
+# with correct values, which configures cert-manager to sign cert requests
+# with an intermediate key.
+#
+# Input can be the file, file contents, or base64 encoded file contents to
+# allow chaining the output of a module that can generate an intermediate CA
+# to the input of this script. Depending on how the intermediate CA is
+# generated, pass the output as input in whichever form is easiest.
+#
+# See https://cert-manager.io/docs/configuration/ca/
+#############################################################################
+
+variable "tls_crt_file" {
+  description = "Path to the file that holds the tls.crt representing the issuer's full chain in the correct order (issuer -> intermediate(s) -> root). When left blank, cert-manager is configured with a self-signed CA."
+  type        = string
+  default     = ""
+}
+
+variable "tls_key_file" {
+  description = "Path to the file that holds the signing private key. When left blank, cert-manager is configured with a self-signed CA."
+  type        = string
+  default     = ""
+}
+
+variable "tls_crt_contents" {
+  description = "The contents of the file that holds the tls.crt representing the issuer's full chain in the correct order (issuer -> intermediate(s) -> root). When left blank, cert-manager is configured with a self-signed CA."
+  type        = string
+  default     = ""
+}
+
+variable "tls_key_contents" {
+  description = "The contents of the file that holds the signing private key. When left blank, cert-manager is configured with a self-signed CA."
+  type        = string
+  default     = ""
+}
+
+variable "tls_crt_b64" {
+  description = "The base64 encoded contents of the file that holds the tls.crt representing the issuer's full chain in the correct order (issuer -> intermediate(s) -> root). When left blank, cert-manager is configured with a self-signed CA."
+  type        = string
+  default     = ""
+}
+
+variable "tls_key_b64" {
+  description = "The base64 encoded contents of the file that holds the signing private key. When left blank, cert-manager is configured with a self-signed CA."
+  type        = string
+  default     = ""
+}
+
+# See the readme `Updating the cert-manager chart` to find these values.
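+# A quick way to list the available chart/app versions (a sketch; assumes
+# internet access and that the jetstack helm repo is configured):
+#   helm repo add jetstack https://charts.jetstack.io && helm repo update
+#   helm search repo jetstack/cert-manager --versions | head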
+variable "cert_manager_controller_tag" { + description = "Which tag of public.ecr.aws/eks-anywhere/jetstack/cert-manager-controller" + type = string + default = "v1.4.3" +} + +variable "cluster_autoscaler_tag" { + description = "Image tag of public.ecr.aws/v0g0y9g5/cluster-autoscaler" + type = string + default = "v1.21.0" +} + +variable "metrics_server_tag" { + description = "Which tag of metrics-server" + type = string + default = "0.5.0-debian-10-r83" +} + +variable "cert_manager_cainjector_tag" { + description = "Which tag of public.ecr.aws/eks-anywhere/jetstack/cert-manager-cainjector" + type = string + default = "v1.4.3" +} + +variable "cert_manager_webhook_tag" { + description = "Which tag of public.ecr.aws/eks-anywhere/jetstack/cert-manager-webhook" + type = string + default = "v1.4.3" +} + +# Set the readme `Updating the istio chart` to find these values. +variable "istio_tag" { + description = "The version of istio to install" + type = string + default = "1.10.1" +} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/variables.eks.tf new file mode 120000 index 0000000..7dd95db --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/variables.eks.tf @@ -0,0 +1 @@ +../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/variables.images.auto.tfvars b/examples/full-cluster-tf-upgrade/1.32/common-services/variables.images.auto.tfvars new file mode 100644 index 0000000..d85a621 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/variables.images.auto.tfvars @@ -0,0 +1,163 @@ +wanted_charts = [ + "cert-manager", + "metrics-server", +] +wanted_images = [ + "cert-manager-controller", + "cert-manager-cainjector", + "cert-manager-webhook", + "cert-manager-startupapicheck", + "metrics-server", + "istio-operator", + "istio-pilot", + "istio-proxyv2", + "prometheus", + "alertmanager", + "prometheus-operator", +] + +chart_details = { + "cert-manager" = { + name = "cert-manager" + repository = "https://charts.jetstack.io" + version = "v1.16.4" + use_remote = true + } + "metrics-server" = { + name = "metrics-server" + repository = "https://charts.bitnami.com/bitnami" + version = "7.3.0" + use_remote = true + } + #helm repo add prometheus-community https://prometheus-community.github.io/helm-charts + #helm repo update + # 25.17.0 + # helm install my-release oci://registry-1.docker.io/bitnamicharts/prometheus + # 0.12.1 +} + +image_details = { + # cert-manager + "cert-manager-controller" = { + name = "cert-manager-controller" + image = "quay.io/jetstack/cert-manager-controller" + dest_path = null + source_registry = "quay.io" + source_image = "jetstack/cert-manager-controller" + source_tag = null + tag = "v1.16.2" + enabled = true + } + "cert-manager-cainjector" = { + name = "cert-manager-cainjector" + image = "quay.io/jetstack/cert-manager-cainjector" + dest_path = null + source_registry = "quay.io" + source_image = "jetstack/cert-manager-cainjector" + source_tag = null + tag = "v1.16.2" + enabled = true + } + "cert-manager-webhook" = { + name = "cert-manager-webhook" + image = "quay.io/jetstack/cert-manager-webhook" + dest_path = null + source_registry = "quay.io" + source_image = "jetstack/cert-manager-webhook" + source_tag = null + tag = "v1.16.2" + enabled = true + } + "cert-manager-startupapicheck" = { + name = "cert-manager-startupapicheck" + image = "quay.io/jetstack/cert-manager-startupapicheck" + 
dest_path = null + source_registry = "quay.io" + source_image = "jetstack/cert-manager-startupapicheck" + source_tag = null + tag = "v1.16.2" + enabled = true + } + + # metrics-server + "metrics-server" = { + name = "metrics-server" + image = "docker.io/bitnami/metrics-server" + dest_path = null + source_registry = "docker.io" + source_image = "bitnami/metrics-server" + source_tag = null + tag = "0.7.2" + # tag = "0.7.2-debian-12-r8" + enabled = true + } + + # istio + "istio-operator" = { + name = "istio/operator" + image = "docker.io/istio/operator" + dest_path = null + source_registry = "docker.io" + source_image = "istio/operator" + source_tag = null + tag = "1.24.2" + enabled = true + } + "istio-pilot" = { + name = "istio/pilot" + image = "docker.io/istio/pilot" + dest_path = null + source_registry = "docker.io" + source_image = "istio/pilot" + source_tag = null + tag = "1.24.2" + enabled = true + } + "istio-proxyv2" = { + name = "istio/proxyv2" + image = "docker.io/istio/proxyv2" + dest_path = null + source_registry = "docker.io" + source_image = "istio/proxyv2" + source_tag = null + tag = "1.24.2" + enabled = true + } + + # prometheus + "prometheus" = { + name = "prometheus" + image = "docker.io/bitnami/prometheus" + dest_path = null + source_registry = "docker.io" + source_image = "bitnami/prometheus" + source_tag = null + tag = "3.0.1" + # tag = "3.0.1-debian-12-r1" + enabled = true + } + "alertmanager" = { + name = "alertmanager" + image = "docker.io/bitnami/alertmanager" + dest_path = null + source_registry = "docker.io" + source_image = "bitnami/alertmanager" + source_tag = null + tag = "0.27.0" + # tag = "0.27.0-debian-12-r28" + enabled = true + } + + # prometheus-operator + "prometheus-operator" = { + name = "prometheus-operator" + image = "docker.io/bitnami/prometheus-operator" + dest_path = null + source_registry = "docker.io" + source_image = "bitnami/prometheus-operator" + source_tag = null + tag = "0.79.2" + # tag = "0.79.2-debian-12-r0" + enabled = true + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/variables.images.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/variables.images.tf new file mode 100644 index 0000000..41a35a5 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/variables.images.tf @@ -0,0 +1,38 @@ +variable "chart_details" { + description = "Map of object with details about remote charts" + type = map(object( + { + name = string + repository = string + version = string + use_remote = bool + })) + default = {} +} + +variable "image_details" { + description = "List of image configuration objects to copy from SOURCE to DESTINATION" + type = map(object({ + name = string, + tag = string, + dest_path = string, + source_registry = string, + source_image = string, + source_tag = string, + enabled = bool, + })) + default = {} +} + + +variable "wanted_charts" { + description = "List of chart names (from charts.yml) desired for this directory/module" + type = list(string) + default = [] +} + +variable "wanted_images" { + description = "List of image names (from images.yml) desired for this directory/module" + type = list(string) + default = [] +} diff --git a/examples/full-cluster-tf-upgrade/1.32/common-services/version.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/common-services/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git 
a/examples/full-cluster-tf-upgrade/1.32/common-services/versions.tf b/examples/full-cluster-tf-upgrade/1.32/common-services/versions.tf
new file mode 120000
index 0000000..8bd0ff1
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/common-services/versions.tf
@@ -0,0 +1 @@
+../versions.tf
\ No newline at end of file
diff --git a/examples/full-cluster-tf-upgrade/1.32/create-iam-config.sh b/examples/full-cluster-tf-upgrade/1.32/create-iam-config.sh
new file mode 100755
index 0000000..9bb68f1
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/create-iam-config.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+PROFILE=$1
+CLUSTER=$2
+REGION=$3
+
+if [ -z "$PROFILE" ]
+then
+  PROFILE=$(grep -E '^\bprofile\b *' *tfvars | sed -e 's/^.*profile.* =//' -e 's/\"//g' -e 's/^ *//' | head -n 1)
+fi
+if [ -z "$PROFILE" ]
+then
+  echo "* unable to determine profile, please pass as argument 1"
+  exit 1
+else
+  echo "* using profile $PROFILE"
+fi
+
+if [ -z "$CLUSTER" ]
+then
+  CLUSTER=$(grep -E '^\bcluster_name\b *' settings.auto.tfvars | sed -e 's/^.*cluster_name.* =//' -e 's/\"//g' -e 's/^ *//' | head -n 1)
+fi
+if [ -z "$CLUSTER" ]
+then
+  echo "* unable to determine cluster name, please pass as argument 2"
+  exit 1
+else
+  echo "* using cluster $CLUSTER"
+fi
+
+ADMINROLE=$(terraform output role_cluster-admin-role_arn)
+if [ -z "$ADMINROLE" ]
+then
+  echo "* unable to determine cluster $CLUSTER admin role. Check that you are in the correct directory and that terraform has been run"
+  exit 1
+fi
+
+if [ -z "$REGION" ]
+then
+  echo "* getting region from profile $PROFILE"
+  REGION=$(aws configure --profile $PROFILE get region)
+else
+  echo "* using region $REGION"
+fi
+
+NEWPROFILE="$PROFILE-eks-$CLUSTER"
+EXISTS=$(aws configure list-profiles | grep -c "^$NEWPROFILE$")
+
+if [ $EXISTS -eq 0 ]
+then
+  echo "* creating new configuration profile $NEWPROFILE for assume role $ADMINROLE"
+else
+  echo "* replacing configuration for profile $NEWPROFILE for assume role $ADMINROLE"
+fi
+echo ""
+
+( echo "aws configure set profile.$NEWPROFILE.source_profile $PROFILE" ; \
+  echo "aws configure set profile.$NEWPROFILE.region $REGION" ; \
+  echo "aws configure set profile.$NEWPROFILE.role_arn $ADMINROLE" ; \
+  echo "aws configure set profile.$NEWPROFILE.role_session_name $USER" ) | sh -x
+
+echo ""
+echo "* test with: aws --profile $NEWPROFILE sts get-caller-identity"
diff --git a/examples/full-cluster-tf-upgrade/1.32/data.eks-main.tf b/examples/full-cluster-tf-upgrade/1.32/data.eks-main.tf
new file mode 100644
index 0000000..7ead28b
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/data.eks-main.tf
@@ -0,0 +1,18 @@
+locals {
+  aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster
+  # for main.tf
+  aws_eks_cluster = aws_eks_cluster.eks_cluster
+  # for all subdirectories
+  ## aws_eks_cluster = data.aws_eks_cluster.cluster
+}
+
+data "aws_eks_cluster_auth" "cluster" {
+  name = var.cluster_name
+}
+
+#---
+# for all subdirectories only
+#---
+## data "aws_eks_cluster" "cluster" {
+##   name = var.cluster_name
+## }
diff --git a/examples/full-cluster-tf-upgrade/1.32/dns-zone.route53-profile.tf b/examples/full-cluster-tf-upgrade/1.32/dns-zone.route53-profile.tf
new file mode 100644
index 0000000..9f995a0
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/dns-zone.route53-profile.tf
@@ -0,0 +1,34 @@
+data "aws_route53profiles_profiles" "east_vpc_profiles" {
+  provider = aws.east
+}
+data "aws_route53profiles_profiles" "west_vpc_profiles" {
+  provider = aws.west
+}
+
+locals {
+  east_route53_profiles = { for v in
data.aws_route53profiles_profiles.east_vpc_profiles.profiles : v.name => v.id } + west_route53_profiles = { for v in data.aws_route53profiles_profiles.west_vpc_profiles.profiles : v.name => v.id } + route53_profile_mapping = { + "shared" = "services" + "ite" = "test" + "qa" = "test" + "uat" = "test" + } + route53_profile = lookup(local.route53_profile_mapping, var.vpc_environment, var.vpc_environment) +} + +resource "aws_route53profiles_resource_association" "east_zone" { + provider = aws.east + region = "us-gov-east-1" + name = format("%v-%v zone %v", local.route53_profile, "vpc", aws_route53_zone.cluster_domain.zone_id) + profile_id = local.east_route53_profiles[local.route53_profile] + resource_arn = aws_route53_zone.cluster_domain.arn +} + +resource "aws_route53profiles_resource_association" "west_zone" { + provider = aws.west + region = "us-gov-west-1" + name = format("%v-%v zone %v", local.route53_profile, "vpc", aws_route53_zone.cluster_domain.zone_id) + profile_id = local.west_route53_profiles[local.route53_profile] + resource_arn = aws_route53_zone.cluster_domain.arn +} diff --git a/examples/full-cluster-tf-upgrade/1.32/dns-zone.tf b/examples/full-cluster-tf-upgrade/1.32/dns-zone.tf new file mode 100644 index 0000000..ba4fe61 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/dns-zone.tf @@ -0,0 +1,237 @@ +locals { + vpc_domain_name = coalesce(var.domain, var.vpc_domain_name) + cluster_domain_name = format("%v.%v", var.cluster_name, local.vpc_domain_name) + cluster_domain_description = format("%v EKS Cluster DNS Zone", var.cluster_name) +} + +#--- +# network prod +#--- +provider "aws" { + alias = "route53_main_east" + profile = var.profile + region = var.region_map["east"] + assume_role { + role_arn = format("arn:%v:iam::%v:role/r-inf-terraform-route53", data.aws_arn.current.partition, var.route53_endpoints["route53_main"].account_id) + session_name = var.os_username + } +} + +provider "aws" { + alias = "route53_main_west" + profile = var.profile + region = var.region_map["west"] + assume_role { + role_arn = format("arn:%v:iam::%v:role/r-inf-terraform-route53", data.aws_arn.current.partition, var.route53_endpoints["route53_main"].account_id) + session_name = var.os_username + } +} + +#--- +# dummy vpc, so we can associate the zone to this account +#--- +data "aws_vpc" "dummy_vpc" { + count = !(var.shared_vpc_label == null || var.shared_vpc_label == "") ? 1 : 0 + filter { + name = "tag:Name" + values = ["vpc0-dummy"] + } +} + +resource "aws_route53_zone" "cluster_domain" { + name = local.cluster_domain_name + comment = local.cluster_domain_description + force_destroy = false + + vpc { + vpc_id = !(var.shared_vpc_label == null || var.shared_vpc_label == "") ? try(data.aws_vpc.dummy_vpc[0].id, null) : data.aws_vpc.eks_vpc.id + vpc_region = local.region + } + + lifecycle { + ignore_changes = [vpc] + precondition { + condition = (var.shared_vpc_label == null || var.shared_vpc_label == "") || (!(var.shared_vpc_label == null || var.shared_vpc_label == "") && !(var.domain == null || var.domain == "")) + error_message = "var.domain must be provided when shared VPCs are in use." + } + } + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + { "Name" = local.cluster_domain_name }, + ) +} + +#--- +# need to also associate with network-prod account and this vpc +#--- +module "route53_cluster_domain_east" { + count = local.region == "us-gov-east-1" && !(var.shared_vpc_label == null || var.shared_vpc_label == "") ? 
1 : 0 + providers = { + aws.self = aws + aws.peer = aws.route53_main_east + } + + source = "git@github.e.it.census.gov:terraform-modules/aws-vpc-setup.git//route53-zone-association/zone?ref=tf-upgrade" + region = "us-gov-east-1" + vpc_id = data.aws_vpc.eks_vpc.id + zone_ids = [aws_route53_zone.cluster_domain.zone_id] + + tags = merge( + local.common_tags, + var.application_tags, + ) +} + +module "route53_cluster_domain_west" { + count = local.region == "us-gov-west-1" && !(var.shared_vpc_label == null || var.shared_vpc_label == "") ? 1 : 0 + providers = { + aws.self = aws + aws.peer = aws.route53_main_west + } + + source = "git@github.e.it.census.gov:terraform-modules/aws-vpc-setup.git//route53-zone-association/zone?ref=tf-upgrade" + region = "us-gov-west-1" + vpc_id = data.aws_vpc.eks_vpc.id + zone_ids = [aws_route53_zone.cluster_domain.zone_id] + + tags = merge( + local.common_tags, + var.application_tags, + ) +} + + +## # now we need to add the NS records for the new zone to the parent zone +## data "aws_route53_zone" "parent" { +## name = var.vpc_domain_name +## private_zone = true +## } +## +## resource "aws_route53_record" "cluster_domain" { +## allow_overwrite = true +## name = local.cluster_domain_name +## type = "NS" +## ttl = 900 +## zone_id = data.aws_route53_zone.parent.zone_id +## +## records = aws_route53_zone.cluster_domain.name_servers +## } + +output "cluster_domain_name" { + description = "DNS Zone Name" + value = local.cluster_domain_name +} + +output "cluster_domain_id" { + description = "DNS Zone ID" + value = aws_route53_zone.cluster_domain.zone_id +} + +output "cluster_domain_ns" { + description = "DNS Zone Nameservers" + value = aws_route53_zone.cluster_domain.name_servers +} + +#--- +# associate to main do2-govcloud vpc1-services east and west for inbound resolution +# and to vpc7-endpoints in network prod +#--- + +#--- +# network prod +#--- +provider "aws" { + alias = "route53_main" + region = var.region_map["east"] + profile = var.profile + assume_role { + role_arn = format("arn:%v:iam::%v:role/r-inf-terraform-route53", data.aws_arn.current.partition, var.route53_endpoints["route53_main"].account_id) + session_name = var.os_username + } +} + +module "route53_main_east" { + providers = { + aws.self = aws + aws.peer = aws.route53_main + } + + source = "git@github.e.it.census.gov:terraform-modules/aws-vpc-setup.git//route53-zone-association/zone?ref=tf-upgrade" + region = "us-gov-east-1" + vpc_id = var.route53_endpoints["route53_main"]["us-gov-east-1"] + zone_ids = [aws_route53_zone.cluster_domain.zone_id] + + tags = merge( + local.common_tags, + var.application_tags, + ) +} + +module "route53_main_west" { + providers = { + aws.self = aws + aws.peer = aws.route53_main + } + + source = "git@github.e.it.census.gov:terraform-modules/aws-vpc-setup.git//route53-zone-association/zone?ref=tf-upgrade" + region = "us-gov-west-1" + vpc_id = var.route53_endpoints["route53_main"]["us-gov-west-1"] + zone_ids = [aws_route53_zone.cluster_domain.zone_id] + + tags = merge( + local.common_tags, + var.application_tags, + ) +} + +#--- +# do2-gov ("legacy") +#--- +provider "aws" { + alias = "route53_main_legacy" + region = var.region_map["east"] + profile = var.profile + assume_role { + role_arn = format("arn:%v:iam::%v:role/r-inf-terraform-route53", data.aws_arn.current.partition, var.route53_endpoints["route53_main_legacy"].account_id) + session_name = var.os_username + } +} + +module "route53_main_legacy_east" { + providers = { + aws.self = aws + aws.peer = aws.route53_main_legacy + 
} + + source = "git@github.e.it.census.gov:terraform-modules/aws-vpc-setup.git//route53-zone-association/zone?ref=tf-upgrade" + region = "us-gov-east-1" + vpc_id = var.route53_endpoints["route53_main_legacy"]["us-gov-east-1"] + zone_ids = [aws_route53_zone.cluster_domain.zone_id] + + tags = merge( + local.common_tags, + var.application_tags, + ) +} + +module "route53_main_legacy_west" { + providers = { + aws.self = aws + aws.peer = aws.route53_main_legacy + } + + source = "git@github.e.it.census.gov:terraform-modules/aws-vpc-setup.git//route53-zone-association/zone?ref=tf-upgrade" + region = "us-gov-west-1" + vpc_id = var.route53_endpoints["route53_main_legacy"]["us-gov-west-1"] + zone_ids = [aws_route53_zone.cluster_domain.zone_id] + + tags = merge( + local.common_tags, + var.application_tags, + ) +} + diff --git a/examples/full-cluster-tf-upgrade/1.32/dns-zone.tf.dmz b/examples/full-cluster-tf-upgrade/1.32/dns-zone.tf.dmz new file mode 100644 index 0000000..5686d0c --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/dns-zone.tf.dmz @@ -0,0 +1,179 @@ +locals { + vpc_domain_name = coalesce(var.domain, var.vpc_domain_name) + cluster_domain_name = format("%v.%v", var.cluster_name, local.vpc_domain_name) + cluster_domain_description = format("%v EKS Cluster DNS Zone", var.cluster_name) +} + +#--- +# dmz network prod +#--- +provider "aws" { + alias = "route53_main_dmz_east" + profile = var.profile + region = var.region_map["east"] + assume_role { + role_arn = format("arn:%v:iam::%v:role/r-inf-terraform-route53", data.aws_arn.current.partition, var.route53_endpoints["route53_main_dmz"].account_id) + session_name = var.os_username + } +} + +provider "aws" { + alias = "route53_main_dmz_west" + profile = var.profile + region = var.region_map["west"] + assume_role { + role_arn = format("arn:%v:iam::%v:role/r-inf-terraform-route53", data.aws_arn.current.partition, var.route53_endpoints["route53_main_dmz"].account_id) + session_name = var.os_username + } +} + +#--- +# dummy vpc, so we can associate the zone to this account +#--- +data "aws_vpc" "dummy_vpc" { + count = ! (var.shared_vpc_label == null || var.shared_vpc_label == "") ? 1 : 0 + filter { + name = "tag:Name" + values = ["vpc0-dummy"] + } +} + +resource "aws_route53_zone" "cluster_domain" { + name = local.cluster_domain_name + comment = local.cluster_domain_description + force_destroy = false + + vpc { + vpc_id = ! (var.shared_vpc_label == null || var.shared_vpc_label == "") ? try(data.aws_vpc.dummy_vpc[0].id, null) : data.aws_vpc.eks_vpc.id + vpc_region = local.region + } + + lifecycle { + ignore_changes = [vpc] + precondition { + condition = (var.shared_vpc_label == null || var.shared_vpc_label == "") || (! (var.shared_vpc_label == null || var.shared_vpc_label == "") && ! (var.domain == null || var.domain == "")) + error_message = "var.domain must be provided when shared VPCs are in use." + } + } + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + { "Name" = local.cluster_domain_name }, + ) +} + +#--- +# need to also associate with network-prod account and this vpc +#--- +module "route53_cluster_domain_east" { + count = local.region == "us-gov-east-1" && ! (var.shared_vpc_label == null || var.shared_vpc_label == "") ? 
1 : 0 + providers = { + aws.self = aws + aws.peer = aws.route53_main_dmz_east + } + + source = "git@github.e.it.census.gov:terraform-modules/aws-vpc-setup.git//route53-zone-association/zone?ref=tf-upgrade" + region = "us-gov-east-1" + vpc_id = data.aws_vpc.eks_vpc.id + zone_ids = [aws_route53_zone.cluster_domain.zone_id] + + tags = merge( + local.common_tags, + var.application_tags, + ) +} + +module "route53_cluster_domain_west" { + count = local.region == "us-gov-west-1" && ! (var.shared_vpc_label == null || var.shared_vpc_label == "") ? 1 : 0 + providers = { + aws.self = aws + aws.peer = aws.route53_main_dmz_west + } + + source = "git@github.e.it.census.gov:terraform-modules/aws-vpc-setup.git//route53-zone-association/zone?ref=tf-upgrade" + region = "us-gov-west-1" + vpc_id = data.aws_vpc.eks_vpc.id + zone_ids = [aws_route53_zone.cluster_domain.zone_id] + + tags = merge( + local.common_tags, + var.application_tags, + ) +} + + +## # now we need to add the NS records for the new zone to the parent zone +## data "aws_route53_zone" "parent" { +## name = var.vpc_domain_name +## private_zone = true +## } +## +## resource "aws_route53_record" "cluster_domain" { +## allow_overwrite = true +## name = local.cluster_domain_name +## type = "NS" +## ttl = 900 +## zone_id = data.aws_route53_zone.parent.zone_id +## +## records = aws_route53_zone.cluster_domain.name_servers +## } + +output "cluster_domain_name" { + description = "DNS Zone Name" + value = local.cluster_domain_name +} + +output "cluster_domain_id" { + description = "DNS Zone ID" + value = aws_route53_zone.cluster_domain.zone_id +} + +output "cluster_domain_ns" { + description = "DNS Zone Nameservers" + value = aws_route53_zone.cluster_domain.name_servers +} + +#--- +# associate to main do2-govcloud vpc1-services east and west for inbound resolution +# and to vpc7-endpoints in network prod +#--- + +#--- +# dmz network prod +#--- +module "route53_main_east" { + providers = { + aws.self = aws + aws.peer = aws.route53_main_dmz_east + } + + source = "git@github.e.it.census.gov:terraform-modules/aws-vpc-setup.git//route53-zone-association/zone?ref=tf-upgrade" + region = "us-gov-east-1" + vpc_id = var.route53_endpoints["route53_main_dmz"]["us-gov-east-1"] + zone_ids = [aws_route53_zone.cluster_domain.zone_id] + + tags = merge( + local.common_tags, + var.application_tags, + ) +} + +module "route53_main_west" { + providers = { + aws.self = aws + aws.peer = aws.route53_main_dmz_west + } + + source = "git@github.e.it.census.gov:terraform-modules/aws-vpc-setup.git//route53-zone-association/zone?ref=tf-upgrade" + region = "us-gov-west-1" + vpc_id = var.route53_endpoints["route53_main_dmz"]["us-gov-west-1"] + zone_ids = [aws_route53_zone.cluster_domain.zone_id] + + tags = merge( + local.common_tags, + var.application_tags, + ) +} diff --git a/examples/full-cluster-tf-upgrade/1.32/ebs-encryption.tf b/examples/full-cluster-tf-upgrade/1.32/ebs-encryption.tf new file mode 100644 index 0000000..c125a89 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/ebs-encryption.tf @@ -0,0 +1,108 @@ +locals { + _app_tags_sc_parameters = [for k, v in var.application_tags : format("%v=%v", k, v)] + app_tags_sc_parameters = { for i in range(0, length(local._app_tags_sc_parameters)) : format("tagSpecification_%v", i + 1) => local._app_tags_sc_parameters[i] } +} + +resource "kubernetes_storage_class" "gp3_encrypted" { + metadata { + name = "gp3-encrypted" + annotations = { + "storageclass.kubernetes.io/is-default-class" = "true" + } + } + parameters = { + fsType = 
"ext4" + type = "gp3" + encrypted = "true" + kmsKeyId = data.aws_kms_key.ebs_key.arn + } + storage_provisioner = "ebs.csi.aws.com" + reclaim_policy = "Delete" + volume_binding_mode = "Immediate" + allow_volume_expansion = "true" +} + +resource "kubernetes_storage_class" "ebs_encrypted" { + metadata { + name = "gp2-encrypted" + annotations = { + "storageclass.kubernetes.io/is-default-class" = "false" + } + } + parameters = merge( + local.app_tags_sc_parameters, + { + fsType = "ext4" + type = "gp2" + encrypted = "true" + # kms_key_id = data.aws_kms_key.ebs_key.arn + kmsKeyId = data.aws_kms_key.ebs_key.arn + }) + storage_provisioner = "kubernetes.io/aws-ebs" + reclaim_policy = "Delete" + volume_binding_mode = "Immediate" + allow_volume_expansion = "true" +} + +# run once. This deletes the default storage class created by eks called 'gp2' +# vs trying to patch it + +resource "null_resource" "delete_default_sc" { + triggers = { + id = kubernetes_storage_class.ebs_encrypted.id + } + depends_on = [null_resource.kubeconfig] + provisioner "local-exec" { + command = "kubectl --kubeconfig ${path.root}/setup/kube.config delete sc gp2" + } +} + +## { +## "apiVersion": "storage.k8s.io/v1", +## "kind": "StorageClass", +## "metadata": { +## "annotations": { +## "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"storage.k8s.io/v1\",\"kind\":\"StorageClass\",\"metadata\":{\"annotations\":{\"storageclass.kubernetes.io/is-default-class\":\"true\"},\"name\":\"gp2\"},\"parameters\":{\"fsType\":\"ext4\",\"type\":\"gp2\"},\"provisioner\":\"kubernetes.io/aws-ebs\",\"volumeBindingMode\":\"WaitForFirstConsumer\"}\n", +## "storageclass.kubernetes.io/is-default-class": "true" +## }, +## "creationTimestamp": "2021-09-20T16:10:48Z", +## "managedFields": [ +## { +## "apiVersion": "storage.k8s.io/v1", +## "fieldsType": "FieldsV1", +## "fieldsV1": { +## "f:metadata": { +## "f:annotations": { +## ".": {}, +## "f:kubectl.kubernetes.io/last-applied-configuration": {}, +## "f:storageclass.kubernetes.io/is-default-class": {} +## } +## }, +## "f:parameters": { +## ".": {}, +## "f:fsType": {}, +## "f:type": {} +## }, +## "f:provisioner": {}, +## "f:reclaimPolicy": {}, +## "f:volumeBindingMode": {} +## }, +## "manager": "kubectl-client-side-apply", +## "operation": "Update", +## "time": "2021-09-20T16:10:48Z" +## } +## ], +## "name": "gp2", +## "resourceVersion": "253", +## "uid": "5768ea51-ae73-450e-b0de-38a07be0a5d3" +## }, +## "parameters": { +## "fsType": "ext4", +## "type": "gp2" +## }, +## "provisioner": "kubernetes.io/aws-ebs", +## "reclaimPolicy": "Delete", +## "volumeBindingMode": "WaitForFirstConsumer" + +## } + diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/.tf-control b/examples/full-cluster-tf-upgrade/1.32/efs/.tf-control new file mode 100644 index 0000000..280f449 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/.tf-control @@ -0,0 +1,20 @@ +# .tf-control +# allows for setting a specific command to be used for tf-* commands under this git repo +# see tf-control.sh help for more info + +TFCONTROL_VERSION="1.0.5" + +TFCOMMAND="terraform_latest" +# TF_CLI_CONFIG_FILE=PATH-TO-FILE/.tf-control.tfrc +# TFARGS="" +# TFNOLOG="" +# TFNOCOLOR="" + +# use the following to force a specific version. An upgrade of an existing 0.12.31 to 1.x +# needs you to cycle through 0.13.17, 0.14.11, and then latest (0.15.5 not needed). Other +# steps in between. 
See https://github.e.it.census.gov/terraform/support/tree/master/docs/how-to/terraform-upgrade for details
+#
+#TFCOMMAND="terraform_0.12.31"
+#TFCOMMAND="terraform_0.13.7"
+#TFCOMMAND="terraform_0.14.11"
+#TFCOMMAND="terraform_0.15.5"
diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.32/efs/.tf-control.tfrc
new file mode 100644
index 0000000..7425488
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/efs/.tf-control.tfrc
@@ -0,0 +1,24 @@
+TFCONTROL_VERSION="1.0.5"
+
+# https://www.terraform.io/docs/cli/config/config-file.html
+plugin_cache_dir = "/data/terraform/terraform.d/plugin-cache"
+#disable_checkpoint = true
+
+provider_installation {
+#  filesystem_mirror {
+#    path    = "/apps/terraform/terraform.d/providers"
+#    include = [ "*/*/*" ]
+#  }
+  filesystem_mirror {
+    path    = "/data/terraform/terraform.d/providers"
+    include = [ "*/*/*" ]
+  }
+#  filesystem_mirror {
+#    path    = "/apps/terraform/terraform.d/providers"
+#    include = [ "external.terraform.census.gov/*/*" ]
+#  }
+  direct {
+    include = [ "*/*/*" ]
+  }
+}
+
diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/README.efs.md b/examples/full-cluster-tf-upgrade/1.32/efs/README.efs.md
new file mode 100644
index 0000000..14039bd
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/efs/README.efs.md
@@ -0,0 +1,81 @@
+# eks-efs
+
+A standard EKS cluster only provides the `gp2` storage class, which is an EBS based persistent volume.
+`gp2` can only be used with ReadWriteOnce persistent volumes.
+If an application requires ReadOnlyMany or ReadWriteMany, a different type of persistent volume is required.
+The eks-efs module installs an efs-provisioner in the cluster with a storage class of `efs`, which allows all types of persistent volumes.
+
+## Parameters
+
+| Name | Description |
+| ---- | ----------- |
+| region | The AWS region that the EKS cluster is located in. |
+| cluster_name | The name of the cluster in which efs-provisioner will be installed. |
+| subnet_ids | A list of subnets inside the VPC. Used for EFS mount points. |
+| security_groups | Security groups for all worker management. |
+| aws_efs_csi_driver_version | Which version of the aws-efs-csi-driver helm chart to use. Currently defaults to 2.1.4. |
+| external_provisioner_tag | Which tag of public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner to use. Currently defaults to v2.1.1-eks-1-18-2. |
+| livenessprobe_tag | Which tag of public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe to use. Currently defaults to v2.2.0-eks-1-18-2. |
+| node_driver_registrar_tag | Which tag of public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar to use. Currently defaults to v2.1.0-eks-1-18-2. |
+
+## Updating the aws-efs-csi-driver chart
+
+When using a private VPC, the helm chart cannot be downloaded from "https://kubernetes-sigs.github.io/aws-efs-csi-driver/" during installation.
+A local copy of the chart is maintained within the terraform script.
+The latest version of the helm chart can be found by looking at https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/charts/aws-efs-csi-driver/Chart.yaml and checking the `version:` tag (not the `appVersion` tag).
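+As a sketch of how the pinned chart version is typically consumed (the resource below is illustrative, not this module's actual code, and assumes the build host can reach the upstream repo; with a private VPC the local `charts/` copy is used instead):
+
+```hcl
+# Hypothetical wiring: pin the chart version via the module variable.
+variable "aws_efs_csi_driver_version" {
+  description = "Which version of the aws-efs-csi-driver helm chart to use."
+  type        = string
+  default     = "2.1.4"
+}
+
+resource "helm_release" "aws_efs_csi_driver" {
+  name       = "aws-efs-csi-driver"
+  namespace  = "kube-system"
+  repository = "https://kubernetes-sigs.github.io/aws-efs-csi-driver/"
+  chart      = "aws-efs-csi-driver"
+  version    = var.aws_efs_csi_driver_version
+}
+```
+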
+To update this helm chart to the latest version, the procedure is to:
+
+```shell
+cd charts
+helm repo add aws-efs-csi-driver https://kubernetes-sigs.github.io/aws-efs-csi-driver/
+helm repo update
+rm -fr aws-efs-csi-driver
+helm pull aws-efs-csi-driver/aws-efs-csi-driver --untar
+```
+
+After completing these steps, be sure to examine aws-efs-csi-driver/values.yaml and confirm that the tags listed for the sidecar images match the tags assigned by default in input.tf.
+For example, the values.yaml file:
+
+```yaml
+sidecars:
+  livenessProbe:
+    image:
+      repository: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe
+      tag: v2.2.0-eks-1-18-2
+      pullPolicy: IfNotPresent
+    resources: {}
+  nodeDriverRegistrar:
+    image:
+      repository: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar
+      tag: v2.1.0-eks-1-18-2
+      pullPolicy: IfNotPresent
+    resources: {}
+  csiProvisioner:
+    image:
+      repository: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner
+      tag: v2.1.1-eks-1-18-2
+      pullPolicy: IfNotPresent
+    resources: {}
+```
+
+Entries in input.tf:
+
+```hcl
+variable "livenessprobe_tag" {
+  description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe to use."
+  default     = "v2.2.0-eks-1-18-2"
+}
+
+variable "node_driver_registrar_tag" {
+  description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar to use."
+  default     = "v2.1.0-eks-1-18-2"
+}
+
+variable "external_provisioner_tag" {
+  description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner to use."
+  default     = "v2.1.1-eks-1-18-2"
+}
+```
diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/README.md b/examples/full-cluster-tf-upgrade/1.32/efs/README.md
new file mode 100644
index 0000000..7d589b0
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/1.32/efs/README.md
@@ -0,0 +1,164 @@
+# EFS
+
+This sets up the needed EFS resources for persistent volumes. See [this](README.efs.md) for more details.
+
+## Links
+
+* https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html
+* https://github.com/kubernetes-sigs/aws-efs-csi-driver
+* https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/433
+* https://github.com/hashicorp/terraform-provider-kubernetes/issues/723#issuecomment-679423792
+* https://dev.to/vidyasagarmsc/update-multiple-lines-in-a-yaml-file-49fb
+
+## Initialize
+
+* Proxy setup
+
+A proxy is needed because the system may not have access to the `registry.terraform.io` site directly,
+and if it only has indirect access, it may not be able to handle a proxy redirect. You may not need this,
+but if you get errors from `tf-init`, this is the first thing to set up.
+
+```shell
+export HTTP_PROXY=http://proxy.tco.census.gov:3128
+export HTTPS_PROXY=http://proxy.tco.census.gov:3128
+```
+
+## Terraform Automated
+
+A `tf-run.data` file exists here, so the simplest way to implement this is with the `tf-run.sh` script.
+
+* copy the `remote_state.yml` from the parent and update `directory` to be the current directory
+* run the tf-run.sh
+
+```console
+% tf-run.sh apply
+```
+
+* example of the `tf-run.sh` steps
+
+This is part of a larger cluster configuration, so at the end of the run it indicates another directory
+to visit when done.
+
+```console
+% tf-run.sh list
+* running action=plan
+* START: tf-run.sh v1.1.2 start=1636558187 end= logfile=logs/run.plan.20211110.1636558187.log (not-created)
+* reading from tf-run.data
+* read 7 entries from tf-run.data
+> list
+** START: start=1636558187
+* 1 COMMAND> tf-directory-setup.py -l none -f
+* 2 COMMAND> setup-new-directory.sh
+* 3 COMMAND> tf-init -upgrade
+* 4 POLICY> (*.tf) aws_iam_policy.efs-policy
+* 4 tf-plan -target=aws_iam_policy.efs-policy
+* 5 tf-plan
+* 6 COMMAND> tf-directory-setup.py -l s3
+* 7 STOP> cd ../common-services and tf-run.sh apply
+** END: start=1636558187 end=1636558187 elapsed=0 logfile=logs/run.plan.20211110.1636558187.log (not-created)
+```
+
+It is highly recommended to use the `tf-run.sh` approach.
+
+## Terraform Manual
+
+```shell
+tf-directory-setup.py -l none
+setup-new-directory.sh
+tf-init
+```
+
+* Apply the EFS policy first (before the role)
+
+```shell
+tf-apply -target=aws_iam_policy.efs-policy
+```
+
+* Apply the rest
+
+This must be done from a system with the skopeo command, so RHEL8+.
+
+To use the local install, the efs/charts/ directory
+must be populated with the expected code (see [README.md](README.md)) outside of terraform,
+much like the .tf files are created. Currently, as the box we run this from has internet access,
+we can deploy by pulling the helm charts from the internet.
+
+```shell
+tf-apply
+tf-directory-setup.py -l s3
+```
+
+## Post Setup Examination
+
+This shows what was set up (look at the efs-csi-* pods). Your `kubectl` configuration file
+needs to be set up (one is extracted in `setup/kube.config` as part of this configuration).
+
+```console
+% kubectl --kubeconfig setup/kube.config get pods -n kube-system
+NAME                                  READY   STATUS    RESTARTS   AGE
+aws-node-j6n6z                        1/1     Running   1          27h
+aws-node-nmgqm                        1/1     Running   1          27h
+aws-node-t5ggn                        1/1     Running   1          27h
+aws-node-vxlvw                        1/1     Running   0          27h
+coredns-65bfc5645f-254kx              1/1     Running   0          29h
+coredns-65bfc5645f-zpvld              1/1     Running   0          29h
+efs-csi-controller-7c88dbd56d-chdkt   3/3     Running   0          3m36s
+efs-csi-controller-7c88dbd56d-hsws7   3/3     Running   0          3m36s
+efs-csi-node-4gjdh                    3/3     Running   0          3m36s
+efs-csi-node-g49r7                    3/3     Running   0          3m36s
+efs-csi-node-hq6q9                    3/3     Running   0          3m36s
+efs-csi-node-lcdmd                    3/3     Running   0          3m36s
+kube-proxy-dp9zl                      1/1     Running   0          27h
+kube-proxy-n9l75                      1/1     Running   0          27h
+kube-proxy-qrv2w                      1/1     Running   0          27h
+kube-proxy-zssvb                      1/1     Running   0          27h
+```
+
+* Create PVC Automated
+
+Use the `persistent-volume.tf`, which is set up by default and applied as part of the final apply above.
+
+* Create PVC Manually
+
+```yaml
+# pvc.yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: efs-test3-claim
+spec:
+  accessModes:
+    - ReadWriteMany
+  volumeMode: Filesystem
+  resources:
+    requests:
+      storage: 25Gi
+  storageClassName: efs
+```
+
+* Examine the PV and PVC
+
+```console
+% kubectl get pv
+No resources found
+% kubectl get pvc
+No resources found in default namespace.
+% kubectl apply -f pvc.yaml +persistentvolumeclaim/efs-test3-claim created +% kubectl get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +efs-test3-claim Pending efs 39s +``` + +* Describing the PVC + +```shell +kubectl --kubeconfig setup/kube.config describe pvc efs-test3-claim +``` + +To patch to make it work with the regional STS endpoint (this is handled in the TF code): + +```shell +kubectl --kubeconfig setup/kube.config -n kube-system set env deployment/efs-csi-controller AWS_STS_REGIONAL_ENDPOINTS=regional +``` diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/addon.tf b/examples/full-cluster-tf-upgrade/1.32/efs/addon.tf new file mode 100644 index 0000000..e1af409 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/addon.tf @@ -0,0 +1,15 @@ +# https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html + +resource "aws_eks_addon" "aws-efs-csi-driver" { + count = lookup(lookup(var.addon_versions, var.cluster_version, {}), "aws-efs-csi-driver", null) != null ? 1 : 0 + + cluster_name = var.cluster_name + addon_name = "aws-efs-csi-driver" + addon_version = lookup(lookup(var.addon_versions, var.cluster_version, {}), "aws-efs-csi-driver", null) + service_account_role_arn = module.role_efs-driver.role_arn + configuration_values = null + # resolve_conflicts = "OVERWRITE" + # note OVERWRITE resets to eks addon defaults, PRESERVE uses any values set here + resolve_conflicts_on_create = "OVERWRITE" + resolve_conflicts_on_update = "OVERWRITE" +} diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.32/efs/data.eks-subdirectory.tf new file mode 120000 index 0000000..43b5430 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/data.eks-subdirectory.tf @@ -0,0 +1 @@ +../includes.d/data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/ecr.tf b/examples/full-cluster-tf-upgrade/1.32/efs/ecr.tf new file mode 100644 index 0000000..986226e --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/ecr.tf @@ -0,0 +1,70 @@ +locals { + ecr_mapping_default = "602401143452" + ecr_mapping = { + "us-gov-east-1" = "151742754352" + "us-gov-west-1" = "013241004608" + "us-east-1" = "602401143452" + "us-west-2" = "602401143452" + "us-east-1" = "602401143452" + "us-west-2" = "602401143452" + } + public_ecr = format("%v.dkr.ecr.%v.amazonaws.com", lookup(local.ecr_mapping, local.region, local.ecr_mapping_default), local.region) +} + +## # Populated from: +## # https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html +## +## data "aws_caller_identity" "whoami" {} +## +## locals { +## af_south_1 = (var.region == "af-south-1" ? "877085696533.dkr.ecr.af-south-1.amazonaws.com/" : "") +## af = local.af_south_1 +## +## ap_east_1 = var.region == "ap-east-1" ? "800184023465.dkr.ecr.ap-east-1.amazonaws.com/" : "" +## ap_northeast_1 = var.region == "ap-northeast-1" ? "602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/" : "" +## ap_northeast_2 = var.region == "ap-northeast-2" ? "602401143452.dkr.ecr.ap-northeast-2.amazonaws.com/" : "" +## ap_northeast_3 = var.region == "ap-northeast-3" ? "602401143452.dkr.ecr.ap-northeast-3.amazonaws.com/" : "" +## ap_south_1 = var.region == "ap-south-1" ? "602401143452.dkr.ecr.ap-south-1.amazonaws.com/" : "" +## ap_southeast_1 = var.region == "ap-southeast-1" ? "602401143452.dkr.ecr.ap-southeast-1.amazonaws.com/" : "" +## ap_southeast_2 = var.region == "ap-southeast-2" ? 
"602401143452.dkr.ecr.ap-southeast-2.amazonaws.com/" : "" +## ap_1 = "${local.ap_east_1}${local.ap_northeast_1}${local.ap_northeast_2}${local.ap_northeast_3}${local.ap_south_1}" +## ap_2 = "${local.ap_southeast_1}${local.ap_southeast_2}" +## ap = "${local.ap_1}${local.ap_2}" +## +## ca_central_1 = var.region == "ca-central-1" ? "602401143452.dkr.ecr.ca-central-1.amazonaws.com/" : "" +## ca = local.ca_central_1 +## +## cn_north_1 = var.region == "cn-north-1" ? "918309763551.dkr.ecr.cn-north-1.amazonaws.com.cn/" : "" +## cn_northwest_1 = var.region == "cn-northwest-1" ? "961992271922.dkr.ecr.cn-northwest-1.amazonaws.com.cn/" : "" +## cn = "${local.cn_north_1}${local.cn_northwest_1}" +## +## eu_central_1 = var.region == "eu-central-1" ? "602401143452.dkr.ecr.eu-central-1.amazonaws.com/" : "" +## eu_north_1 = var.region == "eu-north-1" ? "602401143452.dkr.ecr.eu-north-1.amazonaws.com/" : "" +## eu_south_1 = var.region == "eu-south-1" ? "590381155156.dkr.ecr.eu-south-1.amazonaws.com/" : "" +## eu_west_1 = var.region == "eu-west-1" ? "602401143452.dkr.ecr.eu-west-1.amazonaws.com/" : "" +## eu_west_2 = var.region == "eu-west-2" ? "602401143452.dkr.ecr.eu-west-2.amazonaws.com/" : "" +## eu_west_3 = var.region == "eu-west-3" ? "602401143452.dkr.ecr.eu-west-3.amazonaws.com/" : "" +## eu = "${local.eu_central_1}${local.eu_north_1}${local.eu_south_1}${local.eu_west_1}${local.eu_west_2}${local.eu_west_3}" +## +## me_south_1 = var.region == "me-south-1" ? "558608220178.dkr.ecr.me-south-1.amazonaws.com/" : "" +## me = local.me_south_1 +## +## sa_east_1 = var.region == "sa-east-1" ? "602401143452.dkr.ecr.sa-east-1.amazonaws.com/" : "" +## sa = local.sa_east_1 +## +## us_east_1 = var.region == "us-east-1" ? "602401143452.dkr.ecr.us-east-1.amazonaws.com/" : "" +## us_east_2 = var.region == "us-east-2" ? "602401143452.dkr.ecr.us-east-2.amazonaws.com/" : "" +## us_gov_east_1 = var.region == "us-gov-east-1" ? "151742754352.dkr.ecr.us-gov-east-1.amazonaws.com/" : "" +## us_gov_west_1 = var.region == "us-gov-west-1" ? "013241004608.dkr.ecr.us-gov-west-1.amazonaws.com/" : "" +## us_west_1 = var.region == "us-west-1" ? "602401143452.dkr.ecr.us-west-1.amazonaws.com/" : "" +## us_west_2 = var.region == "us-west-2" ? "602401143452.dkr.ecr.us-west-2.amazonaws.com/" : "" +## us = "${local.us_east_1}${local.us_east_2}${local.us_gov_east_1}${local.us_gov_west_1}${local.us_west_1}${local.us_west_2}" +## +## ecr = "${local.af}${local.ap}${local.ca}${local.cn}${local.eu}${local.me}${local.sa}${local.us}" +## +## +## public_reg = "public.ecr.aws" +## src_reg = format("%v/eks-distro/kubernetes-csi", local.public_reg) +## account_ecr = "${data.aws_caller_identity.whoami.account_id}.dkr.ecr.${var.region}.amazonaws.com/${var.cluster_name}" +## } +## diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/efs.tf b/examples/full-cluster-tf-upgrade/1.32/efs/efs.tf new file mode 100644 index 0000000..4859130 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/efs.tf @@ -0,0 +1,27 @@ +# Create an Amazon EFS file system for the EKS cluster. +# Step 4a: Create a file system. +# Step 4b: Create mount targets. 
+module "efs" { + source = "git@github.e.it.census.gov:terraform-modules/aws-efs.git" + + name = var.cluster_name + vpc_id = local.vpc_id + subnet_ids = local.subnet_ids + ## consider changing this to the new extra_cluster_sg + security_groups = [local.cluster_worker_sg_id] + ## subnet_ids = local.cni_subnet_ids + ## security_groups = [local.cluster_cni_worker_sg_id] + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + tomap({ "efs.csi.aws.com/cluster" = "true" }), + ) +} + +# look at efs module. Add +# efs_tags +# kms_tags +# moint_point_tags +# or use the override tags thing diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.32/efs/kubeconfig.eks-subdirectory.tf new file mode 120000 index 0000000..e3750a4 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/kubeconfig.eks-subdirectory.tf @@ -0,0 +1 @@ +../includes.d/kubeconfig.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/locals.tf b/examples/full-cluster-tf-upgrade/1.32/efs/locals.tf new file mode 100644 index 0000000..4b9ae5a --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/locals.tf @@ -0,0 +1,17 @@ +locals { + base_tags = { + "eks-cluster-name" = var.cluster_name + "boc:tf_module_version" = local._module_version + "boc:created_by" = "terraform" + } +} + +# replace TF remote state accordingly in parent_rs with that from the parent directory, and be sure to make the link +locals { + vpc_id = local.parent_rs.cluster_vpc_id + subnet_ids = local.parent_rs.cluster_subnet_ids + cluster_worker_sg_id = local.parent_rs.cluster_worker_sg_id + + oidc_provider_url = local.parent_rs.oidc_provider_url + oidc_provider_arn = local.parent_rs.oidc_provider_arn +} diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.32/efs/parent_rs.tf new file mode 120000 index 0000000..d85ece6 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/parent_rs.tf @@ -0,0 +1 @@ +../includes.d/parent_rs.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/persistent-volume.tf b/examples/full-cluster-tf-upgrade/1.32/efs/persistent-volume.tf new file mode 100644 index 0000000..7ff0766 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/persistent-volume.tf @@ -0,0 +1,19 @@ +resource "kubernetes_persistent_volume_claim" "cluster-base-efs" { + metadata { + name = format("%v%v-%v", local._prefixes["eks"], var.cluster_name, "base-claim") + # namespace = kubernetes_namespace.cicd_namespace.metadata[0].name + } + wait_until_bound = false + spec { + access_modes = ["ReadWriteMany"] + # capacity = { + # storage = "25Gi" + # } + resources { + requests = { + storage = "25Gi" + } + } + storage_class_name = "efs" + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/policy.tf b/examples/full-cluster-tf-upgrade/1.32/efs/policy.tf new file mode 100644 index 0000000..b98f39d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/policy.tf @@ -0,0 +1,87 @@ +# created from +# arn:aws-us-gov:iam::aws:policy/service-role/AmazonEFSCSIDriverPolicy + +# apply policy before creating role +# tf-apply -target=aws_iam_policy.efs-policy + +resource "aws_iam_policy" "efs-policy" { + name = format("%v%v-efs-driver", local._prefixes["eks-policy"], var.cluster_name) + path = "/" + description = "Allow configuration of the EFS" + policy = data.aws_iam_policy_document.efs-policy.json + + tags = merge( + 
local.base_tags, + local.common_tags, + var.application_tags, + { "Name" = format("%v%v-efs-driver", local._prefixes["eks-policy"], var.cluster_name) }, + ) +} + +data "aws_iam_policy_document" "efs-policy" { + statement { + sid = "EKSEFSDescribe" + effect = "Allow" + resources = ["*"] + actions = [ + "elasticfilesystem:DescribeAccessPoints", + "elasticfilesystem:DescribeFileSystems", + "elasticfilesystem:DescribeMountTargets", + "ec2:DescribeAvailabilityZones", + ] + } + statement { + sid = "EKSEFSCreateAccessPoint" + effect = "Allow" + resources = ["*"] + actions = [ + "elasticfilesystem:CreateAccessPoint" + ] + condition { + test = "Null" + variable = "aws:RequestTag/efs.csi.aws.com/cluster" + values = ["false"] + } + condition { + test = "ForAllValues:StringEquals" + variable = "aws:TagKeys" + values = ["efs.csi.aws.com/cluster"] + } + } + statement { + sid = "AllowTagNewAccessPoints" + effect = "Allow" + resources = ["*"] + actions = [ + "elasticfilesystem:TagResource", + ] + condition { + test = "StringEquals" + variable = "elasticfilesystem:CreateAction" + values = ["CreateAccessPoint"] + } + condition { + test = "Null" + variable = "aws:RequestTag/efs.csi.aws.com/cluster" + values = ["false"] + } + condition { + test = "ForAllValues:StringEquals" + variable = "aws:TagKeys" + values = ["efs.csi.aws.com/cluster"] + } + } + statement { + sid = "EKSEFSDeleteAccessPoint" + effect = "Allow" + resources = ["*"] + actions = [ + "elasticfilesystem:DeleteAccessPoint" + ] + condition { + test = "Null" + variable = "aws:ResourceTag/efs.csi.aws.com/cluster" + values = ["false"] + } + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/prefixes.tf b/examples/full-cluster-tf-upgrade/1.32/efs/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/providers.tf b/examples/full-cluster-tf-upgrade/1.32/efs/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/region.tf b/examples/full-cluster-tf-upgrade/1.32/efs/region.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/region.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/role.tf b/examples/full-cluster-tf-upgrade/1.32/efs/role.tf new file mode 100644 index 0000000..0cda603 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/role.tf @@ -0,0 +1,53 @@ +#--- +# cluster +#--- +locals { + # oidc = replace(data.aws_eks_cluster.cluster.identity[0].oidc[0].issuer, "https://", "") + account_id = data.aws_caller_identity.current.account_id + principal = format("arn:%v:iam::%v:oidc-provider/%v", data.aws_arn.current.partition, local.account_id, local.oidc_provider_url) +} + +# create: aws_iam_policy.efs-policy first +module "role_efs-driver" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade" + + role_name = format("%v%v-efs-driver", local._prefixes["eks"], var.cluster_name) + role_description = "EKS EFS Driver Role for ${var.cluster_name}" + enable_ldap_creation = false + assume_policy_document = data.aws_iam_policy_document.efs_assume_webidentity.json + attached_policies = [aws_iam_policy.efs-policy.arn] + + 
tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + tomap({ "Name" = format("%v%v-efs-driver", local._prefixes["eks-role"], var.cluster_name) }), + ) +} + +data "aws_iam_policy_document" "efs_assume_webidentity" { + statement { + sid = "EFSAssumeRoleWebIdentity" + effect = "Allow" + actions = ["sts:AssumeRoleWithWebIdentity"] + principals { + type = "Federated" + identifiers = [local.principal] + } + condition { + test = "StringLike" + variable = "${local.oidc_provider_url}:sub" + values = ["system:serviceaccount:kube-system:efs-csi-*"] + } + condition { + test = "StringLike" + variable = "${local.oidc_provider_url}:aud" + values = ["sts.amazonaws.com"] + } + } +} + +output "role_efs-driver_arn" { + description = "Role ARN for EKS EFS Driver Role" + value = module.role_efs-driver.role_arn +} diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/storage-class.tf b/examples/full-cluster-tf-upgrade/1.32/efs/storage-class.tf new file mode 100644 index 0000000..ca88c36 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/storage-class.tf @@ -0,0 +1,17 @@ +resource "kubernetes_storage_class" "efs-sc" { + depends_on = [ + module.efs, + aws_eks_addon.aws-efs-csi-driver, + ] + + metadata { + name = "efs" + } + storage_provisioner = "efs.csi.aws.com" + parameters = { + provisioningMode = "efs-ap" + fileSystemId = module.efs.id + directoryPerms = "700" + } + mount_options = ["tls"] +} diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/tf-run.data b/examples/full-cluster-tf-upgrade/1.32/efs/tf-run.data new file mode 100644 index 0000000..bf48958 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/tf-run.data @@ -0,0 +1,31 @@ +VERSION 1.2.8 +REMOTE-STATE +COMMAND tf-directory-setup.py -l none -f +COMMAND setup-new-directory.sh +LINKTOP init +LINKTOP includes.d/variables.account_tags.tf +LINKTOP includes.d/variables.account_tags.auto.tfvars +LINKTOP includes.d/variables.infrastructure_tags.tf +LINKTOP includes.d/variables.infrastructure_tags.auto.tfvars +LINKTOP includes.d/variables.application_tags.tf +# LINKTOP includes.d/variables.application_tags.auto.tfvars +LINK variables.application_tags.auto.tfvars +LINKTOP provider_configs.d/provider.ldap_new.auto.tfvars +LINKTOP provider_configs.d/provider.ldap_new.tf +LINKTOP provider_configs.d/provider.ldap_new.variables.tf +LINK settings.auto.tfvars +LINK includes.d/parent_rs.tf +LINK includes.d/data.eks-subdirectory.tf +LINK includes.d/kubeconfig.eks-subdirectory.tf +LINK variables.eks.tf +LINK prefixes.tf +LINK providers.tf +LINK variables.addons.tf +LINK versions.tf +LINK version.tf +COMMAND tf-init + +POLICY +ALL +COMMAND tf-directory-setup.py -l s3 +STOP cd ../addons and tf-run.sh apply diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/tf-run.destroy.data b/examples/full-cluster-tf-upgrade/1.32/efs/tf-run.destroy.data new file mode 100644 index 0000000..7a82c9f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/tf-run.destroy.data @@ -0,0 +1,6 @@ +VERSION 1.0.1 +BACKUP-STATE +COMMAND tf-init +COMMAND tf-state list + +ALL diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/variables.efs.tf b/examples/full-cluster-tf-upgrade/1.32/efs/variables.efs.tf new file mode 100644 index 0000000..0e2acb6 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/variables.efs.tf @@ -0,0 +1,37 @@ +# variable "eks_vpc_name" { +# description = "Define the VPC name that will be used by this cluster" +# type = string +# default = "*vpc4*" +# } +# +# variable "subnets_name" { +# description = 
"Define the name of the subnets to be used by this cluster" +# type = string +# default = "*-apps-*" +# } + +variable "cluster_worker_sg_id" { + description = "Security group for all worker management." + type = string + default = "" +} + +# See the readme `Updating the aws-efs-csi-driver chart` to find these values. +variable "livenessprobe_tag" { + description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/liveness" + type = string + default = "v2.2.0-eks-1-18-2" +} + +variable "node_driver_registrar_tag" { + description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/node-dri" + type = string + default = "v2.1.0-eks-1-18-2" +} + +variable "external_provisioner_tag" { + description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/external" + type = string + default = "v2.1.1-eks-1-18-2" +} + diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.32/efs/variables.eks.tf new file mode 120000 index 0000000..7dd95db --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/variables.eks.tf @@ -0,0 +1 @@ +../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/version.tf b/examples/full-cluster-tf-upgrade/1.32/efs/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/efs/versions.tf b/examples/full-cluster-tf-upgrade/1.32/efs/versions.tf new file mode 120000 index 0000000..8bd0ff1 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/efs/versions.tf @@ -0,0 +1 @@ +../versions.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/eks-console-access.tf b/examples/full-cluster-tf-upgrade/1.32/eks-console-access.tf new file mode 100644 index 0000000..5ed3866 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/eks-console-access.tf @@ -0,0 +1,71 @@ +# ```shell +# curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml +# curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml +# ``` +# +# For full console, we'll use the first one. 
+# +# ```console +# % kubectl apply -f eks-console-full-access.yaml +# clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole created +# clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding created +# ``` + +locals { + cluster_roles = [ + { + name = "eks-console-full-access" + url = "https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml" + enabled = true + }, + { + name = "eks-console-restricted-access" + url = "https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml" + enabled = false + }, + ] + cluster_roles_map = { for cr in local.cluster_roles : cr.name => cr } +} + + +data "http" "cluster_roles" { + for_each = local.cluster_roles_map + url = each.value.url +} + +resource "null_resource" "cluster_roles" { + for_each = local.cluster_roles_map + triggers = { + roles = join(",", [each.key, each.value.url]) + directory = null_resource.setup_directory.triggers.directory + } + # provisioner "local-exec" { + # command = "test -d setup || mkdir setup" + # } + provisioner "local-exec" { + command = "echo '${data.http.cluster_roles[each.key].body}' > ${self.triggers.directory}/${each.value.name}.yaml" + } +} + +resource "null_resource" "apply_cluster_roles" { + for_each = { for k, v in local.cluster_roles_map : k => v if v.enabled } + triggers = { + roles = join(",", [each.key, each.value.url]) + } + depends_on = [null_resource.kubeconfig] + # provisioner "local-exec" { + # command = "if [ -z $KUBECONFIG ]; then 'echo missing KUBECONFIG'; exit 1; else exit 0; fi" + # } + # provisioner "local-exec" { + # command = "if [ ! -r $KUBECONFIG ]; then 'echo unreadable KUBECONFIG'; exit 1; else exit 0; fi" + # } + # provisioner "local-exec" { + # command = "which kubectl > /dev/null 2>&1; if [ $? 
!= 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" + # } + provisioner "local-exec" { + environment = { + KUBECONFIG = "${path.root}/setup/kube.config" + } + command = "kubectl apply -f setup/${each.value.name}.yaml" + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/group.tf b/examples/full-cluster-tf-upgrade/1.32/group.tf new file mode 100644 index 0000000..cdffce9 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/group.tf @@ -0,0 +1,13 @@ +module "group_cluster-admin" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-group.git" + + group_name = format("%v%v-cluster-admin", local._prefixes["eks"], var.cluster_name) + attached_policies = [aws_iam_policy.cluster-admin-policy.arn, aws_iam_policy.cluster-admin_assume_policy.arn] + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + ) +} diff --git a/examples/full-cluster-tf-upgrade/1.32/images.yml b/examples/full-cluster-tf-upgrade/1.32/images.yml new file mode 100644 index 0000000..3006045 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/images.yml @@ -0,0 +1,133 @@ +cluster-autoscaler: + documentation: "https://github.com/kubernetes/autoscaler/releases" + name: "cluster-autoscaler" + image: "registry.k8s.io/autoscaling/cluster-autoscaler" + dest_path: null + source_registry: "registry.k8s.io" + source_image: "autoscaling/cluster-autoscaler" + source_tag: null + enabled: true + tag: "v1.34.1" +cert-manager-controller: + documentation: "https://cert-manager.io/docs/releases/" + name: "cert-manager-controller" + image: "quay.io/jetstack/cert-manager-controller" + dest_path: null + source_registry: "quay.io" + source_image: "jetstack/cert-manager-controller" + source_tag: null + enabled: true + tag: "v1.19.1" +cert-manager-cainjector: + documentation: "https://cert-manager.io/docs/releases/" + name: "cert-manager-cainjector" + image: "quay.io/jetstack/cert-manager-cainjector" + dest_path: null + source_registry: "quay.io" + source_image: "jetstack/cert-manager-cainjector" + source_tag: null + enabled: true + tag: "v1.19.1" +cert-manager-webhook: + documentation: "https://cert-manager.io/docs/releases/" + name: "cert-manager-webhook" + image: "quay.io/jetstack/cert-manager-webhook" + dest_path: null + source_registry: "quay.io" + source_image: "jetstack/cert-manager-webhook" + source_tag: null + enabled: true + tag: "v1.19.1" +cert-manager-ctl: + documentation: "https://cert-manager.io/docs/releases/" + name: "cert-manager-ctl" + image: "quay.io/jetstack/cert-manager-ctl" + dest_path: null + source_registry: "quay.io" + source_image: "jetstack/cert-manager-ctl" + source_tag: null + enabled: false +# tag: "v1.16.2" + tag: "v1.14.7" +cert-manager-startupapicheck: + documentation: "https://cert-manager.io/docs/releases/" + name: "cert-manager-startupapicheck" + image: "quay.io/jetstack/cert-manager-startupapicheck" + dest_path: null + source_registry: "quay.io" + source_image: "jetstack/cert-manager-startupapicheck" + source_tag: null + enabled: true + tag: "v1.19.1" +metrics-server: + documentation: "https://hub.docker.com/r/bitnami/metrics-server/tags" + name: "metrics-server" + image: "docker.io/bitnami/metrics-server" + dest_path: null + source_registry: "docker.io" + source_image: "bitnami/metrics-server" + source_tag: null + enabled: true + tag: "0.7.2" +istio-operator: + documentation: "https://istio.io/latest/docs/releases/supported-releases" + name: "istio/operator" + image: "docker.io/istio/operator" + dest_path: null + source_registry: 
"docker.io" + source_image: "istio/operator" + source_tag: null + enabled: true + tag: "1.23.6" +istio-pilot: + documentation: "https://istio.io/latest/docs/releases/supported-releases" + name: "istio/pilot" + image: "docker.io/istio/pilot" + dest_path: null + source_registry: "docker.io" + source_image: "istio/pilot" + source_tag: null + enabled: true + # tag: "1.25.3" + tag: "1.28.0" +istio-proxyv2: + documentation: "https://istio.io/latest/docs/releases/supported-releases" + name: "istio/proxyv2" + image: "docker.io/istio/proxyv2" + dest_path: null + source_registry: "docker.io" + source_image: "istio/proxyv2" + source_tag: null + enabled: true + # tag: "1.25.3" + tag: "1.28.0" +prometheus: + documentation: "https://hub.docker.com/r/bitnami/prometheus/tags" + name: "prometheus" + image: "docker.io/bitnami/prometheus" + dest_path: null + source_registry: "docker.io" + source_image: "bitnami/prometheus" + source_tag: null + enabled: true + tag: "3.0.1" +prometheus-operator: + documentation: "https://hub.docker.com/r/bitnami/prometheus-operator/tags" + name: "prometheus-operator" + image: "docker.io/bitnami/prometheus-operator" + dest_path: null + source_registry: "docker.io" + source_image: "bitnami/prometheus-operator" + source_tag: null + enabled: true + tag: "0.79.2" +alertmanager: + documentation: "https://hub.docker.com/r/bitnami/alertmanager/tags" + name: "alertmanager" + image: "docker.io/bitnami/alertmanager" + dest_path: null + source_registry: "docker.io" + source_image: "bitnami/alertmanager" + source_tag: null + enabled: true + tag: "0.27.0" diff --git a/examples/full-cluster-tf-upgrade/1.32/import.tf.off b/examples/full-cluster-tf-upgrade/1.32/import.tf.off new file mode 100644 index 0000000..0f20f92 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/import.tf.off @@ -0,0 +1,4 @@ +import { + to = aws_cloudwatch_log_group.cluster + id = format("/aws/eks/%v/cluster", var.cluster_name) +} diff --git a/examples/full-cluster-tf-upgrade/1.32/includes.d/README.md b/examples/full-cluster-tf-upgrade/1.32/includes.d/README.md new file mode 100644 index 0000000..97c168f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/includes.d/README.md @@ -0,0 +1,30 @@ +## Requirements + +No requirements. + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | n/a | +| [null](#provider\_null) | n/a | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [null_resource.kubeconfig](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | + +## Inputs + +No inputs. + +## Outputs + +No outputs. 
diff --git a/examples/full-cluster-tf-upgrade/1.32/includes.d/data.eks-main.tf b/examples/full-cluster-tf-upgrade/1.32/includes.d/data.eks-main.tf new file mode 100644 index 0000000..7ead28b --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/includes.d/data.eks-main.tf @@ -0,0 +1,18 @@ +locals { + aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster + # for main.tf + aws_eks_cluster = aws_eks_cluster.eks_cluster + # for all subdirectories + ## aws_eks_cluster = data.aws_eks_cluster.cluster +} + +data "aws_eks_cluster_auth" "cluster" { + name = var.cluster_name +} + +#--- +# for all subdirectories only +#--- +## data "aws_eks_cluster" "cluster" { +## name = var.cluster_name +## } diff --git a/examples/full-cluster-tf-upgrade/1.32/includes.d/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.32/includes.d/data.eks-subdirectory.tf new file mode 100644 index 0000000..870e8c6 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/includes.d/data.eks-subdirectory.tf @@ -0,0 +1,15 @@ +data "aws_eks_cluster" "cluster" { + name = var.cluster_name +} + +data "aws_eks_cluster_auth" "cluster" { + name = var.cluster_name +} + +locals { + aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster + # for main.tf + # aws_eks_cluster = aws_eks_cluster.eks_cluster + # for all subdirectories + aws_eks_cluster = data.aws_eks_cluster.cluster +} diff --git a/examples/full-cluster-tf-upgrade/1.32/includes.d/kubeconfig.eks-main.tf b/examples/full-cluster-tf-upgrade/1.32/includes.d/kubeconfig.eks-main.tf new file mode 100644 index 0000000..5a6333e --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/includes.d/kubeconfig.eks-main.tf @@ -0,0 +1,29 @@ +resource "null_resource" "kubeconfig" { + triggers = { + always_run = timestamp() + } + provisioner "local-exec" { + command = "which kubectl > /dev/null 2>&1; if [ $? != 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" + } + provisioner "local-exec" { + command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'" + } + provisioner "local-exec" { + environment = { + AWS_PROFILE = var.profile + AWS_REGION = local.region + } + command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/kube.config" + } + depends_on = [aws_eks_cluster.eks_cluster] +} + +#--- +# call it like +#--- +## provisioner "local-exec" { +## environment = { +## KUBECONFIG = "${path.root}/setup/kube.config" +## } +## command = "kubectli set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true" +## } diff --git a/examples/full-cluster-tf-upgrade/1.32/includes.d/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.32/includes.d/kubeconfig.eks-subdirectory.tf new file mode 100644 index 0000000..5e386f5 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/includes.d/kubeconfig.eks-subdirectory.tf @@ -0,0 +1,29 @@ +resource "null_resource" "kubeconfig" { + triggers = { + always_run = timestamp() + } + provisioner "local-exec" { + command = "which kubectl > /dev/null 2>&1; if [ $? 
!= 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" + } + provisioner "local-exec" { + command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'" + } + provisioner "local-exec" { + environment = { + AWS_PROFILE = var.profile + AWS_REGION = local.region + } + command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/kube.config" + } + depends_on = [data.aws_eks_cluster.cluster] +} + +#--- +# call it like +#--- +## provisioner "local-exec" { +## environment = { +## KUBECONFIG = "${path.root}/setup/kube.config" +## } +## command = "kubectli set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true" +## } diff --git a/examples/full-cluster-tf-upgrade/1.32/includes.d/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.32/includes.d/parent_rs.tf new file mode 100644 index 0000000..7d4b782 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/includes.d/parent_rs.tf @@ -0,0 +1,4 @@ +# replace TF remote state accordingly in parent_rs with that from the parent directory, and be sure to make the link +locals { + parent_rs = data.terraform_remote_state.vpc-state-path_application-state-path-eks-cluster-name.outputs +} diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/.tf-control b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/.tf-control new file mode 100644 index 0000000..280f449 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/.tf-control @@ -0,0 +1,20 @@ +# .tf-control +# allows for setting a specific command to be used for tf-* commands under this git repo +# see tf-control.sh help for more info + +TFCONTROL_VERSION="1.0.5" + +TFCOMMAND="terraform_latest" +# TF_CLI_CONFIG_FILE=PATH-TO-FILE/.tf-control.tfrc +# TFARGS="" +# TFNOLOG="" +# TFNOCOLOR="" + +# use the following to force a specific version. An upgrade of an existing 0.12.31 to 1.x +# needs you to cycle through 0.13.17, 0.14.11, and then latest (0.15.5 not needed). Other +# steps in between. See https://github.e.it.census.gov/terraform/support/tree/master/docs/how-to/terraform-upgrade for details +# +#TFCOMMAND="terraform_0.12.31" +#TFCOMMAND="terraform_0.13.7" +#TFCOMMAND="terraform_0.14.11" +#TFCOMMAND="terraform_0.15.5" diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/.tf-control.tfrc new file mode 100644 index 0000000..7425488 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/.tf-control.tfrc @@ -0,0 +1,24 @@ +TFCONTROL_VERSION="1.0.5" + +# https://www.terraform.io/docs/cli/config/config-file.html +plugin_cache_dir = "/data/terraform/terraform.d/plugin-cache" +#disable_checkpoint = true + +provider_installation { +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "*/*/*" ] +# } + filesystem_mirror { + path = "/data/terraform/terraform.d/providers" + include = [ "*/*/*" ] + } +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "external.terraform.census.gov/*/*" ] +# } + direct { + include = [ "*/*/*" ] + } +} + diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/README.md b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/README.md new file mode 100644 index 0000000..6915c05 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/README.md @@ -0,0 +1,64 @@ +# irsa-roles + +This is a directory under which actual IRSA role subdirectories exist. No resources are created here. 
+ +See the directories to follow the directions containd within: + +* cluster-autoscaler + +## Setup Steps + +First, copy the `remote_state.yml` from the parent and update `directory` to be the current directory. + +## Terraform Automated + +A `tf-run.data` file exists here, so the simplest way to implemnt is with the `tf-run.sh` script. + +* copy the `remote_state.yml` from the parent and update `directory` to be the current directory +* run the tf-run.sh + +```console +% tf-run.sh apply +``` + +* example of the `tf-run.sh` steps + +This is part of a larger cluster configuration, so at the end of the run it indicates another directory +to visit when done. + +```console +% tf-run.sh list +* running action=plan +* START: tf-run.sh v1.1.2 start=1636562881 end= logfile=logs/run.plan.20211110.1636562881.log (not-created) +* reading from tf-run.data +* read 6 entries from tf-run.data +> list +** START: start=1636562881 +* 1 COMMAND> tf-directory-setup.py -l none -f +* 2 COMMAND> setup-new-directory.sh +* 3 COMMAND> tf-init -upgrade +* 4 tf-plan +* 5 COMMAND> tf-directory-setup.py -l s3 +* 6 COMMENT> cd cluster-roles and tf-run.sh apply +** END: start=1636562881 end=1636562881 elapsed=0 logfile=logs/run.plan.20211110.1636562881.log (not-created) +``` + +It is highly recommended to use the `tf-run.sh` approach. + +## Terraform Manual + +* setup + +```shell +tf-directory-setup.py -l none +setup-new-directory.sh +tf-init +```` + +* Apply the rest + +```shell +tf-plan +tf-apply +tf-directory-setup.py -l s3 +``` diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/data.eks-subdirectory.tf new file mode 120000 index 0000000..43b5430 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/data.eks-subdirectory.tf @@ -0,0 +1 @@ +../includes.d/data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/parent_rs.tf new file mode 120000 index 0000000..d85ece6 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/parent_rs.tf @@ -0,0 +1 @@ +../includes.d/parent_rs.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/prefixes.tf b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/providers.tf b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/region.tf b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/region.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/region.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/tf-run.data b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/tf-run.data new file mode 100644 index 0000000..d48406d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/tf-run.data @@ -0,0 +1,13 @@ +VERSION 1.3.0 +REMOTE-STATE +COMMAND tf-directory-setup.py -l none -f 
+COMMAND setup-new-directory.sh +COMMAND tf-init -upgrade + +LINKTOP init +LINK versions.tf +LINK settings.auto.tfvars +LINK variables.application_tags.auto.tfvars + +ALL +COMMAND tf-directory-setup.py -l s3 diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/tf-run.destroy.data b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/tf-run.destroy.data new file mode 100644 index 0000000..73dcd61 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/tf-run.destroy.data @@ -0,0 +1,9 @@ +VERSION 1.0.1 +BACKUP-STATE +COMMAND tf-init +COMMAND tf-state list + +COMMENT cd cluster-autoscaler and tf-run.sh destroy, then come back here +STOP + +ALL diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.eks.tf new file mode 120000 index 0000000..7dd95db --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.eks.tf @@ -0,0 +1 @@ +../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.irsa.auto.tfvars b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.irsa.auto.tfvars new file mode 100644 index 0000000..d436089 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.irsa.auto.tfvars @@ -0,0 +1,3 @@ +name = "unknown" +namespace = "unknown" +namespace_short = "" diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.irsa.tf b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.irsa.tf new file mode 100644 index 0000000..63f3ab1 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.irsa.tf @@ -0,0 +1,14 @@ +variable "namespace" { + description = "K8S namespace for IAM Role for Service Account (per-pod)" + type = string +} + +variable "namespace_short" { + description = "K8S namespace for IAM Role for Service Account (per-pod), short version (without the cluster name) to keep the role name under 64 characters" + type = string +} + +variable "name" { + description = "K8S service names for IAM Role for Service Account (per-pod)" + type = string +} diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.tags.tf b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.tags.tf new file mode 120000 index 0000000..2622118 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/variables.tags.tf @@ -0,0 +1 @@ +../variables.tags.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/version.tf b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/irsa-roles/versions.tf b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/versions.tf new file mode 120000 index 0000000..8bd0ff1 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/irsa-roles/versions.tf @@ -0,0 +1 @@ +../versions.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.32/kubeconfig.eks-main.tf b/examples/full-cluster-tf-upgrade/1.32/kubeconfig.eks-main.tf new file mode 100644 index 0000000..5a6333e --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/kubeconfig.eks-main.tf @@ -0,0 +1,29 @@ +resource "null_resource" "kubeconfig" { + triggers = { + always_run = timestamp() + } + provisioner "local-exec" { + command = "which kubectl > /dev/null 
2>&1; if [ $? != 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" + } + provisioner "local-exec" { + command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'" + } + provisioner "local-exec" { + environment = { + AWS_PROFILE = var.profile + AWS_REGION = local.region + } + command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/kube.config" + } + depends_on = [aws_eks_cluster.eks_cluster] +} + +#--- +# call it like +#--- +## provisioner "local-exec" { +## environment = { +## KUBECONFIG = "${path.root}/setup/kube.config" +## } +## command = "kubectli set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true" +## } diff --git a/examples/full-cluster-tf-upgrade/1.32/locals.tf b/examples/full-cluster-tf-upgrade/1.32/locals.tf new file mode 100644 index 0000000..d7458c2 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/locals.tf @@ -0,0 +1,7 @@ +locals { + base_tags = { + "eks-cluster-name" = var.cluster_name + "boc:tf_module_version" = local._module_version + "boc:created_by" = "terraform" + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/main.tf b/examples/full-cluster-tf-upgrade/1.32/main.tf new file mode 100644 index 0000000..e1507fe --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/main.tf @@ -0,0 +1,246 @@ +data "aws_vpc" "eks_vpc" { + filter { + name = "tag:Name" + values = [var.eks_vpc_name] + } +} + +data "aws_subnets" "subnets" { + filter { + name = "tag:Name" + values = [var.subnets_name] + } + filter { + name = "vpc-id" + values = [data.aws_vpc.eks_vpc.id] + } +} + +data "aws_subnet" "subnets" { + for_each = toset(data.aws_subnets.subnets.ids) + id = each.key +} + +data "aws_ebs_default_kms_key" "current" {} + +data "aws_kms_key" "ebs_key" { + key_id = data.aws_ebs_default_kms_key.current.key_arn +} + +# in ew, need to exclude us-east-1e for now, as it lacks sufficient resources to establish the cluster +locals { + vpc_id = data.aws_vpc.eks_vpc.id + vpc_cidr_block = data.aws_vpc.eks_vpc.cidr_block + subnets = [for k, v in data.aws_subnet.subnets : v.id if length(regexall("us-east-1e", v.availability_zone)) == 0] + s3_base_arn = format("arn:%v:%v:::%%v", data.aws_arn.current.partition, "s3") + + # https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html + autoscale_tags = { + format("k8s.io/cluster-autoscaler/%v", var.cluster_name) = "owned" + "k8s.io/cluster-autoscaler/enabled" = "TRUE" + } + +} + +# The log group name format is /aws/eks//cluster +# Reference: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html +# define the CW log here to be able to specify a log retention date. There is no other way to do that. + +resource "aws_cloudwatch_log_group" "cluster" { + name = format("/aws/eks/%v/cluster", var.cluster_name) + retention_in_days = var.cluster_log_retention_days + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + ) +} + +# we changed endpoint_public_access to false by default. This is so we can reach the EKS API through private IPs +# from on-prem and from the cloud. Otherwise, another account outside of where this is created will be unable to +# access teh API. 
This also requires a SG change in securitygroup.tf + +resource "aws_eks_cluster" "eks_cluster" { + name = var.cluster_name + version = var.cluster_version + role_arn = module.role_eks-cluster.role_arn + enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] + + vpc_config { + subnet_ids = local.subnets + # security_group_ids = [aws_security_group.additional_eks_cluster_sg.id] + security_group_ids = [aws_security_group.extra_cluster_sg.id] + endpoint_private_access = true + endpoint_public_access = false + public_access_cidrs = var.census_public_cidr + } + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + ) + + # Ensure that IAM Role permissions are created before and deleted after EKS Cluster handling. + # Otherwise, EKS will not be able to properly delete EKS managed EC2 infrastructure such as Security Groups. + depends_on = [ + module.role_eks-cluster, + module.role_eks-nodegroup + ] +} + +resource "aws_eks_node_group" "eks-nodegroup" { + cluster_name = aws_eks_cluster.eks_cluster.name + node_group_name = format("%v%v-nodegroup", local._prefixes["eks"], var.cluster_name) + node_role_arn = module.role_eks-nodegroup.role_arn + subnet_ids = local.subnets + ami_type = "AL2_x86_64" + # instance_types = [var.eks_instance_type] + # disk_size = var.eks_instance_disk_size + + scaling_config { + desired_size = var.eks_ng_desire_size + max_size = var.eks_ng_max_size + min_size = var.eks_ng_min_size + } + + launch_template { + id = aws_launch_template.eks-nodegroup.id + version = aws_launch_template.eks-nodegroup.latest_version + } + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + local.autoscale_tags, + ) + + lifecycle { + ignore_changes = [launch_template, scaling_config] + } + + # Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling. + # Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces. 
+ depends_on = [ + module.role_eks-cluster, + module.role_eks-nodegroup, + ] +} + +#--- +# Launch Template with AMI +#--- +#data "aws_ssm_parameter" "cluster" { +# name = "/aws/service/eks/optimized-ami/${aws_eks_cluster.eks_cluster.version}/amazon-linux-2/recommended/image_id" +#} + +#data "aws_launch_template" "cluster" { +# name = aws_launch_template.cluster.name +# +# depends_on = [aws_launch_template.cluster] +#} + +locals { + launch_template_tags = { + "Name" = format("%v%v-nodegroup-instance-name", local._prefixes["eks"], var.cluster_name) + format("kubernetes.io/cluster/%v", var.cluster_name) = "owned" + } +} + +resource "aws_launch_template" "eks-nodegroup" { + instance_type = var.eks_instance_type + name = format("%v%v-launch-template", local._prefixes["eks"], var.cluster_name) + update_default_version = true + # key_name = aws_key_pair.cluster_keypair.key_name + # key_name = module.key_pair.key_pair_name + # vpc_security_group_ids = [aws_security_group.additional_eks_cluster_sg.id] + vpc_security_group_ids = [aws_security_group.extra_cluster_sg.id] + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + ) + + tag_specifications { + resource_type = "instance" + + tags = merge( + local.base_tags, + { "boc:created_by" = "eks-launch-template" }, + local.common_tags, + local.launch_template_tags, + var.tags, + var.application_tags, + ) + } + + tag_specifications { + resource_type = "volume" + + tags = merge( + local.base_tags, + { "boc:created_by" = "eks-launch-template" }, + local.common_tags, + var.tags, + var.application_tags, + ) + } + + tag_specifications { + resource_type = "network-interface" + + tags = merge( + local.base_tags, + { "boc:created_by" = "eks-launch-template" }, + local.common_tags, + var.tags, + var.application_tags, + ) + } + + # tag_specifications { + # resource_type = "snapshot" + # + # tags = merge( + # local.base_tags, + # tomap({ "boc:created_by" = "eks-launch-template" }), + # local.common_tags, + # var.tags, + # ) + # } + + block_device_mappings { + device_name = "/dev/xvda" + + ebs { + volume_size = var.eks_instance_disk_size + volume_type = var.eks_instance_volume_type + delete_on_termination = true + encrypted = true + # kms_key_id = data.aws_kms_key.ebs_key.arn + # kms_key_id = data.aws_ebs_default_kms_key.current.key_arn + kms_key_id = data.aws_kms_key.ebs_key.arn + } + } + + user_data = base64encode(local.eks-node-private-userdata) +} + +#### User data for worker launch + +locals { + eks-node-private-userdata = templatefile( + "${path.module}/templates/node-private-userdata.tmpl", { + endpoint = aws_eks_cluster.eks_cluster.endpoint + cluster_ca = aws_eks_cluster.eks_cluster.certificate_authority[0].data + cluster_name = var.cluster_name + } + ) +} diff --git a/examples/full-cluster-tf-upgrade/1.32/oidc.tf b/examples/full-cluster-tf-upgrade/1.32/oidc.tf new file mode 100644 index 0000000..311b99d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/oidc.tf @@ -0,0 +1,32 @@ +# Most of this file references the AWS documentation to install the +# Amazon EFS CSI driver. This documentation is found here: +# https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html + +data "tls_certificate" "certs" { + url = aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer +} + +# Create the oidc provider for the service account. 
This is a prerequisite +# for using the EFS CSI Driver: +# https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html + +resource "aws_iam_openid_connect_provider" "oidc" { + client_id_list = ["sts.amazonaws.com"] + thumbprint_list = [data.tls_certificate.certs.certificates[0].sha1_fingerprint] + url = aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer +} + +locals { + oidc_provider_url = replace(aws_iam_openid_connect_provider.oidc.url, "https://", "") + oidc_provider_arn = aws_iam_openid_connect_provider.oidc.arn +} + +output "oidc_provider_url" { + description = "OpenID Connector provider URL" + value = local.oidc_provider_url +} + +output "oidc_provider_arn" { + description = "OpenID Connector provider ARN" + value = local.oidc_provider_arn +} diff --git a/examples/full-cluster-tf-upgrade/1.32/outputs.tf b/examples/full-cluster-tf-upgrade/1.32/outputs.tf new file mode 100644 index 0000000..fe6708f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/outputs.tf @@ -0,0 +1,63 @@ +#output "cluster" { +# description = "Full EKS Cluster object output" +# value = aws_eks_cluster.eks_cluster +#} + +output "cluster_name" { + description = "The name of the cluster that was created." + value = aws_eks_cluster.eks_cluster.name +} + +output "cluster_endpoint" { + description = "The endpoint used to reach the Kubernetes API server." + value = aws_eks_cluster.eks_cluster.endpoint +} + +output "cluster_certificate_authority_data" { + description = "Certificate data required to successfully communicate with the Kubernetes API server." + value = aws_eks_cluster.eks_cluster.certificate_authority[0].data +} + +output "cluster_auth_token" { + description = "The token required to authenticate with the cluster." + # value = data.aws_eks_cluster_auth.eks_auth.token + value = local.aws_eks_cluster_auth.token + sensitive = true +} + +output "cluster_worker_sg_id" { + description = "Security group ids attached to the cluster worker nodes." + value = aws_security_group.all_worker_mgmt.id +} + +output "cluster_sg_id" { + description = "Security group ids attached to the cluster control plane." + value = aws_security_group.additional_eks_cluster_sg.id +} + +output "extra_cluster_sg_id" { + description = "Security group IDs for cluster/node access" + value = aws_security_group.extra_cluster_sg.id +} + +output "cluster_subnet_ids" { + description = "Subnet IDs used to create the cluster" + value = local.subnets +} + +output "cluster_vpc_id" { + description = "VPC IDs on which the cluster was created" + value = local.vpc_id +} + +## # secondary subnets +## output "cluster_cni_subnet_ids" { +## description = "Subnet IDs used to create the cluster on the CNI custom network." +## value = local.cni_subnets +## } +## +## output "cluster_cni_custom_sg_id" { +## description = "Security group ids attached to the cluster worker nodes for CNI custom networking.." 
+## value = aws_security_group.cni_custom_sg.id +## } +## diff --git a/examples/full-cluster-tf-upgrade/1.32/policy.tf b/examples/full-cluster-tf-upgrade/1.32/policy.tf new file mode 100644 index 0000000..b7ea3b0 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/policy.tf @@ -0,0 +1,185 @@ +resource "aws_iam_policy" "nlb-policy" { + name = format("%v%v-nlb", local._prefixes["eks-policy"], var.cluster_name) + path = "/" + description = "Allow configuration of the ELB" + policy = data.aws_iam_policy_document.nlb-policy.json + + tags = merge( + local.base_tags, + var.tags, + var.application_tags, + ) +} + +# Q: why CreateSecurityGroup +# TBD: refine resources to limit only to eks configurations +data "aws_iam_policy_document" "nlb-policy" { + statement { + sid = "EKSNLBConfiguration" + effect = "Allow" + actions = [ + "elasticloadbalancing:*", + "ec2:CreateSecurityGroup", + "ec2:Describe*", + ] + resources = ["*"] + } +} + +resource "aws_iam_policy" "cloudwatch-policy" { + name = format("%v%v-cloudwatch", local._prefixes["eks-policy"], var.cluster_name) + path = "/" + description = "Allow sending metric data to cloudwatch" + policy = data.aws_iam_policy_document.cloudwatch-policy.json + + tags = merge( + local.base_tags, + var.tags, + var.application_tags, + ) +} + +# TBD: refine resources to limit only to eks configurations +data "aws_iam_policy_document" "cloudwatch-policy" { + statement { + sid = "EKSCloudwatchMetrics" + effect = "Allow" + actions = [ + "cloudwatch:PutMetricData", + ] + resources = ["*"] + } +} + +#--- +# cluster admin policy +#--- +resource "aws_iam_policy" "cluster-admin-policy" { + name = format("%v%v-cluster-admin", local._prefixes["eks-policy"], var.cluster_name) + path = "/" + description = "Allow for administration of the cluster ${var.cluster_name} using AWS resources" + policy = data.aws_iam_policy_document.cluster-admin-policy.json + + tags = merge( + local.base_tags, + var.tags, + var.application_tags, + ) +} + +data "aws_iam_policy_document" "cluster-admin-policy" { + dynamic "statement" { + for_each = local.admin_policy_statements + iterator = s + content { + sid = format("%v%vAccess", lookup(s.value, "effect", "Allow"), s.key) + effect = lookup(s.value, "effect", "Allow") + actions = lookup(s.value, "actions", []) + resources = lookup(s.value, "resources", []) + } + } +} + +locals { + base_arn = format("arn:%v:%%v:%v:%v:%%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id) + iam_arn = format("arn:%v:iam::%v:%%v", data.aws_arn.current.partition, data.aws_caller_identity.current.account_id) + common_arn = format("arn:%v:%%v:%v:%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id) + eks_resources = ["cluster", "addon", "nodegroup", "identityproviderconfig"] + + admin_policy_statements = { + ECRRead = { + actions = [ + "ecr:Describe*", + "ecr:Get*", + "ecr:ListImages", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + ] + resources = ["*"] + } + ECRWrite = { + actions = [ + "ecr:BatchDeleteImage", + "ecr:CompleteLayerUpload", + "ecr:CreateRepository", + "ecr:DeleteRepository", + "ecr:InitiateLayerUpload", + "ecr:PutImage", + "ecr:UploadLayerPart" + ] + resources = [format(local.common_arn, "ecr", format("repository/eks/%v/*", var.cluster_name))] + } + EKSRead = { + actions = [ + "eks:ListClusters", + "eks:ListAddons", + "eks:ListNodegroups", + "eks:DescribeCluster", + "eks:DescribeAddon*", 
+ "eks:DescribeNodegroup", + ] + resources = [ + format(local.common_arn, "eks", "cluster/*"), + format(local.common_arn, "eks", "addon/*"), + format(local.common_arn, "eks", "addons/*"), + format(local.common_arn, "eks", "/addons/*"), + format(local.common_arn, "eks", "nodegroup/*"), + ] + } + IAMRead = { + actions = [ + "iam:ListRoles", + ] + resources = ["*"] + } + SSMGet = { + actions = [ + "ssm:GetParameter", + ] + resources = [ + format("arn:%v:%v:%v:%v:%v", data.aws_arn.current.partition, "ssm", data.aws_region.current.name, "", "parameter/aws/service/eks/*") + ] + } + EKSReadMyClusters = { + actions = [ + "eks:List*", + "eks:Read*", + "eks:Describe*", + "eks:AccessKubernetesApi", + ] + resources = flatten(concat( + [format(local.common_arn, "eks", format("/clusters/%v/addons", var.cluster_name))], + [for r in local.eks_resources : [format(local.common_arn, "eks", format("%v/%v", r, var.cluster_name)), + format(local.common_arn, "eks", format("%v/%v/*", r, var.cluster_name))]] + )) + } + } +} + + +#--- +# cluster admin assume policy +#--- +resource "aws_iam_policy" "cluster-admin_assume_policy" { + name = format("%v%v-cluster-admin-assume", local._prefixes["eks-policy"], var.cluster_name) + path = "/" + description = "Allow for assume role to the cluster-admin role for ${var.cluster_name}" + policy = data.aws_iam_policy_document.cluster-admin_assume_policy.json + + tags = merge( + local.base_tags, + var.tags, + var.application_tags, + tomap({ "Name" = format("%v%v-cluster-admin-assume", local._prefixes["eks-policy"], var.cluster_name) }), + ) +} + +data "aws_iam_policy_document" "cluster-admin_assume_policy" { + statement { + sid = "AllowSTSAssumeClusterAdminRole" + effect = "Allow" + actions = ["sts:AssumeRole"] + resources = [module.role_cluster-admin.role_arn] + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/prefixes.tf b/examples/full-cluster-tf-upgrade/1.32/prefixes.tf new file mode 100644 index 0000000..03303f1 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/prefixes.tf @@ -0,0 +1,34 @@ +locals { + _prefixes = { + "efs" = "v-efs-" + "s3" = "v-s3-" + "ebs" = "v-ebs-" + "kms" = "k-kms-" + "role" = "r-" + "policy" = "p-" + "group" = "g-" + "security-group" = "" # "sg-" + # VPC + "vpc" = "" + "dhcp-options" = "" + "vpc-peer" = "vpcp-" + "route-table" = "route-" + "subnet" = "" + "vpc-endpoint" = "vpce-" + "elastic-ip" = "eip-" + "nat-gateway" = "nat-" + "internet-gateway" = "igw-" + "network-acl" = "nacl-" + "customer-gateway" = "cgw-" + "vpn-gateway" = "vpcg-" + "vpn-connection" = "vpn_" + "log-group" = "lg-" + "log-stream" = "lgs-" + # EKS + "eks" = "eks-" + "eks-user" = "s-eks-" + "eks-role" = "r-eks-" + "eks-policy" = "p-eks-" + "eks-security-group" = "eks-" # "sg-eks-" + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/providers.tf b/examples/full-cluster-tf-upgrade/1.32/providers.tf new file mode 100644 index 0000000..f0e85a2 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/providers.tf @@ -0,0 +1,25 @@ +terraform { + required_version = ">= 0.12.31" +} + +# to import, you cannot have provider fields which count on data elements (as these locals show). You need to use the config_path. 
+# see these for more info: +# https://github.com/hashicorp/terraform-provider-kubernetes/issues/793 +# https://www.terraform.io/docs/cli/commands/import.html#provider-configuration +# https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs + +provider "kubernetes" { + host = local.aws_eks_cluster.endpoint + cluster_ca_certificate = base64decode(local.aws_eks_cluster.certificate_authority[0].data) + token = local.aws_eks_cluster_auth.token + # config_path = "${path.root}/setup/kube.config" +} + +provider "helm" { + kubernetes { + host = local.aws_eks_cluster.endpoint + + cluster_ca_certificate = base64decode(local.aws_eks_cluster.certificate_authority[0].data) + token = local.aws_eks_cluster_auth.token + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/region.tf b/examples/full-cluster-tf-upgrade/1.32/region.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/region.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/full-cluster-tf-upgrade/1.32/role.tf b/examples/full-cluster-tf-upgrade/1.32/role.tf new file mode 100644 index 0000000..ffced71 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/role.tf @@ -0,0 +1,175 @@ +#--- +# cluster +#--- +locals { + cluster_managed_policy_list = [ + "AmazonEKSClusterPolicy", + "AmazonEC2FullAccess", + "CloudWatchLogsFullAccess", + ] + cluster_managed_policies = [for p in data.aws_iam_policy.cluster_managed_policies : p.arn] +} + +data "aws_iam_policy" "cluster_managed_policies" { + for_each = toset(local.cluster_managed_policy_list) + name = each.key +} + +# this needs the two policies nlb-policy and cloudwatch-policy, created first + +module "role_eks-cluster" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade" + + role_name = format("%v%v-cluster", local._prefixes["eks"], var.cluster_name) + role_description = "EKS Cluster Role for ${var.cluster_name}" + enable_ldap_creation = false + assume_policy_document = data.aws_iam_policy_document.eks_assume.json + attached_policies = concat([aws_iam_policy.nlb-policy.arn, aws_iam_policy.cloudwatch-policy.arn], local.cluster_managed_policies) + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + var.application_tags, + ) +} + +data "aws_iam_policy_document" "eks_assume" { + statement { + sid = "EKSAssumeRole" + effect = "Allow" + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["eks.amazonaws.com"] + } + } +} + +output "role_eks-cluster_arn" { + description = "Role ARN for EKS Cluster Role" + value = module.role_eks-cluster.role_arn +} + +#--- +# nodegroup +#--- +locals { + nodegroup_managed_policy_list = [ + "AmazonEKSWorkerNodePolicy", + "AmazonEKS_CNI_Policy", + "AmazonEC2ContainerRegistryPowerUser", + "AmazonEC2ContainerRegistryReadOnly", + "CloudWatchLogsFullAccess", + # "AmazonS3FullAccess", + "AmazonSSMManagedInstanceCore", + "AmazonEC2RoleforSSM", + ] + nodegroup_managed_policies = [for p in data.aws_iam_policy.nodegroup_managed_policies : p.arn] +} + +data "aws_iam_policy" "nodegroup_managed_policies" { + for_each = toset(local.nodegroup_managed_policy_list) + name = each.key +} + +module "role_eks-nodegroup" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade" + + role_name = format("%v%v-nodegroup", local._prefixes["eks"], var.cluster_name) + role_description = "EKS Nodegroup Role for ${var.cluster_name}" + enable_ldap_creation = false + 
assume_policy_document = data.aws_iam_policy_document.ec2_assume.json + attached_policies = concat(local.nodegroup_managed_policies) + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + ) +} + +#---- +# STS: ec2 assume +#--- +data "aws_iam_policy_document" "ec2_assume" { + statement { + sid = "EKSAssumeRole" + effect = "Allow" + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +output "role_eks-nodegroup-role_arn" { + description = "Role ARN for EKS Cluster Nodegroup Role" + value = module.role_eks-nodegroup.role_arn +} + +#--- +# cluster-admin +#--- +module "role_cluster-admin" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade" + + role_name = format("%v%v-cluster-admin", local._prefixes["eks"], var.cluster_name) + role_description = "SAML EKS cluster admin Role for ${var.cluster_name}" + enable_ldap_creation = false + assume_policy_document = data.aws_iam_policy_document.allow_sts.json + # assume_policy_document = data.aws_iam_policy_document.cluster-admin_combined.json + attached_policies = [aws_iam_policy.cluster-admin-policy.arn] + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + ) +} + +output "role_cluster-admin-role_arn" { + description = "Role ARN for EKS Cluster Admin Role" + value = module.role_cluster-admin.role_arn +} + +# data "aws_iam_policy_document" "empty" {} + +data "aws_iam_policy_document" "allow_sts" { + statement { + sid = "AllowSTSAssume" + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "AWS" + identifiers = [ + format(local.iam_arn, "root"), + ] + } + } + ## statement { + ## sid = "AllowSTSAssumeFromSSO" + ## effect = "Allow" + ## actions = ["sts:AssumeRole"] + ## principals { + ## type = "AWS" + ## identifiers = [ + ## format(local.iam_arn, "root"), + ## ] + ## } + ## condition { + ## test = "ArnLike" + ## variable = "aws:PrincipalArn" + ## values = [ + ## format(local.iam_arn, "role/aws-reserved/sso.amazonaws.com/*/AWSReservedSSO_csvd-sa-sc-developer_*"), + ## format(local.iam_arn, "role/aws-reserved/sso.amazonaws.com/AWSReservedSSO_csvd-sa-sc-developer_*"), + ## ] + ## } + ## } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/saml.tf b/examples/full-cluster-tf-upgrade/1.32/saml.tf new file mode 100644 index 0000000..22c1f74 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/saml.tf @@ -0,0 +1,26 @@ +# because we can't link into remote state from the parent account, we have to use this +# also, there is no data source for saml provider + +locals { + saml_provider_arn = format(local.common_arn, "iam", "saml-provider/Census_TCO_IDMS") + saml_url = var.aws_environment == "gov" ? 
"https://signin.amazonaws-us-gov.com/saml" : "https://signin.aws.amazon.com/saml" +} + +data "aws_iam_policy_document" "saml_assume" { + statement { + sid = "SAMLFederationCensusIdP" + effect = "Allow" + actions = ["sts:AssumeRoleWithSAML"] + + principals { + type = "Federated" + identifiers = [local.saml_provider_arn] + } + + condition { + test = "StringEquals" + variable = "SAML:aud" + values = [local.saml_url] + } + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/securitygroup.ports.tf b/examples/full-cluster-tf-upgrade/1.32/securitygroup.ports.tf new file mode 100644 index 0000000..addc0fd --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/securitygroup.ports.tf @@ -0,0 +1,128 @@ +# See +# https://stackoverflow.com/questions/71902887/transport-error-while-dialing-dial-tcp-xx-xx-xx-xx15012-i-o-timeout-with-aws-e +# Ports needed to correctly install Istio for the error message: transport: Error while dialing dial tcp xx.xx.xx.xx15012: i/o timeout +# other ports here as needed +locals { + sg_additional_ports = [ + { + component = "istio" + description = "Envoy admin port / outbound" + from_port = 15000 + to_port = 15001 + }, + { + component = "istio" + description = "Debug port" + from_port = 15004 + to_port = 15004 + }, + { + component = "istio" + description = "Envoy inbound" + from_port = 15006 + to_port = 15006 + }, + { + component = "istio" + description = "HBONE mTLS tunnel port / secure networks XDS and CA services (Plaintext)" + from_port = 15008 + to_port = 15010 + }, + { + component = "istio" + description = "XDS and CA services (TLS and mTLS)" + from_port = 15012 + to_port = 15012 + }, + { + component = "istio" + description = "Control plane monitoring" + from_port = 15014 + to_port = 15014 + }, + { + component = "istio" + description = "Webhook container port, forwarded from 443" + from_port = 15017 + to_port = 15017 + }, + { + component = "istio" + description = "Merged Prometheus telemetry from Istio agent, Envoy, and application, Health checks" + from_port = 15020 + to_port = 15021 + }, + { + component = "istio" + description = "DNS port" + from_port = 15053 + to_port = 15053 + }, + { + component = "istio" + description = "Envoy Prometheus telemetry" + from_port = 15090 + to_port = 15090 + }, + { + component = "istio" + description = "aws-load-balancer-controller" + from_port = 9443 + to_port = 9443 + }, + { + component = "cert-manager" + description = "cert-manager-webhook" + from_port = 10250 + to_port = 10250 + }, + ] + + sg_additional_ingress_rules = { + for ikey, ivalue in local.sg_additional_ports : + "${ikey}_ingress" => { + description = ivalue.description + protocol = "tcp" + from_port = ivalue.from_port + to_port = ivalue.to_port + type = "ingress" + self = true + } + } + + sg_additional_egress_rules = { + for ekey, evalue in local.sg_additional_ports : + "${ekey}_egress" => { + description = evalue.description + protocol = "tcp" + from_port = evalue.from_port + to_port = evalue.to_port + type = "egress" + self = true + } + } +} + +resource "aws_vpc_security_group_ingress_rule" "additional" { + for_each = { for k, v in local.sg_additional_ingress_rules : v.from_port => v } + security_group_id = aws_security_group.additional_eks_cluster_sg.id + + description = each.value.description + from_port = each.value.from_port + to_port = each.value.to_port + ip_protocol = each.value.protocol + referenced_security_group_id = each.value.self ? 
aws_security_group.additional_eks_cluster_sg.id : null + # referenced_security_group_id = aws_security_group.all_worker_mgmt.id +} + +resource "aws_vpc_security_group_egress_rule" "additional" { + for_each = { for k, v in local.sg_additional_egress_rules : v.from_port => v } + security_group_id = aws_security_group.additional_eks_cluster_sg.id + + description = each.value.description + from_port = each.value.from_port + to_port = each.value.to_port + ip_protocol = each.value.protocol + referenced_security_group_id = each.value.self ? aws_security_group.additional_eks_cluster_sg.id : null + # referenced_security_group_id = aws_security_group.all_worker_mgmt.id +} diff --git a/examples/full-cluster-tf-upgrade/1.32/securitygroup.tf b/examples/full-cluster-tf-upgrade/1.32/securitygroup.tf new file mode 100644 index 0000000..6e4555c --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/securitygroup.tf @@ -0,0 +1,184 @@ +# these grant access, which may no longer be necessary. See https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html +# for details. If no SG is used when creating the cluster, it uses the default SG for it. In shared vpcs, there is no access to +# a default SG, which has no in or out rules, but the cluster will fail trying to get to the default SG. +# +# EKS created sg: eks-cluster-sg-{cluster-name}-{id} +# * in any from self +# * out any +# +# additional_eks_cluster_sg +# * in any from all_worker_mgmt SG +# * in port 443 from census on-prem, and 10/8 +# * out any +# +# all_worker_mgmt +# * in any local cidr +# * out any +# +# We can refine the SGs to let it create the default SG, whic contains all the needed cluster, and then +# create a new single SG for the needed traffic and add it to the launch template and cluster when created. +# We absorbe the local 10.x.x.x/x cidr into the 10/8 +# +# extra_cluster_sg +# * in any from self +# * in port 443 from census on-prem, and 10/8 +# * in port 10250 for kubectl logs from census on-prem, and 10/8 + +# once setup, you cannot change any ports here +resource "aws_security_group" "additional_eks_cluster_sg" { + name = format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name) + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + { "Name" = format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name) }, + ) + + vpc_id = data.aws_vpc.eks_vpc.id + + ingress { + from_port = 0 + to_port = 0 + protocol = -1 + + security_groups = [ + aws_security_group.all_worker_mgmt.id, + ## aws_security_group.cni_custom_sg.id + ] + } + # this grants in-VPC access to the K8S api + # updated to get all census private cidrs to get on-prem, as we are now sending the interface traffic over + # a private IP only (disabling public access). 
This is to reach a cluster api from another account and VPC + # so we open all the cloud accounts too + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + # cidr_blocks = [ var.vpc_cidr_block ] + cidr_blocks = concat(var.census_private_cidr, ["10.0.0.0/8"]) + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + lifecycle { + ignore_changes = [ingress, egress] + } +} + +# once setup, you cannot change any ports here +resource "aws_security_group" "all_worker_mgmt" { + name = format("%v%v-all-worker-mgmt", local._prefixes["eks-security-group"], var.cluster_name) + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + { "Name" = format("%v%v-all-worker-mgmt", local._prefixes["eks-security-group"], var.cluster_name) }, + ) + + vpc_id = data.aws_vpc.eks_vpc.id + + ingress { + from_port = 0 + to_port = 0 + protocol = -1 + cidr_blocks = [local.vpc_cidr_block] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + lifecycle { + ignore_changes = [ingress, egress] + } +} + +## resource "aws_security_group" "cni_custom_sg" { +## name = format("%v%v-cni-custom-networking", local._prefixes["eks-security-group"], var.cluster_name) +## +## tags = merge( +## local.base_tags, +## local.common_tags, +## var.tags, +## ) +## +## vpc_id = data.aws_vpc.eks_vpc.id +## +## ingress { +## from_port = 0 +## to_port = 0 +## protocol = -1 +## cidr_blocks = [ +## local.vpc_cidr_block, +## var.cni_vpc_cidr_block, +## ] +## } +## +## egress { +## from_port = 0 +## to_port = 0 +## protocol = "-1" +## cidr_blocks = ["0.0.0.0/0"] +## } +## } + +# once setup, you cannot change any ports here +# attach to cluster create, nodegroups +resource "aws_security_group" "extra_cluster_sg" { + name = format("%v%v-extra", local._prefixes["eks-security-group"], var.cluster_name) + description = format("Security group for additional access for EKS cluster %v", var.cluster_name) + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + { "Name" = format("%v%v-extra", local._prefixes["eks-security-group"], var.cluster_name) }, + ) + + vpc_id = data.aws_vpc.eks_vpc.id + + ingress { + from_port = 0 + to_port = 0 + protocol = -1 + self = true + } + + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = concat(var.census_private_cidr, ["10.0.0.0/8"]) + } + + # kubectl logs + ingress { + from_port = 10250 + to_port = 10250 + protocol = "tcp" + cidr_blocks = concat(var.census_private_cidr, ["10.0.0.0/8"]) + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + lifecycle { + ignore_changes = [ingress, egress] + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/settings.auto.tfvars.example b/examples/full-cluster-tf-upgrade/1.32/settings.auto.tfvars.example new file mode 100644 index 0000000..9feb3a0 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/settings.auto.tfvars.example @@ -0,0 +1,16 @@ +# clustername must NOT include eks +# vpc_name must be full name (from variables.vpc.auto.tfvars) +# domain_name is removed + +cluster_name = "{org}-{project}-{env}" +cluster_version = "1.32" +region = "us-gov-east-1" ## set to proper region where this will be deployed +contact_email = "" ## enter valid @census.gov email for the customer's group contact list +domain = "NAME" ## set to correct domain if using a shared vpc +eks_instance_disk_size = 40 +eks_vpc_name = "{vpc_full_name}" 
+eks_instance_type = "t3.xlarge" +eks_ng_desire_size = 3 +eks_ng_max_size = 15 +eks_ng_min_size = 3 +subnets_name = "*-{subnet_label}-*" diff --git a/examples/full-cluster-tf-upgrade/1.32/setup-directory.tf b/examples/full-cluster-tf-upgrade/1.32/setup-directory.tf new file mode 100644 index 0000000..e8eaba6 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/setup-directory.tf @@ -0,0 +1,9 @@ +resource "null_resource" "setup_directory" { + triggers = { + directory = format("%v/setup", path.root) + } + + provisioner "local-exec" { + command = "test -d ${self.triggers.directory} || mkdir -p ${self.triggers.directory}" + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/templates/node-private-userdata.tmpl b/examples/full-cluster-tf-upgrade/1.32/templates/node-private-userdata.tmpl new file mode 100644 index 0000000..0770f07 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/templates/node-private-userdata.tmpl @@ -0,0 +1,9 @@ +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="==MYBOUNDARY==" + +--==MYBOUNDARY== +Content-Type: text/x-shellscript; charset="us-ascii" +#!/bin/bash -xe +sudo /etc/eks/bootstrap.sh --apiserver-endpoint "$endpoint" --b64-cluster-ca "$cluster_ca" "$cluster_name" +--==MYBOUNDARY==--\ + diff --git a/examples/full-cluster-tf-upgrade/1.32/tf-run.data b/examples/full-cluster-tf-upgrade/1.32/tf-run.data new file mode 100644 index 0000000..e37c37d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/tf-run.data @@ -0,0 +1,79 @@ +VERSION 1.4.8 +REMOTE-STATE +COMMENT make sure the private-lb subnet and container subnets are tagged properly (see README.md) +STOP then continue with at step %%NEXT%% (tag:subnets-verified) + +TAG subnets-verified +COMMAND tf-directory-setup.py -l none -f +COMMAND setup-new-directory.sh +LINKTOP init + +LINKTOP provider_configs.d/provider.ldap_new.auto.tfvars +LINKTOP provider_configs.d/provider.ldap_new.tf +LINKTOP provider_configs.d/provider.ldap_new.variables.tf + +LINKTOP includes.d/variables.account_tags.tf +LINKTOP includes.d/variables.account_tags.auto.tfvars +LINKTOP includes.d/variables.infrastructure_tags.tf +LINKTOP includes.d/variables.infrastructure_tags.auto.tfvars +LINKTOP includes.d/variables.application_tags.tf +# LINKTOP includes.d/variables.application_tags.auto.tfvars + +LINK variables.vpc.tf +LINK variables.vpc.auto.tfvars +LINK variables.availability_zones.tf + +COMMAND tf-init + +STOP check variables.vpc.* files and then continue with %%NEXT%% (tag:setup-complete) + +TAG setup-complete +POLICY + +TAG ec2-key +null_resource.setup_directory +module.cluster_key_pair time_static.timestamp local_sensitive_file.ssh_private_key local_sensitive_file.ssh_public_key local_file.gitignore +## null_resource.generate_keypair +## aws_key_pair.cluster_keypair + +COMMAND tf-directory-setup.py -l s3 +## COMMENT be sure to add the setup/ec2-ssh-eks-{cluster} to git-secret, git-secret hide, add the setup/*secret and setup/*pub got git, and commit the entirety of the change + +TAG dns-zone +aws_route53_zone.cluster_domain +module.route53_cluster_domain_east module.route53_cluster_domain_west + +TAG create-cluster +ALL + +COMMENT Assumes setup the includes.d/parent_rs.tf according to the REAMDE.md has been done, will fail if not. 
You can answer no at the pause if you are not sure +PAUSE + +TAG setup-aws-auth +COMMENT cd aws-auth and tf-run.sh apply +STOP Once applied in this subdirectory, come back here and continue with step %%NEXT%% (tag:setup-efs) + +TAG setup-efs +COMMENT cd efs and tf-run.sh apply +STOP Once applied in this subdirectory, come back here and continue with step %%NEXT%% (tag:setup-addons) + +TAG setup-addons +COMMENT cd addons and tf-run.sh apply +STOP Once applied in this subdirectory, come back here and continue with step %%NEXT%% (tag:setup-irsa) + +TAG setup-irsa +COMMENT cd irsa-roles and tf-run.sh apply +STOP Once applied in this subdirectory, come back here and continue with step %%NEXT%% (tag:setup-common-services) + +TAG setup-common-services +COMMENT cd common-services and tf-run.sh apply +COMMENT Notes: this subdirectory is complicated, and it has a certificate step which is manual +COMMENT Notes: common-services also has other subdirectories to be applied, follow the directions from tf-run there +STOP Once applied in this subdirectory, come back here and continue with step %%NEXT%% (tag:setup-cluster-roles) + +TAG setup-cluster-roles +COMMENT cd cluster-roles and tf-run.sh apply +STOP Once applied in this subdirectory, come back here and continue with step %%NEXT%% (tag:complete) + +TAG complete +COMMENT You have completed the setup of the EKS cluster. There is a DNS Infoblox step, please contact badra001 for that diff --git a/examples/full-cluster-tf-upgrade/1.32/tf-run.destroy.data b/examples/full-cluster-tf-upgrade/1.32/tf-run.destroy.data new file mode 100644 index 0000000..8e0aa80 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/tf-run.destroy.data @@ -0,0 +1,37 @@ +VERSION 1.0.4 +BACKUP-STATE +COMMAND tf-init +COMMAND tf-state list + +aws_route53_zone.cluster_domain + +aws_eks_node_group.eks-nodegroup aws_eks_cluster.eks_cluster aws_iam_openid_connect_provider.oidc aws_launch_template.eks-nodegroup + +## aws_key_pair.cluster_keypair +## null_resource.generate_keypair + +module.cluster_key_pair time_static.timestamp local_sensitive_file.ssh_private_key local_sensitive_file.ssh_public_key local_file.gitignore + +module.role_cluster-admin module.role_eks-cluster module.role_eks-nodegroup +module.group_cluster-admin.aws_iam_group.this + +POLICY + +aws_security_group.additional_eks_cluster_sg aws_security_group.all_worker_mgmt aws_security_group.extra_cluster_sg +null_resource.cluster_roles["eks-console-full-access"] null_resource.cluster_roles["eks-console-restricted-access"] + +ALL + +## ./common-services/tf-run.destroy.data +## ./irsa-roles/cluster-autoscaler/tf-run.destroy.data +## ./irsa-roles/tf-run.destroy.data +## ./addons/tf-run.destroy.data +## ./efs/tf-run.destroy.data +## NO ./aws-auth/tf-run.destroy.data +## ./tf-run.destroy.data + + + +TAG ec2-key +## null_resource.generate_keypair +## aws_key_pair.cluster_keypair diff --git a/examples/full-cluster-tf-upgrade/1.32/variables.addons.tf b/examples/full-cluster-tf-upgrade/1.32/variables.addons.tf new file mode 100644 index 0000000..9aea722 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/variables.addons.tf @@ -0,0 +1,120 @@ +# aws eks describe-addon-versions --kubernetes-version 1.25 --query 'addons[].{Name:addonName,Version:addonVersions[].addonVersion}' --output text + +variable "addon_versions" { + description = "Map of addon versions by Kubernetes version" + type = map(map(string)) + default = { + "1.24" = { + "coredns" = "v1.8.7-eksbuild.3" + "kube-proxy" = "v1.24.7-eksbuild.2" + "vpc-cni" = 
"v1.11.4-eksbuild.1" + "aws-ebs-csi-driver" = "v1.18.0-eksbuild.1" + } + "1.25" = { + "coredns" = "v1.9.3-eksbuild.11" + "kube-proxy" = "v1.25.16-eksbuild.3" + "vpc-cni" = "v1.17.1-eksbuild.1" + "aws-ebs-csi-driver" = "v1.28.0-eksbuild.1" + "aws-efs-csi-driver" = "v1.7.6-eksbuild.1" + "adot" = "v0.94.1-eksbuild.1" + "amazon-cloudwatch-observability" = "v1.4.0-eksbuild.1" + } + "1.26" = { + "coredns" = "v1.9.3-eksbuild.11" + "kube-proxy" = "v1.26.13-eksbuild.2" + "vpc-cni" = "v1.17.1-eksbuild.1" + "aws-ebs-csi-driver" = "v1.28.0-eksbuild.1" + "aws-efs-csi-driver" = "v1.7.6-eksbuild.1" + "adot" = "v0.94.1-eksbuild.1" + "snapshot-controller" = "v6.3.2-eksbuild.1" + "amazon-cloudwatch-observability" = "v1.4.0-eksbuild.1" + } + "1.27" = { + "coredns" = "v1.10.1-eksbuild.7" + "kube-proxy" = "v1.27.10-eksbuild.2" + "vpc-cni" = "v1.17.1-eksbuild.1" + "aws-ebs-csi-driver" = "v1.28.0-eksbuild.1" + "aws-efs-csi-driver" = "v1.7.6-eksbuild.1" + "snapshot-controller" = "v6.3.2-eksbuild.1" + "adot" = "v0.94.1-eksbuild.1" + "amazon-cloudwatch-observability" = "v1.4.0-eksbuild.1" + } + "1.28" = { + "coredns" = "v1.10.1-eksbuild.13" + "kube-proxy" = "v1.28.12-eksbuild.5" + "vpc-cni" = "v1.18.3-eksbuild.3" + "aws-ebs-csi-driver" = "v1.34.0-eksbuild.1" + "aws-efs-csi-driver" = "v2.0.7-eksbuild.1" + "adot" = "v0.102.0-eksbuild.1" + "snapshot-controller" = "v8.0.0-eksbuild.1" + "amazon-cloudwatch-observability" = "v2.1.0-eksbuild.1" + "eks-pod-identity-agent" = "v1.3.2-eksbuild.2" + } + "1.29" = { + "coredns" = "v1.11.3-eksbuild.2" + "kube-proxy" = "v1.29.10-eksbuild.3" + "vpc-cni" = "v1.19.0-eksbuild.1" + "aws-ebs-csi-driver" = "v1.37.0-eksbuild.1" + "aws-efs-csi-driver" = "v2.1.0-eksbuild.1" + "adot" = "v0.109.0-eksbuild.1" + "snapshot-controller" = "v8.1.0-eksbuild.2" + "amazon-cloudwatch-observability" = "v2.3.1-eksbuild.1" + "eks-pod-identity-agent" = "v1.3.4-eksbuild.1" + } + "1.30" = { + "coredns" = "v1.11.3-eksbuild.1" + "kube-proxy" = "v1.30.3-eksbuild.5" + "vpc-cni" = "v1.18.5-eksbuild.1" + "aws-ebs-csi-driver" = "v1.35.0-eksbuild.1" + "aws-efs-csi-driver" = "v2.0.7-eksbuild.1" + "adot" = "v0.102.0-eksbuild.1" + "snapshot-controller" = "v8.0.0-eksbuild.1" + "amazon-cloudwatch-observability" = "v2.1.2-eksbuild.1" + "eks-pod-identity-agent" = "v1.3.2-eksbuild.2" + } + "1.31" = { + "coredns" = "v1.11.4-eksbuild.1" + "kube-proxy" = "v1.31.3-eksbuild.2" + "vpc-cni" = "v1.19.2-eksbuild.1" + "aws-ebs-csi-driver" = "v1.37.0-eksbuild.1" + "aws-efs-csi-driver" = "v2.1.2-eksbuild.1" + "adot" = "v0.109.0-eksbuild.2" + "snapshot-controller" = "v8.1.0-eksbuild.2" + "amazon-cloudwatch-observability" = "v2.6.0-eksbuild.1" + "eks-pod-identity-agent" = "v1.3.4-eksbuild.1" + } + "1.32" = { + "coredns" = "v1.11.4-eksbuild.24" + "kube-proxy" = "v1.32.9-eksbuild.2" + "vpc-cni" = "v1.20.5-eksbuild.1" + "aws-ebs-csi-driver" = "v1.53.0-eksbuild.1" + "aws-efs-csi-driver" = "v2.1.15-eksbuild.1" + "adot" = "v0.131.0-eksbuild.1" + "snapshot-controller" = "v8.3.0-eksbuild.1" + "amazon-cloudwatch-observability" = "v4.7.0-eksbuild.1" + "eks-pod-identity-agent" = "v1.3.10-eksbuild.1" + # "external-dns" = "v0.20.0-eksbuild.1" + # "prometheus-node-exporter" = "v1.10.2-eksbuild.4" + # "cert-manager" = "v1.19.1-eksbuild.2" + # "aws-fsx-csi-driver" = "v1.6.0-eksbuild.1" + # "aws-mountpoint-s3-csi-driver" = "v2.2.0-eksbuild.1" + # "aws-secrets-store-csi-driver-provider" = "v2.1.1-eksbuild.1" + # "eks-node-monitoring-agent" = "v1.4.2-eksbuild.1" + # "fluent-bit" = "v4.2.0-eksbuild.1" + # "aws-guardduty-agent" = "v1.12.1-eksbuild.2" + # 
"kube-state-metrics" = "v2.17.0-eksbuild.4" + } + } +} + +variable "cloudwatch-observability_log_names" { + description = "Amazon Cloudwatch Observability log group names" + type = list(string) + default = ["application", "dataplane", "host", "performance"] +} + +variable "cloudwatch-observability_log_retention_days" { + description = "Amazon Cloudwatch Observability log group retention in days" + type = number + default = 90 +} diff --git a/examples/full-cluster-tf-upgrade/1.32/variables.dns.auto.tfvars b/examples/full-cluster-tf-upgrade/1.32/variables.dns.auto.tfvars new file mode 100644 index 0000000..048e546 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/variables.dns.auto.tfvars @@ -0,0 +1,28 @@ +# uncomment the appropriate section + +##-- +## aws-gov internal +##-- +# main_dns_vpcs = { +# "us-gov-west-1" = "vpc-77877a12" +# "us-gov-east-1" = "vpc-099a991da7c4eb8a5" +# } +# main_dns_profile = "107742151971-do2-govcloud" + +##-- +## aws-gov dmz +##-- +# main_dns_vpcs = { +# "us-gov-east-1" = "vpc-02f1a0a3b40843e4e" +# "us-gov-west-1" = "vpc-0ce5930e94e434889" +# } +# main_dns_profile = "273715889907-ent-gov-dmz-network-prod" + +##-- +## lab-gov +##-- +# main_dns_vpcs = { +# "us-gov-east-1" = "vpc-070595c5b133243dd" +# "us-gov-west-1" = "vpc-08b7b4db6a5ddf9c1" +# } +# main_dns_profile = " "269244441389-lab-gov-network-nonprod" diff --git a/examples/full-cluster-tf-upgrade/1.32/variables.dns.tf b/examples/full-cluster-tf-upgrade/1.32/variables.dns.tf new file mode 100644 index 0000000..c82d30c --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/variables.dns.tf @@ -0,0 +1,21 @@ +variable "main_dns_vpcs" { + description = "Map of region and VPC ids of the vpc1-services in us-gov-west-1 and us-gov-east-1 for centralized DNS" + type = map(string) + default = { + "us-gov-west-1" = "vpc-77877a12" + "us-gov-east-1" = "vpc-099a991da7c4eb8a5" + } +} + +variable "main_dns_profile" { + description = "Profile name for AWS for the main DNS central account" + type = string + default = "107742151971-do2-govcloud" +} + + +variable "dns_zone_description_prefix" { + description = "Zone description with the org-project-program-environment" + type = string + default = "" +} diff --git a/examples/full-cluster-tf-upgrade/1.32/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.32/variables.eks.tf new file mode 100644 index 0000000..5e166f4 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/variables.eks.tf @@ -0,0 +1,80 @@ +variable "eks_vpc_name" { + description = "Define the VPC name that will be used by this cluster" + type = string + default = "*UNKNOWN*" +} + +variable "subnets_name" { + description = "Define the name of the subnets to be used by this cluster" + type = string + default = "*-container-*" +} + +variable "cluster_name" { + description = "EKS cluster name name component used through out the EKS cluster describing its purpose (ex: dice-dev)" + type = string + default = null +} + +variable "cluster_version" { + description = "The EKS version number, see https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html" + type = string + default = "1.28" +} + +variable "cluster_log_retention_days" { + description = "The EKS cluster CloudWatch Log retention in days" + type = number + default = 90 +} + +variable "eks_instance_type" { + description = "EKS worker node instance type" + type = string + default = "t3.xlarge" +} +variable "eks_ng_desire_size" { + description = "Node Group desire size, default is 1" + type = number + default = 4 +} + +variable 
"eks_ng_min_size" { + description = "Node Group minimum size, default is 1" + type = number + default = 4 +} + +variable "eks_ng_max_size" { + description = "Node Group maximum size, default is 10" + type = number + default = 16 +} + +variable "eks_instance_disk_size" { + description = "The size of the disk in gigabytes" + type = number + default = 40 +} + +variable "eks_instance_volume_type" { + description = "The launch template volume type. One of [gp2, gp3]. Default gp3" + type = string + default = "gp3" + + validation { + condition = contains(["gp2", "gp3"], var.eks_instance_volume_type) + error_message = "eks_instance_volume_type invalid (gp2,gp3)." + } +} + +variable "domain" { + description = "The DNS domain name of the cluster. Defaults to empty which causes the sample application to use the domain assigned to the load balancer of the istio ingress gateway." + type = string + default = null +} + +variable "contact_email" { + description = "Email address in @census.gov of contact for the certificate. This is strongly recommended to be a group email address." + type = string +} diff --git a/examples/full-cluster-tf-upgrade/1.32/variables.route53.tf b/examples/full-cluster-tf-upgrade/1.32/variables.route53.tf new file mode 100644 index 0000000..fc70623 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/variables.route53.tf @@ -0,0 +1,24 @@ +variable "route53_endpoints" { + description = "Map of target route53 endpoints (for inbound) central VPCs" + type = map(map(string)) + default = { + route53_main = { + "account_id" = "057405694017" + "alias" = "ent-gov-network-prod" + "us-gov-east-1" = "vpc-0871ba8a6040d623a" + "us-gov-west-1" = "vpc-0f03ea065333f72c5" + } + route53_main_legacy = { + "account_id" = "107742151971" + "alias" = "do2-govcloud" + "us-gov-east-1" = "vpc-099a991da7c4eb8a5" + "us-gov-west-1" = "vpc-77877a12" + } + route53_main_dmz = { + "account_id" = "273715889907" + "alias" = "ent-gov-dmz-network-prod" + "us-gov-east-1" = "vpc-02f1a0a3b40843e4e" + "us-gov-west-1" = "vpc-0ce5930e94e434889" + } + } +} diff --git a/examples/full-cluster-tf-upgrade/1.32/variables.route53.tf.lab b/examples/full-cluster-tf-upgrade/1.32/variables.route53.tf.lab new file mode 100644 index 0000000..fbbfdbb --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/variables.route53.tf.lab @@ -0,0 +1,13 @@ +variable "route53_endpoints" { + description = "Map of target route53 endpoints (for inbound) central VPCs" + type = map(map(string)) + default = { + route53_main = { + "account_id" = "269244441389" + "alias" = "lab-gov-network-nonprod" + "us-gov-east-1" = "vpc-070595c5b133243dd" + "us-gov-west-1" = "vpc-08b7b4db6a5ddf9c1" + } + } +} + diff --git a/examples/full-cluster-tf-upgrade/1.32/variables.tags.tf b/examples/full-cluster-tf-upgrade/1.32/variables.tags.tf new file mode 100644 index 0000000..6e2a62e --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/variables.tags.tf @@ -0,0 +1,9 @@ +# this exists in CAT, but not in other accounts. At some point, remove this file and all references to +# var.tags + +variable "tags" { + description = "AWS Tags to apply to appropriate resources." 
+ type = map(string) + default = {} +} + diff --git a/examples/full-cluster-tf-upgrade/1.32/variables.username.tf b/examples/full-cluster-tf-upgrade/1.32/variables.username.tf new file mode 100644 index 0000000..46f8f47 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/variables.username.tf @@ -0,0 +1,5 @@ +variable "os_username" { + description = "OS username from environment variable, ideally as $USER" + type = string + default = null +} diff --git a/examples/full-cluster-tf-upgrade/1.32/version.tf b/examples/full-cluster-tf-upgrade/1.32/version.tf new file mode 100644 index 0000000..724e0f6 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/version.tf @@ -0,0 +1,4 @@ +locals { + _module_version = "1.0.0" +} + diff --git a/examples/full-cluster-tf-upgrade/1.32/versions.tf b/examples/full-cluster-tf-upgrade/1.32/versions.tf new file mode 100644 index 0000000..adf8e62 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.32/versions.tf @@ -0,0 +1,41 @@ +terraform { + required_version = ">= 1.0.0" + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.0" + } + ldap = { + source = "trevex/ldap" + version = ">= 0.5.4" + } + external = { + source = "hashicorp/external" + version = ">= 1.0" + } + null = { + source = "hashicorp/null" + version = ">= 1.0" + } + random = { + source = "hashicorp/random" + version = ">= 1.0" + } + template = { + source = "hashicorp/template" + version = ">= 1.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 1.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 1.0" + } + time = { + source = "hashicorp/time" + version = ">= 0.9" + } + } +}