From fe6ac350cd521fc3b03789df70e16e3068a29feb Mon Sep 17 00:00:00 2001 From: badra001 Date: Mon, 11 Mar 2024 09:57:40 -0400 Subject: [PATCH] update images, tags for eks 1.29 --- .../full-cluster-tf-upgrade/1.29/.gitignore | 5 + .../full-cluster-tf-upgrade/1.29/.tf-control | 20 + .../1.29/.tf-control.tfrc | 24 + .../full-cluster-tf-upgrade/1.29/README.md | 478 ++++++++++++++++++ .../full-cluster-tf-upgrade/1.29/ROLES.md | 119 +++++ .../1.29/addons/.tf-control | 20 + .../1.29/addons/.tf-control.tfrc | 24 + .../1.29/addons/README.addons.md | 3 + .../1.29/addons/README.ebs.md | 75 +++ .../1.29/addons/README.md | 122 +++++ .../1.29/addons/addon_coredns.tf | 11 + .../1.29/addons/addon_ebs-csi.tf | 127 +++++ .../1.29/addons/addon_kube-proxy.tf | 13 + .../1.29/addons/addon_vpc-cni.tf | 87 ++++ .../1.29/addons/addons.tf | 5 + .../1.29/addons/data.eks-subdirectory.tf | 1 + .../addons/kubeconfig.eks-subdirectory.tf | 1 + .../1.29/addons/locals.tf | 17 + .../1.29/addons/parent_rs.tf | 1 + .../1.29/addons/prefixes.tf | 1 + .../1.29/addons/providers.tf | 1 + .../1.29/addons/region.tf | 4 + .../1.29/addons/role.tf.off | 101 ++++ .../1.29/addons/tf-run.data | 31 ++ .../1.29/addons/tf-run.destroy.data | 6 + .../1.29/addons/variables.addons.tf | 1 + .../1.29/addons/variables.eks.tf | 1 + .../1.29/addons/version.tf | 1 + .../1.29/applications/tf-run.data | 31 ++ .../1.29/applications/tf-run.destroy.data | 6 + .../1.29/aws-auth/.tf-control | 20 + .../1.29/aws-auth/.tf-control.tfrc | 24 + .../1.29/aws-auth/README.md | 68 +++ .../1.29/aws-auth/aws-auth.auto.tfvars | 28 + .../aws-auth/config_map.aws-auth.yaml.tpl | 17 + .../1.29/aws-auth/data.eks-subdirectory.tf | 1 + .../aws-auth/kubeconfig.eks-subdirectory.tf | 1 + .../1.29/aws-auth/patch-aws-auth.tf | 135 +++++ .../1.29/aws-auth/prefixes.tf | 1 + .../1.29/aws-auth/providers.tf | 1 + .../1.29/aws-auth/region.tf | 4 + .../1.29/aws-auth/settings.aws-auth.tf | 11 + .../1.29/aws-auth/tf-run.data | 14 + .../1.29/aws-auth/tf-run.destroy.data 
| 9 + .../1.29/aws-auth/variables.aws-auth.tf | 23 + .../1.29/aws-auth/variables.eks.tf | 1 + .../1.29/aws-auth/version.tf | 1 + .../1.29/aws-auth/versions.tf | 1 + .../1.29/bin/copy_image.sh | 326 ++++++++++++ .../1.29/bin/fix-terminating-namespace.sh | 29 ++ .../1.29/bin/remove-ecr.sh | 77 +++ .../1.29/bin/show-k8s-things.sh | 7 + .../1.29/cluster-roles/.tf-control | 20 + .../1.29/cluster-roles/.tf-control.tfrc | 24 + .../1.29/cluster-roles/README.md | 238 +++++++++ .../1.29/cluster-roles/RESULTS.md | 41 ++ .../1.29/cluster-roles/cm.tf.off | 6 + .../cluster-roles/data.eks-subdirectory.tf | 1 + .../1.29/cluster-roles/dba-clusterrole.tf | 24 + .../1.29/cluster-roles/dba-rolebinding.tf | 40 ++ .../1.29/cluster-roles/dba.iam.tf | 117 +++++ .../cluster-roles/deployer-clusterrole.tf | 67 +++ .../cluster-roles/deployer-rolebinding.tf | 91 ++++ .../1.29/cluster-roles/deployer.iam.tf | 167 ++++++ .../kubeconfig.eks-subdirectory.tf | 1 + .../1.29/cluster-roles/locals.tf | 11 + .../1.29/cluster-roles/main.tf | 30 ++ .../1.29/cluster-roles/prefixes.tf | 1 + .../1.29/cluster-roles/providers.tf | 1 + .../1.29/cluster-roles/region.tf | 4 + .../1.29/cluster-roles/tf-run.data | 18 + .../1.29/cluster-roles/variables.auto.tfvars | 16 + .../1.29/cluster-roles/variables.eks.tf | 1 + .../1.29/cluster-roles/variables.tf | 83 +++ .../1.29/cluster-roles/version.tf | 1 + .../1.29/cluster-roles/versions.tf | 1 + .../1.29/common-services/.gitignore | 1 + .../1.29/common-services/.tf-control | 20 + .../1.29/common-services/.tf-control.tfrc | 24 + .../1.29/common-services/README.md | 66 +++ .../1.29/common-services/README.output.md | 84 +++ .../common-services/cert-manager-issuer.tf | 14 + .../charts/cluster-autoscaler/.helmignore | 23 + .../charts/cluster-autoscaler/Chart.yaml | 20 + .../charts/cluster-autoscaler/README.md | 5 + .../cluster-autoscaler/templates/NOTES.txt | 18 + .../cluster-autoscaler/templates/_helpers.tpl | 117 +++++ .../templates/clusterrole.yaml | 163 ++++++ 
.../templates/clusterrolebinding.yaml | 16 + .../templates/deployment.yaml | 291 +++++++++++ .../cluster-autoscaler/templates/pdb.yaml | 16 + .../templates/podsecuritypolicy.yaml | 46 ++ .../priority-expander-configmap.yaml | 22 + .../templates/prometheusrule.yaml | 15 + .../cluster-autoscaler/templates/role.yaml | 78 +++ .../templates/rolebinding.yaml | 17 + .../cluster-autoscaler/templates/secret.yaml | 21 + .../cluster-autoscaler/templates/service.yaml | 37 ++ .../templates/serviceaccount.yaml | 13 + .../templates/servicemonitor.yaml | 24 + .../charts/cluster-autoscaler/values.yaml | 378 ++++++++++++++ .../.helmignore | 23 + .../Chart.yaml | 24 + .../templates/_helpers.tpl | 62 +++ .../templates/ca-key-pair.yaml | 8 + .../templates/clusterissuer.yaml | 7 + .../values.yaml | 6 + .../charts/istio-operator/Chart.yaml | 12 + .../istio-operator/crds/crd-operator.yaml | 48 ++ .../istio-operator/files/gen-operator.yaml | 220 ++++++++ .../istio-operator/templates/clusterrole.yaml | 115 +++++ .../templates/clusterrole_binding.yaml | 13 + .../charts/istio-operator/templates/crds.yaml | 6 + .../istio-operator/templates/deployment.yaml | 51 ++ .../istio-operator/templates/namespace.yaml | 8 + .../istio-operator/templates/service.yaml | 16 + .../templates/service_account.yaml | 12 + .../charts/istio-operator/values.yaml | 29 ++ .../istio-peerauthentication/.helmignore | 23 + .../istio-peerauthentication/Chart.yaml | 24 + .../templates/_helpers.tpl | 62 +++ .../templates/peerauthentication.yaml | 9 + .../istio-peerauthentication/values.yaml | 0 .../charts/istio-profile/.helmignore | 23 + .../charts/istio-profile/Chart.yaml | 6 + .../istio-profile/templates/_helpers.tpl | 62 +++ .../templates/istiooperator.yaml | 186 +++++++ .../charts/istio-profile/values.yaml | 44 ++ .../.helmignore | 23 + .../self-signed-certificate-issuer/Chart.yaml | 6 + .../templates/_helpers.tpl | 62 +++ .../templates/ca-issuer.yaml | 8 + .../templates/selfsigned-ca.yaml | 17 + 
.../templates/selfsigned-clusterissuer.yaml | 7 + .../values.yaml | 0 .../vault-certificate-issuer/.helmignore | 23 + .../vault-certificate-issuer/Chart.yaml | 24 + .../templates/_helpers.tpl | 62 +++ .../templates/app-role-issuer.yaml | 18 + .../templates/app-role-secret.yaml | 10 + .../templates/service-account-issuer.yaml | 20 + .../templates/token-issuer.yaml | 15 + .../templates/token-secret.yaml | 10 + .../vault-certificate-issuer/values.yaml | 47 ++ .../cloudwatch-agent/.tf-control | 20 + .../cloudwatch-agent/.tf-control.tfrc | 24 + .../cloudwatch-agent/README.md | 127 +++++ .../cloudwatch-agent/cloudwatch-agent.tf | 123 +++++ .../cloudwatch-agent/fluentbit.tf | 186 +++++++ .../cloudwatch-agent/fluentbit.values.yml | 229 +++++++++ .../cloudwatch-agent/locals.tf | 17 + .../cloudwatch-agent/region.tf | 3 + .../templates/fluentbit.env.yml.tpl | 13 + .../cloudwatch-agent/tf-run.data | 32 ++ .../cloudwatch-agent/tf-run.destroy.data | 6 + .../variables.cloudwatch-agent.auto.tfvars | 27 + .../variables.cloudwatch-agent.tf | 53 ++ .../variables.fluentbit.auto.tfvars | 14 + .../cloudwatch-agent/variables.fluentbit.tf | 57 +++ .../cluster-autoscaler/.tf-control | 20 + .../cluster-autoscaler/.tf-control.tfrc | 24 + .../cluster-autoscaler/cluster-autoscaler.tf | 85 ++++ .../cluster-autoscaler/locals.tf | 17 + .../cluster-autoscaler/region.tf | 3 + .../test-cluster-autoscaling.json | 24 + .../cluster-autoscaler/tf-run.data | 31 ++ .../cluster-autoscaler/tf-run.destroy.data | 6 + .../variables.cluster-autoscaler.auto.tfvars | 21 + .../variables.cluster-autoscaler.tf | 40 ++ .../common-services.auto.tfvars | 2 + .../1.29/common-services/copy_image.sh.off | 1 + .../1.29/common-services/copy_images.tf.off | 91 ++++ .../common-services/data.eks-subdirectory.tf | 1 + .../1.29/common-services/dns.tf | 25 + .../1.29/common-services/images.tf | 71 +++ .../kubeconfig.eks-subdirectory.tf | 1 + .../1.29/common-services/locals.tf | 17 + .../1.29/common-services/main.tf | 471 
+++++++++++++++++ .../1.29/common-services/parent_rs.tf | 1 + .../1.29/common-services/prefixes.tf | 1 + .../1.29/common-services/providers.tf | 1 + .../1.29/common-services/region.tf | 4 + .../1.29/common-services/tags.md | 20 + .../1.29/common-services/tf-run.data | 69 +++ .../1.29/common-services/tf-run.destroy.data | 9 + .../variables.common-services.auto.tfvars | 39 ++ .../variables.common-services.tf | 208 ++++++++ .../1.29/common-services/variables.eks.tf | 1 + .../variables.images.auto.tfvars | 145 ++++++ .../1.29/common-services/variables.images.tf | 26 + .../1.29/common-services/version.tf | 1 + .../1.29/common-services/versions.tf | 1 + .../1.29/create-iam-config.sh | 63 +++ .../1.29/data.eks-main.tf | 18 + .../full-cluster-tf-upgrade/1.29/dns-zone.tf | 237 +++++++++ .../1.29/dns-zone.tf.cat | 128 +++++ .../1.29/ebs-encryption.tf | 108 ++++ .../1.29/ec2-keypair.tf.obsolete | 92 ++++ .../1.29/efs/.tf-control | 20 + .../1.29/efs/.tf-control.tfrc | 24 + .../1.29/efs/README.efs.md | 81 +++ .../1.29/efs/README.md | 164 ++++++ .../full-cluster-tf-upgrade/1.29/efs/addon.tf | 15 + .../1.29/efs/data.eks-subdirectory.tf | 1 + .../full-cluster-tf-upgrade/1.29/efs/ecr.tf | 70 +++ .../full-cluster-tf-upgrade/1.29/efs/efs.tf | 27 + .../1.29/efs/kubeconfig.eks-subdirectory.tf | 1 + .../1.29/efs/locals.tf | 17 + .../1.29/efs/parent_rs.tf | 1 + .../1.29/efs/persistent-volume.tf | 19 + .../1.29/efs/policy.tf | 87 ++++ .../1.29/efs/prefixes.tf | 1 + .../1.29/efs/providers.tf | 1 + .../1.29/efs/region.tf | 4 + .../full-cluster-tf-upgrade/1.29/efs/role.tf | 53 ++ .../1.29/efs/storage-class.tf | 17 + .../1.29/efs/tf-run.data | 31 ++ .../1.29/efs/tf-run.destroy.data | 6 + .../1.29/efs/variables.efs.tf | 37 ++ .../1.29/efs/variables.eks.tf | 1 + .../1.29/efs/version.tf | 1 + .../1.29/efs/versions.tf | 1 + .../1.29/eks-console-access.tf | 71 +++ .../full-cluster-tf-upgrade/1.29/group.tf | 13 + .../1.29/includes.d/README.md | 30 ++ .../1.29/includes.d/data.eks-main.tf | 18 + 
.../1.29/includes.d/data.eks-subdirectory.tf | 15 + .../1.29/includes.d/kubeconfig.eks-main.tf | 29 ++ .../includes.d/kubeconfig.eks-subdirectory.tf | 29 ++ .../1.29/includes.d/parent_rs.tf | 4 + .../1.29/irsa-roles/.tf-control | 20 + .../1.29/irsa-roles/.tf-control.tfrc | 24 + .../1.29/irsa-roles/README.md | 64 +++ .../1.29/irsa-roles/data.eks-subdirectory.tf | 1 + .../1.29/irsa-roles/parent_rs.tf | 1 + .../1.29/irsa-roles/prefixes.tf | 1 + .../1.29/irsa-roles/providers.tf | 1 + .../1.29/irsa-roles/region.tf | 4 + .../1.29/irsa-roles/tf-run.data | 13 + .../1.29/irsa-roles/tf-run.destroy.data | 9 + .../1.29/irsa-roles/variables.eks.tf | 1 + .../irsa-roles/variables.irsa.auto.tfvars | 3 + .../1.29/irsa-roles/variables.irsa.tf | 14 + .../1.29/irsa-roles/variables.tags.tf | 1 + .../1.29/irsa-roles/version.tf | 1 + .../1.29/irsa-roles/versions.tf | 1 + .../1.29/kubeconfig.eks-main.tf | 29 ++ examples/full-cluster-tf-upgrade/1.29/main.tf | 250 +++++++++ examples/full-cluster-tf-upgrade/1.29/oidc.tf | 32 ++ .../full-cluster-tf-upgrade/1.29/outputs.tf | 63 +++ .../full-cluster-tf-upgrade/1.29/policy.tf | 185 +++++++ .../full-cluster-tf-upgrade/1.29/prefixes.tf | 34 ++ .../full-cluster-tf-upgrade/1.29/providers.tf | 25 + .../full-cluster-tf-upgrade/1.29/region.tf | 4 + examples/full-cluster-tf-upgrade/1.29/role.tf | 164 ++++++ examples/full-cluster-tf-upgrade/1.29/saml.tf | 26 + .../1.29/securitygroup.ports.tf | 128 +++++ .../1.29/securitygroup.tf | 184 +++++++ .../1.29/settings.auto.tfvars.example | 16 + .../1.29/setup-directory.tf | 9 + .../1.29/templates/node-private-userdata.tmpl | 9 + .../full-cluster-tf-upgrade/1.29/tf-run.data | 79 +++ .../1.29/tf-run.destroy.data | 37 ++ .../1.29/variables.addons.tf | 40 ++ .../1.29/variables.dns.auto.tfvars | 28 + .../1.29/variables.dns.tf | 21 + .../1.29/variables.eks.tf | 74 +++ .../1.29/variables.route53.tf | 24 + .../1.29/variables.route53.tf.lab | 13 + .../1.29/variables.tags.tf | 9 + .../1.29/variables.username.tf | 5 + 
.../full-cluster-tf-upgrade/1.29/version.tf | 4 + .../full-cluster-tf-upgrade/1.29/versions.tf | 41 ++ 273 files changed, 11755 insertions(+) create mode 100644 examples/full-cluster-tf-upgrade/1.29/.gitignore create mode 100644 examples/full-cluster-tf-upgrade/1.29/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.29/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.29/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/ROLES.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/README.addons.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/README.ebs.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/addon_coredns.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/addon_ebs-csi.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/addon_kube-proxy.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/addon_vpc-cni.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/addons.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/addons/data.eks-subdirectory.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/addons/kubeconfig.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/locals.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/addons/parent_rs.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/addons/prefixes.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/addons/providers.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/role.tf.off create mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/tf-run.data create 
mode 100644 examples/full-cluster-tf-upgrade/1.29/addons/tf-run.destroy.data create mode 120000 examples/full-cluster-tf-upgrade/1.29/addons/variables.addons.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/addons/variables.eks.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/addons/version.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/applications/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/applications/tf-run.destroy.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/aws-auth/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.29/aws-auth/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.29/aws-auth/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/aws-auth/aws-auth.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.29/aws-auth/config_map.aws-auth.yaml.tpl create mode 120000 examples/full-cluster-tf-upgrade/1.29/aws-auth/data.eks-subdirectory.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/aws-auth/kubeconfig.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/aws-auth/patch-aws-auth.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/aws-auth/prefixes.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/aws-auth/providers.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/aws-auth/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/aws-auth/settings.aws-auth.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/aws-auth/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/aws-auth/tf-run.destroy.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/aws-auth/variables.aws-auth.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/aws-auth/variables.eks.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/aws-auth/version.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/aws-auth/versions.tf create 
mode 100755 examples/full-cluster-tf-upgrade/1.29/bin/copy_image.sh create mode 100755 examples/full-cluster-tf-upgrade/1.29/bin/fix-terminating-namespace.sh create mode 100755 examples/full-cluster-tf-upgrade/1.29/bin/remove-ecr.sh create mode 100755 examples/full-cluster-tf-upgrade/1.29/bin/show-k8s-things.sh create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/RESULTS.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/cm.tf.off create mode 120000 examples/full-cluster-tf-upgrade/1.29/cluster-roles/data.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/dba-clusterrole.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/dba-rolebinding.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/dba.iam.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/deployer-clusterrole.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/deployer-rolebinding.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/deployer.iam.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/cluster-roles/kubeconfig.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/locals.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/main.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/cluster-roles/prefixes.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/cluster-roles/providers.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/tf-run.data create mode 100644 
examples/full-cluster-tf-upgrade/1.29/cluster-roles/variables.auto.tfvars create mode 120000 examples/full-cluster-tf-upgrade/1.29/cluster-roles/variables.eks.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/cluster-roles/variables.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/cluster-roles/version.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/cluster-roles/versions.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/.gitignore create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/README.output.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cert-manager-issuer.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/.helmignore create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/Chart.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/NOTES.txt create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/_helpers.tpl create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/deployment.yaml create mode 100644 
examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/pdb.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/role.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/secret.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/service.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/serviceaccount.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/values.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/.helmignore create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/Chart.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml create mode 100644 
examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/values.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/Chart.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/crds/crd-operator.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/files/gen-operator.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/clusterrole.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/clusterrole_binding.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/crds.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/deployment.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/namespace.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/service.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/service_account.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/values.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/.helmignore create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/Chart.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/templates/_helpers.tpl create mode 100644 
examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/values.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/.helmignore create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/Chart.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/templates/_helpers.tpl create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/templates/istiooperator.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/values.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/.helmignore create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/Chart.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/values.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/.helmignore create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/Chart.yaml create mode 100644 
examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/values.yaml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/cloudwatch-agent.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/fluentbit.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/fluentbit.values.yml create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/locals.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/templates/fluentbit.env.yml.tpl create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/tf-run.data create mode 100644 
examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/tf-run.destroy.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/variables.cloudwatch-agent.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/variables.cloudwatch-agent.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/variables.fluentbit.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/variables.fluentbit.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cluster-autoscaler/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cluster-autoscaler/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cluster-autoscaler/cluster-autoscaler.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cluster-autoscaler/locals.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cluster-autoscaler/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cluster-autoscaler/test-cluster-autoscaling.json create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cluster-autoscaler/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cluster-autoscaler/tf-run.destroy.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cluster-autoscaler/variables.cluster-autoscaler.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/cluster-autoscaler/variables.cluster-autoscaler.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/common-services.auto.tfvars create mode 120000 examples/full-cluster-tf-upgrade/1.29/common-services/copy_image.sh.off create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/copy_images.tf.off create mode 
120000 examples/full-cluster-tf-upgrade/1.29/common-services/data.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/dns.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/images.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/common-services/kubeconfig.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/locals.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/main.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/common-services/parent_rs.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/common-services/prefixes.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/common-services/providers.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/tags.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/tf-run.destroy.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/variables.common-services.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/variables.common-services.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/common-services/variables.eks.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/variables.images.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.29/common-services/variables.images.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/common-services/version.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/common-services/versions.tf create mode 100755 examples/full-cluster-tf-upgrade/1.29/create-iam-config.sh create mode 100644 examples/full-cluster-tf-upgrade/1.29/data.eks-main.tf create mode 100644 
examples/full-cluster-tf-upgrade/1.29/dns-zone.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/dns-zone.tf.cat create mode 100644 examples/full-cluster-tf-upgrade/1.29/ebs-encryption.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/ec2-keypair.tf.obsolete create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/README.efs.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/addon.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/efs/data.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/ecr.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/efs.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/efs/kubeconfig.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/locals.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/efs/parent_rs.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/persistent-volume.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/policy.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/efs/prefixes.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/efs/providers.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/role.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/storage-class.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/tf-run.destroy.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/efs/variables.efs.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/efs/variables.eks.tf create mode 120000 
examples/full-cluster-tf-upgrade/1.29/efs/version.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/efs/versions.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/eks-console-access.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/group.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/includes.d/README.md create mode 100644 examples/full-cluster-tf-upgrade/1.29/includes.d/data.eks-main.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/includes.d/data.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/includes.d/kubeconfig.eks-main.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/includes.d/kubeconfig.eks-subdirectory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/includes.d/parent_rs.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/irsa-roles/.tf-control create mode 100644 examples/full-cluster-tf-upgrade/1.29/irsa-roles/.tf-control.tfrc create mode 100644 examples/full-cluster-tf-upgrade/1.29/irsa-roles/README.md create mode 120000 examples/full-cluster-tf-upgrade/1.29/irsa-roles/data.eks-subdirectory.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/irsa-roles/parent_rs.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/irsa-roles/prefixes.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/irsa-roles/providers.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/irsa-roles/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/irsa-roles/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/irsa-roles/tf-run.destroy.data create mode 120000 examples/full-cluster-tf-upgrade/1.29/irsa-roles/variables.eks.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/irsa-roles/variables.irsa.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.29/irsa-roles/variables.irsa.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/irsa-roles/variables.tags.tf create mode 120000 
examples/full-cluster-tf-upgrade/1.29/irsa-roles/version.tf create mode 120000 examples/full-cluster-tf-upgrade/1.29/irsa-roles/versions.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/kubeconfig.eks-main.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/main.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/oidc.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/outputs.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/policy.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/prefixes.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/providers.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/region.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/role.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/saml.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/securitygroup.ports.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/securitygroup.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/settings.auto.tfvars.example create mode 100644 examples/full-cluster-tf-upgrade/1.29/setup-directory.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/templates/node-private-userdata.tmpl create mode 100644 examples/full-cluster-tf-upgrade/1.29/tf-run.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/tf-run.destroy.data create mode 100644 examples/full-cluster-tf-upgrade/1.29/variables.addons.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/variables.dns.auto.tfvars create mode 100644 examples/full-cluster-tf-upgrade/1.29/variables.dns.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/variables.eks.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/variables.route53.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/variables.route53.tf.lab create mode 100644 examples/full-cluster-tf-upgrade/1.29/variables.tags.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/variables.username.tf 
create mode 100644 examples/full-cluster-tf-upgrade/1.29/version.tf create mode 100644 examples/full-cluster-tf-upgrade/1.29/versions.tf diff --git a/examples/full-cluster-tf-upgrade/1.29/.gitignore b/examples/full-cluster-tf-upgrade/1.29/.gitignore new file mode 100644 index 0000000..010b80b --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/.gitignore @@ -0,0 +1,5 @@ +kube.config +ecr-login.txt +setup/ec2-ssh-eks-* +!setup/ec2-ssh-eks-*.pub +logs diff --git a/examples/full-cluster-tf-upgrade/1.29/.tf-control b/examples/full-cluster-tf-upgrade/1.29/.tf-control new file mode 100644 index 0000000..280f449 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/.tf-control @@ -0,0 +1,20 @@ +# .tf-control +# allows for setting a specific command to be used for tf-* commands under this git repo +# see tf-control.sh help for more info + +TFCONTROL_VERSION="1.0.5" + +TFCOMMAND="terraform_latest" +# TF_CLI_CONFIG_FILE=PATH-TO-FILE/.tf-control.tfrc +# TFARGS="" +# TFNOLOG="" +# TFNOCOLOR="" + +# use the following to force a specific version. An upgrade of an existing 0.12.31 to 1.x +# needs you to cycle through 0.13.17, 0.14.11, and then latest (0.15.5 not needed). Other +# steps in between. 
See https://github.e.it.census.gov/terraform/support/tree/master/docs/how-to/terraform-upgrade for details +# +#TFCOMMAND="terraform_0.12.31" +#TFCOMMAND="terraform_0.13.7" +#TFCOMMAND="terraform_0.14.11" +#TFCOMMAND="terraform_0.15.5" diff --git a/examples/full-cluster-tf-upgrade/1.29/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.29/.tf-control.tfrc new file mode 100644 index 0000000..7425488 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/.tf-control.tfrc @@ -0,0 +1,24 @@ +TFCONTROL_VERSION="1.0.5" + +# https://www.terraform.io/docs/cli/config/config-file.html +plugin_cache_dir = "/data/terraform/terraform.d/plugin-cache" +#disable_checkpoint = true + +provider_installation { +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "*/*/*" ] +# } + filesystem_mirror { + path = "/data/terraform/terraform.d/providers" + include = [ "*/*/*" ] + } +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "external.terraform.census.gov/*/*" ] +# } + direct { + include = [ "*/*/*" ] + } +} + diff --git a/examples/full-cluster-tf-upgrade/1.29/README.md b/examples/full-cluster-tf-upgrade/1.29/README.md new file mode 100644 index 0000000..8db62eb --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/README.md @@ -0,0 +1,478 @@ +# EKS Full Cluster Example 1.28 + +This is for deploying an EKS cluster with 1.28. This is revision of 1.28 example, so it is in beta mode. + +## About + +There are a number of steps to end up with a cluster. + +1. From main repository, in the same `vpc/{region}/vpc{number}` directory + 1. [Tag subnets](#subnet-tagging) in main repository (before creating nodegroup) +1. In the submodule repository, in the `vpc/{region}/vpc{number}/apps/{clustername}` directory + 1. Update `settings.auto.tfvars` + 1. Update `includes.d/parent_rs.tf` +1. Terraform [Automated Setup](#terraform-automated-setup) +1. 
Optionally, follow the Terraform Setup-by-Step, which is essentially the same as following the automated tf-run.data + 1. Initialize [Cluster Main](#initialize-cluster-main) directory + 1. Create [policies](#policies) + 1. Create [EC2 Keypair](#keypair-creation) + 1. Finish [cluster setup](#cluster-creation) + 1. Setup [aws-auth](#setup-aws-auth) + 1. Setup [EFS](#setup-efs) + 1. Setup [Common Services](#common-services) +1. [Access to the cluster](#access-to-the-cluster) +1. [Cluster Setup](#cluster-setup) + +## Post-Setup Tasks + +1. Connect DNS zone from on-prem to Route53 Resolvers with a forwarder + +## Subnet Tagging + +A tag needs to be added to the subnet(s) where the cluster will run. We haven't figured out yet how to incorporate this more +automatically. + +The file to update is the `variable.subnets.auto.tfvars`, in this case `vpc/east/vpc3/variables.subnets.auto.tfvars`: + +```hcl +private_subnets = [ + { base_cidr = "10.188.18.0/23", label = "private-lb", bits = 2, private = true, + tags = { "kubernetes.io/role/internal-elb" = 1 } + }, + { base_cidr = "10.188.17.0/24", label = "endpoints", bits = 2, private = true, tags = {} }, + { base_cidr = "10.188.20.0/23", label = "db", bits = 2, private = true, tags = {} }, + { base_cidr = "10.188.22.0/23", label = "apps", bits = 2, private = true, tags = {} }, + { base_cidr = "10.188.24.0/21", label = "container", bits = 2, private = true, + tags = { + "kubernetes.io/cluster/org-project-env" = "shared" + }, + } +# space all used up +] +``` + +We add the tag `"kubernetes.io/cluster/{cluster_name}" = "shared"` in order for the node groups to pick up the +cluster subnets. This is on the new `container` ubnet. + +For creating a service which uses load balancers (ELB, ALB, or NLB), the last tag listed here is needed +`"kubernetes.io/role/internal-elb" = 1`. This is only one tag for all EKS, not one per cluster, and it should apply +to the subnet(s) for load balancing. 
A separate set of subnets exist for load balacning, with a name including `private-lb`. + + +## Update the settings.auto.tfvars file + +Set the appropriate values in the `settings.auto.tfvars` file. An example starter file is at `settings.auto.tfvars.example`. +If you are deploying into an account using a shared VPC, you **must** define the domain name. Please be sure the domain +name exists. To do so, check the output of `dig`. It should come back with a value with `awsdns` in the response. + +```console +% dig +short in soa myenvironment.mydomain.csp1.census.gov +% dig +short in soa dev.geo.csp1.census.gov +ns-0.awsdns-us-gov-00.com. awsdns-hostmaster.amazon.com. 1 7200 900 1209600 86400 +``` + +Here is a sample file: + +```hcl +cluster_name = "org-project-env" +cluster_version = "1.28" +region = "us-gov-east-1" +domain = "org-project-env.env.domain.census.gov" +contact_email = "org-project-env-group-mailing-list@census.gov" +eks_instance_disk_size = 40 +eks_vpc_name = "vpc_full_name" +eks_instance_type = "t3.xlarge" +eks_ng_desire_size = 3 +eks_ng_max_size = 15 +eks_ng_min_size = 3 +subnets_name = "*-subnet_label-*" +``` + +You need to change these values: + +* cluster_name: put in the proper org, project, and environment. Cluster names should not be replicated across the environment. +These are tracked in the repo [cloud-information/aws/documentation/containers/](https://github.e.it.census.gov/terraform/cloud-information/blob/master/documentation/dns.md). +* region: include the correct region. This really is a duplicate of the `region` variable, so it may be removed in the future. +* domain: this is the domain name of the cluster, consisting of the cluster name and the proper domain name for the environment/VPC. +* contact_email: put in an email addres of a group responsible for this cluster. +* eks_instance_disk_size: this should be default to 40Gb for most use-cases; only change this if you have special requirement and have exception approval. 
+* eks_vpc_name: replace *vpc_full_name* with the appropriate vpc full name. This is used to find the vpc ID. +* subnets_name: replace *subnet_label* with the label of the subnets allocated to providing ENIs for the cluster node group and containers; often as `container` or `task` + +All the others are subject to your configuration. They are a good starting point, but can vary. + +## Update the includes.d/parent_rs.tf file + +```hcl +locals { + parent_rs = data.terraform_remote_state.vpc_{region}_vpc{number}_apps_eks-{cluster-name}.outputs +} +``` + +* region: west or east, dependent on which region the VPC is in +* number: incremental VPC number +* cluster-name: cluster name, the same as used in the `settings.auto.tfvars` file above + +# Terraform Automated Setup + +A `tf-run.data` file exists here, so the simplest way to implemnt is with the `tf-run.sh` script. + +* copy the `remote_state.yml` from the parent and update `directory` to be the current directory +* run the tf-run.sh + +```console +% tf-run.sh apply +``` + +* example of the `tf-run.sh` steps + +This is part of a larger cluster configuration, so at the end of the run it indicates another directory +to visit when done. 
+ +```console +% tf-run.sh list +* running action=plan +* START: tf-run.sh v1.1.2 start=1636562594 end= logfile=logs/run.plan.20211110.1636562594.log (not-created) +* reading from tf-run.data +* read 22 entries from tf-run.data +> list +** START: start=1636562594 +* 1 COMMENT> make sure the private-lb subnet and container subnets are tagged properly (see README.md) +* 2 STOP> then continue with at step 3 +* 3 COMMAND> tf-directory-setup.py -l none -f +* 4 COMMAND> setup-new-directory.sh +* 5 COMMAND> tf-init -upgrade +* 6 POLICY> (*.tf) aws_iam_policy.nlb-policy aws_iam_policy.cloudwatch-policy aws_iam_policy.cluster-admin-policy aws_iam_policy.cluster-admin_assume_policy +* 6 tf-plan -target=aws_iam_policy.nlb-policy -target=aws_iam_policy.cloudwatch-policy -target=aws_iam_policy.cluster-admin-policy -target=aws_iam_policy.cluster-admin_assume_policy +* 7 COMMENT> EC2 key pairs +* 8 tf-plan -target=null_resource.generate_keypair +* 9 tf-plan -target=aws_key_pair.cluster_keypair +* 10 COMMAND> tf-directory-setup.py -l s3 +* 11 COMMENT> be sure to add the setup/ec2-ssh-eks-{cluster} to git-secret, git-secret hide, add the setup/*secret and setup/*pub got git, and commit the entirety of the change +* 12 tf-plan +* 13 COMMENT> setup the includes.d/parent_rs.tf according to the includes.d/README +* 14 STOP> +* 15 COMMENT> cd aws-auth and tf-run.sh apply +* 16 STOP> +* 17 COMMENT> cd efs and tf-run.sh apply +* 18 STOP> +* 19 COMMENT> cd irsa-roles and tf-run.sh apply +* 20 STOP> +* 21 COMMENT> cd common-services and tf-run.sh apply +* 22 STOP> +** END: start=1636562594 end=1636562594 elapsed=0 logfile=logs/run.plan.20211110.1636562594.log (not-created) +``` + +It is highly recommended to use the `tf-run.sh` approach. This has a number of stopping points along the way with comments telling you what to do. +It also directs you to the subdirectories to visit to complete the setup. 
+
+# Terraform Manual Setup
+
+## Initialize Cluster Main
+
+We need to setup the main directory for the cluster. Be sure `remote_state.yml` is correct. Then:
+
+```shell
+tf-directory-setup.py -l none
+tf-init
+```
+
+## Policies
+
+First, we have to create the two policies. The roles will not get created until they do.
+
+```shell
+TFTARGET=$(grep ^res.*iam_policy *.tf |awk '{print "-target=" $2 "." $3}'|sed -e 's/"//g')
+tf-plan $TFTARGET
+tf-apply $TFTARGET
+unset TFTARGET
+```
+
+## Keypair Creation
+
+We need to create the SSH key, which then allows for the public key to be uploaded.
+
+```shell
+tf-plan -target=null_resource.generate_keypair
+tf-apply -target=null_resource.generate_keypair
+
+tf-plan -target=aws_key_pair.cluster_keypair
+tf-apply -target=aws_key_pair.cluster_keypair
+```
+
+## Cluster Creation
+
+Once created, we can run the rest of the code
+
+```shell
+tf-plan
+tf-apply
+```
+
+Finalize by linking to the remote state file:
+
+```shell
+tf-directory-setup.py -l s3
+```
+
+## Setup aws-auth
+
+Be sure `remote_state.yml` is correct. Examine the `settings.aws-auth.tfvars` and replace any remote state references to the proper
+objects. There is at least one, a `rolearn`. You can get the remote state path with
+
+```shell
+grep ^data remote_state.*{clustername}.tf | awk '{print $1 "." $2 "." $3}' |sed -e 's/"//g'
+```
+
+Next, we setup the remote state files, link to the parent remote state, and initialize terraform.
+
+```shell
+tf-directory-setup.py -l none
+# should only be one file here
+ln -s ../remote_state.applications_apps-adsd-eks_vpc_east_vpc2_apps_*.tf .
+setup-new-directory.sh
+tf-init
+```
+
+Then, we can plan, apply, and finalize:
+
+```shell
+tf-plan
+tf-apply
+tf-directory-setup.py -l s3
+```
+
+## Setup EFS
+
+Be sure `remote_state.yml` is correct. Examine the `main.tf` and replace any remote state references to the proper
+objects. 
You can find where they are used: + +```console +% grep data.terraform_remote_state *.tf +main.tf: vpc_id = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_vpc_id +main.tf: subnet_ids = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_subnet_ids +main.tf: cluster_worker_sg_id = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_worker_sg_id +main.tf: oidc_provider_url = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.oidc_provider_url +main.tf: oidc_provider_arn = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.oidc_provider_arn +``` + +Find the value to replace these with: + +```shell +grep ^data remote_state.*{clustername}.tf | awk '{print $1 "." $2 "." $3}' |sed -e 's/"//g' +``` + +Next, we setup the remote state files, link to the parent remote state, and initialize terraform. + +```shell +tf-directory-setup.py -l none +# should only be one file here +ln -s ../remote_state.applications_apps-adsd-eks_vpc_east_vpc2_apps_*.tf . +setup-new-directory.sh +``` + +Then, we have to create the polices. The roles will not get created until they do. + +```shell +TFTARGET=$(grep ^res.*iam_policy *.tf |awk '{print "-target=" $2 "." $3}'|sed -e 's/"//g') +tf-plan $TFTARGET +tf-apply $TFTARGET +unset TFTARGET +``` + +Finally, you can apply the rest: + + +```shell +tf-plan +tf-apply +``` + +## Common Services +### Certificate Authority + +Set the download to `false` + +```shell +# ca-cert.tf + ca_cert_download = false +``` + +Do the first apply, which generates the key and csr. You'll need to then submit the CSR. (directions generated) + +```shell +tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." 
$3}' |sed -e 's/"//g') +``` + + +```shell +# terraform taint null_resource.ca_cert[0] +# # (wait for submitted cert to be ready) +tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g') +tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g') +``` + +### Rest of Setup + +```shell +tf-plan +tf-apply +tf-directory-setup.py -l s3 +``` + +## Access to the cluster + +There are two ways to access the cluster. One is from the AWS Console and the other is via the IAM account or role. + +The cluster access vi console is found in the EKS section, under *clusters*. + +For IAM access, one must have IAM account credentials configured in `$HOME/.aws/credentials` and `$HOME/.aws/config`. [Here](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html) +are the docs, and we have an example below. Region is important, otherwise it defaults to `us-gov-west-1` and the STS will fail. + +```script +# $HOME/.aws/credentials +[252960665057-ma6-gov] +aws_access_key_id = ABCD1234... +aws_secret_access_key = abcd5678... + +# $HOME/.aws/config +[profile 252960665057-ma6-gov-eks-org-project-env] +source_profile = 252960665057-ma6-gov +region = us-gov-east-1 +role_arn = arn:aws-us-gov:iam::252960665057:role/r-eks-org-project-env-cluster-admin +role_session_name = badra001 +``` + +With this configuration, using the proifle `252960665057-ma6-gov` gives you the normal IAM access + +```console +% aws --profile 252960665057-ma6-gov sts get-caller-identity +{ + "UserId": "AIDATVZNBNXQ5UPHMBGPY", + "Account": "252960665057", + "Arn": "arn:aws-us-gov:iam::252960665057:user/a-badra001" +} +``` + +Using the other profile will use the source profile (which has to have permission to assume the role), the role arn, and a session +name mapping it back to your Census username (JBID). 
+ +```console +% aws --profile 252960665057-ma6-gov-eks-org-project-env sts get-caller-identity +{ + "UserId": "AROATVZNBNXQ7AV7W2ISZ:badra001", + "Account": "252960665057", + "Arn": "arn:aws-us-gov:sts::252960665057:assumed-role/r-eks-org-project-env-cluster-admin/badra001" +} +``` + +# Cluster Setup + +## Download Configuration + +Now that the cluster is created, we need the `kubectl` command and to download the configuration. + +* get [kubectl](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) + +```console +% aws eks --profile $(get-profile) --region $(get-region) update-kubeconfig --name test2 --kubeconfig ./test2.kube.config +Added new context arn:aws:eks:us-east-1:079788916859:cluster/test2 to /data/git-repos/terraform/079788916859-do2-cat_apps-adsd-eks/vpc/east-1/vpc4/apps/eks-test2/test2.kube.config +% export KUBECONFIG=$(pwd)/test2.kube.config +% kubectl get nodes +NAME STATUS ROLES AGE VERSION +ip-10-194-24-49.ec2.internal Ready 24m v1.20.4-eks-6b7464 +ip-10-194-24-90.ec2.internal Ready 24m v1.20.4-eks-6b7464 +ip-10-194-25-120.ec2.internal Ready 24m v1.20.4-eks-6b7464 +ip-10-194-26-252.ec2.internal Ready 24m v1.20.4-eks-6b7464 +``` + +## Authentication + +### Automated + +This is in theh `aws-auth` subdirectory. + +```shell +cd aws-auth +tf-init +tf-plan +tf-apply +``` + +### Manual + +To allow users and roles to manipulate the cluster, we add to the mapRole or mapUsera. + +```shell +kubectl edit -n kube-system configmap/aws-auth +``` + +Add sections for `mapRoles`: + +```yaml + mapRoles: | + - rolearn: arn:aws:iam::079788916859:role/r-inf-cloud-admin + username: system:node:{{EC2PrivateDNSName}} + groups: + - system:bootstrappers + - system:nodes + - eks-console-dashboard-full-access-group +``` + +Add sections for `mapUsers`: + +```yaml + mapUsers: | + - userarn: arn:aws:iam::079788916859:user/u-zawac002 + username: admin + groups: + - system:masters +``` + +We will like want to do this through templating. 
+ +* users + * arn:aws:iam::079788916859:user/u-badra001 + * arn:aws:iam::079788916859:user/u-ashle001 + * arn:aws:iam::079788916859:user/u-mcgin314 + * arn:aws:iam::079788916859:user/u-sall0002 + * arn:aws:iam::079788916859:user/u-zawac002 +* roles + * arn:aws:iam::079788916859:role/r-inf-cloud-admin + * arn:aws:iam::079788916859:role/r-adsd-cumulus + * arn:aws:iam::079788916859:role/r-adsd-eks + * arn:aws:iam::079788916859:role/r-adsd-tools + +## Adding Cluster Roles for AWS Console + +To allow [console access](https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml), we need these steps. + +It requires the cluster to be up and the `{clustername}.kube.config` file to exist along with the environment variable pointing to it. + +### Automated + +This appies just the full access cluste role, as the restricted one needs additional configuration. + +```shell +tf-apply -target=null_resource.apply_cluster_roles +``` + +### Manual + +```shell +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml +curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml +``` + +For full console, we'll use the first one. + +```console +% kubectl apply -f eks-console-full-access.yaml +clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole created +clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding created +``` + +# CHANGELOG + +- 1.0.0 -- 2023-10-27 + - setup for 1.28, ready for edits diff --git a/examples/full-cluster-tf-upgrade/1.29/ROLES.md b/examples/full-cluster-tf-upgrade/1.29/ROLES.md new file mode 100644 index 0000000..3880590 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/ROLES.md @@ -0,0 +1,119 @@ +# Roles + +There are several types of roles we handle within the EKS cluster. + +1. 
IAM Role for Service Account (IRSA)
+These roles involve an IAM role with a formatted name of r-eks-{cluster}-irsa__{k8snamespace}__{k8suser}. This will
+grant appropriate IAM permissions to a pod. It includes specific conditions for the local OIDC provider mapping to
+system:serviceaccount:{k8snamespace}:{k8suser}. This is super important because the pod inherits the permissions
+of the node group, which grants far too much access to the running pods. These are not mapped into the ConfigMap aws-auth.
+
+A default:default will exist which grants little to no AWS permissions.
+
+1. Cluster Admin Role
+This role is used for the cluster administration. It is of the form r-eks-{cluster}-cluster-admin. It has read access to the
+[EKS Console](https://console.amazonaws-us-gov.com/eks/home). It has:
+* access to read and write ECR for the specific repositories used for the cluster at /eks/{clustername}
+* access to the EKS API for the cluster
+* can download the kube.config file
+* is mapped with the ConfigMap aws-auth into k8suser admin and k8sgroup system:masters
+* permissions to update the node groups (via cli)
+* others as discovered
+
+Users will use this role through the use of STS:AssumeRole either with the console or CLI.
+
+1. Additional Application Roles
+These will be for granting access to clusterroles via namespace and k8suser to IAM or SAML users. They will take the form
+r-eks-{cluster}-{name} where name should consider some portion of the namespace and purpose, and the name cannot be one of the existing
+roles already in existence. These will typically not need any AWS Access beyond that of the update-config or get-token to obtain
+the configuration file. These will require a clusterrole and clusterrolebinding, and will need a username to go along with them.
+See [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) and [here](https://aws.amazon.com/premiumsupport/knowledge-center/eks-iam-permissions-namespaces/)
+for details about this. 
The configuration file to create this (yaml) will be stored in github, and ideally, it will be created through the use of Terraform to be able +to easily add these as needed. + +Users will use this role through the use of STS:AssumeRole either with the console or CLI. + +## IRSA Roles + +```hcl + condition { + test = "StringEquals" + variable = "${local.oidc_provider_url}:sub" + values = ["system:serviceaccount:${local.app2_namespace}:${local.app2_name}"] + } +``` + +* irsa-roles.aws-cli.tf +* irsa-roles.cumulus.tf +* irsa-roles.jenkins.tf + + +## Cluster Admin Role + +## Additional Application Roles + +## cumulus-dba +## cumulus-deployer +## cicd-deployer + +## jenkins + +* Tool: Jenkins +* Purpose: Used for CICD Pipeline + * build images + * copy images + * deploy pods + * deploy services + * other things as necessary +* Source System: VM on-prem +* AWS Access + * IAM Service account tied to the cluster name + * s-eks-{cluster}-cicd + * permissions to read and write ECR * but NOT eks/{clustername} + * permission to eks get-token + * permission to eks update-cluster (get kubeconfig) +* Kubernetes Access + * Username + * recommend the same pattern: eks-{cluster}-cicd + * Group + * group names needed + * Permissions + * defined in K8S thing .. + * Files for configuration of K8S + * yml: + * tf: + +# AWS Commands + +```shell +aws eks get token +aws eks update-config +``` + +## CICD + +There are a number of ways to handle the CICD pipline. How in part depends on whether it runs outside of the cluster or inside of the cluster. These + +* service account for CICD (say, s-adsd-cicd-deployer) with full permissions to ECR and to get eks config and token along with k8s permissions through +ConfigMap aws-auth. +* role for CICD per cluster, say r-eks-{cluster}-cicd-deployer with same permissions above. +* These are all account specific, so running CICD across multiple accounts will need multiple IAM accounts and roles. 
+* consider some central way of doing this so a CICD can deploy to any cluster in any account in any region. +* perhaps start with a smaller per cluster user/role and work towards a better solution later + +# TBD + +1. Determine how to create a default:default IRSA role which grants little to no AWS permissions (maybe sts get-caller-identity). +1. Create a module for IRSA +1. Explore the use of the OIDC integration with Access Manager +1. Develop a strategy for CICD access + +# Links + +* [AWS RBAC](https://aws.amazon.com/premiumsupport/knowledge-center/eks-iam-permissions-namespaces/) +* [K8S RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) +* [Add User Role](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html) +* [OIDC Identity Provider](https://docs.aws.amazon.com/eks/latest/userguide/authenticate-oidc-identity-provider.html) +* [OIDC with MicroFocus](https://community.microfocus.com/cyberres/accessmanager/w/access_manager_tips/27815/access-amazon-web-services-using-amazon-cognito-for-mobile-applications-and-netiq-access-manager-4-1) + + diff --git a/examples/full-cluster-tf-upgrade/1.29/addons/.tf-control b/examples/full-cluster-tf-upgrade/1.29/addons/.tf-control new file mode 100644 index 0000000..280f449 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/addons/.tf-control @@ -0,0 +1,20 @@ +# .tf-control +# allows for setting a specific command to be used for tf-* commands under this git repo +# see tf-control.sh help for more info + +TFCONTROL_VERSION="1.0.5" + +TFCOMMAND="terraform_latest" +# TF_CLI_CONFIG_FILE=PATH-TO-FILE/.tf-control.tfrc +# TFARGS="" +# TFNOLOG="" +# TFNOCOLOR="" + +# use the following to force a specific version. An upgrade of an existing 0.12.31 to 1.x +# needs you to cycle through 0.13.17, 0.14.11, and then latest (0.15.5 not needed). Other +# steps in between. 
See https://github.e.it.census.gov/terraform/support/tree/master/docs/how-to/terraform-upgrade for details +# +#TFCOMMAND="terraform_0.12.31" +#TFCOMMAND="terraform_0.13.7" +#TFCOMMAND="terraform_0.14.11" +#TFCOMMAND="terraform_0.15.5" diff --git a/examples/full-cluster-tf-upgrade/1.29/addons/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.29/addons/.tf-control.tfrc new file mode 100644 index 0000000..7425488 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/addons/.tf-control.tfrc @@ -0,0 +1,24 @@ +TFCONTROL_VERSION="1.0.5" + +# https://www.terraform.io/docs/cli/config/config-file.html +plugin_cache_dir = "/data/terraform/terraform.d/plugin-cache" +#disable_checkpoint = true + +provider_installation { +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "*/*/*" ] +# } + filesystem_mirror { + path = "/data/terraform/terraform.d/providers" + include = [ "*/*/*" ] + } +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "external.terraform.census.gov/*/*" ] +# } + direct { + include = [ "*/*/*" ] + } +} + diff --git a/examples/full-cluster-tf-upgrade/1.29/addons/README.addons.md b/examples/full-cluster-tf-upgrade/1.29/addons/README.addons.md new file mode 100644 index 0000000..8c1e730 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/addons/README.addons.md @@ -0,0 +1,3 @@ +tf-aws eks describe-addon-versions --kubernetes-version 1.28 + + diff --git a/examples/full-cluster-tf-upgrade/1.29/addons/README.ebs.md b/examples/full-cluster-tf-upgrade/1.29/addons/README.ebs.md new file mode 100644 index 0000000..de10f70 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/addons/README.ebs.md @@ -0,0 +1,75 @@ +# eks-ebs + +With EKS based upon Kubernetes 1.23 or higher, the default gp2 storage class will no longer auto-provision persistent volumes. 
+While an EFS-based auto-provisioner which supports all types of persistent volumes has been installed, it does not perform as well as a gp2/gp3 based perstent volume. +The eks-ebs module installs an ebs-provisioner in the cluster with a storage class of `gp3-encrypted`, deletes the pre-existing `gp2` storage class, and makes `gp3-encrypted` the default storage class for the cluster. + +## Parameters + +| Name | Description | +| ---- | ----------- | +| region | The AWS region that EKS cluster is located. | +| cluster_name | The name of the cluster in which ebs-provisioner will be installed. | +| aws_ebs_csi_driver_version | Which version of the aws-ebs-csi-driver helm chart to use. Currently defaults to 2.14.1. | + +## Updating the aws-ebs-csi-driver chart + +When using a private VPC, the helm chart cannot be downloaded from "https://kubernetes-sigs.github.io/aws-ebs-csi-driver/" during installation. +A local copy of the chart is maintained within the terraform script. +The lastest version of the helm chart can be found by looking at https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/charts/aws-ebs-csi-driver/Chart.yaml and checking the `version:` tag (not the `appVersion` tag.) +To update this helm chart to the latest version, the procedure is to: + +```script +cd charts +helm repo add aws-ebs-csi-driver https://kubernetes-sigs.github.io/aws-ebs-csi-driver/ +helm repo update +rm -fr aws-ebs-csi-driver +helm pull aws-ebs-csi-driver/aws-ebs-csi-driver --untar +``` + +After completing these steps, be sure to examine aws-ebs-csi-driver/values.yaml and confirm that the tags listed for the sidecar images match the tags assigned by default in input.tf. 
+For example, the values.yaml file: + +```json +sidecars: + livenessProbe: + image: + repository: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe + tag: v2.2.0-eks-1-18-2 + pullPolicy: IfNotPresent + resources: {} + nodeDriverRegistrar: + image: + repository: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar + tag: v2.1.0-eks-1-18-2 + pullPolicy: IfNotPresent + resources: {} + csiProvisioner: + image: + repository: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner + tag: v2.1.1-eks-1-18-2 + pullPolicy: IfNotPresent + resources: {} +``` + +Entries in input.tf: + +```hcl +variable "livenessprobe_tag" { + description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/livenessp +robe to use." + default = "v2.2.0-eks-1-18-2" +} + +variable "node_driver_registrar_tag" { + description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/node-driv +er-registrar to use." + default = "v2.1.0-eks-1-18-2" +} + +variable "external_provisioner_tag" { + description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/external- +provisioner to use." + default = "v2.1.1-eks-1-18-2" +} +``` diff --git a/examples/full-cluster-tf-upgrade/1.29/addons/README.md b/examples/full-cluster-tf-upgrade/1.29/addons/README.md new file mode 100644 index 0000000..37bb3c8 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/addons/README.md @@ -0,0 +1,122 @@ +# EBS + +Staring with EKS based upon Kubernetes 1.23, the gp2 storage class no longer supports auto-provisiong. +This module sets up the needed resources to provision EBS-based gp3 persistent volumes. See [this](README.efs.md) for more details. 
+ +## Links + +* https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html +* https://github.com/kubernetes-sigs/aws-ebs-csi-driver +* https://github.com/kubernetes-sigs/aws-ebs-csi-driver/issues/722 +* https://github.com/kubernetes-sigs/aws-ebs-csi-driver/issues/1086 + +## Initialize + +* Proxy setup + +Proxy is needed because system may not have access to the `registry.terraform.io` site directory, +and if indirectly, it may not be able to handle a proxy redirect. You may not need to use this, but if you get +errors from the `tf-init`, this is your first thing to setup. + +```shell +export HTTP_PROXY=http://proxy.tco.census.gov:3128 +export HTTPS_PROXY=http://proxy.tco.census.gov:3128 +``` + +## Terraform Automated + +A `tf-run.data` file exists here, so the simplest way to implement is with the `tf-run.sh` script. + +* copy the `remote_state.yml` from the parent and update `directory` to be the current directory +* run the tf-run.sh + +```console +% tf-run.sh apply +``` + +* example of the `tf-run.sh` steps + +This is part of a larger cluster configuration, so at the end of the run it indicates another directory +to visit when done. + +```console +<<>> +``` + +It is highly recommended to use the `tf-run.sh` approach. + +## Terraform Manual + + +```shell +tf-directory-setup.py -l none +setup-new-directory.sh +tf-init +``` + +* Apply the rest + +This must be done from a system with the skopeo command, so RHEL8+. + +To use the local install, the ebs/charts/ directory +must be populated with the expected code (see [README.md](README.md)) outside of terraform, +much like the .tf files are created. Currently, as the box we run this from has internet access, +we can deploy by pulling the helm stuff from the internet. + +```shell +tf-apply +tf-directory-setup.py -l s3 +``` + +## Post Setup Examination + +This gives us (look at the ebs-csi-* ones) to see what was setup. 
Your `kubectl` configuration file +needs to be setup (one is extracted in `setup/kube.config` as part of this configuration). + +```console +% kubectl --kubeconfig setup/kube.config get pods -n kube-system +<<>> +``` + +* Create PVC Automated + +Use the `persistent-volume.tf`, which is setup by default, and should happen as part of the final apply above. + +* Create PVC Manually + +```json +# pvc.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ebs-test3-claim +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 1Gi + storageClassName: gp3-encrypted +``` + +* Examinine the PV and PVC + +```console +% kubectl get pv +No resources found +% kubectl get pvc +No resources found in default namespace. +% kubectl apply -f pvc.yaml +persistentvolumeclaim/ebs-test3-claim created +% kubectl get pvc +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +ebs-test3-claim Pending gp3-encrypted 39s +``` + +* Describing the PVC + +```shell +kubectl --kubeconfig setup/kube.config describe pvc ebs-test3-claim +``` + diff --git a/examples/full-cluster-tf-upgrade/1.29/addons/addon_coredns.tf b/examples/full-cluster-tf-upgrade/1.29/addons/addon_coredns.tf new file mode 100644 index 0000000..8c12156 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/addons/addon_coredns.tf @@ -0,0 +1,11 @@ +resource "aws_eks_addon" "coredns" { + count = lookup(local.addon_versions, "coredns", null) != null ? 
1 : 0 + + cluster_name = var.cluster_name + addon_name = "coredns" + addon_version = lookup(local.addon_versions, "coredns") + # resolve_conflicts = "OVERWRITE" + # note OVERWRITE resets to eks addon defaults, PRESERVE uses any values set here + resolve_conflicts_on_create = "OVERWRITE" + resolve_conflicts_on_update = "OVERWRITE" +} diff --git a/examples/full-cluster-tf-upgrade/1.29/addons/addon_ebs-csi.tf b/examples/full-cluster-tf-upgrade/1.29/addons/addon_ebs-csi.tf new file mode 100644 index 0000000..2ff0e33 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/addons/addon_ebs-csi.tf @@ -0,0 +1,127 @@ +## resource "aws_iam_role" "cluster_ebs_role" { +## name = "${var.cluster_name}_ebs_driver_role" +## assume_role_policy = < list +** START: start=1636558903 +* 1 COMMAND> tf-directory-setup.py -l none -f +* 2 COMMAND> setup-new-directory.sh +* 3 COMMAND> tf-init -upgrade +* 4 tf-plan +* 5 COMMAND> tf-directory-setup.py -l s3 +* 6 STOP> cd ../efs and tf-run.sh apply +** END: start=1636558903 end=1636558903 elapsed=0 logfile=logs/run.plan.20211110.1636558903.log (not-created) +``` + +It is highly recommended to use the `tf-run.sh` approach. + +## Terraform Manual + +First, copy the `remote_state.yml` from the parent and update `directory` to be the current directory. + +```shell +tf-directory-setup.py -l none +setup-new-directory.sh +tf-init +```` + +* Apply the rest + +```shell +tf-apply +tf-directory-setup.py -l s3 +``` + +## Post Setup Examination + +Your `kubectl` configuration file needs to be setup (one is extracted in `setup/kube.config` as part of this configuration). 
+ +```console +% kubectl --kubeconfig setup/kube.config get configmap -n kube-system aws-auth +NAME DATA AGE +aws-auth 2 44d +``` diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/aws-auth.auto.tfvars b/examples/full-cluster-tf-upgrade/1.29/aws-auth/aws-auth.auto.tfvars new file mode 100644 index 0000000..6898918 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/aws-auth/aws-auth.auto.tfvars @@ -0,0 +1,28 @@ +aws_auth_users = [ + { + userarn = "" + aws_username = "a-ashle001" + username = "admin" + groups = ["system:masters", "eks-console-dashboard-full-access-group"] + }, + { + userarn = "" + aws_username = "a-badra001" + username = "admin" + groups = ["system:masters", "eks-console-dashboard-full-access-group"] + }, +] +aws_auth_roles = [ + { + rolearn = "" + aws_rolename = "r-inf-cloud-admin" + username = "admin" + groups = ["system:masters", "eks-console-dashboard-full-access-group"] + }, + { + rolearn = "" + aws_rolename = "r-inf-terraform" + username = "admin" + groups = ["system:masters", "eks-console-dashboard-full-access-group"] + }, +] diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/config_map.aws-auth.yaml.tpl b/examples/full-cluster-tf-upgrade/1.29/aws-auth/config_map.aws-auth.yaml.tpl new file mode 100644 index 0000000..7c58ada --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/aws-auth/config_map.aws-auth.yaml.tpl @@ -0,0 +1,17 @@ +data: +%{ if length(roles) > 0 } + mapRoles: | + %{ for k, v in roles ~} + - rolearn: ${v.rolearn} + username: ${v.username} + groups: ${v.groups} + %{ endfor ~} +%{ endif } +%{ if length(users) > 0 } + mapUsers: | + %{ for k, v in users ~} + - userarn: ${v.userarn} + username: ${v.username} + groups: ${v.groups} + %{ endfor ~} +%{ endif } diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.29/aws-auth/data.eks-subdirectory.tf new file mode 120000 index 0000000..43b5430 --- /dev/null +++ 
b/examples/full-cluster-tf-upgrade/1.29/aws-auth/data.eks-subdirectory.tf @@ -0,0 +1 @@ +../includes.d/data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.29/aws-auth/kubeconfig.eks-subdirectory.tf new file mode 120000 index 0000000..e3750a4 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/aws-auth/kubeconfig.eks-subdirectory.tf @@ -0,0 +1 @@ +../includes.d/kubeconfig.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/patch-aws-auth.tf b/examples/full-cluster-tf-upgrade/1.29/aws-auth/patch-aws-auth.tf new file mode 100644 index 0000000..88e0bbe --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/aws-auth/patch-aws-auth.tf @@ -0,0 +1,135 @@ +data "kubernetes_config_map" "aws-auth" { + metadata { + namespace = "kube-system" + name = "aws-auth" + } +} + +data "aws_iam_user" "auth_users" { + for_each = toset([for u in local.joined_auth_users : u.aws_username]) + user_name = each.key +} + +data "aws_iam_role" "auth_roles" { + for_each = toset([for r in local.joined_auth_roles : r.aws_rolename]) + name = each.key +} + + +locals { + existing_roles_string = lookup(data.kubernetes_config_map.aws-auth.data, "mapRoles", "") + existing_users_string = lookup(data.kubernetes_config_map.aws-auth.data, "mapUsers", "") + + existing_roles = local.existing_roles_string != "" ? yamldecode(local.existing_roles_string) : [] + existing_users = local.existing_users_string != "" ? 
yamldecode(local.existing_users_string) : [] + + joined_auth_users = concat(local.aws_auth_users, var.aws_auth_users) + joined_auth_roles = concat(local.aws_auth_roles, var.aws_auth_roles) + + mapped_auth_users = [for u in local.joined_auth_users : { + userarn = data.aws_iam_user.auth_users[u.aws_username].arn + aws_username = u.aws_username + username = u.username + groups = u.groups + }] + mapped_auth_roles = [for u in local.joined_auth_roles : { + rolearn = data.aws_iam_role.auth_roles[u.aws_rolename].arn + aws_rolename = u.aws_rolename + username = u.username + groups = u.groups + }] + + merged_users = merge( + { for user in local.existing_users : user.userarn => user }, + # { for user in local.aws_auth_users : user.userarn => user }, + # { for user in var.aws_auth_users : user.userarn => user } + { for user in local.mapped_auth_users : user.userarn => user }, + ) + + merged_roles = merge( + { for role in local.existing_roles : role.rolearn => role }, + # { for role in local.aws_auth_roles : role.rolearn => role }, + # { for role in var.aws_auth_roles : role.rolearn => role } + { for role in local.mapped_auth_roles : role.rolearn => role }, + ) + + # patch = yamlencode({ + # "data" = { + # "mapUsers" = values(local.merged_users) + # "mapRoles" = values(local.merged_roles) + # } + # }) + patch = < 0~} + mapRoles: | +%{for k, v in local.merged_roles~} + - rolearn: ${v.rolearn} + username: ${v.username} + groups: +%{for g in v.groups~} + - ${g} +%{endfor~} +%{endfor~} +%{endif~} +%{if length(local.merged_users) > 0~} + mapUsers: | +%{for k, v in local.merged_users~} + - userarn: ${v.userarn} + username: ${v.username} + groups: +%{for g in v.groups~} + - ${g} +%{endfor~} +%{endfor~} +%{endif~} +EOM + + # patch_t = templatefile("${path.root}/config_map.aws-auth.yaml.tpl",{ + # users = values(local.merged_users) + # roles = values(local.merged_roles) + # }) +} + +resource "null_resource" "patch-aws-auth" { + triggers = { + users = join(",", 
sort(keys(local.merged_users))) + roles = join(",", sort(keys(local.merged_roles))) + } + depends_on = [null_resource.kubeconfig] + # provisioner "local-exec" { + # command = "if [ -z $KUBECONFIG ]; then 'echo missing KUBECONFIG'; exit 1; else exit 0; fi" + # } + # provisioner "local-exec" { + # command = "if [ ! -r $KUBECONFIG ]; then 'echo unreadable KUBECONFIG'; exit 1; else exit 0; fi" + # } + # provisioner "local-exec" { + # command = "which kubectl > /dev/null 2>&1; if [ $? != 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" + # } + provisioner "local-exec" { + command = "test -d setup || mkdir setup" + } + provisioner "local-exec" { + command = "echo '${local.patch}' > setup/config_map.patch.yaml" + } + # provisioner "local-exec" { + # command = "echo '${local.patch_t}' > config_map.patch_t.yaml" + # } + provisioner "local-exec" { + # command = "kubectl patch --type merge -n kube-system configmap/aws-auth -p '${local.patch}'" + command = "kubectl --kubeconfig ${path.root}/setup/kube.config patch --type merge -n kube-system configmap/aws-auth --patch-file setup/config_map.patch.yaml" + } +} + +# output "map" { +# value = data.kubernetes_config_map.aws-auth +# } +# output "map_output" { +# value = { +# "object" = data.kubernetes_config_map.aws-auth +# "existing_users" = local.existing_users +# "existing_roles" = local.existing_roles +# "patch" = local.patch +# "patch_text" = local.patch_t +# } +# } diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/prefixes.tf b/examples/full-cluster-tf-upgrade/1.29/aws-auth/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/aws-auth/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/providers.tf b/examples/full-cluster-tf-upgrade/1.29/aws-auth/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ 
b/examples/full-cluster-tf-upgrade/1.29/aws-auth/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/region.tf b/examples/full-cluster-tf-upgrade/1.29/aws-auth/region.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/aws-auth/region.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/settings.aws-auth.tf b/examples/full-cluster-tf-upgrade/1.29/aws-auth/settings.aws-auth.tf new file mode 100644 index 0000000..4d3259d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/aws-auth/settings.aws-auth.tf @@ -0,0 +1,11 @@ +locals { + aws_auth_users = [] + aws_auth_roles = [ + { + rolearn : "" + aws_rolename : format("%v%v-cluster-admin", local._prefixes["eks-role"], var.cluster_name) + username : "admin" + groups = ["system:masters", "eks-console-dashboard-full-access-group"] + }, + ] +} diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/tf-run.data b/examples/full-cluster-tf-upgrade/1.29/aws-auth/tf-run.data new file mode 100644 index 0000000..8afedd9 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/aws-auth/tf-run.data @@ -0,0 +1,14 @@ +VERSION 1.3.0 +REMOTE-STATE +COMMAND tf-directory-setup.py -l none -f +COMMAND setup-new-directory.sh +COMMAND tf-init -upgrade + +LINKTOP init +LINK versions.tf +LINK settings.auto.tfvars +LINK variables.application_tags.auto.tfvars + +ALL +COMMAND tf-directory-setup.py -l s3 +STOP cd ../efs and tf-run.sh apply diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/tf-run.destroy.data b/examples/full-cluster-tf-upgrade/1.29/aws-auth/tf-run.destroy.data new file mode 100644 index 0000000..fcf987a --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/aws-auth/tf-run.destroy.data @@ -0,0 +1,9 @@ +VERSION 1.0.1 +BACKUP-STATE +COMMAND tf-init +COMMAND tf-state list + +COMMENT We do not want to remove anything here, 
because once you do, you will not be able to access the cluster for the destroy step. +COMMENT Destroying the cluster will take care of this directory. + +STOP diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/variables.aws-auth.tf b/examples/full-cluster-tf-upgrade/1.29/aws-auth/variables.aws-auth.tf new file mode 100644 index 0000000..05708d5 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/aws-auth/variables.aws-auth.tf @@ -0,0 +1,23 @@ +# maybe just ignore the ARN entirely and force a read + +variable "aws_auth_users" { + description = "A list of objects where each object has userarn, username, k8s_username, and groups, where groups is a list of groups to associate with the user. Leaving userarn as an empty string will pull the user ARN from AWS." + type = list(object({ + userarn = string + aws_username = string + username = string + groups = list(string) + })) + default = [] +} + +variable "aws_auth_roles" { + description = "A list of objects where each object has rolearn, rolename, k8s_username, and groups, where groups is a list of groups to associate with the role. Leaving rolearn as an empty string will pull the role ARN from AWS." 
+ type = list(object({ + rolearn = string + aws_rolename = string + username = string + groups = list(string) + })) + default = [] +} diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.29/aws-auth/variables.eks.tf new file mode 120000 index 0000000..7dd95db --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/aws-auth/variables.eks.tf @@ -0,0 +1 @@ +../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/version.tf b/examples/full-cluster-tf-upgrade/1.29/aws-auth/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/aws-auth/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/aws-auth/versions.tf b/examples/full-cluster-tf-upgrade/1.29/aws-auth/versions.tf new file mode 120000 index 0000000..8bd0ff1 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/aws-auth/versions.tf @@ -0,0 +1 @@ +../versions.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/bin/copy_image.sh b/examples/full-cluster-tf-upgrade/1.29/bin/copy_image.sh new file mode 100755 index 0000000..60fad27 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/bin/copy_image.sh @@ -0,0 +1,326 @@ +#!/bin/bash + +############################################################################### +# This script uses skopeo to copy a docker image from one repository to +# another. The primary intent is to copy the image from a public repository +# to a private repository. +############################################################################### +# Expected environment variables: +# +# SOURCE_IMAGE - The image to copy to to another location. Example: +# paradyme-docker-local.jfrog.io/appetizer:dev +# SOURCE_INSECURE - Set this to 1 of the source repository is in an insecure +# docker registry. 
Set it to 0 or leave it unset if the +# docker registry is secure. +# +# DESTINATION_IMAGE - The image to copy to to another location. Example: +# paradyme-docker-local.jfrog.io/appetizer:dev +# DESTINATION_INSECURE - Set this to 1 of the destination repository is in +# an insecure docker registry. Set it to 0 or leave it unset +# if the docker registry is secure. +# +# When the source repository requires authentication to access, configure +# these values. Otherwise do not set them. +# +# SOURCE_USERNAME - The username to supply for credentialed access to the +# repository. `anthony-zawacki` is an example. +# SOURCE_PASSWORD - The password to supply for credentialed access to the +# repository. An artifactory API_KEY for example. +# +# When the destination repository requires authentication to access, configure +# these values. Otherwise do not set them. +# +# DESTINATION_USERNAME - The username to supply for credentialed access to the +# repository. `anthony-zawacki` is an example. +# DESTINATION_PASSWORD - The password to supply for credentialed access to the +# repository. The output of: +# `aws ecr get-login-password --region us-east-2` for example. +# +# If the destination repository does not exist, the copy_image.sh script will +# create the repository automatically. In cases where the newly created +# repository should have a mutable image (perhaps always pushing to a `latest` +# tag in a development environment), it is possible to configure the +# repository to allow mutability by configuring this environment variable. +# Otherwise, do not set it. +# +# +############################################################################### + +ensure_skopeo() { + skopeo=$(command -v skopeo) + if [[ "$skopeo" == "" ]]; then + echo "The required executable, skopeo, was not found." + echo "Please install it and ensure it is in the path." 
+ return 1 + fi + + return 0 +} + +usage() { + local msg="${1}"; shift; + + cat < (SOURCE_IMAGE) The name of the image to copy to another + registry. + -src-username (SOURCE_USERNAME) Optional parameter in cases where + the source registry requires authentication. Use this username for the + credentials. + -src-password (SOURCE_PASSWORD) Optional parameter in cases where + the source registry requires authentication. Use this password for the + credentials. + -src-insecure (SOURCE_INSECURE=1) Optional parameter indicates that the + source registry is not a secured registry and that tls validation + should be disabled for the processing of the image. The default is + to assume that the source registry is secured. + +src-insecure (SOURCE_INSECURE=0) Optional parameter explicitly indicating + that the source registry is secure and TLS must be used to access the + registry. + + -dest-image (DESTINATION_IMAGE) The name of the image to to use in the + destination registry. + -dest-username (DESTINATION_USERNAME) Optional parameter in cases + where the destination registry requires authentication. Use this + username for the credentials. + -dest-password (DESTINATION_PASSWORD) Optional parameter in cases + where the destination registry requires authentication. Use this + password for the credentials. + -dest-insecure (DESTINATION_INSECURE=1) Optional parameter indicates that the + destination registry is not a secured registry and that tls validation + should be disabled for the processing of the image. The default is + to assume that the destination registry is secured. + +dest-insecure (DESTINATION_INSECURE=0) Optional parameter explicitly + indicating that the destination registry is secure and TLS must be + used to access the registry. + -dest-mutable (DESTINATION_MUTABLE=1) Optional parameter indicates that if + creating the ECR repository is required, create it allowing mutable + images. 
+ +dest-mutable (DESTNATION_MUTABLE=0) Optional parameter explicitly + indicating that if creating the ECR repository is required, create it + with immutable images. + +EOF + + exit 1 +} + +parse_commandline() { + local key + local positional=() + + while [[ $# -gt 0 ]]; do + key="$1"; shift + + case "$key" in + -src-image) + SOURCE_IMAGE="$1"; shift + ;; + -src-username) + SOURCE_USERNAME="$1"; shift + ;; + -src-password) + SOURCE_PASSWORD="$1"; shift + ;; + -src-insecure) + SOURCE_INSECURE=1 + ;; + +src-insecure) + SOURCE_INSECURE=0 + ;; + -dest-image) + DESTINATION_IMAGE="$1"; shift + ;; + -dest-username) + DESTINATION_USERNAME="$1"; shift + ;; + -dest-password) + DESTINATION_PASSWORD="$1"; shift + ;; + -dest-insecure) + DESTINATION_INSECURE=1 + ;; + +dest-insecure) + DESTINATION_INSECURE=0 + ;; + -dest-mutable) + DESTINATION_MUTABLE=1 + ;; + +dest-mutable) + DESTINATION_MUTABLE=0 + ;; + *) + positional+=("$key") + ;; + esac + done + + if [[ ${#positional[@]} -gt 0 ]]; then + usage "Unrecognized parameters: ${positional[*]}" + fi +} + +ensure_parameters() { + if [[ "$SOURCE_IMAGE" == "" ]]; then + usage "Must specify SOURCE_IMAGE" + fi + + if [[ "$DESTINATION_IMAGE" == "" ]]; then + usage "Must specify DESTINATION_IMAGE" + fi + + if [[ "$SOURCE_USERNAME" != "" || "$SOURCE_PASSWORD" != "" ]]; then + if [[ "$SOURCE_USERNAME" == "" || "$SOURCE_PASSWORD" == "" ]]; then + usage "Must specify both the SOURCE_USERNAME and SOURCE_PASSWORD." + fi + fi + + if [[ "$DESTINATION_USERNAME" != "" || "$DESTINATION_PASSWORD" != "" ]]; then + if [[ "$DESTINATION_USERNAME" == "" || "$DESTINATION_PASSWORD" == "" ]]; then + usage "Must specify both the DESTINATION_USERNAME and DESTINATION_PASSWORD." 
+ fi + fi + + return 0 +} + +image_exists() { + declare src_creds="$SOURCE_USERNAME:$SOURCE_PASSWORD" + declare command=(skopeo inspect --insecure-policy) + + if [[ "$SOURCE_USERNAME" != "" ]]; then +# command+=(--src-creds "$src_creds") + command+=(--creds "$src_creds") + else +# command+=(--src-no-creds) + command+=(--no-creds) + fi + +# if [[ "$SOURCE_INSECURE" == "1" ]]; then +# command+=(--src-tls-verify=false) +# else +# command+=(--src-tls-verify=true) +# fi + + command+=("docker://$SOURCE_IMAGE") + + ${command[@]} > /dev/null 2>&1 + status=$? + echo "* source_image_exists() status=$status" + # return 0 if it does, 1 if not + return $? +} + +destination_image_exists() { + declare dst_creds="$DESTINATION_USERNAME:$DESTINATION_PASSWORD" + declare command=(skopeo inspect --insecure-policy) + + if [[ "$DESTINATION_USERNAME" != "" ]]; then +# command+=(--dest-creds "$dst_creds") + command+=(--creds "$dst_creds") + else +# command+=(--dest-no-creds) + command+=(--no-creds) + fi + +# if [[ "$DESTINATION_INSECURE" == "1" ]]; then +# command+=(--dest-tls-verify=false) +# else +# command+=(--dest-tls-verify=true) +# fi + + command+=("docker://$DESTINATION_IMAGE") + + ${command[@]} > /dev/null 2>&1 + status=$? + echo "* destination_image_exists() status=$status" + # return 0 if it does, 1 if not + return $? 
+} + +copy_image() { + declare src_creds="$SOURCE_USERNAME:$SOURCE_PASSWORD" + declare dest_creds="$DESTINATION_USERNAME:$DESTINATION_PASSWORD" + declare command=(skopeo copy --insecure-policy) + + if [[ "$SOURCE_USERNAME" != "" ]]; then + command+=(--src-creds "$src_creds") + else + command+=(--src-no-creds) + fi + + if [[ "$SOURCE_INSECURE" == "1" ]]; then + command+=(--src-tls-verify=false) + else + command+=(--src-tls-verify=true) + fi + + if [[ "$DESTINATION_USERNAME" != "" ]]; then + command+=(--dest-creds "$dest_creds") + else + command+=(--dest-no-creds) + fi + + if [[ "$DESTINATION_INSECURE" == "1" ]]; then + command+=(--dest-tls-verify=false) + else + command+=(--dest-tls-verify=true) + fi + + command+=("docker://$SOURCE_IMAGE" "docker://$DESTINATION_IMAGE") + + if [[ "$DESTINATION_IMAGE" == *.dkr.ecr.*.amazonaws.com/* ]]; then + echo "ECR registry detected, ensuring repository." + declare repository="${DESTINATION_IMAGE##*.amazonaws.com/}" + repository="${repository%%:*}" + declare region="${DESTINATION_IMAGE%%.amazonaws.com/*}" + region="${region##*.}" + export AWS_PAGER="" + if ! aws ecr describe-repositories \ + --region "$region" \ + --output "json" \ + --repository-names "$repository" \ + > /dev/null 2>&1; then + local mutability="IMMUTABLE" + if [ "$DESTINATION_MUTABLE" == "1" ]; then + mutability="MUTABLE" + fi + echo "creating repository $repository." + aws ecr create-repository \ + --image-tag-mutability "$mutability" \ + --image-scanning-configuration "scanOnPush=true" \ + --encryption-configuration "encryptionType=KMS" \ + --repository-name "$repository" \ + --region "$region" \ + > /dev/null 2>&1 || return $? + else + echo "repository $repository exists." + fi + fi + + echo "Copying $SOURCE_IMAGE" + echo "to $DESTINATION_IMAGE" + + while ! ${command[@]}; do + echo "Retrying uploading image..." + done +} + + +ensure_image() { + ( image_exists && ! 
destination_image_exists ) || copy_image +} + +main() { + ensure_skopeo && \ + parse_commandline "$@" && \ + ensure_parameters && \ + ensure_image && \ + echo "Done" +} + +return 0 > /dev/null 2>&1 || main "$@" + diff --git a/examples/full-cluster-tf-upgrade/1.29/bin/fix-terminating-namespace.sh b/examples/full-cluster-tf-upgrade/1.29/bin/fix-terminating-namespace.sh new file mode 100755 index 0000000..7282e79 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/bin/fix-terminating-namespace.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +# fix_terminating_namespace() { +# local -r namespace="${1}"; shift; +# +# kubectl get ns "$namespace" 2>&1 | grep -q Terminating +# +# if [ $? -eq 0 ]; then +# kubectl get namespace "$namespace" -o json | \ +# grep -v '^ "kubernetes"$' | \ +# kubectl replace --raw "/api/v1/namespaces/$namespace/finalize" -f - +# else +# echo "Namespace $namespace not found or not stuck in terminating state." +# fi +# } +# } + +namespace="${1}" +shift; + +kubectl get ns "$namespace" 2>&1 | grep -q Terminating +if [ $? -eq 0 ] +then + kubectl get namespace "$namespace" -o json |\ + grep -v '^ "kubernetes"$' |\ + kubectl replace --raw "/api/v1/namespaces/$namespace/finalize" -f - +else + echo "Namespace $namespace not found or not stuck in terminating state." 
+fi diff --git a/examples/full-cluster-tf-upgrade/1.29/bin/remove-ecr.sh b/examples/full-cluster-tf-upgrade/1.29/bin/remove-ecr.sh new file mode 100755 index 0000000..06c7975 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/bin/remove-ecr.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +test -r /apps/terraform/etc/aws-functions.sh && source /apps/terraform/etc/aws-functions.sh + +REPO=$1 + +if [ -z $REPO ] +then + echo "missing respository, exiting" + exit 1 +fi + +echo "* listing repositories for $REPO" +#tf-aws ecr describe-repositories --query 'repositories[*].{a1:repositoryName,a2:repositoryArn}' --output text +REPOLIST=$(tf-aws ecr describe-repositories --query 'repositories[*].{a1:repositoryName}' --output text | grep ^$REPO) +for f in $REPOLIST +do + echo " $f" +done +echo "" + +echo "* listing images for each repo in $REPO" +for f in $REPOLIST +do + echo "> $f" + tf-aws ecr list-images --repository-name $f --query 'imageIds[*].{a1:imageDigest,a2:imageTag}' --output text +done +echo "" + +rc=1 +cc=0 +echo "* removing images for each repo in $REPO" +for f in $REPOLIST +do + echo "> $f" + c=1 + tf-aws ecr list-images --repository-name $f --query 'imageIds[*].{a1:imageDigest,a2:imageTag}' --output text | while read digest tag + do + echo "[$rc.$c] rm $digest tag $tag" + true tf-aws ecr batch-delete-image --repository-name $f --image-ids imageDigest=$digest + status=$? 
+ if [ $status == 0 ] + then + cc=$(( $cc + 1 )) + fi + c=$(( $c + 1 )) + done + echo "= removed $c images from $f" + rc=$(( $rc + 1 )) +done +echo "" +echo "= $rc repos, removed $cc images" + +echo "* deleting the repo for each repo in $REPO" +for f in $REPOLIST +do + echo "> $f" + tf-aws ecr delete-repository --repository-name $f +done +echo "" + + +## 2827 2023-05-11 10:20:37 tf-aws ecr describe-repositories -- query 'repositories[*].{a1.repositoryName,a2:respositoryArn}' --output text +## 2828 2023-05-11 10:20:49 tf-aws ecr describe-repositories --query 'repositories[*].{a1.repositoryName,a2:respositoryArn}' --output text +## 2829 2023-05-11 10:20:57 tf-aws ecr describe-repositories --query 'repositories[*].{a1:repositoryName,a2:respositoryArn}' --output text +## 2830 2023-05-11 10:21:20 tf-aws ecr describe-repositories --query 'repositories[*].{a1:repositoryName,a2:repositoryArn}' --output text +## 2831 2023-05-11 10:21:32 tf-aws ecr describe-repositories --query 'repositories[*].{a1:repositoryName,a2:repositoryArn}' --output text|grep eks/ditd-partnerportal-test/ +## 2833 2023-05-11 10:21:51 tf-aws ecr help|grep image +## 2834 2023-05-11 10:22:14 tf-aws ecr list-images +## 2835 2023-05-11 10:22:26 tf-aws ecr list-images --repository-name eks/ditd-partnerportal-test/csi-snapshotter +## 2836 2023-05-11 10:22:45 tf-aws ecr batch-delete-image +## 2837 2023-05-11 10:22:57 tf-aws ecr batch-delete-image --repository-name eks/ditd-partnerportal-test/csi-snapshotter +## 2838 2023-05-11 10:23:05 tf-aws ecr batch-delete-image --repository-name eks/ditd-partnerportal-test/csi-snapshotter --image-ids --help +## 2839 2023-05-11 10:23:11 tf-aws ecr batch-delete-image --repository-name eks/ditd-partnerportal-test/csi-snapshotter --image-ids +## 2840 2023-05-11 10:23:19 tf-aws ecr batch-delete-image help # --repository-name eks/ditd-partnerportal-test/csi-snapshotter --image-ids +## 2841 2023-05-11 10:23:34 tf-aws ecr list-images --repository-name 
eks/ditd-partnerportal-test/csi-snapshotter +## 2842 2023-05-11 10:23:58 history > remove-ecr.sh diff --git a/examples/full-cluster-tf-upgrade/1.29/bin/show-k8s-things.sh b/examples/full-cluster-tf-upgrade/1.29/bin/show-k8s-things.sh new file mode 100755 index 0000000..c5f6290 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/bin/show-k8s-things.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +for f in all clusterrolebindings clusterroles nodes pods pvc pv rolebindings roles sc secrets services +do + echo "kubectl --kubeconfig setup/kube.config get $f --all-namespaces -o wide > OUT.get-$f.txt" + kubectl --kubeconfig setup/kube.config get $f --all-namespaces -o wide > OUT.get-$f.txt +done diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/.tf-control b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/.tf-control new file mode 100644 index 0000000..280f449 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/.tf-control @@ -0,0 +1,20 @@ +# .tf-control +# allows for setting a specific command to be used for tf-* commands under this git repo +# see tf-control.sh help for more info + +TFCONTROL_VERSION="1.0.5" + +TFCOMMAND="terraform_latest" +# TF_CLI_CONFIG_FILE=PATH-TO-FILE/.tf-control.tfrc +# TFARGS="" +# TFNOLOG="" +# TFNOCOLOR="" + +# use the following to force a specific version. An upgrade of an existing 0.12.31 to 1.x +# needs you to cycle through 0.13.7, 0.14.11, and then latest (0.15.5 not needed). Other +# steps in between.
See https://github.e.it.census.gov/terraform/support/tree/master/docs/how-to/terraform-upgrade for details +# +#TFCOMMAND="terraform_0.12.31" +#TFCOMMAND="terraform_0.13.7" +#TFCOMMAND="terraform_0.14.11" +#TFCOMMAND="terraform_0.15.5" diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/.tf-control.tfrc new file mode 100644 index 0000000..7425488 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/.tf-control.tfrc @@ -0,0 +1,24 @@ +TFCONTROL_VERSION="1.0.5" + +# https://www.terraform.io/docs/cli/config/config-file.html +plugin_cache_dir = "/data/terraform/terraform.d/plugin-cache" +#disable_checkpoint = true + +provider_installation { +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "*/*/*" ] +# } + filesystem_mirror { + path = "/data/terraform/terraform.d/providers" + include = [ "*/*/*" ] + } +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "external.terraform.census.gov/*/*" ] +# } + direct { + include = [ "*/*/*" ] + } +} + diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/README.md b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/README.md new file mode 100644 index 0000000..eae6d1d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/README.md @@ -0,0 +1,238 @@ +# About cluster-roles + +This directory constructs the resources for roles, permissions and Kubernetes resources +for the EKS cluster adsd-cumulus-dev. 
+ +# Application Information + +* Application: EKS adsd-cumulus-dev +* Organization: ADSD +* Project: DICE-dev +* Point of Contact(s): badra001, +* Creation Date: 2021-10-08 +* References: + * Requirements: {url} + * Remedy Ticket: {number} + * Other: {url} +* Related Configurations: + * {directory-path} + +# Application Requirements: EKS Cluster RBAC + +To allow the CICD pipeline and DBAs to manage the applications and databases that Cumulus needs, 3 cluster roles need to be created: + +1. Deployer Application Role +2. Deployer Istio System Role +3. DBA Administrator Role + +The CICD deployer will be bound to the Deployer roles in the namespaces that CICD will manage. Likewise, DBA Admin users only have admin roles in the namespaces that they are going to manage. + +## Deployer Application Role + +This role defines the k8s resources that the CICD pipeline needs to create for application deployment. + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: deployer-role +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" +rules: + - apiGroups: + - cert-manager.io + - acme.cert-manager.io + resources: + - "*" + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - networking.istio.io + - security.istio.io + resources: + - virtualservices + - authorizationpolicies + - destinationrules + - peerauthentications + - requestauthentications + verbs: + - get + - list + - watch + - create + - delete + - patch + +``` +## Deployer Istio System Role + +This role allows the deployer to create gateways and certificates in the istio-system namespace; Istio requires that TLS certificates stay in the same +namespace as istio-ingressgateway.
+ +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: deployer-istiosystem-role +rules: + - apiGroups: + - cert-manager.io + - acme.cert-manager.io + resources: + - "*" + verbs: + - get + - list + - watch + - create + - update + - patch + - apiGroups: + - networking.istio.io + resources: + - gateways + verbs: + - get + - list + - watch + - create + - delete + - patch + +``` + +## DBA Administrator Role +This is admin role for a particular namespace or namespaces that DBA need to access and managed the DBs. + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: dba-admin-role +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: + - cert-manager.io + - acme.cert-manager.io + resources: + - "*" + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - networking.istio.io + - security.istio.io + resources: + - virtualservices + - authorizationpolicies + - destinationrules + - peerauthentications + - requestauthentications + verbs: + - get + - list + - watch + - create + - delete + - patch + +``` + +# Terraform Directions + + + + +# Details + + +account_alias = "" +account_id = "" +application_tags = {} +aws_environment = "" +census_private_cidr = [ + "148.129.0.0/16", + "172.16.0.0/12", + "192.168.0.0/16" +] +census_public_cidr = [ + "148.129.0.0/16" +] +cicd_k8s_group_name = "s-eks-adsd-cumulus-dev-cicd-deployer" +cicd_k8s_user_name = "cicd-deployer" +cicd_managed_namespaces = [ + "adsd-cumulus-dev-apps", + "adsd-cumulus-dev-addressupdate", + "adsd-cumulus-dev-adminmatchrecord", + "adsd-cumulus-dev-cbs-apps", + "adsd-cumulus-dev-collectionevent", + "adsd-cumulus-dev-collectionintervention", + "adsd-cumulus-dev-collectionoperation", + "adsd-cumulus-dev-collectionresponse", + "adsd-cumulus-dev-common", + "adsd-cumulus-dev-mft", + "adsd-cumulus-dev-monitoring" +] +cluster_name 
= "" +cluster_version = "1.20" +dba_admin_rolebinding_name = "dba-admin-rolebinding" +dba_administrator_role_name = "dba-admin-role" +dba_k8s_group_name = "s-eks-adsd-cumulus-dev-dba-admin" +dba_k8s_user_name = "dba-admin" +dba_managed_namespaces = [ + "adsd-cumulus-dev-db" +] +deployer_application_role_name = "deployer-application-role" +deployer_application_rolebinding_name = "deployer-application-rolebinding" +deployer_istiosystem_role_name = "deployer-istiosystem-role" +domain = "" +eks_instance_disk_size = 40 +eks_instance_type = "t3.xlarge" +eks_ng_desire_size = 4 +eks_ng_max_size = 16 +eks_ng_min_size = 4 +eks_vpc_name = "*vpc4*" +istio_installed_namespace = "istio-system" +kms_tfstate_key = "k-kms-inf-tfstate" +profile = "" +region = "" +region_map = {} +regions = [] +subnets_name = "*-apps-*" +tag_costallocation = "csvd:infrastructure" +tag_creator = "" +tfstate_bucket = "inf-tfstate-252960665057" +tfstate_bucket_prefix = "inf-tfstate" +tfstate_key_prefix = "ma6-gov" +tfstate_key_suffix = "terraform.tfstate" +tfstate_region = "us-gov-east-1" +tfstate_table = "tf_remote_state" +vpc_dns_servers = [ + "148.129.127.22", + "148.129.191.22" +] +vpc_domain_name = "dice.census.gov" +vpc_full_name = "" +vpc_ntp_servers = [ + "148.129.127.23", + "148.129.191.23" +] + + + diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/RESULTS.md b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/RESULTS.md new file mode 100644 index 0000000..5d31a20 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/RESULTS.md @@ -0,0 +1,41 @@ +## Cluster Roles + +```console +% kubectl --kubeconfig setup/kube.config get clusterrole -o wide |grep -iE "dba|deployer" +cumulus-dba-role 2021-10-07T14:36:45Z +dba-admin-role 2021-10-13T12:12:33Z +deployer-application-role 2021-10-13T12:12:33Z +deployer-istiosystem-role 2021-10-13T12:12:33Z +deployer-role 2021-10-07T16:37:43Z +``` + +## Role Binding + +```console +% kubectl --kubeconfig setup/kube.config get 
rolebinding -o wide --all-namespaces |grep -iE "deployer|dba" +adsd-cumulus-dev-addressupdate deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-addressupdate deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-adminmatchrecord deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-adminmatchrecord deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-apps deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-apps deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-cbs-apps deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-cbs-apps deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-collectionevent deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-collectionevent deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-collectionintervention deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-collectionintervention deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-collectionoperation deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-collectionoperation deployer-rolebinding ClusterRole/deployer-role 
5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-collectionresponse deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-collectionresponse deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-common deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-common deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-db cumulus-dba-rolebinding ClusterRole/cumulus-dba-role 5d22h dba-admin cumulus-dba kube-system/dba +adsd-cumulus-dev-db dba-admin-rolebinding ClusterRole/dba-admin-role 56m dba-admin s-eks-adsd-cumulus-dev-dba-admin +adsd-cumulus-dev-mft deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-mft deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-monitoring deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-monitoring deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +istio-system deployer_istiosystem_role_binding ClusterRole/deployer-istiosystem-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +``` diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/cm.tf.off b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/cm.tf.off new file mode 100644 index 0000000..f84cb4b --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/cm.tf.off @@ -0,0 +1,6 @@ +data "kubernetes_config_map" "awsauth" { + metadata { + name = "aws-auth" + namespace = "kube-system" + } +} diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/data.eks-subdirectory.tf 
b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/data.eks-subdirectory.tf new file mode 120000 index 0000000..43b5430 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/data.eks-subdirectory.tf @@ -0,0 +1 @@ +../includes.d/data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/dba-clusterrole.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/dba-clusterrole.tf new file mode 100644 index 0000000..e60e7b5 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/dba-clusterrole.tf @@ -0,0 +1,24 @@ +resource "kubernetes_cluster_role" "dba_administrator_cluster_role" { + metadata { + name = var.dba_administrator_role_name + } + aggregation_rule { + cluster_role_selectors { + match_labels = { + "rbac.authorization.k8s.io/aggregate-to-admin" = "true" + } + } + } + + rule { + api_groups = ["cert-manager.io", "acme.cert-manager.io"] + resources = ["certificates", "challenges", "orders", "certificaterequests", "issuers"] + verbs = ["get", "list", "watch", "create", "update", "patch"] + } + + rule { + verbs = ["get", "list", "watch", "create", "update", "patch"] + api_groups = ["networking.istio.io", "security.istio.io"] + resources = ["virtualservices", "authorizationpolicies", "destinationrules", "peerauthentications", "requestauthentications"] + } +} diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/dba-rolebinding.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/dba-rolebinding.tf new file mode 100644 index 0000000..e7d48aa --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/dba-rolebinding.tf @@ -0,0 +1,40 @@ +locals { + dba_managed_namespaces = formatlist("%v-%v", var.cluster_name, var.dba_managed_namespaces) + dba_k8s_group_name = format("%v%v-%v", local._prefixes["eks-user"], var.cluster_name, var.dba_k8s_group_name) +} + +resource "kubernetes_namespace" "dba_managed_namespaces" { + for_each = 
toset(local.dba_managed_namespaces) + metadata { + name = each.key + labels = { + istio-injection = "enabled" + } + } +} + +resource "kubernetes_role_binding" "dba_admin_rolebinding" { + # for_each = toset(local.dba_managed_namespaces) + for_each = kubernetes_namespace.dba_managed_namespaces + + metadata { + name = var.dba_admin_rolebinding_name + namespace = each.key + } + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "ClusterRole" + name = var.dba_administrator_role_name + } + subject { + kind = "User" + name = var.dba_k8s_user_name + api_group = "rbac.authorization.k8s.io" + } + subject { + kind = "Group" + name = local.dba_k8s_group_name + api_group = "rbac.authorization.k8s.io" + } + # depends_on = [kubernetes_namespace.dba_managed_namespaces] +} diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/dba.iam.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/dba.iam.tf new file mode 100644 index 0000000..3ef0a8a --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/dba.iam.tf @@ -0,0 +1,117 @@ +locals { + policy_dba_k8s_group_name = replace(local.dba_k8s_group_name, local._prefixes["eks-user"], local._prefixes["eks-policy"]) + role_dba_k8s_group_name = format("%v%v-%v", local._prefixes["eks"], var.cluster_name, var.dba_k8s_group_name) +} + +module "role_dba_administrator" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade" + + role_name = local.role_dba_k8s_group_name + role_description = "Role for EKS cluster ${var.cluster_name} for access by ${var.dba_k8s_group_name}" + enable_ldap_creation = false + assume_policy_document = data.aws_iam_policy_document.dba_administrator_allow_sts.json + attached_policies = [aws_iam_policy.dba_administrator.arn] + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) +} + +resource "aws_iam_policy" "dba_administrator" { + name = local.policy_dba_k8s_group_name + path = "/" + description = "Policy for EKS 
${var.cluster_name} IAM access ${var.dba_k8s_group_name}" + policy = data.aws_iam_policy_document.dba_administrator.json +} + +locals { + dba_administrator_policy_statements = { + ECRRead = { + actions = [ + "ecr:Describe*", + "ecr:Get*", + "ecr:ListImages", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + ] + resources = ["*"] + } + EKSRead = { + actions = [ + "eks:ListClusters", + ] + resources = ["*"] + } + EKSReadMyClusters = { + actions = [ + "eks:DescribeCluster", + "eks:AccessKubernetesApi", + ] + resources = [format(local.common_arn, "eks", format("%v/%v", "cluster", var.cluster_name))] + } + STSAssumeRole = { + actions = ["sts:AssumeRole"] + resources = [module.role_dba_administrator.role_arn] + } + } +} + +data "aws_iam_policy_document" "dba_administrator" { + dynamic "statement" { + for_each = local.dba_administrator_policy_statements + iterator = s + content { + sid = format("%v%vAccess", lookup(s.value, "effect", "Allow"), s.key) + effect = lookup(s.value, "effect", "Allow") + actions = lookup(s.value, "actions", []) + resources = lookup(s.value, "resources", []) + not_resources = lookup(s.value, "not_resources", []) + } + } +} + +# allow anyone in this account to assume the role, if they have the permission to do so +data "aws_iam_policy_document" "dba_administrator_allow_sts" { + statement { + sid = "AllowSTSAssume" + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "AWS" + identifiers = [ + format(local.iam_arn, "root"), + ] + } + } +} + +# output "role_dba_administrator_arn" { +# description = "DBA Adminstrator role ARN" +# value = module.role_dba_administrator.role_arn +# } + +module "group_dba_administrator" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-group.git" + + group_name = local.role_dba_k8s_group_name + attached_policies = [aws_iam_policy.dba_administrator.arn] + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) +} + 
+output "info_dba_administrator" { + description = "DBA Administrator IAM details" + value = { + role_name = module.role_dba_administrator.role_name + role_arn = module.role_dba_administrator.role_arn + group_name = module.group_dba_administrator.group_name + group_arn = module.group_dba_administrator.group_arn + } +} diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/deployer-clusterrole.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/deployer-clusterrole.tf new file mode 100644 index 0000000..7cede6e --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/deployer-clusterrole.tf @@ -0,0 +1,67 @@ +resource "kubernetes_cluster_role" "cicd_deployer_istiosystem_cluster_role" { + metadata { + name = var.deployer_istiosystem_role_name + } + + rule { + api_groups = ["acme.cert-manager.io"] + resources = ["challenges", "orders", "certificaterequests"] + verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] + } + + rule { + api_groups = ["cert-manager.io"] + resources = ["certificates"] + verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] + } + + + rule { + verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] + api_groups = ["networking.istio.io"] + resources = ["gateways"] + } +} + +resource "kubernetes_cluster_role" "cicd_deployer_istio_cluster_role" { + metadata { + name = var.deployer_application_istio_role_name + } + rule { + api_groups = ["security.istio.io"] + verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] + resources = ["requestauthentications", "authorizationpolicies", "peerauthentications"] + } + + rule { + verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] + api_groups = ["networking.istio.io"] + resources = ["virtualservices", "destinationrules", "gateways"] + } +} + +resource "kubernetes_cluster_role"
"cicd_deployer_application_cluster_role" { + metadata { + name = var.deployer_application_role_name + } + aggregation_rule { + cluster_role_selectors { + match_labels = { + "rbac.authorization.k8s.io/aggregate-to-edit" = "true" + } + } + } + + rule { + api_groups = ["acme.cert-manager.io"] + resources = ["challenges", "orders", "certificaterequests"] + verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] + } + + rule { + api_groups = ["cert-manager.io"] + resources = ["certificates"] + verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] + } + +} diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/deployer-rolebinding.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/deployer-rolebinding.tf new file mode 100644 index 0000000..3b90b7b --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/deployer-rolebinding.tf @@ -0,0 +1,91 @@ +resource "kubernetes_role_binding" "deployer_istio_role_binding" { + metadata { + name = "deployer_istiosystem_role_binding" + namespace = var.istio_installed_namespace + } + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "ClusterRole" + name = var.deployer_istiosystem_role_name + } + subject { + kind = "User" + name = var.cicd_k8s_user_name + api_group = "rbac.authorization.k8s.io" + } + subject { + kind = "Group" + # name = format("%v%v-%v", local._prefixes["eks-user"], var.cluster_name, var.cicd_k8s_group_name) + name = local.cicd_k8s_iam_username + api_group = "rbac.authorization.k8s.io" + } +} + +locals { + cicd_managed_namespaces = formatlist("%v-%v", var.cluster_name, var.cicd_managed_namespaces) + cicd_k8s_iam_username = format("%v%v-%v", local._prefixes["eks-user"], var.cluster_name, var.cicd_k8s_group_name) + cicd_k8s_group_name = format("%v%v-%v", local._prefixes["eks"], var.cluster_name, var.cicd_k8s_group_name) +} + +resource "kubernetes_namespace" "cicd_managed_namespaces" { + for_each = 
toset(local.cicd_managed_namespaces) + metadata { + name = each.key + labels = { + istio-injection = "enabled" + } + } +} + + +resource "kubernetes_role_binding" "deployer_application_istio_rolebinding" { + # for_each = toset(local.cicd_managed_namespaces) + for_each = kubernetes_namespace.cicd_managed_namespaces + + metadata { + name = var.deployer_application_istio_rolebinding_name + namespace = each.key + } + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "ClusterRole" + name = var.deployer_application_istio_role_name + } + subject { + kind = "User" + name = var.cicd_k8s_user_name + api_group = "rbac.authorization.k8s.io" + } + subject { + kind = "Group" + name = local.cicd_k8s_iam_username + api_group = "rbac.authorization.k8s.io" + } + # depends_on = [kubernetes_namespace.cicd_managed_namespaces] +} + +resource "kubernetes_role_binding" "deployer_application_rolebinding" { + # for_each = toset(local.cicd_managed_namespaces) + for_each = kubernetes_namespace.cicd_managed_namespaces + + metadata { + name = var.deployer_application_rolebinding_name + namespace = each.key + } + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "ClusterRole" + name = var.deployer_application_role_name + } + subject { + kind = "User" + name = var.cicd_k8s_user_name + api_group = "rbac.authorization.k8s.io" + } + subject { + kind = "Group" + name = local.cicd_k8s_iam_username + api_group = "rbac.authorization.k8s.io" + } + # depends_on = [kubernetes_namespace.cicd_managed_namespaces] +} diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/deployer.iam.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/deployer.iam.tf new file mode 100644 index 0000000..7d76a89 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/deployer.iam.tf @@ -0,0 +1,167 @@ +locals { + policy_cicd_k8s_group_name = replace(local.cicd_k8s_iam_username, local._prefixes["eks-user"], local._prefixes["eks-policy"]) + role_cicd_k8s_group_name = 
replace(local.cicd_k8s_iam_username, local._prefixes["eks-user"], "") + iam_policies_cicd = ["p-inf-manage-access-keys"] +} + +data "aws_iam_policy" "cicd_deployer_policies" { + for_each = toset(local.iam_policies_cicd) + name = each.key +} + +module "service_cicd_deployer" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-user.git" + + iam_username = local.cicd_k8s_iam_username + username = "" + email_address = "" + groups = ["g-inf-ip-restriction"] + generate_password = false + service_account = true + enable_sending_mail = false + create_access_keys = false + profile = var.profile + pgp_key_file = "./init/tf-gpg-key.b64" + + attached_policies = flatten(concat([for k, v in data.aws_iam_policy.cicd_deployer_policies : v.arn], [aws_iam_policy.cicd_deployer.arn])) + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) +} +module "role_cicd_deployer" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade" + + role_name = local.role_cicd_k8s_group_name + role_description = "Role for EKS cluster ${var.cluster_name} for access by ${var.cicd_k8s_group_name}" + enable_ldap_creation = false + assume_policy_document = data.aws_iam_policy_document.cicd_deployer_allow_sts.json + # attached_policies = flatten(concat([for k, v in data.aws_iam_policy.cicd_deployer_policies : v.arn], [aws_iam_policy.cicd_deployer.arn])) + attached_policies = [aws_iam_policy.cicd_deployer.arn] + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) +} + +resource "aws_iam_policy" "cicd_deployer" { + name = local.policy_cicd_k8s_group_name + path = "/" + description = "Policy for EKS ${var.cluster_name} IAM access ${var.cicd_k8s_group_name}" + policy = data.aws_iam_policy_document.cicd_deployer.json +} + +locals { + cicd_deployer_policy_statements = { + ECRRead = { + actions = [ + "ecr:Describe*", + "ecr:Get*", + "ecr:ListImages", + "ecr:BatchGetImage", + 
"ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + ] + resources = ["*"] + } + ECRWrite = { + # effect = "Deny" + actions = [ + "ecr:BatchDeleteImage", + "ecr:CompleteLayerUpload", + "ecr:CreateRepository", + "ecr:DeleteRepository", + "ecr:InitiateLayerUpload", + "ecr:PutImage", + "ecr:UploadLayerPart" + ] + # not_resources = [format(local.common_arn, "ecr", format("repository/eks/%v/*", var.cluster_name))] + not_resources = [format(local.common_arn, "ecr", "repository/eks/*")] + } + EKSRead = { + actions = [ + "eks:ListClusters", + ] + resources = ["*"] + } + EKSReadMyClusters = { + actions = [ + "eks:AccessKubernetesApi", + "eks:DescribeCluster", + ] + resources = [format(local.common_arn, "eks", format("%v/%v", "cluster", var.cluster_name))] + } + # IAMRead = { + # actions = [ + # "iam:ListRoles", + # ] + # resources = ["*"] + # } + } +} + +data "aws_iam_policy_document" "cicd_deployer" { + dynamic "statement" { + for_each = local.cicd_deployer_policy_statements + iterator = s + content { + sid = format("%v%vAccess", lookup(s.value, "effect", "Allow"), s.key) + effect = lookup(s.value, "effect", "Allow") + actions = lookup(s.value, "actions", []) + resources = lookup(s.value, "resources", []) + not_resources = lookup(s.value, "not_resources", []) + } + } +} + +# allow anyone in this account to assume the role, if they have the permission to do so +data "aws_iam_policy_document" "cicd_deployer_allow_sts" { + statement { + sid = "AllowSTSAssume" + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "AWS" + identifiers = [ + format(local.iam_arn, "root"), + ] + } + } +} + +# output "service_cicd_deployer_arn" { +# description = "CICD Deployer user ARN" +# value = module.service_cicd_deployer.user_arn +# } +# +# output "service_cicd_deployer_username" { +# description = "CICD Deployer username" +# value = module.service_cicd_deployer.user_name +# } + +module "group_cicd_deployer" { + source = 
"git@github.e.it.census.gov:terraform-modules/aws-iam-group.git" + + group_name = local.cicd_k8s_group_name + attached_policies = flatten(concat([for k, v in data.aws_iam_policy.cicd_deployer_policies : v.arn], [aws_iam_policy.cicd_deployer.arn])) + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) +} + +output "info_cicd_deployer" { + description = "CICD Deployer IAM details" + value = { + user_name = module.service_cicd_deployer.user_name + user_arn = module.service_cicd_deployer.user_arn + group_name = module.group_cicd_deployer.group_name + group_arn = module.group_cicd_deployer.group_arn + } +} diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/kubeconfig.eks-subdirectory.tf new file mode 120000 index 0000000..e3750a4 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/kubeconfig.eks-subdirectory.tf @@ -0,0 +1 @@ +../includes.d/kubeconfig.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/locals.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/locals.tf new file mode 100644 index 0000000..92d0613 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/locals.tf @@ -0,0 +1,11 @@ +locals { + base_arn = format("arn:%v:%%v:%v:%v:%%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id) + iam_arn = format("arn:%v:iam::%v:%%v", data.aws_arn.current.partition, data.aws_caller_identity.current.account_id) + common_arn = format("arn:%v:%%v:%v:%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id) + + base_tags = { + "eks-cluster-name" = var.cluster_name + "boc:tf_module_version" = local._module_version + "boc:created_by" = "terraform" + } +} diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/main.tf
b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/main.tf new file mode 100644 index 0000000..ef02738 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/main.tf @@ -0,0 +1,30 @@ +locals { + aws_auth_users = [ + { + userarn = module.service_cicd_deployer.user_arn + aws_username = "" + username = var.cicd_k8s_user_name + groups = [local.cicd_k8s_group_name] + }, + ] + aws_auth_roles = [ + { + rolearn : module.role_dba_administrator.role_arn + aws_rolename : "" + username : var.dba_k8s_user_name + groups = [local.dba_k8s_group_name] + }, + ] +} + +module "awsauth_cluster-roles" { + source = "git@github.e.it.census.gov:terraform-modules/aws-eks.git//patch-aws-auth" + + region = local.region + profile = var.profile + cluster_name = var.cluster_name + aws_auth_users = local.aws_auth_users + aws_auth_roles = local.aws_auth_roles + + keep_temporary_files = false +} diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/prefixes.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/providers.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/region.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/region.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/region.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/tf-run.data 
b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/tf-run.data new file mode 100644 index 0000000..1d1a079 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/tf-run.data @@ -0,0 +1,18 @@ +VERSION 1.3.0 +REMOTE-STATE +STOP only run this after the cluster roles represented here have been setup in K8S +COMMAND tf-directory-setup.py -l none -f +COMMAND setup-new-directory.sh +COMMAND tf-init -upgrade +LINKTOP init +LINKTOP provider_configs.d/provider.ldap_new.auto.tfvars +LINKTOP provider_configs.d/provider.ldap_new.tf +LINKTOP provider_configs.d/provider.ldap_new.variables.tf +LINK versions.tf +LINK settings.auto.tfvars +LINK variables.application_tags.auto.tfvars +POLICY +ALL +COMMAND tf-directory-setup.py -l s3 + +COMMENT cd ../ and continue diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/variables.auto.tfvars b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/variables.auto.tfvars new file mode 100644 index 0000000..974aef0 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/variables.auto.tfvars @@ -0,0 +1,16 @@ +istio_installed_namespace = "istio-system" +# enable only for cicd needs +cicd_k8s_group_name = "cicd-deployer" +cicd_k8s_user_name = "cicd-deployer" +cicd_managed_namespaces = [] +deployer_application_istio_role_name = "deployer-application-istio-role" +deployer_application_istio_rolebinding_name = "deployer-application-istio-rolebinding" +deployer_application_role_name = "deployer-application-role" +deployer_application_rolebinding_name = "deployer-application-rolebinding" +deployer_istiosystem_role_name = "deployer-istiosystem-role" +# enable only for dba account needs (most likely, not needed) +dba_admin_rolebinding_name = "dba-admin-rolebinding" +dba_administrator_role_name = "dba-admin-role" +dba_k8s_group_name = "dba-admin" +dba_k8s_user_name = "dba-admin" +dba_managed_namespaces = [] diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/variables.eks.tf 
b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/variables.eks.tf new file mode 120000 index 0000000..7dd95db --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/variables.eks.tf @@ -0,0 +1 @@ +../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/variables.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/variables.tf new file mode 100644 index 0000000..559f683 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/variables.tf @@ -0,0 +1,83 @@ +variable "deployer_istiosystem_role_name" { + description = "The kubernetes cluster role name of CIDR Deployer" + type = string + default = "deployer-istiosystem-role" +} + +variable "deployer_application_role_name" { + description = "The kubernetes cluster role name of CICD Deployer" + type = string + default = "deployer-application-role" +} + +variable "deployer_application_istio_role_name" { + description = "The kubernetes cluster role name of CICD Deployer" + type = string + default = "deployer-application-istio-role" +} + + + +variable "dba_administrator_role_name" { + description = "The kubernetes cluster role name of DBA Administrator" + type = string + default = "dba-admin-role" +} + +variable "istio_installed_namespace" { + description = "Namespace that Istio installed" + type = string + default = "istio-system" +} + +variable "cicd_k8s_user_name" { + description = "The user name of CICD Deployer" + type = string + default = "cicd-deployer" +} +variable "cicd_k8s_group_name" { + description = "The Group name of CICD Deployer belongs to (excluding prefix for service account and cluster)" + type = string + default = "cicd-deployer" +} + +variable "dba_k8s_user_name" { + description = "the user name of DBA Administrator" + type = string + default = "dba-admin" +} +variable "dba_k8s_group_name" { + description = "The Group name of dba-admin belongs to (excluding prefix for service account and cluster)" + type = 
string + default = "dba-admin" +} + +variable "deployer_application_rolebinding_name" { + description = "Role binding name of deployer that binding to role deployer_application_cluster_role" + type = string + default = "deployer-application-rolebinding" +} + +variable "deployer_application_istio_rolebinding_name" { + description = "Role binding name of deployer that binding to role deployer_application_cluster_role" + type = string + default = "deployer-application-istio-rolebinding" +} + +variable "dba_admin_rolebinding_name" { + description = "Role binding name of deployer that binding to role deployer_application_cluster_role" + type = string + default = "dba-admin-rolebinding" +} + +variable "cicd_managed_namespaces" { + description = "Deployer managed namespaces that deploy can create resources in (excluding cluster name prefix)" + type = list + default = [] +} + +variable "dba_managed_namespaces" { + description = "DBA admin managed namespaces (excluding cluster name prefix)" + type = list + default = [] +} diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/version.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/cluster-roles/versions.tf b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/versions.tf new file mode 120000 index 0000000..8bd0ff1 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/cluster-roles/versions.tf @@ -0,0 +1 @@ +../versions.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/.gitignore b/examples/full-cluster-tf-upgrade/1.29/common-services/.gitignore new file mode 100644 index 0000000..1ae9a3f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/.gitignore @@ -0,0 +1 @@ +certs/*.key diff --git 
a/examples/full-cluster-tf-upgrade/1.29/common-services/.tf-control b/examples/full-cluster-tf-upgrade/1.29/common-services/.tf-control new file mode 100644 index 0000000..280f449 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/.tf-control @@ -0,0 +1,20 @@ +# .tf-control +# allows for setting a specific command to be used for tf-* commands under this git repo +# see tf-control.sh help for more info + +TFCONTROL_VERSION="1.0.5" + +TFCOMMAND="terraform_latest" +# TF_CLI_CONFIG_FILE=PATH-TO-FILE/.tf-control.tfrc +# TFARGS="" +# TFNOLOG="" +# TFNOCOLOR="" + +# use the following to force a specific version. An upgrade of an existing 0.12.31 to 1.x +# needs you to cycle through 0.13.17, 0.14.11, and then latest (0.15.5 not needed). Other +# steps in between. See https://github.e.it.census.gov/terraform/support/tree/master/docs/how-to/terraform-upgrade for details +# +#TFCOMMAND="terraform_0.12.31" +#TFCOMMAND="terraform_0.13.7" +#TFCOMMAND="terraform_0.14.11" +#TFCOMMAND="terraform_0.15.5" diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.29/common-services/.tf-control.tfrc new file mode 100644 index 0000000..7425488 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/.tf-control.tfrc @@ -0,0 +1,24 @@ +TFCONTROL_VERSION="1.0.5" + +# https://www.terraform.io/docs/cli/config/config-file.html +plugin_cache_dir = "/data/terraform/terraform.d/plugin-cache" +#disable_checkpoint = true + +provider_installation { +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "*/*/*" ] +# } + filesystem_mirror { + path = "/data/terraform/terraform.d/providers" + include = [ "*/*/*" ] + } +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "external.terraform.census.gov/*/*" ] +# } + direct { + include = [ "*/*/*" ] + } +} + diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/README.md 
b/examples/full-cluster-tf-upgrade/1.29/common-services/README.md new file mode 100644 index 0000000..f8b7f53 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/README.md @@ -0,0 +1,66 @@ +# common-services + +This is a directory where the common services are setup: + +* cert-manager +* istio service mesh +* metrics-server + +## Setup Steps + +First, copy the `remote_state.yml` from the parent and update `directory` to be the current directory. + +## Terraform Automated + +A `tf-run.data` file exists here, so the simplest way to implemnt is with the `tf-run.sh` script. + +* copy the `remote_state.yml` from the parent and update `directory` to be the current directory +* run the tf-run.sh + +```console +% tf-run.sh apply +``` + +* example of the `tf-run.sh` steps + +This is part of a larger cluster configuration, so at the end of the run it indicates another directory +to visit when done. + +```console +% tf-run.sh list +* running action=plan +* START: tf-run.sh v1.1.2 start=1636563207 end= logfile=logs/run.plan.20211110.1636563207.log (not-created) +* reading from tf-run.data +* read 23 entries from tf-run.data +> list +** START: start=1636563207 +* 1 COMMAND> tf-directory-setup.py -l none -f +* 2 COMMAND> setup-new-directory.sh +* 3 COMMAND> tf-init -upgrade +* 4 tf-plan -target=tls_private_key.ca +* 5 tf-plan -target=tls_cert_request.ca +* 6 tf-plan -target=null_resource.ca_root_cert +* 7 tf-plan -target=null_resource.ca_files +* 8 tf-plan -target=null_resource.ca_cert +* 9 tf-plan -target=local_file.ca_bundle_cert +* 10 COMMAND> tf-directory-setup.py -l s3 +* 11 COMMENT> submit certs/*csr using command ouptut listed in apply to TCO for signing +* 12 STOP> once that is availabile, change cert_download to true +* 13 COMMAND> terraform taint null_resource.ca_cert +* 14 tf-plan -target=null_resource.ca_root_cert +* 15 tf-plan -target=null_resource.ca_files +* 16 tf-plan -target=null_resource.ca_cert +* 17 COMMENT> second run is to complete the 
steps +* 18 tf-plan -target=null_resource.ca_root_cert +* 19 tf-plan -target=null_resource.ca_files +* 20 tf-plan -target=null_resource.ca_cert +* 21 tf-plan +* 22 COMMENT> run: git-secret add certs/*.key; git-secret hide +* 23 COMMENT> be sure to add all files to git, and be sure to commit -a to get .gitsecret/ changes +** END: start=1636563207 end=1636563207 elapsed=0 logfile=logs/run.plan.20211110.1636563207.log (not-created) +``` + +It is highly recommended to use the `tf-run.sh` approach. + +## Terraform Manual + diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/README.output.md b/examples/full-cluster-tf-upgrade/1.29/common-services/README.output.md new file mode 100644 index 0000000..089cab7 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/README.output.md @@ -0,0 +1,84 @@ +```console +% kubectl -n kube-system get pods -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +aws-load-balancer-controller-54fdf64896-jzwsr 1/1 Running 0 23h 10.194.26.74 ip-10-194-26-252.ec2.internal +aws-load-balancer-controller-54fdf64896-qqt6d 1/1 Running 0 23h 10.194.24.242 ip-10-194-24-49.ec2.internal +aws-node-29kmc 1/1 Running 0 7d1h 10.194.24.90 ip-10-194-24-90.ec2.internal +aws-node-6d8ls 1/1 Running 1 7d1h 10.194.25.120 ip-10-194-25-120.ec2.internal +aws-node-6vrbg 1/1 Running 1 7d1h 10.194.26.252 ip-10-194-26-252.ec2.internal +aws-node-ldgxc 1/1 Running 1 7d1h 10.194.24.49 ip-10-194-24-49.ec2.internal +coredns-65bfc5645f-g86rx 1/1 Running 0 7d2h 10.194.24.207 ip-10-194-24-90.ec2.internal +coredns-65bfc5645f-xj9rl 1/1 Running 0 7d2h 10.194.24.69 ip-10-194-24-90.ec2.internal +efs-csi-controller-65fb886fd4-7slw6 3/3 Running 0 2d21h 10.194.24.90 ip-10-194-24-90.ec2.internal +efs-csi-controller-65fb886fd4-vcf9l 3/3 Running 0 2d21h 10.194.25.120 ip-10-194-25-120.ec2.internal +efs-csi-node-6t6v6 3/3 Running 0 2d21h 10.194.25.120 ip-10-194-25-120.ec2.internal +efs-csi-node-kxqfb 3/3 Running 0 2d21h 10.194.24.49 
ip-10-194-24-49.ec2.internal +efs-csi-node-p8hzn 3/3 Running 0 2d21h 10.194.26.252 ip-10-194-26-252.ec2.internal +efs-csi-node-xxq9h 3/3 Running 0 2d21h 10.194.24.90 ip-10-194-24-90.ec2.internal +kube-proxy-78n7f 1/1 Running 0 7d1h 10.194.24.90 ip-10-194-24-90.ec2.internal +kube-proxy-cms7c 1/1 Running 0 7d1h 10.194.24.49 ip-10-194-24-49.ec2.internal +kube-proxy-h2t6n 1/1 Running 0 7d1h 10.194.26.252 ip-10-194-26-252.ec2.internal +kube-proxy-jkxnz 1/1 Running 0 7d1h 10.194.25.120 ip-10-194-25-120.ec2.internal +``` + +```console +% kubectl get pods --all-namespaces -o wide +NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +cert-manager cert-manager-7fcbc79fc5-xwt4s 1/1 Running 0 51m 10.194.24.138 ip-10-194-24-49.ec2.internal +cert-manager cert-manager-cainjector-6b7f4575f4-xpgnc 1/1 Running 0 51m 10.194.24.56 ip-10-194-24-49.ec2.internal +cert-manager cert-manager-webhook-6cd54b96fc-rvld4 1/1 Running 0 51m 10.194.24.170 ip-10-194-24-90.ec2.internal +istio-system istio-egressgateway-7fcc58ddf7-dtx25 1/1 Running 0 95m 10.194.26.120 ip-10-194-26-252.ec2.internal +istio-system istio-ingressgateway-75f76c546b-vx2v6 1/1 Running 0 95m 10.194.24.8 ip-10-194-24-90.ec2.internal +istio-system istiod-85b6f86f94-vqfj2 1/1 Running 0 95m 10.194.25.155 ip-10-194-25-120.ec2.internal +kube-system aws-load-balancer-controller-54fdf64896-jzwsr 1/1 Running 0 23h 10.194.26.74 ip-10-194-26-252.ec2.internal +kube-system aws-load-balancer-controller-54fdf64896-qqt6d 1/1 Running 0 23h 10.194.24.242 ip-10-194-24-49.ec2.internal +kube-system aws-node-29kmc 1/1 Running 0 7d1h 10.194.24.90 ip-10-194-24-90.ec2.internal +kube-system aws-node-6d8ls 1/1 Running 1 7d1h 10.194.25.120 ip-10-194-25-120.ec2.internal +kube-system aws-node-6vrbg 1/1 Running 1 7d1h 10.194.26.252 ip-10-194-26-252.ec2.internal +kube-system aws-node-ldgxc 1/1 Running 1 7d1h 10.194.24.49 ip-10-194-24-49.ec2.internal +kube-system coredns-65bfc5645f-g86rx 1/1 Running 0 7d2h 10.194.24.207 
ip-10-194-24-90.ec2.internal +kube-system coredns-65bfc5645f-xj9rl 1/1 Running 0 7d2h 10.194.24.69 ip-10-194-24-90.ec2.internal +kube-system efs-csi-controller-65fb886fd4-7slw6 3/3 Running 0 2d21h 10.194.24.90 ip-10-194-24-90.ec2.internal +kube-system efs-csi-controller-65fb886fd4-vcf9l 3/3 Running 0 2d21h 10.194.25.120 ip-10-194-25-120.ec2.internal +kube-system efs-csi-node-6t6v6 3/3 Running 0 2d21h 10.194.25.120 ip-10-194-25-120.ec2.internal +kube-system efs-csi-node-kxqfb 3/3 Running 0 2d21h 10.194.24.49 ip-10-194-24-49.ec2.internal +kube-system efs-csi-node-p8hzn 3/3 Running 0 2d21h 10.194.26.252 ip-10-194-26-252.ec2.internal +kube-system efs-csi-node-xxq9h 3/3 Running 0 2d21h 10.194.24.90 ip-10-194-24-90.ec2.internal +kube-system kube-proxy-78n7f 1/1 Running 0 7d1h 10.194.24.90 ip-10-194-24-90.ec2.internal +kube-system kube-proxy-cms7c 1/1 Running 0 7d1h 10.194.24.49 ip-10-194-24-49.ec2.internal +kube-system kube-proxy-h2t6n 1/1 Running 0 7d1h 10.194.26.252 ip-10-194-26-252.ec2.internal +kube-system kube-proxy-jkxnz 1/1 Running 0 7d1h 10.194.25.120 ip-10-194-25-120.ec2.internal +operators istio-operator-7cc8974d48-f2j2m 1/1 Running 0 14h 10.194.26.211 ip-10-194-26-252.ec2.internal +sample-alb sample-alb-8744f54f9-7w4cj 1/1 Running 0 23h 10.194.25.67 ip-10-194-25-120.ec2.internal +sample-alb sample-alb-8744f54f9-gs8f5 1/1 Running 0 23h 10.194.24.147 ip-10-194-24-49.ec2.internal +sample-alb sample-alb-8744f54f9-v6kgr 1/1 Running 0 23h 10.194.26.168 ip-10-194-26-252.ec2.internal +sample-elb sample-elb-69786b5f7d-d7nb4 1/1 Running 0 2d21h 10.194.26.178 ip-10-194-26-252.ec2.internal +sample-elb sample-elb-69786b5f7d-mw7jb 1/1 Running 0 2d21h 10.194.24.193 ip-10-194-24-49.ec2.internal +sample-elb sample-elb-69786b5f7d-tqz2s 1/1 Running 0 2d21h 10.194.25.96 ip-10-194-25-120.ec2.internal +sample-nlb sample-nlb-6cd5769dfb-n8dmd 1/1 Running 0 2d21h 10.194.25.198 ip-10-194-25-120.ec2.internal +sample-nlb sample-nlb-6cd5769dfb-qw8n4 1/1 Running 0 2d21h 10.194.24.132 
ip-10-194-24-49.ec2.internal +sample-nlb sample-nlb-6cd5769dfb-t2nhp 1/1 Running 0 2d21h 10.194.26.18 ip-10-194-26-252.ec2.internal +``` + +```console +% kubectl -n istio-system get secret | grep -iE "ca-secret|tls" +istio-ca-secret istio.io/ca-root 5 7d2h +nginx-cert kubernetes.io/tls 3 6d20h +root-secret kubernetes.io/tls 3 7d14h +``` + + kubectl get pods --all-namespaces -o wide|grep -i cert +cert-manager cert-manager-7fcbc79fc5-xwt4s 1/1 Running 0 7d22h 10.194.24.138 ip-10-194-24-49.ec2.internal +cert-manager cert-manager-cainjector-6b7f4575f4-xpgnc 1/1 Running 0 7d22h 10.194.24.56 ip-10-194-24-49.ec2.internal +cert-manager cert-manager-webhook-6cd54b96fc-rvld4 1/1 Running 0 7d22h 10.194.24.170 ip-10-194-24-90.ec2.internal + +$ kubectl -n cert-manager get secrets +NAME TYPE + DATA AGE +ca-key-pair Opaque + 2 5m2s +... +$ kubectl get clusterissuer +NAME READY AGE +clusterissuer True 5m36s + diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/cert-manager-issuer.tf b/examples/full-cluster-tf-upgrade/1.29/common-services/cert-manager-issuer.tf new file mode 100644 index 0000000..65d1abd --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/cert-manager-issuer.tf @@ -0,0 +1,14 @@ +module "subordinate_ca" { + source = "git@github.e.it.census.gov:terraform-modules/aws-certificates//acmpca-eks-cert-manager" + + cluster_name = var.cluster_name + contact_email = var.contact_email + + tags = merge( + local.base_tags, + local.common_tags, + var.account_tags, + var.infrastructure_tags, + var.application_tags, + ) +} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/.helmignore b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/Chart.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/Chart.yaml new file mode 100644 index 0000000..87b5611 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v2 +appVersion: 1.28.2 +description: Scales Kubernetes worker nodes within autoscaling groups. +engine: gotpl +home: https://github.com/kubernetes/autoscaler +icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png +maintainers: + - email: e.bailey@sportradar.com + name: yurrriq + - email: mgoodness@gmail.com + name: mgoodness + - email: guyjtempleton@googlemail.com + name: gjtempleton + - email: scott.crooks@gmail.com + name: sc250024 +name: cluster-autoscaler +sources: + - https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler +type: application +version: 9.34.0 diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/README.md b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/README.md new file mode 100644 index 0000000..59a6e0c --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/README.md @@ -0,0 +1,5 @@ +# cluster-autoscaler + +Scales Kubernetes worker nodes within autoscaling groups. 
+ +Refer to following location for more info: https://artifacthub.io/packages/helm/cluster-autoscaler/cluster-autoscaler diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/NOTES.txt b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/NOTES.txt new file mode 100644 index 0000000..94e211e --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/NOTES.txt @@ -0,0 +1,18 @@ +{{- if or .Values.autoDiscovery.clusterName .Values.autoscalingGroups -}} + +To verify that cluster-autoscaler has started, run: + + kubectl --namespace={{ .Release.Namespace }} get pods -l "app.kubernetes.io/name={{ template "cluster-autoscaler.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" + +{{- else -}} + +############################################################################## +#### ERROR: You must specify values for either #### +#### autoDiscovery.clusterName or autoscalingGroups[] #### +############################################################################## + +The deployment and pod will not be created and the installation is not functional +See README: + open https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler + +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/_helpers.tpl new file mode 100644 index 0000000..726086e --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/_helpers.tpl @@ -0,0 +1,117 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "cluster-autoscaler.name" -}} +{{- default (printf "%s-%s" .Values.cloudProvider .Chart.Name) .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "cluster-autoscaler.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default (printf "%s-%s" .Values.cloudProvider .Chart.Name) .Values.nameOverride -}} +{{- if ne $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cluster-autoscaler.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return instance and name labels. +*/}} +{{- define "cluster-autoscaler.instance-name" -}} +app.kubernetes.io/instance: {{ .Release.Name | quote }} +app.kubernetes.io/name: {{ include "cluster-autoscaler.name" . | quote }} +{{- end -}} + + +{{/* +Return labels, including instance and name. +*/}} +{{- define "cluster-autoscaler.labels" -}} +{{ include "cluster-autoscaler.instance-name" . }} +app.kubernetes.io/managed-by: {{ .Release.Service | quote }} +helm.sh/chart: {{ include "cluster-autoscaler.chart" . | quote }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. 
+*/}} +{{- define "deployment.apiVersion" -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "<1.9-0" $kubeTargetVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. +*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "<1.10-0" $kubeTargetVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the service account name used by the pod. +*/}} +{{- define "cluster-autoscaler.serviceAccountName" -}} +{{- if .Values.rbac.serviceAccount.create -}} + {{ default (include "cluster-autoscaler.fullname" .) .Values.rbac.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.rbac.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return true if the priority expander is enabled +*/}} +{{- define "cluster-autoscaler.priorityExpanderEnabled" -}} +{{- $expanders := splitList "," (default "" .Values.extraArgs.expander) -}} +{{- if has "priority" $expanders -}} +{{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the autodiscoveryparameters for clusterapi. 
+*/}} +{{- define "cluster-autoscaler.capiAutodiscoveryConfig" -}} +{{- if .Values.autoDiscovery.clusterName -}} +{{- print "clusterName=" -}}{{ .Values.autoDiscovery.clusterName }} +{{- end -}} +{{- if and .Values.autoDiscovery.clusterName .Values.autoDiscovery.labels -}} +{{- print "," -}} +{{- end -}} +{{- if .Values.autoDiscovery.labels -}} +{{- range $i, $el := .Values.autoDiscovery.labels -}} +{{- if $i -}}{{- print "," -}}{{- end -}} +{{- range $key, $val := $el -}} +{{- $key -}}{{- print "=" -}}{{- $val -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml new file mode 100644 index 0000000..e3d3655 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml @@ -0,0 +1,163 @@ +{{- if and .Values.rbac.create .Values.rbac.clusterScoped -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . 
}} +rules: + - apiGroups: + - "" + resources: + - events + - endpoints + verbs: + - create + - patch + - apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create + - apiGroups: + - "" + resources: + - pods/status + verbs: + - update + - apiGroups: + - "" + resources: + - endpoints + resourceNames: + - cluster-autoscaler + verbs: + - get + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - watch + - list + - get + - update + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - replicationcontrollers + - persistentvolumeclaims + - persistentvolumes + verbs: + - watch + - list + - get + - apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - watch + - list + - get + - apiGroups: + - batch + - extensions + resources: + - jobs + verbs: + - get + - list + - patch + - watch + - apiGroups: + - extensions + resources: + - replicasets + - daemonsets + verbs: + - watch + - list + - get + - apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - watch + - list + - apiGroups: + - apps + resources: + - daemonsets + - replicasets + - statefulsets + verbs: + - watch + - list + - get + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + - csidrivers + - csistoragecapacities + verbs: + - watch + - list + - get + - apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - coordination.k8s.io + resourceNames: + - cluster-autoscaler + resources: + - leases + verbs: + - get + - update +{{- if .Values.rbac.pspEnabled }} + - apiGroups: + - extensions + - policy + resources: + - podsecuritypolicies + resourceNames: + - {{ template "cluster-autoscaler.fullname" . 
}} + verbs: + - use +{{- end -}} +{{- if and ( and ( eq .Values.cloudProvider "clusterapi" ) ( .Values.rbac.clusterScoped ) ( or ( eq .Values.clusterAPIMode "incluster-incluster" ) ( eq .Values.clusterAPIMode "kubeconfig-incluster" ) ))}} + - apiGroups: + - cluster.x-k8s.io + resources: + - machinedeployments + - machinedeployments/scale + - machines + - machinesets + verbs: + - get + - list + - update + - watch +{{- end }} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..d2384dc --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rbac.create .Values.rbac.clusterScoped -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cluster-autoscaler.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "cluster-autoscaler.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/deployment.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/deployment.yaml new file mode 100644 index 0000000..e8edc7f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/deployment.yaml @@ -0,0 +1,291 @@ +{{- if or ( or .Values.autoDiscovery.clusterName .Values.autoDiscovery.labels ) .Values.autoscalingGroups }} +{{/* one of the above is required */}} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: Deployment +metadata: + annotations: +{{ toYaml .Values.deployment.annotations | indent 4 }} + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: +{{ include "cluster-autoscaler.instance-name" . | indent 6 }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 6 }} + {{- end }} +{{- if .Values.updateStrategy }} + strategy: + {{ toYaml .Values.updateStrategy | nindent 4 | trim }} +{{- end }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + labels: +{{ include "cluster-autoscaler.instance-name" . | indent 8 }} + {{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | indent 8 }} + {{- end }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: "{{ .Values.dnsPolicy }}" + {{- end }} + containers: + - name: {{ template "cluster-autoscaler.name" . 
}} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + command: + - ./cluster-autoscaler + - --cloud-provider={{ .Values.cloudProvider }} + {{- if and (eq .Values.cloudProvider "clusterapi") (eq .Values.clusterAPIMode "kubeconfig-incluster") }} + - --namespace={{ .Values.clusterAPIConfigMapsNamespace | default "kube-system" }} + {{- else }} + - --namespace={{ .Release.Namespace }} + {{- end }} + {{- if .Values.autoscalingGroups }} + {{- range .Values.autoscalingGroups }} + - --nodes={{ .minSize }}:{{ .maxSize }}:{{ .name }} + {{- end }} + {{- end }} + {{- if eq .Values.cloudProvider "aws" }} + {{- if .Values.autoDiscovery.clusterName }} + - --node-group-auto-discovery=asg:tag={{ tpl (join "," .Values.autoDiscovery.tags) . }} + {{- end }} + {{- else if eq .Values.cloudProvider "gce" }} + {{- if .Values.autoscalingGroupsnamePrefix }} + {{- range .Values.autoscalingGroupsnamePrefix }} + - --node-group-auto-discovery=mig:namePrefix={{ .name }},min={{ .minSize }},max={{ .maxSize }} + {{- end }} + {{- end }} + {{- else if eq .Values.cloudProvider "magnum" }} + {{- if .Values.autoDiscovery.clusterName }} + - --cluster-name={{ .Values.autoDiscovery.clusterName }} + - --node-group-auto-discovery=magnum:role={{ tpl (join "," .Values.autoDiscovery.roles) . }} + {{- else }} + - --cluster-name={{ .Values.magnumClusterName }} + {{- end }} + {{- else if eq .Values.cloudProvider "clusterapi" }} + {{- if or .Values.autoDiscovery.clusterName .Values.autoDiscovery.labels }} + - --node-group-auto-discovery=clusterapi:{{ template "cluster-autoscaler.capiAutodiscoveryConfig" . 
}} + {{- end }} + {{- if eq .Values.clusterAPIMode "incluster-kubeconfig"}} + - --cloud-config={{ .Values.clusterAPICloudConfigPath }} + {{- else if eq .Values.clusterAPIMode "kubeconfig-incluster"}} + - --kubeconfig={{ .Values.clusterAPIWorkloadKubeconfigPath }} + - --clusterapi-cloud-config-authoritative + {{- else if eq .Values.clusterAPIMode "kubeconfig-kubeconfig"}} + - --kubeconfig={{ .Values.clusterAPIWorkloadKubeconfigPath }} + - --cloud-config={{ .Values.clusterAPICloudConfigPath }} + {{- else if eq .Values.clusterAPIMode "single-kubeconfig"}} + - --kubeconfig={{ .Values.clusterAPIWorkloadKubeconfigPath }} + {{- end }} + {{- end }} + {{- if eq .Values.cloudProvider "magnum" }} + - --cloud-config={{ .Values.cloudConfigPath }} + {{- end }} + {{- range $key, $value := .Values.extraArgs }} + {{- if not (kindIs "invalid" $value) }} + - --{{ $key | mustRegexFind "^[^_]+" }}={{ $value }} + {{- else }} + - --{{ $key | mustRegexFind "^[^_]+" }} + {{- end }} + {{- end }} + env: + {{- if and (eq .Values.cloudProvider "aws") (ne .Values.awsRegion "") }} + - name: AWS_REGION + value: "{{ .Values.awsRegion }}" + {{- if .Values.awsAccessKeyID }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: AwsAccessKeyId + name: {{ template "cluster-autoscaler.fullname" . }} + {{- end }} + {{- if .Values.awsSecretAccessKey }} + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AwsSecretAccessKey + name: {{ template "cluster-autoscaler.fullname" . }} + {{- end }} + {{- else if eq .Values.cloudProvider "azure" }} + - name: ARM_SUBSCRIPTION_ID + valueFrom: + secretKeyRef: + key: SubscriptionID + name: {{ template "cluster-autoscaler.fullname" . }} + - name: ARM_RESOURCE_GROUP + valueFrom: + secretKeyRef: + key: ResourceGroup + name: {{ template "cluster-autoscaler.fullname" . }} + - name: ARM_VM_TYPE + valueFrom: + secretKeyRef: + key: VMType + name: {{ template "cluster-autoscaler.fullname" . 
}} + - name: AZURE_CLUSTER_NAME + valueFrom: + secretKeyRef: + key: ClusterName + name: {{ template "cluster-autoscaler.fullname" . }} + {{- if .Values.azureUseManagedIdentityExtension }} + - name: ARM_USE_MANAGED_IDENTITY_EXTENSION + value: "true" + {{- else }} + - name: ARM_TENANT_ID + valueFrom: + secretKeyRef: + key: TenantID + name: {{ template "cluster-autoscaler.fullname" . }} + - name: ARM_CLIENT_ID + valueFrom: + secretKeyRef: + key: ClientID + name: {{ template "cluster-autoscaler.fullname" . }} + - name: ARM_CLIENT_SECRET + valueFrom: + secretKeyRef: + key: ClientSecret + name: {{ template "cluster-autoscaler.fullname" . }} + - name: AZURE_NODE_RESOURCE_GROUP + valueFrom: + secretKeyRef: + key: NodeResourceGroup + name: {{ template "cluster-autoscaler.fullname" . }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.extraEnv }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + {{- range $key, $value := .Values.extraEnvConfigMaps }} + - name: {{ $key }} + valueFrom: + configMapKeyRef: + name: {{ default (include "cluster-autoscaler.fullname" $) $value.name }} + key: {{ required "Must specify key!" $value.key }} + {{- end }} + {{- range $key, $value := .Values.extraEnvSecrets }} + - name: {{ $key }} + valueFrom: + secretKeyRef: + name: {{ default (include "cluster-autoscaler.fullname" $) $value.name }} + key: {{ required "Must specify key!" 
$value.key }} + {{- end }} + {{- if or .Values.envFromSecret .Values.envFromConfigMap }} + envFrom: + {{- if .Values.envFromSecret }} + - secretRef: + name: {{ .Values.envFromSecret }} + {{- end }} + {{- if .Values.envFromConfigMap }} + - configMapRef: + name: {{ .Values.envFromConfigMap }} + {{- end }} + {{- end }} + livenessProbe: + httpGet: + path: /health-check + port: 8085 + ports: + - containerPort: 8085 + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- if .Values.containerSecurityContext }} + securityContext: + {{ toYaml .Values.containerSecurityContext | nindent 12 | trim }} + {{- end }} + {{- if or (eq .Values.cloudProvider "magnum") .Values.extraVolumeSecrets .Values.extraVolumeMounts .Values.clusterAPIKubeconfigSecret }} + volumeMounts: + {{- if eq .Values.cloudProvider "magnum" }} + - name: cloudconfig + mountPath: {{ .Values.cloudConfigPath }} + readOnly: true + {{- end }} + {{- if and (eq .Values.cloudProvider "magnum") (.Values.magnumCABundlePath) }} + - name: ca-bundle + mountPath: {{ .Values.magnumCABundlePath }} + readOnly: true + {{- end }} + {{- range $key, $value := .Values.extraVolumeSecrets }} + - name: {{ $key }} + mountPath: {{ required "Must specify mountPath!" $value.mountPath }} + readOnly: true + {{- end }} + {{- if .Values.clusterAPIKubeconfigSecret }} + - name: cluster-api-kubeconfig + mountPath: {{ .Values.clusterAPIWorkloadKubeconfigPath | trimSuffix "/value" }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + serviceAccountName: {{ template "cluster-autoscaler.serviceAccountName" . 
}} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.securityContext }} + securityContext: + {{ toYaml .Values.securityContext | nindent 8 | trim }} + {{- end }} + {{- if or (eq .Values.cloudProvider "magnum") .Values.extraVolumeSecrets .Values.extraVolumes .Values.clusterAPIKubeconfigSecret }} + volumes: + {{- if eq .Values.cloudProvider "magnum" }} + - name: cloudconfig + hostPath: + path: {{ .Values.cloudConfigPath }} + {{- end }} + {{- if and (eq .Values.cloudProvider "magnum") (.Values.magnumCABundlePath) }} + - name: ca-bundle + hostPath: + path: {{ .Values.magnumCABundlePath }} + {{- end }} + {{- range $key, $value := .Values.extraVolumeSecrets }} + - name: {{ $key }} + secret: + secretName: {{ default (include "cluster-autoscaler.fullname" $) $value.name }} + {{- if $value.items }} + items: + {{- toYaml $value.items | nindent 14 }} + {{- end }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if .Values.clusterAPIKubeconfigSecret }} + - name: cluster-api-kubeconfig + secret: + secretName: {{ .Values.clusterAPIKubeconfigSecret }} + {{- end }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/pdb.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/pdb.yaml new file mode 100644 index 0000000..19a7d01 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/pdb.yaml @@ -0,0 +1,16 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: +{{ include "cluster-autoscaler.instance-name" . | indent 6 }} +{{- if .Values.podDisruptionBudget }} + {{ toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..28369bf --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml @@ -0,0 +1,46 @@ +{{- if .Values.rbac.pspEnabled }} +apiVersion: {{ template "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "cluster-autoscaler.fullname" . }} + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} +spec: + # Prevents running in privileged mode + privileged: false + # Required to prevent escalations to root. 
+ allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'secret' + - 'hostPath' + - 'emptyDir' + - 'projected' + - 'downwardAPI' +{{- if eq .Values.cloudProvider "gce" }} + allowedHostPaths: + - pathPrefix: {{ .Values.cloudConfigPath }} +{{- end }} + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml new file mode 100644 index 0000000..1e5b895 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml @@ -0,0 +1,22 @@ +{{- if hasKey .Values.extraArgs "expander" }} +{{- if and (.Values.expanderPriorities) (include "cluster-autoscaler.priorityExpanderEnabled" .) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-autoscaler-priority-expander + namespace: {{ .Release.Namespace }} + labels: +{{ include "cluster-autoscaler.labels" . 
| indent 4 }} + {{- if .Values.priorityConfigMapAnnotations }} + annotations: +{{ toYaml .Values.priorityConfigMapAnnotations | indent 4 }} + {{- end }} +data: + priorities: |- +{{- if kindIs "string" .Values.expanderPriorities }} +{{ .Values.expanderPriorities | indent 4 }} +{{- else }} +{{ toYaml .Values.expanderPriorities | indent 4 }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml new file mode 100644 index 0000000..097c969 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml @@ -0,0 +1,15 @@ +{{- if .Values.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "cluster-autoscaler.fullname" . }} + {{- if .Values.prometheusRule.namespace }} + namespace: {{ .Values.prometheusRule.namespace }} + {{- end }} + labels: {{- toYaml .Values.prometheusRule.additionalLabels | nindent 4 }} +spec: + groups: + - name: {{ include "cluster-autoscaler.fullname" . }} + interval: {{ .Values.prometheusRule.interval }} + rules: {{- tpl (toYaml .Values.prometheusRule.rules) . | nindent 8 }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/role.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/role.yaml new file mode 100644 index 0000000..b22fb58 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/role.yaml @@ -0,0 +1,78 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . 
}} + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create +{{- if (include "cluster-autoscaler.priorityExpanderEnabled" .) }} + - list + - watch +{{- end }} + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - cluster-autoscaler-status +{{- if (include "cluster-autoscaler.priorityExpanderEnabled" .) }} + - cluster-autoscaler-priority-expander +{{- end }} + verbs: + - delete + - get + - update +{{- if (include "cluster-autoscaler.priorityExpanderEnabled" .) }} + - watch +{{- end }} +{{- if eq (default "" (index .Values.extraArgs "leader-elect-resource-lock")) "configmaps" }} + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - cluster-autoscaler + verbs: + - get + - update +{{- end }} +{{- if and ( and ( eq .Values.cloudProvider "clusterapi" ) ( not .Values.rbac.clusterScoped ) ( or ( eq .Values.clusterAPIMode "incluster-incluster" ) ( eq .Values.clusterAPIMode "kubeconfig-incluster" ) ))}} + - apiGroups: + - cluster.x-k8s.io + resources: + - machinedeployments + - machinedeployments/scale + - machines + - machinesets + verbs: + - get + - list + - update + - watch +{{- end }} +{{- if ( not .Values.rbac.clusterScoped ) }} + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - coordination.k8s.io + resourceNames: + - cluster-autoscaler + resources: + - leases + verbs: + - get + - update +{{- end }} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml new file mode 100644 index 0000000..ba5f037 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + 
labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "cluster-autoscaler.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "cluster-autoscaler.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/secret.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/secret.yaml new file mode 100644 index 0000000..9c58d0f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/secret.yaml @@ -0,0 +1,21 @@ +{{- if or (eq .Values.cloudProvider "azure") (and (eq .Values.cloudProvider "aws") (not (has "" (list .Values.awsAccessKeyID .Values.awsSecretAccessKey)))) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cluster-autoscaler.fullname" . 
}} + namespace: {{ .Release.Namespace }} +data: +{{- if eq .Values.cloudProvider "azure" }} + ClientID: "{{ .Values.azureClientID | b64enc }}" + ClientSecret: "{{ .Values.azureClientSecret | b64enc }}" + ResourceGroup: "{{ .Values.azureResourceGroup | b64enc }}" + SubscriptionID: "{{ .Values.azureSubscriptionID | b64enc }}" + TenantID: "{{ .Values.azureTenantID | b64enc }}" + VMType: "{{ .Values.azureVMType | b64enc }}" + ClusterName: "{{ .Values.azureClusterName | b64enc }}" + NodeResourceGroup: "{{ .Values.azureNodeResourceGroup | b64enc }}" +{{- else if eq .Values.cloudProvider "aws" }} + AwsAccessKeyId: "{{ .Values.awsAccessKeyID | b64enc }}" + AwsSecretAccessKey: "{{ .Values.awsSecretAccessKey | b64enc }}" +{{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/service.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/service.yaml new file mode 100644 index 0000000..d630512 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/service.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} + name: {{ template "cluster-autoscaler.fullname" . 
}} + namespace: {{ .Release.Namespace }} +spec: +{{- if .Values.service.clusterIP }} + clusterIP: "{{ .Values.service.clusterIP }}" +{{- end }} +{{- if .Values.service.externalIPs }} + externalIPs: +{{ toYaml .Values.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - port: {{ .Values.service.servicePort }} + protocol: TCP + targetPort: 8085 + name: {{ .Values.service.portName }} + selector: +{{ include "cluster-autoscaler.instance-name" . | indent 4 }} + type: "{{ .Values.service.type }}" diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/serviceaccount.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/serviceaccount.yaml new file mode 100644 index 0000000..29c2580 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.rbac.create .Values.rbac.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- if .Values.rbac.serviceAccount.annotations }} + annotations: {{ toYaml .Values.rbac.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.rbac.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml new file mode 100644 index 0000000..be37239 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml @@ -0,0 +1,24 @@ +{{ if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cluster-autoscaler.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- end }} + labels: + {{- range $key, $value := .Values.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + selector: + matchLabels: +{{ include "cluster-autoscaler.instance-name" . 
| indent 6 }} + endpoints: + - port: {{ .Values.service.portName }} + interval: {{ .Values.serviceMonitor.interval }} + path: {{ .Values.serviceMonitor.path }} + namespaceSelector: + matchNames: + - {{.Release.Namespace}} +{{ end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/values.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/values.yaml new file mode 100644 index 0000000..bdaaf63 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/cluster-autoscaler/values.yaml @@ -0,0 +1,378 @@ +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +# affinity -- Affinity for pod assignment +affinity: {} + +autoDiscovery: + # cloudProviders `aws`, `gce`, `magnum` and `clusterapi` are supported by auto-discovery at this time + # AWS: Set tags as described in https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#auto-discovery-setup + + # autoDiscovery.clusterName -- Enable autodiscovery for `cloudProvider=aws`, for groups matching `autoDiscovery.tags`. + # Enable autodiscovery for `cloudProvider=clusterapi`, for groups matching `autoDiscovery.labels`. + # Enable autodiscovery for `cloudProvider=gce`, but no MIG tagging required. + # Enable autodiscovery for `cloudProvider=magnum`, for groups matching `autoDiscovery.roles`. + clusterName: "adsd-cumulus-dev" + + # autoDiscovery.tags -- ASG tags to match, run through `tpl`. + tags: + - k8s.io/cluster-autoscaler/enabled + - k8s.io/cluster-autoscaler/{{ .Values.autoDiscovery.clusterName }} + # - kubernetes.io/cluster/{{ .Values.autoDiscovery.clusterName }} + + # autoDiscovery.roles -- Magnum node group roles to match. 
+ roles: + - worker + + # autoDiscovery.labels -- Cluster-API labels to match https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#configuring-node-group-auto-discovery + labels: [] + # - color: green + # - shape: circle +# autoscalingGroups -- For AWS, Azure AKS or Magnum. At least one element is required if not using `autoDiscovery`. For example: +#
+# - name: asg1
+# maxSize: 2
+# minSize: 1 +#
+autoscalingGroups: [] +# - name: asg1 +# maxSize: 2 +# minSize: 1 +# - name: asg2 +# maxSize: 2 +# minSize: 1 + +# autoscalingGroupsnamePrefix -- For GCE. At least one element is required if not using `autoDiscovery`. For example: +#
+# - name: ig01
+# maxSize: 10
+# minSize: 0 +#
+autoscalingGroupsnamePrefix: [] +# - name: ig01 +# maxSize: 10 +# minSize: 0 +# - name: ig02 +# maxSize: 10 +# minSize: 0 + +# awsAccessKeyID -- AWS access key ID ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) +awsAccessKeyID: "" + +# awsRegion -- AWS region (required if `cloudProvider=aws`) +awsRegion: us-gov-east-1 + +# awsSecretAccessKey -- AWS access secret key ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) +awsSecretAccessKey: "" + +# azureClientID -- Service Principal ClientID with contributor permission to Cluster and Node ResourceGroup. +# Required if `cloudProvider=azure` +azureClientID: "" + +# azureClientSecret -- Service Principal ClientSecret with contributor permission to Cluster and Node ResourceGroup. +# Required if `cloudProvider=azure` +azureClientSecret: "" + +# azureResourceGroup -- Azure resource group that the cluster is located. +# Required if `cloudProvider=azure` +azureResourceGroup: "" + +# azureSubscriptionID -- Azure subscription where the resources are located. +# Required if `cloudProvider=azure` +azureSubscriptionID: "" + +# azureTenantID -- Azure tenant where the resources are located. +# Required if `cloudProvider=azure` +azureTenantID: "" + +# azureVMType -- Azure VM type. +azureVMType: "AKS" + +# azureClusterName -- Azure AKS cluster name. +# Required if `cloudProvider=azure` +azureClusterName: "" + +# azureNodeResourceGroup -- Azure resource group where the cluster's nodes are located, typically set as `MC___`. +# Required if `cloudProvider=azure` +azureNodeResourceGroup: "" + +# azureUseManagedIdentityExtension -- Whether to use Azure's managed identity extension for credentials. If using MSI, ensure subscription ID, resource group, and azure AKS cluster name are set. 
+azureUseManagedIdentityExtension: false + +# magnumClusterName -- Cluster name or ID in Magnum. +# Required if `cloudProvider=magnum` and not setting `autoDiscovery.clusterName`. +magnumClusterName: "" + +# magnumCABundlePath -- Path to the host's CA bundle, from `ca-file` in the cloud-config file. +magnumCABundlePath: "/etc/kubernetes/ca-bundle.crt" + +# clusterAPIMode -- Cluster API mode, see https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#connecting-cluster-autoscaler-to-cluster-api-management-and-workload-clusters +# Syntax: workloadClusterMode-ManagementClusterMode +# for `kubeconfig-kubeconfig`, `incluster-kubeconfig` and `single-kubeconfig` you always must mount the external kubeconfig using either `extraVolumeSecrets` or `extraMounts` and `extraVolumes` +# if you dont set `clusterAPIKubeconfigSecret`and thus use an in-cluster config or want to use a non capi generated kubeconfig you must do so for the workload kubeconfig as well +clusterAPIMode: incluster-incluster # incluster-incluster, incluster-kubeconfig, kubeconfig-incluster, kubeconfig-kubeconfig, single-kubeconfig + +# clusterAPIKubeconfigSecret -- Secret containing kubeconfig for connecting to Cluster API managed workloadcluster +# Required if `cloudProvider=clusterapi` and `clusterAPIMode=kubeconfig-kubeconfig,kubeconfig-incluster or incluster-kubeconfig` +clusterAPIKubeconfigSecret: "" + +# clusterAPIWorkloadKubeconfigPath -- Path to kubeconfig for connecting to Cluster API managed workloadcluster, only used if `clusterAPIMode=kubeconfig-kubeconfig or kubeconfig-incluster` +clusterAPIWorkloadKubeconfigPath: /etc/kubernetes/value + +# clusterAPICloudConfigPath -- Path to kubeconfig for connecting to Cluster API Management Cluster, only used if `clusterAPIMode=kubeconfig-kubeconfig or incluster-kubeconfig` +clusterAPICloudConfigPath: /etc/kubernetes/mgmt-kubeconfig + +# clusterAPIConfigMapsNamespace -- Namespace on the workload cluster to 
store Leader election and status configmaps +clusterAPIConfigMapsNamespace: "" + +# cloudConfigPath -- Configuration file for cloud provider. +cloudConfigPath: /etc/gce.conf + +# cloudProvider -- The cloud provider where the autoscaler runs. +# Currently only `gce`, `aws`, `azure`, `magnum` and `clusterapi` are supported. +# `aws` supported for AWS. `gce` for GCE. `azure` for Azure AKS. +# `magnum` for OpenStack Magnum, `clusterapi` for Cluster API. +cloudProvider: aws + +# containerSecurityContext -- [Security context for container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) +containerSecurityContext: {} + # capabilities: + # drop: + # - ALL + +deployment: + # deployment.annotations -- Annotations to add to the Deployment object. + annotations: {} + +# dnsPolicy -- Defaults to `ClusterFirst`. Valid values are: +# `ClusterFirstWithHostNet`, `ClusterFirst`, `Default` or `None`. +# If autoscaler does not depend on cluster DNS, recommended to set this to `Default`. +dnsPolicy: ClusterFirst + +## Priorities Expander +# expanderPriorities -- The expanderPriorities is used if `extraArgs.expander` contains `priority` and expanderPriorities is also set with the priorities. +# If `extraArgs.expander` contains `priority`, then expanderPriorities is used to define cluster-autoscaler-priority-expander priorities. +# See: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/expander/priority/readme.md +expanderPriorities: {} + +# priorityConfigMapAnnotations -- Annotations to add to `cluster-autoscaler-priority-expander` ConfigMap. +priorityConfigMapAnnotations: {} + # key1: "value1" + # key2: "value2" + +# extraArgs -- Additional container arguments. +# Refer to https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca for the full list of cluster autoscaler +# parameters and their default values. 
+# Everything after the first _ will be ignored allowing the use of multi-string arguments. +extraArgs: + logtostderr: true + stderrthreshold: info + v: 4 + # write-status-configmap: true + # status-config-map-name: cluster-autoscaler-status + # leader-elect: true + # leader-elect-resource-lock: endpoints + skip-nodes-with-local-storage: true + expander: least-waste + # scale-down-enabled: true + balance-similar-node-groups: true + aws-use-static-instance-list: true + # min-replica-count: 0 + # scale-down-utilization-threshold: 0.5 + # scale-down-non-empty-candidates-count: 30 + # max-node-provision-time: 15m0s + # scan-interval: 10s + # scale-down-delay-after-add: 10m + # scale-down-delay-after-delete: 0s + # scale-down-delay-after-failure: 3m + # scale-down-unneeded-time: 10m + skip-nodes-with-system-pods: false + # balancing-ignore-label_1: first-label-to-ignore + # balancing-ignore-label_2: second-label-to-ignore + +# extraEnv -- Additional container environment variables. +extraEnv: {} + +# extraEnvConfigMaps -- Additional container environment variables from ConfigMaps. +extraEnvConfigMaps: {} + +# extraEnvSecrets -- Additional container environment variables from Secrets. +extraEnvSecrets: {} + +# envFromConfigMap -- ConfigMap name to use as envFrom. +envFromConfigMap: "" + +# envFromSecret -- Secret name to use as envFrom. +envFromSecret: "" + +# extraVolumeSecrets -- Additional volumes to mount from Secrets. +extraVolumeSecrets: {} + # autoscaler-vol: + # mountPath: /data/autoscaler/ + # custom-vol: + # name: custom-secret + # mountPath: /data/custom/ + # items: + # - key: subkey + # path: mypath + +# extraVolumes -- Additional volumes. +extraVolumes: [] + # - name: ssl-certs + # hostPath: + # path: /etc/ssl/certs/ca-bundle.crt + +# extraVolumeMounts -- Additional volumes to mount. 
+extraVolumeMounts: [] + # - name: ssl-certs + # mountPath: /etc/ssl/certs/ca-certificates.crt + # readOnly: true + +# fullnameOverride -- String to fully override `cluster-autoscaler.fullname` template. +fullnameOverride: "" + +image: + # image.repository -- Image repository + repository: 252960665057.dkr.ecr.us-gov-east-1.amazonaws.com/eks/adsd-cumulus-dev/cluster-autoscaler + # image.tag -- Image tag + tag: v1.29.0 + # image.pullPolicy -- Image pull policy + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # image.pullSecrets -- Image pull secrets + pullSecrets: [] + # - myRegistrKeySecretName + +# kubeTargetVersionOverride -- Allow overriding the `.Capabilities.KubeVersion.GitVersion` check. Useful for `helm template` commands. +kubeTargetVersionOverride: "" + +# nameOverride -- String to partially override `cluster-autoscaler.fullname` template (will maintain the release name) +nameOverride: "" + +# nodeSelector -- Node labels for pod assignment. Ref: https://kubernetes.io/docs/user-guide/node-selection/. +nodeSelector: {} + +# podAnnotations -- Annotations to add to each pod. +podAnnotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + +# podDisruptionBudget -- Pod disruption budget. +podDisruptionBudget: + maxUnavailable: 1 + # minAvailable: 2 + +# podLabels -- Labels to add to each pod. +podLabels: {} + +# additionalLabels -- Labels to add to each object of the chart. +additionalLabels: {} + +# priorityClassName -- priorityClassName +priorityClassName: "system-cluster-critical" + +rbac: + # rbac.create -- If `true`, create and use RBAC resources. + create: true + # rbac.pspEnabled -- If `true`, creates and uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. 
+ # Must be used with `rbac.create` set to `true`. + pspEnabled: false + # rbac.clusterScoped -- if set to false will only provision RBAC to alter resources in the current namespace. Most useful for Cluster-API + clusterScoped: true + serviceAccount: + # rbac.serviceAccount.annotations -- Additional Service Account annotations. + annotations: + eks.amazonaws.com/role-arn: "arn:aws:iam::252960665057:role/eks-adsd-cumulus-dev-irsa-kube-system-cluster-autoscaler" + # rbac.serviceAccount.create -- If `true` and `rbac.create` is also true, a Service Account will be created. + create: true + # rbac.serviceAccount.name -- The name of the ServiceAccount to use. If not set and create is `true`, a name is generated using the fullname template. + name: "cluster-autoscaler" + # rbac.serviceAccount.automountServiceAccountToken -- Automount API credentials for a Service Account. + automountServiceAccountToken: true + +# replicaCount -- Desired number of pods +replicaCount: 1 + +# resources -- Pod resource requests and limits. +resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 100m + memory: 300Mi + +# securityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) +securityContext: {} + # runAsNonRoot: true + # runAsUser: 1001 + # runAsGroup: 1001 + +service: + # service.annotations -- Annotations to add to service + annotations: {} + # service.labels -- Labels to add to service + labels: {} + # service.externalIPs -- List of IP addresses at which the service is available. Ref: https://kubernetes.io/docs/user-guide/services/#external-ips. + externalIPs: [] + + # service.loadBalancerIP -- IP address to assign to load balancer (if supported). + loadBalancerIP: "" + # service.loadBalancerSourceRanges -- List of IP CIDRs allowed access to load balancer (if supported). + loadBalancerSourceRanges: [] + # service.servicePort -- Service port to expose. 
+ servicePort: 8085 + # service.portName -- Name for service port. + portName: http + # service.type -- Type of service to create. + type: ClusterIP + +## Are you using Prometheus Operator? +serviceMonitor: + # serviceMonitor.enabled -- If true, creates a Prometheus Operator ServiceMonitor. + enabled: false + # serviceMonitor.interval -- Interval that Prometheus scrapes Cluster Autoscaler metrics. + interval: 10s + # serviceMonitor.namespace -- Namespace which Prometheus is running in. + namespace: monitoring + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + # serviceMonitor.selector -- Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install. + selector: + release: prometheus-operator + # serviceMonitor.path -- The path to scrape for metrics; autoscaler exposes `/metrics` (this is standard) + path: /metrics + +## Custom PrometheusRule to be defined +## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart +## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions +prometheusRule: + # prometheusRule.enabled -- If true, creates a Prometheus Operator PrometheusRule. + enabled: false + # prometheusRule.additionalLabels -- Additional labels to be set in metadata. + additionalLabels: {} + # prometheusRule.namespace -- Namespace which Prometheus is running in. + namespace: monitoring + # prometheusRule.interval -- How often rules in the group are evaluated (falls back to `global.evaluation_interval` if not set). + interval: null + # prometheusRule.rules -- Rules spec template (see https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule). 
+ rules: [] + +# tolerations -- List of node taints to tolerate (requires Kubernetes >= 1.6). +tolerations: [] + +# topologySpreadConstraints -- You can use topology spread constraints to control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains. (requires Kubernetes >= 1.19). +topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: cluster-autoscaler + +# updateStrategy -- [Deployment update strategy](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) +updateStrategy: {} + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 0 + # type: RollingUpdate diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/.helmignore b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/Chart.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/Chart.yaml new file mode 100644 index 0000000..f428bb8 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: intermediate-certificate-issuer +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "1.16.0" diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl new file mode 100644 index 0000000..5f6c44f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "intermediate-certificate-issuer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "intermediate-certificate-issuer.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "intermediate-certificate-issuer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "intermediate-certificate-issuer.labels" -}} +helm.sh/chart: {{ include "intermediate-certificate-issuer.chart" . }} +{{ include "intermediate-certificate-issuer.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "intermediate-certificate-issuer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "intermediate-certificate-issuer.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "intermediate-certificate-issuer.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "intermediate-certificate-issuer.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml new file mode 100644 index 0000000..ad99f63 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ca-key-pair + namespace: {{ .Release.Namespace }} +data: + tls.crt: {{ .Values.tls.crt }} + tls.key: {{ .Values.tls.key }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml new file mode 100644 index 0000000..76a3874 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml @@ -0,0 +1,7 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: clusterissuer +spec: + ca: + secretName: ca-key-pair diff --git 
a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/values.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/values.yaml new file mode 100644 index 0000000..50dfd22 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/intermediate-certificate-issuer/values.yaml @@ -0,0 +1,6 @@ +tls: + # tls.crt contains the issuers full chain in the correct order: + # issuer -> intermediate(s) -> root. + crt: + # tls.key contains the base64 encoded signing key. + key: diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/Chart.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/Chart.yaml new file mode 100644 index 0000000..ff4a032 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +name: istio-operator +version: 1.20.0 +tillerVersion: ">=2.7.2" +description: Helm chart for deploying Istio operator +keywords: + - istio + - operator +sources: + - https://github.com/istio/istio/tree/master/operator +engine: gotpl +icon: https://istio.io/latest/favicons/android-192x192.png diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/crds/crd-operator.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/crds/crd-operator.yaml new file mode 100644 index 0000000..93ac1de --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/crds/crd-operator.yaml @@ -0,0 +1,48 @@ +# SYNC WITH manifests/charts/base/files +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: istiooperators.install.istio.io + labels: + release: istio +spec: + conversion: + strategy: None + group: install.istio.io + names: + kind: IstioOperator + listKind: IstioOperatorList + plural: istiooperators + singular: 
istiooperator + shortNames: + - iop + - io + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Istio control plane revision + jsonPath: .spec.revision + name: Revision + type: string + - description: IOP current state + jsonPath: .status.status + name: Status + type: string + - description: 'CreationTimestamp is a timestamp representing the server time + when this object was created. It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true +--- diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/files/gen-operator.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/files/gen-operator.yaml new file mode 100644 index 0000000..e77d5aa --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/files/gen-operator.yaml @@ -0,0 +1,220 @@ +--- +# Source: istio-operator/templates/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: istio-operator + labels: + istio-operator-managed: Reconcile + istio-injection: disabled +--- +# Source: istio-operator/templates/service_account.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: istio-operator + name: istio-operator +--- +# Source: istio-operator/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: istio-operator +rules: +# istio groups +- apiGroups: + - authentication.istio.io + resources: + - '*' + verbs: + - '*' 
+- apiGroups: + - config.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - install.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - security.istio.io + resources: + - '*' + verbs: + - '*' +# k8s groups +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions.apiextensions.k8s.io + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - apps + - extensions + resources: + - daemonsets + - deployments + - deployments/finalizers + - replicasets + verbs: + - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - update +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - roles + - rolebindings + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - create + - update +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - namespaces + - pods + - pods/proxy + - persistentvolumeclaims + - secrets + - services + - serviceaccounts + verbs: + - '*' +--- +# Source: istio-operator/templates/clusterrole_binding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-operator +subjects: +- kind: ServiceAccount + name: istio-operator + namespace: istio-operator +roleRef: + kind: ClusterRole + name: istio-operator + apiGroup: rbac.authorization.k8s.io +--- +# Source: istio-operator/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + namespace: istio-operator + labels: + name: istio-operator + name: istio-operator 
+spec: + ports: + - name: http-metrics + port: 8383 + targetPort: 8383 + protocol: TCP + selector: + name: istio-operator +--- +# Source: istio-operator/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: istio-operator + name: istio-operator +spec: + replicas: 1 + selector: + matchLabels: + name: istio-operator + template: + metadata: + labels: + name: istio-operator + spec: + serviceAccountName: istio-operator + containers: + - name: istio-operator + image: gcr.io/istio-testing/operator:1.10-dev + command: + - operator + - server + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsGroup: 1337 + runAsUser: 1337 + runAsNonRoot: true + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 50m + memory: 128Mi + env: + - name: WATCH_NAMESPACE + value: "istio-system" + - name: LEADER_ELECTION_NAMESPACE + value: "istio-operator" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "istio-operator" + - name: WAIT_FOR_RESOURCES_TIMEOUT + value: "300s" + - name: REVISION + value: "" diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/clusterrole.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/clusterrole.yaml new file mode 100644 index 0000000..4e6bd74 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/clusterrole.yaml @@ -0,0 +1,115 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} +rules: +# istio groups +- apiGroups: + - authentication.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - config.istio.io + resources: + - '*' + verbs: + - '*' +- 
apiGroups: + - install.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - security.istio.io + resources: + - '*' + verbs: + - '*' +# k8s groups +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions.apiextensions.k8s.io + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - apps + - extensions + resources: + - daemonsets + - deployments + - deployments/finalizers + - replicasets + verbs: + - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - update +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - roles + - rolebindings + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - create + - update +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - namespaces + - pods + - pods/proxy + - persistentvolumeclaims + - secrets + - services + - serviceaccounts + verbs: + - '*' +--- diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/clusterrole_binding.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/clusterrole_binding.yaml new file mode 100644 index 0000000..9b9df7d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/clusterrole_binding.yaml @@ -0,0 +1,13 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- 
end }} +subjects: +- kind: ServiceAccount + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} + namespace: {{.Values.operatorNamespace}} +roleRef: + kind: ClusterRole + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} + apiGroup: rbac.authorization.k8s.io +--- diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/crds.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/crds.yaml new file mode 100644 index 0000000..a370365 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/crds.yaml @@ -0,0 +1,6 @@ +{{- if .Values.enableCRDTemplates -}} +{{- range $path, $bytes := .Files.Glob "crds/*.yaml" -}} +--- +{{ $.Files.Get $path }} +{{- end -}} +{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/deployment.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/deployment.yaml new file mode 100644 index 0000000..1baaa8d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/deployment.yaml @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: {{.Values.operatorNamespace}} + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} +spec: + replicas: 1 + selector: + matchLabels: + name: istio-operator + template: + metadata: + labels: + name: istio-operator + spec: + serviceAccountName: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} + containers: + - name: istio-operator + image: {{.Values.hub}}/operator:{{.Values.tag}} + command: + - operator + - server + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsGroup: 1337 + 
runAsUser: 1337 + runAsNonRoot: true + imagePullPolicy: IfNotPresent + resources: +{{ toYaml .Values.operator.resources | trim | indent 12 }} + env: + - name: WATCH_NAMESPACE + value: {{.Values.watchedNamespaces | quote}} + - name: LEADER_ELECTION_NAMESPACE + value: {{.Values.operatorNamespace | quote}} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: {{.Values.operatorNamespace | quote}} + - name: WAIT_FOR_RESOURCES_TIMEOUT + value: {{.Values.waitForResourcesTimeout | quote}} + - name: REVISION + value: {{.Values.revision | quote}} +--- diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/namespace.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/namespace.yaml new file mode 100644 index 0000000..31dc5aa --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/namespace.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: {{.Values.operatorNamespace}} + labels: + istio-operator-managed: Reconcile + istio-injection: disabled +--- diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/service.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/service.yaml new file mode 100644 index 0000000..ab3ed57 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + namespace: {{.Values.operatorNamespace}} + labels: + name: istio-operator + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} +spec: + ports: + - name: http-metrics + port: 8383 + targetPort: 8383 + protocol: TCP + selector: + name: istio-operator +--- diff --git 
a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/service_account.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/service_account.yaml new file mode 100644 index 0000000..03e9377 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/templates/service_account.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: {{.Values.operatorNamespace}} + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} +{{- if .Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} +- name: {{ . }} +{{- end }} +{{- end }} +--- diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/values.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/values.yaml new file mode 100644 index 0000000..39a5bd2 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-operator/values.yaml @@ -0,0 +1,29 @@ +hub: docker.io/istio +tag: 1.10.1 + +# ImagePullSecrets for operator ServiceAccount, list of secrets in the same namespace +# used to pull operator image. Must be set for any cluster configured with private docker registry. +imagePullSecrets: [] + +operatorNamespace: istio-operator + +# Used to replace istioNamespace to support operator watch multiple namespaces. +watchedNamespaces: istio-system +waitForResourcesTimeout: 300s + +# Used for helm2 to add the CRDs to templates. 
+enableCRDTemplates: false + +# revision for the operator resources +revision: "" + +# Operator resource defaults +operator: + resources: + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 50m + memory: 128Mi + diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/.helmignore b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/Chart.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/Chart.yaml new file mode 100644 index 0000000..cf94e4f --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: istio-peerauthentication +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. 
This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.20.0" diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/templates/_helpers.tpl new file mode 100644 index 0000000..94c398d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "istio-peerauthentication.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "istio-peerauthentication.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "istio-peerauthentication.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "istio-peerauthentication.labels" -}} +helm.sh/chart: {{ include "istio-peerauthentication.chart" . }} +{{ include "istio-peerauthentication.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "istio-peerauthentication.selectorLabels" -}} +app.kubernetes.io/name: {{ include "istio-peerauthentication.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "istio-peerauthentication.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "istio-peerauthentication.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml new file mode 100644 index 0000000..3238311 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml @@ -0,0 +1,9 @@ +{{ if .Values.requireMutualTLS }} +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: "default" +spec: + mtls: + mode: STRICT +{{ end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/values.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-peerauthentication/values.yaml new file mode 100644 index 0000000..e69de29 diff --git 
a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/.helmignore b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/Chart.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/Chart.yaml new file mode 100644 index 0000000..aaf3983 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: istio-profile +description: Configuration for istio to be picked up by istio's operator. +type: application +version: 0.1.2 +appVersion: "1.20.0" diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/templates/_helpers.tpl new file mode 100644 index 0000000..8a02937 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "istio-profile.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+If release name contains chart name it will be used as a full name. +*/}} +{{- define "istio-profile.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "istio-profile.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "istio-profile.labels" -}} +helm.sh/chart: {{ include "istio-profile.chart" . }} +{{ include "istio-profile.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "istio-profile.selectorLabels" -}} +app.kubernetes.io/name: {{ include "istio-profile.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "istio-profile.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "istio-profile.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/templates/istiooperator.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/templates/istiooperator.yaml new file mode 100644 index 0000000..6131cbc --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/templates/istiooperator.yaml @@ -0,0 +1,186 @@ +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +metadata: + name: istio-profile +spec: + hub: {{ .Values.hub | default "docker.io/istio" }} + tag: {{ .Values.tag | default "1.16.1" }} + + meshConfig: +{{- if .Values.envoy.accessLog.enabled }} + accessLogFile: /dev/stdout +{{- end }} +{{- if and .Values.envoy.accessLog.enabled .Values.envoy.accessLog.format }} + accessLogFormat: {{ .Values.envoy.accessLog.format }} +{{- end }} +{{- if and .Values.envoy.accessLog.enabled .Values.envoy.accessLog.encoding }} + accessLogEncoding: {{ .Values.envoy.accessLog.encoding }} +{{- end }} + defaultConfig: + proxyMetadata: {} + enablePrometheusMerge: true + + components: + base: + enabled: true + pilot: + enabled: true + + ingressGateways: + - name: istio-ingressgateway + enabled: true + k8s: + serviceAnnotations: + "service.beta.kubernetes.io/aws-load-balancer-internal": "true" + "service.beta.kubernetes.io/aws-load-balancer-type": "nlb" + + egressGateways: + - name: istio-egressgateway + enabled: {{ .Values.egressGateways.enabled }} + + cni: + enabled: false + + istiodRemote: + enabled: false + + values: + global: + istioNamespace: {{ .Values.namespace }} + istiod: + enableAnalysis: false + logging: + level: "default:info" + logAsJson: false + pilotCertProvider: istiod + jwtPolicy: third-party-jwt + proxy: + image: proxyv2 + clusterDomain: "cluster.local" + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 2000m + memory: 
1024Mi + logLevel: warning + componentLogLevel: "misc:error" + privileged: false + enableCoreDump: false + statusPort: 15020 + readinessInitialDelaySeconds: 1 + readinessPeriodSeconds: 2 + readinessFailureThreshold: 30 + includeIPRanges: "*" + excludeIPRanges: {{ default "" .Values.apiserver | quote }} + excludeOutboundPorts: "" + excludeInboundPorts: "" + autoInject: enabled + tracer: "zipkin" + proxy_init: + image: proxyv2 + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 10m + memory: 10Mi + # Specify image pull policy if default behavior isn't desired. + # Default behavior: latest images will be Always else IfNotPresent. + imagePullPolicy: "" + operatorManageWebhooks: false + tracer: + lightstep: {} + zipkin: {} + datadog: {} + stackdriver: {} + imagePullSecrets: [] + oneNamespace: false + defaultNodeSelector: {} + configValidation: true + multiCluster: + enabled: false + clusterName: "" + omitSidecarInjectorConfigMap: false + network: "" + defaultResources: + requests: + cpu: 10m + defaultPodDisruptionBudget: + enabled: true + priorityClassName: "" + useMCP: false + sds: + token: + aud: istio-ca + sts: + servicePort: 0 + meshNetworks: {} + mountMtlsCerts: false + base: + enableCRDTemplates: false + validationURL: "" + pilot: + autoscaleEnabled: true + autoscaleMin: 1 + autoscaleMax: 5 + replicaCount: 1 + image: pilot + traceSampling: 1.0 + env: {} + cpu: + targetAverageUtilization: 80 + nodeSelector: {} + keepaliveMaxServerConnectionAge: 30m + enableProtocolSniffingForOutbound: true + enableProtocolSniffingForInbound: true + deploymentLabels: + configMap: true + + telemetry: + enabled: {{ .Values.telemetry.enabled }} + v2: + enabled: true + metadataExchange: + wasmEnabled: false + prometheus: + wasmEnabled: false + enabled: true + stackdriver: + enabled: false + logging: false + monitoring: false + topology: false + configOverride: {} + + istiodRemote: + injectionURL: "" + + gateways: + istio-egressgateway: + env: {} + autoscaleEnabled: 
true + type: ClusterIP + name: istio-egressgateway + secretVolumes: + - name: egressgateway-certs + secretName: istio-egressgateway-certs + mountPath: /etc/istio/egressgateway-certs + - name: egressgateway-ca-certs + secretName: istio-egressgateway-ca-certs + mountPath: /etc/istio/egressgateway-ca-certs + + istio-ingressgateway: + autoscaleEnabled: true + type: LoadBalancer + name: istio-ingressgateway + env: {} + secretVolumes: + - name: ingressgateway-certs + secretName: istio-ingressgateway-certs + mountPath: /etc/istio/ingressgateway-certs + - name: ingressgateway-ca-certs + secretName: istio-ingressgateway-ca-certs + mountPath: /etc/istio/ingressgateway-ca-certs diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/values.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/values.yaml new file mode 100644 index 0000000..9b43fab --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/istio-profile/values.yaml @@ -0,0 +1,44 @@ + +namespace: istio-system +requireMutualTLS: true +hub: docker.io/istio +tag: 1.10.1 +apiserver: "" + +############################################################################## +# Observability options: +############################################################################## + +# Controls settings for the envoy proxy that is added as a sidecar +envoy: + # Controls settings related to access the service. + accessLog: + # When enabled, envoy is configured to log to stdout. + enabled: true + # Format for the proxy access log. Default value is envoy's format. + # Controls accessLogFormat istio configuration. + format: + # Encoding for the proxy access log (text or json.) Default value is text. + # Controls accessLogEncoding istio configuration. + encoding: + +# When set to true, istio provides telemetry data to prometheus. +# False disables collecting telemetry data. 
+telemetry: + enabled: true + +# When set to true, enables tracking of a request through mesh that is +# distributed across multiple services. +tracing: + enabled: true + +############################################################################## +# Traffic Management options: +############################################################################## + +# Egress gateways allow you to apply Istio features, for example, monitoring +# and route rules, to traffic exiting the mesh. +# When set to true, the egress gateway is created. +egressGateways: + enabled: true + diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/.helmignore b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/Chart.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/Chart.yaml new file mode 100644 index 0000000..9cfc3c1 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: self-signed-certificate-issuer +description: A Helm chart for Kubernetes +type: application +version: 0.1.0 +appVersion: "1.0.0" diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl new file mode 100644 index 0000000..e62a63b --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "self-signed-certificate-issuer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "self-signed-certificate-issuer.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "self-signed-certificate-issuer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "self-signed-certificate-issuer.labels" -}} +helm.sh/chart: {{ include "self-signed-certificate-issuer.chart" . }} +{{ include "self-signed-certificate-issuer.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "self-signed-certificate-issuer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "self-signed-certificate-issuer.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "self-signed-certificate-issuer.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "self-signed-certificate-issuer.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml new file mode 100644 index 0000000..ab1ee31 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml @@ -0,0 +1,8 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: clusterissuer +spec: + ca: + secretName: root-secret + diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml new file mode 100644 index 0000000..84e895d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml @@ -0,0 +1,17 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: selfsigned-ca + namespace: {{ .Release.Namespace }} +spec: + isCA: true + commonName: selfsigned-ca + secretName: root-secret + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: selfsigned-issuer + kind: ClusterIssuer + group: cert-manager.io + diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml new file mode 100644 index 0000000..81660bd --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml @@ -0,0 +1,7 @@ +apiVersion: 
cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: selfsigned-issuer +spec: + selfSigned: {} + diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/values.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/self-signed-certificate-issuer/values.yaml new file mode 100644 index 0000000..e69de29 diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/.helmignore b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/Chart.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/Chart.yaml new file mode 100644 index 0000000..e179122 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: vault-certificate-issuer +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. 
They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl new file mode 100644 index 0000000..a9a1425 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "vault-certificate-issuer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "vault-certificate-issuer.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "vault-certificate-issuer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "vault-certificate-issuer.labels" -}} +helm.sh/chart: {{ include "vault-certificate-issuer.chart" . }} +{{ include "vault-certificate-issuer.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "vault-certificate-issuer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "vault-certificate-issuer.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "vault-certificate-issuer.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "vault-certificate-issuer.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml new file mode 100644 index 0000000..8880f1c --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml @@ -0,0 +1,18 @@ +{{ if eq .Values.vault.authentication_type "AppRole" }} +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: clusterissuer +spec: + vault: + path: {{ .Values.vault.path }} + server: {{ .Values.vault.url }} + caBundle: {{ .Values.vault.ca_bundle }} + auth: + appRole: + path: {{ .Values.approle.role_path }} + roleId: {{ .Values.approle.role_id }} + secretRef: + name: cert-manager-vault-approle + key: secretId +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml new file mode 100644 index 0000000..23d58e1 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml @@ -0,0 +1,10 @@ +{{ if eq .Values.vault.authentication_type "AppRole" }} +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: cert-manager-vault-approle + namespace: {{ .Release.Namespace }} +data: + secretId: {{ .Values.approle.secret_id }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml new file mode 100644 index 
0000000..f964aed --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml @@ -0,0 +1,20 @@ +{{ if eq .Values.vault.authentication_type "ServiceAccount" }} +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: clusterissuer +spec: + vault: + path: {{ .Values.vault.path }} + server: {{ .Values.vault.url }} + caBundle: {{ .Values.vault.ca_bundle }} + auth: + kubernetes: + role: {{ .Values.serviceAccount.role }} +{{- if .Values.serviceAccount.MountPath }} + path: {{ .Values.serviceAccount.mountPath }} +{{- end }} + secretRef: + name: {{ .Values.serviceAccount.secret }} + key: token +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml new file mode 100644 index 0000000..0410d30 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml @@ -0,0 +1,15 @@ +{{ if eq .Values.vault.authentication_type "Token" }} +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: clusterissuer +spec: + vault: + path: {{ .Values.vault.path }} + server: {{ .Values.vault.url }} + caBundle: {{ .Values.vault.ca_bundle }} + auth: + tokenSecretRef: + name: cert-manager-vault-token + key: token +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml new file mode 100644 index 0000000..35bb13d --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml @@ -0,0 +1,10 @@ +{{ if eq .Values.vault.authentication_type "Token" }} +apiVersion: v1 
+kind: Secret +type: Opaque +metadata: + name: cert-manager-vault-token + namespace: {{ .Release.Namespace }} +data: + token: {{ .Values.token.token }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/values.yaml b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/values.yaml new file mode 100644 index 0000000..4cac439 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/charts/vault-certificate-issuer/values.yaml @@ -0,0 +1,47 @@ + +# Common settings for all types of authentication +vault: + # the URL whereby Vault is reachable. + url: + # the Vault path that will be used for signing. Note that the path + # must use the sign endpoint. + path: + # an optional field containing a base64 encoded string of the + # Certificate Authority to trust the Vault connection. This is + # typically always required when using an https URL. + ca_bundle: + # the type of authentication to use, must be one of: + # - AppRole + # - Token + # - ServiceAccount + authentication_type: + +# AppRole authentication type: +approle: + # secret key + secret_id: + # RoleID of the role to assume + role_id: + # the app role path + role_path: + +# Token authentication type: +token: + # a token string that has been generated from one of the many + # authentication backends that Vault supports. These tokens have + # an expiry and so need to be periodically refreshed. cert-manager + # does not refresh these tokens automatically and so another process + # must be put in place to do this. The token is stored in the + # cert-manager-vault-token secret in the cert-manager namespace. 
+ token: + +# ServiceAccount authentication type: +serviceAccount: + # the name of the secret associated with the service account in the + # cert-manager namespace to use to authenticate with vault + secret: + # the role which is the Vault role that the Service Account is to assume + role: + # optional value which is the authentication mount path, defaulting + # to kubernetes. + mountPath: diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/.tf-control b/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/.tf-control new file mode 100644 index 0000000..280f449 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/.tf-control @@ -0,0 +1,20 @@ +# .tf-control +# allows for setting a specific command to be used for tf-* commands under this git repo +# see tf-control.sh help for more info + +TFCONTROL_VERSION="1.0.5" + +TFCOMMAND="terraform_latest" +# TF_CLI_CONFIG_FILE=PATH-TO-FILE/.tf-control.tfrc +# TFARGS="" +# TFNOLOG="" +# TFNOCOLOR="" + +# use the following to force a specific version. An upgrade of an existing 0.12.31 to 1.x +# needs you to cycle through 0.13.7, 0.14.11, and then latest (0.15.5 not needed). Other +# steps in between. 
See https://github.e.it.census.gov/terraform/support/tree/master/docs/how-to/terraform-upgrade for details + +#TFCOMMAND="terraform_0.12.31" +#TFCOMMAND="terraform_0.13.7" +#TFCOMMAND="terraform_0.14.11" +#TFCOMMAND="terraform_0.15.5" diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/.tf-control.tfrc new file mode 100644 index 0000000..7425488 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/.tf-control.tfrc @@ -0,0 +1,24 @@ +TFCONTROL_VERSION="1.0.5" + +# https://www.terraform.io/docs/cli/config/config-file.html +plugin_cache_dir = "/data/terraform/terraform.d/plugin-cache" +#disable_checkpoint = true + +provider_installation { +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "*/*/*" ] +# } + filesystem_mirror { + path = "/data/terraform/terraform.d/providers" + include = [ "*/*/*" ] + } +# filesystem_mirror { +# path = "/apps/terraform/terraform.d/providers" +# include = [ "external.terraform.census.gov/*/*" ] +# } + direct { + include = [ "*/*/*" ] + } +} + diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/README.md b/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/README.md new file mode 100644 index 0000000..ee7afb6 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/README.md @@ -0,0 +1,127 @@ +# Extras :: cloudwatch-agent + +The configuration in this directory will deploy cloudwatch-agent and fluentbit, to be used for EKS Container Insights. 
+ +# Links + +* AWS Docs + * https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-prerequisites.html + * https://aws.amazon.com/blogs/opensource/centralized-container-logging-fluent-bit/ + * https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContainerInsights-use-kubelet.html + * https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-setup-logs-FluentBit.html + * https://aws.github.io/eks-charts +* Cloudwatch Agent + * https://github.com/aws/eks-charts/tree/master/stable/aws-cloudwatch-metrics +* Fluent Bit + * https://github.com/aws/aws-for-fluent-bit + * https://github.com/aws/eks-charts/tree/master/stable/aws-for-fluent-bit + +# Supported Versions + +This configuration has been tested and validated on EKS versions + +* 1.24 +* 1.25 + +# Configuration + +This uses a helm chart, an IRSA role, and pulls the latest images at the time of creating this module. +Look in the `variables.*.auto.tfvars` files for the version numbers. + +# Installation + +You will need the latest copy of the `aws-eks` module, using the `tf-upgrade` branch. This requires the use of +Terraform 1.x, and as it is deployed in a subdirectory, it should work without issue. + +## Step 1: Get aws-eks repo + +If you do not have the `aws-eks` repo, clone it in the branch `tf-upgrade`. + +```script +# go to your TF repository directory +cd $PATH_TO_TERRAFORM +git clone git@github.e.it.census.gov:terraform-modules/aws-eks.git -b tf-upgrade +cd aws-eks +export EKS_SOURCE=$(pwd) +``` + +If you already have the repo, go into the directory, checkout the branch and refresh it. + +```script +# go to your TF repository directory +cd $PATH_TO_TERRAFORM +cd aws-eks +git checkout tf-upgrade +git pull origin tf-upgrade +export EKS_SOURCE=$(pwd) +``` + +## Step 2: Copy code + +Go into the `common-services` directory of the EKS cluster where you wish to deploy this. Make a directory, `cloudwatch-agent`, and then +rsync the code. 
Please use rsync, not copy. There is a directory, and there may be softlinks. You'll work in a new branch. An example is below: + +```script +cd $PATH_TO_TERRAFORM +cd 107742151971-do2-govcloud/vpc/east/vpc5/apps/eks-ditd-gups-stage/common-services +mkdir cloudwatch-agent +cd cloudwatch-agent +git checkout -b add-cloudwatch-agent +rsync -avRWH $EKS_SOURCE/examples/extra/cloudwatch-agent/./ ./ +``` + +## Step 3: Plan + +There is no configuration needed. All relevant details are pulled from the parent directories. You do need EKS cluster access, +so be sure you are running with a user who has K8S RBAC access. + +```script +tf-run plan +tf-plan summary + +# add to git +git add . +git commit -m 'add cloudwatch, fluentbit' . +git push +# submit PR with plan summary and plan log +``` + +## Step 4: Apply + +Once the PR is merged, apply, and finalize the directory. + +```script +tf-run apply +``` + +Make sure it started up: + +```console +% kubectl --kubeconfig setup/kube.config get pods -n aws-cloudwatch +NAME READY STATUS RESTARTS AGE +aws-cloudwatch-metrics-8jlwh 1/1 Running 0 24h +aws-cloudwatch-metrics-8jxqs 1/1 Running 0 24h +aws-cloudwatch-metrics-k668c 1/1 Running 0 24h +fluent-bit-aws-for-fluent-bit-6bvgk 1/1 Running 0 24h +fluent-bit-aws-for-fluent-bit-b4hk5 1/1 Running 0 24h +fluent-bit-aws-for-fluent-bit-chx46 1/1 Running 0 24h +``` + +All should be running. If any errors, or not running, look at `events` and `logs`. + +Then, check AWS CloudWatch Logs. There will be four log as follows: /aws/containerinsights/{clustername}/{name} +where {name} is + + * performance + * host + * applications + * dataplane + +The Container Insight dashboard should also show performance data for the cluster, though it may take some +time to appear. 
+ + +# CHANGELOG + +* 1.0.0 -- 2023-08-24 + - initial diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/cloudwatch-agent.tf b/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/cloudwatch-agent.tf new file mode 100644 index 0000000..a4b7004 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/cloudwatch-agent.tf @@ -0,0 +1,123 @@ +# https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-prerequisites.html + +data "aws_iam_policy" "policy_cloudwatch-agent" { + name = "CloudWatchAgentServerPolicy" +} + +module "role_cloudwatch-agent" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + + role_description = "EKS IAM Role for ${var.cluster_name} for service account ${var.cloudwatch_agent_namespace}:${var.cloudwatch_agent_name}" + role_name = format("%v%v-irsa__%v", local._prefixes["eks-role"], var.cluster_name, var.cloudwatch_agent_name) + + role_policy_arns = { + policy = data.aws_iam_policy.policy_cloudwatch-agent.arn + } + + oidc_providers = { + main = { + provider_arn = local.oidc_provider_arn + namespace_service_accounts = [format("%v:%v", var.cloudwatch_agent_namespace, var.cloudwatch_agent_name)] + } + } + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + { + "eks:namespace" = var.cloudwatch_agent_namespace + "eks:user" = var.cloudwatch_agent_name + } + ) +} + +locals { + cloudwatch_agent_images_output = { for k, v in module.images_cloudwatch-agent.images : v.name => v } +} + +module "images_cloudwatch-agent" { + source = "git@github.e.it.census.gov:terraform-modules/aws-ecr-copy-images.git?ref=tf-upgrade" + + profile = var.profile + application_list = [] + application_name = format("eks/%v", var.cluster_name) + image_config = [for k, v in var.cloudwatch_agent_images : v if v.enabled] + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) +} + 
+resource "aws_cloudwatch_log_group" "cloudwatch_agent_logs" { + for_each = toset(var.cloudwatch_agent_log_names) + name = format("/aws/containerinsights/%v/%v", var.cluster_name, each.key) + retention_in_days = var.cloudwatch_agent_log_retention_days + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) +} + +resource "kubernetes_namespace" "cloudwatch-agent" { + metadata { + name = var.cloudwatch_agent_namespace + } +} + +# chart +# https://github.com/aws/eks-charts/tree/master/stable/aws-cloudwatch-metrics +resource "helm_release" "cloudwatch-agent" { + chart = "aws-cloudwatch-metrics" + name = "aws-cloudwatch-metrics" + namespace = var.cloudwatch_agent_namespace + repository = var.cloudwatch_agent_charts["cloudwatch-agent"].use_remote ? var.cloudwatch_agent_charts["cloudwatch-agent"].repository : "${path.module}/charts" + version = var.cloudwatch_agent_charts["cloudwatch-agent"].use_remote ? var.cloudwatch_agent_charts["cloudwatch-agent"].version : null + + depends_on = [kubernetes_namespace.cloudwatch-agent, module.images_cloudwatch-agent] + set { + name = "image.repository" + value = split(":", local.cloudwatch_agent_images_output["cloudwatch-agent"].dest_full_path)[0] + } + + set { + name = "image.tag" + value = local.cloudwatch_agent_images_output["cloudwatch-agent"].tag + } + + set { + name = "clusterName" + value = var.cluster_name + } + set { + name = "serviceAccount.name" + value = var.cloudwatch_agent_name + } + set { + name = "serviceAccount.create" + value = "true" + } + set { + name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" + value = module.role_cloudwatch-agent.iam_role_arn + } + timeout = 300 +} + +data "aws_iam_policy_document" "cloudwatch_agent_policy_extra" { + statement { + sid = "DescribeVolumes" + effect = "Allow" + actions = ["ec2:DescribeVolumes"] + resources = ["*"] + } +} + +resource "aws_iam_role_policy" "cloudwatch_agent_policy_extra" { + name = "extra" + role = 
module.role_cloudwatch-agent.iam_role_name + + policy = data.aws_iam_policy_document.cloudwatch_agent_policy_extra.json +} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/fluentbit.tf b/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/fluentbit.tf new file mode 100644 index 0000000..9263e7b --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/fluentbit.tf @@ -0,0 +1,186 @@ +# https://github.com/aws/aws-for-fluent-bit + +## % tf-aws ssm get-parameters-by-path --path /aws/service/aws-for-fluent-bit/ --query 'Parameters[*].Name'|grep 2.31.12 +## "/aws/service/aws-for-fluent-bit/2.31.12-windowsservercore", +## "/aws/service/aws-for-fluent-bit/init-2.31.12.20230629", +## "/aws/service/aws-for-fluent-bit/2.31.12.20230727", +## "/aws/service/aws-for-fluent-bit/2.31.12.20230629", +## "/aws/service/aws-for-fluent-bit/2.31.12", +## "/aws/service/aws-for-fluent-bit/init-2.31.12", +## "/aws/service/aws-for-fluent-bit/init-2.31.12.20230727" +## +## % tf-aws ssm get-parameter --name /aws/service/aws-for-fluent-bit/2.31.12.20230629 +## { +## "Parameter": { +## "Name": "/aws/service/aws-for-fluent-bit/2.31.12.20230629", +## "Type": "String", +## "Value": "161423150738.dkr.ecr.us-gov-west-1.amazonaws.com/aws-for-fluent-bit:2.31.12.20230629", +## "Version": 1, +## "LastModifiedDate": "2023-06-29T20:54:07.770000-04:00", +## "ARN": "arn:aws-us-gov:ssm:us-gov-west-1::parameter/aws/service/aws-for-fluent-bit/2.31.12.20230629", +## "DataType": "text" +## } +## } + + +data "aws_ssm_parameter" "fluentbit_image" { + name = format("/aws/service/aws-for-fluent-bit/%v", var.fluentbit_tag) + + lifecycle { + precondition { + condition = var.fluentbit_tag != null && var.fluentbit_tag != "" + error_message = "var.fluentbit_tag must be provided and not null or empty." 
+ } + } +} + + +module "role_fluentbit" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + + role_description = "EKS IAM Role for ${var.cluster_name} for service account ${var.fluentbit_namespace}:${var.fluentbit_name}" + role_name = format("%v%v-irsa__%v", local._prefixes["eks-role"], var.cluster_name, var.fluentbit_name) + + role_policy_arns = { + policy = aws_iam_policy.policy_fluentbit.arn + } + + oidc_providers = { + main = { + provider_arn = local.oidc_provider_arn + namespace_service_accounts = [format("%v:%v", var.fluentbit_namespace, var.fluentbit_name)] + } + } + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + { + "eks:namespace" = var.fluentbit_namespace + "eks:user" = var.fluentbit_name + } + ) +} + +resource "aws_iam_policy" "policy_fluentbit" { + name = format("%v%v-irsa__%v", local._prefixes["eks-policy"], var.cluster_name, var.fluentbit_name) + description = "EKS IAM Policy for ${var.cluster_name} for service account ${var.fluentbit_namespace}:${var.fluentbit_name}" + path = "/" + policy = data.aws_iam_policy_document.policy_fluentbit.json + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + { + "Name" = format("%v%v-irsa__%v", local._prefixes["eks-policy"], var.cluster_name, var.fluentbit_name) + "eks:namespace" = var.fluentbit_namespace + "eks:user" = var.fluentbit_name + } + ) +} + + +# https://aws.amazon.com/blogs/opensource/centralized-container-logging-fluent-bit/ +data "aws_iam_policy_document" "policy_fluentbit" { + statement { + sid = "AllowFirehose" + effect = "Allow" + actions = [ + "firehose:PutRecordBatch" + ] + resources = ["*"] + } + ## statement { + ## sid = "PutLogEvents" + ## effect = "Allow" + ## actions = [ + ## "logs:PutLogEvents" + ## ] + ## resources = [ format("arn:%v:logs:*:*:log-group:*:*:*",data.aws_arn.current.partition) ] + ## } + statement { + sid = "CreateStreams" + effect = "Allow" + actions = [ + 
"logs:CreateLogStream", + "logs:DescribeLogStreams", + "logs:PutLogEvents" + ] + # resources = [ format("arn:%v:logs:*:*:log-group:*",data.aws_arn.current.partition) ] + resources = [for k, v in aws_cloudwatch_log_group.fluentbit_logs : format("%v:*", v.arn)] + } + ## statement { + ## sid = "CreateLogGroup" + ## effect = "Allow" + ## actions = [ + ## "logs:CreateLogGroup" + ## ] + ## resources = [ "*" ] + ## } +} + +resource "aws_cloudwatch_log_group" "fluentbit_logs" { + for_each = toset(var.fluentbit_log_names) + name = format("/aws/containerinsights/%v/%v", var.cluster_name, each.key) + retention_in_days = var.fluentbit_log_retention_days + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) +} + +## helm, reference ssm image +# https://github.com/aws/eks-charts/tree/master/stable/aws-for-fluent-bit + +resource "helm_release" "fluentbit" { + chart = "aws-for-fluent-bit" + name = var.fluentbit_name + namespace = var.fluentbit_namespace + repository = var.fluentbit_charts["fluent-bit"].use_remote ? var.fluentbit_charts["fluent-bit"].repository : "${path.module}/charts" + version = var.fluentbit_charts["fluent-bit"].use_remote ? 
var.fluentbit_charts["fluent-bit"].version : null + + values = [ + file("fluentbit.values.yml"), + templatefile("${path.root}/templates/fluentbit.env.yml.tpl", { + region = local.region + cluster_name = var.cluster_name + }) + ] + + set { + name = "cluster.name" + value = var.cluster_name + } + set { + name = "logs.region" + value = var.region + } + set { + name = "image.repository" + value = split(":", data.aws_ssm_parameter.fluentbit_image.value)[0] + } + set { + name = "image.tag" + value = var.fluentbit_tag + } + set { + name = "cloudWatchLogs.enabled" + value = "false" + } + set { + name = "serviceAccount.name" + value = var.fluentbit_name + } + set { + name = "serviceAccount.create" + value = "true" + } + set { + name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" + value = module.role_fluentbit.iam_role_arn + } + timeout = 300 +} diff --git a/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/fluentbit.values.yml b/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/fluentbit.values.yml new file mode 100644 index 0000000..029164e --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.29/common-services/cloudwatch-agent/fluentbit.values.yml @@ -0,0 +1,229 @@ +# https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContainerInsights-use-kubelet.html +# networking needs to be enabled for the kubernetes filter. 
The chart does not enable this and has comments about enabling +hostNetwork: true +dnsPolicy: ClusterFirstWithHostNet +# disable standard input and filter +input: + enabled: false +filter: + enabled: false +# https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-setup-logs-FluentBit.html +# https://raw.githubusercontent.com/aws-samples/amazon-cloudwatch-container-insights/latest/k8s-deployment-manifest-templates/deployment-mode/daemonset/container-insights-monitoring/cloudwatch-namespace.yaml +# https://raw.githubusercontent.com/aws-samples/amazon-cloudwatch-container-insights/latest/k8s-deployment-manifest-templates/deployment-mode/daemonset/container-insights-monitoring/fluent-bit/fluent-bit.yaml +# takes volumes, volumeMounts, and inputs,outputs,filters, and parsers from these samples +# note there seems not to be a way to pass the labels and selector.matchLabels to this chart +# +volumeMounts: +# Please don't change below read-only permissions + - name: fluentbitstate + mountPath: /var/fluent-bit/state + - name: varlog + mountPath: /var/log + readOnly: true + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + - name: runlogjournal + mountPath: /run/log/journal + readOnly: true + - name: dmesg + mountPath: /var/log/dmesg + readOnly: true +volumes: + - name: fluentbitstate + hostPath: + path: /var/fluent-bit/state + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + - name: runlogjournal + hostPath: + path: /run/log/journal + - name: dmesg + hostPath: + path: /var/log/dmesg +additionalInputs: |- + [INPUT] + Name tail + Tag application.* + Exclude_Path /var/log/containers/cloudwatch-agent*, /var/log/containers/fluent-bit*, /var/log/containers/aws-node*, /var/log/containers/kube-proxy* + Path /var/log/containers/*.log + multiline.parser docker, cri + DB /var/fluent-bit/state/flb_container.db + Mem_Buf_Limit 50MB + 
Skip_Long_Lines On + Refresh_Interval 10 + Rotate_Wait 30 + storage.type filesystem + Read_from_Head ${READ_FROM_HEAD} + [INPUT] + Name tail + Tag application.* + Path /var/log/containers/fluent-bit* + multiline.parser docker, cri + DB /var/fluent-bit/state/flb_log.db + Mem_Buf_Limit 5MB + Skip_Long_Lines On + Refresh_Interval 10 + Read_from_Head ${READ_FROM_HEAD} + [INPUT] + Name tail + Tag application.* + Path /var/log/containers/cloudwatch-agent* + multiline.parser docker, cri + DB /var/fluent-bit/state/flb_cwagent.db + Mem_Buf_Limit 5MB + Skip_Long_Lines On + Refresh_Interval 10 + Read_from_Head ${READ_FROM_HEAD} + [INPUT] + Name systemd + Tag dataplane.systemd.* + Systemd_Filter _SYSTEMD_UNIT=docker.service + Systemd_Filter _SYSTEMD_UNIT=containerd.service + Systemd_Filter _SYSTEMD_UNIT=kubelet.service + DB /var/fluent-bit/state/systemd.db + Path /var/log/journal + Read_From_Tail ${READ_FROM_TAIL} + [INPUT] + Name tail + Tag dataplane.tail.* + Path /var/log/containers/aws-node*, /var/log/containers/kube-proxy* + multiline.parser docker, cri + DB /var/fluent-bit/state/flb_dataplane_tail.db + Mem_Buf_Limit 50MB + Skip_Long_Lines On + Refresh_Interval 10 + Rotate_Wait 30 + storage.type filesystem + Read_from_Head ${READ_FROM_HEAD} + [INPUT] + Name tail + Tag host.dmesg + Path /var/log/dmesg + Key message + DB /var/fluent-bit/state/flb_dmesg.db + Mem_Buf_Limit 5MB + Skip_Long_Lines On + Refresh_Interval 10 + Read_from_Head ${READ_FROM_HEAD} + [INPUT] + Name tail + Tag host.messages + Path /var/log/messages + Parser syslog + DB /var/fluent-bit/state/flb_messages.db + Mem_Buf_Limit 5MB + Skip_Long_Lines On + Refresh_Interval 10 + Read_from_Head ${READ_FROM_HEAD} + [INPUT] + Name tail + Tag host.secure + Path /var/log/secure + Parser syslog + DB /var/fluent-bit/state/flb_secure.db + Mem_Buf_Limit 5MB + Skip_Long_Lines On + Refresh_Interval 10 + Read_from_Head ${READ_FROM_HEAD} +additionalOutputs: |- + [OUTPUT] + Name cloudwatch_logs + Match application.* + region 
${AWS_REGION} + log_group_name /aws/containerinsights/${CLUSTER_NAME}/application + log_stream_prefix ${HOST_NAME}- + auto_create_group true + extra_user_agent container-insights + [OUTPUT] + Name cloudwatch_logs + Match dataplane.* + region ${AWS_REGION} + log_group_name /aws/containerinsights/${CLUSTER_NAME}/dataplane + log_stream_prefix ${HOST_NAME}- + auto_create_group true + extra_user_agent container-insights + [OUTPUT] + Name cloudwatch_logs + Match host.* + region ${AWS_REGION} + log_group_name /aws/containerinsights/${CLUSTER_NAME}/host + log_stream_prefix ${HOST_NAME}. + auto_create_group true + extra_user_agent container-insights +additionalFilters: |- + [FILTER] + Name kubernetes + Match application.* + Kube_URL https://kubernetes.default.svc:443 + Kube_Tag_Prefix application.var.log.containers. + Merge_Log On + Merge_Log_Key log_processed + K8S-Logging.Parser On + K8S-Logging.Exclude Off + Labels Off + Annotations Off + Use_Kubelet On + Kubelet_Port 10250 + Buffer_Size 0 + Kube_CA_File /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + Kube_Token_File /var/run/secrets/kubernetes.io/serviceaccount/token + [FILTER] + Name modify + Match dataplane.systemd.* + Rename _HOSTNAME hostname + Rename _SYSTEMD_UNIT systemd_unit + Rename MESSAGE message + Remove_regex ^((?!hostname|systemd_unit|message).)*$ + [FILTER] + Name aws + Match dataplane.* + imds_version v1 + [FILTER] + Name aws + Match host.* + imds_version v1 +service: + extraParsers: |- + [PARSER] + Name syslog + Format regex + Regex ^(?