diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/.gitignore b/examples/full-cluster-tf-upgrade/1.23/.gitignore similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/.gitignore rename to examples/full-cluster-tf-upgrade/1.23/.gitignore diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/.tf-control b/examples/full-cluster-tf-upgrade/1.23/.tf-control similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/.tf-control rename to examples/full-cluster-tf-upgrade/1.23/.tf-control diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.23/.tf-control.tfrc similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/.tf-control.tfrc rename to examples/full-cluster-tf-upgrade/1.23/.tf-control.tfrc diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/README.md b/examples/full-cluster-tf-upgrade/1.23/README.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/README.md rename to examples/full-cluster-tf-upgrade/1.23/README.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ROLES.md b/examples/full-cluster-tf-upgrade/1.23/ROLES.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ROLES.md rename to examples/full-cluster-tf-upgrade/1.23/ROLES.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/.tf-control b/examples/full-cluster-tf-upgrade/1.23/aws-auth/.tf-control similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/.tf-control rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/.tf-control diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.23/aws-auth/.tf-control.tfrc similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/.tf-control.tfrc 
rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/.tf-control.tfrc diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/README.md b/examples/full-cluster-tf-upgrade/1.23/aws-auth/README.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/README.md rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/README.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/aws-auth.auto.tfvars b/examples/full-cluster-tf-upgrade/1.23/aws-auth/aws-auth.auto.tfvars similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/aws-auth.auto.tfvars rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/aws-auth.auto.tfvars diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/config_map.aws-auth.yaml.tpl b/examples/full-cluster-tf-upgrade/1.23/aws-auth/config_map.aws-auth.yaml.tpl similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/config_map.aws-auth.yaml.tpl rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/config_map.aws-auth.yaml.tpl diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/aws-auth/data.eks-subdirectory.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/data.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/data.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/aws-auth/kubeconfig.eks-subdirectory.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/kubeconfig.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/kubeconfig.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/patch-aws-auth.tf 
b/examples/full-cluster-tf-upgrade/1.23/aws-auth/patch-aws-auth.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/patch-aws-auth.tf rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/patch-aws-auth.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/prefixes.tf b/examples/full-cluster-tf-upgrade/1.23/aws-auth/prefixes.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/prefixes.tf rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/prefixes.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/providers.tf b/examples/full-cluster-tf-upgrade/1.23/aws-auth/providers.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/providers.tf rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/providers.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/region.tf b/examples/full-cluster-tf-upgrade/1.23/aws-auth/region.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/region.tf rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/region.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/settings.aws-auth.tf b/examples/full-cluster-tf-upgrade/1.23/aws-auth/settings.aws-auth.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/settings.aws-auth.tf rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/settings.aws-auth.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/tf-run.data b/examples/full-cluster-tf-upgrade/1.23/aws-auth/tf-run.data similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/tf-run.data rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/tf-run.data diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/variables.aws-auth.tf 
b/examples/full-cluster-tf-upgrade/1.23/aws-auth/variables.aws-auth.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/variables.aws-auth.tf rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/variables.aws-auth.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.23/aws-auth/variables.eks.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/variables.eks.tf rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/variables.eks.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/version.tf b/examples/full-cluster-tf-upgrade/1.23/aws-auth/version.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/version.tf rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/version.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/versions.tf b/examples/full-cluster-tf-upgrade/1.23/aws-auth/versions.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/aws-auth/versions.tf rename to examples/full-cluster-tf-upgrade/1.23/aws-auth/versions.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/bin/copy_image.sh b/examples/full-cluster-tf-upgrade/1.23/bin/copy_image.sh similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/bin/copy_image.sh rename to examples/full-cluster-tf-upgrade/1.23/bin/copy_image.sh diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/bin/fix-terminating-namespace.sh b/examples/full-cluster-tf-upgrade/1.23/bin/fix-terminating-namespace.sh similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/bin/fix-terminating-namespace.sh rename to examples/full-cluster-tf-upgrade/1.23/bin/fix-terminating-namespace.sh diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/bin/show-k8s-things.sh 
b/examples/full-cluster-tf-upgrade/1.23/bin/show-k8s-things.sh similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/bin/show-k8s-things.sh rename to examples/full-cluster-tf-upgrade/1.23/bin/show-k8s-things.sh diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/.tf-control b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/.tf-control similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/.tf-control rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/.tf-control diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/.tf-control.tfrc similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/.tf-control.tfrc rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/.tf-control.tfrc diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/README.md b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/README.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/README.md rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/README.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/RESULTS.md b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/RESULTS.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/RESULTS.md rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/RESULTS.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/cm.tf.off b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/cm.tf.off similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/cm.tf.off rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/cm.tf.off diff --git 
a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/data.eks-subdirectory.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/data.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/data.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/dba-clusterrole.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/dba-clusterrole.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/dba-clusterrole.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/dba-clusterrole.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/dba-rolebinding.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/dba-rolebinding.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/dba-rolebinding.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/dba-rolebinding.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/dba.iam.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/dba.iam.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/dba.iam.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/dba.iam.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/deployer-clusterrole.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/deployer-clusterrole.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/deployer-clusterrole.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/deployer-clusterrole.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/deployer-rolebinding.tf 
b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/deployer-rolebinding.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/deployer-rolebinding.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/deployer-rolebinding.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/deployer.iam.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/deployer.iam.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/deployer.iam.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/deployer.iam.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/kubeconfig.eks-subdirectory.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/kubeconfig.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/kubeconfig.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/locals.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/locals.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/locals.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/locals.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/main.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/main.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/main.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/main.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/prefixes.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/prefixes.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/prefixes.tf rename to 
examples/full-cluster-tf-upgrade/1.23/cluster-roles/prefixes.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/providers.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/providers.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/providers.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/providers.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/region.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/region.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/region.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/region.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/tf-run.data b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/tf-run.data similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/tf-run.data rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/tf-run.data diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/variables.auto.tfvars b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/variables.auto.tfvars similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/variables.auto.tfvars rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/variables.auto.tfvars diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/variables.eks.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/variables.eks.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/variables.eks.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/variables.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/variables.tf similarity index 100% 
rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/variables.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/variables.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/version.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/version.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/version.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/version.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/versions.tf b/examples/full-cluster-tf-upgrade/1.23/cluster-roles/versions.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/cluster-roles/versions.tf rename to examples/full-cluster-tf-upgrade/1.23/cluster-roles/versions.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/.gitignore b/examples/full-cluster-tf-upgrade/1.23/common-services/.gitignore similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/.gitignore rename to examples/full-cluster-tf-upgrade/1.23/common-services/.gitignore diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/.tf-control b/examples/full-cluster-tf-upgrade/1.23/common-services/.tf-control similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/.tf-control rename to examples/full-cluster-tf-upgrade/1.23/common-services/.tf-control diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.23/common-services/.tf-control.tfrc similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/.tf-control.tfrc rename to examples/full-cluster-tf-upgrade/1.23/common-services/.tf-control.tfrc diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/README.md 
b/examples/full-cluster-tf-upgrade/1.23/common-services/README.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/README.md rename to examples/full-cluster-tf-upgrade/1.23/common-services/README.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/README.output.md b/examples/full-cluster-tf-upgrade/1.23/common-services/README.output.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/README.output.md rename to examples/full-cluster-tf-upgrade/1.23/common-services/README.output.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/cert.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/cert.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/cert.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/cert.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/.helmignore b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/.helmignore similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/.helmignore rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/.helmignore diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/Chart.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/Chart.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/Chart.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/Chart.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/README.md 
b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/README.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/README.md rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/README.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/README.md.gotmpl b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/README.md.gotmpl similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/README.md.gotmpl rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/README.md.gotmpl diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/NOTES.txt b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/NOTES.txt similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/NOTES.txt rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/NOTES.txt diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/_helpers.tpl similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/_helpers.tpl rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/_helpers.tpl diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml 
b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/deployment.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/deployment.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/deployment.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/deployment.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/pdb.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/pdb.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/pdb.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/pdb.yaml diff --git 
a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/role.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/role.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/role.yaml 
rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/role.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/secret.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/secret.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/secret.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/secret.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/service.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/service.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/service.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/service.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/serviceaccount.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/serviceaccount.yaml new file mode 100644 index 0000000..29c2580 --- /dev/null +++ 
b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.rbac.create .Values.rbac.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- if .Values.rbac.serviceAccount.annotations }} + annotations: {{ toYaml .Values.rbac.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.rbac.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/values.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/values.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/cluster-autoscaler/values.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/cluster-autoscaler/values.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/intermediate-certificate-issuer/.helmignore b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/intermediate-certificate-issuer/.helmignore similarity index 100% rename from 
examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/intermediate-certificate-issuer/.helmignore rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/intermediate-certificate-issuer/.helmignore diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/intermediate-certificate-issuer/Chart.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/intermediate-certificate-issuer/Chart.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/intermediate-certificate-issuer/Chart.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/intermediate-certificate-issuer/Chart.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml 
b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/intermediate-certificate-issuer/values.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/intermediate-certificate-issuer/values.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/intermediate-certificate-issuer/values.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/intermediate-certificate-issuer/values.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/Chart.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/Chart.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/Chart.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/Chart.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/crds/crd-operator.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/crds/crd-operator.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/crds/crd-operator.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/crds/crd-operator.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/files/gen-operator.yaml 
b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/files/gen-operator.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/files/gen-operator.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/files/gen-operator.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/clusterrole.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/clusterrole.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/clusterrole.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/clusterrole.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/clusterrole_binding.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/clusterrole_binding.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/clusterrole_binding.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/clusterrole_binding.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/crds.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/crds.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/crds.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/crds.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/deployment.yaml 
b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/deployment.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/deployment.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/deployment.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/namespace.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/namespace.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/namespace.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/namespace.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/service.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/service.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/service.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/service.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/service_account.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/service_account.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/templates/service_account.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/templates/service_account.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/values.yaml 
b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/values.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-operator/values.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-operator/values.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-peerauthentication/.helmignore b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-peerauthentication/.helmignore similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-peerauthentication/.helmignore rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-peerauthentication/.helmignore diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-peerauthentication/Chart.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-peerauthentication/Chart.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-peerauthentication/Chart.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-peerauthentication/Chart.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-peerauthentication/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-peerauthentication/templates/_helpers.tpl similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-peerauthentication/templates/_helpers.tpl rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-peerauthentication/templates/_helpers.tpl diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml 
b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-peerauthentication/values.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-peerauthentication/values.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-peerauthentication/values.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-peerauthentication/values.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-profile/.helmignore b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-profile/.helmignore similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-profile/.helmignore rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-profile/.helmignore diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-profile/Chart.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-profile/Chart.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-profile/Chart.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-profile/Chart.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-profile/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-profile/templates/_helpers.tpl similarity index 100% 
rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-profile/templates/_helpers.tpl rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-profile/templates/_helpers.tpl diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-profile/templates/istiooperator.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-profile/templates/istiooperator.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-profile/templates/istiooperator.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-profile/templates/istiooperator.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-profile/values.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-profile/values.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/istio-profile/values.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/istio-profile/values.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/.helmignore b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/.helmignore similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/.helmignore rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/.helmignore diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/Chart.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/Chart.yaml similarity index 100% rename from 
examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/Chart.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/Chart.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml diff --git 
a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/values.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/values.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/self-signed-certificate-issuer/values.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/self-signed-certificate-issuer/values.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/.helmignore b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/.helmignore similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/.helmignore rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/.helmignore diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/Chart.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/Chart.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/Chart.yaml rename to 
examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/Chart.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml 
similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/values.yaml b/examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/values.yaml similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/charts/vault-certificate-issuer/values.yaml rename to examples/full-cluster-tf-upgrade/1.23/common-services/charts/vault-certificate-issuer/values.yaml diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/common-services.auto.tfvars 
b/examples/full-cluster-tf-upgrade/1.23/common-services/common-services.auto.tfvars similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/common-services.auto.tfvars rename to examples/full-cluster-tf-upgrade/1.23/common-services/common-services.auto.tfvars diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/copy_image.sh b/examples/full-cluster-tf-upgrade/1.23/common-services/copy_image.sh similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/copy_image.sh rename to examples/full-cluster-tf-upgrade/1.23/common-services/copy_image.sh diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/copy_images.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/copy_images.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/copy_images.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/copy_images.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/data.eks-subdirectory.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/data.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/data.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/dns.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/dns.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/dns.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/dns.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/kubeconfig.eks-subdirectory.tf similarity index 100% rename from 
examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/kubeconfig.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/kubeconfig.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/locals.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/locals.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/locals.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/locals.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/main.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/main.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/main.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/main.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/parent_rs.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/parent_rs.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/parent_rs.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/prefixes.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/prefixes.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/prefixes.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/prefixes.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/providers.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/providers.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/providers.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/providers.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/region.tf 
b/examples/full-cluster-tf-upgrade/1.23/common-services/region.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/region.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/region.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/tags.md b/examples/full-cluster-tf-upgrade/1.23/common-services/tags.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/tags.md rename to examples/full-cluster-tf-upgrade/1.23/common-services/tags.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/test-cluster-autoscaling.json b/examples/full-cluster-tf-upgrade/1.23/common-services/test-cluster-autoscaling.json similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/test-cluster-autoscaling.json rename to examples/full-cluster-tf-upgrade/1.23/common-services/test-cluster-autoscaling.json diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/tf-run.data b/examples/full-cluster-tf-upgrade/1.23/common-services/tf-run.data similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/tf-run.data rename to examples/full-cluster-tf-upgrade/1.23/common-services/tf-run.data diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/variables.common-services.auto.tfvars b/examples/full-cluster-tf-upgrade/1.23/common-services/variables.common-services.auto.tfvars similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/variables.common-services.auto.tfvars rename to examples/full-cluster-tf-upgrade/1.23/common-services/variables.common-services.auto.tfvars diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/variables.common-services.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/variables.common-services.tf 
similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/variables.common-services.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/variables.common-services.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/variables.eks.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/variables.eks.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/variables.eks.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/version.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/version.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/version.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/version.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/versions.tf b/examples/full-cluster-tf-upgrade/1.23/common-services/versions.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/common-services/versions.tf rename to examples/full-cluster-tf-upgrade/1.23/common-services/versions.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/create-iam-config.sh b/examples/full-cluster-tf-upgrade/1.23/create-iam-config.sh similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/create-iam-config.sh rename to examples/full-cluster-tf-upgrade/1.23/create-iam-config.sh diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/data.eks-main.tf b/examples/full-cluster-tf-upgrade/1.23/data.eks-main.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/data.eks-main.tf rename to examples/full-cluster-tf-upgrade/1.23/data.eks-main.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/dns-zone.tf 
b/examples/full-cluster-tf-upgrade/1.23/dns-zone.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/dns-zone.tf rename to examples/full-cluster-tf-upgrade/1.23/dns-zone.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/dns-zone.tf.cat b/examples/full-cluster-tf-upgrade/1.23/dns-zone.tf.cat similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/dns-zone.tf.cat rename to examples/full-cluster-tf-upgrade/1.23/dns-zone.tf.cat diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs-encryption.tf b/examples/full-cluster-tf-upgrade/1.23/ebs-encryption.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs-encryption.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs-encryption.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/.tf-control b/examples/full-cluster-tf-upgrade/1.23/ebs/.tf-control similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/.tf-control rename to examples/full-cluster-tf-upgrade/1.23/ebs/.tf-control diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.23/ebs/.tf-control.tfrc similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/.tf-control.tfrc rename to examples/full-cluster-tf-upgrade/1.23/ebs/.tf-control.tfrc diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/README.ebs.md b/examples/full-cluster-tf-upgrade/1.23/ebs/README.ebs.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/README.ebs.md rename to examples/full-cluster-tf-upgrade/1.23/ebs/README.ebs.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/README.md b/examples/full-cluster-tf-upgrade/1.23/ebs/README.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/README.md rename to 
examples/full-cluster-tf-upgrade/1.23/ebs/README.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/copy_image.sh b/examples/full-cluster-tf-upgrade/1.23/ebs/copy_image.sh similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/copy_image.sh rename to examples/full-cluster-tf-upgrade/1.23/ebs/copy_image.sh diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/copy_images.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/copy_images.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/copy_images.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/copy_images.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/data.eks-subdirectory.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/data.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/data.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/ecr.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/ecr.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/ecr.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/ecr.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/kubeconfig.eks-subdirectory.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/kubeconfig.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/kubeconfig.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/locals.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/locals.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/locals.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/locals.tf diff --git 
a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/main.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/main.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/main.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/main.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/parent_rs.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/parent_rs.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/parent_rs.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/prefixes.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/prefixes.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/prefixes.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/prefixes.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/providers.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/providers.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/providers.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/providers.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/region.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/region.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/region.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/region.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/role.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/role.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/role.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/role.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/tf-run.data b/examples/full-cluster-tf-upgrade/1.23/ebs/tf-run.data similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/tf-run.data rename to 
examples/full-cluster-tf-upgrade/1.23/ebs/tf-run.data diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/variables.ebs.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/variables.ebs.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/variables.ebs.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/variables.ebs.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/variables.eks.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/variables.eks.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/variables.eks.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/version.tf b/examples/full-cluster-tf-upgrade/1.23/ebs/version.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ebs/version.tf rename to examples/full-cluster-tf-upgrade/1.23/ebs/version.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/ec2-keypair.tf b/examples/full-cluster-tf-upgrade/1.23/ec2-keypair.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/ec2-keypair.tf rename to examples/full-cluster-tf-upgrade/1.23/ec2-keypair.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/.tf-control b/examples/full-cluster-tf-upgrade/1.23/efs/.tf-control similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/.tf-control rename to examples/full-cluster-tf-upgrade/1.23/efs/.tf-control diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.23/efs/.tf-control.tfrc similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/.tf-control.tfrc rename to examples/full-cluster-tf-upgrade/1.23/efs/.tf-control.tfrc diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/README.efs.md 
b/examples/full-cluster-tf-upgrade/1.23/efs/README.efs.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/README.efs.md rename to examples/full-cluster-tf-upgrade/1.23/efs/README.efs.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/README.md b/examples/full-cluster-tf-upgrade/1.23/efs/README.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/README.md rename to examples/full-cluster-tf-upgrade/1.23/efs/README.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/copy_image.sh b/examples/full-cluster-tf-upgrade/1.23/efs/copy_image.sh similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/copy_image.sh rename to examples/full-cluster-tf-upgrade/1.23/efs/copy_image.sh diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/copy_images.tf b/examples/full-cluster-tf-upgrade/1.23/efs/copy_images.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/copy_images.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/copy_images.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/efs/data.eks-subdirectory.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/data.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/data.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/ecr.tf b/examples/full-cluster-tf-upgrade/1.23/efs/ecr.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/ecr.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/ecr.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/efs.tf b/examples/full-cluster-tf-upgrade/1.23/efs/efs.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/efs.tf rename to 
examples/full-cluster-tf-upgrade/1.23/efs/efs.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/efs/kubeconfig.eks-subdirectory.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/kubeconfig.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/kubeconfig.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/locals.tf b/examples/full-cluster-tf-upgrade/1.23/efs/locals.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/locals.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/locals.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/main.tf b/examples/full-cluster-tf-upgrade/1.23/efs/main.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/main.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/main.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.23/efs/parent_rs.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/parent_rs.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/parent_rs.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/persistent-volume.tf b/examples/full-cluster-tf-upgrade/1.23/efs/persistent-volume.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/persistent-volume.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/persistent-volume.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/policy.tf b/examples/full-cluster-tf-upgrade/1.23/efs/policy.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/policy.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/policy.tf diff --git 
a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/prefixes.tf b/examples/full-cluster-tf-upgrade/1.23/efs/prefixes.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/prefixes.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/prefixes.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/providers.tf b/examples/full-cluster-tf-upgrade/1.23/efs/providers.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/providers.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/providers.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/region.tf b/examples/full-cluster-tf-upgrade/1.23/efs/region.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/region.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/region.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/role.tf b/examples/full-cluster-tf-upgrade/1.23/efs/role.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/role.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/role.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/tf-run.data b/examples/full-cluster-tf-upgrade/1.23/efs/tf-run.data similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/tf-run.data rename to examples/full-cluster-tf-upgrade/1.23/efs/tf-run.data diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/variables.efs.tf b/examples/full-cluster-tf-upgrade/1.23/efs/variables.efs.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/variables.efs.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/variables.efs.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.23/efs/variables.eks.tf similarity index 100% rename from 
examples/full-cluster-tf-upgrade/1.23.in-progress/efs/variables.eks.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/variables.eks.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/version.tf b/examples/full-cluster-tf-upgrade/1.23/efs/version.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/version.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/version.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/efs/versions.tf b/examples/full-cluster-tf-upgrade/1.23/efs/versions.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/efs/versions.tf rename to examples/full-cluster-tf-upgrade/1.23/efs/versions.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/eks-console-access.tf b/examples/full-cluster-tf-upgrade/1.23/eks-console-access.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/eks-console-access.tf rename to examples/full-cluster-tf-upgrade/1.23/eks-console-access.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/group.tf b/examples/full-cluster-tf-upgrade/1.23/group.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/group.tf rename to examples/full-cluster-tf-upgrade/1.23/group.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/includes.d/README.md b/examples/full-cluster-tf-upgrade/1.23/includes.d/README.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/includes.d/README.md rename to examples/full-cluster-tf-upgrade/1.23/includes.d/README.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/includes.d/data.eks-main.tf b/examples/full-cluster-tf-upgrade/1.23/includes.d/data.eks-main.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/includes.d/data.eks-main.tf rename to examples/full-cluster-tf-upgrade/1.23/includes.d/data.eks-main.tf diff 
--git a/examples/full-cluster-tf-upgrade/1.23.in-progress/includes.d/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/includes.d/data.eks-subdirectory.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/includes.d/data.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/includes.d/data.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/includes.d/kubeconfig.eks-main.tf b/examples/full-cluster-tf-upgrade/1.23/includes.d/kubeconfig.eks-main.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/includes.d/kubeconfig.eks-main.tf rename to examples/full-cluster-tf-upgrade/1.23/includes.d/kubeconfig.eks-main.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/includes.d/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/includes.d/kubeconfig.eks-subdirectory.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/includes.d/kubeconfig.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/includes.d/kubeconfig.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/includes.d/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.23/includes.d/parent_rs.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/includes.d/parent_rs.tf rename to examples/full-cluster-tf-upgrade/1.23/includes.d/parent_rs.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/.tf-control b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/.tf-control similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/.tf-control rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/.tf-control diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/.tf-control.tfrc similarity index 100% rename from 
examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/.tf-control.tfrc rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/.tf-control.tfrc diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/README.md b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/README.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/README.md rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/README.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/.tf-control b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/.tf-control similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/.tf-control rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/.tf-control diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/.tf-control.tfrc b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/.tf-control.tfrc similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/.tf-control.tfrc rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/.tf-control.tfrc diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/README.md b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/README.md similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/README.md rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/README.md diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/data.eks-subdirectory.tf similarity index 100% rename from 
examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/data.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/data.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/irsa-roles.autoscale.tf.off b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/irsa-roles.autoscale.tf.off similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/irsa-roles.autoscale.tf.off rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/irsa-roles.autoscale.tf.off diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/locals.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/locals.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/locals.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/locals.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/parent_rs.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/parent_rs.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/parent_rs.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/policy.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/policy.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/policy.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/policy.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/prefixes.tf 
b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/prefixes.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/prefixes.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/prefixes.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/providers.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/providers.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/providers.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/providers.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/region.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/region.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/region.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/region.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/role.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/role.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/role.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/role.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/service_account.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/service_account.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/service_account.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/service_account.tf diff --git 
a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/tf-run.data b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/tf-run.data similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/tf-run.data rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/tf-run.data diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/variables.eks.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/variables.eks.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/variables.eks.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/variables.irsa.auto.tfvars b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/variables.irsa.auto.tfvars similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/variables.irsa.auto.tfvars rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/variables.irsa.auto.tfvars diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/variables.irsa.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/variables.irsa.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/variables.irsa.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/variables.irsa.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/variables.tags.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/variables.tags.tf similarity index 100% rename from 
examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/variables.tags.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/variables.tags.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/version.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/version.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/version.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/version.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/versions.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/versions.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/cluster-autoscaler/versions.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/cluster-autoscaler/versions.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/data.eks-subdirectory.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/data.eks-subdirectory.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/data.eks-subdirectory.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/parent_rs.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/parent_rs.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/parent_rs.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/prefixes.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/prefixes.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/prefixes.tf rename to 
examples/full-cluster-tf-upgrade/1.23/irsa-roles/prefixes.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/providers.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/providers.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/providers.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/providers.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/region.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/region.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/region.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/region.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/tf-run.data b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/tf-run.data similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/tf-run.data rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/tf-run.data diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/variables.eks.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/variables.eks.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/variables.eks.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/variables.irsa.auto.tfvars b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/variables.irsa.auto.tfvars similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/variables.irsa.auto.tfvars rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/variables.irsa.auto.tfvars diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/variables.irsa.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/variables.irsa.tf similarity index 100% rename from 
examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/variables.irsa.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/variables.irsa.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/variables.tags.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/variables.tags.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/variables.tags.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/variables.tags.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/version.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/version.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/version.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/version.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/versions.tf b/examples/full-cluster-tf-upgrade/1.23/irsa-roles/versions.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/irsa-roles/versions.tf rename to examples/full-cluster-tf-upgrade/1.23/irsa-roles/versions.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/kubeconfig.eks-main.tf b/examples/full-cluster-tf-upgrade/1.23/kubeconfig.eks-main.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/kubeconfig.eks-main.tf rename to examples/full-cluster-tf-upgrade/1.23/kubeconfig.eks-main.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/main.tf b/examples/full-cluster-tf-upgrade/1.23/main.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/main.tf rename to examples/full-cluster-tf-upgrade/1.23/main.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/oidc.tf b/examples/full-cluster-tf-upgrade/1.23/oidc.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/oidc.tf rename to 
examples/full-cluster-tf-upgrade/1.23/oidc.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/outputs.tf b/examples/full-cluster-tf-upgrade/1.23/outputs.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/outputs.tf rename to examples/full-cluster-tf-upgrade/1.23/outputs.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/policy.tf b/examples/full-cluster-tf-upgrade/1.23/policy.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/policy.tf rename to examples/full-cluster-tf-upgrade/1.23/policy.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/prefixes.tf b/examples/full-cluster-tf-upgrade/1.23/prefixes.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/prefixes.tf rename to examples/full-cluster-tf-upgrade/1.23/prefixes.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/providers.tf b/examples/full-cluster-tf-upgrade/1.23/providers.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/providers.tf rename to examples/full-cluster-tf-upgrade/1.23/providers.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/region.tf b/examples/full-cluster-tf-upgrade/1.23/region.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/region.tf rename to examples/full-cluster-tf-upgrade/1.23/region.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/role.tf b/examples/full-cluster-tf-upgrade/1.23/role.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/role.tf rename to examples/full-cluster-tf-upgrade/1.23/role.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/saml.tf b/examples/full-cluster-tf-upgrade/1.23/saml.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/saml.tf rename to examples/full-cluster-tf-upgrade/1.23/saml.tf diff --git 
a/examples/full-cluster-tf-upgrade/1.23.in-progress/securitygroup.tf b/examples/full-cluster-tf-upgrade/1.23/securitygroup.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/securitygroup.tf rename to examples/full-cluster-tf-upgrade/1.23/securitygroup.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/settings.auto.tfvars.example b/examples/full-cluster-tf-upgrade/1.23/settings.auto.tfvars.example similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/settings.auto.tfvars.example rename to examples/full-cluster-tf-upgrade/1.23/settings.auto.tfvars.example diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/setup-env.sh b/examples/full-cluster-tf-upgrade/1.23/setup-env.sh similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/setup-env.sh rename to examples/full-cluster-tf-upgrade/1.23/setup-env.sh diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/templates/node-private-userdata.tmpl b/examples/full-cluster-tf-upgrade/1.23/templates/node-private-userdata.tmpl similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/templates/node-private-userdata.tmpl rename to examples/full-cluster-tf-upgrade/1.23/templates/node-private-userdata.tmpl diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/tf-run.data b/examples/full-cluster-tf-upgrade/1.23/tf-run.data similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/tf-run.data rename to examples/full-cluster-tf-upgrade/1.23/tf-run.data diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/variables.dns.tf b/examples/full-cluster-tf-upgrade/1.23/variables.dns.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/variables.dns.tf rename to examples/full-cluster-tf-upgrade/1.23/variables.dns.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/variables.eks.tf 
b/examples/full-cluster-tf-upgrade/1.23/variables.eks.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/variables.eks.tf rename to examples/full-cluster-tf-upgrade/1.23/variables.eks.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/variables.tags.tf b/examples/full-cluster-tf-upgrade/1.23/variables.tags.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/variables.tags.tf rename to examples/full-cluster-tf-upgrade/1.23/variables.tags.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/variables.vpc.auto.tfvars.make-link b/examples/full-cluster-tf-upgrade/1.23/variables.vpc.auto.tfvars.make-link similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/variables.vpc.auto.tfvars.make-link rename to examples/full-cluster-tf-upgrade/1.23/variables.vpc.auto.tfvars.make-link diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/variables.vpc.tf.make-link b/examples/full-cluster-tf-upgrade/1.23/variables.vpc.tf.make-link similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/variables.vpc.tf.make-link rename to examples/full-cluster-tf-upgrade/1.23/variables.vpc.tf.make-link diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/version.tf b/examples/full-cluster-tf-upgrade/1.23/version.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/version.tf rename to examples/full-cluster-tf-upgrade/1.23/version.tf diff --git a/examples/full-cluster-tf-upgrade/1.23.in-progress/versions.tf b/examples/full-cluster-tf-upgrade/1.23/versions.tf similarity index 100% rename from examples/full-cluster-tf-upgrade/1.23.in-progress/versions.tf rename to examples/full-cluster-tf-upgrade/1.23/versions.tf diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/.gitignore b/examples/full-cluster-tf-upgrade/1.24.in-progress/.gitignore deleted file mode 100644 index f416fe8..0000000 
--- a/examples/full-cluster-tf-upgrade/1.24.in-progress/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -kube.config -ecr-login.txt -setup/ec2-ssh-eks-* -!setup/ec2-ssh-eks-*.pub diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/README.md b/examples/full-cluster-tf-upgrade/1.24.in-progress/README.md deleted file mode 100644 index 67e8746..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/README.md +++ /dev/null @@ -1,514 +0,0 @@ -# EKS Full Cluster Example - -This has been updated from 1.21 to 1.22 as the default EKS/K8s version. It is as simple as changing the version -in `settings.auto.tfvars`. Thanks to ADSD for the work on that. For a document on how to upgrade from 1.21 to -1.22, see [here](https://github.e.it.census.gov/terraform/cloud-information/blob/master/aws/documentation/containers/eks/upgrade-1.21-1.22.md) - -## About - -There are a number of steps to end up with a cluster. - -1. From main repository, in the same `vpc/{region}/vpc{number}` directory - 1. [Tag subnets](#subnet-tagging) in main repository (before creating nodegroup) - 1. [Copy variables.vpc.*](#copy-variable-settings) from main respository in the same `vpc/{region}/vpc{number}` - 1. Copy the [includes.d structure](#copy-includesd) -1. In the submodule repository, in the `vpc/{region}/vpc{number}/apps/{clustername}` directory - 1. Update `settings.auto.tfvars` - 1. Update `includes.d/parent_rs.tf` - 1. Initialize [Cluster Main](#initialize-cluster-main) directory - 1. Create [policies](#policies) - 1. Create [EC2 Keypair](#keypair-creation) - 1. Finish [cluster setup](#cluster-creation) -1. Setup [aws-auth](#setup-aws-auth) -1. Setup [EFS](#setup-efs) - -## Post-Setup Tasks - -1. Connect DNS zone from on-prem to Route53 Resolvers with a forwarder - -## Subnet Tagging - -A tag needs to be added to the subnet(s) where the cluster will run. We haven't figured out yet how to incorporate this more -automatically. 
- -The file to update is the `variable.subnets.auto.tfvars`, in this case `vpc/east/vpc3/variables.subnets.auto.tfvars`: - -```hcl -private_subnets = [ - { base_cidr = "10.188.18.0/23", label = "private-lb", bits = 2, private = true, - tags = { "kubernetes.io/role/internal-elb" = 1 } - }, - { base_cidr = "10.188.17.0/24", label = "endpoints", bits = 2, private = true, tags = {} }, - { base_cidr = "10.188.20.0/23", label = "db", bits = 2, private = true, tags = {} }, - { base_cidr = "10.188.22.0/23", label = "apps", bits = 2, private = true, tags = {} }, - { base_cidr = "10.188.24.0/21", label = "container", bits = 2, private = true, - tags = { - "kubernetes.io/cluster/org-project-env" = "shared" - }, - } -# space all used up -] -``` - -We add the tag `"kubernetes.io/cluster/{cluster_name}" = "shared"` in order for the node groups to pick up the -cluster subnets. This is on the new `container` ubnet. - -For creating a service which uses load balancers (ELB, ALB, or NLB), the last tag listed here is needed -`"kubernetes.io/role/internal-elb" = 1`. This is only one tag for all EKS, not one per cluster, and it should apply -to the subnet(s) for load balancing. A separate set of subnets exist for load balacning, with a name including `private-lb`. - - -## Copy Variable Settings when in a submodule repo - -We need the `variables.vpc.tf` and `variables.vpc.auto.tfvars` from the main repository. These are not to be modified in -this submodule. - -```shell -cd MAIN-REPOSITORY -MAINTOP=$(git rev-parse --show-toplevel) -cd applications/{APPNAME} -cd vpc/{region}/vpc{number} -for f in $(ls $MAINTOP/vpc/{region}/vpc{number}/variables.vpc*) - do - cp $f ./ -done -``` - -Replace {region} and {number} and {APPNAME} with the correct values. 
- -## Link Variable Settings when in the main account repo - -Link these files from the `vpc/{region}/vpc{number}/` dirctory: - -* variables.vpc.tf -* variables.vpc.auto.tfvars - -## Copy includes.d when in a submodule repo - -This makes a copy of the entire `MAIN/includes.d` structure in the submodule, for use as soft links to bring in -application variables for tagging. - -```shell -cd MAIN-REPOSITORY -MAINTOP=$(git rev-parse --show-toplevel) -cd applications/{APPNAME} -rsync -avRWH $MAINTOP/./includes.d ./ -``` - -Replace {APPNAME} with the correct value. - -## Links includes.d when in the main account repo - -If thre is an existing `MAIN/includes.d/` path for the specific application variables you wish to apply, -make a link to it as appropraite. - -## Update the settings.auto.tfvars file - -Set the appropriate values in the `settings.auto.tfvars` file. An example starter file is at `settings.auto.tfvars.example`. - -Here is a sample file: - -```hcl -cluster_name = "org-project-env -cluster_version = "1.21" -region = "us-gov-east-1" -domain = "org-project-env.env.domain.census.gov" -eks_instance_disk_size = 40 -eks_vpc_name = "*vpcshortname*" -eks_instance_type = "t3.xlarge" -eks_ng_desire_size = 3 -eks_ng_max_size = 15 -eks_ng_min_size = 3 -``` - -You need to change these values: - -* cluster_name: put in the proper org, project, and environment. Cluster names should not be replicated across the environment. -These are tracked in the repo cloud-information/aws/documentation/containers/ (fix link). -* region: include the correct region. This really is a duplicate of the `region` variable, so it may be removed in the future. -* domain: this is the domain name of the clsuter, consisting of the cluster name and the proper domain name for the environment/VPC. -* eks_vpc_name: replace *vpcshortname* with the appropriate vpc name. This is used to find the vpc ID. This will be fixed at a later date. - -All the others are subject to your configuration. 
They are a good starting point, but can vary. - -## Update the includes.d/parent_rs.tf file - -```hcl -locals { - parent_rs = data.terraform_remote_state.vpc_{region}_vpc{number}_apps_eks-{cluster-name}.outputs -} -``` - -* region: west or east, dependent on which region the VPC is in -* number: incremental VPC number -* cluster-name: cluster name, the same as used in the `settings.auto.tfvars` file above - -# Terraform Automated Setup - -A `tf-run.data` file exists here, so the simplest way to implemnt is with the `tf-run.sh` script. - -* copy the `remote_state.yml` from the parent and update `directory` to be the current directory -* run the tf-run.sh - -```console -% tf-run.sh apply -``` - -* example of the `tf-run.sh` steps - -This is part of a larger cluster configuration, so at the end of the run it indicates another directory -to visit when done. - -```console -% tf-run.sh list -* running action=plan -* START: tf-run.sh v1.1.2 start=1636562594 end= logfile=logs/run.plan.20211110.1636562594.log (not-created) -* reading from tf-run.data -* read 22 entries from tf-run.data -> list -** START: start=1636562594 -* 1 COMMENT> make sure the private-lb subnet and container subnets are tagged properly (see README.md) -* 2 STOP> then continue with at step 3 -* 3 COMMAND> tf-directory-setup.py -l none -f -* 4 COMMAND> setup-new-directory.sh -* 5 COMMAND> tf-init -upgrade -* 6 POLICY> (*.tf) aws_iam_policy.nlb-policy aws_iam_policy.cloudwatch-policy aws_iam_policy.cluster-admin-policy aws_iam_policy.cluster-admin_assume_policy -* 6 tf-plan -target=aws_iam_policy.nlb-policy -target=aws_iam_policy.cloudwatch-policy -target=aws_iam_policy.cluster-admin-policy -target=aws_iam_policy.cluster-admin_assume_policy -* 7 COMMENT> EC2 key pairs -* 8 tf-plan -target=null_resource.generate_keypair -* 9 tf-plan -target=aws_key_pair.cluster_keypair -* 10 COMMAND> tf-directory-setup.py -l s3 -* 11 COMMENT> be sure to add the setup/ec2-ssh-eks-{cluster} to git-secret, git-secret hide, 
add the setup/*secret and setup/*pub got git, and commit the entirety of the change -* 12 tf-plan -* 13 COMMENT> setup the includes.d/parent_rs.tf according to the includes.d/README -* 14 STOP> -* 15 COMMENT> cd aws-auth and tf-run.sh apply -* 16 STOP> -* 17 COMMENT> cd efs and tf-run.sh apply -* 18 STOP> -* 19 COMMENT> cd irsa-roles and tf-run.sh apply -* 20 STOP> -* 21 COMMENT> cd common-services and tf-run.sh apply -* 22 STOP> -** END: start=1636562594 end=1636562594 elapsed=0 logfile=logs/run.plan.20211110.1636562594.log (not-created) -``` - -It is highly recommended to use the `tf-run.sh` approach. This has a number of stopping points along the way with comments telling you what to do. -It also directs you to the subdirectories to visit to complete the setup. - -# Terraform Manual Setup - -## Initialize Cluster Main - -We need to setup the main directory for the cluster. Be sure `remote_state.yml` is correct. Then: - -```shell -tf-directory-setup.py -l none -tf-init -``` - -## Policies - -First, we have to create the two polices. The roles will not get created until they do. - -```shell -TFTARGET=$(grep ^res.*iam_policy *.tf |awk '{print "-target=" $2 "." $3}'|sed -e 's/"//g') -tf-plan $TFTARGET -tf-apply $TFTARGET -unset TFTARGET -``` - -## Keypair Creation - -We need to create the SSH key, which then allows for the public key to be uploaded. - -```shell -tf-plan -target=null_resource.generate_keypair -tf-apply -target=null_resource.generate_keypair - -tf-plan -target=aws_key_pair.cluster_keypair -tf-apply -target=aws_key_pair.cluster_keypair -``` - -## Cluster Creation - -One created, we can run the rest of the code - -```shell -tf-plan -tf-apply -``` - -Finalize by linking to the remote state file: - -```shell -tf-directory-setup.py -l s3 -``` - -## Setup aws-auth - -Be sure `remote_state.yml` is correct. Examine the `settings.aws-auth.tfvars` and replace any remote state references to the proper -objects. There is at least one, a `rolearn`. 
You can get the remote state path with - -```shell -grep ^data remote_state.*{clustername}.tf | awk '{print $1 "." $2 "." $3}' |sed -e 's/"//g' -``` - -Next, we setup the remote state files, link to the parent remote state, and initialize terraform. - -```shell -tf-directory-setup.py -l none -# should only be one file here -ln -s ../remote_state.applications_apps-adsd-eks_vpc_east_vpc2_apps_*.tf . -setup-new-directory.sh -tf-init -``` - -Then, we can plan, apply, and finalize: - -```shell -tf-pan -tf-apply -tf-directory-setup.py -l s3 -``` - -## Setup EFS - -Be sure `remote_state.yml` is correct. Examine the `main.tf` and replace any remote state references to the proper -objects. You can find where they are used: - -```console -% grep data.terraform_remote_state *.tf -main.tf: vpc_id = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_vpc_id -main.tf: subnet_ids = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_subnet_ids -main.tf: cluster_worker_sg_id = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_worker_sg_id -main.tf: oidc_provider_url = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.oidc_provider_url -main.tf: oidc_provider_arn = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.oidc_provider_arn -``` - -Find the value to replace these with: - -```shell -grep ^data remote_state.*{clustername}.tf | awk '{print $1 "." $2 "." $3}' |sed -e 's/"//g' -``` - -Next, we setup the remote state files, link to the parent remote state, and initialize terraform. - -```shell -tf-directory-setup.py -l none -# should only be one file here -ln -s ../remote_state.applications_apps-adsd-eks_vpc_east_vpc2_apps_*.tf . -setup-new-directory.sh -``` - -Then, we have to create the polices. The roles will not get created until they do. 
- -```shell -TFTARGET=$(grep ^res.*iam_policy *.tf |awk '{print "-target=" $2 "." $3}'|sed -e 's/"//g') -tf-plan $TFTARGET -tf-apply $TFTARGET -unset TFTARGET -``` - -Finally, you can apply the rest: - - -```shell -tf-plan -tf-apply -``` - -## Common Services -### Certificate Authority - -Set the download to `false` - -```shell -# ca-cert.tf - ca_cert_download = false -``` - -Do the first apply, which generates the key and csr. You'll need to then submit the CSR. (directions generated) - -```shell -tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g') -``` - - -```shell -# terraform taint null_resource.ca_cert[0] -# # (wait for submitted cert to be ready) -tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g') -tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g') -``` - -### Rest of Setup - -```shell -tf-plan -tf-apply -tf-directory-setup.py -l s3 -``` - -## Access to the cluster - -There are two ways to access the cluster. One is from the AWS Console and the other is via the IAM account or role. - -The cluster access vi console is found in the EKS section, under *clusters*. - -For IAM access, one must have IAM account credentials configured in `$HOME/.aws/credentials` and `$HOME/.aws/config`. [Here](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html) -are the docs, and we have an example below. Region is important, otherwise it defaults to `us-gov-west-1` and the STS will fail. - -```script -# $HOME/.aws/credentials -[252960665057-ma6-gov] -aws_access_key_id = ABCD1234... -aws_secret_access_key = abcd5678... 
- -# $HOME/.aws/config -[profile 252960665057-ma6-gov-eks-org-project-env] -source_profile = 252960665057-ma6-gov -region = us-gov-east-1 -role_arn = arn:aws-us-gov:iam::252960665057:role/r-eks-org-project-env-cluster-admin -role_session_name = badra001 -``` - -With this configuration, using the proifle `252960665057-ma6-gov` gives you the normal IAM access - -```console -% aws --profile 252960665057-ma6-gov sts get-caller-identity -{ - "UserId": "AIDATVZNBNXQ5UPHMBGPY", - "Account": "252960665057", - "Arn": "arn:aws-us-gov:iam::252960665057:user/a-badra001" -} -``` - -Using the other profile will use the source profile (which has to have permission to assume the role), the role arn, and a session -name mapping it back to your Census username (JBID). - -```console -% aws --profile 252960665057-ma6-gov-eks-org-project-env sts get-caller-identity -{ - "UserId": "AROATVZNBNXQ7AV7W2ISZ:badra001", - "Account": "252960665057", - "Arn": "arn:aws-us-gov:sts::252960665057:assumed-role/r-eks-org-project-env-cluster-admin/badra001" -} -``` - ------ -OLD LAB SETUP ------ - -# Cluster Setup - -## Download Configuration - -Now that the cluster is created, we need the `kubectl` command and to download the configuration. 
- -* get [kubectl](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) - -```console -% aws eks --profile $(get-profile) --region $(get-region) update-kubeconfig --name test2 --kubeconfig ./test2.kube.config -Added new context arn:aws:eks:us-east-1:079788916859:cluster/test2 to /data/git-repos/terraform/079788916859-do2-cat_apps-adsd-eks/vpc/east-1/vpc4/apps/eks-test2/test2.kube.config -% export KUBECONFIG=$(pwd)/test2.kube.config -% kubectl get nodes -NAME STATUS ROLES AGE VERSION -ip-10-194-24-49.ec2.internal Ready 24m v1.20.4-eks-6b7464 -ip-10-194-24-90.ec2.internal Ready 24m v1.20.4-eks-6b7464 -ip-10-194-25-120.ec2.internal Ready 24m v1.20.4-eks-6b7464 -ip-10-194-26-252.ec2.internal Ready 24m v1.20.4-eks-6b7464 -``` - -## Authentication - -### Automated - -This is in theh `aws-auth` subdirectory. - -```shell -cd aws-auth -tf-init -tf-plan -tf-apply -``` - -### Manual - -To allow users and roles to manipulate the cluster, we add to the mapRole or mapUsera. - -```shell -kubectl edit -n kube-system configmap/aws-auth -``` - -Add sections for `mapRoles`: - -```yaml - mapRoles: | - - rolearn: arn:aws:iam::079788916859:role/r-inf-cloud-admin - username: system:node:{{EC2PrivateDNSName}} - groups: - - system:bootstrappers - - system:nodes - - eks-console-dashboard-full-access-group -``` - -Add sections for `mapUsers`: - -```yaml - mapUsers: | - - userarn: arn:aws:iam::079788916859:user/u-zawac002 - username: admin - groups: - - system:masters -``` - -We will like want to do this through templating. 
- -* users - * arn:aws:iam::079788916859:user/u-badra001 - * arn:aws:iam::079788916859:user/u-ashle001 - * arn:aws:iam::079788916859:user/u-mcgin314 - * arn:aws:iam::079788916859:user/u-sall0002 - * arn:aws:iam::079788916859:user/u-zawac002 -* roles - * arn:aws:iam::079788916859:role/r-inf-cloud-admin - * arn:aws:iam::079788916859:role/r-adsd-cumulus - * arn:aws:iam::079788916859:role/r-adsd-eks - * arn:aws:iam::079788916859:role/r-adsd-tools - -## Adding Cluster Roles for AWS Console - -To allow [console access](https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml), we need these steps. - -It requires the cluster to be up and the `{clustername}.kube.config` file to exist along with the environment variable pointing to it. - -### Automated - -This appies just the full access cluste role, as the restricted one needs additional configuration. - -```shell -tf-apply -target=null_resource.apply_cluster_roles -``` - -### Manual - -```shell -curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml -curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml -``` - -For full console, we'll use the first one. - -```console -% kubectl apply -f eks-console-full-access.yaml -clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding created -``` - - -# Details - - - - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/ROLES.md b/examples/full-cluster-tf-upgrade/1.24.in-progress/ROLES.md deleted file mode 100644 index 3880590..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/ROLES.md +++ /dev/null @@ -1,119 +0,0 @@ -# Roles - -There are several types of roles we handle within the EKS cluster. - -1. IAM Role for Service Account (IRSA) -These roles involve an IAM role with a formatted name of r-eks-{cluster}-irsa__{k8snamespace}__{k8suser}. 
This will -grant approproriate IAM permissions to a pod. It includes specific conditions for the local OIDC provider mapping to -system:serviceaccount:{k8snamespace}:{k8suse}. This is super important because the pod inherits the permissions -of the node group, which grants far too much access to the running pods. These are not mapped into the ConfigMap aws-auth. - -A default:default will exists which grants little to no AWS permissions. - -1. Cluster Admin Role -This role is used for the cluster administration. It is of the form r-eks-{cluster}-cluster-admin. It has read access to the -[EKS Console](https://console.amazonaws-us-gov.com/eks/home). It has: -* access to read and write ECR for the specific repositories used for the cluster at /eks/{clustername} -* access to the EKS API for the cluster -* can download the kube.config file -* is mapped with the ConfigMap aws-auth into k8suser admin and k8sgroup system:masters -* permissions to update the node groups (via cli) -* others as discovered - -Users will use this role through the use of STS:AssumeRole either with the console or CLI. - -1. Additional Application Roles -These will be for granting access to clusterroles via namespace and k8suser to IAM or SAML users. They will take the form -r-eks-{cluster}-{name} where name should consider some portion of the namespace and purpose, and the name cannot be one of the existing -roles already in existence. These will typically not need any AWS Access beyond that of the update-config or get-token to obtain -the configuration file. These will require a clusterrole and clusterrolebinding, and will need a username to go along with them. -See [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) and [here](https://aws.amazon.com/premiumsupport/knowledge-center/eks-iam-permissions-namespaces/) -for details about this. 
The configuration file to create this (yaml) will be stored in github, and ideally, it will be created through the use of Terraform to be able -to easily add these as needed. - -Users will use this role through the use of STS:AssumeRole either with the console or CLI. - -## IRSA Roles - -```hcl - condition { - test = "StringEquals" - variable = "${local.oidc_provider_url}:sub" - values = ["system:serviceaccount:${local.app2_namespace}:${local.app2_name}"] - } -``` - -* irsa-roles.aws-cli.tf -* irsa-roles.cumulus.tf -* irsa-roles.jenkins.tf - - -## Cluster Admin Role - -## Additional Application Roles - -## cumulus-dba -## cumulus-deployer -## cicd-deployer - -## jenkins - -* Tool: Jenkins -* Purpose: Used for CICD Pipeline - * build images - * copy images - * deploy pods - * deploy services - * other things as necessary -* Source System: VM on-prem -* AWS Access - * IAM Service account tied to the cluster name - * s-eks-{cluster}-cicd - * permissions to read and write ECR * but NOT eks/{clustername} - * permission to eks get-token - * permission to eks update-cluster (get kubeconfig) -* Kubernetes Access - * Username - * recommend the same pattern: eks-{cluster}-cicd - * Group - * group names needed - * Permissions - * defined in K8S thing .. - * Files for configuration of K8S - * yml: - * tf: - -# AWS Commands - -```shell -aws eks get token -aws eks update-config -``` - -## CICD - -There are a number of ways to handle the CICD pipline. How in part depends on whether it runs outside of the cluster or inside of the cluster. These - -* service account for CICD (say, s-adsd-cicd-deployer) with full permissions to ECR and to get eks config and token along with k8s permissions through -ConfigMap aws-auth. -* role for CICD per cluster, say r-eks-{cluster}-cicd-deployer with same permissions above. -* These are all account specific, so running CICD across multiple accounts will need multiple IAM accounts and roles. 
-* consider some central way of doing this so a CICD can deploy to any cluster in any account in any region. -* perhaps start with a smaller per cluster user/role and work towards a better solution later - -# TBD - -1. Determine how to create a default:default IRSA role which grants little to no AWS permissions (maybe sts get-caller-identity). -1. Create a module for IRSA -1. Explore the use of the OIDC integration with Access Manager -1. Develop a strategy for CICD access - -# Links - -* [AWS RBAC](https://aws.amazon.com/premiumsupport/knowledge-center/eks-iam-permissions-namespaces/) -* [K8S RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) -* [Add User Role](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html) -* [OIDC Identity Provider](https://docs.aws.amazon.com/eks/latest/userguide/authenticate-oidc-identity-provider.html) -* [OIDC with MicroFocus](https://community.microfocus.com/cyberres/accessmanager/w/access_manager_tips/27815/access-amazon-web-services-using-amazon-cognito-for-mobile-applications-and-netiq-access-manager-4-1) - - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/README.md b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/README.md deleted file mode 100644 index e7a638f..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# aws-auth - -This sets up the the `aws-auth` ConfigMap for Kubernetes as needed in part of the cluster configuration. - -## Links - -## Terraform Automated - -A `tf-run.data` file exists here, so the simplest way to implemnt is with the `tf-run.sh` script. - -* copy the `remote_state.yml` from the parent and update `directory` to be the current directory -* run the tf-run.sh - -```console -% tf-run.sh apply -``` - -* example of the tf-run.sh`steps - -This is part of a larger cluster configuration, so at the end of the run it indicates another directory -to visit when done. 
- -```console -% tf-run.sh list -** END: start=1636558187 end=1636558187 elapsed=0 logfile=logs/run.plan.20211110.1636558187.log (not-created) -* running action=plan -* START: tf-run.sh v1.1.2 start=1636558903 end= logfile=logs/run.plan.20211110.1636558903.log (not-created) -* reading from tf-run.data -* read 6 entries from tf-run.data -> list -** START: start=1636558903 -* 1 COMMAND> tf-directory-setup.py -l none -f -* 2 COMMAND> setup-new-directory.sh -* 3 COMMAND> tf-init -upgrade -* 4 tf-plan -* 5 COMMAND> tf-directory-setup.py -l s3 -* 6 STOP> cd ../efs and tf-run.sh apply -** END: start=1636558903 end=1636558903 elapsed=0 logfile=logs/run.plan.20211110.1636558903.log (not-created) -``` - -It is highly recommended to use the `tf-run.sh` approach. - -## Terraform Manual - -First, copy the `remote_state.yml` from the parent and update `directory` to be the current directory. - -```shell -tf-directory-setup.py -l none -setup-new-directory.sh -tf-init -```` - -* Apply the rest - -```shell -tf-apply -tf-directory-setup.py -l s3 -``` - -## Post Setup Examination - -Your `kubectl` configuration file needs to be setup (one is extracted in `setup/kube.config` as part of this configuration). 
- -```console -% kubectl --kubeconfig setup/kube.config get configmap -n kube-system aws-auth -NAME DATA AGE -aws-auth 2 44d -``` diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/aws-auth.auto.tfvars b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/aws-auth.auto.tfvars deleted file mode 100644 index 6898918..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/aws-auth.auto.tfvars +++ /dev/null @@ -1,28 +0,0 @@ -aws_auth_users = [ - { - userarn = "" - aws_username = "a-ashle001" - username = "admin" - groups = ["system:masters", "eks-console-dashboard-full-access-group"] - }, - { - userarn = "" - aws_username = "a-badra001" - username = "admin" - groups = ["system:masters", "eks-console-dashboard-full-access-group"] - }, -] -aws_auth_roles = [ - { - rolearn = "" - aws_rolename = "r-inf-cloud-admin" - username = "admin" - groups = ["system:masters", "eks-console-dashboard-full-access-group"] - }, - { - rolearn = "" - aws_rolename = "r-inf-terraform" - username = "admin" - groups = ["system:masters", "eks-console-dashboard-full-access-group"] - }, -] diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/config_map.aws-auth.yaml.tpl b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/config_map.aws-auth.yaml.tpl deleted file mode 100644 index 7c58ada..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/config_map.aws-auth.yaml.tpl +++ /dev/null @@ -1,17 +0,0 @@ -data: -%{ if length(roles) > 0 } - mapRoles: | - %{ for k, v in roles ~} - - rolearn: ${v.rolearn} - username: ${v.username} - groups: ${v.groups} - %{ endfor ~} -%{ endif } -%{ if length(users) > 0 } - mapUsers: | - %{ for k, v in users ~} - - userarn: ${v.userarn} - username: ${v.username} - groups: ${v.groups} - %{ endfor ~} -%{ endif } diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/data.eks-subdirectory.tf 
b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/data.eks-subdirectory.tf deleted file mode 120000 index 43b5430..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/data.eks-subdirectory.tf +++ /dev/null @@ -1 +0,0 @@ -../includes.d/data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/kubeconfig.eks-subdirectory.tf deleted file mode 120000 index e3750a4..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/kubeconfig.eks-subdirectory.tf +++ /dev/null @@ -1 +0,0 @@ -../includes.d/kubeconfig.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/patch-aws-auth.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/patch-aws-auth.tf deleted file mode 100644 index 88e0bbe..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/patch-aws-auth.tf +++ /dev/null @@ -1,135 +0,0 @@ -data "kubernetes_config_map" "aws-auth" { - metadata { - namespace = "kube-system" - name = "aws-auth" - } -} - -data "aws_iam_user" "auth_users" { - for_each = toset([for u in local.joined_auth_users : u.aws_username]) - user_name = each.key -} - -data "aws_iam_role" "auth_roles" { - for_each = toset([for r in local.joined_auth_roles : r.aws_rolename]) - name = each.key -} - - -locals { - existing_roles_string = lookup(data.kubernetes_config_map.aws-auth.data, "mapRoles", "") - existing_users_string = lookup(data.kubernetes_config_map.aws-auth.data, "mapUsers", "") - - existing_roles = local.existing_roles_string != "" ? yamldecode(local.existing_roles_string) : [] - existing_users = local.existing_users_string != "" ? 
yamldecode(local.existing_users_string) : [] - - joined_auth_users = concat(local.aws_auth_users, var.aws_auth_users) - joined_auth_roles = concat(local.aws_auth_roles, var.aws_auth_roles) - - mapped_auth_users = [for u in local.joined_auth_users : { - userarn = data.aws_iam_user.auth_users[u.aws_username].arn - aws_username = u.aws_username - username = u.username - groups = u.groups - }] - mapped_auth_roles = [for u in local.joined_auth_roles : { - rolearn = data.aws_iam_role.auth_roles[u.aws_rolename].arn - aws_rolename = u.aws_rolename - username = u.username - groups = u.groups - }] - - merged_users = merge( - { for user in local.existing_users : user.userarn => user }, - # { for user in local.aws_auth_users : user.userarn => user }, - # { for user in var.aws_auth_users : user.userarn => user } - { for user in local.mapped_auth_users : user.userarn => user }, - ) - - merged_roles = merge( - { for role in local.existing_roles : role.rolearn => role }, - # { for role in local.aws_auth_roles : role.rolearn => role }, - # { for role in var.aws_auth_roles : role.rolearn => role } - { for role in local.mapped_auth_roles : role.rolearn => role }, - ) - - # patch = yamlencode({ - # "data" = { - # "mapUsers" = values(local.merged_users) - # "mapRoles" = values(local.merged_roles) - # } - # }) - patch = < 0~} - mapRoles: | -%{for k, v in local.merged_roles~} - - rolearn: ${v.rolearn} - username: ${v.username} - groups: -%{for g in v.groups~} - - ${g} -%{endfor~} -%{endfor~} -%{endif~} -%{if length(local.merged_users) > 0~} - mapUsers: | -%{for k, v in local.merged_users~} - - userarn: ${v.userarn} - username: ${v.username} - groups: -%{for g in v.groups~} - - ${g} -%{endfor~} -%{endfor~} -%{endif~} -EOM - - # patch_t = templatefile("${path.root}/config_map.aws-auth.yaml.tpl",{ - # users = values(local.merged_users) - # roles = values(local.merged_roles) - # }) -} - -resource "null_resource" "patch-aws-auth" { - triggers = { - users = join(",", 
sort(keys(local.merged_users))) - roles = join(",", sort(keys(local.merged_roles))) - } - depends_on = [null_resource.kubeconfig] - # provisioner "local-exec" { - # command = "if [ -z $KUBECONFIG ]; then 'echo missing KUBECONFIG'; exit 1; else exit 0; fi" - # } - # provisioner "local-exec" { - # command = "if [ ! -r $KUBECONFIG ]; then 'echo unreadable KUBECONFIG'; exit 1; else exit 0; fi" - # } - # provisioner "local-exec" { - # command = "which kubectl > /dev/null 2>&1; if [ $? != 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" - # } - provisioner "local-exec" { - command = "test -d setup || mkdir setup" - } - provisioner "local-exec" { - command = "echo '${local.patch}' > setup/config_map.patch.yaml" - } - # provisioner "local-exec" { - # command = "echo '${local.patch_t}' > config_map.patch_t.yaml" - # } - provisioner "local-exec" { - # command = "kubectl patch --type merge -n kube-system configmap/aws-auth -p '${local.patch}'" - command = "kubectl --kubeconfig ${path.root}/setup/kube.config patch --type merge -n kube-system configmap/aws-auth --patch-file setup/config_map.patch.yaml" - } -} - -# output "map" { -# value = data.kubernetes_config_map.aws-auth -# } -# output "map_output" { -# value = { -# "object" = data.kubernetes_config_map.aws-auth -# "existing_users" = local.existing_users -# "existing_roles" = local.existing_roles -# "patch" = local.patch -# "patch_text" = local.patch_t -# } -# } diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/prefixes.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/prefixes.tf deleted file mode 120000 index e0bf5ad..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/prefixes.tf +++ /dev/null @@ -1 +0,0 @@ -../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/providers.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/providers.tf deleted file mode 120000 index 7244d01..0000000 
--- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/providers.tf +++ /dev/null @@ -1 +0,0 @@ -../providers.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/region.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/region.tf deleted file mode 100644 index b7b1696..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/region.tf +++ /dev/null @@ -1,4 +0,0 @@ -locals { - region = var.region -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/settings.aws-auth.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/settings.aws-auth.tf deleted file mode 100644 index 4d3259d..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/settings.aws-auth.tf +++ /dev/null @@ -1,11 +0,0 @@ -locals { - aws_auth_users = [] - aws_auth_roles = [ - { - rolearn : "" - aws_rolename : format("%v%v-cluster-admin", local._prefixes["eks-role"], var.cluster_name) - username : "admin" - groups = ["system:masters", "eks-console-dashboard-full-access-group"] - }, - ] -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/tf-run.data b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/tf-run.data deleted file mode 100644 index 049f9df..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/tf-run.data +++ /dev/null @@ -1,11 +0,0 @@ -VERSION 1.2.2 -REMOTE-STATE -COMMAND tf-directory-setup.py -l none -f -COMMAND setup-new-directory.sh -COMMAND tf-init -upgrade -COMMAND ln -sf ../versions.tf -COMMAND ln -sf ../settings.auto.tfvars -LINKTOP init -ALL -COMMAND tf-directory-setup.py -l s3 -STOP cd ../efs and tf-run.sh apply diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/variables.aws-auth.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/variables.aws-auth.tf deleted file mode 100644 index 05708d5..0000000 --- 
a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/variables.aws-auth.tf +++ /dev/null @@ -1,23 +0,0 @@ -# maybe just ignore the ARN entirely and force a read - -variable "aws_auth_users" { - description = "A list of objects where each object has userarn, username, k8s_username, and groups, where groups is a list of groups to associate with the user. Leaving userarn as an empty string will pull the user ARN from AWS." - type = list(object({ - userarn = string - aws_username = string - username = string - groups = list(string) - })) - default = [] -} - -variable "aws_auth_roles" { - description = "A list of objects where each object has rolearn, rolename, k8s_username, and groups, where groups is a list of groups to associate with the role. Leaving rolearn as an empty string will pull the role ARN from AWS." - type = list(object({ - rolearn = string - aws_rolename = string - username = string - groups = list(string) - })) - default = [] -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/variables.eks.tf deleted file mode 120000 index 7dd95db..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/variables.eks.tf +++ /dev/null @@ -1 +0,0 @@ -../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/version.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/version.tf deleted file mode 120000 index 061373c..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/version.tf +++ /dev/null @@ -1 +0,0 @@ -../version.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/versions.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/versions.tf deleted file mode 120000 index 8bd0ff1..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/aws-auth/versions.tf +++ /dev/null @@ -1 +0,0 @@ 
-../versions.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/bin/copy_image.sh b/examples/full-cluster-tf-upgrade/1.24.in-progress/bin/copy_image.sh deleted file mode 100755 index 60e8847..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/bin/copy_image.sh +++ /dev/null @@ -1,324 +0,0 @@ -#!/bin/bash - -############################################################################### -# This script uses skopeo to copy a docker image from one repository to -# another. The primary intent is to copy the image from a public repository -# to a private repository. -############################################################################### -# Expected environment variables: -# -# SOURCE_IMAGE - The image to copy to to another location. Example: -# paradyme-docker-local.jfrog.io/appetizer:dev -# SOURCE_INSECURE - Set this to 1 of the source repository is in an insecure -# docker registry. Set it to 0 or leave it unset if the -# docker registry is secure. -# -# DESTINATION_IMAGE - The image to copy to to another location. Example: -# paradyme-docker-local.jfrog.io/appetizer:dev -# DESTINATION_INSECURE - Set this to 1 of the destination repository is in -# an insecure docker registry. Set it to 0 or leave it unset -# if the docker registry is secure. -# -# When the source repository requires authentication to access, configure -# these values. Otherwise do not set them. -# -# SOURCE_USERNAME - The username to supply for credentialed access to the -# repository. `anthony-zawacki` is an example. -# SOURCE_PASSWORD - The password to supply for credentialed access to the -# repository. An artifactory API_KEY for example. -# -# When the destination repository requires authentication to access, configure -# these values. Otherwise do not set them. -# -# DESTINATION_USERNAME - The username to supply for credentialed access to the -# repository. `anthony-zawacki` is an example. 
-# DESTINATION_PASSWORD - The password to supply for credentialed access to the -# repository. The output of: -# `aws ecr get-login-password --region us-east-2` for example. -# -# If the destination repository does not exist, the copy_image.sh script will -# create the repository automatically. In cases where the newly created -# repository should have a mutable image (perhaps always pushing to a `latest` -# tag in a development environment), it is possible to configure the -# repository to allow mutability by configuring this environment variable. -# Otherwise, do not set it. -# -# -############################################################################### - -ensure_skopeo() { - skopeo=$(command -v skopeo) - if [[ "$skopeo" == "" ]]; then - echo "The required executable, skopeo, was not found." - echo "Please install it and ensure it is in the path." - return 1 - fi - - return 0 -} - -usage() { - local msg="${1}"; shift; - - cat < (SOURCE_IMAGE) The name of the image to copy to another - registry. - -src-username (SOURCE_USERNAME) Optional parameter in cases where - the source registry requires authentication. Use this username for the - credentials. - -src-password (SOURCE_PASSWORD) Optional parameter in cases where - the source registry requires authentication. Use this password for the - credentials. - -src-insecure (SOURCE_INSECURE=1) Optional parameter indicates that the - source registry is not a secured registry and that tls validation - should be disabled for the processing of the image. The default is - to assume that the source registry is secured. - +src-insecure (SOURCE_INSECURE=0) Optional parameter explicitly indicating - that the source registry is secure and TLS must be used to access the - registry. - - -dest-image (DESTINATION_IMAGE) The name of the image to to use in the - destination registry. - -dest-username (DESTINATION_USERNAME) Optional parameter in cases - where the destination registry requires authentication. 
Use this - username for the credentials. - -dest-password (DESTINATION_PASSWORD) Optional parameter in cases - where the destination registry requires authentication. Use this - password for the credentials. - -dest-insecure (DESTINATION_INSECURE=1) Optional parameter indicates that the - destination registry is not a secured registry and that tls validation - should be disabled for the processing of the image. The default is - to assume that the destination registry is secured. - +dest-insecure (DESTINATION_INSECURE=0) Optional parameter explicitly - indicating that the destination registry is secure and TLS must be - used to access the registry. - -dest-mutable (DESTINATION_MUTABLE=1) Optional parameter indicates that if - creating the ECR repository is required, create it allowing mutable - images. - +dest-mutable (DESTNATION_MUTABLE=0) Optional parameter explicitly - indicating that if creating the ECR repository is required, create it - with immutable images. - -EOF - - exit 1 -} - -parse_commandline() { - local key - local positional=() - - while [[ $# -gt 0 ]]; do - key="$1"; shift - - case "$key" in - -src-image) - SOURCE_IMAGE="$1"; shift - ;; - -src-username) - SOURCE_USERNAME="$1"; shift - ;; - -src-password) - SOURCE_PASSWORD="$1"; shift - ;; - -src-insecure) - SOURCE_INSECURE=1 - ;; - +src-insecure) - SOURCE_INSECURE=0 - ;; - -dest-image) - DESTINATION_IMAGE="$1"; shift - ;; - -dest-username) - DESTINATION_USERNAME="$1"; shift - ;; - -dest-password) - DESTINATION_PASSWORD="$1"; shift - ;; - -dest-insecure) - DESTINATION_INSECURE=1 - ;; - +dest-insecure) - DESTINATION_INSECURE=0 - ;; - -dest-mutable) - DESTINATION_MUTABLE=1 - ;; - +dest-mutable) - DESTINATION_MUTABLE=0 - ;; - *) - positional+=("$key") - ;; - esac - done - - if [[ ${#positional[@]} -gt 0 ]]; then - usage "Unrecognized parameters: ${positional[*]}" - fi -} - -ensure_parameters() { - if [[ "$SOURCE_IMAGE" == "" ]]; then - usage "Must specify SOURCE_IMAGE" - fi - - if [[ 
"$DESTINATION_IMAGE" == "" ]]; then - usage "Must specify DESTINATION_IMAGE" - fi - - if [[ "$SOURCE_USERNAME" != "" || "$SOURCE_PASSWORD" != "" ]]; then - if [[ "$SOURCE_USERNAME" == "" || "$SOURCE_PASSWORD" == "" ]]; then - usage "Must specify both the SOURCE_USERNAME and SOURCE_PASSWORD." - fi - fi - - if [[ "$DESTINATION_USERNAME" != "" || "$DESTINATION_PASSWORD" != "" ]]; then - if [[ "$DESTINATION_USERNAME" == "" || "$DESTINATION_PASSWORD" == "" ]]; then - usage "Must specify both the DESTINATION_USERNAME and DESTINATION_PASSWORD." - fi - fi - - return 0 -} - -image_exists() { - declare src_creds="$SOURCE_USERNAME:$SOURCE_PASSWORD" - declare command=(skopeo inspect --insecure-policy) - - if [[ "$SOURCE_USERNAME" != "" ]]; then -# command+=(--src-creds "$src_creds") - command+=(--creds "$src_creds") - else -# command+=(--src-no-creds) - command+=(--no-creds) - fi - -# if [[ "$SOURCE_INSECURE" == "1" ]]; then -# command+=(--src-tls-verify=false) -# else -# command+=(--src-tls-verify=true) -# fi - - command+=("docker://$SOURCE_IMAGE") - - ${command[@]} > /dev/null 2>&1 - status=$? - echo "* source_image_exists() status=$status" - # return 0 if it does, 1 if not - return $? -} - -destination_image_exists() { - declare dst_creds="$DESTINATION_USERNAME:$DESTINATION_PASSWORD" - declare command=(skopeo inspect --insecure-policy) - - if [[ "$DESTINATION_USERNAME" != "" ]]; then -# command+=(--dest-creds "$dst_creds") - command+=(--creds "$dst_creds") - else -# command+=(--dest-no-creds) - command+=(--no-creds) - fi - -# if [[ "$DESTINATION_INSECURE" == "1" ]]; then -# command+=(--dest-tls-verify=false) -# else -# command+=(--dest-tls-verify=true) -# fi - - command+=("docker://$DESTINATION_IMAGE") - - ${command[@]} > /dev/null 2>&1 - status=$? - echo "* destination_image_exists() status=$status" - # return 0 if it does, 1 if not - return $? 
-} - -copy_image() { - declare src_creds="$SOURCE_USERNAME:$SOURCE_PASSWORD" - declare dest_creds="$DESTINATION_USERNAME:$DESTINATION_PASSWORD" - declare command=(skopeo copy --insecure-policy) - - if [[ "$SOURCE_USERNAME" != "" ]]; then - command+=(--src-creds "$src_creds") - else - command+=(--src-no-creds) - fi - - if [[ "$SOURCE_INSECURE" == "1" ]]; then - command+=(--src-tls-verify=false) - else - command+=(--src-tls-verify=true) - fi - - if [[ "$DESTINATION_USERNAME" != "" ]]; then - command+=(--dest-creds "$dest_creds") - else - command+=(--dest-no-creds) - fi - - if [[ "$DESTINATION_INSECURE" == "1" ]]; then - command+=(--dest-tls-verify=false) - else - command+=(--dest-tls-verify=true) - fi - - command+=("docker://$SOURCE_IMAGE" "docker://$DESTINATION_IMAGE") - - if [[ "$DESTINATION_IMAGE" == *.dkr.ecr.*.amazonaws.com/* ]]; then - echo "ECR registry detected, ensuring repository." - declare repository="${DESTINATION_IMAGE##*.amazonaws.com/}" - repository="${repository%%:*}" - declare region="${DESTINATION_IMAGE%%.amazonaws.com/*}" - region="${region##*.}" - export AWS_PAGER="" - if ! aws ecr describe-repositories \ - --region "$region" \ - --output "json" \ - --repository-names "$repository" \ - > /dev/null 2>&1; then - local mutability="IMMUTABLE" - if [ "$DESTINATION_MUTABLE" == "1" ]; then - mutability="MUTABLE" - fi - echo "creating repository $repository." - aws ecr create-repository \ - --image-tag-mutability "$mutability" \ - --image-scanning-configuration "scanOnPush=true" \ - --encryption-configuration "encryptionType=KMS" \ - --repository-name "$repository" \ - --region "$region" \ - > /dev/null 2>&1 || return $? - else - echo "repository $repository exists." - fi - fi - - echo "Copying $SOURCE_IMAGE" - echo "to $DESTINATION_IMAGE" - - ${command[@]} -} - - -ensure_image() { - ( image_exists && ! 
destination_image_exists ) || copy_image -} - -main() { - ensure_skopeo && \ - parse_commandline "$@" && \ - ensure_parameters && \ - ensure_image && \ - echo "Done" -} - -return 0 > /dev/null 2>&1 || main "$@" - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/bin/fix-terminating-namespace.sh b/examples/full-cluster-tf-upgrade/1.24.in-progress/bin/fix-terminating-namespace.sh deleted file mode 100755 index 7282e79..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/bin/fix-terminating-namespace.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# fix_terminating_namespace() { -# local -r namespace="${1}"; shift; -# -# kubectl get ns "$namespace" 2>&1 | grep -q Terminating -# -# if [ $? -eq 0 ]; then -# kubectl get namespace "$namespace" -o json | \ -# grep -v '^ "kubernetes"$' | \ -# kubectl replace --raw "/api/v1/namespaces/$namespace/finalize" -f - -# else -# echo "Namespace $namespace not found or not stuck in terminating state." -# fi -# } -# } - -namespace="${1}" -shift; - -kubectl get ns "$namespace" 2>&1 | grep -q Terminating -if [ $? -eq 0 ] -then - kubectl get namespace "$namespace" -o json |\ - grep -v '^ "kubernetes"$' |\ - kubectl replace --raw "/api/v1/namespaces/$namespace/finalize" -f - -else - echo "Namespace $namespace not found or not stuck in terminating state." 
-fi diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/bin/show-k8s-things.sh b/examples/full-cluster-tf-upgrade/1.24.in-progress/bin/show-k8s-things.sh deleted file mode 100755 index c5f6290..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/bin/show-k8s-things.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -for f in all clusterrolebindings clusterroles nodes pods pvc pv rolebindings roles sc secrets services -do - echo "kubectl --config setup/kube.config get $f --all-namespaces -o wide > OUT.get-$f.txt" - kubectl --kubeconfig setup/kube.config get $f --all-namespaces -o wide > OUT.get-$f.txt -done diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/README.md b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/README.md deleted file mode 100644 index eae6d1d..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/README.md +++ /dev/null @@ -1,238 +0,0 @@ -# About cluster-roles - -This directory constructs the resources for roles, permissions and Kubernetes resources -for the EKS cluster adsd-cumulus-dev. - -# Application Information - -* Application: EKS adsd-cumulus-dev -* Organization: ADSD -* Project: DICE-dev -* Point of Contact(s): badra001, -* Creation Date: 2021-10-08 -* References: - * Requirements: {url} - * Remedy Ticket: {number} - * Other: {url} -* Related Configurations: - * {directory-path} - -# Application Requirements: EKS Cluster RBAC - -In order to let CICD pipeline and DBA to manage the applications and databases which Cumulus needed. 3 cluster roles need to be create - -1. Deployer Application Role -2. Deployer Istio System Role -3. DBA Administrator Role - -CICD deployer will be binding to Deployer roles in the namespaces that CICD will manager. Same as DBA Admin user, they only have admin roles for the namespaces that they are going to manage. 
- -## Deployer Application Role - -This role defines the k8s resources that CICD pipeline need to create for application deployment. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: deployer-role -aggregationRule: - clusterRoleSelectors: - - matchLabels: - rbac.authorization.k8s.io/aggregate-to-edit: "true" -rules: - - apiGroups: - - cert-manager.io - - acme.cert-manager.io - resources: - - "*" - verbs: - - get - - list - - watch - - create - - update - - patch - - delete - - apiGroups: - - networking.istio.io - - security.istio.io - resources: - - virtualservices - - authorizationpolicies - - destinationrules - - peerauthentications - - requestauthentications - verbs: - - get - - list - - watch - - create - - delete - - patch - -``` -## Deployer Istio System Role - -This Role defines that deployer need to create gateway and certificate in istio-system namespace, per istio requires, TLS certificate need stay in the same -namespace as istio-ingressgateway. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: deployer-istiosystem-role -rules: - - apiGroups: - - cert-manager.io - - acme.cert-manager.io - resources: - - "*" - verbs: - - get - - list - - watch - - create - - update - - patch - - apiGroups: - - networking.istio.io - resources: - - gateways - verbs: - - get - - list - - watch - - create - - delete - - patch - -``` - -## DBA Administrator Role -This is admin role for a particular namespace or namespaces that DBA need to access and managed the DBs. 
- -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: dba-admin-role -aggregationRule: - clusterRoleSelectors: - - matchLabels: - rbac.authorization.k8s.io/aggregate-to-admin: "true" -rules: - - apiGroups: - - cert-manager.io - - acme.cert-manager.io - resources: - - "*" - verbs: - - get - - list - - watch - - create - - update - - patch - - delete - - apiGroups: - - networking.istio.io - - security.istio.io - resources: - - virtualservices - - authorizationpolicies - - destinationrules - - peerauthentications - - requestauthentications - verbs: - - get - - list - - watch - - create - - delete - - patch - -``` - -# Terraform Directions - - - - -# Details - - -account_alias = "" -account_id = "" -application_tags = {} -aws_environment = "" -census_private_cidr = [ - "148.129.0.0/16", - "172.16.0.0/12", - "192.168.0.0/16" -] -census_public_cidr = [ - "148.129.0.0/16" -] -cicd_k8s_group_name = "s-eks-adsd-cumulus-dev-cicd-deployer" -cicd_k8s_user_name = "cicd-deployer" -cicd_managed_namespaces = [ - "adsd-cumulus-dev-apps", - "adsd-cumulus-dev-addressupdate", - "adsd-cumulus-dev-adminmatchrecord", - "adsd-cumulus-dev-cbs-apps", - "adsd-cumulus-dev-collectionevent", - "adsd-cumulus-dev-collectionintervention", - "adsd-cumulus-dev-collectionoperation", - "adsd-cumulus-dev-collectionresponse", - "adsd-cumulus-dev-common", - "adsd-cumulus-dev-mft", - "adsd-cumulus-dev-monitoring" -] -cluster_name = "" -cluster_version = "1.20" -dba_admin_rolebinding_name = "dba-admin-rolebinding" -dba_administrator_role_name = "dba-admin-role" -dba_k8s_group_name = "s-eks-adsd-cumulus-dev-dba-admin" -dba_k8s_user_name = "dba-admin" -dba_managed_namespaces = [ - "adsd-cumulus-dev-db" -] -deployer_application_role_name = "deployer-application-role" -deployer_application_rolebinding_name = "deployer-application-rolebinding" -deployer_istiosystem_role_name = "deployer-istiosystem-role" -domain = "" -eks_instance_disk_size = 40 -eks_instance_type = 
"t3.xlarge" -eks_ng_desire_size = 4 -eks_ng_max_size = 16 -eks_ng_min_size = 4 -eks_vpc_name = "*vpc4*" -istio_installed_namespace = "istio-system" -kms_tfstate_key = "k-kms-inf-tfstate" -profile = "" -region = "" -region_map = {} -regions = [] -subnets_name = "*-apps-*" -tag_costallocation = "csvd:infrastructure" -tag_creator = "" -tfstate_bucket = "inf-tfstate-252960665057" -tfstate_bucket_prefix = "inf-tfstate" -tfstate_key_prefix = "ma6-gov" -tfstate_key_suffix = "terraform.tfstate" -tfstate_region = "us-gov-east-1" -tfstate_table = "tf_remote_state" -vpc_dns_servers = [ - "148.129.127.22", - "148.129.191.22" -] -vpc_domain_name = "dice.census.gov" -vpc_full_name = "" -vpc_ntp_servers = [ - "148.129.127.23", - "148.129.191.23" -] - - - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/RESULTS.md b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/RESULTS.md deleted file mode 100644 index 5d31a20..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/RESULTS.md +++ /dev/null @@ -1,41 +0,0 @@ -## Cluster Roles - -```console -% kubectl --kubeconfig setup/kube.config get clusterrole -o wide |grep -iE "dba|deployer" -cumulus-dba-role 2021-10-07T14:36:45Z -dba-admin-role 2021-10-13T12:12:33Z -deployer-application-role 2021-10-13T12:12:33Z -deployer-istiosystem-role 2021-10-13T12:12:33Z -deployer-role 2021-10-07T16:37:43Z -``` - -## Role Binding - -```console -% kubectl --kubeconfig setup/kube.config get rolebinding -o wide --all-namespaces |grep -iE "deployer|dba" -adsd-cumulus-dev-addressupdate deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer -adsd-cumulus-dev-addressupdate deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer -adsd-cumulus-dev-adminmatchrecord deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer 
-adsd-cumulus-dev-adminmatchrecord deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer -adsd-cumulus-dev-apps deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer -adsd-cumulus-dev-apps deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer -adsd-cumulus-dev-cbs-apps deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer -adsd-cumulus-dev-cbs-apps deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer -adsd-cumulus-dev-collectionevent deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer -adsd-cumulus-dev-collectionevent deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer -adsd-cumulus-dev-collectionintervention deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer -adsd-cumulus-dev-collectionintervention deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer -adsd-cumulus-dev-collectionoperation deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer -adsd-cumulus-dev-collectionoperation deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer -adsd-cumulus-dev-collectionresponse deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer -adsd-cumulus-dev-collectionresponse deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer -adsd-cumulus-dev-common deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer 
-adsd-cumulus-dev-common deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer -adsd-cumulus-dev-db cumulus-dba-rolebinding ClusterRole/cumulus-dba-role 5d22h dba-admin cumulus-dba kube-system/dba -adsd-cumulus-dev-db dba-admin-rolebinding ClusterRole/dba-admin-role 56m dba-admin s-eks-adsd-cumulus-dev-dba-admin -adsd-cumulus-dev-mft deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer -adsd-cumulus-dev-mft deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer -adsd-cumulus-dev-monitoring deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer -adsd-cumulus-dev-monitoring deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer -istio-system deployer_istiosystem_role_binding ClusterRole/deployer-istiosystem-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer -``` diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/cm.tf.off b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/cm.tf.off deleted file mode 100644 index f84cb4b..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/cm.tf.off +++ /dev/null @@ -1,6 +0,0 @@ -data "kubernetes_config_map" "awsauth" { - metadata { - name = "aws-auth" - namespace = "kube-system" - } -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/data.eks-subdirectory.tf deleted file mode 120000 index 43b5430..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/data.eks-subdirectory.tf +++ /dev/null @@ -1 +0,0 @@ -../includes.d/data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/dba-clusterrole.tf 
b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/dba-clusterrole.tf deleted file mode 100644 index e60e7b5..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/dba-clusterrole.tf +++ /dev/null @@ -1,24 +0,0 @@ -resource "kubernetes_cluster_role" "dba_administrator_cluster_role" { - metadata { - name = var.dba_administrator_role_name - } - aggregation_rule { - cluster_role_selectors { - match_labels = { - "rbac.authorization.k8s.io/aggregate-to-admin" = "true" - } - } - } - - rule { - api_groups = ["cert-manager.io", "acme.cert-manager.io"] - resources = ["certificates", "challenges", "orders", "certificaterequests", "issuers"] - verbs = ["get", "list", "watch", "create", "update", "patch"] - } - - rule { - verbs = ["get", "list", "watch", "create", "update", "patch"] - api_groups = ["networking.istio.io", "security.istio.io"] - resources = ["virtualservices", "authorizationpolicies", "destinationrules", "peerauthentications", "requestauthentications"] - } -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/dba-rolebinding.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/dba-rolebinding.tf deleted file mode 100644 index e7d48aa..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/dba-rolebinding.tf +++ /dev/null @@ -1,40 +0,0 @@ -locals { - dba_managed_namespaces = formatlist("%v-%v", var.cluster_name, var.dba_managed_namespaces) - dba_k8s_group_name = format("%v%v-%v", local._prefixes["eks-user"], var.cluster_name, var.dba_k8s_group_name) -} - -resource "kubernetes_namespace" "dba_managed_namespaces" { - for_each = toset(local.dba_managed_namespaces) - metadata { - name = each.key - labels = { - istio-injection = "enabled" - } - } -} - -resource "kubernetes_role_binding" "dba_admin_rolebinding" { - # for_each = toset(local.dba_managed_namespaces) - for_each = kubernetes_namespace.dba_managed_namespaces - - metadata { - name = 
var.dba_admin_rolebinding_name - namespace = each.key - } - role_ref { - api_group = "rbac.authorization.k8s.io" - kind = "ClusterRole" - name = var.dba_administrator_role_name - } - subject { - kind = "User" - name = var.dba_k8s_user_name - api_group = "rbac.authorization.k8s.io" - } - subject { - kind = "Group" - name = local.dba_k8s_group_name - api_group = "rbac.authorization.k8s.io" - } - # depends_on = [kubernetes_namespace.dba_managed_namespaces] -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/dba.iam.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/dba.iam.tf deleted file mode 100644 index 3ef0a8a..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/dba.iam.tf +++ /dev/null @@ -1,117 +0,0 @@ -locals { - policy_dba_k8s_group_name = replace(local.dba_k8s_group_name, local._prefixes["eks-user"], local._prefixes["eks-policy"]) - role_dba_k8s_group_name = format("%v%v-%v", local._prefixes["eks"], var.cluster_name, var.dba_k8s_group_name) -} - -module "role_dba_administrator" { - source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade" - - role_name = local.role_dba_k8s_group_name - role_description = "Role for EKS cluster ${var.cluster_name} for access by ${var.dba_k8s_group_name}" - enable_ldap_creation = false - assume_policy_document = data.aws_iam_policy_document.dba_administrator_allow_sts.json - attached_policies = [aws_iam_policy.dba_administrator.arn] - - tags = merge( - local.base_tags, - local.common_tags, - var.application_tags, - ) -} - -resource "aws_iam_policy" "dba_administrator" { - name = local.policy_dba_k8s_group_name - path = "/" - description = "Policy for EKS ${var.cluster_name} IAM access ${var.dba_k8s_group_name}" - policy = data.aws_iam_policy_document.dba_administrator.json -} - -locals { - dba_administrator_policy_statements = { - ECRRead = { - actions = [ - "ecr:Describe*", - "ecr:Get*", - "ecr:ListImages", - 
"ecr:BatchGetImage", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - ] - resources = ["*"] - } - EKSRead = { - actions = [ - "eks:ListClusters", - ] - resources = ["*"] - } - EKSReadMyClusters = { - actions = [ - "eks:DescribeCluster", - "eks:AccessKubernetesApi", - ] - resources = [format(local.common_arn, "eks", format("%v/%v", "cluster", var.cluster_name))] - } - STSAssumeRole = { - actions = ["sts:AssumeRole"] - resources = [module.role_dba_administrator.role_arn] - } - } -} - -data "aws_iam_policy_document" "dba_administrator" { - dynamic "statement" { - for_each = local.dba_administrator_policy_statements - iterator = s - content { - sid = format("%v%vAccess", lookup(s.value, "effect", "Allow"), s.key) - effect = lookup(s.value, "effect", "Allow") - actions = lookup(s.value, "actions", []) - resources = lookup(s.value, "resources", []) - not_resources = lookup(s.value, "not_resources", []) - } - } -} - -# allow anyone in this account to assume the role, if they have the permission to do so -data "aws_iam_policy_document" "dba_administrator_allow_sts" { - statement { - sid = "AllowSTSAssume" - effect = "Allow" - actions = ["sts:AssumeRole"] - principals { - type = "AWS" - identifiers = [ - format(local.iam_arn, "root"), - ] - } - } -} - -# output "role_dba_administrator_arn" { -# description = "DBA Adminstrator role ARN" -# value = module.role_dba_administrator.role_arn -# } - -module "group_dba_administrator" { - source = "git@github.e.it.census.gov:terraform-modules/aws-iam-group.git" - - group_name = local.role_dba_k8s_group_name - attached_policies = [aws_iam_policy.dba_administrator.arn] - - tags = merge( - local.base_tags, - local.common_tags, - var.application_tags, - ) -} - -output "info_dba_administrator" { - description = "DBA Adminstrator IAM details" - value = { - role_name = module.role_dba_administrator.role_name - role_arn = module.role_dba_administrator.role_arn - group_name = module.group_dba_administrator.group_name - 
group_arn = module.group_dba_administrator.group_arn - } -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/deployer-clusterrole.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/deployer-clusterrole.tf deleted file mode 100644 index 7cede6e..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/deployer-clusterrole.tf +++ /dev/null @@ -1,67 +0,0 @@ -resource "kubernetes_cluster_role" "cicd_deployer_istiosystem_cluster_role" { - metadata { - name = var.deployer_istiosystem_role_name - } - - rule { - api_groups = ["acme.cert-manager.io"] - resources = ["challenges", "orders", "certificaterequests"] - verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] - } - - rule { - api_groups = ["cert-manager.io"] - resources = ["certificates"] - verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] - } - - - rule { - verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] - api_groups = ["networking.istio.io"] - resources = ["gateways"] - } -} - -resource "kubernetes_cluster_role" "cicd_deployer_istio_cluster_role" { - metadata { - name = var.deployer_application_istio_role_name - } - rule { - api_groups = ["security.istio.io"] - verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] - resources = ["requestauthentications", "authorizationpolicies", "peerauthentications"] - } - - rule { - verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] - api_groups = ["networking.istio.io"] - resources = ["virtualservices", "destinationrules", "gateways"] - } -} - -resource "kubernetes_cluster_role" "cicd_deployer_application_cluster_role" { - metadata { - name = var.deployer_application_role_name - } - aggregation_rule { - cluster_role_selectors { - match_labels = { - "rbac.authorization.k8s.io/aggregate-to-edit" = "true" - } - } - } 
- - rule { - api_groups = ["acme.cert-manager.io"] - resources = ["challenges", "orders", "certificaterequests"] - verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] - } - - rule { - api_groups = ["cert-manager.io"] - resources = ["certificates"] - verbs = ["create", "delete", "deletecollection", "get", "list", "patch", "update", "patch"] - } - -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/deployer-rolebinding.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/deployer-rolebinding.tf deleted file mode 100644 index 3b90b7b..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/deployer-rolebinding.tf +++ /dev/null @@ -1,91 +0,0 @@ -resource "kubernetes_role_binding" "deployer_istio_role_binding" { - metadata { - name = "deployer_istiosystem_role_binding" - namespace = var.istio_installed_namespace - } - role_ref { - api_group = "rbac.authorization.k8s.io" - kind = "ClusterRole" - name = var.deployer_istiosystem_role_name - } - subject { - kind = "User" - name = var.cicd_k8s_user_name - api_group = "rbac.authorization.k8s.io" - } - subject { - kind = "Group" - # name = format("%v%v-%v", local._prefixes["eks-user"], var.cluster_name, var.cicd_k8s_group_name) - name = local.cicd_k8s_iam_username - api_group = "rbac.authorization.k8s.io" - } -} - -locals { - cicd_managed_namespaces = formatlist("%v-%v", var.cluster_name, var.cicd_managed_namespaces) - cicd_k8s_iam_username = format("%v%v-%v", local._prefixes["eks-user"], var.cluster_name, var.cicd_k8s_group_name) - cicd_k8s_group_name = format("%v%v-%v", local._prefixes["eks"], var.cluster_name, var.cicd_k8s_group_name) -} - -resource "kubernetes_namespace" "cicd_managed_namespaces" { - for_each = toset(local.cicd_managed_namespaces) - metadata { - name = each.key - labels = { - istio-injection = "enabled" - } - } -} - - -resource "kubernetes_role_binding" "deployer_application_istio_rolebinding" { - # 
for_each = toset(local.cicd_managed_namespaces) - for_each = kubernetes_namespace.cicd_managed_namespaces - - metadata { - name = var.deployer_application_istio_rolebinding_name - namespace = each.key - } - role_ref { - api_group = "rbac.authorization.k8s.io" - kind = "ClusterRole" - name = var.deployer_application_istio_role_name - } - subject { - kind = "User" - name = var.cicd_k8s_user_name - api_group = "rbac.authorization.k8s.io" - } - subject { - kind = "Group" - name = local.cicd_k8s_iam_username - api_group = "rbac.authorization.k8s.io" - } - # depends_on = [kubernetes_namespace.cicd_managed_namespaces] -} - -resource "kubernetes_role_binding" "deployer_application_rolebinding" { - # for_each = toset(local.cicd_managed_namespaces) - for_each = kubernetes_namespace.cicd_managed_namespaces - - metadata { - name = var.deployer_application_rolebinding_name - namespace = each.key - } - role_ref { - api_group = "rbac.authorization.k8s.io" - kind = "ClusterRole" - name = var.deployer_application_role_name - } - subject { - kind = "User" - name = var.cicd_k8s_user_name - api_group = "rbac.authorization.k8s.io" - } - subject { - kind = "Group" - name = local.cicd_k8s_iam_username - api_group = "rbac.authorization.k8s.io" - } - # depends_on = [kubernetes_namespace.cicd_managed_namespaces] -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/deployer.iam.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/deployer.iam.tf deleted file mode 100644 index 204b4d1..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/deployer.iam.tf +++ /dev/null @@ -1,167 +0,0 @@ -locals { - policy_cicd_k8s_group_name = replace(local.cicd_k8s_iam_username, local._prefixes["eks-user"], local._prefixes["eks-policy"]) - role_cicd_k8s_group_name = replace(local.cicd_k8s_iam_username, local._prefixes["eks-user"], "") - iam_policies_cicd = ["p-inf-manage-access-keys"] -} - -data "aws_iam_policy" "cicd_deployer_policies" { - 
for_each = toset(local.iam_policies_cicd) - name = each.key -} - -module "service_cicd_deployer" { - source = "git@github.e.it.census.gov:terraform-modules/aws-iam-user.git" - - iam_username = local.cicd_k8s_iam_username - username = "" - email_address = "" - groups = ["g-inf-ip-restriction"] - generate_password = false - service_account = true - enable_sending_mail = false - create_access_keys = false - profile = var.profile - pgp_key_file = "./init/tf-gpg-key.b64" - - attached_policies = flatten(concat([for k, v in data.aws_iam_policy.cicd_deployer_policies : v.arn], [aws_iam_policy.cicd_deployer.arn])) - - tags = merge( - local.base_tags, - local.common_tags, - var.application_tags, - ) -} -module "role_cicd_deployer" { - source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git" - - role_name = local.role_cicd_k8s_group_name - role_description = "Role for EKS cluster ${var.cluster_name} for access by ${var.cicd_k8s_group_name}" - enable_ldap_creation = false - assume_policy_document = data.aws_iam_policy_document.cicd_deployer_allow_sts.json - # attached_policies = flatten(concat([for k, v in data.aws_iam_policy.cicd_deployer_policies : v.arn], [aws_iam_policy.cicd_deployer.arn])) - attached_policies = [aws_iam_policy.cicd_deployer.arn] - - tags = merge( - local.base_tags, - local.common_tags, - var.application_tags, - ) -} - -resource "aws_iam_policy" "cicd_deployer" { - name = local.policy_cicd_k8s_group_name - path = "/" - description = "Policy for EKS ${var.cluster_name} IAM access ${var.cicd_k8s_group_name}" - policy = data.aws_iam_policy_document.cicd_deployer.json -} - -locals { - cicd_deployer_policy_statements = { - ECRRead = { - actions = [ - "ecr:Describe*", - "ecr:Get*", - "ecr:ListImages", - "ecr:BatchGetImage", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - ] - resources = ["*"] - } - ECRWrite = { - # effect = "Deny" - actions = [ - "ecr:BatchDeleteImage", - "ecr:CompleteLayerUpload", - "ecr:CreateRepository", 
- "ecr:DeleteRepository", - "ecr:InitiateLayerUpload", - "ecr:PutImage", - "ecr:UploadLayerPart" - ] - # not_resources = [format(local.common_arn, "ecr", format("repository/eks/%v/*", var.cluster_name))] - not_resources = [format(local.common_arn, "ecr", "repository/eks/*")] - } - EKSRead = { - actions = [ - "eks:ListClusters", - ] - resources = ["*"] - } - EKSReadMyClusters = { - actions = [ - "eks:AccessKubernetesApi", - "eks:DescribeCluster", - ] - resources = [format(local.common_arn, "eks", format("%v/%v", "cluster", var.cluster_name))] - } - # IAMRead = { - # actions = [ - # "iam:ListRoles", - # ] - # resources = ["*"] - # } - } -} - -data "aws_iam_policy_document" "cicd_deployer" { - dynamic "statement" { - for_each = local.cicd_deployer_policy_statements - iterator = s - content { - sid = format("%v%vAccess", lookup(s.value, "effect", "Allow"), s.key) - effect = lookup(s.value, "effect", "Allow") - actions = lookup(s.value, "actions", []) - resources = lookup(s.value, "resources", []) - not_resources = lookup(s.value, "not_resources", []) - } - } -} - -# allow anyone in this account to assume the role, if they have the permission to do so -data "aws_iam_policy_document" "cicd_deployer_allow_sts" { - statement { - sid = "AllowSTSAssume" - effect = "Allow" - actions = ["sts:AssumeRole"] - principals { - type = "AWS" - identifiers = [ - format(local.iam_arn, "root"), - ] - } - } -} - -# output "service_cicd_deployer_arn" { -# description = "CICD Deployer user ARN" -# value = module.service_cicd_deployer.user_arn -# } -# -# output "service_cicd_deployer_username" { -# description = "CICD Deployer username" -# value = module.service_cicd_deployer.user_name -# } - -module "group_cicd_deployer" { - source = "git@github.e.it.census.gov:terraform-modules/aws-iam-group.git" - - group_name = local.cicd_k8s_group_name - attached_policies = flatten(concat([for k, v in data.aws_iam_policy.cicd_deployer_policies : v.arn], [aws_iam_policy.cicd_deployer.arn])) - - tags = 
merge( - local.base_tags, - local.common_tags, - var.application_tags, - ) -} - -output "info_cicd_deployer" { - description = "CID Deployer IAM details" - value = { - user_name = module.service_cicd_deployer.user_name - user_arn = module.service_cicd_deployer.user_arn - group_name = module.group_cicd_deployer.group_name - group_arn = module.group_cicd_deployer.group_arn - } -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/kubeconfig.eks-subdirectory.tf deleted file mode 120000 index e3750a4..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/kubeconfig.eks-subdirectory.tf +++ /dev/null @@ -1 +0,0 @@ -../includes.d/kubeconfig.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/locals.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/locals.tf deleted file mode 100644 index 92d0613..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/locals.tf +++ /dev/null @@ -1,11 +0,0 @@ -locals { - base_arn = format("arn:%v:%%v:%v:%v:%%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id) - iam_arn = format("arn:%v:iam::%v:%%v", data.aws_arn.current.partition, data.aws_caller_identity.current.account_id) - common_arn = format("arn:%v:%%v:%v:%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id) - - base_tags = { - "eks-cluster-name" = var.cluster_name - "boc:tf_module_version" = local._module_version - "boc:created_by" = "terraform" - } -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/main.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/main.tf deleted file mode 100644 index ef02738..0000000 --- 
a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/main.tf +++ /dev/null @@ -1,30 +0,0 @@ -locals { - aws_auth_users = [ - { - userarn = module.service_cicd_deployer.user_arn - aws_username = "" - username = var.cicd_k8s_user_name - groups = [local.cicd_k8s_group_name] - }, - ] - aws_auth_roles = [ - { - rolearn : module.role_dba_administrator.role_arn - aws_rolename : "" - username : var.dba_k8s_user_name - groups = [local.dba_k8s_group_name] - }, - ] -} - -module "awsauth_cluster-roles" { - source = "git@github.e.it.census.gov:terraform-modules/aws-eks.git//patch-aws-auth" - - region = local.region - profile = var.profile - cluster_name = var.cluster_name - aws_auth_users = local.aws_auth_users - aws_auth_roles = local.aws_auth_roles - - keep_temporary_files = false -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/prefixes.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/prefixes.tf deleted file mode 120000 index e0bf5ad..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/prefixes.tf +++ /dev/null @@ -1 +0,0 @@ -../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/providers.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/providers.tf deleted file mode 120000 index 7244d01..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/providers.tf +++ /dev/null @@ -1 +0,0 @@ -../providers.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/region.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/region.tf deleted file mode 100644 index b7b1696..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/region.tf +++ /dev/null @@ -1,4 +0,0 @@ -locals { - region = var.region -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/tf-run.data 
b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/tf-run.data deleted file mode 100644 index 4f92a5c..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/tf-run.data +++ /dev/null @@ -1,14 +0,0 @@ -VERSION 1.2.2 -REMOTE-STATE -STOP only run this after the cluster roles represented here have been setup in K8S -COMMAND tf-directory-setup.py -l none -f -COMMAND setup-new-directory.sh -COMMAND tf-init -upgrade -COMMAND ln -sf ../versions.tf -COMMAND ln -sf ../settings.auto.tfvars . -LINKTOP init -POLICY -ALL -COMMAND tf-directory-setup.py -l s3 - -COMMENT cd ../ and continue diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/variables.auto.tfvars b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/variables.auto.tfvars deleted file mode 100644 index 974aef0..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/variables.auto.tfvars +++ /dev/null @@ -1,16 +0,0 @@ -istio_installed_namespace = "istio-system" -# enable only for cicd needs -cicd_k8s_group_name = "cicd-deployer" -cicd_k8s_user_name = "cicd-deployer" -cicd_managed_namespaces = [] -deployer_application_istio_role_name = "deployer-application-istio-role" -deployer_application_istio_rolebinding_name = "deployer-application-istio-rolebinding" -deployer_application_role_name = "deployer-application-role" -deployer_application_rolebinding_name = "deployer-application-rolebinding" -deployer_istiosystem_role_name = "deployer-istiosystem-role" -# enable only for dba account needs (most likely, not needed) -dba_admin_rolebinding_name = "dba-admin-rolebinding" -dba_administrator_role_name = "dba-admin-role" -dba_k8s_group_name = "dba-admin" -dba_k8s_user_name = "dba-admin" -dba_managed_namespaces = [] diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/variables.eks.tf deleted file mode 120000 index 
7dd95db..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/variables.eks.tf +++ /dev/null @@ -1 +0,0 @@ -../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/variables.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/variables.tf deleted file mode 100644 index 559f683..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/variables.tf +++ /dev/null @@ -1,83 +0,0 @@ -variable "deployer_istiosystem_role_name" { - description = "The kubernetes cluster role name of CIDR Deployer" - type = string - default = "deployer-istiosystem-role" -} - -variable "deployer_application_role_name" { - description = "The kubernetes cluster role name of CICD Deployer" - type = string - default = "deployer-application-role" -} - -variable "deployer_application_istio_role_name" { - description = "The kubernetes cluster role name of CICD Deployer" - type = string - default = "deployer-application-istio-role" -} - - - -variable "dba_administrator_role_name" { - description = "The kubernetes cluster role name of DBA Administrator" - type = string - default = "dba-admin-role" -} - -variable "istio_installed_namespace" { - description = "Namespace that Istio installed" - type = string - default = "istio-system" -} - -variable "cicd_k8s_user_name" { - description = "The user name of CICD Deployer" - type = string - default = "cicd-deployer" -} -variable "cicd_k8s_group_name" { - description = "The Group name of CICD Deployer belongs to (excluding prefix for service account and cluster)" - type = string - default = "cicd-deployer" -} - -variable "dba_k8s_user_name" { - description = "the user name of DBA Administrator" - type = string - default = "dba-admin" -} -variable "dba_k8s_group_name" { - description = "The Group name of dba-admin belongs to (excluding prefix for service account and cluster)" - type = string - default = "dba-admin" -} - -variable 
"deployer_application_rolebinding_name" { - description = "Role binding name of deployer that binding to role deployer_application_cluster_role" - type = string - default = "deployer-application-rolebinding" -} - -variable "deployer_application_istio_rolebinding_name" { - description = "Role binding name of deployer that binding to role deployer_application_cluster_role" - type = string - default = "deployer-application-istio-rolebinding" -} - -variable "dba_admin_rolebinding_name" { - description = "Role binding name of deployer that binding to role deployer_application_cluster_role" - type = string - default = "dba-admin-rolebinding" -} - -variable "cicd_managed_namespaces" { - description = "Deployer managed namespaces that deploy can create resources in (excluding cluster name prefix)" - type = list - default = [] -} - -variable "dba_managed_namespaces" { - description = "DBA admin managed namespaces (excluding cluster name prefix)" - type = list - default = [] -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/version.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/version.tf deleted file mode 120000 index 061373c..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/version.tf +++ /dev/null @@ -1 +0,0 @@ -../version.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/versions.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/versions.tf deleted file mode 120000 index 8bd0ff1..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/cluster-roles/versions.tf +++ /dev/null @@ -1 +0,0 @@ -../versions.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/.gitignore b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/.gitignore deleted file mode 100644 index 1ae9a3f..0000000 --- 
a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/.gitignore +++ /dev/null @@ -1 +0,0 @@ -certs/*.key diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/README.md b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/README.md deleted file mode 100644 index f8b7f53..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# common-services - -This is a directory where the common services are setup: - -* cert-manager -* istio service mesh -* metrics-server - -## Setup Steps - -First, copy the `remote_state.yml` from the parent and update `directory` to be the current directory. - -## Terraform Automated - -A `tf-run.data` file exists here, so the simplest way to implemnt is with the `tf-run.sh` script. - -* copy the `remote_state.yml` from the parent and update `directory` to be the current directory -* run the tf-run.sh - -```console -% tf-run.sh apply -``` - -* example of the `tf-run.sh` steps - -This is part of a larger cluster configuration, so at the end of the run it indicates another directory -to visit when done. 
- -```console -% tf-run.sh list -* running action=plan -* START: tf-run.sh v1.1.2 start=1636563207 end= logfile=logs/run.plan.20211110.1636563207.log (not-created) -* reading from tf-run.data -* read 23 entries from tf-run.data -> list -** START: start=1636563207 -* 1 COMMAND> tf-directory-setup.py -l none -f -* 2 COMMAND> setup-new-directory.sh -* 3 COMMAND> tf-init -upgrade -* 4 tf-plan -target=tls_private_key.ca -* 5 tf-plan -target=tls_cert_request.ca -* 6 tf-plan -target=null_resource.ca_root_cert -* 7 tf-plan -target=null_resource.ca_files -* 8 tf-plan -target=null_resource.ca_cert -* 9 tf-plan -target=local_file.ca_bundle_cert -* 10 COMMAND> tf-directory-setup.py -l s3 -* 11 COMMENT> submit certs/*csr using command ouptut listed in apply to TCO for signing -* 12 STOP> once that is availabile, change cert_download to true -* 13 COMMAND> terraform taint null_resource.ca_cert -* 14 tf-plan -target=null_resource.ca_root_cert -* 15 tf-plan -target=null_resource.ca_files -* 16 tf-plan -target=null_resource.ca_cert -* 17 COMMENT> second run is to complete the steps -* 18 tf-plan -target=null_resource.ca_root_cert -* 19 tf-plan -target=null_resource.ca_files -* 20 tf-plan -target=null_resource.ca_cert -* 21 tf-plan -* 22 COMMENT> run: git-secret add certs/*.key; git-secret hide -* 23 COMMENT> be sure to add all files to git, and be sure to commit -a to get .gitsecret/ changes -** END: start=1636563207 end=1636563207 elapsed=0 logfile=logs/run.plan.20211110.1636563207.log (not-created) -``` - -It is highly recommended to use the `tf-run.sh` approach. 
- -## Terraform Manual - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/README.output.md b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/README.output.md deleted file mode 100644 index 089cab7..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/README.output.md +++ /dev/null @@ -1,84 +0,0 @@ -```console -% kubectl -n kube-system get pods -o wide -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -aws-load-balancer-controller-54fdf64896-jzwsr 1/1 Running 0 23h 10.194.26.74 ip-10-194-26-252.ec2.internal -aws-load-balancer-controller-54fdf64896-qqt6d 1/1 Running 0 23h 10.194.24.242 ip-10-194-24-49.ec2.internal -aws-node-29kmc 1/1 Running 0 7d1h 10.194.24.90 ip-10-194-24-90.ec2.internal -aws-node-6d8ls 1/1 Running 1 7d1h 10.194.25.120 ip-10-194-25-120.ec2.internal -aws-node-6vrbg 1/1 Running 1 7d1h 10.194.26.252 ip-10-194-26-252.ec2.internal -aws-node-ldgxc 1/1 Running 1 7d1h 10.194.24.49 ip-10-194-24-49.ec2.internal -coredns-65bfc5645f-g86rx 1/1 Running 0 7d2h 10.194.24.207 ip-10-194-24-90.ec2.internal -coredns-65bfc5645f-xj9rl 1/1 Running 0 7d2h 10.194.24.69 ip-10-194-24-90.ec2.internal -efs-csi-controller-65fb886fd4-7slw6 3/3 Running 0 2d21h 10.194.24.90 ip-10-194-24-90.ec2.internal -efs-csi-controller-65fb886fd4-vcf9l 3/3 Running 0 2d21h 10.194.25.120 ip-10-194-25-120.ec2.internal -efs-csi-node-6t6v6 3/3 Running 0 2d21h 10.194.25.120 ip-10-194-25-120.ec2.internal -efs-csi-node-kxqfb 3/3 Running 0 2d21h 10.194.24.49 ip-10-194-24-49.ec2.internal -efs-csi-node-p8hzn 3/3 Running 0 2d21h 10.194.26.252 ip-10-194-26-252.ec2.internal -efs-csi-node-xxq9h 3/3 Running 0 2d21h 10.194.24.90 ip-10-194-24-90.ec2.internal -kube-proxy-78n7f 1/1 Running 0 7d1h 10.194.24.90 ip-10-194-24-90.ec2.internal -kube-proxy-cms7c 1/1 Running 0 7d1h 10.194.24.49 ip-10-194-24-49.ec2.internal -kube-proxy-h2t6n 1/1 Running 0 7d1h 10.194.26.252 ip-10-194-26-252.ec2.internal -kube-proxy-jkxnz 1/1 
Running 0 7d1h 10.194.25.120 ip-10-194-25-120.ec2.internal -``` - -```console -% kubectl get pods --all-namespaces -o wide -NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -cert-manager cert-manager-7fcbc79fc5-xwt4s 1/1 Running 0 51m 10.194.24.138 ip-10-194-24-49.ec2.internal -cert-manager cert-manager-cainjector-6b7f4575f4-xpgnc 1/1 Running 0 51m 10.194.24.56 ip-10-194-24-49.ec2.internal -cert-manager cert-manager-webhook-6cd54b96fc-rvld4 1/1 Running 0 51m 10.194.24.170 ip-10-194-24-90.ec2.internal -istio-system istio-egressgateway-7fcc58ddf7-dtx25 1/1 Running 0 95m 10.194.26.120 ip-10-194-26-252.ec2.internal -istio-system istio-ingressgateway-75f76c546b-vx2v6 1/1 Running 0 95m 10.194.24.8 ip-10-194-24-90.ec2.internal -istio-system istiod-85b6f86f94-vqfj2 1/1 Running 0 95m 10.194.25.155 ip-10-194-25-120.ec2.internal -kube-system aws-load-balancer-controller-54fdf64896-jzwsr 1/1 Running 0 23h 10.194.26.74 ip-10-194-26-252.ec2.internal -kube-system aws-load-balancer-controller-54fdf64896-qqt6d 1/1 Running 0 23h 10.194.24.242 ip-10-194-24-49.ec2.internal -kube-system aws-node-29kmc 1/1 Running 0 7d1h 10.194.24.90 ip-10-194-24-90.ec2.internal -kube-system aws-node-6d8ls 1/1 Running 1 7d1h 10.194.25.120 ip-10-194-25-120.ec2.internal -kube-system aws-node-6vrbg 1/1 Running 1 7d1h 10.194.26.252 ip-10-194-26-252.ec2.internal -kube-system aws-node-ldgxc 1/1 Running 1 7d1h 10.194.24.49 ip-10-194-24-49.ec2.internal -kube-system coredns-65bfc5645f-g86rx 1/1 Running 0 7d2h 10.194.24.207 ip-10-194-24-90.ec2.internal -kube-system coredns-65bfc5645f-xj9rl 1/1 Running 0 7d2h 10.194.24.69 ip-10-194-24-90.ec2.internal -kube-system efs-csi-controller-65fb886fd4-7slw6 3/3 Running 0 2d21h 10.194.24.90 ip-10-194-24-90.ec2.internal -kube-system efs-csi-controller-65fb886fd4-vcf9l 3/3 Running 0 2d21h 10.194.25.120 ip-10-194-25-120.ec2.internal -kube-system efs-csi-node-6t6v6 3/3 Running 0 2d21h 10.194.25.120 ip-10-194-25-120.ec2.internal -kube-system 
efs-csi-node-kxqfb 3/3 Running 0 2d21h 10.194.24.49 ip-10-194-24-49.ec2.internal -kube-system efs-csi-node-p8hzn 3/3 Running 0 2d21h 10.194.26.252 ip-10-194-26-252.ec2.internal -kube-system efs-csi-node-xxq9h 3/3 Running 0 2d21h 10.194.24.90 ip-10-194-24-90.ec2.internal -kube-system kube-proxy-78n7f 1/1 Running 0 7d1h 10.194.24.90 ip-10-194-24-90.ec2.internal -kube-system kube-proxy-cms7c 1/1 Running 0 7d1h 10.194.24.49 ip-10-194-24-49.ec2.internal -kube-system kube-proxy-h2t6n 1/1 Running 0 7d1h 10.194.26.252 ip-10-194-26-252.ec2.internal -kube-system kube-proxy-jkxnz 1/1 Running 0 7d1h 10.194.25.120 ip-10-194-25-120.ec2.internal -operators istio-operator-7cc8974d48-f2j2m 1/1 Running 0 14h 10.194.26.211 ip-10-194-26-252.ec2.internal -sample-alb sample-alb-8744f54f9-7w4cj 1/1 Running 0 23h 10.194.25.67 ip-10-194-25-120.ec2.internal -sample-alb sample-alb-8744f54f9-gs8f5 1/1 Running 0 23h 10.194.24.147 ip-10-194-24-49.ec2.internal -sample-alb sample-alb-8744f54f9-v6kgr 1/1 Running 0 23h 10.194.26.168 ip-10-194-26-252.ec2.internal -sample-elb sample-elb-69786b5f7d-d7nb4 1/1 Running 0 2d21h 10.194.26.178 ip-10-194-26-252.ec2.internal -sample-elb sample-elb-69786b5f7d-mw7jb 1/1 Running 0 2d21h 10.194.24.193 ip-10-194-24-49.ec2.internal -sample-elb sample-elb-69786b5f7d-tqz2s 1/1 Running 0 2d21h 10.194.25.96 ip-10-194-25-120.ec2.internal -sample-nlb sample-nlb-6cd5769dfb-n8dmd 1/1 Running 0 2d21h 10.194.25.198 ip-10-194-25-120.ec2.internal -sample-nlb sample-nlb-6cd5769dfb-qw8n4 1/1 Running 0 2d21h 10.194.24.132 ip-10-194-24-49.ec2.internal -sample-nlb sample-nlb-6cd5769dfb-t2nhp 1/1 Running 0 2d21h 10.194.26.18 ip-10-194-26-252.ec2.internal -``` - -```console -% kubectl -n istio-system get secret | grep -iE "ca-secret|tls" -istio-ca-secret istio.io/ca-root 5 7d2h -nginx-cert kubernetes.io/tls 3 6d20h -root-secret kubernetes.io/tls 3 7d14h -``` - - kubectl get pods --all-namespaces -o wide|grep -i cert -cert-manager cert-manager-7fcbc79fc5-xwt4s 1/1 Running 0 7d22h 
10.194.24.138 ip-10-194-24-49.ec2.internal -cert-manager cert-manager-cainjector-6b7f4575f4-xpgnc 1/1 Running 0 7d22h 10.194.24.56 ip-10-194-24-49.ec2.internal -cert-manager cert-manager-webhook-6cd54b96fc-rvld4 1/1 Running 0 7d22h 10.194.24.170 ip-10-194-24-90.ec2.internal - -$ kubectl -n cert-manager get secrets -NAME TYPE - DATA AGE -ca-key-pair Opaque - 2 5m2s -... -$ kubectl get clusterissuer -NAME READY AGE -clusterissuer True 5m36s - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/cert.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/cert.tf deleted file mode 100644 index a6a8338..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/cert.tf +++ /dev/null @@ -1,71 +0,0 @@ -# tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g') -# terraform taint null_resource.ca_cert[0] -# # (wait for submitted cert to be ready) -# tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g') -# tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g') - -#--- -# ca -#--- -locals { - ca_dns_name = format("pki.%v.%v", var.cluster_name, var.vpc_domain_name) - # ca_ou = format("ou=%v,ou=EKS,ou=%v,ou=PKI",var.cluster_name,var.vpc_full_name) - ca_ou = format("eks-%v-%v-PKI", var.cluster_name, var.vpc_full_name) - ca_cert_download = false - ca_cert_san = [local.ca_dns_name] - - ca_key_filename = format("${path.root}/certs/%v.key", local.ca_dns_name) - ca_key_exists = fileexists(local.ca_key_filename) - ca_cert_filename = format("${path.root}/certs/%v.crt", local.ca_dns_name) - ca_cert_exists = fileexists(local.ca_cert_filename) - ca_root_filename = "${path.root}/certs/ca-root.crt" - ca_root_exists = fileexists(local.ca_root_filename) - ca_bundle_contents = local.ca_cert_exists && local.ca_root_exists ? 
format("%v%v", file(local.ca_cert_filename), file(local.ca_root_filename)) : "" - ca_bundle_filename = format("${path.root}/certs/%v.bundle.crt", local.ca_dns_name) - - v2_certificate_csr_message = < - -# Method 2 - Specifying groups manually -$ helm install my-release autoscaler/cluster-autoscaler \ ---set "autoscalingGroups[0].name=your-asg-name" \ ---set "autoscalingGroups[0].maxSize=10" \ ---set "autoscalingGroups[0].minSize=1" -``` - -## Introduction - -This chart bootstraps a cluster-autoscaler deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. - -## Prerequisites - -- Helm 3+ -- Kubernetes 1.8+ - - [Older versions](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#releases) may work by overriding the `image`. Cluster autoscaler internally simulates the scheduler and bugs between mismatched versions may be subtle. -- Azure AKS specific Prerequisites: - - Kubernetes 1.10+ with RBAC-enabled. - -## Previous Helm Chart - -The previous `cluster-autoscaler` Helm chart hosted at [helm/charts](https://github.com/helm/charts) has been moved to this repository in accordance with the [Deprecation timeline](https://github.com/helm/charts#deprecation-timeline). Note that a few things have changed between this version and the old version: - -- This repository **only** supports Helm chart installations using Helm 3+ since the `apiVersion` on the charts has been marked as `v2`. -- Previous versions of the Helm chart have not been migrated - -## Migration from 1.X to 9.X+ versions of this Chart - -**TL;DR:** -You should choose to use versions >=9.0.0 of the `cluster-autoscaler` chart published from this repository; previous versions, and the `cluster-autoscaler-chart` with versioning 1.X.X published from this repository are deprecated. - -
- Previous versions of this chart - further details -On initial migration of this chart from the `helm/charts` repository this chart was renamed from `cluster-autoscaler` to `cluster-autoscaler-chart` due to technical limitations. This affected all `1.X` releases of the chart, version 2.0.0 of this chart exists only to mark the [`cluster-autoscaler-chart` chart](https://artifacthub.io/packages/helm/cluster-autoscaler/cluster-autoscaler-chart) as deprecated. - -Releases of the chart from `9.0.0` onwards return the naming of the chart to `cluster-autoscaler` and return to following the versioning established by the chart's previous location at . - -To migrate from a 1.X release of the chart to a `9.0.0` or later release, you should first uninstall your `1.X` install of the `cluster-autoscaler-chart` chart, before performing the installation of the new `cluster-autoscaler` chart. -
- -## Migration from 9.0 to 9.1 - -Starting from `9.1.0` the `envFromConfigMap` value is expected to contain the name of a ConfigMap that is used as ref for `envFrom`, similar to `envFromSecret`. If you want to keep the previous behaviour of `envFromConfigMap` you must rename it to `extraEnvConfigMaps`. - -## Installing the Chart - -**By default, no deployment is created and nothing will autoscale**. - -You must provide some minimal configuration, either to specify instance groups or enable auto-discovery. It is not recommended to do both. - -Either: - -- Set `autoDiscovery.clusterName` and provide additional autodiscovery options if necessary **or** -- Set static node group configurations for one or more node groups (using `autoscalingGroups` or `autoscalingGroupsnamePrefix`). - -To create a valid configuration, follow instructions for your cloud provider: - -* [AWS](#aws---using-auto-discovery-of-tagged-instance-groups) -* [GCE](#gce) -* [Azure AKS](#azure-aks) -* [OpenStack Magnum](#openstack-magnum) - -### AWS - Using auto-discovery of tagged instance groups - -Auto-discovery finds ASGs tags as below and automatically manages them based on the min and max size specified in the ASG. `cloudProvider=aws` only. 
- -- Tag the ASGs with keys to match `.Values.autoDiscovery.tags`, by default: `k8s.io/cluster-autoscaler/enabled` and `k8s.io/cluster-autoscaler/` -- Verify the [IAM Permissions](#aws---iam) -- Set `autoDiscovery.clusterName=` -- Set `awsRegion=` -- Set (option) `awsAccessKeyID=` and `awsSecretAccessKey=` if you want to [use AWS credentials directly instead of an instance role](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials) - -```console -$ helm install my-release autoscaler/cluster-autoscaler --set autoDiscovery.clusterName= --set awsRegion= -``` - -Alternatively with your own AWS credentials - -```console -$ helm install my-release autoscaler/cluster-autoscaler --set autoDiscovery.clusterName= --set awsRegion= --set awsAccessKeyID= --set awsSecretAccessKey= -``` - -#### Specifying groups manually - -Without autodiscovery, specify an array of elements each containing ASG name, min size, max size. The sizes specified here will be applied to the ASG, assuming IAM permissions are correctly configured. - -- Verify the [IAM Permissions](#aws---iam) -- Either provide a yaml file setting `autoscalingGroups` (see values.yaml) or use `--set` e.g.: - -```console -$ helm install my-release autoscaler/cluster-autoscaler \ ---set "autoscalingGroups[0].name=your-asg-name" \ ---set "autoscalingGroups[0].maxSize=10" \ ---set "autoscalingGroups[0].minSize=1" -``` - -#### Auto-discovery - -For auto-discovery of instances to work, they must be tagged with the keys in `.Values.autoDiscovery.tags`, which by default are -`k8s.io/cluster-autoscaler/enabled` and `k8s.io/cluster-autoscaler/` - -The value of the tag does not matter, only the key. 
- -An example kops spec excerpt: - -```yaml -apiVersion: kops/v1alpha2 -kind: Cluster -metadata: - name: my.cluster.internal -spec: - additionalPolicies: - node: | - [ - {"Effect":"Allow","Action":["autoscaling:DescribeAutoScalingGroups","autoscaling:DescribeAutoScalingInstances","autoscaling:DescribeLaunchConfigurations","autoscaling:DescribeTags","autoscaling:SetDesiredCapacity","autoscaling:TerminateInstanceInAutoScalingGroup"],"Resource":"*"} - ] - ... ---- -apiVersion: kops/v1alpha2 -kind: InstanceGroup -metadata: - labels: - kops.k8s.io/cluster: my.cluster.internal - name: my-instances -spec: - cloudLabels: - k8s.io/cluster-autoscaler/enabled: "" - k8s.io/cluster-autoscaler/my.cluster.internal: "" - image: kops.io/k8s-1.8-debian-jessie-amd64-hvm-ebs-2018-01-14 - machineType: r4.large - maxSize: 4 - minSize: 0 -``` - -In this example you would need to `--set autoDiscovery.clusterName=my.cluster.internal` when installing. - -It is not recommended to try to mix this with setting `autoscalingGroups` - -See [autoscaler AWS documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#auto-discovery-setup) for a more discussion of the setup. - -### GCE - -The following parameters are required: - -- `autoDiscovery.clusterName=any-name` -- `cloud-provider=gce` -- `autoscalingGroupsnamePrefix[0].name=your-ig-prefix,autoscalingGroupsnamePrefix[0].maxSize=10,autoscalingGroupsnamePrefix[0].minSize=1` - -To use Managed Instance Group (MIG) auto-discovery, provide a YAML file setting `autoscalingGroupsnamePrefix` (see values.yaml) or use `--set` when installing the Chart - e.g. 
- -```console -$ helm install my-release autoscaler/cluster-autoscaler \ ---set "autoscalingGroupsnamePrefix[0].name=your-ig-prefix,autoscalingGroupsnamePrefix[0].maxSize=10,autoscalingGroupsnamePrefix[0].minSize=1" \ ---set autoDiscovery.clusterName= \ ---set cloudProvider=gce -``` - -Note that `your-ig-prefix` should be a _prefix_ matching one or more MIGs, and _not_ the full name of the MIG. For example, to match multiple instance groups - `k8s-node-group-a-standard`, `k8s-node-group-b-gpu`, you would use a prefix of `k8s-node-group-`. - -In the event you want to explicitly specify MIGs instead of using auto-discovery, set members of the `autoscalingGroups` array directly - e.g. - -``` -# where 'n' is the index, starting at 0 --- set autoscalingGroups[n].name=https://content.googleapis.com/compute/v1/projects/$PROJECTID/zones/$ZONENAME/instanceGroupManagers/$FULL-MIG-NAME,autoscalingGroups[n].maxSize=$MAXSIZE,autoscalingGroups[n].minSize=$MINSIZE -``` - -### Azure AKS - -The following parameters are required: - -- `cloudProvider=azure` -- `autoscalingGroups[0].name=your-agent-pool,autoscalingGroups[0].maxSize=10,autoscalingGroups[0].minSize=1` -- `azureClientID: "your-service-principal-app-id"` -- `azureClientSecret: "your-service-principal-client-secret"` -- `azureSubscriptionID: "your-azure-subscription-id"` -- `azureTenantID: "your-azure-tenant-id"` -- `azureClusterName: "your-aks-cluster-name"` -- `azureResourceGroup: "your-aks-cluster-resource-group-name"` -- `azureVMType: "AKS"` -- `azureNodeResourceGroup: "your-aks-cluster-node-resource-group"` - -### OpenStack Magnum - -`cloudProvider: magnum` must be set, and then one of - -- `magnumClusterName=` and `autoscalingGroups` with the names of node groups and min/max node counts -- or `autoDiscovery.clusterName=` with one or more `autoDiscovery.roles`. - -Additionally, `cloudConfigPath: "/etc/kubernetes/cloud-config"` must be set as this should be the location -of the cloud-config file on the host. 
- -Example values files can be found [here](../../cluster-autoscaler/cloudprovider/magnum/examples). - -Install the chart with - -``` -$ helm install my-release autoscaler/cluster-autoscaler -f myvalues.yaml -``` - -## Uninstalling the Chart - -To uninstall `my-release`: - -```console -$ helm uninstall my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -> **Tip**: List all releases using `helm list` or start clean with `helm uninstall my-release` - -## Additional Configuration - -### AWS - IAM - -The worker running the cluster autoscaler will need access to certain resources and actions: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup" - ], - "Resource": "*" - } - ] -} -``` - -- `DescribeTags` is required for autodiscovery. -- `DescribeLaunchConfigurations` is required to scale up an ASG from 0. 
- -If you would like to limit the scope of the Cluster Autoscaler to ***only*** modify ASGs for a particular cluster, use the following policy instead: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeLaunchTemplateVersions" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "autoscaling:UpdateAutoScalingGroup" - ], - "Resource": [ - "arn:aws:autoscaling:::autoScalingGroup::autoScalingGroupName/node-group-1", - "arn:aws:autoscaling:::autoScalingGroup::autoScalingGroupName/node-group-2", - "arn:aws:autoscaling:::autoScalingGroup::autoScalingGroupName/node-group-3" - ], - "Condition": { - "StringEquals": { - "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled": "true", - "autoscaling:ResourceTag/kubernetes.io/cluster/": "owned" - } - } - } - ] -} -``` - -Make sure to replace the variables ``, ``, ``, and the ARNs of the ASGs where applicable. - -### AWS - IAM Roles for Service Accounts (IRSA) - -For Kubernetes clusters that use Amazon EKS, the service account can be configured with an IAM role using [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) to avoid needing to grant access to the worker nodes for AWS resources. - -In order to accomplish this, you will first need to create a new IAM role with the above mentions policies. Take care in [configuring the trust relationship](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html#iam-role-configuration) to restrict access just to the service account used by cluster autoscaler. 
- -Once you have the IAM role configured, you would then need to `--set rbac.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=arn:aws:iam::123456789012:role/MyRoleName` when installing. - -## Troubleshooting - -The chart will succeed even if the container arguments are incorrect. A few minutes after starting -`kubectl logs -l "app=aws-cluster-autoscaler" --tail=50` should loop through something like - -``` -polling_autoscaler.go:111] Poll finished -static_autoscaler.go:97] Starting main loop -utils.go:435] No pod using affinity / antiaffinity found in cluster, disabling affinity predicate for this loop -static_autoscaler.go:230] Filtering out schedulables -``` - -If not, find a pod that the deployment created and `describe` it, paying close attention to the arguments under `Command`. e.g.: - -``` -Containers: - cluster-autoscaler: - Command: - ./cluster-autoscaler - --cloud-provider=aws -# if specifying ASGs manually - --nodes=1:10:your-scaling-group-name -# if using autodiscovery - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ - --v=4 -``` - -### PodSecurityPolicy - -Though enough for the majority of installations, the default PodSecurityPolicy _could_ be too restrictive depending on the specifics of your release. Please make sure to check that the template fits with any customizations made or disable it by setting `rbac.pspEnabled` to `false`. - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| additionalLabels | object | `{}` | Labels to add to each object of the chart. | -| affinity | object | `{}` | Affinity for pod assignment | -| autoDiscovery.clusterName | string | `nil` | Enable autodiscovery for `cloudProvider=aws`, for groups matching `autoDiscovery.tags`. Enable autodiscovery for `cloudProvider=gce`, but no MIG tagging required. Enable autodiscovery for `cloudProvider=magnum`, for groups matching `autoDiscovery.roles`. 
| -| autoDiscovery.roles | list | `["worker"]` | Magnum node group roles to match. | -| autoDiscovery.tags | list | `["k8s.io/cluster-autoscaler/enabled","k8s.io/cluster-autoscaler/{{ .Values.autoDiscovery.clusterName }}"]` | ASG tags to match, run through `tpl`. | -| autoscalingGroups | list | `[]` | For AWS, Azure AKS or Magnum. At least one element is required if not using `autoDiscovery`. For example:
 - name: asg1
maxSize: 2
minSize: 1
| -| autoscalingGroupsnamePrefix | list | `[]` | For GCE. At least one element is required if not using `autoDiscovery`. For example:
 - name: ig01
maxSize: 10
minSize: 0
| -| awsAccessKeyID | string | `""` | AWS access key ID ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) | -| awsRegion | string | `"us-east-1"` | AWS region (required if `cloudProvider=aws`) | -| awsSecretAccessKey | string | `""` | AWS access secret key ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) | -| azureClientID | string | `""` | Service Principal ClientID with contributor permission to Cluster and Node ResourceGroup. Required if `cloudProvider=azure` | -| azureClientSecret | string | `""` | Service Principal ClientSecret with contributor permission to Cluster and Node ResourceGroup. Required if `cloudProvider=azure` | -| azureClusterName | string | `""` | Azure AKS cluster name. Required if `cloudProvider=azure` | -| azureNodeResourceGroup | string | `""` | Azure resource group where the cluster's nodes are located, typically set as `MC___`. Required if `cloudProvider=azure` | -| azureResourceGroup | string | `""` | Azure resource group that the cluster is located. Required if `cloudProvider=azure` | -| azureSubscriptionID | string | `""` | Azure subscription where the resources are located. Required if `cloudProvider=azure` | -| azureTenantID | string | `""` | Azure tenant where the resources are located. Required if `cloudProvider=azure` | -| azureUseManagedIdentityExtension | bool | `false` | Whether to use Azure's managed identity extension for credentials. If using MSI, ensure subscription ID and resource group are set. | -| azureVMType | string | `"AKS"` | Azure VM type. | -| cloudConfigPath | string | `"/etc/gce.conf"` | Configuration file for cloud provider. | -| cloudProvider | string | `"aws"` | The cloud provider where the autoscaler runs. Currently only `gce`, `aws`, `azure` and `magnum` are supported. `aws` supported for AWS. `gce` for GCE. 
`azure` for Azure AKS. `magnum` for OpenStack Magnum. | -| containerSecurityContext | object | `{}` | [Security context for container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) | -| dnsPolicy | string | `"ClusterFirst"` | Defaults to `ClusterFirst`. Valid values are: `ClusterFirstWithHostNet`, `ClusterFirst`, `Default` or `None`. If autoscaler does not depend on cluster DNS, recommended to set this to `Default`. | -| envFromConfigMap | string | `""` | ConfigMap name to use as envFrom. | -| envFromSecret | string | `""` | Secret name to use as envFrom. | -| expanderPriorities | object | `{}` | The expanderPriorities is used if `extraArgs.expander` is set to `priority` and expanderPriorities is also set with the priorities. If `extraArgs.expander` is set to `priority`, then expanderPriorities is used to define cluster-autoscaler-priority-expander priorities. See: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/expander/priority/readme.md | -| extraArgs | object | `{"logtostderr":true,"stderrthreshold":"info","v":4}` | Additional container arguments. Refer to https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca for the full list of cluster autoscaler parameters and their default values. Everything after the first _ will be ignored allowing the use of multi-string arguments. | -| extraEnv | object | `{}` | Additional container environment variables. | -| extraEnvConfigMaps | object | `{}` | Additional container environment variables from ConfigMaps. | -| extraEnvSecrets | object | `{}` | Additional container environment variables from Secrets. | -| extraVolumeMounts | list | `[]` | Additional volumes to mount. | -| extraVolumeSecrets | object | `{}` | Additional volumes to mount from Secrets. | -| extraVolumes | list | `[]` | Additional volumes. | -| fullnameOverride | string | `""` | String to fully override `cluster-autoscaler.fullname` template. 
| -| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| image.pullSecrets | list | `[]` | Image pull secrets | -| image.repository | string | `"k8s.gcr.io/autoscaling/cluster-autoscaler"` | Image repository | -| image.tag | string | `"v1.21.0"` | Image tag | -| kubeTargetVersionOverride | string | `""` | Allow overriding the `.Capabilities.KubeVersion.GitVersion` check. Useful for `helm template` commands. | -| magnumCABundlePath | string | `"/etc/kubernetes/ca-bundle.crt"` | Path to the host's CA bundle, from `ca-file` in the cloud-config file. | -| magnumClusterName | string | `""` | Cluster name or ID in Magnum. Required if `cloudProvider=magnum` and not setting `autoDiscovery.clusterName`. | -| nameOverride | string | `""` | String to partially override `cluster-autoscaler.fullname` template (will maintain the release name) | -| nodeSelector | object | `{}` | Node labels for pod assignment. Ref: https://kubernetes.io/docs/user-guide/node-selection/. | -| podAnnotations | object | `{}` | Annotations to add to each pod. | -| podDisruptionBudget | object | `{"maxUnavailable":1}` | Pod disruption budget. | -| podLabels | object | `{}` | Labels to add to each pod. | -| priorityClassName | string | `""` | priorityClassName | -| priorityConfigMapAnnotations | object | `{}` | Annotations to add to `cluster-autoscaler-priority-expander` ConfigMap. | -| prometheusRule.additionalLabels | object | `{}` | Additional labels to be set in metadata. | -| prometheusRule.enabled | bool | `false` | If true, creates a Prometheus Operator PrometheusRule. | -| prometheusRule.interval | string | `nil` | How often rules in the group are evaluated (falls back to `global.evaluation_interval` if not set). | -| prometheusRule.namespace | string | `"monitoring"` | Namespace which Prometheus is running in. | -| prometheusRule.rules | list | `[]` | Rules spec template (see https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule). 
| -| rbac.create | bool | `true` | If `true`, create and use RBAC resources. | -| rbac.pspEnabled | bool | `false` | If `true`, creates and uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. Must be used with `rbac.create` set to `true`. | -| rbac.serviceAccount.annotations | object | `{}` | Additional Service Account annotations. | -| rbac.serviceAccount.automountServiceAccountToken | bool | `true` | Automount API credentials for a Service Account. | -| rbac.serviceAccount.create | bool | `true` | If `true` and `rbac.create` is also true, a Service Account will be created. | -| rbac.serviceAccount.name | string | `""` | The name of the ServiceAccount to use. If not set and create is `true`, a name is generated using the fullname template. | -| replicaCount | int | `1` | Desired number of pods | -| resources | object | `{}` | Pod resource requests and limits. | -| securityContext | object | `{}` | [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) | -| service.annotations | object | `{}` | Annotations to add to service | -| service.externalIPs | list | `[]` | List of IP addresses at which the service is available. Ref: https://kubernetes.io/docs/user-guide/services/#external-ips. | -| service.labels | object | `{}` | Labels to add to service | -| service.loadBalancerIP | string | `""` | IP address to assign to load balancer (if supported). | -| service.loadBalancerSourceRanges | list | `[]` | List of IP CIDRs allowed access to load balancer (if supported). | -| service.portName | string | `"http"` | Name for service port. | -| service.servicePort | int | `8085` | Service port to expose. | -| service.type | string | `"ClusterIP"` | Type of service to create. | -| serviceMonitor.enabled | bool | `false` | If true, creates a Prometheus Operator ServiceMonitor. 
| -| serviceMonitor.interval | string | `"10s"` | Interval that Prometheus scrapes Cluster Autoscaler metrics. | -| serviceMonitor.namespace | string | `"monitoring"` | Namespace which Prometheus is running in. | -| serviceMonitor.path | string | `"/metrics"` | The path to scrape for metrics; autoscaler exposes `/metrics` (this is standard) | -| serviceMonitor.selector | object | `{"release":"prometheus-operator"}` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install. | -| tolerations | list | `[]` | List of node taints to tolerate (requires Kubernetes >= 1.6). | -| updateStrategy | object | `{}` | [Deployment update strategy](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) | diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/README.md.gotmpl b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/README.md.gotmpl deleted file mode 100644 index dda305c..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/README.md.gotmpl +++ /dev/null @@ -1,335 +0,0 @@ -{{ template "chart.header" . }} - -{{ template "chart.description" . }} - -## TL;DR: - -```console -$ helm repo add autoscaler https://kubernetes.github.io/autoscaler - -# Method 1 - Using Autodiscovery -$ helm install my-release autoscaler/cluster-autoscaler \ ---set 'autoDiscovery.clusterName'= - -# Method 2 - Specifying groups manually -$ helm install my-release autoscaler/cluster-autoscaler \ ---set "autoscalingGroups[0].name=your-asg-name" \ ---set "autoscalingGroups[0].maxSize=10" \ ---set "autoscalingGroups[0].minSize=1" -``` - -## Introduction - -This chart bootstraps a cluster-autoscaler deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
- -## Prerequisites - -- Helm 3+ -- Kubernetes 1.8+ - - [Older versions](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#releases) may work by overriding the `image`. Cluster autoscaler internally simulates the scheduler and bugs between mismatched versions may be subtle. -- Azure AKS specific Prerequisites: - - Kubernetes 1.10+ with RBAC-enabled. - -## Previous Helm Chart - -The previous `cluster-autoscaler` Helm chart hosted at [helm/charts](https://github.com/helm/charts) has been moved to this repository in accordance with the [Deprecation timeline](https://github.com/helm/charts#deprecation-timeline). Note that a few things have changed between this version and the old version: - -- This repository **only** supports Helm chart installations using Helm 3+ since the `apiVersion` on the charts has been marked as `v2`. -- Previous versions of the Helm chart have not been migrated - -## Migration from 1.X to 9.X+ versions of this Chart - -**TL;DR:** -You should choose to use versions >=9.0.0 of the `cluster-autoscaler` chart published from this repository; previous versions, and the `cluster-autoscaler-chart` with versioning 1.X.X published from this repository are deprecated. - -
- Previous versions of this chart - further details -On initial migration of this chart from the `helm/charts` repository this chart was renamed from `cluster-autoscaler` to `cluster-autoscaler-chart` due to technical limitations. This affected all `1.X` releases of the chart, version 2.0.0 of this chart exists only to mark the [`cluster-autoscaler-chart` chart](https://artifacthub.io/packages/helm/cluster-autoscaler/cluster-autoscaler-chart) as deprecated. - -Releases of the chart from `9.0.0` onwards return the naming of the chart to `cluster-autoscaler` and return to following the versioning established by the chart's previous location at . - -To migrate from a 1.X release of the chart to a `9.0.0` or later release, you should first uninstall your `1.X` install of the `cluster-autoscaler-chart` chart, before performing the installation of the new `cluster-autoscaler` chart. -
- -## Migration from 9.0 to 9.1 - -Starting from `9.1.0` the `envFromConfigMap` value is expected to contain the name of a ConfigMap that is used as ref for `envFrom`, similar to `envFromSecret`. If you want to keep the previous behaviour of `envFromConfigMap` you must rename it to `extraEnvConfigMaps`. - -## Installing the Chart - -**By default, no deployment is created and nothing will autoscale**. - -You must provide some minimal configuration, either to specify instance groups or enable auto-discovery. It is not recommended to do both. - -Either: - -- Set `autoDiscovery.clusterName` and provide additional autodiscovery options if necessary **or** -- Set static node group configurations for one or more node groups (using `autoscalingGroups` or `autoscalingGroupsnamePrefix`). - -To create a valid configuration, follow instructions for your cloud provider: - -* [AWS](#aws---using-auto-discovery-of-tagged-instance-groups) -* [GCE](#gce) -* [Azure AKS](#azure-aks) -* [OpenStack Magnum](#openstack-magnum) - -### AWS - Using auto-discovery of tagged instance groups - -Auto-discovery finds ASGs tags as below and automatically manages them based on the min and max size specified in the ASG. `cloudProvider=aws` only. 
- -- Tag the ASGs with keys to match `.Values.autoDiscovery.tags`, by default: `k8s.io/cluster-autoscaler/enabled` and `k8s.io/cluster-autoscaler/` -- Verify the [IAM Permissions](#aws---iam) -- Set `autoDiscovery.clusterName=` -- Set `awsRegion=` -- Set (option) `awsAccessKeyID=` and `awsSecretAccessKey=` if you want to [use AWS credentials directly instead of an instance role](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials) - -```console -$ helm install my-release autoscaler/cluster-autoscaler --set autoDiscovery.clusterName= --set awsRegion= -``` - -Alternatively with your own AWS credentials - -```console -$ helm install my-release autoscaler/cluster-autoscaler --set autoDiscovery.clusterName= --set awsRegion= --set awsAccessKeyID= --set awsSecretAccessKey= -``` - -#### Specifying groups manually - -Without autodiscovery, specify an array of elements each containing ASG name, min size, max size. The sizes specified here will be applied to the ASG, assuming IAM permissions are correctly configured. - -- Verify the [IAM Permissions](#aws---iam) -- Either provide a yaml file setting `autoscalingGroups` (see values.yaml) or use `--set` e.g.: - -```console -$ helm install my-release autoscaler/cluster-autoscaler \ ---set "autoscalingGroups[0].name=your-asg-name" \ ---set "autoscalingGroups[0].maxSize=10" \ ---set "autoscalingGroups[0].minSize=1" -``` - -#### Auto-discovery - -For auto-discovery of instances to work, they must be tagged with the keys in `.Values.autoDiscovery.tags`, which by default are -`k8s.io/cluster-autoscaler/enabled` and `k8s.io/cluster-autoscaler/` - -The value of the tag does not matter, only the key. 
- -An example kops spec excerpt: - -```yaml -apiVersion: kops/v1alpha2 -kind: Cluster -metadata: - name: my.cluster.internal -spec: - additionalPolicies: - node: | - [ - {"Effect":"Allow","Action":["autoscaling:DescribeAutoScalingGroups","autoscaling:DescribeAutoScalingInstances","autoscaling:DescribeLaunchConfigurations","autoscaling:DescribeTags","autoscaling:SetDesiredCapacity","autoscaling:TerminateInstanceInAutoScalingGroup"],"Resource":"*"} - ] - ... ---- -apiVersion: kops/v1alpha2 -kind: InstanceGroup -metadata: - labels: - kops.k8s.io/cluster: my.cluster.internal - name: my-instances -spec: - cloudLabels: - k8s.io/cluster-autoscaler/enabled: "" - k8s.io/cluster-autoscaler/my.cluster.internal: "" - image: kops.io/k8s-1.8-debian-jessie-amd64-hvm-ebs-2018-01-14 - machineType: r4.large - maxSize: 4 - minSize: 0 -``` - -In this example you would need to `--set autoDiscovery.clusterName=my.cluster.internal` when installing. - -It is not recommended to try to mix this with setting `autoscalingGroups` - -See [autoscaler AWS documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#auto-discovery-setup) for a more discussion of the setup. - -### GCE - -The following parameters are required: - -- `autoDiscovery.clusterName=any-name` -- `cloud-provider=gce` -- `autoscalingGroupsnamePrefix[0].name=your-ig-prefix,autoscalingGroupsnamePrefix[0].maxSize=10,autoscalingGroupsnamePrefix[0].minSize=1` - -To use Managed Instance Group (MIG) auto-discovery, provide a YAML file setting `autoscalingGroupsnamePrefix` (see values.yaml) or use `--set` when installing the Chart - e.g. 
- -```console -$ helm install my-release autoscaler/cluster-autoscaler \ ---set "autoscalingGroupsnamePrefix[0].name=your-ig-prefix,autoscalingGroupsnamePrefix[0].maxSize=10,autoscalingGroupsnamePrefix[0].minSize=1" \ ---set autoDiscovery.clusterName= \ ---set cloudProvider=gce -``` - -Note that `your-ig-prefix` should be a _prefix_ matching one or more MIGs, and _not_ the full name of the MIG. For example, to match multiple instance groups - `k8s-node-group-a-standard`, `k8s-node-group-b-gpu`, you would use a prefix of `k8s-node-group-`. - -In the event you want to explicitly specify MIGs instead of using auto-discovery, set members of the `autoscalingGroups` array directly - e.g. - -``` -# where 'n' is the index, starting at 0 --- set autoscalingGroups[n].name=https://content.googleapis.com/compute/v1/projects/$PROJECTID/zones/$ZONENAME/instanceGroupManagers/$FULL-MIG-NAME,autoscalingGroups[n].maxSize=$MAXSIZE,autoscalingGroups[n].minSize=$MINSIZE -``` - -### Azure AKS - -The following parameters are required: - -- `cloudProvider=azure` -- `autoscalingGroups[0].name=your-agent-pool,autoscalingGroups[0].maxSize=10,autoscalingGroups[0].minSize=1` -- `azureClientID: "your-service-principal-app-id"` -- `azureClientSecret: "your-service-principal-client-secret"` -- `azureSubscriptionID: "your-azure-subscription-id"` -- `azureTenantID: "your-azure-tenant-id"` -- `azureClusterName: "your-aks-cluster-name"` -- `azureResourceGroup: "your-aks-cluster-resource-group-name"` -- `azureVMType: "AKS"` -- `azureNodeResourceGroup: "your-aks-cluster-node-resource-group"` - -### OpenStack Magnum - -`cloudProvider: magnum` must be set, and then one of - -- `magnumClusterName=` and `autoscalingGroups` with the names of node groups and min/max node counts -- or `autoDiscovery.clusterName=` with one or more `autoDiscovery.roles`. - -Additionally, `cloudConfigPath: "/etc/kubernetes/cloud-config"` must be set as this should be the location -of the cloud-config file on the host. 
- -Example values files can be found [here](../../cluster-autoscaler/cloudprovider/magnum/examples). - -Install the chart with - -``` -$ helm install my-release autoscaler/cluster-autoscaler -f myvalues.yaml -``` - -## Uninstalling the Chart - -To uninstall `my-release`: - -```console -$ helm uninstall my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -> **Tip**: List all releases using `helm list` or start clean with `helm uninstall my-release` - -## Additional Configuration - -### AWS - IAM - -The worker running the cluster autoscaler will need access to certain resources and actions: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup" - ], - "Resource": "*" - } - ] -} -``` - -- `DescribeTags` is required for autodiscovery. -- `DescribeLaunchConfigurations` is required to scale up an ASG from 0. 
- -If you would like to limit the scope of the Cluster Autoscaler to ***only*** modify ASGs for a particular cluster, use the following policy instead: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeLaunchTemplateVersions" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "autoscaling:UpdateAutoScalingGroup" - ], - "Resource": [ - "arn:aws:autoscaling:::autoScalingGroup::autoScalingGroupName/node-group-1", - "arn:aws:autoscaling:::autoScalingGroup::autoScalingGroupName/node-group-2", - "arn:aws:autoscaling:::autoScalingGroup::autoScalingGroupName/node-group-3" - ], - "Condition": { - "StringEquals": { - "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled": "true", - "autoscaling:ResourceTag/kubernetes.io/cluster/": "owned" - } - } - } - ] -} -``` - -Make sure to replace the variables ``, ``, ``, and the ARNs of the ASGs where applicable. - -### AWS - IAM Roles for Service Accounts (IRSA) - -For Kubernetes clusters that use Amazon EKS, the service account can be configured with an IAM role using [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) to avoid needing to grant access to the worker nodes for AWS resources. - -In order to accomplish this, you will first need to create a new IAM role with the above mentions policies. Take care in [configuring the trust relationship](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html#iam-role-configuration) to restrict access just to the service account used by cluster autoscaler. 
- -Once you have the IAM role configured, you would then need to `--set rbac.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=arn:aws:iam::123456789012:role/MyRoleName` when installing. - -## Troubleshooting - -The chart will succeed even if the container arguments are incorrect. A few minutes after starting -`kubectl logs -l "app=aws-cluster-autoscaler" --tail=50` should loop through something like - -``` -polling_autoscaler.go:111] Poll finished -static_autoscaler.go:97] Starting main loop -utils.go:435] No pod using affinity / antiaffinity found in cluster, disabling affinity predicate for this loop -static_autoscaler.go:230] Filtering out schedulables -``` - -If not, find a pod that the deployment created and `describe` it, paying close attention to the arguments under `Command`. e.g.: - -``` -Containers: - cluster-autoscaler: - Command: - ./cluster-autoscaler - --cloud-provider=aws -# if specifying ASGs manually - --nodes=1:10:your-scaling-group-name -# if using autodiscovery - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ - --v=4 -``` - -### PodSecurityPolicy - -Though enough for the majority of installations, the default PodSecurityPolicy _could_ be too restrictive depending on the specifics of your release. Please make sure to check that the template fits with any customizations made or disable it by setting `rbac.pspEnabled` to `false`. - -{{ template "chart.valuesSection" . 
}} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/NOTES.txt b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/NOTES.txt deleted file mode 100644 index 94e211e..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/NOTES.txt +++ /dev/null @@ -1,18 +0,0 @@ -{{- if or .Values.autoDiscovery.clusterName .Values.autoscalingGroups -}} - -To verify that cluster-autoscaler has started, run: - - kubectl --namespace={{ .Release.Namespace }} get pods -l "app.kubernetes.io/name={{ template "cluster-autoscaler.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" - -{{- else -}} - -############################################################################## -#### ERROR: You must specify values for either #### -#### autoDiscovery.clusterName or autoscalingGroups[] #### -############################################################################## - -The deployment and pod will not be created and the installation is not functional -See README: - open https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler - -{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/_helpers.tpl deleted file mode 100644 index 0723059..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/_helpers.tpl +++ /dev/null @@ -1,87 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "cluster-autoscaler.name" -}} -{{- default (printf "%s-%s" .Values.cloudProvider .Chart.Name) .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. 
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "cluster-autoscaler.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default (printf "%s-%s" .Values.cloudProvider .Chart.Name) .Values.nameOverride -}} -{{- if ne $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s" $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "cluster-autoscaler.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Return instance and name labels. -*/}} -{{- define "cluster-autoscaler.instance-name" -}} -app.kubernetes.io/instance: {{ .Release.Name | quote }} -app.kubernetes.io/name: {{ include "cluster-autoscaler.name" . | quote }} -{{- end -}} - - -{{/* -Return labels, including instance and name. -*/}} -{{- define "cluster-autoscaler.labels" -}} -{{ include "cluster-autoscaler.instance-name" . }} -app.kubernetes.io/managed-by: {{ .Release.Service | quote }} -helm.sh/chart: {{ include "cluster-autoscaler.chart" . | quote }} -{{- if .Values.additionalLabels }} -{{ toYaml .Values.additionalLabels }} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for deployment. -*/}} -{{- define "deployment.apiVersion" -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if semverCompare "<1.9-0" $kubeTargetVersion -}} -{{- print "apps/v1beta2" -}} -{{- else -}} -{{- print "apps/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for podsecuritypolicy. 
-*/}} -{{- define "podsecuritypolicy.apiVersion" -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if semverCompare "<1.10-0" $kubeTargetVersion -}} -{{- print "extensions/v1beta1" -}} -{{- else -}} -{{- print "policy/v1beta1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the service account name used by the pod. -*/}} -{{- define "cluster-autoscaler.serviceAccountName" -}} -{{- if .Values.rbac.serviceAccount.create -}} - {{ default (include "cluster-autoscaler.fullname" .) .Values.rbac.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.rbac.serviceAccount.name }} -{{- end -}} -{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml deleted file mode 100644 index 409fbe2..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml +++ /dev/null @@ -1,150 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: -{{ include "cluster-autoscaler.labels" . | indent 4 }} - name: {{ template "cluster-autoscaler.fullname" . 
}} -rules: - - apiGroups: - - "" - resources: - - events - - endpoints - verbs: - - create - - patch - - apiGroups: - - "" - resources: - - pods/eviction - verbs: - - create - - apiGroups: - - "" - resources: - - pods/status - verbs: - - update - - apiGroups: - - "" - resources: - - endpoints - resourceNames: - - cluster-autoscaler - verbs: - - get - - update - - apiGroups: - - "" - resources: - - nodes - verbs: - - watch - - list - - get - - update - - apiGroups: - - "" - resources: - - namespaces - - pods - - services - - replicationcontrollers - - persistentvolumeclaims - - persistentvolumes - verbs: - - watch - - list - - get - - apiGroups: - - batch - resources: - - jobs - - cronjobs - verbs: - - watch - - list - - get - - apiGroups: - - batch - - extensions - resources: - - jobs - verbs: - - get - - list - - patch - - watch - - apiGroups: - - extensions - resources: - - replicasets - - daemonsets - verbs: - - watch - - list - - get - - apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - watch - - list - - apiGroups: - - apps - resources: - - daemonsets - - replicasets - - statefulsets - verbs: - - watch - - list - - get - - apiGroups: - - storage.k8s.io - resources: - - storageclasses - - csinodes - - csidrivers - - csistoragecapacities - verbs: - - watch - - list - - get - - apiGroups: - - "" - resources: - - configmaps - verbs: - - list - - watch - - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create - - apiGroups: - - coordination.k8s.io - resourceNames: - - cluster-autoscaler - resources: - - leases - verbs: - - get - - update -{{- if .Values.rbac.pspEnabled }} - - apiGroups: - - extensions - - policy - resources: - - podsecuritypolicies - resourceNames: - - {{ template "cluster-autoscaler.fullname" . 
}} - verbs: - - use -{{- end -}} - -{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml deleted file mode 100644 index d1e8308..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: -{{ include "cluster-autoscaler.labels" . | indent 4 }} - name: {{ template "cluster-autoscaler.fullname" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "cluster-autoscaler.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "cluster-autoscaler.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/deployment.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/deployment.yaml deleted file mode 100644 index 46246f2..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/deployment.yaml +++ /dev/null @@ -1,256 +0,0 @@ -{{- if or .Values.autoDiscovery.clusterName .Values.autoscalingGroups }} -{{/* one of the above is required */}} -apiVersion: {{ template "deployment.apiVersion" . }} -kind: Deployment -metadata: - labels: -{{ include "cluster-autoscaler.labels" . | indent 4 }} - name: {{ template "cluster-autoscaler.fullname" . }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: -{{ include "cluster-autoscaler.instance-name" . 
| indent 6 }} - {{- if .Values.podLabels }} -{{ toYaml .Values.podLabels | indent 6 }} - {{- end }} -{{- if .Values.updateStrategy }} - strategy: - {{ toYaml .Values.updateStrategy | nindent 4 | trim }} -{{- end }} - template: - metadata: - {{- if .Values.podAnnotations }} - annotations: -{{ toYaml .Values.podAnnotations | indent 8 }} - {{- end }} - labels: -{{ include "cluster-autoscaler.instance-name" . | indent 8 }} - {{- if .Values.additionalLabels }} -{{ toYaml .Values.additionalLabels | indent 8 }} - {{- end }} - {{- if .Values.podLabels }} -{{ toYaml .Values.podLabels | indent 8 }} - {{- end }} - spec: - {{- if .Values.priorityClassName }} - priorityClassName: "{{ .Values.priorityClassName }}" - {{- end }} - {{- if .Values.dnsPolicy }} - dnsPolicy: "{{ .Values.dnsPolicy }}" - {{- end }} - containers: - - name: {{ template "cluster-autoscaler.name" . }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: "{{ .Values.image.pullPolicy }}" - command: - - ./cluster-autoscaler - - --cloud-provider={{ .Values.cloudProvider }} - - --namespace={{ .Release.Namespace }} - {{- if .Values.autoscalingGroups }} - {{- range .Values.autoscalingGroups }} - - --nodes={{ .minSize }}:{{ .maxSize }}:{{ .name }} - {{- end }} - {{- end }} - {{- if eq .Values.cloudProvider "aws" }} - {{- if .Values.autoDiscovery.clusterName }} - - --node-group-auto-discovery=asg:tag={{ tpl (join "," .Values.autoDiscovery.tags) . }} - {{- end }} - {{- else if eq .Values.cloudProvider "gce" }} - {{- if .Values.autoscalingGroupsnamePrefix }} - {{- range .Values.autoscalingGroupsnamePrefix }} - - --node-group-auto-discovery=mig:namePrefix={{ .name }},min={{ .minSize }},max={{ .maxSize }} - {{- end }} - {{- end }} - {{- else if eq .Values.cloudProvider "magnum" }} - {{- if .Values.autoDiscovery.clusterName }} - - --cluster-name={{ .Values.autoDiscovery.clusterName }} - - --node-group-auto-discovery=magnum:role={{ tpl (join "," .Values.autoDiscovery.roles) . 
}} - {{- else }} - - --cluster-name={{ .Values.magnumClusterName }} - {{- end }} - {{- end }} - {{- if eq .Values.cloudProvider "magnum" }} - - --cloud-config={{ .Values.cloudConfigPath }} - {{- end }} - {{- range $key, $value := .Values.extraArgs }} - {{- if not (kindIs "invalid" $value) }} - - --{{ $key | mustRegexFind "^[^_]+" }}={{ $value }} - {{- else }} - - --{{ $key | mustRegexFind "^[^_]+" }} - {{- end }} - {{- end }} - env: - {{- if and (eq .Values.cloudProvider "aws") (ne .Values.awsRegion "") }} - - name: AWS_REGION - value: "{{ .Values.awsRegion }}" - {{- if .Values.awsAccessKeyID }} - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - key: AwsAccessKeyId - name: {{ template "cluster-autoscaler.fullname" . }} - {{- end }} - {{- if .Values.awsSecretAccessKey }} - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - key: AwsSecretAccessKey - name: {{ template "cluster-autoscaler.fullname" . }} - {{- end }} - {{- else if eq .Values.cloudProvider "azure" }} - - name: ARM_SUBSCRIPTION_ID - valueFrom: - secretKeyRef: - key: SubscriptionID - name: {{ template "cluster-autoscaler.fullname" . }} - - name: ARM_RESOURCE_GROUP - valueFrom: - secretKeyRef: - key: ResourceGroup - name: {{ template "cluster-autoscaler.fullname" . }} - - name: ARM_VM_TYPE - valueFrom: - secretKeyRef: - key: VMType - name: {{ template "cluster-autoscaler.fullname" . }} - {{- if .Values.azureUseManagedIdentityExtension }} - - name: ARM_USE_MANAGED_IDENTITY_EXTENSION - value: "true" - {{- else }} - - name: ARM_TENANT_ID - valueFrom: - secretKeyRef: - key: TenantID - name: {{ template "cluster-autoscaler.fullname" . }} - - name: ARM_CLIENT_ID - valueFrom: - secretKeyRef: - key: ClientID - name: {{ template "cluster-autoscaler.fullname" . }} - - name: ARM_CLIENT_SECRET - valueFrom: - secretKeyRef: - key: ClientSecret - name: {{ template "cluster-autoscaler.fullname" . 
}} - - name: AZURE_CLUSTER_NAME - valueFrom: - secretKeyRef: - key: ClusterName - name: {{ template "cluster-autoscaler.fullname" . }} - - name: AZURE_NODE_RESOURCE_GROUP - valueFrom: - secretKeyRef: - key: NodeResourceGroup - name: {{ template "cluster-autoscaler.fullname" . }} - {{- end }} - {{- end }} - {{- range $key, $value := .Values.extraEnv }} - - name: {{ $key }} - value: "{{ $value }}" - {{- end }} - {{- range $key, $value := .Values.extraEnvConfigMaps }} - - name: {{ $key }} - valueFrom: - configMapKeyRef: - name: {{ default (include "cluster-autoscaler.fullname" $) $value.name }} - key: {{ required "Must specify key!" $value.key }} - {{- end }} - {{- range $key, $value := .Values.extraEnvSecrets }} - - name: {{ $key }} - valueFrom: - secretKeyRef: - name: {{ default (include "cluster-autoscaler.fullname" $) $value.name }} - key: {{ required "Must specify key!" $value.key }} - {{- end }} - {{- if or .Values.envFromSecret .Values.envFromConfigMap }} - envFrom: - {{- if .Values.envFromSecret }} - - secretRef: - name: {{ .Values.envFromSecret }} - {{- end }} - {{- if .Values.envFromConfigMap }} - - configMapRef: - name: {{ .Values.envFromConfigMap }} - {{- end }} - {{- end }} - livenessProbe: - httpGet: - path: /health-check - port: 8085 - ports: - - containerPort: 8085 - resources: -{{ toYaml .Values.resources | indent 12 }} - {{- if .Values.containerSecurityContext }} - securityContext: - {{ toYaml .Values.containerSecurityContext | nindent 12 | trim }} - {{- end }} - {{- if or (eq .Values.cloudProvider "magnum") .Values.extraVolumeSecrets .Values.extraVolumeMounts }} - volumeMounts: - {{- if eq .Values.cloudProvider "magnum" }} - - name: cloudconfig - mountPath: {{ .Values.cloudConfigPath }} - readOnly: true - {{- end }} - {{- if and (eq .Values.cloudProvider "magnum") (.Values.magnumCABundlePath) }} - - name: ca-bundle - mountPath: {{ .Values.magnumCABundlePath }} - readOnly: true - {{- end }} - {{- range $key, $value := .Values.extraVolumeSecrets }} - 
- name: {{ $key }} - mountPath: {{ required "Must specify mountPath!" $value.mountPath }} - readOnly: true - {{- end }} - {{- if .Values.extraVolumeMounts }} - {{ toYaml .Values.extraVolumeMounts | nindent 12 }} - {{- end }} - {{- end }} - {{- if .Values.affinity }} - affinity: -{{ toYaml .Values.affinity | indent 8 }} - {{- end }} - {{- if .Values.nodeSelector }} - nodeSelector: -{{ toYaml .Values.nodeSelector | indent 8 }} - {{- end }} - serviceAccountName: {{ template "cluster-autoscaler.serviceAccountName" . }} - tolerations: -{{ toYaml .Values.tolerations | indent 8 }} - {{- if .Values.securityContext }} - securityContext: - {{ toYaml .Values.securityContext | nindent 8 | trim }} - {{- end }} - {{- if or (eq .Values.cloudProvider "magnum") .Values.extraVolumeSecrets .Values.extraVolumes }} - volumes: - {{- if eq .Values.cloudProvider "magnum" }} - - name: cloudconfig - hostPath: - path: {{ .Values.cloudConfigPath }} - {{- end }} - {{- if and (eq .Values.cloudProvider "magnum") (.Values.magnumCABundlePath) }} - - name: ca-bundle - hostPath: - path: {{ .Values.magnumCABundlePath }} - {{- end }} - {{- range $key, $value := .Values.extraVolumeSecrets }} - - name: {{ $key }} - secret: - secretName: {{ default (include "cluster-autoscaler.fullname" $) $value.name }} - {{- if $value.items }} - items: - {{- toYaml $value.items | nindent 14 }} - {{- end }} - {{- end }} - {{- if .Values.extraVolumes }} - {{- toYaml .Values.extraVolumes | nindent 10 }} - {{- end }} - {{- end }} - {{- if .Values.image.pullSecrets }} - imagePullSecrets: - {{- range .Values.image.pullSecrets }} - - name: {{ . 
}} - {{- end }} - {{- end }} -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/pdb.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/pdb.yaml deleted file mode 100644 index da5bd56..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/pdb.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if .Values.podDisruptionBudget -}} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: -{{ include "cluster-autoscaler.labels" . | indent 4 }} - name: {{ template "cluster-autoscaler.fullname" . }} -spec: - selector: - matchLabels: -{{ include "cluster-autoscaler.instance-name" . | indent 6 }} -{{- if .Values.podDisruptionBudget }} - {{ toYaml .Values.podDisruptionBudget | nindent 2 }} -{{- end }} -{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml deleted file mode 100644 index 28369bf..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml +++ /dev/null @@ -1,46 +0,0 @@ -{{- if .Values.rbac.pspEnabled }} -apiVersion: {{ template "podsecuritypolicy.apiVersion" . }} -kind: PodSecurityPolicy -metadata: - name: {{ template "cluster-autoscaler.fullname" . }} - labels: -{{ include "cluster-autoscaler.labels" . | indent 4 }} -spec: - # Prevents running in privileged mode - privileged: false - # Required to prevent escalations to root. 
- allowPrivilegeEscalation: false - requiredDropCapabilities: - - ALL - volumes: - - 'configMap' - - 'secret' - - 'hostPath' - - 'emptyDir' - - 'projected' - - 'downwardAPI' -{{- if eq .Values.cloudProvider "gce" }} - allowedHostPaths: - - pathPrefix: {{ .Values.cloudConfigPath }} -{{- end }} - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - readOnlyRootFilesystem: false -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml deleted file mode 100644 index 5bb2024..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if hasKey .Values.extraArgs "expander" }} -{{- if and (.Values.expanderPriorities) (eq .Values.extraArgs.expander "priority") -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: cluster-autoscaler-priority-expander - labels: -{{ include "cluster-autoscaler.labels" . 
| indent 4 }} - {{- if .Values.priorityConfigMapAnnotations }} - annotations: -{{ toYaml .Values.priorityConfigMapAnnotations | indent 4 }} - {{- end }} -data: - priorities: |- -{{ .Values.expanderPriorities | indent 4 }} -{{- end -}} -{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml deleted file mode 100644 index 097c969..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if .Values.prometheusRule.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ include "cluster-autoscaler.fullname" . }} - {{- if .Values.prometheusRule.namespace }} - namespace: {{ .Values.prometheusRule.namespace }} - {{- end }} - labels: {{- toYaml .Values.prometheusRule.additionalLabels | nindent 4 }} -spec: - groups: - - name: {{ include "cluster-autoscaler.fullname" . }} - interval: {{ .Values.prometheusRule.interval }} - rules: {{- tpl (toYaml .Values.prometheusRule.rules) . | nindent 8 }} -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/role.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/role.yaml deleted file mode 100644 index c1f226e..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/role.yaml +++ /dev/null @@ -1,46 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: -{{ include "cluster-autoscaler.labels" . | indent 4 }} - name: {{ template "cluster-autoscaler.fullname" . 
}} -rules: - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create -{{- if eq (default "" .Values.extraArgs.expander) "priority" }} - - list - - watch -{{- end }} - - apiGroups: - - "" - resources: - - configmaps - resourceNames: - - cluster-autoscaler-status -{{- if eq (default "" .Values.extraArgs.expander) "priority" }} - - cluster-autoscaler-priority-expander -{{- end }} - verbs: - - delete - - get - - update -{{- if eq (default "" .Values.extraArgs.expander) "priority" }} - - watch -{{- end }} -{{- if eq (default "" (index .Values.extraArgs "leader-elect-resource-lock")) "configmaps" }} - - apiGroups: - - "" - resources: - - configmaps - resourceNames: - - cluster-autoscaler - verbs: - - get - - update -{{- end }} -{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml deleted file mode 100644 index 938bc03..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: -{{ include "cluster-autoscaler.labels" . | indent 4 }} - name: {{ template "cluster-autoscaler.fullname" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ template "cluster-autoscaler.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "cluster-autoscaler.serviceAccountName" . 
}} - namespace: {{ .Release.Namespace }} -{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/secret.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/secret.yaml deleted file mode 100644 index 3f0ef09..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/secret.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if or (eq .Values.cloudProvider "azure") (and (eq .Values.cloudProvider "aws") (not (has "" (list .Values.awsAccessKeyID .Values.awsSecretAccessKey)))) }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ template "cluster-autoscaler.fullname" . }} -data: -{{- if eq .Values.cloudProvider "azure" }} - ClientID: "{{ .Values.azureClientID | b64enc }}" - ClientSecret: "{{ .Values.azureClientSecret | b64enc }}" - ResourceGroup: "{{ .Values.azureResourceGroup | b64enc }}" - SubscriptionID: "{{ .Values.azureSubscriptionID | b64enc }}" - TenantID: "{{ .Values.azureTenantID | b64enc }}" - VMType: "{{ .Values.azureVMType | b64enc }}" - ClusterName: "{{ .Values.azureClusterName | b64enc }}" - NodeResourceGroup: "{{ .Values.azureNodeResourceGroup | b64enc }}" -{{- else if eq .Values.cloudProvider "aws" }} - AwsAccessKeyId: "{{ .Values.awsAccessKeyID | b64enc }}" - AwsSecretAccessKey: "{{ .Values.awsSecretAccessKey | b64enc }}" -{{- end }} -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/service.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/service.yaml deleted file mode 100644 index dd8903d..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/service.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.service.annotations }} - annotations: 
-{{ toYaml .Values.service.annotations | indent 4 }} -{{- end }} - labels: -{{ include "cluster-autoscaler.labels" . | indent 4 }} -{{- if .Values.service.labels }} -{{ toYaml .Values.service.labels | indent 4 }} -{{- end }} - name: {{ template "cluster-autoscaler.fullname" . }} -spec: -{{- if .Values.service.clusterIP }} - clusterIP: "{{ .Values.service.clusterIP }}" -{{- end }} -{{- if .Values.service.externalIPs }} - externalIPs: -{{ toYaml .Values.service.externalIPs | indent 4 }} -{{- end }} -{{- if .Values.service.loadBalancerIP }} - loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" -{{- end }} -{{- if .Values.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: -{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} -{{- end }} - ports: - - port: {{ .Values.service.servicePort }} - protocol: TCP - targetPort: 8085 - name: {{ .Values.service.portName }} - selector: -{{ include "cluster-autoscaler.instance-name" . | indent 4 }} - type: "{{ .Values.service.type }}" diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml deleted file mode 100644 index a0b9d25..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{ if .Values.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ include "cluster-autoscaler.fullname" . }} - {{- if .Values.serviceMonitor.namespace }} - namespace: {{ .Values.serviceMonitor.namespace }} - {{- end }} - labels: - {{- range $key, $value := .Values.serviceMonitor.selector }} - {{ $key }}: {{ $value | quote }} - {{- end }} -spec: - selector: - matchLabels: -{{ include "cluster-autoscaler.instance-name" . 
| indent 6 }} - endpoints: - - port: {{ .Values.service.portName }} - interval: {{ .Values.serviceMonitor.interval }} - path: {{ .Values.serviceMonitor.path }} - namespaceSelector: - matchNames: - - {{.Release.Namespace}} -{{ end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/values.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/values.yaml deleted file mode 100644 index aebcb66..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/cluster-autoscaler/values.yaml +++ /dev/null @@ -1,339 +0,0 @@ -## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -# affinity -- Affinity for pod assignment -affinity: {} - -autoDiscovery: - # cloudProviders `aws`, `gce` and `magnum` are supported by auto-discovery at this time - # AWS: Set tags as described in https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#auto-discovery-setup - - # autoDiscovery.clusterName -- Enable autodiscovery for `cloudProvider=aws`, for groups matching `autoDiscovery.tags`. - # Enable autodiscovery for `cloudProvider=gce`, but no MIG tagging required. - # Enable autodiscovery for `cloudProvider=magnum`, for groups matching `autoDiscovery.roles`. - clusterName: "adsd-cumulus-dev" - - # autoDiscovery.tags -- ASG tags to match, run through `tpl`. - tags: - - k8s.io/cluster-autoscaler/enabled - - k8s.io/cluster-autoscaler/{{ .Values.autoDiscovery.clusterName }} - # - kubernetes.io/cluster/{{ .Values.autoDiscovery.clusterName }} - - # autoDiscovery.roles -- Magnum node group roles to match. - roles: - - worker - -# autoscalingGroups -- For AWS, Azure AKS or Magnum. At least one element is required if not using `autoDiscovery`. For example: -#
-# - name: asg1
-# maxSize: 2
-# minSize: 1 -#
-autoscalingGroups: [] -# - name: asg1 -# maxSize: 2 -# minSize: 1 -# - name: asg2 -# maxSize: 2 -# minSize: 1 - -# autoscalingGroupsnamePrefix -- For GCE. At least one element is required if not using `autoDiscovery`. For example: -#
-# - name: ig01
-# maxSize: 10
-# minSize: 0 -#
-autoscalingGroupsnamePrefix: [] -# - name: ig01 -# maxSize: 10 -# minSize: 0 -# - name: ig02 -# maxSize: 10 -# minSize: 0 - -# awsAccessKeyID -- AWS access key ID ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) -awsAccessKeyID: "" - -# awsRegion -- AWS region (required if `cloudProvider=aws`) -awsRegion: us-gov-east-1 - -# awsSecretAccessKey -- AWS access secret key ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) -awsSecretAccessKey: "" - -# azureClientID -- Service Principal ClientID with contributor permission to Cluster and Node ResourceGroup. -# Required if `cloudProvider=azure` -azureClientID: "" - -# azureClientSecret -- Service Principal ClientSecret with contributor permission to Cluster and Node ResourceGroup. -# Required if `cloudProvider=azure` -azureClientSecret: "" - -# azureResourceGroup -- Azure resource group that the cluster is located. -# Required if `cloudProvider=azure` -azureResourceGroup: "" - -# azureSubscriptionID -- Azure subscription where the resources are located. -# Required if `cloudProvider=azure` -azureSubscriptionID: "" - -# azureTenantID -- Azure tenant where the resources are located. -# Required if `cloudProvider=azure` -azureTenantID: "" - -# azureVMType -- Azure VM type. -azureVMType: "AKS" - -# azureClusterName -- Azure AKS cluster name. -# Required if `cloudProvider=azure` -azureClusterName: "" - -# azureNodeResourceGroup -- Azure resource group where the cluster's nodes are located, typically set as `MC___`. -# Required if `cloudProvider=azure` -azureNodeResourceGroup: "" - -# azureUseManagedIdentityExtension -- Whether to use Azure's managed identity extension for credentials. If using MSI, ensure subscription ID and resource group are set. 
-azureUseManagedIdentityExtension: false - -# magnumClusterName -- Cluster name or ID in Magnum. -# Required if `cloudProvider=magnum` and not setting `autoDiscovery.clusterName`. -magnumClusterName: "" - -# magnumCABundlePath -- Path to the host's CA bundle, from `ca-file` in the cloud-config file. -magnumCABundlePath: "/etc/kubernetes/ca-bundle.crt" - -# cloudConfigPath -- Configuration file for cloud provider. -cloudConfigPath: /etc/gce.conf - -# cloudProvider -- The cloud provider where the autoscaler runs. -# Currently only `gce`, `aws`, `azure` and `magnum` are supported. -# `aws` supported for AWS. `gce` for GCE. `azure` for Azure AKS. -# `magnum` for OpenStack Magnum. -cloudProvider: aws - -# containerSecurityContext -- [Security context for container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) -containerSecurityContext: {} - # capabilities: - # drop: - # - ALL - -# dnsPolicy -- Defaults to `ClusterFirst`. Valid values are: -# `ClusterFirstWithHostNet`, `ClusterFirst`, `Default` or `None`. -# If autoscaler does not depend on cluster DNS, recommended to set this to `Default`. -dnsPolicy: ClusterFirst - -## Priorities Expander -# expanderPriorities -- The expanderPriorities is used if `extraArgs.expander` is set to `priority` and expanderPriorities is also set with the priorities. -# If `extraArgs.expander` is set to `priority`, then expanderPriorities is used to define cluster-autoscaler-priority-expander priorities. -# See: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/expander/priority/readme.md -expanderPriorities: {} - -# priorityConfigMapAnnotations -- Annotations to add to `cluster-autoscaler-priority-expander` ConfigMap. -priorityConfigMapAnnotations: {} - # key1: "value1" - # key2: "value2" - -# extraArgs -- Additional container arguments. 
-# Refer to https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca for the full list of cluster autoscaler -# parameters and their default values. -# Everything after the first _ will be ignored allowing the use of multi-string arguments. -extraArgs: - logtostderr: true - stderrthreshold: info - v: 4 - # write-status-configmap: true - # status-config-map-name: cluster-autoscaler-status - # leader-elect: true - # leader-elect-resource-lock: endpoints - skip-nodes-with-local-storage: true - expander: least-waste - # scale-down-enabled: true - balance-similar-node-groups: true - aws-use-static-instance-list: true - # min-replica-count: 0 - # scale-down-utilization-threshold: 0.5 - # scale-down-non-empty-candidates-count: 30 - # max-node-provision-time: 15m0s - # scan-interval: 10s - # scale-down-delay-after-add: 10m - # scale-down-delay-after-delete: 0s - # scale-down-delay-after-failure: 3m - # scale-down-unneeded-time: 10m - skip-nodes-with-system-pods: false - # balancing-ignore-label_1: first-label-to-ignore - # balancing-ignore-label_2: second-label-to-ignore - -# extraEnv -- Additional container environment variables. -extraEnv: {} - -# extraEnvConfigMaps -- Additional container environment variables from ConfigMaps. -extraEnvConfigMaps: {} - -# extraEnvSecrets -- Additional container environment variables from Secrets. -extraEnvSecrets: {} - -# envFromConfigMap -- ConfigMap name to use as envFrom. -envFromConfigMap: "" - -# envFromSecret -- Secret name to use as envFrom. -envFromSecret: "" - -# extraVolumeSecrets -- Additional volumes to mount from Secrets. -extraVolumeSecrets: {} - # autoscaler-vol: - # mountPath: /data/autoscaler/ - # custom-vol: - # name: custom-secret - # mountPath: /data/custom/ - # items: - # - key: subkey - # path: mypath - -# extraVolumes -- Additional volumes. 
-extraVolumes: [] - # - name: ssl-certs - # hostPath: - # path: /etc/ssl/certs/ca-bundle.crt - -# extraVolumeMounts -- Additional volumes to mount. -extraVolumeMounts: [] - # - name: ssl-certs - # mountPath: /etc/ssl/certs/ca-certificates.crt - # readOnly: true - -# fullnameOverride -- String to fully override `cluster-autoscaler.fullname` template. -fullnameOverride: "" - -image: - # image.repository -- Image repository - repository: 252960665057.dkr.ecr.us-gov-east-1.amazonaws.com/eks/adsd-cumulus-dev/cluster-autoscaler - # image.tag -- Image tag - tag: v1.21.0 - # image.pullPolicy -- Image pull policy - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # image.pullSecrets -- Image pull secrets - pullSecrets: [] - # - myRegistrKeySecretName - -# kubeTargetVersionOverride -- Allow overriding the `.Capabilities.KubeVersion.GitVersion` check. Useful for `helm template` commands. -kubeTargetVersionOverride: "" - -# nameOverride -- String to partially override `cluster-autoscaler.fullname` template (will maintain the release name) -nameOverride: "" - -# nodeSelector -- Node labels for pod assignment. Ref: https://kubernetes.io/docs/user-guide/node-selection/. -nodeSelector: {} - -# podAnnotations -- Annotations to add to each pod. -podAnnotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: "false" - -# podDisruptionBudget -- Pod disruption budget. -podDisruptionBudget: - maxUnavailable: 1 - # minAvailable: 2 - -# podLabels -- Labels to add to each pod. -podLabels: {} - -# additionalLabels -- Labels to add to each object of the chart. -additionalLabels: {} - -# priorityClassName -- priorityClassName -priorityClassName: "" - -rbac: - # rbac.create -- If `true`, create and use RBAC resources. 
- create: true - # rbac.pspEnabled -- If `true`, creates and uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. - # Must be used with `rbac.create` set to `true`. - pspEnabled: false - serviceAccount: - # rbac.serviceAccount.annotations -- Additional Service Account annotations. - annotations: - eks.amazonaws.com/role-arn: "arn:aws:iam::252960665057:role/eks-adsd-cumulus-dev-irsa-kube-system-cluster-autoscaler" - # rbac.serviceAccount.create -- If `true` and `rbac.create` is also true, a Service Account will be created. - create: true - # rbac.serviceAccount.name -- The name of the ServiceAccount to use. If not set and create is `true`, a name is generated using the fullname template. - name: "cluster-autoscaler" - # rbac.serviceAccount.automountServiceAccountToken -- Automount API credentials for a Service Account. - automountServiceAccountToken: true - -# replicaCount -- Desired number of pods -replicaCount: 1 - -# resources -- Pod resource requests and limits. -resources: - limits: - cpu: 100m - memory: 600Mi - requests: - cpu: 100m - memory: 600Mi - -# securityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) -securityContext: {} - # runAsNonRoot: true - # runAsUser: 1001 - # runAsGroup: 1001 - -service: - # service.annotations -- Annotations to add to service - annotations: {} - # service.labels -- Labels to add to service - labels: {} - # service.externalIPs -- List of IP addresses at which the service is available. Ref: https://kubernetes.io/docs/user-guide/services/#external-ips. - externalIPs: [] - - # service.loadBalancerIP -- IP address to assign to load balancer (if supported). - loadBalancerIP: "" - # service.loadBalancerSourceRanges -- List of IP CIDRs allowed access to load balancer (if supported). - loadBalancerSourceRanges: [] - # service.servicePort -- Service port to expose. 
- servicePort: 8085 - # service.portName -- Name for service port. - portName: http - # service.type -- Type of service to create. - type: ClusterIP - -## Are you using Prometheus Operator? -serviceMonitor: - # serviceMonitor.enabled -- If true, creates a Prometheus Operator ServiceMonitor. - enabled: false - # serviceMonitor.interval -- Interval that Prometheus scrapes Cluster Autoscaler metrics. - interval: 10s - # serviceMonitor.namespace -- Namespace which Prometheus is running in. - namespace: monitoring - ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) - ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) - # serviceMonitor.selector -- Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install. - selector: - release: prometheus-operator - # serviceMonitor.path -- The path to scrape for metrics; autoscaler exposes `/metrics` (this is standard) - path: /metrics - -## Custom PrometheusRule to be defined -## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart -## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions -prometheusRule: - # prometheusRule.enabled -- If true, creates a Prometheus Operator PrometheusRule. - enabled: false - # prometheusRule.additionalLabels -- Additional labels to be set in metadata. - additionalLabels: {} - # prometheusRule.namespace -- Namespace which Prometheus is running in. - namespace: monitoring - # prometheusRule.interval -- How often rules in the group are evaluated (falls back to `global.evaluation_interval` if not set). - interval: null - # prometheusRule.rules -- Rules spec template (see https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule). 
- rules: [] - -# tolerations -- List of node taints to tolerate (requires Kubernetes >= 1.6). -tolerations: [] - -# updateStrategy -- [Deployment update strategy](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) -updateStrategy: {} - # rollingUpdate: - # maxSurge: 1 - # maxUnavailable: 0 - # type: RollingUpdate diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/.helmignore b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/Chart.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/Chart.yaml deleted file mode 100644 index f428bb8..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: intermediate-certificate-issuer -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. 
They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "1.16.0" diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl deleted file mode 100644 index 5f6c44f..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "intermediate-certificate-issuer.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "intermediate-certificate-issuer.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "intermediate-certificate-issuer.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "intermediate-certificate-issuer.labels" -}} -helm.sh/chart: {{ include "intermediate-certificate-issuer.chart" . }} -{{ include "intermediate-certificate-issuer.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "intermediate-certificate-issuer.selectorLabels" -}} -app.kubernetes.io/name: {{ include "intermediate-certificate-issuer.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "intermediate-certificate-issuer.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "intermediate-certificate-issuer.fullname" .) 
.Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml deleted file mode 100644 index ad99f63..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: ca-key-pair - namespace: {{ .Release.Namespace }} -data: - tls.crt: {{ .Values.tls.crt }} - tls.key: {{ .Values.tls.key }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml deleted file mode 100644 index 76a3874..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: clusterissuer -spec: - ca: - secretName: ca-key-pair diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/values.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/values.yaml deleted file mode 100644 index 50dfd22..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/intermediate-certificate-issuer/values.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tls: - # tls.crt contains the issuers full chain in the correct order: - # issuer -> intermediate(s) -> root. 
- crt: - # tls.key contains the base64 encoded signing key. - key: diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/Chart.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/Chart.yaml deleted file mode 100644 index 7bca5e3..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -name: istio-operator -version: 1.10.1 -tillerVersion: ">=2.7.2" -description: Helm chart for deploying Istio operator -keywords: - - istio - - operator -sources: - - https://github.com/istio/istio/tree/master/operator -engine: gotpl -icon: https://istio.io/latest/favicons/android-192x192.png diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/crds/crd-operator.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/crds/crd-operator.yaml deleted file mode 100644 index 93ac1de..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/crds/crd-operator.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# SYNC WITH manifests/charts/base/files -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: istiooperators.install.istio.io - labels: - release: istio -spec: - conversion: - strategy: None - group: install.istio.io - names: - kind: IstioOperator - listKind: IstioOperatorList - plural: istiooperators - singular: istiooperator - shortNames: - - iop - - io - scope: Namespaced - versions: - - additionalPrinterColumns: - - description: Istio control plane revision - jsonPath: .spec.revision - name: Revision - type: string - - description: IOP current state - jsonPath: .status.status - name: Status - type: string - - description: 'CreationTimestamp is a timestamp representing the server time - when this object was created. 
It is not guaranteed to be set in happens-before - order across separate operations. Clients may not set this value. It is represented - in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for - lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - subresources: - status: {} - schema: - openAPIV3Schema: - type: object - x-kubernetes-preserve-unknown-fields: true - served: true - storage: true ---- diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/files/gen-operator.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/files/gen-operator.yaml deleted file mode 100644 index e77d5aa..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/files/gen-operator.yaml +++ /dev/null @@ -1,220 +0,0 @@ ---- -# Source: istio-operator/templates/namespace.yaml -apiVersion: v1 -kind: Namespace -metadata: - name: istio-operator - labels: - istio-operator-managed: Reconcile - istio-injection: disabled ---- -# Source: istio-operator/templates/service_account.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - namespace: istio-operator - name: istio-operator ---- -# Source: istio-operator/templates/clusterrole.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: istio-operator -rules: -# istio groups -- apiGroups: - - authentication.istio.io - resources: - - '*' - verbs: - - '*' -- apiGroups: - - config.istio.io - resources: - - '*' - verbs: - - '*' -- apiGroups: - - install.istio.io - resources: - - '*' - verbs: - - '*' -- apiGroups: - - networking.istio.io - resources: - - '*' - verbs: - - '*' -- apiGroups: - - security.istio.io - resources: - - '*' - verbs: - - '*' -# k8s groups -- apiGroups: - - admissionregistration.k8s.io - resources: - - 
mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - '*' -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions.apiextensions.k8s.io - - customresourcedefinitions - verbs: - - '*' -- apiGroups: - - apps - - extensions - resources: - - daemonsets - - deployments - - deployments/finalizers - - replicasets - verbs: - - '*' -- apiGroups: - - autoscaling - resources: - - horizontalpodautoscalers - verbs: - - '*' -- apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - verbs: - - get - - create - - update -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - '*' -- apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterrolebindings - - clusterroles - - roles - - rolebindings - verbs: - - '*' -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - create - - update -- apiGroups: - - "" - resources: - - configmaps - - endpoints - - events - - namespaces - - pods - - pods/proxy - - persistentvolumeclaims - - secrets - - services - - serviceaccounts - verbs: - - '*' ---- -# Source: istio-operator/templates/clusterrole_binding.yaml -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: istio-operator -subjects: -- kind: ServiceAccount - name: istio-operator - namespace: istio-operator -roleRef: - kind: ClusterRole - name: istio-operator - apiGroup: rbac.authorization.k8s.io ---- -# Source: istio-operator/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - namespace: istio-operator - labels: - name: istio-operator - name: istio-operator -spec: - ports: - - name: http-metrics - port: 8383 - targetPort: 8383 - protocol: TCP - selector: - name: istio-operator ---- -# Source: istio-operator/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - namespace: istio-operator - name: istio-operator -spec: - replicas: 1 - selector: - matchLabels: - name: istio-operator - template: - 
metadata: - labels: - name: istio-operator - spec: - serviceAccountName: istio-operator - containers: - - name: istio-operator - image: gcr.io/istio-testing/operator:1.10-dev - command: - - operator - - server - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: true - runAsGroup: 1337 - runAsUser: 1337 - runAsNonRoot: true - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 200m - memory: 256Mi - requests: - cpu: 50m - memory: 128Mi - env: - - name: WATCH_NAMESPACE - value: "istio-system" - - name: LEADER_ELECTION_NAMESPACE - value: "istio-operator" - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: OPERATOR_NAME - value: "istio-operator" - - name: WAIT_FOR_RESOURCES_TIMEOUT - value: "300s" - - name: REVISION - value: "" diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/clusterrole.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/clusterrole.yaml deleted file mode 100644 index 4e6bd74..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/clusterrole.yaml +++ /dev/null @@ -1,115 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} -rules: -# istio groups -- apiGroups: - - authentication.istio.io - resources: - - '*' - verbs: - - '*' -- apiGroups: - - config.istio.io - resources: - - '*' - verbs: - - '*' -- apiGroups: - - install.istio.io - resources: - - '*' - verbs: - - '*' -- apiGroups: - - networking.istio.io - resources: - - '*' - verbs: - - '*' -- apiGroups: - - security.istio.io - resources: - - '*' - verbs: - - '*' -# k8s groups -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - 
validatingwebhookconfigurations - verbs: - - '*' -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions.apiextensions.k8s.io - - customresourcedefinitions - verbs: - - '*' -- apiGroups: - - apps - - extensions - resources: - - daemonsets - - deployments - - deployments/finalizers - - replicasets - verbs: - - '*' -- apiGroups: - - autoscaling - resources: - - horizontalpodautoscalers - verbs: - - '*' -- apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - verbs: - - get - - create - - update -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - '*' -- apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterrolebindings - - clusterroles - - roles - - rolebindings - verbs: - - '*' -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - create - - update -- apiGroups: - - "" - resources: - - configmaps - - endpoints - - events - - namespaces - - pods - - pods/proxy - - persistentvolumeclaims - - secrets - - services - - serviceaccounts - verbs: - - '*' ---- diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/clusterrole_binding.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/clusterrole_binding.yaml deleted file mode 100644 index 9b9df7d..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/clusterrole_binding.yaml +++ /dev/null @@ -1,13 +0,0 @@ -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} -subjects: -- kind: ServiceAccount - name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} - namespace: {{.Values.operatorNamespace}} -roleRef: - kind: ClusterRole - name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision 
}}{{- end }} - apiGroup: rbac.authorization.k8s.io ---- diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/crds.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/crds.yaml deleted file mode 100644 index a370365..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/crds.yaml +++ /dev/null @@ -1,6 +0,0 @@ -{{- if .Values.enableCRDTemplates -}} -{{- range $path, $bytes := .Files.Glob "crds/*.yaml" -}} ---- -{{ $.Files.Get $path }} -{{- end -}} -{{- end -}} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/deployment.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/deployment.yaml deleted file mode 100644 index 1baaa8d..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/deployment.yaml +++ /dev/null @@ -1,51 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - namespace: {{.Values.operatorNamespace}} - name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} -spec: - replicas: 1 - selector: - matchLabels: - name: istio-operator - template: - metadata: - labels: - name: istio-operator - spec: - serviceAccountName: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} - containers: - - name: istio-operator - image: {{.Values.hub}}/operator:{{.Values.tag}} - command: - - operator - - server - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: true - runAsGroup: 1337 - runAsUser: 1337 - runAsNonRoot: true - imagePullPolicy: IfNotPresent - resources: -{{ toYaml .Values.operator.resources | trim | indent 12 }} - env: - - name: WATCH_NAMESPACE - value: {{.Values.watchedNamespaces | 
quote}} - - name: LEADER_ELECTION_NAMESPACE - value: {{.Values.operatorNamespace | quote}} - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: OPERATOR_NAME - value: {{.Values.operatorNamespace | quote}} - - name: WAIT_FOR_RESOURCES_TIMEOUT - value: {{.Values.waitForResourcesTimeout | quote}} - - name: REVISION - value: {{.Values.revision | quote}} ---- diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/namespace.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/namespace.yaml deleted file mode 100644 index 31dc5aa..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/namespace.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: {{.Values.operatorNamespace}} - labels: - istio-operator-managed: Reconcile - istio-injection: disabled ---- diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/service.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/service.yaml deleted file mode 100644 index ab3ed57..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/service.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - namespace: {{.Values.operatorNamespace}} - labels: - name: istio-operator - name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} -spec: - ports: - - name: http-metrics - port: 8383 - targetPort: 8383 - protocol: TCP - selector: - name: istio-operator ---- diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/service_account.yaml 
b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/service_account.yaml deleted file mode 100644 index 03e9377..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/templates/service_account.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - namespace: {{.Values.operatorNamespace}} - name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} -{{- if .Values.imagePullSecrets }} -imagePullSecrets: -{{- range .Values.imagePullSecrets }} -- name: {{ . }} -{{- end }} -{{- end }} ---- diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/values.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/values.yaml deleted file mode 100644 index 39a5bd2..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-operator/values.yaml +++ /dev/null @@ -1,29 +0,0 @@ -hub: docker.io/istio -tag: 1.10.1 - -# ImagePullSecrets for operator ServiceAccount, list of secrets in the same namespace -# used to pull operator image. Must be set for any cluster configured with private docker registry. -imagePullSecrets: [] - -operatorNamespace: istio-operator - -# Used to replace istioNamespace to support operator watch multiple namespaces. -watchedNamespaces: istio-system -waitForResourcesTimeout: 300s - -# Used for helm2 to add the CRDs to templates. 
-enableCRDTemplates: false - -# revision for the operator resources -revision: "" - -# Operator resource defaults -operator: - resources: - limits: - cpu: 200m - memory: 256Mi - requests: - cpu: 50m - memory: 128Mi - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/.helmignore b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/Chart.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/Chart.yaml deleted file mode 100644 index 5995e6b..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: istio-peerauthentication -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. 
Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "1.16.0" diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/templates/_helpers.tpl deleted file mode 100644 index 94c398d..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "istio-peerauthentication.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "istio-peerauthentication.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "istio-peerauthentication.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "istio-peerauthentication.labels" -}} -helm.sh/chart: {{ include "istio-peerauthentication.chart" . }} -{{ include "istio-peerauthentication.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "istio-peerauthentication.selectorLabels" -}} -app.kubernetes.io/name: {{ include "istio-peerauthentication.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "istio-peerauthentication.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "istio-peerauthentication.fullname" .) 
.Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml deleted file mode 100644 index 3238311..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml +++ /dev/null @@ -1,9 +0,0 @@ -{{ if .Values.requireMutualTLS }} -apiVersion: security.istio.io/v1beta1 -kind: PeerAuthentication -metadata: - name: "default" -spec: - mtls: - mode: STRICT -{{ end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/values.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-peerauthentication/values.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/.helmignore b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/Chart.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/Chart.yaml deleted file mode 100644 index fbd07c2..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/Chart.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v2 -name: istio-profile -description: Configuration for istio to be picked up by istio's operator. -type: application -version: 0.1.2 -appVersion: "1.10.1" diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/templates/_helpers.tpl deleted file mode 100644 index 8a02937..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "istio-profile.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "istio-profile.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "istio-profile.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "istio-profile.labels" -}} -helm.sh/chart: {{ include "istio-profile.chart" . }} -{{ include "istio-profile.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "istio-profile.selectorLabels" -}} -app.kubernetes.io/name: {{ include "istio-profile.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "istio-profile.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "istio-profile.fullname" .) 
.Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/templates/istiooperator.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/templates/istiooperator.yaml deleted file mode 100644 index 5062e95..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/templates/istiooperator.yaml +++ /dev/null @@ -1,188 +0,0 @@ -apiVersion: install.istio.io/v1alpha1 -kind: IstioOperator -metadata: - name: istio-profile -spec: - hub: {{ .Values.hub | default "docker.io/istio" }} - tag: {{ .Values.tag | default "1.10.1" }} - - meshConfig: -{{- if .Values.envoy.accessLog.enabled }} - accessLogFile: /dev/stdout -{{- end }} -{{- if and .Values.envoy.accessLog.enabled .Values.envoy.accessLog.format }} - accessLogFormat: {{ .Values.envoy.accessLog.format }} -{{- end }} -{{- if and .Values.envoy.accessLog.enabled .Values.envoy.accessLog.encoding }} - accessLogEncoding: {{ .Values.envoy.accessLog.encoding }} -{{- end }} - defaultConfig: - proxyMetadata: {} - enablePrometheusMerge: true - - components: - base: - enabled: true - pilot: - enabled: true - - ingressGateways: - - name: istio-ingressgateway - enabled: true - k8s: - serviceAnnotations: - "service.beta.kubernetes.io/aws-load-balancer-internal": "true" - "service.beta.kubernetes.io/aws-load-balancer-type": "nlb" - - egressGateways: - - name: istio-egressgateway - enabled: {{ .Values.egressGateways.enabled }} - - cni: - enabled: false - - istiodRemote: - enabled: false - - values: - global: - istioNamespace: {{ .Values.namespace }} - istiod: - enableAnalysis: false - logging: - level: "default:info" - logAsJson: false - pilotCertProvider: istiod - jwtPolicy: third-party-jwt - proxy: - image: proxyv2 - clusterDomain: "cluster.local" - resources: - requests: - cpu: 100m - memory: 
128Mi - limits: - cpu: 2000m - memory: 1024Mi - logLevel: warning - componentLogLevel: "misc:error" - privileged: false - enableCoreDump: false - statusPort: 15020 - readinessInitialDelaySeconds: 1 - readinessPeriodSeconds: 2 - readinessFailureThreshold: 30 - includeIPRanges: "*" - excludeIPRanges: {{ default "" .Values.apiserver | quote }} - excludeOutboundPorts: "" - excludeInboundPorts: "" - autoInject: enabled - tracer: "zipkin" - proxy_init: - image: proxyv2 - resources: - limits: - cpu: 2000m - memory: 1024Mi - requests: - cpu: 10m - memory: 10Mi - # Specify image pull policy if default behavior isn't desired. - # Default behavior: latest images will be Always else IfNotPresent. - imagePullPolicy: "" - operatorManageWebhooks: false - tracer: - lightstep: {} - zipkin: {} - datadog: {} - stackdriver: {} - imagePullSecrets: [] - oneNamespace: false - defaultNodeSelector: {} - configValidation: true - multiCluster: - enabled: false - clusterName: "" - omitSidecarInjectorConfigMap: false - network: "" - defaultResources: - requests: - cpu: 10m - defaultPodDisruptionBudget: - enabled: true - priorityClassName: "" - useMCP: false - sds: - token: - aud: istio-ca - sts: - servicePort: 0 - meshNetworks: {} - mountMtlsCerts: false - base: - enableCRDTemplates: false - validationURL: "" - pilot: - autoscaleEnabled: true - autoscaleMin: 1 - autoscaleMax: 5 - replicaCount: 1 - image: pilot - traceSampling: 1.0 - env: {} - cpu: - targetAverageUtilization: 80 - nodeSelector: {} - keepaliveMaxServerConnectionAge: 30m - enableProtocolSniffingForOutbound: true - enableProtocolSniffingForInbound: true - deploymentLabels: - configMap: true - - telemetry: - enabled: {{ .Values.telemetry.enabled }} - v2: - enabled: true - metadataExchange: - wasmEnabled: false - prometheus: - wasmEnabled: false - enabled: true - stackdriver: - enabled: false - logging: false - monitoring: false - topology: false - configOverride: {} - - istiodRemote: - injectionURL: "" - - gateways: - 
istio-egressgateway: - zvpn: {} - env: {} - autoscaleEnabled: true - type: ClusterIP - name: istio-egressgateway - secretVolumes: - - name: egressgateway-certs - secretName: istio-egressgateway-certs - mountPath: /etc/istio/egressgateway-certs - - name: egressgateway-ca-certs - secretName: istio-egressgateway-ca-certs - mountPath: /etc/istio/egressgateway-ca-certs - - istio-ingressgateway: - autoscaleEnabled: true - type: LoadBalancer - name: istio-ingressgateway - zvpn: {} - env: {} - secretVolumes: - - name: ingressgateway-certs - secretName: istio-ingressgateway-certs - mountPath: /etc/istio/ingressgateway-certs - - name: ingressgateway-ca-certs - secretName: istio-ingressgateway-ca-certs - mountPath: /etc/istio/ingressgateway-ca-certs diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/values.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/values.yaml deleted file mode 100644 index 9b43fab..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/istio-profile/values.yaml +++ /dev/null @@ -1,44 +0,0 @@ - -namespace: istio-system -requireMutualTLS: true -hub: docker.io/istio -tag: 1.10.1 -apiserver: "" - -############################################################################## -# Observability options: -############################################################################## - -# Controls settings for the envoy proxy that is added as a sidecar -envoy: - # Controls settings related to access the service. - accessLog: - # When enabled, envoy is configured to log to stdout. - enabled: true - # Format for the proxy access log. Default value is envoy's format. - # Controls accessLogFormat istio configuration. - format: - # Encoding for the proxy access log (text or json.) Default value is text. - # Controls accessLogEncoding istio configuration. - encoding: - -# When set to true, istio provides telemetry data to prometheus. 
-# False disables collecting telemetry data. -telemetry: - enabled: true - -# When set to true, enables tracking of a request through mesh that is -# destributed across mutliple services. -tracing: - enabled: true - -############################################################################## -# Traffic Management options: -############################################################################## - -# Egress gateways allow you to apply Istio features, for example, monitoring -# and route rules, to traffic exiting the mesh. -# When set to true, the egress gateway is created. -egressGateways: - enabled: true - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/.helmignore b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/Chart.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/Chart.yaml deleted file mode 100644 index 9cfc3c1..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/Chart.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v2 -name: self-signed-certificate-issuer -description: A Helm chart for Kubernetes -type: application -version: 0.1.0 -appVersion: "1.0.0" diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl deleted file mode 100644 index e62a63b..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "self-signed-certificate-issuer.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "self-signed-certificate-issuer.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "self-signed-certificate-issuer.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "self-signed-certificate-issuer.labels" -}} -helm.sh/chart: {{ include "self-signed-certificate-issuer.chart" . }} -{{ include "self-signed-certificate-issuer.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "self-signed-certificate-issuer.selectorLabels" -}} -app.kubernetes.io/name: {{ include "self-signed-certificate-issuer.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "self-signed-certificate-issuer.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "self-signed-certificate-issuer.fullname" .) 
.Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml deleted file mode 100644 index ab1ee31..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: clusterissuer -spec: - ca: - secretName: root-secret - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml deleted file mode 100644 index 84e895d..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: selfsigned-ca - namespace: {{ .Release.Namespace }} -spec: - isCA: true - commonName: selfsigned-ca - secretName: root-secret - privateKey: - algorithm: ECDSA - size: 256 - issuerRef: - name: selfsigned-issuer - kind: ClusterIssuer - group: cert-manager.io - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml deleted file mode 100644 index 81660bd..0000000 --- 
a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: selfsigned-issuer -spec: - selfSigned: {} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/values.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/self-signed-certificate-issuer/values.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/.helmignore b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/Chart.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/Chart.yaml deleted file mode 100644 index e179122..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: vault-certificate-issuer -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. 
-# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "1.16.0" diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl deleted file mode 100644 index a9a1425..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "vault-certificate-issuer.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "vault-certificate-issuer.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "vault-certificate-issuer.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "vault-certificate-issuer.labels" -}} -helm.sh/chart: {{ include "vault-certificate-issuer.chart" . }} -{{ include "vault-certificate-issuer.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "vault-certificate-issuer.selectorLabels" -}} -app.kubernetes.io/name: {{ include "vault-certificate-issuer.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "vault-certificate-issuer.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "vault-certificate-issuer.fullname" .) 
.Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml deleted file mode 100644 index 8880f1c..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{ if eq .Values.vault.authentication_type "AppRole" }} -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: clusterissuer -spec: - vault: - path: {{ .Values.vault.path }} - server: {{ .Values.vault.url }} - caBundle: {{ .Values.vault.ca_bundle }} - auth: - appRole: - path: {{ .Values.approle.role_path }} - roleId: {{ .Values.approle.role_id }} - secretRef: - name: cert-manager-vault-approle - key: secretId -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml deleted file mode 100644 index 23d58e1..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{ if eq .Values.vault.authentication_type "AppRole" }} -apiVersion: v1 -kind: Secret -type: Opaque -metadata: - name: cert-manager-vault-approle - namespace: {{ .Release.Namespace }} -data: - secretId: {{ .Values.approle.secret_id }} -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml 
b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml deleted file mode 100644 index f964aed..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{ if eq .Values.vault.authentication_type "ServiceAccount" }} -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: clusterissuer -spec: - vault: - path: {{ .Values.vault.path }} - server: {{ .Values.vault.url }} - caBundle: {{ .Values.vault.ca_bundle }} - auth: - kubernetes: - role: {{ .Values.serviceAccount.role }} -{{- if .Values.serviceAccount.MountPath }} - path: {{ .Values.serviceAccount.mountPath }} -{{- end }} - secretRef: - name: {{ .Values.serviceAccount.secret }} - key: token -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml deleted file mode 100644 index 0410d30..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{ if eq .Values.vault.authentication_type "Token" }} -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: clusterissuer -spec: - vault: - path: {{ .Values.vault.path }} - server: {{ .Values.vault.url }} - caBundle: {{ .Values.vault.ca_bundle }} - auth: - tokenSecretRef: - name: cert-manager-vault-token - key: token -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml deleted file mode 
100644 index 35bb13d..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{ if eq .Values.vault.authentication_type "Token" }} -apiVersion: v1 -kind: Secret -type: Opaque -metadata: - name: cert-manager-vault-token - namespace: {{ .Release.Namespace }} -data: - token: {{ .Values.token.token }} -{{- end }} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/values.yaml b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/values.yaml deleted file mode 100644 index 4cac439..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/charts/vault-certificate-issuer/values.yaml +++ /dev/null @@ -1,47 +0,0 @@ - -# Common settings for all types of authentication -vault: - # the URL whereby Vault is reachable. - url: - # the Vault path that will be used for signing. Note that the path - # must use the sign endpoint. - path: - # an optional field containing a base64 encoded string of the - # Certificate Authority to trust the Vault connection. This is - # typically always required when using an https URL. - ca_bundle: - # the type of authenciation to use, must be one of: - # - AppRole - # - Token - # - ServiceAccount - authentication_type: - -# AppRole authentication type: -approle: - # secret key - secret_id: - # RoleID of the role to assume - role_id: - # the app role path - role_path: - -# Token authentication type: -token: - # a token string that has been generated from one of the many - # authentication backends that Vault supports. These tokens have - # an expiry and so need to be periodically refreshed. cert-manager - # does not refresh these token automatically and so another process - # must be put in place to do this. The token is stored in the - # cert-manager-vault-token secret in the cert-manager namespace. 
- token: - -# ServiceAccount authenication type: -serviceAccount: - # the name of the secret associated with the service account in the - # cert-manager namespace to use to authenticate with vault - secret: - # the role which is the Vault role that the Service Account is to assume - role: - # optional value which is the authentication mount path, defaulting - # to kubernetes. - mountPath: diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/common-services.auto.tfvars b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/common-services.auto.tfvars deleted file mode 100644 index 8198041..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/common-services.auto.tfvars +++ /dev/null @@ -1,2 +0,0 @@ -#tls_crt_file = "certs/pki.test4.sandbox.csp2.census.gov.bundle.crt" -#tls_key_file = "certs/pki.test4.sandbox.csp2.census.gov.key" diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/copy_image.sh b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/copy_image.sh deleted file mode 120000 index 889e269..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/copy_image.sh +++ /dev/null @@ -1 +0,0 @@ -../bin/copy_image.sh \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/copy_images.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/copy_images.tf deleted file mode 100644 index ddd67aa..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/copy_images.tf +++ /dev/null @@ -1,92 +0,0 @@ -data "aws_ecr_authorization_token" "token" {} - -locals { - account_id = data.aws_caller_identity.current.account_id - repo_parent_name = format("eks/%v", var.cluster_name) - - account_ecr_registry = format("%v.dkr.ecr.%v.amazonaws.com", local.account_id, var.region) - account_ecr = format("%v/%v", local.account_ecr_registry, local.repo_parent_name) - - images 
= [ - # cert-manager related images: - { - name = "cert-manager-controller" - image = "quay.io/jetstack/cert-manager-controller" - tag = var.cert_manager_controller_tag - enabled = true - }, - { - name = "cluster-autoscaler" - image = "public.ecr.aws/v0g0y9g5/cluster-autoscaler" - tag = var.cluster_autoscaler_tag - enabled = true - }, - { - name = "metrics-server" - image = "docker.io/bitnami/metrics-server" - tag = var.metrics_server_tag - enabled = true - }, - { - name = "cert-manager-cainjector" - image = "quay.io/jetstack/cert-manager-cainjector" - tag = var.cert_manager_cainjector_tag - enabled = true - }, - { - name = "cert-manager-webhook" - image = "quay.io/jetstack/cert-manager-webhook" - tag = var.cert_manager_webhook_tag - enabled = true - }, - # istio related images: - { - name = "istio/operator" - image = "docker.io/istio/operator" - tag = var.istio_tag - enabled = true - }, - { - name = "istio/pilot" - image = "docker.io/istio/pilot" - tag = var.istio_tag - enabled = true - }, - { - name = "istio/proxyv2" - image = "docker.io/istio/proxyv2" - tag = var.istio_tag - enabled = true - }, - ] - image_repos = { for image in local.images : image.name => format("%v/%v", local.account_ecr, image.name) } - image_map = { for image in local.images : image.name => - merge( - image, - tomap( - { "full_path" = local.image_repos[image.name], - "registry" = local.account_ecr_registry, - "repository" = format("%v/%v", local.repo_parent_name, image.name), } - )) } -} - -resource "null_resource" "copy_images" { - for_each = { for image in local.images : image.name => image if image.enabled } - triggers = { - name = each.key - image = format("%v:%v", each.value.image, each.value.tag) - } - - provisioner "local-exec" { - command = "${path.module}/copy_image.sh" - environment = { - AWS_PROFILE = var.profile - AWS_REGION = local.region - SOURCE_IMAGE = format("%v:%v", each.value.image, each.value.tag) - DESTINATION_IMAGE = format("%v/%v:%v", local.account_ecr, 
each.value.name, each.value.tag) - DESTINATION_USERNAME = data.aws_ecr_authorization_token.token.user_name - DESTINATION_PASSWORD = data.aws_ecr_authorization_token.token.password - } - } -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/data.eks-subdirectory.tf deleted file mode 120000 index 43b5430..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/data.eks-subdirectory.tf +++ /dev/null @@ -1 +0,0 @@ -../includes.d/data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/dns.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/dns.tf deleted file mode 100644 index 91a5b35..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/dns.tf +++ /dev/null @@ -1,25 +0,0 @@ -data "kubernetes_service" "istio-ingressgateway" { - metadata { - name = "istio-ingressgateway" - namespace = "istio-system" - } -} - -locals { - is_gateway_active = data.kubernetes_service.istio-ingressgateway.status != null -} - -data "aws_lb" "lb" { - count = local.is_gateway_active ? 1 : 0 - name = split("-", data.kubernetes_service.istio-ingressgateway.status.0.load_balancer.0.ingress.0.hostname)[0] -} - -resource "aws_route53_record" "istio-ingress" { - count = local.is_gateway_active ? 
1 : 0 - name = format("*.%v", local.parent_rs.cluster_domain_name) - type = "CNAME" - ttl = 900 - zone_id = local.parent_rs.cluster_domain_id - - records = [data.aws_lb.lb[0].dns_name] -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/kubeconfig.eks-subdirectory.tf deleted file mode 120000 index e3750a4..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/kubeconfig.eks-subdirectory.tf +++ /dev/null @@ -1 +0,0 @@ -../includes.d/kubeconfig.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/locals.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/locals.tf deleted file mode 100644 index 4b9ae5a..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/locals.tf +++ /dev/null @@ -1,17 +0,0 @@ -locals { - base_tags = { - "eks-cluster-name" = var.cluster_name - "boc:tf_module_version" = local._module_version - "boc:created_by" = "terraform" - } -} - -# replace TF remote state accordingly in parent_rs with that from the parent directory, and be sure to make the link -locals { - vpc_id = local.parent_rs.cluster_vpc_id - subnet_ids = local.parent_rs.cluster_subnet_ids - cluster_worker_sg_id = local.parent_rs.cluster_worker_sg_id - - oidc_provider_url = local.parent_rs.oidc_provider_url - oidc_provider_arn = local.parent_rs.oidc_provider_arn -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/main.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/main.tf deleted file mode 100644 index 85c931e..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/main.tf +++ /dev/null @@ -1,400 +0,0 @@ -locals { - charts = { - "cert-manager" = { - name = "cert-manager" - repository = "https://charts.jetstack.io" - version = "v1.4.3" - 
use_remote = true - } - - "metrics-server" = { - name = "metrics-server" - repository = "https://charts.bitnami.com/bitnami" - # version = "5.10.4" - # version = "5.11.9" - version = "6.2.4" - use_remote = true - } - - # a standard chart that is downloaded as part of the istio-bundle. It's not - # available standalone in a repository - # name = "istio-operator" - # these are all custom - # name = "certificate-issuer" - # name = "istio-profile" - } -} - -resource "kubernetes_namespace" "cert-manager" { - metadata { - name = "cert-manager" - } -} - -resource "kubernetes_namespace" "istio-system" { - metadata { - name = "istio-system" - } -} - -# Install Metrics-Server -resource "helm_release" "metrics-server" { - chart = "metrics-server" - name = "metrics-server" - namespace = "kube-system" - repository = local.charts["metrics-server"].use_remote ? local.charts["metrics-server"].repository : "${path.module}/charts" - version = local.charts["metrics-server"].use_remote ? local.charts["metrics-server"].version : null - - depends_on = [null_resource.copy_images] - set { - name = "extraArgs[0]" - # value = "InternalIP,ExternalIP,Hostname" - value = "--kubelet-preferred-address-types=InternalIP" - } - set { - name = "apiService.create" - value = "true" - } - set { - name = "extraArgs[1]" - value = "--cert-dir=/tmp" - } - set { - name = "extraArgs[2]" - value = "--kubelet-use-node-status-port" - } - set { - name = "extraArgs[3]" - value = "--metric-resolution=15s" - } - # set { - # name = "extraArgs[4]" - # value = "--kubelet-insecure-tls=true" - # } - set { - name = "image.registry" - value = local.account_ecr_registry - } - set { - name = "image.repository" - # value = format("%v/%v", local.repo_parent_name, local.images["metric-server"].name) - value = local.image_map["metrics-server"].repository - } - - set { - name = "image.tag" - value = var.metrics_server_tag - } - - timeout = 300 -} - -resource "helm_release" "cluster-autoscaler" { - chart = "cluster-autoscaler" - 
name = "cluster-autoscaler" - namespace = "kube-system" - repository = "${path.module}/charts/" - depends_on = [null_resource.copy_images] - set { - name = "image.repository" - value = local.image_repos["cluster-autoscaler"] - } - set { - name = "image.tag" - value = var.cluster_autoscaler_tag - } - set { - name = "autoDiscovery.clusterName" - value = var.cluster_name - } - set { - name = "awsRegion" - value = local.region - } -} - -# Install cert-manager -resource "helm_release" "cert-manager" { - chart = "cert-manager" - name = "cert-manager" - namespace = kubernetes_namespace.cert-manager.metadata[0].name - repository = local.charts["cert-manager"].use_remote ? local.charts["cert-manager"].repository : "${path.module}/charts" - version = local.charts["cert-manager"].use_remote ? local.charts["cert-manager"].version : null - - depends_on = [null_resource.copy_images] - - set { - name = "installCRDs" - value = "true" - } - set { - name = "extraArgs" - value = "{--enable-certificate-owner-ref=true}" - } - - set { - name = "image.repository" - value = local.image_repos["cert-manager-controller"] - } - set { - name = "image.tag" - value = var.cert_manager_controller_tag - } - - set { - name = "cainjector.image.repository" - value = local.image_repos["cert-manager-cainjector"] - } - set { - name = "cainjector.image.tag" - value = var.cert_manager_cainjector_tag - } - - set { - name = "webhook.image.repository" - value = local.image_repos["cert-manager-webhook"] - } - set { - name = "webhook.image.tag" - value = var.cert_manager_webhook_tag - } - - timeout = 180 -} - -# cert-manager reports ready before the cert-manager-webhook pod -# has completely started and is ready to process requests. This sleep -# is set for a completely arbitrary time to allow cert-manager-webhook -# to finish starting. On slow systems, this may not be long enough, -# but on t3.xlarge, it works fine. 
-resource "time_sleep" "let_cert-manager-webhook_boot" { - depends_on = [helm_release.cert-manager] - - create_duration = "19s" -} - -locals { - tls_crt_file = length(var.tls_crt_file) > 0 ? var.tls_crt_file : "certs/${local.ca_dns_name}.bundle.crt" - tls_crt_contents = (length(local.tls_crt_file) > 0 && fileexists(local.tls_crt_file)) ? file(local.tls_crt_file) : var.tls_crt_contents - tls_crt_b64 = length(local.tls_crt_contents) > 0 ? base64encode(local.tls_crt_contents) : var.tls_crt_b64 - - tls_key_file = length(var.tls_key_file) > 0 ? var.tls_key_file : "certs/${local.ca_dns_name}.key" - tls_key_contents = (length(local.tls_key_file) > 0 && fileexists(local.tls_key_file)) ? file(local.tls_key_file) : var.tls_key_contents - tls_key_b64 = length(local.tls_key_contents) > 0 ? base64encode(local.tls_key_contents) : var.tls_key_b64 - - intermediate_ca = (length(local.tls_crt_b64) > 0) && (length(local.tls_key_b64) > 0) - - vault_ca_bundle_pem_file = var.vault_ca_bundle_pem_file - vault_ca_bundle_pem = ((length(local.vault_ca_bundle_pem_file) > 0) ? - file(local.vault_ca_bundle_pem_file) - : var.vault_ca_bundle_pem) - vault_ca_bundle_pem_b64 = ((length(local.vault_ca_bundle_pem) > 0) ? - base64encode(local.vault_ca_bundle_pem) - : var.vault_ca_bundle_pem_b64) - - vault_ca = ! local.intermediate_ca && length(var.vault_url) > 0 - - self_signed_ca = ! local.intermediate_ca && ! local.vault_ca - - defined_ca = (local.self_signed_ca ? 1 : 0) + (local.intermediate_ca ? 1 : 0) + (local.vault_ca ? 1 : 0) -} - -# configure the certificate issuer. - -# when self-signed certs requested -resource "helm_release" "self-signed-certificate-issuer" { - count = local.self_signed_ca == true ? 
1 : 0 - - chart = "self-signed-certificate-issuer" - name = "certificate-issuer" - namespace = kubernetes_namespace.cert-manager.metadata[0].name - repository = "${path.module}/charts/" - - depends_on = [time_sleep.let_cert-manager-webhook_boot] - - # Required because the chart creates "non-standard" kubernetes resources - # that use the cert-manager CRDs. - disable_openapi_validation = true -} - -# when using an internediate CA is requested -resource "helm_release" "intermediate-certificate-issuer" { - count = local.intermediate_ca == true ? 1 : 0 - - chart = "intermediate-certificate-issuer" - name = "certificate-issuer" - namespace = kubernetes_namespace.cert-manager.metadata[0].name - repository = "${path.module}/charts/" - - depends_on = [time_sleep.let_cert-manager-webhook_boot] - - # Required because the chart creates "non-standard" kubernetes resources - # that use the cert-manager CRDs. - disable_openapi_validation = true - - set { - name = "tls.crt" - value = local.tls_crt_b64 - } - set { - name = "tls.key" - value = local.tls_key_b64 - } -} - -# when using vault as a CA is requested -resource "helm_release" "vault-certificate-issuer" { - count = local.vault_ca == true ? 1 : 0 - - chart = "vault-certificate-issuer" - name = "certificate-issuer" - namespace = kubernetes_namespace.cert-manager.metadata[0].name - repository = "${path.module}/charts/" - - depends_on = [time_sleep.let_cert-manager-webhook_boot] - - # Required because the chart creates "non-standard" kubernetes resources - # that use the cert-manager CRDs. 
- disable_openapi_validation = true - - set { - name = "vault.url" - value = var.vault_url - } - set { - name = "vault.path" - value = var.vault_path - } - set { - name = "vault.ca_bundle" - value = local.vault_ca_bundle_pem_b64 - } - set { - name = "vault.authentication_type" - value = var.vault_authentication - } - - set { - name = "approle.secret_id" - value = var.vault_approle_secret_id - } - set { - name = "approle.role_id" - value = var.vault_approle_secret_id - } - set { - name = "approle.role_path" - value = var.vault_approle_role_path - } - - set { - name = "token.token" - value = var.vault_token - } - - set { - name = "serviceAccount.serviceAccount" - value = var.vault_serviceaccount_sa - } - - set { - name = "serviceAccount.role" - value = var.vault_serviceaccount_role - } - set { - name = "serviceAccount.mountPath" - value = var.vault_serviceaccount_mountpath - } -} - -# installs the istio-operator that will listen for profile configurations to -# install / configure modify the istio components. -resource "helm_release" "istio-operator" { - chart = "istio-operator" - name = "istio-operator" - namespace = kubernetes_namespace.istio-system.metadata[0].name - repository = "${path.module}/charts/" - - depends_on = [helm_release.cert-manager] - - set { - name = "hub" - value = format("%v/%v", local.account_ecr, "istio") - } - set { - name = "tag" - value = var.istio_tag - } - set { - name = "operatorNamespace" - value = "operators" - } - set { - name = "watchedNamespaces" - value = kubernetes_namespace.istio-system.metadata[0].name - } - - timeout = 180 -} - -# Need to access the IP address of the apiserver for the next step. 
-data "kubernetes_service" "apiserver" { - metadata { - name = "kubernetes" - } -} - -# sets up service mesh -resource "helm_release" "istio-profile" { - chart = "istio-profile" - name = "istio-profile" - namespace = kubernetes_namespace.istio-system.metadata[0].name - repository = "${path.module}/charts/" - - depends_on = [helm_release.istio-operator, null_resource.certificate-issuers] - - set { - name = "hub" - value = format("%v/%v", local.account_ecr, "istio") - } - set { - name = "tag" - value = var.istio_tag - } - # Passes in the API server so it can be excluded from requiring mTLS from - # pods that are protected by istio. It already implements SSL. - set { - name = "apiserver" - value = "${data.kubernetes_service.apiserver.spec[0].cluster_ip}/32" - } -} - -# Creating the istio profile is very quick. Time is needed to allow -# istio-operator to install the CRDs and deploy istio. -resource "time_sleep" "let_istio-operator_install_istio" { - depends_on = [helm_release.istio-profile] - - create_duration = "19s" -} - -# Require all pods in the service mesh to use mTLS -resource "helm_release" "istio-peer-authentication" { - chart = "istio-peerauthentication" - name = "istio-peerauthentication" - namespace = kubernetes_namespace.istio-system.metadata[0].name - repository = "${path.module}/charts/" - - depends_on = [time_sleep.let_istio-operator_install_istio] -} - -resource "null_resource" "certificate-issuers" { - triggers = { - self_signed_ca = join(",", helm_release.self-signed-certificate-issuer[*].id) - intermediate_ca = join(",", helm_release.intermediate-certificate-issuer[*].id) - vault_ca = join(",", helm_release.vault-certificate-issuer[*].id) - } - provisioner "local-exec" { - command = "if [ ${local.defined_ca} == 0 ]; then echo 'no-certificate-issuer defined'; exit 1; fi" - } -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/parent_rs.tf 
deleted file mode 120000 index d85ece6..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/parent_rs.tf +++ /dev/null @@ -1 +0,0 @@ -../includes.d/parent_rs.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/prefixes.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/prefixes.tf deleted file mode 120000 index e0bf5ad..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/prefixes.tf +++ /dev/null @@ -1 +0,0 @@ -../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/providers.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/providers.tf deleted file mode 120000 index 7244d01..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/providers.tf +++ /dev/null @@ -1 +0,0 @@ -../providers.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/region.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/region.tf deleted file mode 100644 index b7b1696..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/region.tf +++ /dev/null @@ -1,4 +0,0 @@ -locals { - region = var.region -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/test-cluster-autoscaling.json b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/test-cluster-autoscaling.json deleted file mode 100644 index ab00596..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/test-cluster-autoscaling.json +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx-deployment -spec: - selector: - matchLabels: - app: nginx - replicas: 4 # tells deployment to run 2 pods matching the template - template: - metadata: - labels: - app: nginx - spec: - containers: - - name: nginx 
- resources: - requests: - cpu: 3 - limits: - cpu: 3 - image: "252960665057.dkr.ecr.us-gov-east-1.amazonaws.com/eks/adsd-cumulus-dev/nginx:1.21" - ports: - - containerPort: 80 diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/tf-run.data b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/tf-run.data deleted file mode 100644 index d233332..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/tf-run.data +++ /dev/null @@ -1,29 +0,0 @@ -VERSION 1.2.2 -REMOTE-STATE -COMMAND tf-directory-setup.py -l none -f -COMMAND setup-new-directory.sh -COMMAND tf-init -upgrade -COMMAND ln -sf ../variables.vpc.auto.tfvars . -COMMAND ln -sf ../variables.vpc.tf -COMMAND ln -sf ../versions.tf -COMMAND ln -sf ../settings.auto.tfvars -LINKTOP init - -module.cert -COMMAND tf-directory-setup.py -l s3 - -COMMENT Adding key to git-secret, hiding, and adding to git. Manually commit afterwards. -COMMAND git-secret add certs/*.key -COMMAND git-secret hide -m -COMMAND git add certs/*.key.secret -COMMENT execute: git commit -m add-pki-key -a - -COMMENT submit certs/*csr using command ouptut listed in apply to TCO for signing -COMMENT Once that is available, change cert_download to true. If you have received a certificate manually, from the new MS CA, do NOT change cert_download -STOP Wait for certificate to be signed, then continue with %%NEXT%%. 
- -TAG have-certificate -module.cert -module.cert -ALL -ALL diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/variables.common-services.auto.tfvars b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/variables.common-services.auto.tfvars deleted file mode 100644 index c6a82a1..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/variables.common-services.auto.tfvars +++ /dev/null @@ -1,25 +0,0 @@ -cert_manager_cainjector_tag = "v1.4.3" -cert_manager_controller_tag = "v1.4.3" -cert_manager_webhook_tag = "v1.4.3" -cluster_autoscaler_tag = "v1.21.0" -istio_tag = "1.10.1" -metrics_server_tag = "0.6.2-debian-11-r0" -tls_crt_b64 = "" -tls_crt_contents = "" -tls_crt_file = "" -tls_key_b64 = "" -tls_key_contents = "" -tls_key_file = "" -vault_approle_role_id = "" -vault_approle_role_path = "" -vault_approle_secret_id = "" -vault_authentication = "" -vault_ca_bundle_pem = "" -vault_ca_bundle_pem_b64 = "" -vault_ca_bundle_pem_file = "" -vault_path = "" -vault_serviceaccount_mountpath = "" -vault_serviceaccount_role = "" -vault_serviceaccount_sa = "" -vault_token = "" -vault_url = "" diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/variables.common-services.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/variables.common-services.tf deleted file mode 100644 index 17f2009..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/variables.common-services.tf +++ /dev/null @@ -1,209 +0,0 @@ -############################################################################# -# Options for configuring cert-manager to generate certificates for https -# termination in the cluster: -# -# - By not configuring any other method, cert-manager is configured to -# generate a private key and a self-signed CA which will be stored in the -# root-secret secret in the cert-manager namespace. Certificates are then -# signed using this internal CA. 
-# - tls_cert / tls_key - intermediate CA - By configuring a tls_cert and -# tls_key (either file, contents or base64 encoded data, see below,) -# cert-manager will be configured to create certificates based upon the -# intermediate certificate provided. -# - vault - By configuring information about the vault, cert-manager will be -# configured to interact with the vault to create certificates. -# -# For fields that ultimately need to be base64 encoded, there are -# typically three input variables for each field. -# 1. variable with a path to a file that holds the unencoded data which -# will be read by terraform and encoded into a base64 string to be used -# as needed. This field has the highest precedence of the three fields. -# 2. variable with the raw unencoded data which will be encoded into a -# base64 string to be used as needed. This field has the second highest -# precedence of the three fields. -# 3. variable with the base64 encoded data ready for use. This field has -# the lowest priority of the three fields. -############################################################################# - -############################################################################# -# vault -# -# To use Vault as the certificate authority for cert-manager, first supply -# the common configuration elements. Once complete, configure the selected -# authenication method and fill in the details for that authentication type. -############################################################################# -variable "vault_url" { - description = "URL to the vault server." - type = string - default = "" -} - -variable "vault_path" { - description = "Path is the Vault path that will be used for signing. Note that the path must use the sign endpoint." - type = string - default = "" -} - -variable "vault_ca_bundle_pem_file" { - description = "Path to the pem file that holds the CA bundle containing the Certificate Authority to trust the Vault connection. 
This is typically always required when using an https URL." - type = string - default = "" -} - -variable "vault_ca_bundle_pem" { - description = "Contents of the pem file holding the CA bundle containing the Certificate Authority to trust the Vault connection. This is typically always required when using an https URL." - type = string - default = "" -} - -variable "vault_ca_bundle_pem_b64" { - description = "Base64 encoded contents of the pem file holding the CA bundle containing the Certificate Authority to trust the Vault connection. This is typically always required when using an https URL." - type = string - default = "" -} - -variable "vault_authentication" { - description = "How to authenticate with the vault. This value must be blank when not using the value, or one of 'AppRole', 'Token', or 'ServiceAccount'." - type = string - default = "" -} - -############################################################################# -# for AppRole authentication -variable "vault_approle_secret_id" { - description = "The vault SecretID for the AppRole. This is stored in the vault secret in the cert-manager namespace." - type = string - default = "" - # sensitive = true -} - -variable "vault_approle_role_id" { - description = "The vault RoleId for cert-manager to assume." - type = string - default = "" -} - -variable "vault_approle_role_path" { - description = "The vault app role path for the role for cert-manager to assume." - type = string - default = "" -} - -############################################################################# -# for Token authentication -variable "vault_token" { - description = "The vault token that cert-manager should use to authenticate with vault. Note that tokens expire, and the token must be refreshed manually. This token is stored in the valut secret in the cert-manager namespace." 
- type = string - default = "" - # sensitive = true -} - -############################################################################# -# for ServiceAccount authentication -variable "vault_serviceaccount_sa" { - description = "The name of the service account in the cert-manager namespace to use to access the token to communicate with vault." - type = string - default = "" -} - -variable "vault_serviceaccount_role" { - description = "The role cert-manager is to assume." - type = string - default = "" -} - -variable "vault_serviceaccount_mountpath" { - description = "The location to mount the secret into the filesystem. Defaults to kubernetes" - type = string - default = "" -} - -############################################################################# -# tls_cert / tls_key - intermediate CA -# -# To use an intermediate CA, configure two of these fields with correct -# values which configures cert-manager to sign cert requests with an -# intermediate key. -# -# Input can be the file, file contents, or base64 encoded file contents to -# allow chaining the output of a module that can generate an intermediate CA -# to the input of this script. Depending on how the intermediate CA is -# generated, pass the output as input in whichever form is easiest. -# -# See https://cert-manager.io/docs/configuration/ca/ -############################################################################# - -variable "tls_crt_file" { - description = "Path to the file that holds the tls.crt representing the issuer's full chain in the correct order (issuer -> intermediate(s) -> root.) When left blank, cert-manager is configured with a self-signed CA." - type = string - default = "" -} - -variable "tls_key_file" { - description = "Path to the file that holds the signing private key. When left blank, cert-manager is configured with a self-signed CA." 
- type = string - default = "" -} - -variable "tls_crt_contents" { - description = "The contents of the file that holds the tls.crt representing the issuer's full chain in the correct order (issuer -> intermediate(s) -> root.) When left blank, cert-manager is configured with a self-signed CA." - type = string - default = "" -} - -variable "tls_key_contents" { - description = "The contents of the file that holds the signing private key. When left blank, cert-manager is configured with a self-signed CA." - type = string - default = "" -} - -variable "tls_crt_b64" { - description = "The base64 encoded contents of the file that holds the tls.crt representing the issuer's full chain in the correct order (issuer -> intermediate(s) -> root.) When left blank, cert-manager is configured with a self-signed CA." - type = string - default = "" -} - -variable "tls_key_b64" { - description = "The base64 encoded contents of the file that holds the signing private key. When left blank, cert-manager is configured with a self-signed CA." - type = string - default = "" -} - -# See the readme `Updating the cert-manager chart` to find these values. 
-variable "cert_manager_controller_tag" { - description = "Which tag of public.ecr.aws/eks-anywhere/jetstack/cert-manager-controller" - type = string - default = "v1.4.3" -} - -variable "cluster_autoscaler_tag" { - description = "Image tag of public.ecr.aws/v0g0y9g5/cluster-autoscaler" - type = string - default = "v1.21.0" -} - -variable "metrics_server_tag" { - description = "Which tag of metrics-server" - type = string - default = "0.5.0-debian-10-r83" -} - -variable "cert_manager_cainjector_tag" { - description = "Which tag of public.ecr.aws/eks-anywhere/jetstack/cert-manager-cainjector" - type = string - default = "v1.4.3" -} - -variable "cert_manager_webhook_tag" { - description = "Which tag of public.ecr.aws/eks-anywhere/jetstack/cert-manager-webhook" - type = string - default = "v1.4.3" -} - -# Set the readme `Updating the istio chart` to find these values. -variable "istio_tag" { - description = "The version of istio to install" - type = string - default = "1.10.1" -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/variables.eks.tf deleted file mode 120000 index 7dd95db..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/variables.eks.tf +++ /dev/null @@ -1 +0,0 @@ -../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/version.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/version.tf deleted file mode 120000 index 061373c..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/version.tf +++ /dev/null @@ -1 +0,0 @@ -../version.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/versions.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/versions.tf deleted file mode 120000 index 8bd0ff1..0000000 --- 
a/examples/full-cluster-tf-upgrade/1.24.in-progress/common-services/versions.tf +++ /dev/null @@ -1 +0,0 @@ -../versions.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/create-iam-config.sh b/examples/full-cluster-tf-upgrade/1.24.in-progress/create-iam-config.sh deleted file mode 100755 index 9bb68f1..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/create-iam-config.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash - -PROFILE=$1 -CLUSTER=$2 -REGION=$3 - -if [ -z "$PROFILE" ] -then - PROFILE=$(grep -E '^\bprofile\b *' *tfvars| sed -e 's/^.*profile.* =//' -e 's/\"//g' -e 's/^ *//' | head -n 1) -fi -if [ -z "$PROFILE" ] -then - echo "* unable to determine profile, please pass as argument 1" - exit 1 -else - echo "* using profile $PROFILE" -fi - -if [ -z "$CLUSTER" ] -then - CLUSTER=$(grep -E '^\bcluster_name\b *' settings.auto.tfvars| sed -e 's/^.*cluster_name.* =//' -e 's/\"//g' -e 's/^ *//' | head -n 1) -fi -if [ -z "$CLUSTER" ] -then - echo "* unable to determine cluster name, please pass as argument 2" - exit 1 -else - echo "* using cluster $CLUSTER" -fi - -ADMINROLE=$(terraform output role_cluster-admin-role_arn) -if [ -z "$ADMINROLE" ] -then - echo "* unable to determine cluster $CLUSTER admin role. 
Check that you are in the correct directory an terraform has been run" - exit 1 -fi - -if [ -z "$REGION" ] -then - echo "* getting region from profile $PROFILE" - REGION=$(aws configure --profile $PROFILE get region) -else - echo "* using region $REGION" -fi - -NEWPROFILE="$PROFILE-eks-$CLUSTER" -EXISTS=$(aws configure list-profiles | grep -c "^$NEWPROFILE$") - -if [ $EXISTS == 0 ] -then - echo "* creating new configuration profile $NEWPROFILE for assume role $ADMINROLE" -else - echo "* replacing configuration for profile $NEWPROFILE for assume role $ADMINROLE" -fi -echo "" - -( echo "aws configure set profile.$NEWPROFILE.source_profile $PROFILE" ; \ - echo "aws configure set profile.$NEWPROFILE.region $REGION" ; \ - echo "aws configure set profile.$NEWPROFILE.role_arn $ADMINROLE" ; \ - echo "aws configure set profile.$NEWPROFILE.role_session_name $USER" ) | sh -x - -echo "" -echo "* test with: aws --profile $NEWPROFILE sts get-caller-identity" diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/data.eks-main.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/data.eks-main.tf deleted file mode 100644 index 7ead28b..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/data.eks-main.tf +++ /dev/null @@ -1,18 +0,0 @@ -locals { - aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster - # for main.tf - aws_eks_cluster = aws_eks_cluster.eks_cluster - # for all subdirectories - ## aws_eks_cluster = data.aws_eks_cluster.cluster -} - -data "aws_eks_cluster_auth" "cluster" { - name = var.cluster_name -} - -#--- -# for all subdirectories only -#--- -## data "aws_eks_cluster" "cluster" { -## name = var.cluster_name -## } diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/dns-zone.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/dns-zone.tf deleted file mode 100644 index a7f3f41..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/dns-zone.tf +++ /dev/null @@ -1,125 +0,0 @@ -locals { - cluster_domain_name = 
format("%v.%v", var.cluster_name, var.vpc_domain_name) - cluster_domain_description = format("%v EKS Cluster DNS Zone", var.cluster_name) -} - -resource "aws_route53_zone" "cluster_domain" { - name = local.cluster_domain_name - comment = local.cluster_domain_description - force_destroy = false - - vpc { - vpc_id = data.aws_vpc.eks_vpc.id - vpc_region = local.region - } - - ## dynamic "vpc" { - ## for_each = true ? var.region_map : {} - ## iterator = r - ## content { - ## vpc_id = var.main_dns_vpcs[r.value] - ## vpc_region = r.value - ## } - ## } - - lifecycle { - ignore_changes = [vpc] - } - - tags = merge( - local.base_tags, - local.common_tags, - var.tags, - var.application_tags, - tomap({ "Name" = local.cluster_domain_name }), - ) - - # depends_on = [ aws_route53_vpc_association_authorization.west_cluster_domain, aws_route53_vpc_association_authorization.east_cluster_domain ] -} - -output "cluster_domain_name" { - description = "DNS Zone Name" - value = local.cluster_domain_name -} - -output "cluster_domain_id" { - description = "DNS Zone ID" - value = aws_route53_zone.cluster_domain.zone_id -} - -output "cluster_domain_ns" { - description = "DNS Zone Nameservers" - value = aws_route53_zone.cluster_domain.name_servers -} - -#--- -# associate to main do2-govcloud vpc1-services east and west for inbound resolution -#--- -provider "aws" { - alias = "east_main_dns" - region = var.region_map["east"] - profile = var.main_dns_profile -} - -provider "aws" { - alias = "west_main_dns" - region = var.region_map["west"] - profile = var.main_dns_profile -} - -# resource "aws_route53_vpc_association_authorization" "cluster_domain" { -# for_each = var.region_map -# -# zone_id = aws_route53_zone.cluster_domain.zone_id -# vpc_region = each.value -# vpc_id = var.main_dns_vpcs[each.value] -# } - -resource "aws_route53_vpc_association_authorization" "west_cluster_domain" { - for_each = tomap({ "zone" = aws_route53_zone.cluster_domain }) - zone_id = each.value.zone_id - vpc_region = 
"us-gov-west-1" - vpc_id = var.main_dns_vpcs["us-gov-west-1"] -} - -resource "aws_route53_vpc_association_authorization" "east_cluster_domain" { - for_each = tomap({ "zone" = aws_route53_zone.cluster_domain }) - zone_id = each.value.zone_id - vpc_region = "us-gov-east-1" - vpc_id = var.main_dns_vpcs["us-gov-east-1"] -} - -resource "aws_route53_zone_association" "west_cluster_domain" { - provider = aws.west_main_dns - for_each = aws_route53_vpc_association_authorization.west_cluster_domain - - zone_id = each.value.zone_id - vpc_id = each.value.vpc_id - vpc_region = each.value.vpc_region -} - -resource "aws_route53_zone_association" "east_cluster_domain" { - provider = aws.east_main_dns - for_each = aws_route53_vpc_association_authorization.east_cluster_domain - - zone_id = each.value.zone_id - vpc_id = each.value.vpc_id - vpc_region = each.value.vpc_region -} - -# now we need to add the NS records for the new zone to the parent zone - -data "aws_route53_zone" "parent" { - name = var.vpc_domain_name - private_zone = true -} - -resource "aws_route53_record" "cluster_domain" { - allow_overwrite = true - name = local.cluster_domain_name - type = "NS" - ttl = 900 - zone_id = data.aws_route53_zone.parent.zone_id - - records = aws_route53_zone.cluster_domain.name_servers -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/dns-zone.tf.cat b/examples/full-cluster-tf-upgrade/1.24.in-progress/dns-zone.tf.cat deleted file mode 100644 index 93fb23a..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/dns-zone.tf.cat +++ /dev/null @@ -1,128 +0,0 @@ -locals { - cluster_domain_name = format("%v.%v", var.cluster_name, var.vpc_domain_name) - cluster_domain_description = format("%v EKS Cluster DNS Zone", var.cluster_name) -# true for gov, fale for cat -## aws_dns_infrastructure = false -} - -resource "aws_route53_zone" "cluster_domain" { - name = local.cluster_domain_name - comment = local.cluster_domain_description - force_destroy = false - - vpc { - vpc_id 
= data.aws_vpc.eks_vpc.id - vpc_region = local.region - } - - ## dynamic "vpc" { - ## for_each = true ? var.region_map : {} - ## iterator = r - ## content { - ## vpc_id = var.main_dns_vpcs[r.value] - ## vpc_region = r.value - ## } - ## } - - lifecycle { - ignore_changes = [vpc] - } - - tags = merge( - local.base_tags, - local.common_tags, - var.tags, - var.application_tags, - tomap({ "Name" = local.cluster_domain_name }), - ) - - # depends_on = [ aws_route53_vpc_association_authorization.west_cluster_domain, aws_route53_vpc_association_authorization.east_cluster_domain ] -} - -output "cluster_domain_name" { - description = "DNS Zone Name" - value = local.cluster_domain_name -} - -output "cluster_domain_id" { - description = "DNS Zone ID" - value = aws_route53_zone.cluster_domain.zone_id -} - -output "cluster_domain_ns" { - description = "DNS Zone Nameservers" - value = aws_route53_zone.cluster_domain.name_servers -} - -# now we need to add the NS records for the new zone to the parent zone -data "aws_route53_zone" "parent" { - name = var.vpc_domain_name - private_zone = true -} - -resource "aws_route53_record" "cluster_domain" { - allow_overwrite = true - name = local.cluster_domain_name - type = "NS" - ttl = 900 - zone_id = data.aws_route53_zone.parent.zone_id - - records = aws_route53_zone.cluster_domain.name_servers -} - -## #--- -## # associate to main do2-govcloud vpc1-services east and west for inbound resolution -## # NOT in cat -## #--- -## provider "aws" { -## alias = "east_main_dns" -## region = local.aws_dns_infrastructure ? var.region_map["east"] : "" -## profile = var.main_dns_profile -## } -## -## provider "aws" { -## alias = "west_main_dns" -## region = local.aws_dns_infrastructure ? 
var.region_map["west"] : "" -## profile = var.main_dns_profile -## } -## -## # resource "aws_route53_vpc_association_authorization" "cluster_domain" { -## # for_each = var.region_map -## # -## # zone_id = aws_route53_zone.cluster_domain.zone_id -## # vpc_region = each.value -## # vpc_id = var.main_dns_vpcs[each.value] -## # } -## -## resource "aws_route53_vpc_association_authorization" "west_cluster_domain" { -## for_each = local.aws_dns_infrastructure ? tomap({ "zone" = aws_route53_zone.cluster_domain }) : {} -## zone_id = each.value.zone_id -## vpc_region = "us-gov-west-1" -## vpc_id = var.main_dns_vpcs["us-gov-west-1"] -## } -## -## resource "aws_route53_vpc_association_authorization" "east_cluster_domain" { -## for_each = local.aws_dns_infrastructure ? tomap({ "zone" = aws_route53_zone.cluster_domain }) : {} -## zone_id = each.value.zone_id -## vpc_region = "us-gov-east-1" -## vpc_id = var.main_dns_vpcs["us-gov-east-1"] -## } -## -## resource "aws_route53_zone_association" "west_cluster_domain" { -## provider = aws.west_main_dns -## for_each = local.aws_dns_infrastructure ? aws_route53_vpc_association_authorization.west_cluster_domain : {} -## -## zone_id = each.value.zone_id -## vpc_id = each.value.vpc_id -## vpc_region = each.value.vpc_region -## } -## -## resource "aws_route53_zone_association" "east_cluster_domain" { -## provider = aws.east_main_dns -## for_each = local.aws_dns_infrastructure ? 
aws_route53_vpc_association_authorization.east_cluster_domain : {} -## -## zone_id = each.value.zone_id -## vpc_id = each.value.vpc_id -## vpc_region = each.value.vpc_region -## } -## diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/ebs-encryption.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/ebs-encryption.tf deleted file mode 100644 index c67da2a..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/ebs-encryption.tf +++ /dev/null @@ -1,89 +0,0 @@ -locals { - _app_tags_sc_parameters = [for k, v in var.application_tags : format("%v=%v", k, v)] - app_tags_sc_parameters = { for i in range(0, length(local._app_tags_sc_parameters)) : format("tagSpecification_%v", i + 1) => local._app_tags_sc_parameters[i] } -} - -resource "kubernetes_storage_class" "ebs_encrypted" { - metadata { - name = "gp2-encrypted" - annotations = { - "storageclass.kubernetes.io/is-default-class" = "true" - } - } - parameters = merge( - local.app_tags_sc_parameters, - { - fsType = "ext4" - type = "gp2" - encrypted = "true" - # kms_key_id = data.aws_kms_key.ebs_key.arn - kmsKeyId = data.aws_kms_key.ebs_key.arn - }) - storage_provisioner = "kubernetes.io/aws-ebs" - reclaim_policy = "Delete" - volume_binding_mode = "Immediate" - allow_volume_expansion = "true" -} - -# run once. 
This deletes the default storage class created by eks called 'gp2' -# vs trying to patch it - -resource "null_resource" "delete_default_sc" { - triggers = { - id = kubernetes_storage_class.ebs_encrypted.id - } - depends_on = [null_resource.kubeconfig] - provisioner "local-exec" { - command = "kubectl --kubeconfig ${path.root}/setup/kube.config delete sc gp2" - } -} - -## { -## "apiVersion": "storage.k8s.io/v1", -## "kind": "StorageClass", -## "metadata": { -## "annotations": { -## "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"storage.k8s.io/v1\",\"kind\":\"StorageClass\",\"metadata\":{\"annotations\":{\"storageclass.kubernetes.io/is-default-class\":\"true\"},\"name\":\"gp2\"},\"parameters\":{\"fsType\":\"ext4\",\"type\":\"gp2\"},\"provisioner\":\"kubernetes.io/aws-ebs\",\"volumeBindingMode\":\"WaitForFirstConsumer\"}\n", -## "storageclass.kubernetes.io/is-default-class": "true" -## }, -## "creationTimestamp": "2021-09-20T16:10:48Z", -## "managedFields": [ -## { -## "apiVersion": "storage.k8s.io/v1", -## "fieldsType": "FieldsV1", -## "fieldsV1": { -## "f:metadata": { -## "f:annotations": { -## ".": {}, -## "f:kubectl.kubernetes.io/last-applied-configuration": {}, -## "f:storageclass.kubernetes.io/is-default-class": {} -## } -## }, -## "f:parameters": { -## ".": {}, -## "f:fsType": {}, -## "f:type": {} -## }, -## "f:provisioner": {}, -## "f:reclaimPolicy": {}, -## "f:volumeBindingMode": {} -## }, -## "manager": "kubectl-client-side-apply", -## "operation": "Update", -## "time": "2021-09-20T16:10:48Z" -## } -## ], -## "name": "gp2", -## "resourceVersion": "253", -## "uid": "5768ea51-ae73-450e-b0de-38a07be0a5d3" -## }, -## "parameters": { -## "fsType": "ext4", -## "type": "gp2" -## }, -## "provisioner": "kubernetes.io/aws-ebs", -## "reclaimPolicy": "Delete", -## "volumeBindingMode": "WaitForFirstConsumer" - -## } - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/ec2-keypair.tf 
b/examples/full-cluster-tf-upgrade/1.24.in-progress/ec2-keypair.tf deleted file mode 100644 index e47db54..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/ec2-keypair.tf +++ /dev/null @@ -1,36 +0,0 @@ -locals { - keypair_name = format("ec2-ssh-%v%v", local._prefixes["eks"], var.cluster_name) -} - -# two-step process to create -# terraform apply -target=null_resource.generate_keypair -# terraform apply -# when done, add to git -# cd setup -# echo inf-ec2-keypair >> .gitignore -# git-secret add inf-ec2-keypair -# git-secret hide -# git add inf-ec2-keypair.{pub,secret} -# git commit -m'add ec2-keypair: inf-ec2-keypair' inf-ec2-keypair.{pub,secret} .gitignore - -# inf-keypair -resource "null_resource" "generate_keypair" { - provisioner "local-exec" { - command = "test -d setup || mkdir setup" - } - provisioner "local-exec" { - working_dir = "./setup" - command = "ssh-keygen -f ${local.keypair_name} -N '' -t rsa -b 2048 -C '${local.keypair_name}@${var.cluster_name}.${var.vpc_domain_name}'" - } -} - -resource "aws_key_pair" "cluster_keypair" { - key_name = local.keypair_name - public_key = file("setup/${local.keypair_name}.pub") - depends_on = [null_resource.generate_keypair] -} - -output "cluster_keypair" { - description = "EC2 keypair for EKS Cluster" - value = aws_key_pair.cluster_keypair.key_name -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/README.efs.md b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/README.efs.md deleted file mode 100644 index 14039bd..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/README.efs.md +++ /dev/null @@ -1,81 +0,0 @@ -# eks-efs - -A standard EKS cluster only provides the `gp2` storage class, which is an EBS based persistent volume. -`gp2` can only be used with ReadWriteOnce persistent volumes. -If an application requires ReadOnlyMany or ReadWriteMany, a different type of persistent volume is required. 
-The eks-efs module installs an efs-provisioner in the cluster with a storage class of `efs` which allows all types of persistent volumes. - -## Parameters - -| Name | Description | -| ---- | ----------- | -| region | The AWS region that EKS cluster is located. | -| cluster_name | The name of the cluster in which efs-provisioner will be installed. | -| subnet_ids | A list of subnets inside the VPC. Used for EFS mount points. | -| security_groups | Security groups for all worker management | -| aws_efs_csi_driver_version | Which version of the aws-efs-csi-driver helm chart to use. Currently defaults to 2.1.4. | -| external_provisioner_tag | Which tag of public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner to use. Currently defaults to v2.1.1-eks-1-18-2 | -| livenessprobe_tag | Which tag of public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe to use. Currently defaults to v2.2.0-eks-1-18-2 | -| node_driver_registrar_tag | Which tag of public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar to use. Currently defaults to v2.1.0-eks-1-18-2 | - -## Updating the aws-efs-csi-driver chart - -When using a private VPC, the helm chart cannot be downloaded from "https://kubernetes-sigs.github.io/aws-efs-csi-driver/" during installation. -A local copy of the chart is maintained within the terraform script. -The lastest version of the helm chart can be found by looking at https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/charts/aws-efs-csi-driver/Chart.yaml and checking the `version:` tag (not the `appVersion` tag.) 
-To update this helm chart to the latest version, the procedure is to: - -```script -cd charts -helm add repo https://kubernetes-sigs.github.io/aws-efs-csi-driver/ aws-efs-csi-driver -helm repo update -rm -fr aws-efs-csi-driver -helm pull aws-efs-csi-driver --untar -``` - -After completing these steps, be sure to examine aws-efs-csi-driver/values.yaml and confirm that the tags listed for the sidecar images match the tags assigned by default in input.tf. -For example, the values.yaml file: - -```json -sidecars: - livenessProbe: - image: - repository: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe - tag: v2.2.0-eks-1-18-2 - pullPolicy: IfNotPresent - resources: {} - nodeDriverRegistrar: - image: - repository: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar - tag: v2.1.0-eks-1-18-2 - pullPolicy: IfNotPresent - resources: {} - csiProvisioner: - image: - repository: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner - tag: v2.1.1-eks-1-18-2 - pullPolicy: IfNotPresent - resources: {} -``` - -Entries in input.tf: - -```hcl -variable "livenessprobe_tag" { - description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/livenessp -robe to use." - default = "v2.2.0-eks-1-18-2" -} - -variable "node_driver_registrar_tag" { - description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/node-driv -er-registrar to use." - default = "v2.1.0-eks-1-18-2" -} - -variable "external_provisioner_tag" { - description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/external- -provisioner to use." - default = "v2.1.1-eks-1-18-2" -} -``` diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/README.md b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/README.md deleted file mode 100644 index 7d589b0..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/README.md +++ /dev/null @@ -1,164 +0,0 @@ -# EFS - -This sets up the needed EFS resources for persistent volumes. 
See [this](README.efs.md) for more details. - -## Links - -* https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html -* https://github.com/kubernetes-sigs/aws-efs-csi-driver -* https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/433 -* https://github.com/hashicorp/terraform-provider-kubernetes/issues/723#issuecomment-679423792 -* https://dev.to/vidyasagarmsc/update-multiple-lines-in-a-yaml-file-49fb - -## Initialize - -* Proxy setup - -Proxy is needed because system may not have access to the `registry.terraform.io` site directory, -and if indirectly, it may not be able to handle a proxy redirect. You may not need to use this, but if you get -errors from the `tf-init`, this is your first thing to setup. - -```shell -export HTTP_PROXY=http://proxy.tco.census.gov:3128 -export HTTPS_PROXY=http://proxy.tco.census.gov:3128 -``` - -## Terraform Automated - -A `tf-run.data` file exists here, so the simplest way to implemnt is with the `tf-run.sh` script. - -* copy the `remote_state.yml` from the parent and update `directory` to be the current directory -* run the tf-run.sh - -```console -% tf-run.sh apply -``` - -* example of the `tf-run.sh` steps - -This is part of a larger cluster configuration, so at the end of the run it indicates another directory -to visit when done. 
- -```console -% tf-run.sh list -* running action=plan -* START: tf-run.sh v1.1.2 start=1636558187 end= logfile=logs/run.plan.20211110.1636558187.log (not-created) -* reading from tf-run.data -* read 7 entries from tf-run.data -> list -** START: start=1636558187 -* 1 COMMAND> tf-directory-setup.py -l none -f -* 2 COMMAND> setup-new-directory.sh -* 3 COMMAND> tf-init -upgrade -* 4 POLICY> (*.tf) aws_iam_policy.efs-policy -* 4 tf-plan -target=aws_iam_policy.efs-policy -* 5 tf-plan -* 6 COMMAND> tf-directory-setup.py -l s3 -* 7 STOP> cd ../common-services and tf-run.sh apply -** END: start=1636558187 end=1636558187 elapsed=0 logfile=logs/run.plan.20211110.1636558187.log (not-created) -``` - -It is highly recommended to use the `tf-run.sh` approach. - -## Terraform Manual - - -```shell -tf-directory-setup.py -l none -setup-new-directory.sh -tf-init -```` - -* Apply the EFS policy first (before the role) - -```shell -tf-apply -target=aws_iam_policy.efs-policy -``` - -* Apply the rest - -This must be done from a system with the skopeo command, so RHEL8+. - -To use the local install, The efs/charts/ directory -must be populated with the expected code (see [README.md](README.md)) outside of terraform, -much like the .tf files are created. Currently, as the box we run this from has internet access, -we can deploy by pulling the helm stuff from the internet. - -```shell -tf-apply -tf-directory-setup.py -l s3 -``` - -## Post Setup Examination - -This gives us (look at the efs-csi-* ones) to see what was setup. Your `kubectl` configuration file -needs to be setup (one is extracted in `setup/kube.config` as part of this configuration). 
- -```console -% kubectl --kubeconfig setup/kube.config get pods -n kube-system -NAME READY STATUS RESTARTS AGE -aws-node-j6n6z 1/1 Running 1 27h -aws-node-nmgqm 1/1 Running 1 27h -aws-node-t5ggn 1/1 Running 1 27h -aws-node-vxlvw 1/1 Running 0 27h -coredns-65bfc5645f-254kx 1/1 Running 0 29h -coredns-65bfc5645f-zpvld 1/1 Running 0 29h -efs-csi-controller-7c88dbd56d-chdkt 3/3 Running 0 3m36s -efs-csi-controller-7c88dbd56d-hsws7 3/3 Running 0 3m36s -efs-csi-node-4gjdh 3/3 Running 0 3m36s -efs-csi-node-g49r7 3/3 Running 0 3m36s -efs-csi-node-hq6q9 3/3 Running 0 3m36s -efs-csi-node-lcdmd 3/3 Running 0 3m36s -kube-proxy-dp9zl 1/1 Running 0 27h -kube-proxy-n9l75 1/1 Running 0 27h -kube-proxy-qrv2w 1/1 Running 0 27h -kube-proxy-zssvb 1/1 Running 0 27h -``` - -* Create PVC Automated - -Use the `persistent-volume.tf`, which is setup by default, and should happen as part of the final apply above. - -* Create PVC Manually - -```json -# pvc.yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: efs-test3-claim -spec: - accessModes: - - ReadWriteMany - volumeMode: Filesystem - resources: - requests: - storage: 25Gi - storageClassName: efs -``` - -* Examinine the PV and PVC - -```console -% kubectl get pv -No resources found -% kubectl get pvc -No resources found in default namespace. 
-% kubectl apply -f pvc.yaml -persistentvolumeclaim/efs-test3-claim created -% kubectl get pvc -NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE -efs-test3-claim Pending efs 39s -``` - -* Describing the PVC - -```shell -kubectl --kubeconfig setup/kube.config describe pvc efs-test3-claim -``` - -To patch to make it work with the regional STS endpoint (this is handled in the TF code): - -```shell -kubectl --kubeconfig setup/kube.config -n kube-system set env deployment/efs-csi-controller AWS_STS_REGIONAL_ENDPOINTS=regional -``` diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/copy_image.sh b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/copy_image.sh deleted file mode 120000 index 889e269..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/copy_image.sh +++ /dev/null @@ -1 +0,0 @@ -../bin/copy_image.sh \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/copy_images.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/copy_images.tf deleted file mode 100644 index f7e13be..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/copy_images.tf +++ /dev/null @@ -1,59 +0,0 @@ - -data "aws_ecr_authorization_token" "token" {} - -locals { - repo_parent_name = format("eks/%v", var.cluster_name) - images = [ - { - image = "external-provisioner" - tag = var.external_provisioner_tag - }, - { - image = "livenessprobe" - tag = var.livenessprobe_tag - }, - { - image = "node-driver-registrar" - tag = var.node_driver_registrar_tag - }, - ] -} - -resource "aws_ecr_repository" "repository" { - for_each = { for image in local.images : image.image => image } - - name = format("%v/%v", local.repo_parent_name, each.value.image) - image_tag_mutability = "IMMUTABLE" - - image_scanning_configuration { - scan_on_push = true - } - - encryption_configuration { - encryption_type = "KMS" - } - - tags = merge( - #local.common_tags, - #local.base_tags, - #var.application_tags, - tomap({ 
"Name" = format("ecr-eks-%v-%v", var.cluster_name, each.value.image) }), - ) -} - -resource "null_resource" "copy_images" { - for_each = { for image in local.images : image.image => image } - - provisioner "local-exec" { - command = "${path.module}/copy_image.sh" - environment = { - AWS_PROFILE = var.profile - AWS_REGION = local.region - SOURCE_IMAGE = format("%v/%v:%v", local.src_reg, each.value.image, each.value.tag) - DESTINATION_IMAGE = format("%v:%v", aws_ecr_repository.repository[each.key].repository_url, each.value.tag) - DESTINATION_USERNAME = data.aws_ecr_authorization_token.token.user_name - DESTINATION_PASSWORD = data.aws_ecr_authorization_token.token.password - } - } -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/data.eks-subdirectory.tf deleted file mode 120000 index 43b5430..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/data.eks-subdirectory.tf +++ /dev/null @@ -1 +0,0 @@ -../includes.d/data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/ecr.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/ecr.tf deleted file mode 100644 index 228a775..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/ecr.tf +++ /dev/null @@ -1,57 +0,0 @@ - -# Populated from: -# https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html - -data "aws_caller_identity" "whoami" {} - -locals { - af_south_1 = (var.region == "af-south-1" ? "877085696533.dkr.ecr.af-south-1.amazonaws.com/" : "") - af = local.af_south_1 - - ap_east_1 = var.region == "ap-east-1" ? "800184023465.dkr.ecr.ap-east-1.amazonaws.com/" : "" - ap_northeast_1 = var.region == "ap-northeast-1" ? "602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/" : "" - ap_northeast_2 = var.region == "ap-northeast-2" ? 
"602401143452.dkr.ecr.ap-northeast-2.amazonaws.com/" : "" - ap_northeast_3 = var.region == "ap-northeast-3" ? "602401143452.dkr.ecr.ap-northeast-3.amazonaws.com/" : "" - ap_south_1 = var.region == "ap-south-1" ? "602401143452.dkr.ecr.ap-south-1.amazonaws.com/" : "" - ap_southeast_1 = var.region == "ap-southeast-1" ? "602401143452.dkr.ecr.ap-southeast-1.amazonaws.com/" : "" - ap_southeast_2 = var.region == "ap-southeast-2" ? "602401143452.dkr.ecr.ap-southeast-2.amazonaws.com/" : "" - ap_1 = "${local.ap_east_1}${local.ap_northeast_1}${local.ap_northeast_2}${local.ap_northeast_3}${local.ap_south_1}" - ap_2 = "${local.ap_southeast_1}${local.ap_southeast_2}" - ap = "${local.ap_1}${local.ap_2}" - - ca_central_1 = var.region == "ca-central-1" ? "602401143452.dkr.ecr.ca-central-1.amazonaws.com/" : "" - ca = local.ca_central_1 - - cn_north_1 = var.region == "cn-north-1" ? "918309763551.dkr.ecr.cn-north-1.amazonaws.com.cn/" : "" - cn_northwest_1 = var.region == "cn-northwest-1" ? "961992271922.dkr.ecr.cn-northwest-1.amazonaws.com.cn/" : "" - cn = "${local.cn_north_1}${local.cn_northwest_1}" - - eu_central_1 = var.region == "eu-central-1" ? "602401143452.dkr.ecr.eu-central-1.amazonaws.com/" : "" - eu_north_1 = var.region == "eu-north-1" ? "602401143452.dkr.ecr.eu-north-1.amazonaws.com/" : "" - eu_south_1 = var.region == "eu-south-1" ? "590381155156.dkr.ecr.eu-south-1.amazonaws.com/" : "" - eu_west_1 = var.region == "eu-west-1" ? "602401143452.dkr.ecr.eu-west-1.amazonaws.com/" : "" - eu_west_2 = var.region == "eu-west-2" ? "602401143452.dkr.ecr.eu-west-2.amazonaws.com/" : "" - eu_west_3 = var.region == "eu-west-3" ? "602401143452.dkr.ecr.eu-west-3.amazonaws.com/" : "" - eu = "${local.eu_central_1}${local.eu_north_1}${local.eu_south_1}${local.eu_west_1}${local.eu_west_2}${local.eu_west_3}" - - me_south_1 = var.region == "me-south-1" ? "558608220178.dkr.ecr.me-south-1.amazonaws.com/" : "" - me = local.me_south_1 - - sa_east_1 = var.region == "sa-east-1" ? 
"602401143452.dkr.ecr.sa-east-1.amazonaws.com/" : "" - sa = local.sa_east_1 - - us_east_1 = var.region == "us-east-1" ? "602401143452.dkr.ecr.us-east-1.amazonaws.com/" : "" - us_east_2 = var.region == "us-east-2" ? "602401143452.dkr.ecr.us-east-2.amazonaws.com/" : "" - us_gov_east_1 = var.region == "us-gov-east-1" ? "151742754352.dkr.ecr.us-gov-east-1.amazonaws.com/" : "" - us_gov_west_1 = var.region == "us-gov-west-1" ? "013241004608.dkr.ecr.us-gov-west-1.amazonaws.com/" : "" - us_west_1 = var.region == "us-west-1" ? "602401143452.dkr.ecr.us-west-1.amazonaws.com/" : "" - us_west_2 = var.region == "us-west-2" ? "602401143452.dkr.ecr.us-west-2.amazonaws.com/" : "" - us = "${local.us_east_1}${local.us_east_2}${local.us_gov_east_1}${local.us_gov_west_1}${local.us_west_1}${local.us_west_2}" - - ecr = "${local.af}${local.ap}${local.ca}${local.cn}${local.eu}${local.me}${local.sa}${local.us}" - - - public_reg = "public.ecr.aws" - src_reg = format("%v/eks-distro/kubernetes-csi", local.public_reg) - account_ecr = "${data.aws_caller_identity.whoami.account_id}.dkr.ecr.${var.region}.amazonaws.com/${var.cluster_name}" -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/efs.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/efs.tf deleted file mode 100644 index 9fb5563..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/efs.tf +++ /dev/null @@ -1,26 +0,0 @@ -# Create an Amazon EFS file system for the EKS cluster. -# Step 4a: Create a file system. -# Step 4b: Create mount targets. 
-module "efs" { - source = "git@github.e.it.census.gov:terraform-modules/aws-efs.git" - - name = var.cluster_name - vpc_id = local.vpc_id - subnet_ids = local.subnet_ids - security_groups = [local.cluster_worker_sg_id] - ## subnet_ids = local.cni_subnet_ids - ## security_groups = [local.cluster_cni_worker_sg_id] - - tags = merge( - local.base_tags, - local.common_tags, - var.application_tags, - tomap({ "efs.csi.aws.com/cluster" = "true" }), - ) -} - -# look at efs module. Add -# efs_tags -# kms_tags -# moint_point_tags -# or use the override tags thing diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/kubeconfig.eks-subdirectory.tf deleted file mode 120000 index e3750a4..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/kubeconfig.eks-subdirectory.tf +++ /dev/null @@ -1 +0,0 @@ -../includes.d/kubeconfig.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/locals.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/locals.tf deleted file mode 100644 index 4b9ae5a..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/locals.tf +++ /dev/null @@ -1,17 +0,0 @@ -locals { - base_tags = { - "eks-cluster-name" = var.cluster_name - "boc:tf_module_version" = local._module_version - "boc:created_by" = "terraform" - } -} - -# replace TF remote state accordingly in parent_rs with that from the parent directory, and be sure to make the link -locals { - vpc_id = local.parent_rs.cluster_vpc_id - subnet_ids = local.parent_rs.cluster_subnet_ids - cluster_worker_sg_id = local.parent_rs.cluster_worker_sg_id - - oidc_provider_url = local.parent_rs.oidc_provider_url - oidc_provider_arn = local.parent_rs.oidc_provider_arn -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/main.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/main.tf deleted file mode 100644 
index 446e3b9..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/main.tf +++ /dev/null @@ -1,125 +0,0 @@ -# Most of this file references the AWS documentation to install the -# Amazon EFS CSI driver. This documentation is found here: -# https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html - - -## data "tls_certificate" "certs" { -## url = data.aws_eks_cluster.cluster.identity[0].oidc[0].issuer -## } - -locals { - charts = { - "efs-provisioner" = { - name = "aws-efs-csi-driver" - repository = "https://kubernetes-sigs.github.io/aws-efs-csi-driver" - version = "2.1.4" - use_remote = true - } - } -} - -# Create an IAM policy and role -# Step 1b: -# Contents of the policy are found here: -# https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/docs/iam-policy-example.json -# -# See policy.tf - -# Create an IAM policy and role -# Step 2b: -# -# See role.tf - -#resource "null_resource" "helm_charts" { -# for_each = toset(local.charts) -# provisioner "local-exec" { -# command = "test -d ${path.module}/charts/${each.key} || mkdir -p ${path.module}/charts/${each.key}" -# } -#} - -# Install the Amazon EFS driver -# Step 3: -# See the readme `Updating the aws-efs-csi-driver chart` on updating this chart. -resource "helm_release" "efs-provisioner" { - depends_on = [null_resource.copy_images] - - chart = "aws-efs-csi-driver" - name = "efs-provisioner" - namespace = "kube-system" - # repository = "${path.module}/charts" - repository = local.charts["efs-provisioner"].use_remote ? local.charts["efs-provisioner"].repository : "${path.module}/charts" - version = local.charts["efs-provisioner"].use_remote ? 
local.charts["efs-provisioner"].version : null - recreate_pods = true - timeout = 300 - set { - name = "image.repository" - value = "${local.ecr}eks/aws-efs-csi-driver" - } - set { - name = "sidecars.livenessProbe.image.repository" - value = aws_ecr_repository.repository["livenessprobe"].repository_url - } - set { - name = "sidecars.livenessProbe.image.tag" - value = var.livenessprobe_tag - } - set { - name = "sidecars.nodeDriverRegistrar.image.repository" - value = aws_ecr_repository.repository["node-driver-registrar"].repository_url - } - set { - name = "sidecars.nodeDriverRegistrar.image.tag" - value = var.node_driver_registrar_tag - } - set { - name = "sidecars.csiProvisioner.image.repository" - value = aws_ecr_repository.repository["external-provisioner"].repository_url - } - set { - name = "sidecars.csiProvisioner.image.tag" - value = var.external_provisioner_tag - } - set { - name = "controller.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" - # value = aws_iam_role.cluster_efs_role.arn - value = module.role_efs-driver.role_arn - } -} - -# The efs-provisioner defaults to using sts.amazonaws.com which resolves to -# a public IP address. The cluster cannot access it. However, this issue: -# https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/433 -# was resolved with the ability to use an environment variable to tell the -# provisioner to use the regional sts instead. The problem is that the -# helm chart does not have a provision to set this. So instead, after the -# provisioner is deployed, patch the deployment: -resource "null_resource" "patch-efs-provisioner-for-regional-sts" { - depends_on = [helm_release.efs-provisioner] - provisioner "local-exec" { - environment = { - KUBECONFIG = "${path.root}/setup/kube.config" - } - command = "kubectl -n kube-system set env deployment/efs-csi-controller AWS_STS_REGIONAL_ENDPOINTS=regional" - } -} - -# Create an Amazon EFS file system for the EKS cluster. -# Step 4a: Create a file system. 
-# Step 4b: Create mount targets. - -# Create a default storage class. -resource "kubernetes_storage_class" "efs-sc" { - depends_on = [module.efs] - - metadata { - name = "efs" - } - storage_provisioner = "efs.csi.aws.com" - parameters = { - provisioningMode = "efs-ap" - fileSystemId = module.efs.id - directoryPerms = "700" - } - mount_options = ["tls"] -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/parent_rs.tf deleted file mode 120000 index d85ece6..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/parent_rs.tf +++ /dev/null @@ -1 +0,0 @@ -../includes.d/parent_rs.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/persistent-volume.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/persistent-volume.tf deleted file mode 100644 index 7ff0766..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/persistent-volume.tf +++ /dev/null @@ -1,19 +0,0 @@ -resource "kubernetes_persistent_volume_claim" "cluster-base-efs" { - metadata { - name = format("%v%v-%v", local._prefixes["eks"], var.cluster_name, "base-claim") - # namespace = kubernetes_namespace.cicd_namespace.metadata[0].name - } - wait_until_bound = false - spec { - access_modes = ["ReadWriteMany"] - # capacity = { - # storage = "25Gi" - # } - resources { - requests = { - storage = "25Gi" - } - } - storage_class_name = "efs" - } -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/policy.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/policy.tf deleted file mode 100644 index 2693fde..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/policy.tf +++ /dev/null @@ -1,55 +0,0 @@ -# apply policy before creating role -# tf-apply -target=aws_iam_policy.efs-policy - -resource "aws_iam_policy" "efs-policy" { - name = format("%v%v-efs-driver", local._prefixes["eks-policy"], var.cluster_name) - path = "/" 
- description = "Allow configuration of the EFS" - policy = data.aws_iam_policy_document.efs-policy.json - - tags = merge( - local.base_tags, - local.common_tags, - var.application_tags, - tomap({ "Name" = format("%v%v-efs-driver", local._prefixes["eks-policy"], var.cluster_name) }), - ) -} - -# TBD: refine resources to limit only to eks configurations -data "aws_iam_policy_document" "efs-policy" { - statement { - sid = "EKSEFSDescribe" - effect = "Allow" - resources = ["*"] - actions = [ - "elasticfilesystem:DescribeAccessPoints", - "elasticfilesystem:DescribeFileSystems", - ] - } - statement { - sid = "EKSEFSCreateAccessPoint" - effect = "Allow" - resources = ["*"] - actions = [ - "elasticfilesystem:CreateAccessPoint" - ] - condition { - test = "StringLike" - variable = "aws:RequestTag/efs.csi.aws.com/cluster" - values = ["true"] - } - } - statement { - sid = "EKSEFSDeleteAccessPoint" - effect = "Allow" - resources = ["*"] - actions = [ - "elasticfilesystem:DeleteAccessPoint" - ] - condition { - test = "StringLike" - variable = "aws:ResourceTag/efs.csi.aws.com/cluster" - values = ["true"] - } - } -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/prefixes.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/prefixes.tf deleted file mode 120000 index e0bf5ad..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/prefixes.tf +++ /dev/null @@ -1 +0,0 @@ -../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/providers.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/providers.tf deleted file mode 120000 index 7244d01..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/providers.tf +++ /dev/null @@ -1 +0,0 @@ -../providers.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/region.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/region.tf deleted file mode 100644 index b7b1696..0000000 --- 
a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/region.tf +++ /dev/null @@ -1,4 +0,0 @@ -locals { - region = var.region -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/role.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/role.tf deleted file mode 100644 index 3d203d1..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/role.tf +++ /dev/null @@ -1,48 +0,0 @@ -#--- -# cluster -#--- -locals { - # oidc = replace(data.aws_eks_cluster.cluster.identity[0].oidc[0].issuer, "https://", "") - account_id = data.aws_caller_identity.current.account_id - principal = format("arn:%v:iam::%v:oidc-provider/%v", data.aws_arn.current.partition, local.account_id, local.oidc_provider_url) -} - -# create: aws_iam_policy.efs-policy first -module "role_efs-driver" { - source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade" - - role_name = format("%v%v-efs-driver", local._prefixes["eks"], var.cluster_name) - role_description = "EKS EFS Driver Role for ${var.cluster_name}" - enable_ldap_creation = false - assume_policy_document = data.aws_iam_policy_document.efs_assume_webidentity.json - attached_policies = [aws_iam_policy.efs-policy.arn] - - tags = merge( - local.base_tags, - local.common_tags, - var.application_tags, - tomap({ "Name" = format("%v%v-efs-driver", local._prefixes["eks-role"], var.cluster_name) }), - ) -} - -data "aws_iam_policy_document" "efs_assume_webidentity" { - statement { - sid = "EFSAssumeRoleWebIdentity" - effect = "Allow" - actions = ["sts:AssumeRoleWithWebIdentity"] - principals { - type = "Federated" - identifiers = [local.principal] - } - condition { - test = "StringEquals" - variable = "${local.oidc_provider_url}:sub" - values = ["system:serviceaccount:kube-system:efs-csi-controller-sa"] - } - } -} - -output "role_efs-driver_arn" { - description = "Role ARN for EKS EFS Driver Role" - value = module.role_efs-driver.role_arn -} diff --git 
a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/tf-run.data b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/tf-run.data deleted file mode 100644 index ba1d9c5..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/tf-run.data +++ /dev/null @@ -1,12 +0,0 @@ -VERSION 1.2.2 -REMOTE-STATE -COMMAND tf-directory-setup.py -l none -f -COMMAND setup-new-directory.sh -COMMAND tf-init -upgrade -COMMAND ln -sf ../versions.tf -COMMAND ln -sf ../settings.auto.tfvars -LINKTOP init -POLICY -ALL -COMMAND tf-directory-setup.py -l s3 -STOP cd ../irsa-roles and tf-run.sh apply diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/variables.efs.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/variables.efs.tf deleted file mode 100644 index 0e2acb6..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/variables.efs.tf +++ /dev/null @@ -1,37 +0,0 @@ -# variable "eks_vpc_name" { -# description = "Define the VPC name that will be used by this cluster" -# type = string -# default = "*vpc4*" -# } -# -# variable "subnets_name" { -# description = "Define the name of the subnets to be used by this cluster" -# type = string -# default = "*-apps-*" -# } - -variable "cluster_worker_sg_id" { - description = "Security group for all worker management." - type = string - default = "" -} - -# See the readme `Updating the aws-efs-csi-driver chart` to find these values. 
-variable "livenessprobe_tag" { - description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/liveness" - type = string - default = "v2.2.0-eks-1-18-2" -} - -variable "node_driver_registrar_tag" { - description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/node-dri" - type = string - default = "v2.1.0-eks-1-18-2" -} - -variable "external_provisioner_tag" { - description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/external" - type = string - default = "v2.1.1-eks-1-18-2" -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/variables.eks.tf deleted file mode 120000 index 7dd95db..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/variables.eks.tf +++ /dev/null @@ -1 +0,0 @@ -../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/version.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/version.tf deleted file mode 120000 index 061373c..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/version.tf +++ /dev/null @@ -1 +0,0 @@ -../version.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/versions.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/versions.tf deleted file mode 120000 index 8bd0ff1..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/efs/versions.tf +++ /dev/null @@ -1 +0,0 @@ -../versions.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/eks-console-access.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/eks-console-access.tf deleted file mode 100644 index f590c3f..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/eks-console-access.tf +++ /dev/null @@ -1,70 +0,0 @@ -# ```shell -# curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml -# curl -O 
https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml -# ``` -# -# For full console, we'll use the first one. -# -# ```console -# % kubectl apply -f eks-console-full-access.yaml -# clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole created -# clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding created -# ``` - -locals { - cluster_roles = [ - { - name = "eks-console-full-access" - url = "https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml" - enabled = true - }, - { - name = "eks-console-restricted-access" - url = "https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml" - enabled = false - }, - ] - cluster_roles_map = { for cr in local.cluster_roles : cr.name => cr } -} - - -data "http" "cluster_roles" { - for_each = local.cluster_roles_map - url = each.value.url -} - -resource "null_resource" "cluster_roles" { - for_each = local.cluster_roles_map - triggers = { - roles = join(",", [each.key, each.value.url]) - } - provisioner "local-exec" { - command = "test -d setup || mkdir setup" - } - provisioner "local-exec" { - command = "echo '${data.http.cluster_roles[each.key].body}' > setup/${each.value.name}.yaml" - } -} - -resource "null_resource" "apply_cluster_roles" { - for_each = { for k, v in local.cluster_roles_map : k => v if v.enabled } - triggers = { - roles = join(",", [each.key, each.value.url]) - } - depends_on = [null_resource.kubeconfig] - # provisioner "local-exec" { - # command = "if [ -z $KUBECONFIG ]; then 'echo missing KUBECONFIG'; exit 1; else exit 0; fi" - # } - # provisioner "local-exec" { - # command = "if [ ! -r $KUBECONFIG ]; then 'echo unreadable KUBECONFIG'; exit 1; else exit 0; fi" - # } - # provisioner "local-exec" { - # command = "which kubectl > /dev/null 2>&1; if [ $? 
!= 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" - # } - provisioner "local-exec" { - environment = { - KUBECONFIG = "${path.root}/setup/kube.config" - } - command = "kubectl apply -f setup/${each.value.name}.yaml" - } -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/group.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/group.tf deleted file mode 100644 index cdffce9..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/group.tf +++ /dev/null @@ -1,13 +0,0 @@ -module "group_cluster-admin" { - source = "git@github.e.it.census.gov:terraform-modules/aws-iam-group.git" - - group_name = format("%v%v-cluster-admin", local._prefixes["eks"], var.cluster_name) - attached_policies = [aws_iam_policy.cluster-admin-policy.arn, aws_iam_policy.cluster-admin_assume_policy.arn] - - tags = merge( - local.base_tags, - local.common_tags, - var.tags, - var.application_tags, - ) -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/README.md b/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/README.md deleted file mode 100644 index 97c168f..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/README.md +++ /dev/null @@ -1,30 +0,0 @@ -## Requirements - -No requirements. - -## Providers - -| Name | Version | -|------|---------| -| [aws](#provider\_aws) | n/a | -| [null](#provider\_null) | n/a | - -## Modules - -No modules. - -## Resources - -| Name | Type | -|------|------| -| [null_resource.kubeconfig](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | -| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | - -## Inputs - -No inputs. - -## Outputs - -No outputs. 
diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/data.eks-main.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/data.eks-main.tf deleted file mode 100644 index 7ead28b..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/data.eks-main.tf +++ /dev/null @@ -1,18 +0,0 @@ -locals { - aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster - # for main.tf - aws_eks_cluster = aws_eks_cluster.eks_cluster - # for all subdirectories - ## aws_eks_cluster = data.aws_eks_cluster.cluster -} - -data "aws_eks_cluster_auth" "cluster" { - name = var.cluster_name -} - -#--- -# for all subdirectories only -#--- -## data "aws_eks_cluster" "cluster" { -## name = var.cluster_name -## } diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/data.eks-subdirectory.tf deleted file mode 100644 index 870e8c6..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/data.eks-subdirectory.tf +++ /dev/null @@ -1,15 +0,0 @@ -data "aws_eks_cluster" "cluster" { - name = var.cluster_name -} - -data "aws_eks_cluster_auth" "cluster" { - name = var.cluster_name -} - -locals { - aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster - # for main.tf - # aws_eks_cluster = aws_eks_cluster.eks_cluster - # for all subdirectories - aws_eks_cluster = data.aws_eks_cluster.cluster -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/kubeconfig.eks-main.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/kubeconfig.eks-main.tf deleted file mode 100644 index 5a6333e..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/kubeconfig.eks-main.tf +++ /dev/null @@ -1,29 +0,0 @@ -resource "null_resource" "kubeconfig" { - triggers = { - always_run = timestamp() - } - provisioner "local-exec" { - command = "which kubectl > /dev/null 2>&1; if [ $? 
!= 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" - } - provisioner "local-exec" { - command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'" - } - provisioner "local-exec" { - environment = { - AWS_PROFILE = var.profile - AWS_REGION = local.region - } - command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/kube.config" - } - depends_on = [aws_eks_cluster.eks_cluster] -} - -#--- -# call it like -#--- -## provisioner "local-exec" { -## environment = { -## KUBECONFIG = "${path.root}/setup/kube.config" -## } -## command = "kubectli set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true" -## } diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/kubeconfig.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/kubeconfig.eks-subdirectory.tf deleted file mode 100644 index 5e386f5..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/kubeconfig.eks-subdirectory.tf +++ /dev/null @@ -1,29 +0,0 @@ -resource "null_resource" "kubeconfig" { - triggers = { - always_run = timestamp() - } - provisioner "local-exec" { - command = "which kubectl > /dev/null 2>&1; if [ $? 
!= 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" - } - provisioner "local-exec" { - command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'" - } - provisioner "local-exec" { - environment = { - AWS_PROFILE = var.profile - AWS_REGION = local.region - } - command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/kube.config" - } - depends_on = [data.aws_eks_cluster.cluster] -} - -#--- -# call it like -#--- -## provisioner "local-exec" { -## environment = { -## KUBECONFIG = "${path.root}/setup/kube.config" -## } -## command = "kubectli set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true" -## } diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/parent_rs.tf deleted file mode 100644 index 7d4b782..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/includes.d/parent_rs.tf +++ /dev/null @@ -1,4 +0,0 @@ -# replace TF remote state accordingly in parent_rs with that from the parent directory, and be sure to make the link -locals { - parent_rs = data.terraform_remote_state.vpc-state-path_application-state-path-eks-cluster-name.outputs -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/README.md b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/README.md deleted file mode 100644 index 6915c05..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# irsa-roles - -This is a directory under which actual IRSA role subdirectories exist. No resources are created here. - -See the directories to follow the directions containd within: - -* cluster-autoscaler - -## Setup Steps - -First, copy the `remote_state.yml` from the parent and update `directory` to be the current directory. 
- -## Terraform Automated - -A `tf-run.data` file exists here, so the simplest way to implemnt is with the `tf-run.sh` script. - -* copy the `remote_state.yml` from the parent and update `directory` to be the current directory -* run the tf-run.sh - -```console -% tf-run.sh apply -``` - -* example of the `tf-run.sh` steps - -This is part of a larger cluster configuration, so at the end of the run it indicates another directory -to visit when done. - -```console -% tf-run.sh list -* running action=plan -* START: tf-run.sh v1.1.2 start=1636562881 end= logfile=logs/run.plan.20211110.1636562881.log (not-created) -* reading from tf-run.data -* read 6 entries from tf-run.data -> list -** START: start=1636562881 -* 1 COMMAND> tf-directory-setup.py -l none -f -* 2 COMMAND> setup-new-directory.sh -* 3 COMMAND> tf-init -upgrade -* 4 tf-plan -* 5 COMMAND> tf-directory-setup.py -l s3 -* 6 COMMENT> cd cluster-roles and tf-run.sh apply -** END: start=1636562881 end=1636562881 elapsed=0 logfile=logs/run.plan.20211110.1636562881.log (not-created) -``` - -It is highly recommended to use the `tf-run.sh` approach. - -## Terraform Manual - -* setup - -```shell -tf-directory-setup.py -l none -setup-new-directory.sh -tf-init -```` - -* Apply the rest - -```shell -tf-plan -tf-apply -tf-directory-setup.py -l s3 -``` diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/README.md b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/README.md deleted file mode 100644 index bc949cb..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# irsa-roles: cluster-autoscaler - -This sets up the needed IAM roles for service accounts for the cluster autoscaler. - - -## Setup - -First, copy the `remote_state.yml` from the parent and update `directory` to be the current directory. 
- -## Terraform Automated - -A `tf-run.data` file exists here, so the simplest way to implement is with the `tf-run.sh` script. - -```console -% tf-run.sh apply -``` - -* example of the tf-run.sh`steps - -This is part of a larger cluster configuration, so at the end of the run it indicates another directory -to visit when done. - -```console -% tf-run.sh list -* running action=plan -* START: tf-run.sh v1.1.2 start=1636561755 end= logfile=logs/run.plan.20211110.1636561755.log (not-created) -* reading from tf-run.data -* read 6 entries from tf-run.data -> list -** START: start=1636561755 -* 1 COMMAND> tf-directory-setup.py -l none -* 2 COMMAND> setup-new-directory.sh -* 3 COMMAND> tf-init -upgrade -* 4 POLICY> (*.tf) aws_iam_policy.app_policy1 -* 4 tf-plan -target=aws_iam_policy.app_policy1 -* 5 tf-plan -* 6 COMMAND> tf-directory-setup.py -l s3 -** END: start=1636561755 end=1636561755 elapsed=0 logfile=logs/run.plan.20211110.1636561755.log (not-created) -``` - -It is highly recommended to use the `tf-run.sh` approach. 
- -## Terraform Manual - -```shell -tf-directory-setup.py -l none -setup-new-directory.sh -tf-init -```` - -* Apply the the policies - -```shell -tf-plan -target=aws_iam_policy.app_policy1 -tf-apply -target=aws_iam_policy.app_policy1 -``` - -* Apply the rest - -```shell -tf-plan -tf-apply -tf-directory-setup.py -l s3 -``` - -## Post Setup Examination diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/data.eks-subdirectory.tf deleted file mode 120000 index 05ab52d..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/data.eks-subdirectory.tf +++ /dev/null @@ -1 +0,0 @@ -../data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/irsa-roles.autoscale.tf.off b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/irsa-roles.autoscale.tf.off deleted file mode 100644 index 8199a2e..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/irsa-roles.autoscale.tf.off +++ /dev/null @@ -1,63 +0,0 @@ -data "aws_iam_policy_document" "assume_role_policy" { - statement { - actions = ["sts:AssumeRoleWithWebIdentity"] - effect = "Allow" - - condition { - test = "StringEquals" - variable = "${local.oidc_provider_url}:sub" - values = ["system:serviceaccount:${var.namespace}:${var.name}"] - } - - principals { - identifiers = [local.oidc_provider_arn] - type = "Federated" - } - } -} - -data "aws_iam_policy_document" "app_policy1"{ - statement { - sid = "ClusterAutoscaler" - effect = "Allow" - actions = [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "autoscaling:SetDesiredCapacity", - 
"autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:DescribeLaunchTemplateVersions" - ] - resources = ["*"] - } -} - -resource "aws_iam_policy" "app_policy1" { - name = format("%v%v-%v-%v-policy1", local._prefixes["eks-policy"], var.cluster_name, var.namespace, var.name) - path = "/" - policy = data.aws_iam_policy_document.app_policy1.json - -} - -module "app_role" { - source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git" - - role_name = format("%v%v-irsa-%v-%v", local._prefixes["eks"], var.cluster_name, var.namespace, var.name) - role_description = "EKS IAM Role for ${var.cluster_name} for service account ${var.namespace}:${var.name}" - enable_ldap_creation = false - assume_policy_document = data.aws_iam_policy_document.assume_role_policy.json - attached_policies = [aws_iam_policy.app_policy1.arn] - - tags = merge( - local.base_tags, - local.common_tags, - var.tags, - var.application_tags, - ) -} - -output "app_role_arn" { - description = "ARN of IAM Role for Service account for cluster-autoscaler" - value = module.app_role.role_arn -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/locals.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/locals.tf deleted file mode 100644 index a65fb20..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/locals.tf +++ /dev/null @@ -1,17 +0,0 @@ -locals { - base_tags = { - "eks:cluster_name" = var.cluster_name - "boc:tf_module_version" = local._module_version - "boc:created_by" = "terraform" - } -} - -# replace TF remote state accordingly in parent_rs with that from the parent directory, and be sure to make the link -locals { - vpc_id = local.parent_rs.cluster_vpc_id - subnet_ids = local.parent_rs.cluster_subnet_ids - cluster_worker_sg_id = local.parent_rs.cluster_worker_sg_id - - oidc_provider_url = local.parent_rs.oidc_provider_url - oidc_provider_arn = 
local.parent_rs.oidc_provider_arn -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/parent_rs.tf deleted file mode 120000 index dfccf35..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/parent_rs.tf +++ /dev/null @@ -1 +0,0 @@ -../parent_rs.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/policy.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/policy.tf deleted file mode 100644 index da92e08..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/policy.tf +++ /dev/null @@ -1,23 +0,0 @@ -data "aws_iam_policy_document" "app_policy1" { - statement { - sid = "ClusterAutoscaler" - effect = "Allow" - actions = [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:DescribeLaunchTemplateVersions" - ] - resources = ["*"] - } -} - -resource "aws_iam_policy" "app_policy1" { - name = format("%v%v-%v__%v__%v", local._prefixes["eks-policy"], var.cluster_name, "p1", var.namespace, var.name) - path = "/" - policy = data.aws_iam_policy_document.app_policy1.json - -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/prefixes.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/prefixes.tf deleted file mode 120000 index e0bf5ad..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/prefixes.tf +++ /dev/null @@ -1 +0,0 @@ -../prefixes.tf \ No newline at end of file diff --git 
a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/providers.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/providers.tf deleted file mode 120000 index 7244d01..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/providers.tf +++ /dev/null @@ -1 +0,0 @@ -../providers.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/region.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/region.tf deleted file mode 100644 index b7b1696..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/region.tf +++ /dev/null @@ -1,4 +0,0 @@ -locals { - region = var.region -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/role.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/role.tf deleted file mode 100644 index 11a6b4d..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/role.tf +++ /dev/null @@ -1,47 +0,0 @@ -data "aws_iam_policy_document" "assume_role_policy" { - statement { - actions = ["sts:AssumeRoleWithWebIdentity"] - effect = "Allow" - - condition { - test = "StringEquals" - variable = "${local.oidc_provider_url}:sub" - values = ["system:serviceaccount:${var.namespace}:${var.name}"] - } - - principals { - identifiers = [local.oidc_provider_arn] - type = "Federated" - } - } -} - -# default name too long, remove the namespace from the role name -# include the namespace and role binding in tags - -module "app_role" { - source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade" - - # role_name = format("%v%v-irsa__%v__%v", local._prefixes["eks"], var.cluster_name, var.namespace, var.name) - role_name = format("%v%v-irsa__%v", local._prefixes["eks"], var.cluster_name, var.name) - 
role_description = "EKS IAM Role for ${var.cluster_name} for service account ${var.namespace}:${var.name}" - enable_ldap_creation = false - assume_policy_document = data.aws_iam_policy_document.assume_role_policy.json - attached_policies = [aws_iam_policy.app_policy1.arn] - - tags = merge( - local.base_tags, - local.common_tags, - var.tags, - var.application_tags, - { - "eks:namespace" = var.namespace - "eks:user" = var.name - } - ) -} - -output "app_role_arn" { - description = "ARN of IAM Role for Service account for cluster-autoscaler" - value = module.app_role.role_arn -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/service_account.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/service_account.tf deleted file mode 100644 index 2a0d9e0..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/service_account.tf +++ /dev/null @@ -1,11 +0,0 @@ -# https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html - -resource "kubernetes_service_account" "app" { - metadata { - name = var.name - namespace = var.namespace - annotations = { - "eks.amazonaws.com/role-arn" = module.app_role.role_arn - } - } -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/tf-run.data b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/tf-run.data deleted file mode 100644 index 67ebd3e..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/tf-run.data +++ /dev/null @@ -1,13 +0,0 @@ -VERSION 1.2.2 -REMOTE-STATE -COMMAND tf-directory-setup.py -l none -COMMAND setup-new-directory.sh -COMMAND tf-init -upgrade -COMMAND ln -sf ../versions.tf -COMMAND ln -sf ../settings.auto.tfvars -LINKTOP init -POLICY -ALL -COMMAND tf-directory-setup.py -l s3 - -COMMENT cd .. 
and execute any additional directories diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/variables.eks.tf deleted file mode 120000 index 7dd95db..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/variables.eks.tf +++ /dev/null @@ -1 +0,0 @@ -../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/variables.irsa.auto.tfvars b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/variables.irsa.auto.tfvars deleted file mode 100644 index 45b1bf3..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/variables.irsa.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ -namespace = "kube-system" -namespace_short = "" -name = "cluster-autoscaler" diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/variables.irsa.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/variables.irsa.tf deleted file mode 120000 index 840e7bb..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/variables.irsa.tf +++ /dev/null @@ -1 +0,0 @@ -../variables.irsa.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/variables.tags.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/variables.tags.tf deleted file mode 120000 index 2622118..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/variables.tags.tf +++ /dev/null @@ -1 +0,0 @@ -../variables.tags.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/version.tf 
b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/version.tf deleted file mode 120000 index 061373c..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/version.tf +++ /dev/null @@ -1 +0,0 @@ -../version.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/versions.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/versions.tf deleted file mode 120000 index 8bd0ff1..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/cluster-autoscaler/versions.tf +++ /dev/null @@ -1 +0,0 @@ -../versions.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/data.eks-subdirectory.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/data.eks-subdirectory.tf deleted file mode 120000 index 43b5430..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/data.eks-subdirectory.tf +++ /dev/null @@ -1 +0,0 @@ -../includes.d/data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/parent_rs.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/parent_rs.tf deleted file mode 120000 index d85ece6..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/parent_rs.tf +++ /dev/null @@ -1 +0,0 @@ -../includes.d/parent_rs.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/prefixes.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/prefixes.tf deleted file mode 120000 index e0bf5ad..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/prefixes.tf +++ /dev/null @@ -1 +0,0 @@ -../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/providers.tf 
b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/providers.tf deleted file mode 120000 index 7244d01..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/providers.tf +++ /dev/null @@ -1 +0,0 @@ -../providers.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/region.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/region.tf deleted file mode 100644 index b7b1696..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/region.tf +++ /dev/null @@ -1,4 +0,0 @@ -locals { - region = var.region -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/tf-run.data b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/tf-run.data deleted file mode 100644 index 3e6ef7c..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/tf-run.data +++ /dev/null @@ -1,12 +0,0 @@ -VERSION 1.2.2 -REMOTE-STATE -COMMAND tf-directory-setup.py -l none -f -COMMAND setup-new-directory.sh -COMMAND tf-init -upgrade -COMMAND ln -sf ../versions.tf -COMMAND ln -sf ../settings.auto.tfvars -LINKTOP init -ALL -COMMAND tf-directory-setup.py -l s3 - -COMMENT cd cluster-autoscaler and tf-run.sh apply diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/variables.eks.tf deleted file mode 120000 index 7dd95db..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/variables.eks.tf +++ /dev/null @@ -1 +0,0 @@ -../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/variables.irsa.auto.tfvars b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/variables.irsa.auto.tfvars deleted file mode 100644 index d436089..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/variables.irsa.auto.tfvars +++ /dev/null @@ -1,3 +0,0 @@ 
-name = "unknown" -namespace = "unknown" -namespace_short = "" diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/variables.irsa.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/variables.irsa.tf deleted file mode 100644 index 63f3ab1..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/variables.irsa.tf +++ /dev/null @@ -1,14 +0,0 @@ -variable "namespace" { - description = "K8S namespace for IAM Role for Service Account (per-pod)" - type = string -} - -variable "namespace_short" { - description = "K8S namespace for IAM Role for Service Account (per-pod), short version (without the cluster name) to keep the role name under 64 characters" - type = string -} - -variable "name" { - description = "K8S service names for IAM Role for Service Account (per-pod)" - type = string -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/variables.tags.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/variables.tags.tf deleted file mode 120000 index 2622118..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/variables.tags.tf +++ /dev/null @@ -1 +0,0 @@ -../variables.tags.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/version.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/version.tf deleted file mode 120000 index 061373c..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/version.tf +++ /dev/null @@ -1 +0,0 @@ -../version.tf \ No newline at end of file diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/versions.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/versions.tf deleted file mode 120000 index 8bd0ff1..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/irsa-roles/versions.tf +++ /dev/null @@ -1 +0,0 @@ -../versions.tf \ No newline at end of file diff --git 
a/examples/full-cluster-tf-upgrade/1.24.in-progress/kubeconfig.eks-main.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/kubeconfig.eks-main.tf deleted file mode 100644 index 5a6333e..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/kubeconfig.eks-main.tf +++ /dev/null @@ -1,29 +0,0 @@ -resource "null_resource" "kubeconfig" { - triggers = { - always_run = timestamp() - } - provisioner "local-exec" { - command = "which kubectl > /dev/null 2>&1; if [ $? != 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" - } - provisioner "local-exec" { - command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'" - } - provisioner "local-exec" { - environment = { - AWS_PROFILE = var.profile - AWS_REGION = local.region - } - command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/kube.config" - } - depends_on = [aws_eks_cluster.eks_cluster] -} - -#--- -# call it like -#--- -## provisioner "local-exec" { -## environment = { -## KUBECONFIG = "${path.root}/setup/kube.config" -## } -## command = "kubectli set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true" -## } diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/main.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/main.tf deleted file mode 100644 index 7489367..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/main.tf +++ /dev/null @@ -1,230 +0,0 @@ -data "aws_vpc" "eks_vpc" { - filter { - name = "tag:Name" - values = [var.eks_vpc_name] - } -} - -data "aws_subnets" "subnets" { - filter { - name = "tag:Name" - values = [var.subnets_name] - } - filter { - name = "vpc-id" - values = [data.aws_vpc.eks_vpc.id] - } -} - -data "aws_subnet" "subnets" { - for_each = toset(data.aws_subnets.subnets.ids) - id = each.key -} - -data "aws_ebs_default_kms_key" "current" {} - -data "aws_kms_key" "ebs_key" { - key_id = data.aws_ebs_default_kms_key.current.key_arn -} - -# in ew, need to exclude us-east-1e for 
now, as it lacks sufficient resources to establish the cluster -locals { - vpc_id = data.aws_vpc.eks_vpc.id - vpc_cidr_block = data.aws_vpc.eks_vpc.cidr_block - subnets = [for k, v in data.aws_subnet.subnets : v.id if length(regexall("us-east-1e", v.availability_zone)) == 0] - s3_base_arn = format("arn:%v:%v:::%%v", data.aws_arn.current.partition, "s3") - - base_tags = { - "eks-cluster-name" = var.cluster_name - "boc:tf_module_version" = local._module_version - "boc:created_by" = "terraform" - } - - # https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html - autoscale_tags = { - format("k8s.io/cluster-autoscaler/%v", var.cluster_name) = "owned" - "k8s.io/cluster-autoscaler/enabled" = "TRUE" - } - -} - -# we changed endpoint_public_access to false by default. This is so we can reach the EKS API through private IPs -# from on-prem and from the cloud. Otherwise, another account outside of where this is created will be unable to -# access teh API. This also requires a SG change in securitygroup.tf - -resource "aws_eks_cluster" "eks_cluster" { - name = var.cluster_name - version = var.cluster_version - role_arn = module.role_eks-cluster.role_arn - enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] - - vpc_config { - subnet_ids = local.subnets - security_group_ids = [aws_security_group.additional_eks_cluster_sg.id] - endpoint_private_access = true - endpoint_public_access = false - public_access_cidrs = var.census_public_cidr - } - - tags = merge( - local.base_tags, - local.common_tags, - var.tags, - var.application_tags, - ) - - # Ensure that IAM Role permissions are created before and deleted after EKS Cluster handling. - # Otherwise, EKS will not be able to properly delete EKS managed EC2 infrastructure such as Security Groups. 
- depends_on = [ - module.role_eks-cluster, - module.role_eks-nodegroup - ] -} - -resource "aws_eks_node_group" "eks-nodegroup" { - cluster_name = aws_eks_cluster.eks_cluster.name - node_group_name = format("%v%v-nodegroup", local._prefixes["eks"], var.cluster_name) - node_role_arn = module.role_eks-nodegroup.role_arn - subnet_ids = local.subnets - # instance_types = [var.eks_instance_type] - # disk_size = var.eks_instance_disk_size - - scaling_config { - desired_size = var.eks_ng_desire_size - max_size = var.eks_ng_max_size - min_size = var.eks_ng_min_size - } - - launch_template { - id = aws_launch_template.eks-nodegroup.id - version = aws_launch_template.eks-nodegroup.latest_version - } - - tags = merge( - local.base_tags, - local.common_tags, - var.tags, - var.application_tags, - local.autoscale_tags, - ) - - lifecycle { - ignore_changes = [launch_template, scaling_config] - } - - # Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling. - # Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces. 
- depends_on = [ - module.role_eks-cluster, - module.role_eks-nodegroup, - ] -} - -#--- -# Launch Template with AMI -#--- -#data "aws_ssm_parameter" "cluster" { -# name = "/aws/service/eks/optimized-ami/${aws_eks_cluster.eks_cluster.version}/amazon-linux-2/recommended/image_id" -#} - -#data "aws_launch_template" "cluster" { -# name = aws_launch_template.cluster.name -# -# depends_on = [aws_launch_template.cluster] -#} - -locals { - launch_template_tags = { - "Name" = format("%v%v-nodegroup-instance-name", local._prefixes["eks"], var.cluster_name) - format("kubernetes.io/cluster/%v", var.cluster_name) = "owned" - } -} - -resource "aws_launch_template" "eks-nodegroup" { - instance_type = var.eks_instance_type - name = format("%v%v-launch-template", local._prefixes["eks"], var.cluster_name) - update_default_version = true - key_name = aws_key_pair.cluster_keypair.key_name - - tags = merge( - local.base_tags, - local.common_tags, - var.tags, - var.application_tags, - ) - - tag_specifications { - resource_type = "instance" - - tags = merge( - local.base_tags, - tomap({ "boc:created_by" = "eks-launch-template" }), - local.common_tags, - local.launch_template_tags, - var.tags, - var.application_tags, - ) - } - - tag_specifications { - resource_type = "volume" - - tags = merge( - local.base_tags, - tomap({ "boc:created_by" = "eks-launch-template" }), - local.common_tags, - var.tags, - var.application_tags, - ) - } - - tag_specifications { - resource_type = "network-interface" - - tags = merge( - local.base_tags, - tomap({ "boc:created_by" = "eks-launch-template" }), - local.common_tags, - var.tags, - var.application_tags, - ) - } - - # tag_specifications { - # resource_type = "snapshot" - # - # tags = merge( - # local.base_tags, - # tomap({ "boc:created_by" = "eks-launch-template" }), - # local.common_tags, - # var.tags, - # ) - # } - - block_device_mappings { - device_name = "/dev/xvda" - - ebs { - volume_size = var.eks_instance_disk_size - delete_on_termination = true - 
encrypted = true - # kms_key_id = data.aws_kms_key.ebs_key.arn - # kms_key_id = data.aws_ebs_default_kms_key.current.key_arn - kms_key_id = data.aws_kms_key.ebs_key.arn - } - } - - user_data = base64encode(local.eks-node-private-userdata) -} - -#### User data for worker launch - -locals { - eks-node-private-userdata = templatefile( - "${path.module}/templates/node-private-userdata.tmpl", { - endpoint = aws_eks_cluster.eks_cluster.endpoint - cluster_ca = aws_eks_cluster.eks_cluster.certificate_authority[0].data - cluster_name = var.cluster_name - } - ) -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/oidc.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/oidc.tf deleted file mode 100644 index 311b99d..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/oidc.tf +++ /dev/null @@ -1,32 +0,0 @@ -# Most of this file references the AWS documentation to install the -# Amazon EFS CSI driver. This documentation is found here: -# https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html - -data "tls_certificate" "certs" { - url = aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer -} - -# Create the oidc provider for the service account. 
This is a prerequisite -# for using the EFS CSI Driver: -# https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html - -resource "aws_iam_openid_connect_provider" "oidc" { - client_id_list = ["sts.amazonaws.com"] - thumbprint_list = [data.tls_certificate.certs.certificates[0].sha1_fingerprint] - url = aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer -} - -locals { - oidc_provider_url = replace(aws_iam_openid_connect_provider.oidc.url, "https://", "") - oidc_provider_arn = aws_iam_openid_connect_provider.oidc.arn -} - -output "oidc_provider_url" { - description = "OpenID Connector provider URL" - value = local.oidc_provider_url -} - -output "oidc_provider_arn" { - description = "OpenID Connector provider ARN" - value = local.oidc_provider_arn -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/outputs.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/outputs.tf deleted file mode 100644 index e95c90d..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/outputs.tf +++ /dev/null @@ -1,58 +0,0 @@ -#output "cluster" { -# description = "Full EKS Cluster object output" -# value = aws_eks_cluster.eks_cluster -#} - -output "cluster_name" { - description = "The name of the cluster that was created." - value = aws_eks_cluster.eks_cluster.name -} - -output "cluster_endpoint" { - description = "The endpoint used to reach the Kubernetes API server." - value = aws_eks_cluster.eks_cluster.endpoint -} - -output "cluster_certificate_authority_data" { - description = "Certificate data required to successfully communicate with the Kubernetes API server." - value = aws_eks_cluster.eks_cluster.certificate_authority[0].data -} - -output "cluster_auth_token" { - description = "The token required to authenticate with the cluster." 
- # value = data.aws_eks_cluster_auth.eks_auth.token - value = local.aws_eks_cluster_auth.token - sensitive = true -} - -output "cluster_worker_sg_id" { - description = "Security group ids attached to the cluster worker nodes." - value = aws_security_group.all_worker_mgmt.id -} - -output "cluster_sg_id" { - description = "Security group ids attached to the cluster control plane." - value = aws_security_group.additional_eks_cluster_sg.id -} - -output "cluster_subnet_ids" { - description = "Subnet IDs used to create the cluster" - value = local.subnets -} - -output "cluster_vpc_id" { - description = "VPC IDs on which the cluster was created" - value = local.vpc_id -} - -## # secondary subnets -## output "cluster_cni_subnet_ids" { -## description = "Subnet IDs used to create the cluster on the CNI custom network." -## value = local.cni_subnets -## } -## -## output "cluster_cni_custom_sg_id" { -## description = "Security group ids attached to the cluster worker nodes for CNI custom networking.." 
-## value = aws_security_group.cni_custom_sg.id -## } -## diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/policy.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/policy.tf deleted file mode 100644 index b7ea3b0..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/policy.tf +++ /dev/null @@ -1,185 +0,0 @@ -resource "aws_iam_policy" "nlb-policy" { - name = format("%v%v-nlb", local._prefixes["eks-policy"], var.cluster_name) - path = "/" - description = "Allow configuration of the ELB" - policy = data.aws_iam_policy_document.nlb-policy.json - - tags = merge( - local.base_tags, - var.tags, - var.application_tags, - ) -} - -# Q: why CreateSecurityGroup -# TBD: refine resources to limit only to eks configurations -data "aws_iam_policy_document" "nlb-policy" { - statement { - sid = "EKSNLBConfiguration" - effect = "Allow" - actions = [ - "elasticloadbalancing:*", - "ec2:CreateSecurityGroup", - "ec2:Describe*", - ] - resources = ["*"] - } -} - -resource "aws_iam_policy" "cloudwatch-policy" { - name = format("%v%v-cloudwatch", local._prefixes["eks-policy"], var.cluster_name) - path = "/" - description = "Allow sending metric data to cloudwatch" - policy = data.aws_iam_policy_document.cloudwatch-policy.json - - tags = merge( - local.base_tags, - var.tags, - var.application_tags, - ) -} - -# TBD: refine resources to limit only to eks configurations -data "aws_iam_policy_document" "cloudwatch-policy" { - statement { - sid = "EKSCloudwatchMetrics" - effect = "Allow" - actions = [ - "cloudwatch:PutMetricData", - ] - resources = ["*"] - } -} - -#--- -# cluster admin policy -#--- -resource "aws_iam_policy" "cluster-admin-policy" { - name = format("%v%v-cluster-admin", local._prefixes["eks-policy"], var.cluster_name) - path = "/" - description = "Allow for administration of the cluster ${var.cluster_name} using AWS resources" - policy = data.aws_iam_policy_document.cluster-admin-policy.json - - tags = merge( - local.base_tags, - var.tags, - 
var.application_tags, - ) -} - -data "aws_iam_policy_document" "cluster-admin-policy" { - dynamic "statement" { - for_each = local.admin_policy_statements - iterator = s - content { - sid = format("%v%vAccess", lookup(s.value, "effect", "Allow"), s.key) - effect = lookup(s.value, "effect", "Allow") - actions = lookup(s.value, "actions", []) - resources = lookup(s.value, "resources", []) - } - } -} - -locals { - base_arn = format("arn:%v:%%v:%v:%v:%%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id) - iam_arn = format("arn:%v:iam::%v:%%v", data.aws_arn.current.partition, data.aws_caller_identity.current.account_id) - common_arn = format("arn:%v:%%v:%v:%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id) - eks_resources = ["cluster", "addon", "nodegroup", "identityproviderconfig"] - - admin_policy_statements = { - ECRRead = { - actions = [ - "ecr:Describe*", - "ecr:Get*", - "ecr:ListImages", - "ecr:BatchGetImage", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - ] - resources = ["*"] - } - ECRWrite = { - actions = [ - "ecr:BatchDeleteImage", - "ecr:CompleteLayerUpload", - "ecr:CreateRepository", - "ecr:DeleteRepository", - "ecr:InitiateLayerUpload", - "ecr:PutImage", - "ecr:UploadLayerPart" - ] - resources = [format(local.common_arn, "ecr", format("repository/eks/%v/*", var.cluster_name))] - } - EKSRead = { - actions = [ - "eks:ListClusters", - "eks:ListAddons", - "eks:ListNodegroups", - "eks:DescribeCluster", - "eks:DescribeAddon*", - "eks:DescribeNodegroup", - ] - resources = [ - format(local.common_arn, "eks", "cluster/*"), - format(local.common_arn, "eks", "addon/*"), - format(local.common_arn, "eks", "addons/*"), - format(local.common_arn, "eks", "/addons/*"), - format(local.common_arn, "eks", "nodegroup/*"), - ] - } - IAMRead = { - actions = [ - "iam:ListRoles", - ] - resources = ["*"] - } - SSMGet = { - actions = 
[ - "ssm:GetParameter", - ] - resources = [ - format("arn:%v:%v:%v:%v:%v", data.aws_arn.current.partition, "ssm", data.aws_region.current.name, "", "parameter/aws/service/eks/*") - ] - } - EKSReadMyClusters = { - actions = [ - "eks:List*", - "eks:Read*", - "eks:Describe*", - "eks:AccessKubernetesApi", - ] - resources = flatten(concat( - [format(local.common_arn, "eks", format("/clusters/%v/addons", var.cluster_name))], - [for r in local.eks_resources : [format(local.common_arn, "eks", format("%v/%v", r, var.cluster_name)), - format(local.common_arn, "eks", format("%v/%v/*", r, var.cluster_name))]] - )) - } - } -} - - -#--- -# cluster admin assume policy -#--- -resource "aws_iam_policy" "cluster-admin_assume_policy" { - name = format("%v%v-cluster-admin-assume", local._prefixes["eks-policy"], var.cluster_name) - path = "/" - description = "Allow for assume role to the cluster-admin role for ${var.cluster_name}" - policy = data.aws_iam_policy_document.cluster-admin_assume_policy.json - - tags = merge( - local.base_tags, - var.tags, - var.application_tags, - tomap({ "Name" = format("%v%v-cluster-admin-assume", local._prefixes["eks-policy"], var.cluster_name) }), - ) -} - -data "aws_iam_policy_document" "cluster-admin_assume_policy" { - statement { - sid = "AllowSTSAssumeClusterAdminRole" - effect = "Allow" - actions = ["sts:AssumeRole"] - resources = [module.role_cluster-admin.role_arn] - } -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/prefixes.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/prefixes.tf deleted file mode 100644 index 03303f1..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/prefixes.tf +++ /dev/null @@ -1,34 +0,0 @@ -locals { - _prefixes = { - "efs" = "v-efs-" - "s3" = "v-s3-" - "ebs" = "v-ebs-" - "kms" = "k-kms-" - "role" = "r-" - "policy" = "p-" - "group" = "g-" - "security-group" = "" # "sg-" - # VPC - "vpc" = "" - "dhcp-options" = "" - "vpc-peer" = "vpcp-" - "route-table" = "route-" - "subnet" = "" - 
"vpc-endpoint" = "vpce-" - "elastic-ip" = "eip-" - "nat-gateway" = "nat-" - "internet-gateway" = "igw-" - "network-acl" = "nacl-" - "customer-gateway" = "cgw-" - "vpn-gateway" = "vpcg-" - "vpn-connection" = "vpn_" - "log-group" = "lg-" - "log-stream" = "lgs-" - # EKS - "eks" = "eks-" - "eks-user" = "s-eks-" - "eks-role" = "r-eks-" - "eks-policy" = "p-eks-" - "eks-security-group" = "eks-" # "sg-eks-" - } -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/providers.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/providers.tf deleted file mode 100644 index f0e85a2..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/providers.tf +++ /dev/null @@ -1,25 +0,0 @@ -terraform { - required_version = ">= 0.12.31" -} - -# to import, you cannot have provider fields which count on data elements (as these locals show). You need to use the config_path. -# see these for more info: -# https://github.com/hashicorp/terraform-provider-kubernetes/issues/793 -# https://www.terraform.io/docs/cli/commands/import.html#provider-configuration -# https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs - -provider "kubernetes" { - host = local.aws_eks_cluster.endpoint - cluster_ca_certificate = base64decode(local.aws_eks_cluster.certificate_authority[0].data) - token = local.aws_eks_cluster_auth.token - # config_path = "${path.root}/setup/kube.config" -} - -provider "helm" { - kubernetes { - host = local.aws_eks_cluster.endpoint - - cluster_ca_certificate = base64decode(local.aws_eks_cluster.certificate_authority[0].data) - token = local.aws_eks_cluster_auth.token - } -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/region.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/region.tf deleted file mode 100644 index b7b1696..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/region.tf +++ /dev/null @@ -1,4 +0,0 @@ -locals { - region = var.region -} - diff --git 
a/examples/full-cluster-tf-upgrade/1.24.in-progress/role.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/role.tf deleted file mode 100644 index 3e97a71..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/role.tf +++ /dev/null @@ -1,164 +0,0 @@ -#--- -# cluster -#--- -locals { - cluster_managed_policy_list = [ - "AmazonEKSClusterPolicy", - "AmazonEC2FullAccess", - "CloudWatchLogsFullAccess", - ] - cluster_managed_policies = [for p in data.aws_iam_policy.cluster_managed_policies : p.arn] -} - -data "aws_iam_policy" "cluster_managed_policies" { - for_each = toset(local.cluster_managed_policy_list) - name = each.key -} - -# this needs the two policies nlb-policy and cloudwatch-policy, created first - -module "role_eks-cluster" { - source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade" - - role_name = format("%v%v-cluster", local._prefixes["eks"], var.cluster_name) - role_description = "EKS Cluster Role for ${var.cluster_name}" - enable_ldap_creation = false - assume_policy_document = data.aws_iam_policy_document.eks_assume.json - attached_policies = concat([aws_iam_policy.nlb-policy.arn, aws_iam_policy.cloudwatch-policy.arn], local.cluster_managed_policies) - - tags = merge( - local.base_tags, - local.common_tags, - var.tags, - var.application_tags, - var.application_tags, - ) -} - -data "aws_iam_policy_document" "eks_assume" { - statement { - sid = "EKSAssumeRole" - effect = "Allow" - actions = ["sts:AssumeRole"] - - principals { - type = "Service" - identifiers = ["eks.amazonaws.com"] - } - } -} - -output "role_eks-cluster_arn" { - description = "Role ARN for EKS Cluster Role" - value = module.role_eks-cluster.role_arn -} - -#--- -# nodegroup -#--- -locals { - nodegroup_managed_policy_list = [ - "AmazonEKSWorkerNodePolicy", - "AmazonEKS_CNI_Policy", - "AmazonEC2ContainerRegistryPowerUser", - "AmazonEC2ContainerRegistryReadOnly", - "CloudWatchLogsFullAccess", - "AmazonS3FullAccess", - "AmazonSSMManagedInstanceCore", 
- "AmazonEC2RoleforSSM", - ] - nodegroup_managed_policies = [for p in data.aws_iam_policy.nodegroup_managed_policies : p.arn] -} - -data "aws_iam_policy" "nodegroup_managed_policies" { - for_each = toset(local.nodegroup_managed_policy_list) - name = each.key -} - -module "role_eks-nodegroup" { - source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade" - - role_name = format("%v%v-nodegroup", local._prefixes["eks"], var.cluster_name) - role_description = "EKS Nodegroup Role for ${var.cluster_name}" - enable_ldap_creation = false - assume_policy_document = data.aws_iam_policy_document.ec2_assume.json - attached_policies = concat(local.nodegroup_managed_policies) - - tags = merge( - local.base_tags, - local.common_tags, - var.tags, - var.application_tags, - ) -} - -#---- -# STS: ec2 assume -#--- -data "aws_iam_policy_document" "ec2_assume" { - statement { - sid = "EKSAssumeRole" - effect = "Allow" - actions = ["sts:AssumeRole"] - - principals { - type = "Service" - identifiers = ["ec2.amazonaws.com"] - } - } -} - -output "role_eks-nodegroup-role_arn" { - description = "Role ARN for EKS Cluster Nodegroup Role" - value = module.role_eks-nodegroup.role_arn -} - -#--- -# cluster-admin -#--- -module "role_cluster-admin" { - source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git?ref=tf-upgrade" - - role_name = format("%v%v-cluster-admin", local._prefixes["eks"], var.cluster_name) - role_description = "SAML EKS cluster admin Role for ${var.cluster_name}" - enable_ldap_creation = false - assume_policy_document = data.aws_iam_policy_document.allow_sts.json - # assume_policy_document = data.aws_iam_policy_document.cluster-admin_combined.json - attached_policies = [aws_iam_policy.cluster-admin-policy.arn] - - tags = merge( - local.base_tags, - local.common_tags, - var.tags, - var.application_tags, - ) -} - -output "role_cluster-admin-role_arn" { - description = "Role ARN for EKS Cluster Admin Role" - value = 
module.role_cluster-admin.role_arn -} - -# data "aws_iam_policy_document" "empty" {} - -data "aws_iam_policy_document" "allow_sts" { - statement { - sid = "AllowSTSAssume" - effect = "Allow" - actions = ["sts:AssumeRole"] - principals { - type = "AWS" - identifiers = [ - format(local.iam_arn, "root"), - ] - } - } -} - -# data "aws_iam_policy_document" "cluster-admin_combined" -# source_policy_documents = [ -# data.aws_iam_policy_document.allow_sts.json -# data.aws_iam_policy_document.saml_assume.json, -# ] -# } -# diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/saml.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/saml.tf deleted file mode 100644 index 22c1f74..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/saml.tf +++ /dev/null @@ -1,26 +0,0 @@ -# because we can't link into remote state from the parent account, we have to use this -# also, there is no data source for saml provider - -locals { - saml_provider_arn = format(local.common_arn, "iam", "saml-provider/Census_TCO_IDMS") - saml_url = var.aws_environment == "gov" ? 
"https://signin.amazonaws-us-gov.com/saml" : "https://signin.aws.amazon.com/saml" -} - -data "aws_iam_policy_document" "saml_assume" { - statement { - sid = "SAMLFederationCensusIdP" - effect = "Allow" - actions = ["sts:AssumeRoleWithSAML"] - - principals { - type = "Federated" - identifiers = [local.saml_provider_arn] - } - - condition { - test = "StringEquals" - variable = "SAML:aud" - values = [local.saml_url] - } - } -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/securitygroup.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/securitygroup.tf deleted file mode 100644 index b66be01..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/securitygroup.tf +++ /dev/null @@ -1,99 +0,0 @@ -resource "aws_security_group" "additional_eks_cluster_sg" { - name = format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name) - - tags = merge( - local.base_tags, - local.common_tags, - var.tags, - var.application_tags, - tomap({ "Name" = format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name) }), - ) - - vpc_id = data.aws_vpc.eks_vpc.id - - ingress { - from_port = 0 - to_port = 0 - protocol = -1 - - security_groups = [ - aws_security_group.all_worker_mgmt.id, - ## aws_security_group.cni_custom_sg.id - ] - } - # this grants in-VPC access to the K8S api - # updated to get all census private cidrs to get on-prem, as we are now sending the interface traffic over - # a private IP only (disabling public access). 
This is to reach a cluster api from another account and VPC - # so we open all the cloud accounts too - ingress { - from_port = 443 - to_port = 443 - protocol = "tcp" - # cidr_blocks = [ var.vpc_cidr_block ] - cidr_blocks = concat(var.census_private_cidr, ["10.0.0.0/8"]) - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_security_group" "all_worker_mgmt" { - name = format("%v%v-all-worker-mgmt", local._prefixes["eks-security-group"], var.cluster_name) - - tags = merge( - local.base_tags, - local.common_tags, - var.tags, - var.application_tags, - tomap({ "Name" = format("%v%v-all-worker-mgmt", local._prefixes["eks-security-group"], var.cluster_name) }), - ) - - vpc_id = data.aws_vpc.eks_vpc.id - - ingress { - from_port = 0 - to_port = 0 - protocol = -1 - cidr_blocks = [local.vpc_cidr_block] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} - -## resource "aws_security_group" "cni_custom_sg" { -## name = format("%v%v-cni-custom-networking", local._prefixes["eks-security-group"], var.cluster_name) -## -## tags = merge( -## local.base_tags, -## local.common_tags, -## var.tags, -## ) -## -## vpc_id = data.aws_vpc.eks_vpc.id -## -## ingress { -## from_port = 0 -## to_port = 0 -## protocol = -1 -## cidr_blocks = [ -## local.vpc_cidr_block, -## var.cni_vpc_cidr_block, -## ] -## } -## -## egress { -## from_port = 0 -## to_port = 0 -## protocol = "-1" -## cidr_blocks = ["0.0.0.0/0"] -## } -## } diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/settings.auto.tfvars.example b/examples/full-cluster-tf-upgrade/1.24.in-progress/settings.auto.tfvars.example deleted file mode 100644 index 43dd34a..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/settings.auto.tfvars.example +++ /dev/null @@ -1,10 +0,0 @@ -cluster_name = "org-project-env" -cluster_version = "1.24" -region = "us-gov-east-1" -## domain = 
"org-project-env.env.domain.census.gov" -eks_instance_disk_size = 40 -eks_vpc_name = "*vpcshortname*" -eks_instance_type = "t3.xlarge" -eks_ng_desire_size = 3 -eks_ng_max_size = 15 -eks_ng_min_size = 3 diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/setup-env.sh b/examples/full-cluster-tf-upgrade/1.24.in-progress/setup-env.sh deleted file mode 100644 index 641465f..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/setup-env.sh +++ /dev/null @@ -1,6 +0,0 @@ - -export AWS_PROFILE=252960665057-ma6-gov -export ECR_NAME="252960665057.dkr.ecr.us-gov-east-1.amazonaws.com" -export HTTP_PROXY=http://proxy.tco.census.gov:3128 -export HTTPS_PROXY=http://proxy.tco.census.gov:3128 -export NO_PROXY=.census.gov diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/templates/node-private-userdata.tmpl b/examples/full-cluster-tf-upgrade/1.24.in-progress/templates/node-private-userdata.tmpl deleted file mode 100644 index 0770f07..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/templates/node-private-userdata.tmpl +++ /dev/null @@ -1,9 +0,0 @@ -MIME-Version: 1.0 -Content-Type: multipart/mixed; boundary="==MYBOUNDARY==" - ---==MYBOUNDARY== -Content-Type: text/x-shellscript; charset="us-ascii" -#!/bin/bash -xe -sudo /etc/eks/bootstrap.sh --apiserver-endpoint "$endpoint" --b64-cluster-ca "$cluster_ca" "$cluster_name" ---==MYBOUNDARY==--\ - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/tf-run.data b/examples/full-cluster-tf-upgrade/1.24.in-progress/tf-run.data deleted file mode 100644 index 0c88eb9..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/tf-run.data +++ /dev/null @@ -1,68 +0,0 @@ -VERSION 1.3.1 -REMOTE-STATE -COMMENT make sure the private-lb subnet and container subnets are tagged properly (see README.md) -STOP then continue with at step %%NEXT%% (tag:subnets-verified) - -TAG subnets-verified -COMMAND tf-directory-setup.py -l none -f -COMMAND setup-new-directory.sh -LINKTOP 
provider_configs.d/provider.ldap_new.auto.tfvars -LINKTOP provider_configs.d/provider.ldap_new.tf -LINKTOP provider_configs.d/provider.ldap_new.variables.tf -LINKTOP init - -LINKTOP includes.d/variables.account_tags.tf -LINKTOP includes.d/variables.account_tags.auto.tfvars -LINKTOP includes.d/variables.infrastructure_tags.tf -LINKTOP includes.d/variables.infrastructure_tags.auto.tfvars -LINKTOP includes.d/variables.application_tags.tf -LINKTOP includes.d/variables.application_tags.auto.tfvars - -COMMAND tf-init -upgrade - -COMMENT There are two placeholder files, variables.vpc.auto.tfvars.make-link and variables.vpc.tf.make-link. -COMMENT Ensure that variables.vpc.auto.tfvars and variables.vpc.tf are either (a) linked to the files in the parent or vpc/{region}/vpcN/ directory -COMMENT or (b) copied from the vpc/{region}/vpcN/ directory if this repo is separate from the main cloud account -COMMENT You may uncomment the next two lines in tf-run.data and apply them to make the links -#COMMAND ln -sf ../../variables.vpc.tf -#COMMAND ln -sf ../../variables.vpc.auto.tfvars - -COMMENT Also check that the variables.application_tags .tf and .auto.tfvars files are linked to the proper includes.d/path/file from the root of the -COMMENT git repository. You will get missing definitions on application_tags otherwise. - -STOP check variables.vpc.* files and then continue with %%NEXT%% (tag:setup-complete) - -TAG setup-complete -POLICY - -COMMENT EC2 key pairs -null_resource.generate_keypair -aws_key_pair.cluster_keypair -COMMAND tf-directory-setup.py -l s3 -COMMENT be sure to add the setup/ec2-ssh-eks-{cluster} to git-secret, git-secret hide, add the setup/*secret and setup/*pub got git, and commit the entirety of the change - -ALL - -COMMENT Assumes setup the includes.d/parent_rs.tf according to the REAMDE.md has been done, will fail if not. 
You can answer n at the pause if you are not sure -PAUSE - -TAG setup-aws-auth -COMMENT cd aws-auth and tf-run.sh apply -STOP Once applied in this subdirectory, come back here and continue with step %%NEXT%% (tag:setup-efs) - -TAG setup-efs -COMMENT cd efs and tf-run.sh apply -STOP Once applied in this subdirectory, come back here and continue with step %%NEXT%% (tag:setup-irsa) - -TAG setup-irsa -COMMENT cd irsa-roles and tf-run.sh apply -COMMENT Note: irsa-roles has other subdirectories to be applied, follow the directions from tf-run there -STOP Once applied in this subdirectory, come back here and continue with step %%NEXT%% (tag:setup-common-services) - -TAG setup-common-services -COMMENT cd common-services and tf-run.sh apply -COMMENT Notes: this subdirectory is complicated, and it has a certificate step which is manual -STOP Once applied in this subdirectory, come back here and continue with step %%NEXT%% (tag:complete) - -TAG complete -COMMENT You have completed the setup of the EKS cluster. 
There is a DNS Infoblox step, please contact badra001 for that diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.dns.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.dns.tf deleted file mode 100644 index c82d30c..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.dns.tf +++ /dev/null @@ -1,21 +0,0 @@ -variable "main_dns_vpcs" { - description = "Map of region and VPC ids of the vpc1-services in us-gov-west-1 and us-gov-east-1 for centralized DNS" - type = map(string) - default = { - "us-gov-west-1" = "vpc-77877a12" - "us-gov-east-1" = "vpc-099a991da7c4eb8a5" - } -} - -variable "main_dns_profile" { - description = "Profile name for AWS for the main DNS central account" - type = string - default = "107742151971-do2-govcloud" -} - - -variable "dns_zone_description_prefix" { - description = "Zone description with the org-project-program-environment" - type = string - default = "" -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.eks.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.eks.tf deleted file mode 100644 index b6ba4ca..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.eks.tf +++ /dev/null @@ -1,58 +0,0 @@ -variable "eks_vpc_name" { - description = "Define the VPC name that will be used by this cluster" - type = string - default = "*UNKNOWN*" -} - -variable "subnets_name" { - description = "Define the name of the subnets to be used by this cluster" - type = string - default = "*-container-*" -} - -variable "cluster_name" { - description = "EKS cluster name name component used through out the EKS cluster describing its purpose (ex: dice-dev)" - type = string - default = null -} - -variable "cluster_version" { - description = "The EKS version number, see https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html" - type = string - default = "1.21" -} - -variable "eks_instance_type" { - description = "EKS worker node instance 
type" - type = string - default = "t3.xlarge" -} -variable "eks_ng_desire_size" { - description = "Node Group desire size, default is 1" - type = number - default = 4 -} - -variable "eks_ng_min_size" { - description = "Node Group minimum size, default is 1" - type = number - default = 4 -} - -variable "eks_ng_max_size" { - description = "Node Group maximum size, default is 10" - type = number - default = 16 -} - -variable "eks_instance_disk_size" { - description = "The size of the disk in gigabytes" - type = number - default = 40 -} - -variable "domain" { - description = "The DNS domain name of the cluster. Defaults to empty which causes the sample application to use the domain assigned to the load balancer of the istio ingress gateway." - type = string - default = "" -} diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.tags.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.tags.tf deleted file mode 100644 index 6e2a62e..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.tags.tf +++ /dev/null @@ -1,9 +0,0 @@ -# this exists in CAT, but not in other accounts. At some point, remove this file and all references to -# var.tags - -variable "tags" { - description = "AWS Tags to apply to appropriate resources." - type = map(string) - default = {} -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.vpc.auto.tfvars.make-link b/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.vpc.auto.tfvars.make-link deleted file mode 100644 index 86d88cb..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.vpc.auto.tfvars.make-link +++ /dev/null @@ -1,9 +0,0 @@ -# For a submodule/subrepository, copy variables.vpc.auto.tfvars from the appropriate vpc/{region}/vpc{n}/ directory in the main repo. -# In the apps directory, tf-run.data will create links to it. 
Any eks-* directories under that will be picked up and created -# by setup-new-directory.sh -# -# For something directly in the main repo for the account this wil be handled by setup-new-directory.sh as the apps -# directory includes it already. -# -# If you fail to do this, you will get errors on missing variables. -# diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.vpc.tf.make-link b/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.vpc.tf.make-link deleted file mode 100644 index 86d88cb..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/variables.vpc.tf.make-link +++ /dev/null @@ -1,9 +0,0 @@ -# For a submodule/subrepository, copy variables.vpc.auto.tfvars from the appropriate vpc/{region}/vpc{n}/ directory in the main repo. -# In the apps directory, tf-run.data will create links to it. Any eks-* directories under that will be picked up and created -# by setup-new-directory.sh -# -# For something directly in the main repo for the account this wil be handled by setup-new-directory.sh as the apps -# directory includes it already. -# -# If you fail to do this, you will get errors on missing variables. 
-# diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/version.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/version.tf deleted file mode 100644 index 724e0f6..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/version.tf +++ /dev/null @@ -1,4 +0,0 @@ -locals { - _module_version = "1.0.0" -} - diff --git a/examples/full-cluster-tf-upgrade/1.24.in-progress/versions.tf b/examples/full-cluster-tf-upgrade/1.24.in-progress/versions.tf deleted file mode 100644 index ced1ff0..0000000 --- a/examples/full-cluster-tf-upgrade/1.24.in-progress/versions.tf +++ /dev/null @@ -1,41 +0,0 @@ -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 3.0" - } - ldap = { - source = "trevex/ldap" - version = ">= 0.5.4" - } - external = { - source = "hashicorp/external" - version = ">= 1.0" - } - null = { - source = "hashicorp/null" - version = ">= 1.0" - } - random = { - source = "hashicorp/random" - version = ">= 1.0" - } - template = { - source = "hashicorp/template" - version = ">= 1.0" - } - helm = { - source = "hashicorp/helm" - version = ">= 1.0" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 1.0" - } - time = { - source = "hashicorp/time" - version = ">= 0.9" - } - } - required_version = ">= 0.13" -}