From 4d6d41f54079fcd22389314424266f714dd6c0d4 Mon Sep 17 00:00:00 2001
From: Anthony Zawacki
Date: Tue, 26 Sep 2023 15:18:19 -0400
Subject: [PATCH] Updates for stuff moved from tfmod-eks to here.

---
 cluster_autoscaler.tf | 96 +++++++++++++++++++++++++++++++++++++++++++
 copy_images.tf        | 45 ++++++++++++++++++++
 eks_console_access.tf | 55 +++++++++++++++++++++++++
 outputs.tf            | 36 ++++++++++++++++
 requirements.tf       |  9 ++++
 variables.tf          | 31 ++++++++++++++
 6 files changed, 272 insertions(+)
 create mode 100644 cluster_autoscaler.tf
 create mode 100644 copy_images.tf
 create mode 100644 eks_console_access.tf

diff --git a/cluster_autoscaler.tf b/cluster_autoscaler.tf
new file mode 100644
index 0000000..e60a04a
--- /dev/null
+++ b/cluster_autoscaler.tf
@@ -0,0 +1,96 @@
+locals {
+  # https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html
+  autoscale_tags = {
+    format("k8s.io/cluster-autoscaler/%v", var.cluster_name) = "owned"
+    "k8s.io/cluster-autoscaler/enabled"                      = "TRUE"
+  }
+
+  ng_asg_name = module.cluster.eks_managed_node_groups["node_group"].node_group_resources[0].autoscaling_groups[0].name
+}
+
+module "cluster_autoscaler_irsa_role" {
+  source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
+
+  role_name = "${var.cluster_name}-cluster-autoscaler"
+
+  attach_cluster_autoscaler_policy = true
+
+  cluster_autoscaler_cluster_names = [module.cluster.cluster_name]
+
+  oidc_providers = {
+    main = {
+      provider_arn               = module.cluster.oidc_provider_arn
+      namespace_service_accounts = ["kube-system:cluster-autoscaler"]
+    }
+  }
+  tags = local.tags
+}
+
+resource "aws_autoscaling_group_tag" "on-demand" {
+  autoscaling_group_name = local.ng_asg_name
+  tag {
+    key                 = "k8s.io/cluster-autoscaler/node-template/label/eks.amazonaws.com/capacityType"
+    value               = "ON_DEMAND"
+    propagate_at_launch = true
+  }
+}
+
+data "kubernetes_namespace" "kube-system" {
+  depends_on = [
+    module.cluster.eks_managed_node_groups,
+  ]
+
+  metadata {
+    name = "kube-system"
"kube-system" + } +} + +resource "helm_release" "cluster-autoscaler" { + depends_on = [ + module.images, + module.cluster.eks_managed_node_groups, + ] + + chart = "cluster-autoscaler" + name = "cluster-autoscaler" + version = var.cluster_autoscaler_chart_version + namespace = data.kubernetes_namespace.kube-system.metadata[0].name + repository = "https://kubernetes.github.io/autoscaler" + + set { + name = "image.repository" + value = format("%v/%v", + module.images.images[local.autoscaler_key].dest_registry, + module.images.images[local.autoscaler_key].dest_repository + ) + } + set { + name = "image.tag" + value = module.images.images[local.autoscaler_key].tag + } + set { + name = "autoDiscovery.clusterName" + value = var.cluster_name + } + set { + name = "awsRegion" + value = var.region + } + + set { + name = "rbac.serviceAccount.name" + value = "cluster-autoscaler" + } + + set { + name = "rbac.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" + value = module.cluster_autoscaler_irsa_role.iam_role_arn + } + + set { + name = "rbac.serviceAccount.create" + value = "false" + } +} + + diff --git a/copy_images.tf b/copy_images.tf new file mode 100644 index 0000000..a5134e1 --- /dev/null +++ b/copy_images.tf @@ -0,0 +1,45 @@ +locals { + autoscaler_key = format("%v#%v", "cluster-autoscaler", var.cluster_autoscaler_tag) + kubectl_key = format("%v#%v", "kubectl", var.kubectl_image_tag) + + image_config = [ + { + enabled = true + dest_path = null + name = "cluster-autoscaler" + source_image = "autoscaling/cluster-autoscaler" + source_registry = "registry.k8s.io" + source_tag = null + tag = var.cluster_autoscaler_tag + }, + { + enabled = true + dest_path = null + name = "kubectl" + source_image = "bitnami/kubectl" + source_registry = "docker.io" + source_tag = var.kubectl_image_tag + tag = var.kubectl_image_tag + }, + ] +} + +module "images" { + source = "git@github.e.it.census.gov:terraform-modules/aws-ecr-copy-images.git/?ref=2.0.2" + + profile = var.profile + 
+  application_name = var.cluster_name
+  image_config     = local.image_config
+  tags             = {}
+
+  ### optional
+  ## account_alias        = ""
+  ## account_id           = ""
+  ## destination_password = ""
+  ## destination_username = ""
+  ## override_prefixes    = {}
+  ## region               = ""
+  ## source_password      = ""
+  ## source_username      = ""
+}
+
diff --git a/eks_console_access.tf b/eks_console_access.tf
new file mode 100644
index 0000000..04b9032
--- /dev/null
+++ b/eks_console_access.tf
@@ -0,0 +1,55 @@
+# ```shell
+# curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml
+# curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml
+# ```
+#
+# For full console, we'll use the first one.
+#
+# ```console
+# % kubectl apply -f eks-console-full-access.yaml
+# clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole created
+# clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding created
+# ```
+
+locals {
+  cluster_roles = [
+    {
+      name    = "eks-console-full-access"
+      url     = "https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml"
+      enabled = true
+    },
+    {
+      name    = "eks-console-restricted-access"
+      url     = "https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml"
+      enabled = false
+    },
+  ]
+  cluster_roles_map = { for cr in local.cluster_roles : cr.name => cr }
+}
+
+
+data "http" "cluster_roles" {
+  for_each = local.cluster_roles_map
+  url      = each.value.url
+}
+
+data "kubectl_file_documents" "access_documents" {
+  for_each = { for k, v in local.cluster_roles_map : k => v if v.enabled }
+
+  content = data.http.cluster_roles[each.key].response_body
+}
+
+locals {
+  all_access_documents = flatten([
+    for cr_name, cr_data in local.cluster_roles_map : [
+      for doc in data.kubectl_file_documents.access_documents[cr_name].manifests : doc
+    ] if cr_data.enabled
+  ])
+}
+
+resource "kubectl_manifest" "deploy_cluster_roles" {
+  count = length(local.all_access_documents)
+
+  yaml_body = local.all_access_documents[count.index]
+}
+
diff --git a/outputs.tf b/outputs.tf
index 5e56618..ddfe7c4 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -25,3 +25,39 @@ output "rwx_storage_class" {
   description = "Kubernetes storage class that supports read/write many."
   value       = kubernetes_storage_class.efs-sc.metadata[0].name
 }
+
+################################################################################
+# IRSA Roles created
+################################################################################
+output "cluster_autoscaler_irsa_role" {
+  description = "The arn/name/unique_id of the irsa role for the cluster autoscaler addon"
+  value = {
+    arn       = module.cluster_autoscaler_irsa_role.iam_role_arn
+    name      = module.cluster_autoscaler_irsa_role.iam_role_name
+    unique_id = module.cluster_autoscaler_irsa_role.iam_role_unique_id
+  }
+}
+
+################################################################################
+# Details about kubectl image
+################################################################################
+
+output "kubectl_image_full_path" {
+  description = "The full URI to access the kubectl image including the registry/repository:tag"
+  value       = module.images.images[local.kubectl_key].dest_full_path
+}
+
+output "kubectl_image_registry" {
+  description = "The registry portion of the URI to access the kubectl image"
+  value       = module.images.images[local.kubectl_key].dest_registry
+}
+
+output "kubectl_image_repository" {
+  description = "The repository portion of the URI to access the kubectl image"
+  value       = module.images.images[local.kubectl_key].dest_repository
+}
+
+output "kubectl_image_tag" {
+  description = "The tag portion of the URI to access the kubectl image"
+  value       = module.images.images[local.kubectl_key].tag
+}
diff --git a/requirements.tf b/requirements.tf
index 677f4fd..8c1f05b 100644
--- a/requirements.tf
+++ b/requirements.tf
@@ -6,9 +6,18 @@ terraform {
       source  = "hashicorp/aws"
"hashicorp/aws" version = ">= 5.14.0" } + helm = { + source = "hashicorp/helm" + version = ">= 2.11.0" + } + kubectl = { + source = "gavinbunney/kubectl" + version = ">= 1.14.0" + } kubernetes = { source = "hashicorp/kubernetes" version = ">= 2.23.0" } + } } diff --git a/variables.tf b/variables.tf index e9d7c6a..e0237d2 100644 --- a/variables.tf +++ b/variables.tf @@ -3,6 +3,17 @@ variable "cluster_name" { type = string } +variable "region" { + description = "AWS region" + type = string +} + +variable "profile" { + description = "AWS config profile" + type = string + default = "" +} + variable "vpc_id" { description = "Specify the VPC id that is used by this cluster" type = string @@ -30,3 +41,23 @@ variable "tags" { default = {} } +variable "kubectl_image_tag" { + description = "The version of bitnami/kubectl image to use." + type = string + default = "1.27.1" +} + +# helm add repo autoscaler "https://kubernetes.github.io/autoscaler" +# helm search repo -l autoscaler/cluster-autoscaler +variable "cluster_autoscaler_chart_version" { + description = "The helm chart of the cluster-autoscaler most closely matching the Kuberentes version. Review output of `helm add repo autoscaler 'https://kubernetes.github.io/autoscaler'` (if the repo hasn't been added previously) and `helm search repo -l autoscaler/cluster-autoscaler`" + type = string + default = "9.29.3" +} + +# helm show values --version [cluster_autoscaler_chart_version] autoscaler/cluster-autoscaler | grep tag: +variable "cluster_autoscaler_tag" { + description = "Image tag of cluster-autoscaler associated with the cluster_autoscaler_chart_version helm chart. `helm show values --version [cluster_autoscaler_chart_version] autoscaler/cluster-autoscaler | grep tag:`" + type = string + default = "v1.27.2" +}