From fbdca87fd626443c8da0f510e6d6301032369b82 Mon Sep 17 00:00:00 2001 From: Anthony Zawacki Date: Fri, 1 Sep 2023 09:50:29 -0400 Subject: [PATCH] Everything except cluster_admin related items due to ldap_provider. --- aws_data.tf | 17 ++++ cluster_admin_group.tf.disabled | 12 +++ cluster_admin_policies.tf.disabled | 129 +++++++++++++++++++++++++++++ cluster_admin_roles.tf.disabled | 26 ++++++ cluster_autoscaler.tf | 78 +++++++++++++++++ copy_images.tf | 35 ++++++++ eks_console_access.tf | 55 ++++++++++++ irsa_roles.tf | 64 ++++++++++++++ main.tf | 100 ++++++++-------------- outputs.tf | 129 ++++++++++++----------------- prefixes.tf | 34 ++++++++ requirements.tf | 42 ++++++++++ saml.tf | 26 ++++++ security_groups.tf | 72 ++++++++++++++++ storage_classes.tf | 69 +++++++++++++++ variables.common.tf | 24 ++++++ variables.tf | 21 +++-- 17 files changed, 789 insertions(+), 144 deletions(-) create mode 100644 aws_data.tf create mode 100644 cluster_admin_group.tf.disabled create mode 100644 cluster_admin_policies.tf.disabled create mode 100644 cluster_admin_roles.tf.disabled create mode 100644 cluster_autoscaler.tf create mode 100644 copy_images.tf create mode 100644 eks_console_access.tf create mode 100644 irsa_roles.tf create mode 100644 prefixes.tf create mode 100644 requirements.tf create mode 100644 saml.tf create mode 100644 security_groups.tf create mode 100644 storage_classes.tf create mode 100644 variables.common.tf diff --git a/aws_data.tf b/aws_data.tf new file mode 100644 index 0000000..c2dfb22 --- /dev/null +++ b/aws_data.tf @@ -0,0 +1,17 @@ +data "aws_caller_identity" "current" {} + +data "aws_region" "current" {} + +data "aws_arn" "current" { + arn = data.aws_caller_identity.current.arn +} + +locals { + base_arn = format("arn:%v:%%v:%v:%v:%%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id) + iam_arn = format("arn:%v:iam::%v:%%v", data.aws_arn.current.partition, 
data.aws_caller_identity.current.account_id) + common_arn = format("arn:%v:%%v:%v:%v:%%v", + data.aws_arn.current.partition, + data.aws_region.current.name, + data.aws_caller_identity.current.account_id) + +} diff --git a/cluster_admin_group.tf.disabled b/cluster_admin_group.tf.disabled new file mode 100644 index 0000000..5ee158f --- /dev/null +++ b/cluster_admin_group.tf.disabled @@ -0,0 +1,12 @@ +module "group_cluster-admin" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-group.git" + + group_name = format("%v%v-cluster-admin", local._prefixes["eks"], var.cluster_name) + attached_policies = [aws_iam_policy.cluster-admin-policy.arn, aws_iam_policy.cluster-admin_assume_policy.arn] + + tags = merge( + local.base_tags, + var.tags, + ) +} + diff --git a/cluster_admin_policies.tf.disabled b/cluster_admin_policies.tf.disabled new file mode 100644 index 0000000..ea3f658 --- /dev/null +++ b/cluster_admin_policies.tf.disabled @@ -0,0 +1,129 @@ +#--- +# cluster admin policy +#--- +locals { + eks_resources = ["cluster", "addon", "nodegroup", "identityproviderconfig"] + + admin_policy_statements = { + ECRRead = { + actions = [ + "ecr:Describe*", + "ecr:Get*", + "ecr:ListImages", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + ] + resources = ["*"] + } + ECRWrite = { + actions = [ + "ecr:BatchDeleteImage", + "ecr:CompleteLayerUpload", + "ecr:CreateRepository", + "ecr:DeleteRepository", + "ecr:InitiateLayerUpload", + "ecr:PutImage", + "ecr:UploadLayerPart" + ] + resources = [format(local.common_arn, "ecr", format("repository/eks/%v/* ", var.cluster_name))] + } + EKSRead = { + actions = [ + "eks:ListClusters", + "eks:ListAddons", + "eks:ListNodegroups", + "eks:DescribeCluster", + "eks:DescribeAddon*", + "eks:DescribeNodegroup", + ] + resources = [ + format(local.common_arn, "eks", "cluster/*"), + format(local.common_arn, "eks", "addon/*"), + format(local.common_arn, "eks", "addons/*"), + format(local.common_arn, 
"eks", "/addons/*"), + format(local.common_arn, "eks", "nodegroup/*"), + ] + } + IAMRead = { + actions = [ + "iam:ListRoles", + ] + resources = ["*"] + } + SSMGet = { + actions = [ + "ssm:GetParameter", + ] + resources = [ + format("arn:%v:%v:%v:%v:%v", data.aws_arn.current.partition, "ssm", data.aws_region.current.name, "", "parameter/aws/service/eks/*") + ] + } + EKSReadMyClusters = { + actions = [ + "eks:List*", + "eks:Read*", + "eks:Describe*", + "eks:AccessKubernetesApi", + ] + resources = flatten(concat( + tolist([format(local.common_arn, "eks", format("/clusters/%v/addons", var.cluster_name))]), + [for r in local.eks_resources : tolist([ + format(local.common_arn, "eks", format("%v/%v", r, var.cluster_name)), + format(local.common_arn, "eks", format("%v/%v/*", r, var.cluster_name)) + ])])) + } + } +} + +data "aws_iam_policy_document" "cluster-admin-policy" { + dynamic "statement" { + for_each = local.admin_policy_statements + iterator = s + content { + sid = format("%v%vAccess", lookup(s.value, "effect", "Allow"), s.key) + effect = lookup(s.value, "effect", "Allow") + actions = lookup(s.value, "actions", []) + resources = lookup(s.value, "resources", []) + } + } +} + +resource "aws_iam_policy" "cluster-admin-policy" { + name = format("%v%v-cluster-admin", local._prefixes["eks-policy"], var.cluster_name) + path = "/" + description = "Allow for administration of the cluster ${var.cluster_name} using AWS resources" + policy = data.aws_iam_policy_document.cluster-admin-policy.json + + tags = merge( + local.base_tags, + var.tags, + ) +} + +#--- +# cluster admin assume policy +#--- +resource "aws_iam_policy" "cluster-admin_assume_policy" { + name = format("%v%v-cluster-admin-assume", local._prefixes["eks-policy"], var.cluster_name) + path = "/" + description = "Allow for assume role to the cluster-admin role for ${var.cluster_name}" + policy = data.aws_iam_policy_document.cluster-admin_assume_policy.json + + tags = merge( + local.base_tags, + var.tags, + 
var.application_tags, + tomap({ "Name" = format("%v%v-cluster-admin-assume", local._prefixes["eks-policy"], var.cluster_name) }), + ) +} + +data "aws_iam_policy_document" "cluster-admin_assume_policy" { + statement { + sid = "AllowSTSAssumeClusterAdminRole" + effect = "Allow" + actions = ["sts:AssumeRole"] + resources = [module.role_cluster-admin.role_arn] + } +} + diff --git a/cluster_admin_roles.tf.disabled b/cluster_admin_roles.tf.disabled new file mode 100644 index 0000000..0c2a97c --- /dev/null +++ b/cluster_admin_roles.tf.disabled @@ -0,0 +1,26 @@ +#--- +# cluster-admin +#--- +module "role_cluster-admin" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git" + + role_name = format("%v%v-cluster-admin", local._prefixes["eks"], var.cluster_name) + role_description = "SAML EKS cluster admin Role for ${var.cluster_name}" + enable_ldap_creation = false + assume_policy_document = data.aws_iam_policy_document.allow_sts.json + # assume_policy_document = data.aws_iam_policy_document.cluster-admin_combined.json + attached_policies = [aws_iam_policy.cluster-admin-policy.arn] + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + ) +} + +output "role_cluster-admin-role_arn" { + description = "Role ARN for EKS Cluster Admin Role" + value = module.role_cluster-admin.role_arn +} + diff --git a/cluster_autoscaler.tf b/cluster_autoscaler.tf new file mode 100644 index 0000000..7a61b96 --- /dev/null +++ b/cluster_autoscaler.tf @@ -0,0 +1,78 @@ +locals { + # https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html + autoscale_tags = { + format("k8s.io/cluster-autoscaler/%v", var.cluster_name) = "owned" + "k8s.io/cluster-autoscaler/enabled" = "TRUE" + } + + ng_asg_name = module.cluster.eks_managed_node_groups["on_demand"].node_group_resources[0].autoscaling_groups[0].name +} + +resource "aws_autoscaling_group_tag" "on-demand" { + autoscaling_group_name = local.ng_asg_name + tag { + key = 
"k8s.io/cluster-autoscaler/node-template/label/eks.amazonaws.com/capacityType" + value = "ON_DEMAND" + propagate_at_launch = true + } +} + +data "kubernetes_namespace" "kube-system" { + depends_on = [ + module.cluster.eks_managed_node_groups, + ] + + metadata { + name = "kube-system" + } +} + +resource "helm_release" "cluster-autoscaler" { + depends_on = [ + module.images, + module.cluster.eks_managed_node_groups, + ] + + chart = "cluster-autoscaler" + name = "cluster-autoscaler" + version = var.cluster_autoscaler_chart_version + namespace = data.kubernetes_namespace.kube-system.metadata[0].name + repository = "https://kubernetes.github.io/autoscaler" + + set { + name = "image.repository" + value = format("%v/%v", + module.images.images[local.autoscaler_key].dest_registry, + module.images.images[local.autoscaler_key].dest_repository + ) + } + set { + name = "image.tag" + value = module.images.images[local.autoscaler_key].tag + } + set { + name = "autoDiscovery.clusterName" + value = var.cluster_name + } + set { + name = "awsRegion" + value = var.region + } + + set { + name = "rbac.serviceAccount.name" + value = "cluster-autoscaler" + } + + set { + name = "rbac.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" + value = module.cluster_autoscaler_irsa_role.iam_role_arn + } + + set { + name = "rbac.serviceAccount.create" + value = "false" + } +} + + diff --git a/copy_images.tf b/copy_images.tf new file mode 100644 index 0000000..9120b5e --- /dev/null +++ b/copy_images.tf @@ -0,0 +1,35 @@ +locals { + autoscaler_key = format("%v#%v", "cluster-autoscaler", var.cluster_autoscaler_tag) + + image_config = [ + { + enabled = true + dest_path = null + name = "cluster-autoscaler" + source_image = "autoscaling/cluster-autoscaler" + source_registry = "registry.k8s.io" + source_tag = null + tag = var.cluster_autoscaler_tag + }, + ] +} + +module "images" { + source = "git@github.e.it.census.gov:terraform-modules/aws-ecr-copy-images.git" + + profile = var.profile + 
application_name = var.cluster_name + image_config = local.image_config + tags = {} + + ### optional + ## account_alias = "" + ## account_id = "" + ## destination_password = "" + ## destination_username = "" + ## override_prefixes = {} + ## region = "" + ## source_password = "" + ## source_username = "" +} + diff --git a/eks_console_access.tf b/eks_console_access.tf new file mode 100644 index 0000000..04b9032 --- /dev/null +++ b/eks_console_access.tf @@ -0,0 +1,55 @@ +# ```shell +# curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml +# curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml +# ``` +# +# For full console, we'll use the first one. +# +# ```console +# % kubectl apply -f eks-console-full-access.yaml +# clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole created +# clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding created +# ``` + +locals { + cluster_roles = [ + { + name = "eks-console-full-access" + url = "https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml" + enabled = true + }, + { + name = "eks-console-restricted-access" + url = "https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml" + enabled = false + }, + ] + cluster_roles_map = { for cr in local.cluster_roles : cr.name => cr } +} + + +data "http" "cluster_roles" { + for_each = local.cluster_roles_map + url = each.value.url +} + +data "kubectl_file_documents" "access_documents" { + for_each = { for k, v in local.cluster_roles_map : k => v if v.enabled } + + content = data.http.cluster_roles[each.key].body +} + +locals { + all_access_documents = flatten([ + for cr_name, cr_data in local.cluster_roles_map : [ + for doc in data.kubectl_file_documents.access_documents[cr_name].manifests : doc + ] if cr_data.enabled + ]) +} + +resource "kubectl_manifest" "deploy_cluster_roles" { + count = 
length(local.all_access_documents) + + yaml_body = local.all_access_documents[count.index] +} + diff --git a/irsa_roles.tf b/irsa_roles.tf new file mode 100644 index 0000000..db5a487 --- /dev/null +++ b/irsa_roles.tf @@ -0,0 +1,64 @@ +module "vpc_cni_irsa_role" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + + role_name = "${var.cluster_name}-vpc-cni" + + attach_vpc_cni_policy = true + vpc_cni_enable_ipv4 = true + + oidc_providers = { + main = { + provider_arn = module.cluster.oidc_provider_arn + namespace_service_accounts = ["kube-system:aws-node"] + } + } + tags = local.tags +} + +module "ebs_csi_irsa_role" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + + role_name = "${var.cluster_name}-ebs-csi-driver" + attach_ebs_csi_policy = true + + oidc_providers = { + main = { + provider_arn = module.cluster.oidc_provider_arn + namespace_service_accounts = ["kube-system:ebs-csi-controller-sa"] + } + } + tags = local.tags +} + +module "efs_csi_irsa_role" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + + role_name = "${var.cluster_name}-efs-csi-driver" + attach_efs_csi_policy = true + + oidc_providers = { + main = { + provider_arn = module.cluster.oidc_provider_arn + namespace_service_accounts = ["kube-system:efs-csi-controller-sa"] + } + } + tags = local.tags +} + +module "cluster_autoscaler_irsa_role" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + + role_name = "${var.cluster_name}-cluster-autoscaler" + + attach_cluster_autoscaler_policy = true + + cluster_autoscaler_cluster_names = [module.cluster.cluster_name] + + oidc_providers = { + main = { + provider_arn = module.cluster.oidc_provider_arn + namespace_service_accounts = ["kube-system:cluster-autoscaler"] + } + } + tags = local.tags +} diff --git a/main.tf b/main.tf index ed8457e..843e9d7 100644 --- a/main.tf +++ b/main.tf @@ -1,3 +1,35 @@ +data 
"aws_eks_cluster" "eks" { + depends_on = [ + module.cluster.eks_managed_node_groups, + ] + + name = module.cluster.cluster_name +} + +provider "kubernetes" { + host = data.aws_eks_cluster.eks.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks.certificate_authority[0].data) + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + args = ["eks", "get-token", "--cluster-name", module.cluster.cluster_name, "--region", var.region] + } +} + +provider "helm" { + kubernetes { + host = data.aws_eks_cluster.eks.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks.certificate_authority[0].data) + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + args = ["eks", "get-token", "--cluster-name", module.cluster.cluster_name, "--region", var.region] + } + } +} + data "aws_vpc" "eks_vpc" { filter { name = "tag:Name" @@ -36,70 +68,19 @@ locals { "eks-cluster-name" = var.cluster_name "boc:tf_module_version" = local._module_version "boc:created_by" = "terraform" + CostAllocation = var.tag_costallocation } - # https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html - autoscale_tags = { - format("k8s.io/cluster-autoscaler/%v", var.cluster_name) = "owned" - "k8s.io/cluster-autoscaler/enabled" = "TRUE" - } - + # TBD - Why do we need nlb-policy additional_policies = { + #'nlb-policy' = aws_iam_policy.nlb-policy.arn } - ng_name = format("%v-ng-on_demand", var.cluster_name) - ng_asg_name = module.cluster.eks_managed_node_groups["on_demand"].node_group_resources[0].autoscaling_groups[0].name + ng_name = format("%v%v-nodegroup", local._prefixes["eks"], var.cluster_name) tags = merge(var.tags, local.base_tags) } -module "vpc_cni_irsa_role" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - - role_name = "${var.cluster_name}-vpc-cni" - - attach_vpc_cni_policy = true - vpc_cni_enable_ipv4 = true - - oidc_providers = { - main = { - 
provider_arn = module.cluster.oidc_provider_arn - namespace_service_accounts = ["kube-system:aws-node"] - } - } - tags = local.tags -} - -module "ebs_csi_irsa_role" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - - role_name = "${var.cluster_name}-ebs-csi-driver" - attach_ebs_csi_policy = true - - oidc_providers = { - main = { - provider_arn = module.cluster.oidc_provider_arn - namespace_service_accounts = ["kube-system:ebs-csi-controller-sa"] - } - } - tags = local.tags -} - -module "efs_csi_irsa_role" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - - role_name = "${var.cluster_name}-efs-csi-driver" - attach_efs_csi_policy = true - - oidc_providers = { - main = { - provider_arn = module.cluster.oidc_provider_arn - namespace_service_accounts = ["kube-system:efs-csi-controller-sa"] - } - } - tags = local.tags -} - module "cluster" { source = "terraform-aws-modules/eks/aws" version = "19.16.0" @@ -189,12 +170,3 @@ resource "aws_security_group_rule" "allow_sidecar_injection" { security_group_id = module.cluster.node_security_group_id source_security_group_id = module.cluster.cluster_primary_security_group_id } - -resource "aws_autoscaling_group_tag" "on-demand" { - autoscaling_group_name = local.ng_asg_name - tag { - key = "k8s.io/cluster-autoscaler/node-template/label/eks.amazonaws.com/capacityType" - value = "ON_DEMAND" - propagate_at_launch = true - } -} diff --git a/outputs.tf b/outputs.tf index 909bbb0..2b50133 100644 --- a/outputs.tf +++ b/outputs.tf @@ -1,3 +1,7 @@ +## TBD: +# - cluster_worker_sg_id - aws_security_group.all_worker_mgmt.id +# - cluster_sg_id - aws_security_group.additional_eks_cluster_sg.id + ################################################################################ # Module information ################################################################################ @@ -13,52 +17,52 @@ output "module_version" { } 
################################################################################ -# IRSA Roles +# IRSA Roles Created ################################################################################ -output "vpc_cni_irsa_role_arn" { - description = "The ARN of the irsa role for the vpc-cni addon" - value = module.vpc_cni_irsa_role.iam_role_arn -} - -output "vpc_cni_irsa_role_name" { - description = "The name of the irsa role for the vpc-cni addon" - value = module.vpc_cni_irsa_role.iam_role_name -} - -output "vpc_cni_irsa_role_unique_id" { - description = "The unique_id of the irsa role for the vpc-cni addon" - value = module.vpc_cni_irsa_role.iam_role_unique_id -} - -output "ebs_csi_irsa_role_arn" { - description = "The ARN of the irsa role for the ebs-csi-driver addon" - value = module.ebs_csi_irsa_role.iam_role_arn +output "vpc_cni_irsa_role" { + description = "The arn/name/unique_id of the irsa role for the vpc-cni addon" + value = { + arn = module.vpc_cni_irsa_role.iam_role_arn + name = module.vpc_cni_irsa_role.iam_role_name + unique_id = module.vpc_cni_irsa_role.iam_role_unique_id + } } -output "ebs_csi_irsa_role_name" { - description = "The name of the irsa role for the ebs-csi-driver addon" - value = module.ebs_csi_irsa_role.iam_role_name +output "ebs_csi_irsa_role" { + description = "The arn/name/unique_id of the irsa role for the ebs-csi-driver addon" + value = { + arn = module.ebs_csi_irsa_role.iam_role_arn + name = module.ebs_csi_irsa_role.iam_role_name + unique_id = module.ebs_csi_irsa_role.iam_role_unique_id + } } -output "ebs_csi_irsa_role_unique_id" { - description = "The unique_id of the irsa role for the ebs-csi-driver addon" - value = module.ebs_csi_irsa_role.iam_role_unique_id +output "efs_csi_irsa_role" { + description = "The arn/name/unique_id of the irsa role for the efs-csi-driver addon" + value = { + arn = module.efs_csi_irsa_role.iam_role_arn + name = module.efs_csi_irsa_role.iam_role_name + unique_id = 
module.efs_csi_irsa_role.iam_role_unique_id + } } -output "efs_csi_irsa_role_arn" { - description = "The ARN of the irsa role for the efs-csi-driver addon" - value = module.efs_csi_irsa_role.iam_role_arn +output "cluster_autoscaler_irsa_role" { + description = "The arn/name/unique_id of the irsa role for the cluster autoscaler addon" + value = { + arn = module.cluster_autoscaler_irsa_role.iam_role_arn + name = module.cluster_autoscaler_irsa_role.iam_role_name + unique_id = module.cluster_autoscaler_irsa_role.iam_role_unique_id + } } -output "efs_csi_irsa_role_name" { - description = "The name of the irsa role for the efs-csi-driver addon" - value = module.efs_csi_irsa_role.iam_role_name -} - -output "efs_csi_irsa_role_unique_id" { - description = "The unique_id of the irsa role for the efs-csi-driver addon" - value = module.efs_csi_irsa_role.iam_role_unique_id +output "cluster_irsa_role" { + description = "The arn/name/unique_id of the irsa role for the cluster" + value = { + arn = module.cluster.cluster_iam_role_arn + name = module.cluster.cluster_iam_role_name + unique_id = module.cluster.cluster_iam_role_unique_id + } } ################################################################################ @@ -90,11 +94,6 @@ output "cluster_name" { value = module.cluster.cluster_name } -output "cluster_oidc_issuer_url" { - description = "The URL on the EKS cluster for the OpenID Connect identity provider" - value = module.cluster.cluster_oidc_issuer_url -} - output "cluster_version" { description = "The Kubernetes version for the cluster" value = module.cluster.cluster_version @@ -110,11 +109,6 @@ output "cluster_status" { value = module.cluster.cluster_status } -output "cluster_primary_security_group_id" { - description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. 
Referred to as 'Cluster security group' in the EKS console" - value = module.cluster.cluster_primary_security_group_id -} - ################################################################################ # KMS Key ################################################################################ @@ -148,6 +142,11 @@ output "cluster_security_group_id" { value = module.cluster.cluster_security_group_id } +output "cluster_primary_security_group_id" { + description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console" + value = module.cluster.cluster_primary_security_group_id +} + ################################################################################ # Node Security Group ################################################################################ @@ -176,30 +175,16 @@ output "oidc_provider_arn" { value = module.cluster.oidc_provider_arn } +output "cluster_oidc_issuer_url" { + description = "The URL on the EKS cluster for the OpenID Connect identity provider" + value = module.cluster.cluster_oidc_issuer_url +} + output "cluster_tls_certificate_sha1_fingerprint" { description = "The SHA1 fingerprint of the public key of the cluster's certificate" value = module.cluster.cluster_tls_certificate_sha1_fingerprint } -################################################################################ -# IAM Role -################################################################################ - -output "cluster_iam_role_name" { - description = "IAM role name of the EKS cluster" - value = module.cluster.cluster_iam_role_name -} - -output "cluster_iam_role_arn" { - description = "IAM role ARN of the EKS cluster" - value = module.cluster.cluster_iam_role_arn -} - -output "cluster_iam_role_unique_id" { - description = "Stable and unique string identifying the IAM role" - value = 
module.cluster.cluster_iam_role_unique_id -} - ################################################################################ # EKS Addons ################################################################################ @@ -222,16 +207,16 @@ output "cluster_identity_providers" { # CloudWatch Log Group ################################################################################ -output "cloudwatch_log_group_name" { - description = "Name of cloudwatch log group created" - value = module.cluster.cloudwatch_log_group_name -} - output "cloudwatch_log_group_arn" { description = "Arn of cloudwatch log group created" value = module.cluster.cloudwatch_log_group_arn } +output "cloudwatch_log_group_name" { + description = "Name of cloudwatch log group created" + value = module.cluster.cloudwatch_log_group_name +} + ################################################################################ # Fargate Profile ################################################################################ @@ -273,7 +258,3 @@ output "self_managed_node_groups_autoscaling_group_names" { # Additional ################################################################################ -output "aws_auth_configmap_yaml" { - description = "[DEPRECATED - use `var.manage_aws_auth_configmap`] Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.cluster.aws_auth_configmap_yaml -} diff --git a/prefixes.tf b/prefixes.tf new file mode 100644 index 0000000..03303f1 --- /dev/null +++ b/prefixes.tf @@ -0,0 +1,34 @@ +locals { + _prefixes = { + "efs" = "v-efs-" + "s3" = "v-s3-" + "ebs" = "v-ebs-" + "kms" = "k-kms-" + "role" = "r-" + "policy" = "p-" + "group" = "g-" + "security-group" = "" # "sg-" + # VPC + "vpc" = "" + "dhcp-options" = "" + "vpc-peer" = "vpcp-" + "route-table" = "route-" + "subnet" = "" + "vpc-endpoint" = "vpce-" + "elastic-ip" = "eip-" + "nat-gateway" = "nat-" + "internet-gateway" = "igw-" + "network-acl" = 
"nacl-" + "customer-gateway" = "cgw-" + "vpn-gateway" = "vpcg-" + "vpn-connection" = "vpn_" + "log-group" = "lg-" + "log-stream" = "lgs-" + # EKS + "eks" = "eks-" + "eks-user" = "s-eks-" + "eks-role" = "r-eks-" + "eks-policy" = "p-eks-" + "eks-security-group" = "eks-" # "sg-eks-" + } +} diff --git a/requirements.tf b/requirements.tf new file mode 100644 index 0000000..0217407 --- /dev/null +++ b/requirements.tf @@ -0,0 +1,42 @@ +terraform { + required_version = ">= 0.13" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 5.14.0" + } + cloudinit = { + source = "hashicorp/cloudinit" + version = ">= 2.3.2" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.11.0" + } + http = { + source = "hashicorp/http" + version = ">= 3.4.0" + } + kubectl = { + source = "gavinbunney/kubectl" + version = ">= 1.14.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.23.0" + } + null = { + source = "hashicorp/null" + version = ">= 3.2.1" + } + time = { + source = "hashicorp/time" + version = ">= 0.9.1" + } + tls = { + source = "hashicorp/tls" + version = ">= 4.0.4" + } + } +} diff --git a/saml.tf b/saml.tf new file mode 100644 index 0000000..22c1f74 --- /dev/null +++ b/saml.tf @@ -0,0 +1,26 @@ +# because we can't link into remote state from the parent account, we have to use this +# also, there is no data source for saml provider + +locals { + saml_provider_arn = format(local.common_arn, "iam", "saml-provider/Census_TCO_IDMS") + saml_url = var.aws_environment == "gov" ? 
"https://signin.amazonaws-us-gov.com/saml" : "https://signin.aws.amazon.com/saml" +} + +data "aws_iam_policy_document" "saml_assume" { + statement { + sid = "SAMLFederationCensusIdP" + effect = "Allow" + actions = ["sts:AssumeRoleWithSAML"] + + principals { + type = "Federated" + identifiers = [local.saml_provider_arn] + } + + condition { + test = "StringEquals" + variable = "SAML:aud" + values = [local.saml_url] + } + } +} diff --git a/security_groups.tf b/security_groups.tf new file mode 100644 index 0000000..2a46467 --- /dev/null +++ b/security_groups.tf @@ -0,0 +1,72 @@ + +locals { + all_worker_mgmt_name = format("%v%v-all-worker-mgmt", local._prefixes["eks-security-group"], var.cluster_name) + additional_eks_cluster_sg_name = format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name) +} + +resource "aws_security_group" "all_worker_mgmt" { + name = local.all_worker_mgmt_name + + tags = merge( + local.base_tags, + var.tags, + tomap({ "Name" = local.all_worker_mgmt_name }), + ) + + vpc_id = local.vpc_id + + ingress { + from_port = 0 + to_port = 0 + protocol = -1 + cidr_blocks = [local.vpc_cidr_block] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_security_group" "additional_eks_cluster_sg" { + name = local.additional_eks_cluster_sg_name + + tags = merge( + local.base_tags, + var.tags, + tomap({ "Name" = local.additional_eks_cluster_sg_name }), + ) + + vpc_id = data.aws_vpc.eks_vpc.id + + ingress { + from_port = 0 + to_port = 0 + protocol = -1 + + security_groups = [ + aws_security_group.all_worker_mgmt.id, + ] + } + # this grants in-VPC access to the K8S api + # updated to get all census private cidrs to get on-prem, as we are now sending the interface traffic over + # a private IP only (disabling public access). 
This is to reach a cluster api from another account and VPC + # so we open all the cloud accounts too + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + # cidr_blocks = [ var.vpc_cidr_block ] + cidr_blocks = concat(var.census_private_cidr, ["10.0.0.0/8"]) + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} + diff --git a/storage_classes.tf b/storage_classes.tf new file mode 100644 index 0000000..497cdcf --- /dev/null +++ b/storage_classes.tf @@ -0,0 +1,69 @@ +resource "kubernetes_storage_class" "gp3_encrypted" { + metadata { + name = "gp3-encrypted" + annotations = { + "storageclass.kubernetes.io/is-default-class" = "true" + } + } + parameters = { + fsType = "ext4" + type = "gp3" + encrypted = "true" + # kms_key_id = data.aws_kms_key.ebs_key.arn + kmsKeyId = data.aws_kms_key.ebs_key.arn + } + storage_provisioner = "ebs.csi.aws.com" + reclaim_policy = "Delete" + volume_binding_mode = "Immediate" + allow_volume_expansion = "true" +} + +resource "kubernetes_storage_class" "ebs_encrypted" { + metadata { + name = "gp2-encrypted" + annotations = { + "storageclass.kubernetes.io/is-default-class" = "false" + } + } + parameters = { + fsType = "ext4" + type = "gp2" + encrypted = "true" + # kms_key_id = data.aws_kms_key.ebs_key.arn + kmsKeyId = data.aws_kms_key.ebs_key.arn + } + storage_provisioner = "kubernetes.io/aws-ebs" + reclaim_policy = "Delete" + volume_binding_mode = "Immediate" + allow_volume_expansion = "true" +} + +module "efs" { + source = "git@github.e.it.census.gov:terraform-modules/aws-efs.git" + + name = var.cluster_name + vpc_id = local.vpc_id + subnet_ids = local.subnets + security_groups = [aws_security_group.all_worker_mgmt.id] + + tags = merge( + local.base_tags, + var.tags, + tomap({ "efs.csi.aws.com/cluster" = "true" }), + ) +} + +resource "kubernetes_storage_class" "efs-sc" { + depends_on = [module.efs] + + metadata { + name = "efs" + } + storage_provisioner = "efs.csi.aws.com" + 
parameters = { + provisioningMode = "efs-ap" + fileSystemId = module.efs.id + directoryPerms = "700" + } + mount_options = ["tls"] +} diff --git a/variables.common.tf b/variables.common.tf new file mode 100644 index 0000000..ce15994 --- /dev/null +++ b/variables.common.tf @@ -0,0 +1,24 @@ +variable "census_private_cidr" { + description = "Census Private CIDR Blocks" + type = list(string) + default = ["148.129.0.0/16", "172.16.0.0/12", "192.168.0.0/16"] +} + +variable "tag_costallocation" { + description = "Tag CostAllocation (default)" + type = string + default = "csvd:infrastructure" +} + +variable "tags" { + description = "AWS Tags to apply to appropriate resources" + type = map(string) + default = {} +} + +variable "aws_environment" { + description = "AWS Environment (govcloud | east-west)" + type = string + default = "" +} + diff --git a/variables.tf b/variables.tf index 73072a6..428c570 100644 --- a/variables.tf +++ b/variables.tf @@ -14,12 +14,6 @@ variable "region" { type = string } -variable "tags" { - description = "Arbitrary tags to add to objects created in AWS." - type = map(string) - default = {} -} - variable "profile" { description = "AWS config profile" type = string @@ -42,6 +36,21 @@ variable "domain" { type = string } +# helm add repo autoscaler "https://kubernetes.github.io/autoscaler" +# helm search repo -l autoscaler/cluster-autoscaler +variable "cluster_autoscaler_chart_version" { + description = "The helm chart of the cluster-autoscaler most closely matching the Kubernetes version. 
Review output of `helm add repo autoscaler 'https://kubernetes.github.io/autoscaler'` (if the repo hasn't been added previously) and `helm search repo -l autoscaler/cluster-autoscaler`" + type = string + default = "9.28.0" +} + +# helm show values --version [cluster_autoscaler_chart_version] autoscaler/cluster-autoscaler | grep tag: +variable "cluster_autoscaler_tag" { + description = "Image tag of cluster-autoscaler associated with the cluster_autoscaler_chart_version helm chart. `helm show values --version [cluster_autoscaler_chart_version] autoscaler/cluster-autoscaler | grep tag:`" + type = string + default = "v1.26.2" +} + variable "eks_instance_disk_size" { description = "The size of the disk of the worker nodes in gigabytes. 40 is the approximate minimum. Needs to hold the all of the normal operating system files plus every image that will be used in the cluster." type = number