diff --git a/README.md b/README.md index d9e0aa8..6109608 100644 --- a/README.md +++ b/README.md @@ -97,14 +97,14 @@ efs-csi-controller 0 5m | Name | Version | |------|---------| | [terraform](#requirement\_terraform) | >= 0.13 | -| [aws](#requirement\_aws) | ~> 5.14 | +| [aws](#requirement\_aws) | ~> 5.0 | | [null](#requirement\_null) | ~> 3.2 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | 5.96.0 | +| [aws](#provider\_aws) | 5.100.0 | | [null](#provider\_null) | 3.2.4 | | [terraform](#provider\_terraform) | n/a | @@ -113,7 +113,7 @@ efs-csi-controller 0 5m | Name | Source | Version | |------|--------|---------| | [cloudwatch\_observability\_irsa\_role](#module\_cloudwatch\_observability\_irsa\_role) | git::https://github.e.it.census.gov/SCT-Engineering/terraform-aws-iam//modules/iam-role-for-service-accounts-eks | n/a | -| [cluster](#module\_cluster) | git::https://github.e.it.census.gov/SCT-Engineering/terraform-aws-eks/ | v20.36.0 | +| [cluster](#module\_cluster) | git::https://github.e.it.census.gov/SCT-Engineering/terraform-aws-eks/ | v20.37.2 | | [ebs\_csi\_irsa\_role](#module\_ebs\_csi\_irsa\_role) | git::https://github.e.it.census.gov/SCT-Engineering/terraform-aws-iam//modules/iam-role-for-service-accounts-eks | n/a | | [efs\_csi\_irsa\_role](#module\_efs\_csi\_irsa\_role) | git::https://github.e.it.census.gov/SCT-Engineering/terraform-aws-iam//modules/iam-role-for-service-accounts-eks | n/a | | [vpc\_cni\_irsa\_role](#module\_vpc\_cni\_irsa\_role) | git::https://github.e.it.census.gov/SCT-Engineering/terraform-aws-iam//modules/iam-role-for-service-accounts-eks | n/a | @@ -123,20 +123,39 @@ efs-csi-controller 0 5m | Name | Type | |------|------| | [aws_ec2_tag.container_subnets](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_tag) | resource | +| [aws_iam_policy.cloudwatch-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.cluster-admin-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.nlb-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy_attachment.cluster-admin-attach](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy_attachment) | resource | +| [aws_iam_role.role_cluster-admin](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role.role_eks-cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.eks-cluster-cloudwatch](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.eks-cluster-managed](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.eks-cluster-nlb](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_security_group.additional_eks_cluster_sg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_security_group.all_worker_mgmt](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | 
[aws_security_group.extra_cluster_sg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_security_group_rule.allow_sidecar_injection](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | +| [aws_vpc_security_group_egress_rule.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_security_group_egress_rule) | resource | +| [aws_vpc_security_group_ingress_rule.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_security_group_ingress_rule) | resource | +| [aws_vpc_security_group_ingress_rule.additional_ingress_rules_2](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_security_group_ingress_rule) | resource | | [null_resource.git_version](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [terraform_data.subnet_validation](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/resources/data) | resource | | [aws_arn.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/arn) | data source | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_ebs_default_kms_key.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ebs_default_kms_key) | data source | +| [aws_iam_policy.cluster_managed_policies](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy) | data source | +| [aws_iam_policy_document.allow_sts](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.cloudwatch-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.cluster-admin-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.eks_assume](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.nlb-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_roles.roles](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_roles) | data source | | [aws_iam_roles.sso_admins](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_roles) | data source | | [aws_iam_roles.sso_read](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_roles) | data source | | [aws_iam_session_context.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_session_context) | data source | | [aws_kms_key.ebs_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/kms_key) | data source | +| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | | [aws_subnet.subnets](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet) | data source | | [aws_subnets.subnets](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source | | 
[aws_vpc.eks_vpc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) | data source | @@ -154,9 +173,9 @@ efs-csi-controller 0 5m | [cluster\_version](#input\_cluster\_version) | Kubernetes version to use for the EKS cluster | `string` | n/a | yes | | [eks\_instance\_disk\_size](#input\_eks\_instance\_disk\_size) | Size of the EKS node disk in GB | `number` | `80` | no | | [eks\_instance\_types](#input\_eks\_instance\_types) | List of EC2 instance types for the EKS node group | `list(string)` |
<pre>[<br/>  "t3a.medium"<br/>]</pre>
| no | -| [eks\_ng\_desired\_size](#input\_eks\_ng\_desired\_size) | Desired size of the EKS node group | `number` | `4` | no | -| [eks\_ng\_max\_size](#input\_eks\_ng\_max\_size) | Maximum size of the EKS node group | `number` | `15` | no | -| [eks\_ng\_min\_size](#input\_eks\_ng\_min\_size) | Minimum size of the EKS node group | `number` | `4` | no | +| [eks\_ng\_desired\_size](#input\_eks\_ng\_desired\_size) | Desired size of the EKS node group | `number` | `2` | no | +| [eks\_ng\_max\_size](#input\_eks\_ng\_max\_size) | Maximum size of the EKS node group | `number` | `2` | no | +| [eks\_ng\_min\_size](#input\_eks\_ng\_min\_size) | Minimum size of the EKS node group | `number` | `2` | no | | [enable\_cluster\_creator\_admin\_permissions](#input\_enable\_cluster\_creator\_admin\_permissions) | Grant admin permissions to the cluster creator | `bool` | `true` | no | | [subnets\_name](#input\_subnets\_name) | Name pattern for subnets to be used by EKS cluster | `string` | `"*-container-*"` | no | | [tags](#input\_tags) | Additional tags to apply to all resources | `map(string)` | `{}` | no | diff --git a/aws_data.tf b/aws_data.tf index 1402bc0..4c35bb2 100644 --- a/aws_data.tf +++ b/aws_data.tf @@ -1,5 +1,7 @@ data "aws_caller_identity" "current" {} +data "aws_region" "current" {} + data "aws_arn" "current" { arn = data.aws_caller_identity.current.arn } diff --git a/cluster-admin.tf b/cluster-admin.tf new file mode 100644 index 0000000..3f0efa5 --- /dev/null +++ b/cluster-admin.tf @@ -0,0 +1,138 @@ +#--- +# cluster-admin +#--- +locals { + iam_arn = format("arn:%v:iam::%v:%%v", data.aws_arn.current.partition, data.aws_caller_identity.current.account_id) + common_arn = format("arn:%v:%%v:%v:%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id) + eks_resources = ["cluster", "addon", "nodegroup", "identityproviderconfig"] + + admin_policy_statements = { + ECRRead = { + actions = [ + "ecr:Describe*", + "ecr:Get*", + "ecr:ListImages", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + ] + resources = ["*"] + } + ECRWrite = { + actions = [ + "ecr:BatchDeleteImage", + "ecr:CompleteLayerUpload", + "ecr:CreateRepository", + "ecr:DeleteRepository", + "ecr:InitiateLayerUpload", + "ecr:PutImage", + "ecr:UploadLayerPart" + ] + resources = [format(local.common_arn, "ecr", format("repository/eks/%v/*", var.cluster_name))] + } + EKSRead = { + actions = [ + "eks:ListClusters", + "eks:ListAddons", + "eks:ListNodegroups", + "eks:DescribeCluster", + "eks:DescribeAddon*", + "eks:DescribeNodegroup", + ] + resources = [ + format(local.common_arn, "eks", "cluster/*"), + format(local.common_arn, "eks", "addon/*"), + format(local.common_arn, "eks", "addons/*"), + format(local.common_arn, "eks", "/addons/*"), + format(local.common_arn, "eks", "nodegroup/*"), + ] + } + IAMRead = { + actions = [ + "iam:ListRoles", + ] + resources = ["*"] + } + SSMGet = { + actions = [ + "ssm:GetParameter", + ] + resources = [ + format("arn:%v:%v:%v:%v:%v", data.aws_arn.current.partition, "ssm", data.aws_region.current.name, "", "parameter/aws/service/eks/*") + ] + } + EKSReadMyClusters = { + actions = [ + "eks:List*", + "eks:Read*", + "eks:Describe*", + "eks:AccessKubernetesApi", + ] + resources = flatten(concat( + [format(local.common_arn, "eks", format("/clusters/%v/addons", var.cluster_name))], + [for r in local.eks_resources : [format(local.common_arn, "eks", format("%v/%v", r, var.cluster_name)), + format(local.common_arn, 
"eks", format("%v/%v/*", r, var.cluster_name))]] + )) + } + } +} + +resource "aws_iam_role" "role_cluster-admin" { + name = format("%v%v-cluster-admin", local.prefixes["eks"], var.cluster_name) + description = "SAML EKS Cluster Admin Role for ${var.cluster_name}" + + assume_role_policy = data.aws_iam_policy_document.allow_sts.json + force_detach_policies = true + tags = var.tags +} + +resource "aws_iam_policy_attachment" "cluster-admin-attach" { + name = format("%v%v-cluster-admin-attach", local.prefixes["eks"], var.cluster_name) + policy_arn = aws_iam_policy.cluster-admin-policy.arn + roles = [aws_iam_role.role_cluster-admin.name] +} + +#--- +# cluster admin policy +#--- +resource "aws_iam_policy" "cluster-admin-policy" { + name = format("%v%v-cluster-admin", local.prefixes["eks-policy"], var.cluster_name) + path = "/" + description = "Allow for administration of the cluster ${var.cluster_name} using AWS resources" + policy = data.aws_iam_policy_document.cluster-admin-policy.json + + tags = merge( + local.base_tags, + var.tags + ) +} + +data "aws_iam_policy_document" "cluster-admin-policy" { + dynamic "statement" { + for_each = local.admin_policy_statements + iterator = s + content { + sid = format("%v%vAccess", lookup(s.value, "effect", "Allow"), s.key) + effect = lookup(s.value, "effect", "Allow") + actions = lookup(s.value, "actions", []) + resources = lookup(s.value, "resources", []) + } + } +} + +#--- +# cluster admin assume policy +#--- +data "aws_iam_policy_document" "allow_sts" { + statement { + sid = "AllowSTSAssume" + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "AWS" + identifiers = [ + format(local.iam_arn, "root"), + ] + } + } +} diff --git a/cluster-role.tf b/cluster-role.tf new file mode 100644 index 0000000..7347e64 --- /dev/null +++ b/cluster-role.tf @@ -0,0 +1,92 @@ +#--- +# cluster +#--- +locals { + cluster_managed_policy_list = [ + "AmazonEKSClusterPolicy", + "AmazonEC2FullAccess", + "CloudWatchLogsFullAccess", + ] + cluster_managed_policies = [for p in data.aws_iam_policy.cluster_managed_policies : p.arn] +} + +data "aws_iam_policy" "cluster_managed_policies" { + for_each = toset(local.cluster_managed_policy_list) + name = each.key +} + +resource "aws_iam_policy" "nlb-policy" { + name = format("%v%v-nlb", local.prefixes["eks-policy"], var.cluster_name) + path = "/" + description = "Allow configuration of the ELB" + policy = data.aws_iam_policy_document.nlb-policy.json + +} + +# Q: why CreateSecurityGroup +# TBD: refine resources to limit only to eks configurations +data "aws_iam_policy_document" "nlb-policy" { + statement { + sid = "EKSNLBConfiguration" + effect = "Allow" + actions = [ + "elasticloadbalancing:*", + "ec2:CreateSecurityGroup", + "ec2:Describe*", + ] + resources = ["*"] + } +} + +resource "aws_iam_policy" "cloudwatch-policy" { + name = format("%v%v-cloudwatch", local.prefixes["eks-policy"], var.cluster_name) + path = "/" + description = "Allow sending metric data to cloudwatch" + policy = data.aws_iam_policy_document.cloudwatch-policy.json + +} + +# TBD: refine resources to limit only to eks configurations +data "aws_iam_policy_document" "cloudwatch-policy" { + statement { + sid = "EKSCloudwatchMetrics" + effect = "Allow" + actions = [ + "cloudwatch:PutMetricData", + ] + resources = ["*"] + } +} + +resource "aws_iam_role" "role_eks-cluster" { + name = format("%v%v-cluster", local.prefixes["eks"], var.cluster_name) + description = "EKS Cluster Role for ${var.cluster_name}" + assume_role_policy = 
data.aws_iam_policy_document.eks_assume.json } +resource "aws_iam_role_policy_attachment" "eks-cluster-nlb" { + role = aws_iam_role.role_eks-cluster.name + policy_arn = aws_iam_policy.nlb-policy.arn +} +resource "aws_iam_role_policy_attachment" "eks-cluster-cloudwatch" { + role = aws_iam_role.role_eks-cluster.name + policy_arn = aws_iam_policy.cloudwatch-policy.arn +} +resource "aws_iam_role_policy_attachment" "eks-cluster-managed" { + for_each = toset(local.cluster_managed_policies) + role = aws_iam_role.role_eks-cluster.name + policy_arn = each.key +} +data "aws_iam_policy_document" "eks_assume" { + statement { + sid = "EKSAssumeRole" + effect = "Allow" + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["eks.amazonaws.com"] + } + } +} diff --git a/main.tf index 0cd49a8..a3b1324 100644 --- a/main.tf +++ b/main.tf @@ -26,7 +26,7 @@ resource "terraform_data" "subnet_validation" { } module "cluster" { - source = "git::https://github.e.it.census.gov/SCT-Engineering/terraform-aws-eks/?ref=v20.36.0" + source = "git::https://github.e.it.census.gov/SCT-Engineering/terraform-aws-eks/?ref=v20.37.2" access_entries = local.access_entries cloudwatch_log_group_retention_in_days = var.cloudwatch_retention_days diff --git a/requirements.tf index ea1dad6..970fa07 100644 --- a/requirements.tf +++ b/requirements.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 5.14" + version = "~> 5.0" } null = { source = "hashicorp/null" diff --git a/security_groups.tf index c37cec7..9712bda 100644 --- a/security_groups.tf +++ b/security_groups.tf @@ -44,6 +44,7 @@ resource "aws_security_group" "additional_eks_cluster_sg" { } } +# once set up, you cannot change any ports here resource "aws_security_group" "all_worker_mgmt" { name = local.all_worker_mgmt_name @@ -73,6 +74,8 @@ resource "aws_security_group" "all_worker_mgmt" { } } +# once set up, you cannot change any ports here +# attached at cluster creation and to node groups resource "aws_security_group" "extra_cluster_sg" { name = format("%v%v-extra", local.prefixes["eks-security-group"], var.cluster_name) description = format("Security group for additional access for EKS cluster %v", var.cluster_name) diff --git a/securitygroup.ports.tf new file mode 100644 index 0000000..0535c58 --- /dev/null +++ b/securitygroup.ports.tf @@ -0,0 +1,164 @@ +# See +# https://stackoverflow.com/questions/71902887/transport-error-while-dialing-dial-tcp-xx-xx-xx-xx15012-i-o-timeout-with-aws-e +# Ports needed for Istio to install correctly; without them you see the error: transport: Error while dialing dial tcp xx.xx.xx.xx15012: i/o timeout +# add other ports here as needed +locals { + sg_additional_ports = [ + { + component = "istio" + description = "Envoy admin port / outbound" + from_port = 15000 + to_port = 15001 + }, + { + component = "istio" + description = "Debug port" + from_port = 15004 + to_port = 15004 + }, + { + component = "istio" + description = "Envoy inbound" + from_port = 15006 + to_port = 15006 + }, + { + component = "istio" + description = "HBONE mTLS tunnel port / secure networks XDS and CA services (Plaintext)" + from_port = 15008 + to_port = 15010 + }, + { + component = "istio" + description = "XDS and CA services (TLS and mTLS)" + from_port = 15012 + to_port = 15012 + }, + { + component = "istio" + description = "Control plane monitoring" + from_port = 15014 + to_port = 15014 + }, + { + component = "istio" + description = "Webhook 
container port, forwarded from 443" + from_port = 15017 + to_port = 15017 + }, + { + component = "istio" + description = "Merged Prometheus telemetry from Istio agent, Envoy, and application / Health checks" + from_port = 15020 + to_port = 15021 + }, + { + component = "istio" + description = "DNS port" + from_port = 15053 + to_port = 15053 + }, + { + component = "istio" + description = "Envoy Prometheus telemetry" + from_port = 15090 + to_port = 15090 + }, + { + component = "istio" + description = "aws-load-balancer-controller" + from_port = 9443 + to_port = 9443 + }, + { + component = "cert-manager" + description = "cert-manager-webhook" + from_port = 10250 + to_port = 10250 + } + ] + + sg_additional_ports_2 = [ + { + component = "istio" + description = "XDS and CA services (TLS and mTLS)" + from_port = 15012 + to_port = 15012 + }, + { + component = "istio" + description = "Webhook container port, forwarded from 443" + from_port = 15017 + to_port = 15017 + } + ] + + sg_additional_ingress_rules = { + for ikey, ivalue in local.sg_additional_ports : + "${ikey}_ingress" => { + description = ivalue.description + protocol = "tcp" + from_port = ivalue.from_port + to_port = ivalue.to_port + type = "ingress" + self = true + } + } + + sg_additional_egress_rules = { + for ekey, evalue in local.sg_additional_ports : + "${ekey}_egress" => { + description = evalue.description + protocol = "tcp" + from_port = evalue.from_port + to_port = evalue.to_port + type = "egress" + self = true + } + } + + sg_additional_ingress_rules_2 = { + for ikey, ivalue in local.sg_additional_ports_2 : + "${ikey}_ingress" => { + description = ivalue.description + protocol = "tcp" + from_port = ivalue.from_port + to_port = ivalue.to_port + type = "ingress" + self = true + } + } +} + +resource "aws_vpc_security_group_ingress_rule" "additional" { + for_each = { for k, v in local.sg_additional_ingress_rules : v.from_port => v } + security_group_id = aws_security_group.additional_eks_cluster_sg.id + + description = each.value.description + from_port = each.value.from_port + to_port = each.value.to_port + ip_protocol = each.value.protocol + referenced_security_group_id = each.value.self ? aws_security_group.additional_eks_cluster_sg.id : null +} + +resource "aws_vpc_security_group_egress_rule" "additional" { + for_each = { for k, v in local.sg_additional_egress_rules : v.from_port => v } + security_group_id = aws_security_group.additional_eks_cluster_sg.id + + description = each.value.description + from_port = each.value.from_port + to_port = each.value.to_port + ip_protocol = each.value.protocol + referenced_security_group_id = each.value.self ? 
aws_security_group.additional_eks_cluster_sg.id : null +} + +resource "aws_vpc_security_group_ingress_rule" "additional_ingress_rules_2" { + for_each = { for k, v in local.sg_additional_ingress_rules_2 : v.from_port => v } + security_group_id = aws_security_group.extra_cluster_sg.id + + description = each.value.description + from_port = each.value.from_port + to_port = each.value.to_port + ip_protocol = each.value.protocol + referenced_security_group_id = aws_security_group.additional_eks_cluster_sg.id +} diff --git a/variables.tf b/variables.tf index 8a9fb29..9e86fcb 100644 --- a/variables.tf +++ b/variables.tf @@ -11,8 +11,8 @@ variable "cluster_version" { description = "Kubernetes version to use for the EKS cluster" type = string validation { - condition = can(regex("^[0-9]+\\.[0-9]+$", var.cluster_version)) && contains(["1.27", "1.28", "1.29", "1.30", "1.31", "1.32"], var.cluster_version) - error_message = "Cluster version must be in the format 'x.y' (e.g., '1.27') and must be one of: 1.27, 1.28, 1.29, 1.30, 1.31, 1.32" + condition = can(regex("^[0-9]+\\.[0-9]+$", var.cluster_version)) && contains(["1.31", "1.32", "1.33"], var.cluster_version) + error_message = "Cluster version must be in the format 'x.y' (e.g., '1.33') and must be one of: 1.31, 1.32, 1.33" } } @@ -82,7 +82,7 @@ variable "eks_instance_types" { variable "eks_ng_min_size" { description = "Minimum size of the EKS node group" type = number - default = 4 + default = 2 validation { condition = var.eks_ng_min_size >= 1 error_message = "Minimum node group size must be at least 1." @@ -92,11 +92,7 @@ variable "eks_ng_min_size" { variable "eks_ng_desired_size" { description = "Desired size of the EKS node group" type = number - default = 4 - validation { - condition = var.eks_ng_desired_size >= var.eks_ng_min_size && var.eks_ng_desired_size <= var.eks_ng_max_size - error_message = "Desired size must be between minimum and maximum sizes." - } + default = 2 validation { condition = var.eks_ng_desired_size >= 1 error_message = "Desired size must be at least 1." @@ -106,10 +102,10 @@ variable "eks_ng_desired_size" { variable "eks_ng_max_size" { description = "Maximum size of the EKS node group" type = number - default = 15 + default = 2 validation { - condition = var.eks_ng_max_size >= var.eks_ng_min_size - error_message = "Maximum node group size must be greater than or equal to minimum size." + condition = var.eks_ng_max_size >= 1 + error_message = "Maximum node group size must be at least 1." } }
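
A note on the two-stage format() templating that cluster-admin.tf introduces: the first format() fills in the static ARN fields and emits literal %v placeholders (written as %%v so format() does not consume them), and a later format() call fills those placeholders with the service and resource. A minimal standalone sketch of the pattern — the partition, region, and account values below are hard-coded placeholders; the module derives them from data sources:

locals {
  # First pass fills partition/region/account, leaving %v placeholders:
  # => "arn:aws:%v:us-east-1:123456789012:%v"
  common_arn = format("arn:%v:%%v:%v:%v:%%v", "aws", "us-east-1", "123456789012")

  # Second pass fills service and resource:
  # => "arn:aws:eks:us-east-1:123456789012:cluster/my-cluster"
  eks_cluster_arn = format(local.common_arn, "eks", "cluster/my-cluster")
}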
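And a sketch of how securitygroup.ports.tf expands the port list into per-port rules: the resource's for_each re-keys each generated rule by its from_port, so every port maps to one stable rule address, and a duplicate from_port would fail the for expression at plan time. This is a trimmed illustration, not the module's full code; the port list and security group IDs are placeholders:

locals {
  ports = [
    { description = "XDS and CA services (TLS and mTLS)", from_port = 15012, to_port = 15012 },
    { description = "Webhook container port, forwarded from 443", from_port = 15017, to_port = 15017 },
  ]
}

# Keys become "15012" and "15017"; adding a port to local.ports creates
# aws_vpc_security_group_ingress_rule.example["<port>"] without touching the others.
resource "aws_vpc_security_group_ingress_rule" "example" {
  for_each          = { for p in local.ports : p.from_port => p }
  security_group_id = "sg-0123456789abcdef0" # placeholder

  description                  = each.value.description
  from_port                    = each.value.from_port
  to_port                      = each.value.to_port
  ip_protocol                  = "tcp"
  referenced_security_group_id = "sg-0123456789abcdef0" # placeholder: self-referencing rule
}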