From 4a31aae8e1aa069376147c0bbe3869a9625bd17b Mon Sep 17 00:00:00 2001 From: badra001 Date: Mon, 7 Aug 2023 08:50:06 -0400 Subject: [PATCH] update security groups to add a new one for the cluster/node group --- examples/full-cluster-tf-upgrade/1.24/main.tf | 8 +-- .../full-cluster-tf-upgrade/1.24/outputs.tf | 5 ++ .../1.24/securitygroup.tf | 54 +++++++++++++++++++ examples/full-cluster-tf-upgrade/1.25/main.tf | 8 +-- .../full-cluster-tf-upgrade/1.25/outputs.tf | 5 ++ .../1.25/securitygroup.tf | 54 +++++++++++++++++++ 6 files changed, 128 insertions(+), 6 deletions(-) diff --git a/examples/full-cluster-tf-upgrade/1.24/main.tf b/examples/full-cluster-tf-upgrade/1.24/main.tf index 250759b..6f8c098 100644 --- a/examples/full-cluster-tf-upgrade/1.24/main.tf +++ b/examples/full-cluster-tf-upgrade/1.24/main.tf @@ -73,8 +73,9 @@ resource "aws_eks_cluster" "eks_cluster" { enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] vpc_config { - subnet_ids = local.subnets - security_group_ids = [aws_security_group.additional_eks_cluster_sg.id] + subnet_ids = local.subnets + # security_group_ids = [aws_security_group.additional_eks_cluster_sg.id] + security_group_ids = [aws_security_group.extra_cluster_sg.id] endpoint_private_access = true endpoint_public_access = false public_access_cidrs = var.census_public_cidr @@ -159,7 +160,8 @@ resource "aws_launch_template" "eks-nodegroup" { name = format("%v%v-launch-template", local._prefixes["eks"], var.cluster_name) update_default_version = true key_name = aws_key_pair.cluster_keypair.key_name - vpc_security_group_ids = [aws_security_group.additional_eks_cluster_sg.id] + # vpc_security_group_ids = [aws_security_group.additional_eks_cluster_sg.id] + vpc_security_group_ids = [aws_security_group.extra_cluster_sg.id] tags = merge( local.base_tags, diff --git a/examples/full-cluster-tf-upgrade/1.24/outputs.tf b/examples/full-cluster-tf-upgrade/1.24/outputs.tf index e95c90d..fe6708f 100644 --- 
a/examples/full-cluster-tf-upgrade/1.24/outputs.tf +++ b/examples/full-cluster-tf-upgrade/1.24/outputs.tf @@ -35,6 +35,11 @@ output "cluster_sg_id" { value = aws_security_group.additional_eks_cluster_sg.id } +output "extra_cluster_sg_id" { + description = "Security group ID for cluster/node access" + value = aws_security_group.extra_cluster_sg.id +} + output "cluster_subnet_ids" { description = "Subnet IDs used to create the cluster" value = local.subnets diff --git a/examples/full-cluster-tf-upgrade/1.24/securitygroup.tf b/examples/full-cluster-tf-upgrade/1.24/securitygroup.tf index b66be01..a2e3baa 100644 --- a/examples/full-cluster-tf-upgrade/1.24/securitygroup.tf +++ b/examples/full-cluster-tf-upgrade/1.24/securitygroup.tf @@ -1,3 +1,27 @@ +# these grant access, which may no longer be necessary. See https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html +# for details. If no SG is used when creating the cluster, it uses the default SG for it. In shared vpcs, there is no access to +# a default SG, which has no in or out rules, but the cluster will fail trying to get to the default SG. +# +# EKS created sg: eks-cluster-sg-{cluster-name}-{id} +# * in any from self +# * out any +# +# additional_eks_cluster_sg +# * in any from all_worker_mgmt SG +# * in port 443 from census on-prem, and 10/8 +# * out any +# +# all_worker_mgmt +# * in any local cidr +# * out any +# +# We can refine the SGs to let it create the default SG, which contains all the needed cluster rules, and then +# create a new single SG for the needed traffic and add it to the launch template and cluster when created. 
+# We absorb the local 10.x.x.x/x cidr into the 10/8 +# +# extra_cluster_sg +# * in port 443 from census on-prem, and 10/8 + resource "aws_security_group" "additional_eks_cluster_sg" { name = format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name) @@ -97,3 +121,33 @@ resource "aws_security_group" "all_worker_mgmt" { ## cidr_blocks = ["0.0.0.0/0"] ## } ## } + +# attach to cluster create, nodegroups +resource "aws_security_group" "extra_cluster_sg" { + name = format("%v%v-extra", local._prefixes["eks-security-group"], var.cluster_name) + description = format("Security group for additional access for EKS cluster %v", var.cluster_name) + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + { "Name" = format("%v%v-extra", local._prefixes["eks-security-group"], var.cluster_name) }, + ) + + vpc_id = data.aws_vpc.eks_vpc.id + + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = concat(var.census_private_cidr, ["10.0.0.0/8"]) + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} diff --git a/examples/full-cluster-tf-upgrade/1.25/main.tf b/examples/full-cluster-tf-upgrade/1.25/main.tf index ba7cb31..6f8c098 100644 --- a/examples/full-cluster-tf-upgrade/1.25/main.tf +++ b/examples/full-cluster-tf-upgrade/1.25/main.tf @@ -73,8 +73,9 @@ resource "aws_eks_cluster" "eks_cluster" { enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] vpc_config { - subnet_ids = local.subnets - security_group_ids = [aws_security_group.additional_eks_cluster_sg.id] + subnet_ids = local.subnets + # security_group_ids = [aws_security_group.additional_eks_cluster_sg.id] + security_group_ids = [aws_security_group.extra_cluster_sg.id] endpoint_private_access = true endpoint_public_access = false public_access_cidrs = var.census_public_cidr @@ -159,7 +160,8 @@ resource "aws_launch_template" "eks-nodegroup" { name = 
format("%v%v-launch-template", local._prefixes["eks"], var.cluster_name) update_default_version = true key_name = aws_key_pair.cluster_keypair.key_name - vpc_security_group_ids = [aws_security_group.all_worker_mgmt.id] + # vpc_security_group_ids = [aws_security_group.all_worker_mgmt.id] + vpc_security_group_ids = [aws_security_group.extra_cluster_sg.id] tags = merge( local.base_tags, diff --git a/examples/full-cluster-tf-upgrade/1.25/outputs.tf b/examples/full-cluster-tf-upgrade/1.25/outputs.tf index e95c90d..fe6708f 100644 --- a/examples/full-cluster-tf-upgrade/1.25/outputs.tf +++ b/examples/full-cluster-tf-upgrade/1.25/outputs.tf @@ -35,6 +35,11 @@ output "cluster_sg_id" { value = aws_security_group.additional_eks_cluster_sg.id } +output "extra_cluster_sg_id" { + description = "Security group ID for cluster/node access" + value = aws_security_group.extra_cluster_sg.id +} + output "cluster_subnet_ids" { description = "Subnet IDs used to create the cluster" value = local.subnets diff --git a/examples/full-cluster-tf-upgrade/1.25/securitygroup.tf b/examples/full-cluster-tf-upgrade/1.25/securitygroup.tf index b66be01..a2e3baa 100644 --- a/examples/full-cluster-tf-upgrade/1.25/securitygroup.tf +++ b/examples/full-cluster-tf-upgrade/1.25/securitygroup.tf @@ -1,3 +1,27 @@ +# these grant access, which may no longer be necessary. See https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html +# for details. If no SG is used when creating the cluster, it uses the default SG for it. In shared vpcs, there is no access to +# a default SG, which has no in or out rules, but the cluster will fail trying to get to the default SG. 
+# +# EKS created sg: eks-cluster-sg-{cluster-name}-{id} +# * in any from self +# * out any +# +# additional_eks_cluster_sg +# * in any from all_worker_mgmt SG +# * in port 443 from census on-prem, and 10/8 +# * out any +# +# all_worker_mgmt +# * in any local cidr +# * out any +# +# We can refine the SGs to let it create the default SG, which contains all the needed cluster rules, and then +# create a new single SG for the needed traffic and add it to the launch template and cluster when created. +# We absorb the local 10.x.x.x/x cidr into the 10/8 +# +# extra_cluster_sg +# * in port 443 from census on-prem, and 10/8 + resource "aws_security_group" "additional_eks_cluster_sg" { name = format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name) @@ -97,3 +121,33 @@ resource "aws_security_group" "all_worker_mgmt" { ## cidr_blocks = ["0.0.0.0/0"] ## } ## } + +# attach to cluster create, nodegroups +resource "aws_security_group" "extra_cluster_sg" { + name = format("%v%v-extra", local._prefixes["eks-security-group"], var.cluster_name) + description = format("Security group for additional access for EKS cluster %v", var.cluster_name) + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + { "Name" = format("%v%v-extra", local._prefixes["eks-security-group"], var.cluster_name) }, + ) + + vpc_id = data.aws_vpc.eks_vpc.id + + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = concat(var.census_private_cidr, ["10.0.0.0/8"]) + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +}