From 8340cf444441edd343081cdb0ee7728baf578da2 Mon Sep 17 00:00:00 2001 From: badra001 Date: Tue, 30 Jan 2024 12:14:08 -0500 Subject: [PATCH] * 2.0.4 -- 2024-01-30 - examples/full-cluster-tf-upgrade/1.28 - remove keypair (ec2-keypair.tf.obsolete) - change securitygroups to ignore ingress, egress (as EKS modifies some of them) and add only things we want to control differently --- CHANGELOG.md | 6 + common/version.tf | 2 +- ...ec2-keypair.tf => ec2-keypair.tf.obsolete} | 0 examples/full-cluster-tf-upgrade/1.28/main.tf | 2 +- .../1.28/securitygroup.ports.tf | 128 ++++++++++++++++++ .../1.28/securitygroup.tf | 18 ++- 6 files changed, 152 insertions(+), 4 deletions(-) rename examples/full-cluster-tf-upgrade/1.28/{ec2-keypair.tf => ec2-keypair.tf.obsolete} (100%) create mode 100644 examples/full-cluster-tf-upgrade/1.28/securitygroup.ports.tf diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e953f8..5e2277c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,3 +22,9 @@ - change common-services to use cert-manager-issuer which uses the new acmpca-eks-cert-manager module - remove extraneous helm charts for non-issuer ca - add contact_email variable + +* 2.0.4 -- 2024-01-30 + - examples/full-cluster-tf-upgrade/1.28 + - remove keypair (ec2-keypair.tf.obsolete) + - change securitygroups to ignore ingress, egress (as EKS modifies some of them) and add only things we want to + control differently diff --git a/common/version.tf b/common/version.tf index 4ab7a1f..f033f34 100644 --- a/common/version.tf +++ b/common/version.tf @@ -1,3 +1,3 @@ locals { - _module_version = "2.0.3" + _module_version = "2.0.4" } diff --git a/examples/full-cluster-tf-upgrade/1.28/ec2-keypair.tf b/examples/full-cluster-tf-upgrade/1.28/ec2-keypair.tf.obsolete similarity index 100% rename from examples/full-cluster-tf-upgrade/1.28/ec2-keypair.tf rename to examples/full-cluster-tf-upgrade/1.28/ec2-keypair.tf.obsolete diff --git a/examples/full-cluster-tf-upgrade/1.28/main.tf 
b/examples/full-cluster-tf-upgrade/1.28/main.tf index 989c423..8b0df68 100644 --- a/examples/full-cluster-tf-upgrade/1.28/main.tf +++ b/examples/full-cluster-tf-upgrade/1.28/main.tf @@ -161,7 +161,7 @@ resource "aws_launch_template" "eks-nodegroup" { name = format("%v%v-launch-template", local._prefixes["eks"], var.cluster_name) update_default_version = true # key_name = aws_key_pair.cluster_keypair.key_name - key_name = module.key_pair.key_pair_name + # key_name = module.key_pair.key_pair_name # vpc_security_group_ids = [aws_security_group.additional_eks_cluster_sg.id] vpc_security_group_ids = [aws_security_group.extra_cluster_sg.id] diff --git a/examples/full-cluster-tf-upgrade/1.28/securitygroup.ports.tf b/examples/full-cluster-tf-upgrade/1.28/securitygroup.ports.tf new file mode 100644 index 0000000..b6aa621 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.28/securitygroup.ports.tf @@ -0,0 +1,128 @@ +# See +# https://stackoverflow.com/questions/71902887/transport-error-while-dialing-dial-tcp-xx-xx-xx-xx15012-i-o-timeout-with-aws-e +# Ports needed to correctly install Istio for the error message: transport: Error while dialing dial tcp xx.xx.xx.xx15012: i/o timeout +# other ports here as needed +locals { + sg_additional_port = [ + { + component = "istio" + description = "Envoy admin port / outbound" + from_port = 15000 + to_port = 15001 + }, + { + component = "istio" + description = "Debug port" + from_port = 15004 + to_port = 15004 + }, + { + component = "istio" + description = "Envoy inbound" + from_port = 15006 + to_port = 15006 + }, + { + component = "istio" + description = "HBONE mTLS tunnel port / secure networks XDS and CA services (Plaintext)" + from_port = 15008 + to_port = 15010 + }, + { + component = "istio" + description = "XDS and CA services (TLS and mTLS)" + from_port = 15012 + to_port = 15012 + }, + { + component = "istio" + description = "Control plane monitoring" + from_port = 15014 + to_port = 15014 + }, + { + component = "istio" + 
description = "Webhook container port, forwarded from 443" + from_port = 15017 + to_port = 15017 + }, + { + component = "istio" + description = "Merged Prometheus telemetry from Istio agent, Envoy, and application, Health checks" + from_port = 15020 + to_port = 15021 + }, + { + component = "istio" + description = "DNS port" + from_port = 15053 + to_port = 15053 + }, + { + component = "istio" + description = "Envoy Prometheus telemetry" + from_port = 15090 + to_port = 15090 + }, + { + component = "istio" + description = "aws-load-balancer-controller" + from_port = 9443 + to_port = 9443 + }, + { + component = "cert-manager" + description = "cert-manager-webhook" + from_port = 10250 + to_port = 10250 + }, + ] + + sg_additional_ingress_rules = { + for ikey, ivalue in local.sg_additional_port : + "${ikey}_ingress" => { + description = ivalue.description + protocol = "tcp" + from_port = ivalue.from_port + to_port = ivalue.to_port + type = "ingress" + self = true + } + } + + sg_additional_egress_rules = { + for ekey, evalue in local.sg_additional_port : + "${ekey}_egress" => { + description = evalue.description + protocol = "tcp" + from_port = evalue.from_port + to_port = evalue.to_port + type = "egress" + self = true + } + } +} + +resource "aws_vpc_security_group_ingress_rule" "additional" { + for_each = { for k, v in local.sg_additional_ingress_rules : v.from_port => v } + security_group_id = aws_security_group.additional_eks_cluster_sg.id + + description = each.value.description + from_port = each.value.from_port + to_port = each.value.to_port + ip_protocol = each.value.protocol + referenced_security_group_id = each.value.self ?
aws_security_group.additional_eks_cluster_sg.id : null + # referenced_security_group_id = aws_security_group.all_worker_mgmt.id +} + +resource "aws_vpc_security_group_egress_rule" "additional" { + for_each = { for k, v in local.sg_additional_egress_rules : v.from_port => v } + security_group_id = aws_security_group.additional_eks_cluster_sg.id + + description = each.value.description + from_port = each.value.from_port + to_port = each.value.to_port + ip_protocol = each.value.protocol + referenced_security_group_id = each.value.self ? aws_security_group.additional_eks_cluster_sg.id : null + # referenced_security_group_id = aws_security_group.all_worker_mgmt.id +} diff --git a/examples/full-cluster-tf-upgrade/1.28/securitygroup.tf b/examples/full-cluster-tf-upgrade/1.28/securitygroup.tf index 1e6eebd..6e4555c 100644 --- a/examples/full-cluster-tf-upgrade/1.28/securitygroup.tf +++ b/examples/full-cluster-tf-upgrade/1.28/securitygroup.tf @@ -24,6 +24,7 @@ # * in port 443 from census on-prem, and 10/8 # * in port 10250 for kubectl logs from census on-prem, and 10/8 +# once setup, you cannot change any ports here resource "aws_security_group" "additional_eks_cluster_sg" { name = format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name) @@ -32,7 +33,7 @@ resource "aws_security_group" "additional_eks_cluster_sg" { local.common_tags, var.tags, var.application_tags, - tomap({ "Name" = format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name) }), + { "Name" = format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name) }, ) vpc_id = data.aws_vpc.eks_vpc.id @@ -65,8 +66,13 @@ resource "aws_security_group" "additional_eks_cluster_sg" { protocol = "-1" cidr_blocks = ["0.0.0.0/0"] } + + lifecycle { + ignore_changes = [ingress, egress] + } } +# once setup, you cannot change any ports here resource "aws_security_group" "all_worker_mgmt" { name = format("%v%v-all-worker-mgmt", local._prefixes["eks-security-group"],
var.cluster_name) @@ -75,7 +81,7 @@ resource "aws_security_group" "all_worker_mgmt" { local.common_tags, var.tags, var.application_tags, - tomap({ "Name" = format("%v%v-all-worker-mgmt", local._prefixes["eks-security-group"], var.cluster_name) }), + { "Name" = format("%v%v-all-worker-mgmt", local._prefixes["eks-security-group"], var.cluster_name) }, ) vpc_id = data.aws_vpc.eks_vpc.id @@ -93,6 +99,10 @@ resource "aws_security_group" "all_worker_mgmt" { protocol = "-1" cidr_blocks = ["0.0.0.0/0"] } + + lifecycle { + ignore_changes = [ingress, egress] + } } ## resource "aws_security_group" "cni_custom_sg" { @@ -124,6 +134,7 @@ resource "aws_security_group" "all_worker_mgmt" { ## } ## } +# once setup, you cannot change any ports here # attach to cluster create, nodegroups resource "aws_security_group" "extra_cluster_sg" { name = format("%v%v-extra", local._prefixes["eks-security-group"], var.cluster_name) @@ -167,4 +178,7 @@ resource "aws_security_group" "extra_cluster_sg" { protocol = "-1" cidr_blocks = ["0.0.0.0/0"] } + lifecycle { + ignore_changes = [ingress, egress] + } }