From b48734cd1b56a39e9fb4980195075b9501174870 Mon Sep 17 00:00:00 2001 From: badra001 Date: Fri, 3 Jan 2025 12:27:33 -0500 Subject: [PATCH] add brief guide --- .../full-cluster-tf-upgrade/1.30-1.31.diffs | 643 ++++++++++++++++++ .../README.upgrade-1.30-1.31.md | 26 + 2 files changed, 669 insertions(+) create mode 100644 examples/full-cluster-tf-upgrade/1.30-1.31.diffs create mode 100644 examples/full-cluster-tf-upgrade/README.upgrade-1.30-1.31.md diff --git a/examples/full-cluster-tf-upgrade/1.30-1.31.diffs b/examples/full-cluster-tf-upgrade/1.30-1.31.diffs new file mode 100644 index 0000000..9fe8a99 --- /dev/null +++ b/examples/full-cluster-tf-upgrade/1.30-1.31.diffs @@ -0,0 +1,643 @@ +diff -Nuar 1.31/addons/variables.addons.tf 1.30/addons/variables.addons.tf +--- 1.31/addons/variables.addons.tf 2025-01-02 10:50:40.202398021 -0500 ++++ 1.30/addons/variables.addons.tf 2024-11-22 08:43:20.512170919 -0500 +@@ -72,17 +72,6 @@ + "amazon-cloudwatch-observability" = "v2.1.2-eksbuild.1" + "eks-pod-identity-agent" = "v1.3.2-eksbuild.2" + } +- "1.31" = { +- "coredns" = "v1.11.4-eksbuild.1" +- "kube-proxy" = "v1.31.3-eksbuild.2" +- "vpc-cni" = "v1.19.2-eksbuild.1" +- "aws-ebs-csi-driver" = "v1.37.0-eksbuild.1" +- "aws-efs-csi-driver" = "v2.1.2-eksbuild.1" +- "adot" = "v0.109.0-eksbuild.2" +- "snapshot-controller" = "v8.1.0-eksbuild.2" +- "amazon-cloudwatch-observability" = "v2.6.0-eksbuild.1" +- "eks-pod-identity-agent" = "v1.3.4-eksbuild.1" +- } + } + } + +diff -Nuar 1.31/ATTIC/dns-zone.tf.cat.obsolete 1.30/ATTIC/dns-zone.tf.cat.obsolete +--- 1.31/ATTIC/dns-zone.tf.cat.obsolete 1969-12-31 19:00:00.000000000 -0500 ++++ 1.30/ATTIC/dns-zone.tf.cat.obsolete 2024-01-10 10:28:52.971964350 -0500 +@@ -0,0 +1,128 @@ ++locals { ++ cluster_domain_name = format("%v.%v", var.cluster_name, var.vpc_domain_name) ++ cluster_domain_description = format("%v EKS Cluster DNS Zone", var.cluster_name) ++# true for gov, fale for cat ++## aws_dns_infrastructure = false ++} ++ ++resource "aws_route53_zone" "cluster_domain" { ++ name = local.cluster_domain_name ++ comment = local.cluster_domain_description ++ force_destroy = false ++ ++ vpc { ++ vpc_id = data.aws_vpc.eks_vpc.id ++ vpc_region = local.region ++ } ++ ++ ## dynamic "vpc" { ++ ## for_each = true ? 
var.region_map : {} ++ ## iterator = r ++ ## content { ++ ## vpc_id = var.main_dns_vpcs[r.value] ++ ## vpc_region = r.value ++ ## } ++ ## } ++ ++ lifecycle { ++ ignore_changes = [vpc] ++ } ++ ++ tags = merge( ++ local.base_tags, ++ local.common_tags, ++ var.tags, ++ var.application_tags, ++ tomap({ "Name" = local.cluster_domain_name }), ++ ) ++ ++ # depends_on = [ aws_route53_vpc_association_authorization.west_cluster_domain, aws_route53_vpc_association_authorization.east_cluster_domain ] ++} ++ ++output "cluster_domain_name" { ++ description = "DNS Zone Name" ++ value = local.cluster_domain_name ++} ++ ++output "cluster_domain_id" { ++ description = "DNS Zone ID" ++ value = aws_route53_zone.cluster_domain.zone_id ++} ++ ++output "cluster_domain_ns" { ++ description = "DNS Zone Nameservers" ++ value = aws_route53_zone.cluster_domain.name_servers ++} ++ ++# now we need to add the NS records for the new zone to the parent zone ++data "aws_route53_zone" "parent" { ++ name = var.vpc_domain_name ++ private_zone = true ++} ++ ++resource "aws_route53_record" "cluster_domain" { ++ allow_overwrite = true ++ name = local.cluster_domain_name ++ type = "NS" ++ ttl = 900 ++ zone_id = data.aws_route53_zone.parent.zone_id ++ ++ records = aws_route53_zone.cluster_domain.name_servers ++} ++ ++## #--- ++## # associate to main do2-govcloud vpc1-services east and west for inbound resolution ++## # NOT in cat ++## #--- ++## provider "aws" { ++## alias = "east_main_dns" ++## region = local.aws_dns_infrastructure ? var.region_map["east"] : "" ++## profile = var.main_dns_profile ++## } ++## ++## provider "aws" { ++## alias = "west_main_dns" ++## region = local.aws_dns_infrastructure ? var.region_map["west"] : "" ++## profile = var.main_dns_profile ++## } ++## ++## # resource "aws_route53_vpc_association_authorization" "cluster_domain" { ++## # for_each = var.region_map ++## # ++## # zone_id = aws_route53_zone.cluster_domain.zone_id ++## # vpc_region = each.value ++## # vpc_id = var.main_dns_vpcs[each.value] ++## # } ++## ++## resource "aws_route53_vpc_association_authorization" "west_cluster_domain" { ++## for_each = local.aws_dns_infrastructure ? tomap({ "zone" = aws_route53_zone.cluster_domain }) : {} ++## zone_id = each.value.zone_id ++## vpc_region = "us-gov-west-1" ++## vpc_id = var.main_dns_vpcs["us-gov-west-1"] ++## } ++## ++## resource "aws_route53_vpc_association_authorization" "east_cluster_domain" { ++## for_each = local.aws_dns_infrastructure ? tomap({ "zone" = aws_route53_zone.cluster_domain }) : {} ++## zone_id = each.value.zone_id ++## vpc_region = "us-gov-east-1" ++## vpc_id = var.main_dns_vpcs["us-gov-east-1"] ++## } ++## ++## resource "aws_route53_zone_association" "west_cluster_domain" { ++## provider = aws.west_main_dns ++## for_each = local.aws_dns_infrastructure ? aws_route53_vpc_association_authorization.west_cluster_domain : {} ++## ++## zone_id = each.value.zone_id ++## vpc_id = each.value.vpc_id ++## vpc_region = each.value.vpc_region ++## } ++## ++## resource "aws_route53_zone_association" "east_cluster_domain" { ++## provider = aws.east_main_dns ++## for_each = local.aws_dns_infrastructure ? 
aws_route53_vpc_association_authorization.east_cluster_domain : {} ++## ++## zone_id = each.value.zone_id ++## vpc_id = each.value.vpc_id ++## vpc_region = each.value.vpc_region ++## } ++## +diff -Nuar 1.31/ATTIC/ec2-keypair.tf.obsolete 1.30/ATTIC/ec2-keypair.tf.obsolete +--- 1.31/ATTIC/ec2-keypair.tf.obsolete 1969-12-31 19:00:00.000000000 -0500 ++++ 1.30/ATTIC/ec2-keypair.tf.obsolete 2024-01-10 10:28:52.973964394 -0500 +@@ -0,0 +1,92 @@ ++locals { ++ keypair_name = format("ec2-ssh-%v%v", local._prefixes["eks"], var.cluster_name) ++ timestamp = formatdate("YYYYMMDD", time_static.timestamp.rfc3339) ++} ++ ++resource "time_static" "timestamp" {} ++ ++## # two-step process to create ++## # terraform apply -target=null_resource.generate_keypair ++## # terraform apply ++## # when done, add to git ++## # cd setup ++## # echo inf-ec2-keypair >> .gitignore ++## # git-secret add inf-ec2-keypair ++## # git-secret hide ++## # git add inf-ec2-keypair.{pub,secret} ++## # git commit -m'add ec2-keypair: inf-ec2-keypair' inf-ec2-keypair.{pub,secret} .gitignore ++## ++## # inf-keypair ++## resource "null_resource" "generate_keypair" { ++## provisioner "local-exec" { ++## command = "test -d setup || mkdir setup" ++## } ++## provisioner "local-exec" { ++## working_dir = "./setup" ++## command = "ssh-keygen -f ${local.keypair_name} -N '' -t rsa -b 2048 -C '${local.keypair_name}@${var.cluster_name}.${local.vpc_domain_name}'" ++## } ++## } ++## ++## resource "aws_key_pair" "cluster_keypair" { ++## key_name = local.keypair_name ++## public_key = file("setup/${local.keypair_name}.pub") ++## depends_on = [null_resource.generate_keypair] ++## } ++## ++## output "cluster_keypair" { ++## description = "EC2 keypair for EKS Cluster" ++## value = aws_key_pair.cluster_keypair.key_name ++## } ++ ++module "key_pair" { ++ source = "terraform-aws-modules/key-pair/aws" ++ ++ key_name = local.keypair_name ++ create_private_key = true ++ ++ tags = merge( ++ var.tags, ++ { ++ "Name" = local.keypair_name ++ "launch_time" = time_static.timestamp.rfc3339 ++ } ++ ) ++} ++ ++output "cluster_keypair" { ++ description = "EC2 Key Pair Name" ++ value = module.key_pair.key_pair_name ++} ++ ++resource "local_sensitive_file" "ssh_private_key" { ++ content = format("%v\n", module.key_pair.private_key_openssh) ++ directory_permission = "0700" ++ file_permission = "0600" ++ filename = format("%v/%v", null_resource.setup_directory.triggers.directory, local.keypair_name) ++} ++ ++resource "local_sensitive_file" "ssh_public_key" { ++ content = format("%v\n", module.key_pair.public_key_openssh) ++ directory_permission = "0700" ++ file_permission = "0600" ++ filename = format("%v/%v.pub", null_resource.setup_directory.triggers.directory, local.keypair_name) ++} ++ ++resource "local_file" "gitignore" { ++ content = format("%v\n", basename(local_sensitive_file.ssh_private_key.filename)) ++ directory_permission = "0700" ++ file_permission = "0600" ++ filename = format("%v/%v", null_resource.setup_directory.triggers.directory, ".gitignore") ++} ++ ++#resource "local_sensitive_file" "gitsecret_script" { ++# content = templatefile("${path.module}/templates/manage-git-secret.sh.tpl", { ++# ssh_key_directory = null_resource.setup_directory.triggers.directory ++# ssh_private_key_filename = local_sensitive_file.ssh_private_key.filename ++# ssh_public_key_filename = local_sensitive_file.ssh_public_key.filename ++# }) ++# directory_permission = "0700" ++# file_permission = "0755" ++# filename = format("%v/%v", null_resource.setup_directory.triggers.directory, 
"manage-git-secret.sh") ++#} ++# +diff -Nuar 1.31/charts.yml 1.30/charts.yml +--- 1.31/charts.yml 2025-01-03 11:00:25.628782336 -0500 ++++ 1.30/charts.yml 2024-09-20 13:24:36.697809494 -0400 +@@ -2,13 +2,13 @@ + documentation: "https://artifacthub.io/packages/helm/cluster-autoscaler/cluster-autoscaler" + name: "cluster-autoscaler" + repository: "https://kubernetes.github.io/autoscaler" +- version: "9.44.0" ++ version: "9.37.0" + use_remote: true + cert-manager: + documetation: "https://artifacthub.io/packages/helm/cert-manager/cert-manager" + name: "cert-manager" + repository: "https://charts.jetstack.io" +- version: "1.16.2" ++ version: "v1.15.3" + use_remote: true + metrics-server: + documentation: "https://artifacthub.io/packages/helm/bitnami/metrics-server" +@@ -16,6 +16,3 @@ + repository: "https://charts.bitnami.com/bitnami" + version: "7.2.14" + use_remote: true +-# new one, does not work yet +-# repository: "oci://registry-1.docker.io/bitnamicharts" +-# version: "7.3.0" +diff -Nuar 1.31/common-services/cluster-autoscaler/cluster-autoscaler.tf 1.30/common-services/cluster-autoscaler/cluster-autoscaler.tf +--- 1.31/common-services/cluster-autoscaler/cluster-autoscaler.tf 2025-01-02 11:26:41.167720707 -0500 ++++ 1.30/common-services/cluster-autoscaler/cluster-autoscaler.tf 2024-10-02 11:04:51.735671552 -0400 +@@ -38,7 +38,7 @@ + profile = var.profile + application_list = [] + application_name = format("eks/%v", var.cluster_name) +- image_config = [for k, v in local.images_settings : v if(v.enabled && k == "cluster-autoscaler")] ++ image_config = [for k, v in local.images_settings : v if (v.enabled && k == "cluster-autoscaler")] + force_delete = true + + enable_lifecycle_policy = true +diff -Nuar 1.31/common-services/main.tf 1.30/common-services/main.tf +--- 1.31/common-services/main.tf 2025-01-03 10:20:49.160389566 -0500 ++++ 1.30/common-services/main.tf 2024-09-27 14:17:46.578373821 -0400 +@@ -166,11 +166,11 @@ + # } + set { + name = "startupapicheck.image.repository" +- value = split(":", local.image_output["cert-manager-startupapicheck"].dest_full_path)[0] ++ value = split(":", local.image_output["cert-manager-ctl"].dest_full_path)[0] + } + set { + name = "startupapicheck.image.tag" +- value = local.image_output["cert-manager-startupapicheck"].tag ++ value = local.image_output["cert-manager-ctl"].tag + } + + # timeout = 180 +diff -Nuar 1.31/common-services/variables.common-services.auto.tfvars 1.30/common-services/variables.common-services.auto.tfvars +--- 1.31/common-services/variables.common-services.auto.tfvars 2025-01-03 11:42:21.351123835 -0500 ++++ 1.30/common-services/variables.common-services.auto.tfvars 2024-09-20 13:18:47.670621009 -0400 +@@ -1,4 +1,4 @@ +-istio_tag = "1.23.4" ++istio_tag = "1.23.2" + tls_crt_b64 = "" + tls_crt_contents = "" + tls_crt_file = "" +diff -Nuar 1.31/common-services/variables.images.auto.tfvars 1.30/common-services/variables.images.auto.tfvars +--- 1.31/common-services/variables.images.auto.tfvars 2025-01-02 11:16:58.637256722 -0500 ++++ 1.30/common-services/variables.images.auto.tfvars 2024-09-27 14:20:28.912247294 -0400 +@@ -20,13 +20,13 @@ + "cert-manager" = { + name = "cert-manager" + repository = "https://charts.jetstack.io" +- version = "v1.16.4" ++ version = "v1.14.4" + use_remote = true + } + "metrics-server" = { + name = "metrics-server" + repository = "https://charts.bitnami.com/bitnami" +- version = "7.3.0" ++ version = "6.13.1" + use_remote = true + } + #helm repo add prometheus-community 
https://prometheus-community.github.io/helm-charts +@@ -45,7 +45,7 @@ + source_registry = "quay.io" + source_image = "jetstack/cert-manager-controller" + source_tag = null +- tag = "v1.16.2" ++ tag = "v1.14.4" + enabled = true + } + "cert-manager-cainjector" = { +@@ -55,7 +55,7 @@ + source_registry = "quay.io" + source_image = "jetstack/cert-manager-cainjector" + source_tag = null +- tag = "v1.16.2" ++ tag = "v1.14.4" + enabled = true + } + "cert-manager-webhook" = { +@@ -65,7 +65,7 @@ + source_registry = "quay.io" + source_image = "jetstack/cert-manager-webhook" + source_tag = null +- tag = "v1.16.2" ++ tag = "v1.14.4" + enabled = true + } + "cert-manager-ctl" = { +@@ -75,7 +75,7 @@ + source_registry = "quay.io" + source_image = "jetstack/cert-manager-ctl" + source_tag = null +- tag = "v1.16.2" ++ tag = "v1.14.4" + enabled = true + } + +@@ -87,8 +87,8 @@ + source_registry = "docker.io" + source_image = "bitnami/metrics-server" + source_tag = null +- tag = "0.7.2" +- # tag = "0.7.2-debian-12-r8" ++ tag = "0.7.0" ++ # tag = "0.7.0-debian-12-r8" + enabled = true + } + +@@ -100,7 +100,7 @@ + source_registry = "docker.io" + source_image = "istio/operator" + source_tag = null +- tag = "1.24.2" ++ tag = "1.20.3" + enabled = true + } + "istio-pilot" = { +@@ -110,7 +110,7 @@ + source_registry = "docker.io" + source_image = "istio/pilot" + source_tag = null +- tag = "1.24.2" ++ tag = "1.20.3" + enabled = true + } + "istio-proxyv2" = { +@@ -120,7 +120,7 @@ + source_registry = "docker.io" + source_image = "istio/proxyv2" + source_tag = null +- tag = "1.24.2" ++ tag = "1.20.3" + enabled = true + } + +@@ -132,8 +132,8 @@ + source_registry = "docker.io" + source_image = "bitnami/prometheus" + source_tag = null +- tag = "3.0.1" +- # tag = "3.0.1-debian-12-r1" ++ tag = "2.50.1" ++ # tag = "2.50.1-debian-12-r1" + enabled = true + } + "alertmanager" = { +@@ -144,7 +144,7 @@ + source_image = "bitnami/alertmanager" + source_tag = null + tag = "0.27.0" +- # tag = "0.27.0-debian-12-r28" ++ # tag = "0.27.0-debian-12-r1" + enabled = true + } + +@@ -156,8 +156,8 @@ + source_registry = "docker.io" + source_image = "bitnami/prometheus-operator" + source_tag = null +- tag = "0.79.2" +- # tag = "0.79.2-debian-12-r0" ++ tag = "0.72.0" ++ # tag = "0.72.0-debian-12-r1" + enabled = true + } + } +diff -Nuar 1.31/images.yml 1.30/images.yml +--- 1.31/images.yml 2025-01-03 11:42:41.133801082 -0500 ++++ 1.30/images.yml 2024-10-02 11:04:51.736671569 -0400 +@@ -7,7 +7,7 @@ + source_image: "autoscaling/cluster-autoscaler" + source_tag: null + enabled: true +- tag: "v1.32.0" ++ tag: "v1.30.2" + cert-manager-controller: + documentation: "https://cert-manager.io/docs/releases/" + name: "cert-manager-controller" +@@ -17,7 +17,7 @@ + source_image: "jetstack/cert-manager-controller" + source_tag: null + enabled: true +- tag: "v1.16.2" ++ tag: "v1.15.3" + cert-manager-cainjector: + documentation: "https://cert-manager.io/docs/releases/" + name: "cert-manager-cainjector" +@@ -27,7 +27,7 @@ + source_image: "jetstack/cert-manager-cainjector" + source_tag: null + enabled: true +- tag: "v1.16.2" ++ tag: "v1.15.3" + cert-manager-webhook: + documentation: "https://cert-manager.io/docs/releases/" + name: "cert-manager-webhook" +@@ -37,7 +37,7 @@ + source_image: "jetstack/cert-manager-webhook" + source_tag: null + enabled: true +- tag: "v1.16.2" ++ tag: "v1.15.3" + cert-manager-ctl: + documentation: "https://cert-manager.io/docs/releases/" + name: "cert-manager-ctl" +@@ -46,19 +46,8 @@ + source_registry: "quay.io" + source_image: 
"jetstack/cert-manager-ctl" + source_tag: null +- enabled: false +-# tag: "v1.16.2" +- tag: "v1.14.7" +-cert-manager-startupapicheck: +- documentation: "https://cert-manager.io/docs/releases/" +- name: "cert-manager-startupapicheck" +- image: "quay.io/jetstack/cert-manager-startupapicheck" +- dest_path: null +- source_registry: "quay.io" +- source_image: "jetstack/cert-manager-startupapicheck" +- source_tag: null + enabled: true +- tag: "v1.16.2" ++ tag: "v1.14.7" + metrics-server: + documentation: "https://hub.docker.com/r/bitnami/metrics-server/tags" + name: "metrics-server" +@@ -78,7 +67,7 @@ + source_image: "istio/operator" + source_tag: null + enabled: true +- tag: "1.23.4" ++ tag: "1.23.2" + istio-pilot: + documentation: "https://istio.io/latest/docs/releases/supported-releases" + name: "istio/pilot" +@@ -88,7 +77,7 @@ + source_image: "istio/pilot" + source_tag: null + enabled: true +- tag: "1.23.4" ++ tag: "1.23.2" + istio-proxyv2: + documentation: "https://istio.io/latest/docs/releases/supported-releases" + name: "istio/proxyv2" +@@ -98,7 +87,7 @@ + source_image: "istio/proxyv2" + source_tag: null + enabled: true +- tag: "1.23.4" ++ tag: "1.23.2" + prometheus: + documentation: "https://hub.docker.com/r/bitnami/prometheus/tags" + name: "prometheus" +@@ -108,7 +97,7 @@ + source_image: "bitnami/prometheus" + source_tag: null + enabled: true +- tag: "3.0.1" ++ tag: "2.54.1" + prometheus-operator: + documentation: "https://hub.docker.com/r/bitnami/prometheus-operator/tags" + name: "prometheus-operator" +@@ -118,7 +107,7 @@ + source_image: "bitnami/prometheus-operator" + source_tag: null + enabled: true +- tag: "0.79.2" ++ tag: "0.77.0" + alertmanager: + documentation: "https://hub.docker.com/r/bitnami/alertmanager/tags" + name: "alertmanager" +diff -Nuar 1.31/README.md 1.30/README.md +--- 1.31/README.md 2025-01-03 12:10:42.242314393 -0500 ++++ 1.30/README.md 2024-10-02 11:04:51.735671552 -0400 +@@ -1,6 +1,6 @@ +-# EKS Full Cluster Example 1.31 ++# EKS Full Cluster Example 1.30 + +-This is for deploying an EKS cluster with 1.31. ++This is for deploying an EKS cluster with 1.30. + + ## About + +@@ -52,7 +52,7 @@ + ``` + + We add the tag `"kubernetes.io/cluster/{cluster_name}" = "shared"` in order for the node groups to pick up the +-cluster subnets. This is on the new `container` subnet. ++cluster subnets. This is on the new `container` ubnet. + + For creating a service which uses load balancers (ELB, ALB, or NLB), the last tag listed here is needed + `"kubernetes.io/role/internal-elb" = 1`. This is only one tag for all EKS, not one per cluster, and it should apply +@@ -75,7 +75,7 @@ + + ```hcl + cluster_name = "org-project-env" +-cluster_version = "1.31" ++cluster_version = "1.28" + region = "us-gov-east-1" + domain = "org-project-env.env.domain.census.gov" + contact_email = "org-project-env-group-mailing-list@census.gov" +@@ -291,8 +291,26 @@ + ## Common Services + ### Certificate Authority + +-This is now handled by the `acmpca-eks-cert-manager` submodule of `aws-certificate`, which uses the ACM PCA to obtain +-a signed certificate. No actions are needed to get a CSR signed by TCO through Remedy. ++Set the download to `false` ++ ++```shell ++# ca-cert.tf ++ ca_cert_download = false ++``` ++ ++Do the first apply, which generates the key and csr. You'll need to then submit the CSR. (directions generated) ++ ++```shell ++tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." 
$3}' |sed -e 's/"//g')
++```
++
++
++```shell
++# terraform taint null_resource.ca_cert[0]
++# # (wait for submitted cert to be ready)
++tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g')
++tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g')
++```
+
+ ### Rest of Setup
+
+@@ -458,8 +476,3 @@
+
+ - 1.0.0 -- 2023-10-27
+   - setup for 1.28, ready for edits
+-
+-- 1.1.0 -- 2025-01-03
+-  - remove old certificate stuff
+-  - updated to 1.31
+-  - still needs some updates
+diff -Nuar 1.31/settings.auto.tfvars.example 1.30/settings.auto.tfvars.example
+--- 1.31/settings.auto.tfvars.example 2025-01-02 11:26:10.964155032 -0500
++++ 1.30/settings.auto.tfvars.example 2024-09-20 12:21:43.314385818 -0400
+@@ -3,7 +3,7 @@
+ # domain_name is removed
+
+ cluster_name = "{org}-{project}-{env}"
+-cluster_version = "1.31"
++cluster_version = "1.30"
+ region = "us-gov-east-1"
+ contact_email = "" ## enter valid @census.gov email for the customer's group contact list
+ domain = "NAME" ## set to correct domain if using a shared vpc
+diff -Nuar 1.31/variables.addons.tf 1.30/variables.addons.tf
+--- 1.31/variables.addons.tf 2025-01-02 10:50:40.202398021 -0500
++++ 1.30/variables.addons.tf 2024-11-22 08:43:20.512170919 -0500
+@@ -72,17 +72,6 @@
+ "amazon-cloudwatch-observability" = "v2.1.2-eksbuild.1"
+ "eks-pod-identity-agent" = "v1.3.2-eksbuild.2"
+ }
+- "1.31" = {
+- "coredns" = "v1.11.4-eksbuild.1"
+- "kube-proxy" = "v1.31.3-eksbuild.2"
+- "vpc-cni" = "v1.19.2-eksbuild.1"
+- "aws-ebs-csi-driver" = "v1.37.0-eksbuild.1"
+- "aws-efs-csi-driver" = "v2.1.2-eksbuild.1"
+- "adot" = "v0.109.0-eksbuild.2"
+- "snapshot-controller" = "v8.1.0-eksbuild.2"
+- "amazon-cloudwatch-observability" = "v2.6.0-eksbuild.1"
+- "eks-pod-identity-agent" = "v1.3.4-eksbuild.1"
+- }
+ }
+ }
+
diff --git a/examples/full-cluster-tf-upgrade/README.upgrade-1.30-1.31.md b/examples/full-cluster-tf-upgrade/README.upgrade-1.30-1.31.md
new file mode 100644
index 0000000..703d331
--- /dev/null
+++ b/examples/full-cluster-tf-upgrade/README.upgrade-1.30-1.31.md
@@ -0,0 +1,47 @@
+# EKS Upgrade 1.30 to 1.31
+
+Copy the following files from `1.31/{path}` to `eks-{clustername}/{path}`:
+
+* variables.addons.tf
+* charts.yml
+* images.yml
+* common-services/main.tf
+* common-services/variables.common-services.auto.tfvars
+
+Update `cluster_version` from 1.30 to 1.31 in:
+
+* settings.auto.tfvars
+
+Run `terraform apply` in each directory where files changed (an example session is sketched in the appendix below):
+
+* (main)
+* addons/
+* common-services/
+
+TODO: there is an established process for upgrading the running cluster's control-plane version itself; track it down and document it here.
+
+# CHANGELOG
+
+- 1.31.0 -- 2025-01-03
+  - created brief guide
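+
+# Appendix: Example Session
+
+A minimal sketch of the steps above, under these assumptions: the cluster directory is named `eks-mycluster` (a placeholder; substitute your own), the `1.31` example tree sits alongside it, and plain `terraform` is invoked rather than a site-specific wrapper such as `tf-apply`.
+
+```shell
+# copy the updated files from the 1.31 example into the cluster directory
+for f in variables.addons.tf charts.yml images.yml \
+         common-services/main.tf \
+         common-services/variables.common-services.auto.tfvars; do
+  cp "1.31/${f}" "eks-mycluster/${f}"
+done
+
+# bump the control-plane version in settings.auto.tfvars (GNU sed)
+sed -i 's/^cluster_version.*/cluster_version = "1.31"/' eks-mycluster/settings.auto.tfvars
+
+# apply in each directory where files changed
+(cd eks-mycluster && terraform apply)
+(cd eks-mycluster/addons && terraform apply)
+(cd eks-mycluster/common-services && terraform apply)
+```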