From 5d9ea95e266d5837c27e509b28b36dbc018e6797 Mon Sep 17 00:00:00 2001
From: badra001
Date: Wed, 10 Nov 2021 10:39:10 -0500
Subject: [PATCH] initial setup of examples

---
 .../alb-controller/README.md | 77 ++ .../charts/alb-controller-crds/.helmignore | 23 + .../charts/alb-controller-crds/Chart.yaml | 6 + .../templates/_helpers.tpl | 62 + .../alb-controller-crds/templates/crds.yaml | 452 ++++++++ .../charts/alb-controller-crds/values.yaml | 0 .../aws-load-balancer-controller/.helmignore | 24 + .../aws-load-balancer-controller/Chart.yaml | 22 + .../aws-load-balancer-controller/README.md | 197 ++++ .../ci/extra_args | 1 + .../ci/values.yaml | 7 + .../crds/crds.yaml | 452 ++++++++ .../aws-load-balancer-controller/crds/t | 2 + .../templates/NOTES.txt | 1 + .../templates/_helpers.tpl | 93 ++ .../templates/deployment.yaml | 180 +++ .../templates/pdb.yaml | 13 + .../templates/rbac.yaml | 80 ++ .../templates/service.yaml | 13 + .../templates/serviceaccount.yaml | 14 + .../templates/webhook.yaml | 170 +++ .../aws-load-balancer-controller/test.yaml | 181 +++ .../aws-load-balancer-controller/values.yaml | 196 ++++ .../alb-controller/crds.tf.0.14.8 | 663 +++++++++++ .../alb-controller/data.eks.tf | 15 + .../alb-controller/ecr.tf | 1 + .../alb-controller/iam_policy.json | 207 ++++ .../alb-controller/locals.tf | 4 + .../alb-controller/main.tf | 61 + .../alb-controller/policy.tf | 241 ++++ .../alb-controller/prefixes.tf | 1 + .../alb-controller/providers.tf | 1 + .../alb-controller/role.tf | 38 + .../alb-controller/s | 0 .../alb-controller/serviceaccount.tf | 17 + .../alb-controller/variables.alb.tf | 5 + .../alb-controller/variables.vpc.tf | 150 +++ .../alb-controller/version.tf | 1 + .../dnsutils/README.md | 10 + .../dnsutils/copy_image.sh | 1 + .../dnsutils/copy_images.tf | 71 ++ .../dnsutils/data.eks.tf | 15 + .../dnsutils/history.1 | 1000 +++++++++++++++++ .../dnsutils/locals.tf | 4 + .../dnsutils/main.tf | 100 ++ .../dnsutils/output.log | 41 + .../dnsutils/pod.txt | 71 ++ .../dnsutils/pod.yaml | 14 + .../dnsutils/prefixes.tf | 1 + .../dnsutils/providers.tf | 1 + .../dnsutils/variables.eks.tf | 1 + .../dnsutils/variables.vpc.tf | 1 + .../dnsutils/version.tf | 1 + .../kube-bench/README.md | 10 + .../kube-bench/copy_image.sh | 1 + .../kube-bench/copy_images.tf | 35 + .../kube-bench/data.eks.tf | 15 + .../kube-bench/history.1 | 1000 +++++++++++++++++ .../kube-bench/job-eks.yaml | 37 + .../kube-bench/job-eks.yaml.orig | 37 + .../kube-bench/locals.tf | 4 + .../kube-bench/main.tf | 56 + .../kube-bench/output.log | 41 + .../kube-bench/pod.txt | 71 ++ .../kube-bench/prefixes.tf | 1 + .../kube-bench/providers.tf | 1 + .../kube-bench/variables.eks.tf | 1 + .../kube-bench/variables.vpc.tf | 1 + .../kube-bench/version.tf | 1 + .../sample-alb/README.md | 24 + .../sample-alb/copy_image.sh | 1 + .../sample-alb/copy_images.tf | 53 + .../sample-alb/data.eks.tf | 15 + .../sample-alb/ecr.tf | 1 + .../sample-alb/locals.tf | 4 + .../sample-alb/main.tf | 126 +++ .../sample-alb/prefixes.tf | 1 + .../sample-alb/providers.tf | 1 + .../sample-alb/variables.sample.tf | 12 + .../sample-alb/variables.vpc.tf | 1 + .../sample-alb/version.tf | 1 + .../sample-elb/README.md | 74 ++ .../sample-elb/copy_image.sh | 1 + .../sample-elb/copy_images.tf | 48 + .../sample-elb/data.eks.tf | 15 + .../sample-elb/ecr.tf | 1 + .../sample-elb/locals.tf | 4 + .../sample-elb/main.tf | 103 ++ .../sample-elb/prefixes.tf | 1 + .../sample-elb/providers.tf | 1 + .../sample-elb/variables.sample.tf | 12 + .../sample-elb/variables.vpc.tf | 1 + 
.../sample-elb/version.tf | 1 + .../sample-istio/README.md | 70 ++ .../sample-istio/charts/my-nginx/.helmignore | 23 + .../sample-istio/charts/my-nginx/Chart.yaml | 6 + .../charts/my-nginx/templates/_helpers.tpl | 62 + .../my-nginx/templates/certificate.yaml | 16 + .../charts/my-nginx/templates/deployment.yaml | 61 + .../charts/my-nginx/templates/gateway.yaml | 27 + .../charts/my-nginx/templates/hpa.yaml | 28 + .../charts/my-nginx/templates/service.yaml | 15 + .../my-nginx/templates/serviceaccount.yaml | 12 + .../my-nginx/templates/virtualservice.yaml | 25 + .../sample-istio/charts/my-nginx/values.yaml | 75 ++ .../sample-istio/copy_image.sh | 1 + .../sample-istio/copy_images.tf | 37 + .../sample-istio/data.eks.tf | 15 + .../sample-istio/ecr-login.txt | 1 + .../sample-istio/kubeconfig.tf | 29 + .../sample-istio/locals.tf | 4 + .../sample-istio/main.tf | 49 + .../sample-istio/prefixes.tf | 1 + .../sample-istio/providers.tf | 1 + .../sample-istio/setup/kube.config | 53 + .../sample-istio/variables.eks.tf | 1 + .../sample-istio/variables.sample.tf | 5 + .../sample-istio/version.tf | 1 + .../sample-nlb/README.md | 57 + .../sample-nlb/copy_image.sh | 1 + .../sample-nlb/copy_images.tf | 48 + .../sample-nlb/data.eks.tf | 15 + .../sample-nlb/ecr.tf | 1 + .../sample-nlb/locals.tf | 4 + .../sample-nlb/main.tf | 104 ++ .../sample-nlb/prefixes.tf | 1 + .../sample-nlb/providers.tf | 1 + .../sample-nlb/variables.sample.tf | 12 + .../sample-nlb/variables.vpc.tf | 1 + .../sample-nlb/version.tf | 1 + examples/full-cluster/.gitignore | 4 + examples/full-cluster/.terraform-docs.yml | 44 + examples/full-cluster/OFF/empty/locals.tf | 4 + examples/full-cluster/OFF/empty/prefixes.tf | 1 + examples/full-cluster/OFF/empty/test.tf | 5 + examples/full-cluster/OFF/empty/version.tf | 1 + examples/full-cluster/README.md | 419 +++++++ examples/full-cluster/ROLES.md | 119 ++ .../aws-auth/config_map.aws-auth.yaml.tpl | 17 + examples/full-cluster/aws-auth/data.eks.tf | 15 + examples/full-cluster/aws-auth/kubeconfig.tf | 29 + .../full-cluster/aws-auth/patch-aws-auth.tf | 135 +++ examples/full-cluster/aws-auth/prefixes.tf | 1 + examples/full-cluster/aws-auth/providers.tf | 1 + examples/full-cluster/aws-auth/region.tf | 4 + .../aws-auth/settings.aws-auth.tf | 11 + examples/full-cluster/aws-auth/tf-run.data | 6 + .../aws-auth/variables.aws-auth.tf | 23 + .../full-cluster/aws-auth/variables.eks.tf | 1 + .../full-cluster/aws-auth/variables.vpc.tf | 1 + examples/full-cluster/aws-auth/version.tf | 1 + examples/full-cluster/bin/copy_image.sh | 324 ++++++ .../bin/fix-terminating-namespace.sh | 29 + examples/full-cluster/bin/show-k8s-things.sh | 7 + .../cluster-roles/.terraform-docs.yml | 44 + examples/full-cluster/cluster-roles/README.md | 236 ++++ .../full-cluster/cluster-roles/RESULTS.md | 41 + examples/full-cluster/cluster-roles/cm.tf.off | 6 + .../full-cluster/cluster-roles/data.eks.tf | 15 + .../cluster-roles/dba-clusterrole.tf | 24 + .../cluster-roles/dba-rolebinding.tf | 40 + .../full-cluster/cluster-roles/dba.iam.tf | 113 ++ .../cluster-roles/deployer-clusterrole.tf | 41 + .../cluster-roles/deployer-rolebinding.tf | 64 ++ .../cluster-roles/deployer.iam.tf | 132 +++ .../full-cluster/cluster-roles/kubeconfig.tf | 29 + examples/full-cluster/cluster-roles/locals.tf | 11 + examples/full-cluster/cluster-roles/main.tf | 30 + .../full-cluster/cluster-roles/prefixes.tf | 1 + .../full-cluster/cluster-roles/providers.tf | 1 + examples/full-cluster/cluster-roles/region.tf | 4 + .../cluster-roles/remote_state.yml | 9 + 
.../cluster-roles/variables.eks.tf | 1 + .../full-cluster/cluster-roles/variables.tf | 85 ++ .../full-cluster/cluster-roles/version.tf | 1 + .../full-cluster/common-services/.gitignore | 1 + .../common-services/README.certs.md | 5 + .../full-cluster/common-services/README.md | 84 ++ .../full-cluster/common-services/ca-cert.tf | 119 ++ .../charts/cluster-autoscaler/.helmignore | 23 + .../charts/cluster-autoscaler/Chart.yaml | 20 + .../charts/cluster-autoscaler/README.md | 415 +++++++ .../cluster-autoscaler/README.md.gotmpl | 335 ++++++ .../cluster-autoscaler/templates/NOTES.txt | 18 + .../cluster-autoscaler/templates/_helpers.tpl | 87 ++ .../templates/clusterrole.yaml | 150 +++ .../templates/clusterrolebinding.yaml | 16 + .../templates/deployment.yaml | 256 +++++ .../cluster-autoscaler/templates/pdb.yaml | 15 + .../templates/podsecuritypolicy.yaml | 46 + .../priority-expander-configmap.yaml | 17 + .../templates/prometheusrule.yaml | 15 + .../cluster-autoscaler/templates/role.yaml | 46 + .../templates/rolebinding.yaml | 16 + .../cluster-autoscaler/templates/secret.yaml | 20 + .../cluster-autoscaler/templates/service.yaml | 36 + .../templates/servicemonitor.yaml | 24 + .../charts/cluster-autoscaler/values.yaml | 339 ++++++ .../.helmignore | 23 + .../Chart.yaml | 24 + .../templates/_helpers.tpl | 62 + .../templates/ca-key-pair.yaml | 8 + .../templates/clusterissuer.yaml | 7 + .../values.yaml | 6 + .../charts/istio-operator/Chart.yaml | 12 + .../istio-operator/crds/crd-operator.yaml | 48 + .../istio-operator/files/gen-operator.yaml | 220 ++++ .../istio-operator/templates/clusterrole.yaml | 115 ++ .../templates/clusterrole_binding.yaml | 13 + .../charts/istio-operator/templates/crds.yaml | 6 + .../istio-operator/templates/deployment.yaml | 51 + .../istio-operator/templates/namespace.yaml | 8 + .../istio-operator/templates/service.yaml | 16 + .../templates/service_account.yaml | 12 + .../charts/istio-operator/values.yaml | 29 + .../istio-peerauthentication/.helmignore | 23 + .../istio-peerauthentication/Chart.yaml | 24 + .../templates/_helpers.tpl | 62 + .../templates/peerauthentication.yaml | 9 + .../istio-peerauthentication/values.yaml | 0 .../charts/istio-profile/.helmignore | 23 + .../charts/istio-profile/Chart.yaml | 6 + .../istio-profile/templates/_helpers.tpl | 62 + .../templates/istiooperator.yaml | 188 ++++ .../charts/istio-profile/values.yaml | 44 + .../.helmignore | 23 + .../self-signed-certificate-issuer/Chart.yaml | 6 + .../templates/_helpers.tpl | 62 + .../templates/ca-issuer.yaml | 8 + .../templates/selfsigned-ca.yaml | 17 + .../templates/selfsigned-clusterissuer.yaml | 7 + .../values.yaml | 0 .../vault-certificate-issuer/.helmignore | 23 + .../vault-certificate-issuer/Chart.yaml | 24 + .../templates/_helpers.tpl | 62 + .../templates/app-role-issuer.yaml | 18 + .../templates/app-role-secret.yaml | 10 + .../templates/service-account-issuer.yaml | 20 + .../templates/token-issuer.yaml | 15 + .../templates/token-secret.yaml | 10 + .../vault-certificate-issuer/values.yaml | 47 + .../common-services/copy_image.sh | 1 + .../common-services/copy_images.tf | 88 ++ .../full-cluster/common-services/data.eks.tf | 15 + examples/full-cluster/common-services/dns.tf | 25 + .../common-services/kubeconfig.tf | 29 + .../full-cluster/common-services/locals.tf | 19 + examples/full-cluster/common-services/main.tf | 399 +++++++ .../full-cluster/common-services/prefixes.tf | 1 + .../full-cluster/common-services/providers.tf | 1 + .../full-cluster/common-services/region.tf | 4 + 
.../common-services/remote_state.yml | 9 + .../test-cluster-autoscaling.json | 24 + .../full-cluster/common-services/tf-run.data | 27 + .../variables.common-services.tf | 209 ++++ .../common-services/variables.eks.tf | 1 + .../common-services/variables.vpc.tf | 1 + .../full-cluster/common-services/version.tf | 1 + examples/full-cluster/data.eks.tf | 18 + examples/full-cluster/dns-zone.tf | 42 + examples/full-cluster/ebs-encryption.tf | 81 ++ examples/full-cluster/ec2-keypair.tf | 36 + examples/full-cluster/efs/README.efs.md | 81 ++ examples/full-cluster/efs/README.md | 167 +++ examples/full-cluster/efs/copy_image.sh | 1 + examples/full-cluster/efs/copy_images.tf | 57 + examples/full-cluster/efs/data.eks.tf | 15 + examples/full-cluster/efs/ecr.tf | 57 + examples/full-cluster/efs/efs.tf | 26 + examples/full-cluster/efs/kubeconfig.tf | 29 + examples/full-cluster/efs/locals.tf | 19 + examples/full-cluster/efs/main.tf | 125 +++ .../full-cluster/efs/persistent-volume.tf | 19 + examples/full-cluster/efs/policy.tf | 55 + examples/full-cluster/efs/prefixes.tf | 1 + examples/full-cluster/efs/providers.tf | 1 + examples/full-cluster/efs/region.tf | 4 + examples/full-cluster/efs/role.tf | 48 + examples/full-cluster/efs/tf-run.data | 7 + examples/full-cluster/efs/variables.efs.tf | 37 + examples/full-cluster/efs/variables.eks.tf | 1 + examples/full-cluster/efs/variables.vpc.tf | 1 + examples/full-cluster/efs/version.tf | 1 + examples/full-cluster/eks-console-access.tf | 70 ++ examples/full-cluster/group.tf | 13 + .../irsa-roles/cluster-autoscaler/data.eks.tf | 1 + .../irsa-roles.autoscale.tf.off | 63 ++ .../irsa-roles/cluster-autoscaler/locals.tf | 19 + .../irsa-roles/cluster-autoscaler/policy.tf | 23 + .../irsa-roles/cluster-autoscaler/prefixes.tf | 1 + .../cluster-autoscaler/providers.tf | 1 + .../irsa-roles/cluster-autoscaler/region.tf | 3 + .../cluster-autoscaler/remote_state.yml | 9 + .../irsa-roles/cluster-autoscaler/role.tf | 39 + .../cluster-autoscaler/service_account.tf | 11 + .../irsa-roles/cluster-autoscaler/tf-run.data | 6 + .../cluster-autoscaler/variables.eks.tf | 1 + .../cluster-autoscaler/variables.irsa.tf | 1 + .../cluster-autoscaler/variables.tags.tf | 1 + .../irsa-roles/cluster-autoscaler/version.tf | 1 + examples/full-cluster/irsa-roles/data.eks.tf | 15 + examples/full-cluster/irsa-roles/prefixes.tf | 1 + examples/full-cluster/irsa-roles/providers.tf | 1 + .../full-cluster/irsa-roles/remote_state.yml | 9 + examples/full-cluster/irsa-roles/tf-run.data | 7 + .../full-cluster/irsa-roles/variables.eks.tf | 1 + .../full-cluster/irsa-roles/variables.irsa.tf | 9 + .../full-cluster/irsa-roles/variables.tags.tf | 1 + examples/full-cluster/irsa-roles/version.tf | 1 + examples/full-cluster/kubeconfig.tf | 29 + examples/full-cluster/locals.tf | 4 + examples/full-cluster/main.tf | 219 ++++ examples/full-cluster/oidc.tf | 32 + examples/full-cluster/outputs.tf | 58 + examples/full-cluster/policy.tf | 186 +++ examples/full-cluster/prefixes.tf | 34 + examples/full-cluster/providers.tf | 19 + examples/full-cluster/role.tf | 162 +++ examples/full-cluster/saml.tf | 26 + examples/full-cluster/securitygroup.tf | 88 ++ .../full-cluster/settings.auto.tfvars.example | 10 + examples/full-cluster/setup-env.sh | 6 + .../templates/node-private-userdata.tmpl | 9 + examples/full-cluster/tf-run.data | 23 + examples/full-cluster/variables.eks.tf | 58 + examples/full-cluster/variables.tags.tf | 9 + examples/full-cluster/variables.vpc.tf | 61 + examples/full-cluster/version.tf | 4 + 328 files changed, 16495 
insertions(+) create mode 100644 examples/established-cluster-examples/alb-controller/README.md create mode 100644 examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/.helmignore create mode 100644 examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/Chart.yaml create mode 100644 examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/templates/_helpers.tpl create mode 100644 examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/templates/crds.yaml create mode 100644 examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/values.yaml create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/.helmignore create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/Chart.yaml create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/README.md create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/ci/extra_args create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/ci/values.yaml create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/crds/crds.yaml create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/crds/t create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/NOTES.txt create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/_helpers.tpl create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/deployment.yaml create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/pdb.yaml create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/rbac.yaml create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/service.yaml create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/serviceaccount.yaml create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/webhook.yaml create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/test.yaml create mode 100644 examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/values.yaml create mode 100644 examples/established-cluster-examples/alb-controller/crds.tf.0.14.8 create mode 100644 examples/established-cluster-examples/alb-controller/data.eks.tf create mode 120000 examples/established-cluster-examples/alb-controller/ecr.tf create mode 100644 examples/established-cluster-examples/alb-controller/iam_policy.json create mode 100644 examples/established-cluster-examples/alb-controller/locals.tf create mode 100644 examples/established-cluster-examples/alb-controller/main.tf create mode 100644 examples/established-cluster-examples/alb-controller/policy.tf create mode 120000 examples/established-cluster-examples/alb-controller/prefixes.tf create mode 120000 examples/established-cluster-examples/alb-controller/providers.tf create 
mode 100644 examples/established-cluster-examples/alb-controller/role.tf create mode 100644 examples/established-cluster-examples/alb-controller/s create mode 100644 examples/established-cluster-examples/alb-controller/serviceaccount.tf create mode 100644 examples/established-cluster-examples/alb-controller/variables.alb.tf create mode 100644 examples/established-cluster-examples/alb-controller/variables.vpc.tf create mode 120000 examples/established-cluster-examples/alb-controller/version.tf create mode 100644 examples/established-cluster-examples/dnsutils/README.md create mode 120000 examples/established-cluster-examples/dnsutils/copy_image.sh create mode 100644 examples/established-cluster-examples/dnsutils/copy_images.tf create mode 100644 examples/established-cluster-examples/dnsutils/data.eks.tf create mode 100644 examples/established-cluster-examples/dnsutils/history.1 create mode 100644 examples/established-cluster-examples/dnsutils/locals.tf create mode 100644 examples/established-cluster-examples/dnsutils/main.tf create mode 100644 examples/established-cluster-examples/dnsutils/output.log create mode 100644 examples/established-cluster-examples/dnsutils/pod.txt create mode 100644 examples/established-cluster-examples/dnsutils/pod.yaml create mode 120000 examples/established-cluster-examples/dnsutils/prefixes.tf create mode 120000 examples/established-cluster-examples/dnsutils/providers.tf create mode 120000 examples/established-cluster-examples/dnsutils/variables.eks.tf create mode 120000 examples/established-cluster-examples/dnsutils/variables.vpc.tf create mode 120000 examples/established-cluster-examples/dnsutils/version.tf create mode 100644 examples/established-cluster-examples/kube-bench/README.md create mode 120000 examples/established-cluster-examples/kube-bench/copy_image.sh create mode 100644 examples/established-cluster-examples/kube-bench/copy_images.tf create mode 100644 examples/established-cluster-examples/kube-bench/data.eks.tf create mode 100644 examples/established-cluster-examples/kube-bench/history.1 create mode 100644 examples/established-cluster-examples/kube-bench/job-eks.yaml create mode 100644 examples/established-cluster-examples/kube-bench/job-eks.yaml.orig create mode 100644 examples/established-cluster-examples/kube-bench/locals.tf create mode 100644 examples/established-cluster-examples/kube-bench/main.tf create mode 100644 examples/established-cluster-examples/kube-bench/output.log create mode 100644 examples/established-cluster-examples/kube-bench/pod.txt create mode 120000 examples/established-cluster-examples/kube-bench/prefixes.tf create mode 120000 examples/established-cluster-examples/kube-bench/providers.tf create mode 120000 examples/established-cluster-examples/kube-bench/variables.eks.tf create mode 120000 examples/established-cluster-examples/kube-bench/variables.vpc.tf create mode 120000 examples/established-cluster-examples/kube-bench/version.tf create mode 100644 examples/established-cluster-examples/sample-alb/README.md create mode 120000 examples/established-cluster-examples/sample-alb/copy_image.sh create mode 100644 examples/established-cluster-examples/sample-alb/copy_images.tf create mode 100644 examples/established-cluster-examples/sample-alb/data.eks.tf create mode 120000 examples/established-cluster-examples/sample-alb/ecr.tf create mode 100644 examples/established-cluster-examples/sample-alb/locals.tf create mode 100644 examples/established-cluster-examples/sample-alb/main.tf create mode 120000 
examples/established-cluster-examples/sample-alb/prefixes.tf create mode 120000 examples/established-cluster-examples/sample-alb/providers.tf create mode 100644 examples/established-cluster-examples/sample-alb/variables.sample.tf create mode 120000 examples/established-cluster-examples/sample-alb/variables.vpc.tf create mode 120000 examples/established-cluster-examples/sample-alb/version.tf create mode 100644 examples/established-cluster-examples/sample-elb/README.md create mode 120000 examples/established-cluster-examples/sample-elb/copy_image.sh create mode 100644 examples/established-cluster-examples/sample-elb/copy_images.tf create mode 100644 examples/established-cluster-examples/sample-elb/data.eks.tf create mode 120000 examples/established-cluster-examples/sample-elb/ecr.tf create mode 100644 examples/established-cluster-examples/sample-elb/locals.tf create mode 100644 examples/established-cluster-examples/sample-elb/main.tf create mode 120000 examples/established-cluster-examples/sample-elb/prefixes.tf create mode 120000 examples/established-cluster-examples/sample-elb/providers.tf create mode 100644 examples/established-cluster-examples/sample-elb/variables.sample.tf create mode 120000 examples/established-cluster-examples/sample-elb/variables.vpc.tf create mode 120000 examples/established-cluster-examples/sample-elb/version.tf create mode 100644 examples/established-cluster-examples/sample-istio/README.md create mode 100644 examples/established-cluster-examples/sample-istio/charts/my-nginx/.helmignore create mode 100644 examples/established-cluster-examples/sample-istio/charts/my-nginx/Chart.yaml create mode 100644 examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/_helpers.tpl create mode 100644 examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/certificate.yaml create mode 100644 examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/deployment.yaml create mode 100644 examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/gateway.yaml create mode 100644 examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/hpa.yaml create mode 100644 examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/service.yaml create mode 100644 examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/serviceaccount.yaml create mode 100644 examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/virtualservice.yaml create mode 100644 examples/established-cluster-examples/sample-istio/charts/my-nginx/values.yaml create mode 120000 examples/established-cluster-examples/sample-istio/copy_image.sh create mode 100644 examples/established-cluster-examples/sample-istio/copy_images.tf create mode 100644 examples/established-cluster-examples/sample-istio/data.eks.tf create mode 100644 examples/established-cluster-examples/sample-istio/ecr-login.txt create mode 100644 examples/established-cluster-examples/sample-istio/kubeconfig.tf create mode 100644 examples/established-cluster-examples/sample-istio/locals.tf create mode 100644 examples/established-cluster-examples/sample-istio/main.tf create mode 120000 examples/established-cluster-examples/sample-istio/prefixes.tf create mode 120000 examples/established-cluster-examples/sample-istio/providers.tf create mode 100644 examples/established-cluster-examples/sample-istio/setup/kube.config create mode 120000 examples/established-cluster-examples/sample-istio/variables.eks.tf create mode 
100644 examples/established-cluster-examples/sample-istio/variables.sample.tf create mode 120000 examples/established-cluster-examples/sample-istio/version.tf create mode 100644 examples/established-cluster-examples/sample-nlb/README.md create mode 120000 examples/established-cluster-examples/sample-nlb/copy_image.sh create mode 100644 examples/established-cluster-examples/sample-nlb/copy_images.tf create mode 100644 examples/established-cluster-examples/sample-nlb/data.eks.tf create mode 120000 examples/established-cluster-examples/sample-nlb/ecr.tf create mode 100644 examples/established-cluster-examples/sample-nlb/locals.tf create mode 100644 examples/established-cluster-examples/sample-nlb/main.tf create mode 120000 examples/established-cluster-examples/sample-nlb/prefixes.tf create mode 120000 examples/established-cluster-examples/sample-nlb/providers.tf create mode 100644 examples/established-cluster-examples/sample-nlb/variables.sample.tf create mode 120000 examples/established-cluster-examples/sample-nlb/variables.vpc.tf create mode 120000 examples/established-cluster-examples/sample-nlb/version.tf create mode 100644 examples/full-cluster/.gitignore create mode 100644 examples/full-cluster/.terraform-docs.yml create mode 100644 examples/full-cluster/OFF/empty/locals.tf create mode 120000 examples/full-cluster/OFF/empty/prefixes.tf create mode 100644 examples/full-cluster/OFF/empty/test.tf create mode 120000 examples/full-cluster/OFF/empty/version.tf create mode 100644 examples/full-cluster/ROLES.md create mode 100644 examples/full-cluster/aws-auth/config_map.aws-auth.yaml.tpl create mode 100644 examples/full-cluster/aws-auth/data.eks.tf create mode 100644 examples/full-cluster/aws-auth/kubeconfig.tf create mode 100644 examples/full-cluster/aws-auth/patch-aws-auth.tf create mode 120000 examples/full-cluster/aws-auth/prefixes.tf create mode 120000 examples/full-cluster/aws-auth/providers.tf create mode 100644 examples/full-cluster/aws-auth/region.tf create mode 100644 examples/full-cluster/aws-auth/settings.aws-auth.tf create mode 100644 examples/full-cluster/aws-auth/tf-run.data create mode 100644 examples/full-cluster/aws-auth/variables.aws-auth.tf create mode 120000 examples/full-cluster/aws-auth/variables.eks.tf create mode 120000 examples/full-cluster/aws-auth/variables.vpc.tf create mode 120000 examples/full-cluster/aws-auth/version.tf create mode 100755 examples/full-cluster/bin/copy_image.sh create mode 100755 examples/full-cluster/bin/fix-terminating-namespace.sh create mode 100755 examples/full-cluster/bin/show-k8s-things.sh create mode 100644 examples/full-cluster/cluster-roles/.terraform-docs.yml create mode 100644 examples/full-cluster/cluster-roles/README.md create mode 100644 examples/full-cluster/cluster-roles/RESULTS.md create mode 100644 examples/full-cluster/cluster-roles/cm.tf.off create mode 100644 examples/full-cluster/cluster-roles/data.eks.tf create mode 100644 examples/full-cluster/cluster-roles/dba-clusterrole.tf create mode 100644 examples/full-cluster/cluster-roles/dba-rolebinding.tf create mode 100644 examples/full-cluster/cluster-roles/dba.iam.tf create mode 100644 examples/full-cluster/cluster-roles/deployer-clusterrole.tf create mode 100644 examples/full-cluster/cluster-roles/deployer-rolebinding.tf create mode 100644 examples/full-cluster/cluster-roles/deployer.iam.tf create mode 100644 examples/full-cluster/cluster-roles/kubeconfig.tf create mode 100644 examples/full-cluster/cluster-roles/locals.tf create mode 100644 
examples/full-cluster/cluster-roles/main.tf create mode 120000 examples/full-cluster/cluster-roles/prefixes.tf create mode 120000 examples/full-cluster/cluster-roles/providers.tf create mode 100644 examples/full-cluster/cluster-roles/region.tf create mode 100644 examples/full-cluster/cluster-roles/remote_state.yml create mode 120000 examples/full-cluster/cluster-roles/variables.eks.tf create mode 100644 examples/full-cluster/cluster-roles/variables.tf create mode 120000 examples/full-cluster/cluster-roles/version.tf create mode 100644 examples/full-cluster/common-services/.gitignore create mode 100644 examples/full-cluster/common-services/README.certs.md create mode 100644 examples/full-cluster/common-services/README.md create mode 100644 examples/full-cluster/common-services/ca-cert.tf create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/.helmignore create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/Chart.yaml create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/README.md create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/README.md.gotmpl create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/NOTES.txt create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/_helpers.tpl create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/deployment.yaml create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/pdb.yaml create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/role.yaml create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/secret.yaml create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/service.yaml create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml create mode 100644 examples/full-cluster/common-services/charts/cluster-autoscaler/values.yaml create mode 100644 examples/full-cluster/common-services/charts/intermediate-certificate-issuer/.helmignore create mode 100644 examples/full-cluster/common-services/charts/intermediate-certificate-issuer/Chart.yaml create mode 100644 examples/full-cluster/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl create mode 100644 examples/full-cluster/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml create mode 100644 examples/full-cluster/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml create mode 100644 examples/full-cluster/common-services/charts/intermediate-certificate-issuer/values.yaml create mode 100644 
examples/full-cluster/common-services/charts/istio-operator/Chart.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-operator/crds/crd-operator.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-operator/files/gen-operator.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-operator/templates/clusterrole.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-operator/templates/clusterrole_binding.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-operator/templates/crds.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-operator/templates/deployment.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-operator/templates/namespace.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-operator/templates/service.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-operator/templates/service_account.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-operator/values.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-peerauthentication/.helmignore create mode 100644 examples/full-cluster/common-services/charts/istio-peerauthentication/Chart.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-peerauthentication/templates/_helpers.tpl create mode 100644 examples/full-cluster/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-peerauthentication/values.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-profile/.helmignore create mode 100644 examples/full-cluster/common-services/charts/istio-profile/Chart.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-profile/templates/_helpers.tpl create mode 100644 examples/full-cluster/common-services/charts/istio-profile/templates/istiooperator.yaml create mode 100644 examples/full-cluster/common-services/charts/istio-profile/values.yaml create mode 100644 examples/full-cluster/common-services/charts/self-signed-certificate-issuer/.helmignore create mode 100644 examples/full-cluster/common-services/charts/self-signed-certificate-issuer/Chart.yaml create mode 100644 examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl create mode 100644 examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml create mode 100644 examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml create mode 100644 examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml create mode 100644 examples/full-cluster/common-services/charts/self-signed-certificate-issuer/values.yaml create mode 100644 examples/full-cluster/common-services/charts/vault-certificate-issuer/.helmignore create mode 100644 examples/full-cluster/common-services/charts/vault-certificate-issuer/Chart.yaml create mode 100644 examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl create mode 100644 examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml create mode 100644 examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml create mode 100644 
examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml create mode 100644 examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml create mode 100644 examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml create mode 100644 examples/full-cluster/common-services/charts/vault-certificate-issuer/values.yaml create mode 120000 examples/full-cluster/common-services/copy_image.sh create mode 100644 examples/full-cluster/common-services/copy_images.tf create mode 100644 examples/full-cluster/common-services/data.eks.tf create mode 100644 examples/full-cluster/common-services/dns.tf create mode 100644 examples/full-cluster/common-services/kubeconfig.tf create mode 100644 examples/full-cluster/common-services/locals.tf create mode 100644 examples/full-cluster/common-services/main.tf create mode 120000 examples/full-cluster/common-services/prefixes.tf create mode 120000 examples/full-cluster/common-services/providers.tf create mode 100644 examples/full-cluster/common-services/region.tf create mode 100644 examples/full-cluster/common-services/remote_state.yml create mode 100644 examples/full-cluster/common-services/test-cluster-autoscaling.json create mode 100644 examples/full-cluster/common-services/tf-run.data create mode 100644 examples/full-cluster/common-services/variables.common-services.tf create mode 120000 examples/full-cluster/common-services/variables.eks.tf create mode 120000 examples/full-cluster/common-services/variables.vpc.tf create mode 120000 examples/full-cluster/common-services/version.tf create mode 100644 examples/full-cluster/data.eks.tf create mode 100644 examples/full-cluster/dns-zone.tf create mode 100644 examples/full-cluster/ebs-encryption.tf create mode 100644 examples/full-cluster/ec2-keypair.tf create mode 100644 examples/full-cluster/efs/README.efs.md create mode 100644 examples/full-cluster/efs/README.md create mode 120000 examples/full-cluster/efs/copy_image.sh create mode 100644 examples/full-cluster/efs/copy_images.tf create mode 100644 examples/full-cluster/efs/data.eks.tf create mode 100644 examples/full-cluster/efs/ecr.tf create mode 100644 examples/full-cluster/efs/efs.tf create mode 100644 examples/full-cluster/efs/kubeconfig.tf create mode 100644 examples/full-cluster/efs/locals.tf create mode 100644 examples/full-cluster/efs/main.tf create mode 100644 examples/full-cluster/efs/persistent-volume.tf create mode 100644 examples/full-cluster/efs/policy.tf create mode 120000 examples/full-cluster/efs/prefixes.tf create mode 120000 examples/full-cluster/efs/providers.tf create mode 100644 examples/full-cluster/efs/region.tf create mode 100644 examples/full-cluster/efs/role.tf create mode 100644 examples/full-cluster/efs/tf-run.data create mode 100644 examples/full-cluster/efs/variables.efs.tf create mode 120000 examples/full-cluster/efs/variables.eks.tf create mode 120000 examples/full-cluster/efs/variables.vpc.tf create mode 120000 examples/full-cluster/efs/version.tf create mode 100644 examples/full-cluster/eks-console-access.tf create mode 100644 examples/full-cluster/group.tf create mode 120000 examples/full-cluster/irsa-roles/cluster-autoscaler/data.eks.tf create mode 100644 examples/full-cluster/irsa-roles/cluster-autoscaler/irsa-roles.autoscale.tf.off create mode 100644 examples/full-cluster/irsa-roles/cluster-autoscaler/locals.tf create mode 100644 examples/full-cluster/irsa-roles/cluster-autoscaler/policy.tf create mode 
120000 examples/full-cluster/irsa-roles/cluster-autoscaler/prefixes.tf create mode 120000 examples/full-cluster/irsa-roles/cluster-autoscaler/providers.tf create mode 100644 examples/full-cluster/irsa-roles/cluster-autoscaler/region.tf create mode 100644 examples/full-cluster/irsa-roles/cluster-autoscaler/remote_state.yml create mode 100644 examples/full-cluster/irsa-roles/cluster-autoscaler/role.tf create mode 100644 examples/full-cluster/irsa-roles/cluster-autoscaler/service_account.tf create mode 100644 examples/full-cluster/irsa-roles/cluster-autoscaler/tf-run.data create mode 120000 examples/full-cluster/irsa-roles/cluster-autoscaler/variables.eks.tf create mode 120000 examples/full-cluster/irsa-roles/cluster-autoscaler/variables.irsa.tf create mode 120000 examples/full-cluster/irsa-roles/cluster-autoscaler/variables.tags.tf create mode 120000 examples/full-cluster/irsa-roles/cluster-autoscaler/version.tf create mode 100644 examples/full-cluster/irsa-roles/data.eks.tf create mode 120000 examples/full-cluster/irsa-roles/prefixes.tf create mode 120000 examples/full-cluster/irsa-roles/providers.tf create mode 100644 examples/full-cluster/irsa-roles/remote_state.yml create mode 100644 examples/full-cluster/irsa-roles/tf-run.data create mode 120000 examples/full-cluster/irsa-roles/variables.eks.tf create mode 100644 examples/full-cluster/irsa-roles/variables.irsa.tf create mode 120000 examples/full-cluster/irsa-roles/variables.tags.tf create mode 120000 examples/full-cluster/irsa-roles/version.tf create mode 100644 examples/full-cluster/kubeconfig.tf create mode 100644 examples/full-cluster/locals.tf create mode 100644 examples/full-cluster/main.tf create mode 100644 examples/full-cluster/oidc.tf create mode 100644 examples/full-cluster/outputs.tf create mode 100644 examples/full-cluster/policy.tf create mode 100644 examples/full-cluster/prefixes.tf create mode 100644 examples/full-cluster/providers.tf create mode 100644 examples/full-cluster/role.tf create mode 100644 examples/full-cluster/saml.tf create mode 100644 examples/full-cluster/securitygroup.tf create mode 100644 examples/full-cluster/settings.auto.tfvars.example create mode 100644 examples/full-cluster/setup-env.sh create mode 100644 examples/full-cluster/templates/node-private-userdata.tmpl create mode 100644 examples/full-cluster/tf-run.data create mode 100644 examples/full-cluster/variables.eks.tf create mode 100644 examples/full-cluster/variables.tags.tf create mode 100644 examples/full-cluster/variables.vpc.tf create mode 100644 examples/full-cluster/version.tf
diff --git a/examples/established-cluster-examples/alb-controller/README.md b/examples/established-cluster-examples/alb-controller/README.md
new file mode 100644
index 0000000..d26262c
--- /dev/null
+++ b/examples/established-cluster-examples/alb-controller/README.md
@@ -0,0 +1,77 @@
+
+# About alb-controller
+
+This directory constructs the resources required to set up the alb-controller on an existing EKS cluster.
+
+
+# Application Information
+
+* Application: {name of application}
+* Organization: {division}
+* Project: {project}
+* Point of Contact(s): {username list}
+* Creation Date: {yyyy-mm-dd}
+* References:
+  * Requirements: {url}
+  * Remedy Ticket: {number}
+  * Other: {url}
+* Related Configurations:
+  * {directory-path}
+
+# Application Requirements
+
+
+# Terraform Directions
+
+## Policies
+
+First, we have to create the two policies. The roles will not be created until the policies exist.
+
+```shell
+tf-plan -target=aws_iam_policy.alb-policy
+tf-apply -target=aws_iam_policy.alb-policy
+```
+
+Then apply the rest. Note that we did need to change the ECR reference to use a registry in us-east-1.
+
+```shell
+tf-plan
+tf-apply
+```
+
+```console
+% kubectl -n kube-system get events
+```
+
+```console
+% kubectl -n kube-system get pods -o wide
+NAME                                            READY   STATUS    RESTARTS   AGE    IP              NODE                            NOMINATED NODE   READINESS GATES
+aws-load-balancer-controller-54fdf64896-jzwsr   1/1     Running   0          109m   10.194.26.74    ip-10-194-26-252.ec2.internal
+aws-load-balancer-controller-54fdf64896-qqt6d   1/1     Running   0          109m   10.194.24.242   ip-10-194-24-49.ec2.internal
+aws-node-29kmc                                  1/1     Running   0          6d4h   10.194.24.90    ip-10-194-24-90.ec2.internal
+aws-node-6d8ls                                  1/1     Running   1          6d4h   10.194.25.120   ip-10-194-25-120.ec2.internal
+aws-node-6vrbg                                  1/1     Running   1          6d4h   10.194.26.252   ip-10-194-26-252.ec2.internal
+aws-node-ldgxc                                  1/1     Running   1          6d4h   10.194.24.49    ip-10-194-24-49.ec2.internal
+coredns-65bfc5645f-g86rx                        1/1     Running   0          6d4h   10.194.24.207   ip-10-194-24-90.ec2.internal
+coredns-65bfc5645f-xj9rl                        1/1     Running   0          6d4h   10.194.24.69    ip-10-194-24-90.ec2.internal
+efs-csi-controller-65fb886fd4-7slw6             3/3     Running   0          2d     10.194.24.90    ip-10-194-24-90.ec2.internal
+efs-csi-controller-65fb886fd4-vcf9l             3/3     Running   0          2d     10.194.25.120   ip-10-194-25-120.ec2.internal
+efs-csi-node-6t6v6                              3/3     Running   0          2d     10.194.25.120   ip-10-194-25-120.ec2.internal
+efs-csi-node-kxqfb                              3/3     Running   0          2d     10.194.24.49    ip-10-194-24-49.ec2.internal
+efs-csi-node-p8hzn                              3/3     Running   0          2d     10.194.26.252   ip-10-194-26-252.ec2.internal
+efs-csi-node-xxq9h                              3/3     Running   0          2d     10.194.24.90    ip-10-194-24-90.ec2.internal
+kube-proxy-78n7f                                1/1     Running   0          6d4h   10.194.24.90    ip-10-194-24-90.ec2.internal
+kube-proxy-cms7c                                1/1     Running   0          6d4h   10.194.24.49    ip-10-194-24-49.ec2.internal
+kube-proxy-h2t6n                                1/1     Running   0          6d4h   10.194.26.252   ip-10-194-26-252.ec2.internal
+kube-proxy-jkxnz                                1/1     Running   0          6d4h   10.194.25.120   ip-10-194-25-120.ec2.internal
+```
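+
+Beyond the pod listing above, the controller's deployment logs are a quick way to confirm it started cleanly. This is a minimal check, assuming the release keeps the default `aws-load-balancer-controller` deployment name visible in the pod names above:
+
+```shell
+kubectl -n kube-system logs deployment/aws-load-balancer-controller --tail=20
+```
+
+A throwaway Ingress can then confirm that the controller actually provisions an ALB. The manifest below is illustrative only; the `echoserver` backend service and its port are hypothetical placeholders, not resources created by this configuration:
+
+```shell
+# Hypothetical smoke test: assumes an echoserver Service already exists.
+kubectl apply -f - <<'EOF'
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: alb-smoke-test
+  annotations:
+    kubernetes.io/ingress.class: alb
+    alb.ingress.kubernetes.io/scheme: internal
+spec:
+  rules:
+    - http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: echoserver
+                port:
+                  number: 80
+EOF
+
+# The controller should populate the ADDRESS column with an ALB DNS name:
+kubectl get ingress alb-smoke-test
+```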
+
+
+# Details
+
+
+{{ .Content }}
+
+
diff --git a/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/.helmignore b/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/Chart.yaml b/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/Chart.yaml
new file mode 100644
index 0000000..711422d
--- /dev/null
+++ b/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: alb-controller-crds
+description: Installs the CRDs for the ALB-controller
+type: application
+version: 0.1.0
+appVersion: "1.0.0"
diff --git a/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/templates/_helpers.tpl b/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/templates/_helpers.tpl
new file mode 100644
index 0000000..18f5548
--- /dev/null
+++ b/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "alb-controller-crds.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "alb-controller-crds.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "alb-controller-crds.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "alb-controller-crds.labels" -}}
+helm.sh/chart: {{ include "alb-controller-crds.chart" . }}
+{{ include "alb-controller-crds.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "alb-controller-crds.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "alb-controller-crds.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "alb-controller-crds.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "alb-controller-crds.fullname" .)
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/templates/crds.yaml b/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/templates/crds.yaml new file mode 100644 index 0000000..7a3a671 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/templates/crds.yaml @@ -0,0 +1,452 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + name: ingressclassparams.elbv2.k8s.aws +spec: + group: elbv2.k8s.aws + names: + kind: IngressClassParams + listKind: IngressClassParamsList + plural: ingressclassparams + singular: ingressclassparams + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The Ingress Group name + jsonPath: .spec.group.name + name: GROUP-NAME + type: string + - description: The AWS Load Balancer scheme + jsonPath: .spec.scheme + name: SCHEME + type: string + - description: The AWS Load Balancer ipAddressType + jsonPath: .spec.ipAddressType + name: IP-ADDRESS-TYPE + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: IngressClassParams is the Schema for the IngressClassParams API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IngressClassParamsSpec defines the desired state of IngressClassParams + properties: + group: + description: Group defines the IngressGroup for all Ingresses that belong to IngressClass with this IngressClassParams. + properties: + name: + description: Name is the name of IngressGroup. + type: string + required: + - name + type: object + ipAddressType: + description: IPAddressType defines the ip address type for all Ingresses that belong to IngressClass with this IngressClassParams. + enum: + - ipv4 + - dualstack + type: string + namespaceSelector: + description: NamespaceSelector restrict the namespaces of Ingresses that are allowed to specify the IngressClass with this IngressClassParams. * if absent or present but empty, it selects all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + scheme: + description: Scheme defines the scheme for all Ingresses that belong to IngressClass with this IngressClassParams. + enum: + - internal + - internet-facing + type: string + tags: + description: Tags defines list of Tags on AWS resources provisioned for Ingresses that belong to IngressClass with this IngressClassParams. + items: + description: Tag defines a AWS Tag on resources. + properties: + key: + description: The key of the tag. + type: string + value: + description: The value of the tag. + type: string + required: + - key + - value + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + name: targetgroupbindings.elbv2.k8s.aws +spec: + group: elbv2.k8s.aws + names: + kind: TargetGroupBinding + listKind: TargetGroupBindingList + plural: targetgroupbindings + singular: targetgroupbinding + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The Kubernetes Service's name + jsonPath: .spec.serviceRef.name + name: SERVICE-NAME + type: string + - description: The Kubernetes Service's port + jsonPath: .spec.serviceRef.port + name: SERVICE-PORT + type: string + - description: The AWS TargetGroup's TargetType + jsonPath: .spec.targetType + name: TARGET-TYPE + type: string + - description: The AWS TargetGroup's Amazon Resource Name + jsonPath: .spec.targetGroupARN + name: ARN + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TargetGroupBinding is the Schema for the TargetGroupBinding API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TargetGroupBindingSpec defines the desired state of TargetGroupBinding + properties: + networking: + description: networking provides the networking setup for ELBV2 LoadBalancer to access targets in TargetGroup. 
+ properties: + ingress: + description: List of ingress rules to allow ELBV2 LoadBalancer to access targets in TargetGroup. + items: + properties: + from: + description: List of peers which should be able to access the targets in TargetGroup. At least one NetworkingPeer should be specified. + items: + description: NetworkingPeer defines the source/destination peer for networking rules. + properties: + ipBlock: + description: IPBlock defines an IPBlock peer. If specified, none of the other fields can be set. + properties: + cidr: + description: CIDR is the network CIDR. Both IPV4 or IPV6 CIDR are accepted. + type: string + required: + - cidr + type: object + securityGroup: + description: SecurityGroup defines a SecurityGroup peer. If specified, none of the other fields can be set. + properties: + groupID: + description: GroupID is the EC2 SecurityGroupID. + type: string + required: + - groupID + type: object + type: object + type: array + ports: + description: List of ports which should be made accessible on the targets in TargetGroup. If ports is empty or unspecified, it defaults to all ports with TCP. + items: + properties: + port: + anyOf: + - type: integer + - type: string + description: The port which traffic must match. When NodePort endpoints(instance TargetType) is used, this must be a numerical port. When Port endpoints(ip TargetType) is used, this can be either numerical or named port on pods. if port is unspecified, it defaults to all ports. + x-kubernetes-int-or-string: true + protocol: + description: The protocol which traffic must match. If protocol is unspecified, it defaults to TCP. + enum: + - TCP + - UDP + type: string + type: object + type: array + required: + - from + - ports + type: object + type: array + type: object + serviceRef: + description: serviceRef is a reference to a Kubernetes Service and ServicePort. + properties: + name: + description: Name is the name of the Service. + type: string + port: + anyOf: + - type: integer + - type: string + description: Port is the port of the ServicePort. + x-kubernetes-int-or-string: true + required: + - name + - port + type: object + targetGroupARN: + description: targetGroupARN is the Amazon Resource Name (ARN) for the TargetGroup. + type: string + targetType: + description: targetType is the TargetType of TargetGroup. If unspecified, it will be automatically inferred. + enum: + - instance + - ip + type: string + required: + - serviceRef + - targetGroupARN + type: object + status: + description: TargetGroupBindingStatus defines the observed state of TargetGroupBinding + properties: + observedGeneration: + description: The generation observed by the TargetGroupBinding controller. 
+ format: int64 + type: integer + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The Kubernetes Service's name + jsonPath: .spec.serviceRef.name + name: SERVICE-NAME + type: string + - description: The Kubernetes Service's port + jsonPath: .spec.serviceRef.port + name: SERVICE-PORT + type: string + - description: The AWS TargetGroup's TargetType + jsonPath: .spec.targetType + name: TARGET-TYPE + type: string + - description: The AWS TargetGroup's Amazon Resource Name + jsonPath: .spec.targetGroupARN + name: ARN + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: TargetGroupBinding is the Schema for the TargetGroupBinding API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TargetGroupBindingSpec defines the desired state of TargetGroupBinding + properties: + networking: + description: networking defines the networking rules to allow ELBV2 LoadBalancer to access targets in TargetGroup. + properties: + ingress: + description: List of ingress rules to allow ELBV2 LoadBalancer to access targets in TargetGroup. + items: + description: NetworkingIngressRule defines a particular set of traffic that is allowed to access TargetGroup's targets. + properties: + from: + description: List of peers which should be able to access the targets in TargetGroup. At least one NetworkingPeer should be specified. + items: + description: NetworkingPeer defines the source/destination peer for networking rules. + properties: + ipBlock: + description: IPBlock defines an IPBlock peer. If specified, none of the other fields can be set. + properties: + cidr: + description: CIDR is the network CIDR. Both IPV4 or IPV6 CIDR are accepted. + type: string + required: + - cidr + type: object + securityGroup: + description: SecurityGroup defines a SecurityGroup peer. If specified, none of the other fields can be set. + properties: + groupID: + description: GroupID is the EC2 SecurityGroupID. + type: string + required: + - groupID + type: object + type: object + type: array + ports: + description: List of ports which should be made accessible on the targets in TargetGroup. If ports is empty or unspecified, it defaults to all ports with TCP. + items: + description: NetworkingPort defines the port and protocol for networking rules. + properties: + port: + anyOf: + - type: integer + - type: string + description: The port which traffic must match. When NodePort endpoints(instance TargetType) is used, this must be a numerical port. When Port endpoints(ip TargetType) is used, this can be either numerical or named port on pods. if port is unspecified, it defaults to all ports. + x-kubernetes-int-or-string: true + protocol: + description: The protocol which traffic must match. 
If protocol is unspecified, it defaults to TCP. + enum: + - TCP + - UDP + type: string + type: object + type: array + required: + - from + - ports + type: object + type: array + type: object + nodeSelector: + description: node selector for instance type target groups to only register certain nodes + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + serviceRef: + description: serviceRef is a reference to a Kubernetes Service and ServicePort. + properties: + name: + description: Name is the name of the Service. + type: string + port: + anyOf: + - type: integer + - type: string + description: Port is the port of the ServicePort. + x-kubernetes-int-or-string: true + required: + - name + - port + type: object + targetGroupARN: + description: targetGroupARN is the Amazon Resource Name (ARN) for the TargetGroup. + minLength: 1 + type: string + targetType: + description: targetType is the TargetType of TargetGroup. If unspecified, it will be automatically inferred. + enum: + - instance + - ip + type: string + required: + - serviceRef + - targetGroupARN + type: object + status: + description: TargetGroupBindingStatus defines the observed state of TargetGroupBinding + properties: + observedGeneration: + description: The generation observed by the TargetGroupBinding controller. + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/values.yaml b/examples/established-cluster-examples/alb-controller/charts/alb-controller-crds/values.yaml new file mode 100644 index 0000000..e69de29 diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/.helmignore b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/.helmignore new file mode 100644 index 0000000..bbcfa46 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/.helmignore @@ -0,0 +1,24 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+crds/kustomization.yaml
diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/Chart.yaml b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/Chart.yaml
new file mode 100644
index 0000000..b150b3b
--- /dev/null
+++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/Chart.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+appVersion: v2.2.3
+description: AWS Load Balancer Controller Helm chart for Kubernetes
+home: https://github.com/aws/eks-charts
+icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
+keywords:
+- eks
+- alb
+- load balancer
+- ingress
+- nlb
+maintainers:
+- email: kishorj@users.noreply.github.com
+  name: kishorj
+  url: https://github.com/kishorj
+- email: m00nf1sh@users.noreply.github.com
+  name: m00nf1sh
+  url: https://github.com/m00nf1sh
+name: aws-load-balancer-controller
+sources:
+- https://github.com/aws/eks-charts
+version: 1.2.6
diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/README.md b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/README.md
new file mode 100644
index 0000000..08e4950
--- /dev/null
+++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/README.md
@@ -0,0 +1,197 @@
+# AWS Load Balancer Controller
+
+AWS Load Balancer controller Helm chart for Kubernetes
+
+## TL;DR:
+```sh
+helm repo add eks https://aws.github.io/eks-charts
+# If using IAM Roles for service accounts, install as follows - NOTE: you need to specify both of the chart values `serviceAccount.create=false` and `serviceAccount.name=aws-load-balancer-controller`
+helm install aws-load-balancer-controller eks/aws-load-balancer-controller --set clusterName=my-cluster -n kube-system --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller
+# If not using IAM Roles for service accounts
+helm install aws-load-balancer-controller eks/aws-load-balancer-controller --set clusterName=my-cluster -n kube-system
+```
+
+## Introduction
+The AWS Load Balancer controller manages the following AWS resources:
+- Application Load Balancers to satisfy Kubernetes ingress objects
+- Network Load Balancers to satisfy Kubernetes service objects of type LoadBalancer with appropriate annotations
+
+## Security updates
+**Note**: The deployed chart does not receive security updates automatically. You need to manually upgrade to a newer chart.
+
+## Prerequisites
+- Kubernetes >= 1.16 for ALB
+- Kubernetes >= 1.16 for NLB IP/Instance using Service type NodePort
+- Kubernetes >= v1.20 or EKS >= 1.16 or the following patch releases for Service type `LoadBalancer`
+  - 1.18.18+ for 1.18
+  - 1.19.10+ for 1.19
+- IAM permissions
+
+The controller runs on the worker nodes, so it needs access to the AWS ALB/NLB resources via IAM permissions. The
+IAM permissions can either be set up via IAM roles for ServiceAccount or can be attached directly to the worker node IAM roles.
+
+#### Setup IAM for ServiceAccount
+1. Create an IAM OIDC provider
+    ```
+    eksctl utils associate-iam-oidc-provider \
+        --region <region> \
+        --cluster <cluster-name> \
+        --approve
+    ```
+1. Download the IAM policy for the AWS Load Balancer Controller
+    ```
+    curl -o iam-policy.json https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/main/docs/install/iam_policy.json
+    ```
+1. Create an IAM policy called AWSLoadBalancerControllerIAMPolicy
+    ```
+    aws iam create-policy \
+        --policy-name AWSLoadBalancerControllerIAMPolicy \
+        --policy-document file://iam-policy.json
+    ```
+    Take note of the policy ARN that is returned.
+
+1. Create an IAM role and ServiceAccount for the Load Balancer controller, using the policy ARN from the step above
+    ```
+    eksctl create iamserviceaccount \
+        --cluster=<cluster-name> \
+        --namespace=kube-system \
+        --name=aws-load-balancer-controller \
+        --attach-policy-arn=arn:aws:iam::<AWS_ACCOUNT_ID>:policy/AWSLoadBalancerControllerIAMPolicy \
+        --approve
+    ```
+#### Setup IAM manually
+If not setting up IAM for ServiceAccount, apply the IAM policies from the following URL at a minimum.
+```
+https://raw.githubusercontent.com/kubernetes-sigs/aws-alb-ingress-controller/main/docs/install/iam_policy.json
+```
+
+#### Upgrading from ALB ingress controller
+If migrating from the ALB ingress controller, grant the [additional IAM permissions](https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/main/docs/install/iam_policy_v1_to_v2_additional.json).
+
+#### Upgrading from AWS Load Balancer controller v2.1.3 and earlier
+- Additional IAM permissions are required; ensure you have granted the [required IAM permissions](https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/main/docs/install/iam_policy.json).
+- CRDs need to be updated as follows
+```shell script
+kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master"
+```
+- You can run `helm upgrade` without uninstalling the old chart completely.
+
+## Installing the Chart
+**Note**: You need to uninstall aws-alb-ingress-controller first. Please refer to the [upgrade](#Upgrade) section below before you proceed.
+
+Add the EKS repository to Helm:
+```shell script
+helm repo add eks https://aws.github.io/eks-charts
+```
+
+Install the TargetGroupBinding CRDs:
+
+```shell script
+kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master"
+```
+
+Install the AWS Load Balancer controller, if using iamserviceaccount
+```shell script
+# NOTE: The clusterName value must be set either via the values.yaml or the Helm command line. The <cluster-name> in the command
+# below should be replaced with the name of your k8s cluster before running it.
+helm upgrade -i aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=<cluster-name> --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller
+```
+
+Install the AWS Load Balancer controller, if not using iamserviceaccount
+```shell script
+helm upgrade -i aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=<cluster-name>
+```
+
+## Upgrade
+The new controller is backwards compatible with the existing ingress objects. However, it will not coexist with the older aws-alb-ingress-controller.
+The old controller must be uninstalled completely before installing the new version.
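+
+Before installing the new chart, you can verify that the old controller and its cluster-wide RBAC objects are fully removed. This is a minimal check, assuming the default names used in the uninstall steps below:
+```sh
+# Both commands should print nothing once the old controller is gone
+kubectl get deployment -n kube-system alb-ingress-controller --ignore-not-found
+kubectl get clusterrole,clusterrolebinding alb-ingress-controller --ignore-not-found
+```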
+### Kubectl installation
+If you had installed the previous version via kubectl, uninstall as follows
+```shell script
+$ kubectl delete deployment -n kube-system alb-ingress-controller
+# Find the version of the current controller
+$ kubectl describe deployment -n kube-system alb-ingress-controller | grep Image
+    Image: docker.io/amazon/aws-alb-ingress-controller:v1.1.8
+# In this case the version is v1.1.8, so the RBAC roles can be removed as follows
+$ kubectl delete -f https://raw.githubusercontent.com/kubernetes-sigs/aws-alb-ingress-controller/v1.1.8/docs/examples/rbac-role.yaml
+```
+### Helm installation
+If you had installed the incubator/aws-alb-ingress-controller Helm chart, uninstall as follows
+```shell script
+# NOTE: If installed under a different chart name and namespace, please specify as appropriate
+$ helm delete aws-alb-ingress-controller -n kube-system
+```
+
+If you had installed the 0.1.x version of the eks-charts/aws-load-balancer-controller chart earlier, upgrading to chart version 1.0.0 will
+not work due to an incompatible webhook API version; uninstall it first as follows
+```shell script
+$ helm delete aws-load-balancer-controller -n kube-system
+```
+
+## Uninstalling the Chart
+```sh
+helm delete aws-load-balancer-controller -n kube-system
+```
+
+## HA configuration
+Chart release v1.2.0 and later enables a high availability configuration by default.
+- The default number of replicas is 2. You can pass the `--set replicaCount=1` flag during chart installation to disable this. Due to leader election, only one controller will actively reconcile resources.
+- The default priority class for the controller pods is `system-cluster-critical`
+- Soft pod anti-affinity is enabled for controller pods with `topologyKey: kubernetes.io/hostname` if custom affinity is not configured
+- A pod disruption budget (PDB) is not set by default. If you plan on running at least 2 controller pods, you can pass the `--set podDisruptionBudget.maxUnavailable=1` flag during chart installation
+
+## Configuration
+
+The following table lists the configurable parameters of the chart and their default values; several of them can also be combined in a values file, as shown below.
+The default values set by the application itself can be confirmed [here](https://kubernetes-sigs.github.io/aws-load-balancer-controller/guide/controller/configurations/).
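+
+As a sketch of how these parameters combine, a hypothetical `my-values.yaml` (the file name, cluster name, and region here are placeholders) could set several of them at once:
+```yaml
+# my-values.yaml - example overrides; replace the placeholder values with your own
+clusterName: my-cluster
+region: us-east-1
+serviceAccount:
+  create: false
+  name: aws-load-balancer-controller
+podDisruptionBudget:
+  maxUnavailable: 1
+```
+and be applied with `helm upgrade -i aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system -f my-values.yaml`.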
+ +| Parameter | Description | Default | +| ------------------------------------------- | -------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `image.repository` | image repository | `602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-load-balancer-controller` | +| `image.tag` | image tag | `` | +| `image.pullPolicy` | image pull policy | `IfNotPresent` | +| `clusterName` | Kubernetes cluster name | None | +| `securityContext` | Set to security context for pod | `{}` | +| `resources` | Controller pod resource requests & limits | `{}` | +| `priorityClassName` | Controller pod priority class | system-cluster-critical | +| `nodeSelector` | Node labels for controller pod assignment | `{}` | +| `tolerations` | Controller pod toleration for taints | `{}` | +| `affinity` | Affinity for pod assignment | `{}` | +| `podAnnotations` | Annotations to add to each pod | `{}` | +| `podLabels` | Labels to add to each pod | `{}` | +| `rbac.create` | if `true`, create and use RBAC resources | `true` | +| `serviceAccount.annotations` | optional annotations to add to service account | None | +| `serviceAccount.automountServiceAccountToken` | Automount API credentials for a Service Account | `true` | +| `serviceAccount.create` | If `true`, create a new service account | `true` | +| `serviceAccount.name` | Service account to be used | None | +| `terminationGracePeriodSeconds` | Time period for controller pod to do a graceful shutdown | 10 | +| `ingressClass` | The ingress class to satisfy | alb | +| `region` | The AWS region for the kubernetes cluster | None | +| `vpcId` | The VPC ID for the Kubernetes cluster | None | +| `awsMaxRetries` | Maximum retries for AWS APIs | None | +| `enablePodReadinessGateInject` | If enabled, targetHealth readiness gate will get injected to the pod spec for the matching endpoint pods | None | +| `enableShield` | Enable Shield addon for ALB | None | +| `enableWaf` | Enable WAF addon for ALB | None | +| `enableWafv2` | Enable WAF V2 addon for ALB | None | +| `ingressMaxConcurrentReconciles` | Maximum number of concurrently running reconcile loops for ingress | None | +| `logLevel` | Set the controller log level - info, debug | None | +| `metricsBindAddr` | The address the metric endpoint binds to | "" | +| `webhookBindPort` | The TCP port the Webhook server binds to | None | +| `serviceMaxConcurrentReconciles` | Maximum number of concurrently running reconcile loops for service | None | +| `targetgroupbindingMaxConcurrentReconciles` | Maximum number of concurrently running reconcile loops for targetGroupBinding | None | +| `targetgroupbindingMaxExponentialBackoffDelay` | Maximum duration of exponential backoff for targetGroupBinding reconcile failures | None | +| `syncPeriod` | Period at which the controller forces the repopulation of its local object stores | None | +| `watchNamespace` | Namespace the controller watches for updates to Kubernetes objects, If empty, all namespaces are watched | None | +| `disableIngressClassAnnotation` | Disables the usage of kubernetes.io/ingress.class annotation | None | +| `disableIngressGroupNameAnnotation` | Disables the usage of alb.ingress.kubernetes.io/group.name annotation | None | +| `defaultSSLPolicy` | Specifies the default SSL policy to use for HTTPS or TLS listeners | None | +| `externalManagedTags` | Specifies the list of tag keys on AWS resources that are managed externally | `[]` | +| 
`livenessProbe` | Liveness probe settings for the controller | (see `values.yaml`) |
+| `env` | Environment variables to set for aws-load-balancer-controller pod | None |
+| `hostNetwork` | If `true`, use hostNetwork | `false` |
+| `extraVolumeMounts` | Extra volume mounts for the pod | `[]` |
+| `extraVolumes` | Extra volumes for the pod | `[]` |
+| `defaultTags` | Default tags to apply to all AWS resources managed by this controller | `{}` |
+| `replicaCount` | Number of controller pods to run, only one will be active due to leader election | `2` |
+| `podDisruptionBudget` | Limit the disruption for controller pods. Requires at least 2 controller replicas and 3 worker nodes | `{}` |
+| `updateStrategy` | Defines the update strategy for the deployment | `{}` |
diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/ci/extra_args b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/ci/extra_args
new file mode 100644
index 0000000..c72e0d8
--- /dev/null
+++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/ci/extra_args
@@ -0,0 +1 @@
+--set clusterName=k8s-ci-cluster
diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/ci/values.yaml b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/ci/values.yaml
new file mode 100644
index 0000000..fedf33d
--- /dev/null
+++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/ci/values.yaml
@@ -0,0 +1,7 @@
+# CI testing values for aws-load-balancer-controller
+
+region: us-west-2
+image:
+  repository: kishorj/aws-load-balancer-controller
+  tag: v2.0.0-rc1
+  pullPolicy: Always
diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/crds/crds.yaml b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/crds/crds.yaml
new file mode 100644
index 0000000..7a3a671
--- /dev/null
+++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/crds/crds.yaml
@@ -0,0 +1,452 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.5.0
+  creationTimestamp: null
+  name: ingressclassparams.elbv2.k8s.aws
+spec:
+  group: elbv2.k8s.aws
+  names:
+    kind: IngressClassParams
+    listKind: IngressClassParamsList
+    plural: ingressclassparams
+    singular: ingressclassparams
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - description: The Ingress Group name
+      jsonPath: .spec.group.name
+      name: GROUP-NAME
+      type: string
+    - description: The AWS Load Balancer scheme
+      jsonPath: .spec.scheme
+      name: SCHEME
+      type: string
+    - description: The AWS Load Balancer ipAddressType
+      jsonPath: .spec.ipAddressType
+      name: IP-ADDRESS-TYPE
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1beta1
+    schema:
+      openAPIV3Schema:
+        description: IngressClassParams is the Schema for the IngressClassParams API
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IngressClassParamsSpec defines the desired state of IngressClassParams + properties: + group: + description: Group defines the IngressGroup for all Ingresses that belong to IngressClass with this IngressClassParams. + properties: + name: + description: Name is the name of IngressGroup. + type: string + required: + - name + type: object + ipAddressType: + description: IPAddressType defines the ip address type for all Ingresses that belong to IngressClass with this IngressClassParams. + enum: + - ipv4 + - dualstack + type: string + namespaceSelector: + description: NamespaceSelector restrict the namespaces of Ingresses that are allowed to specify the IngressClass with this IngressClassParams. * if absent or present but empty, it selects all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + scheme: + description: Scheme defines the scheme for all Ingresses that belong to IngressClass with this IngressClassParams. + enum: + - internal + - internet-facing + type: string + tags: + description: Tags defines list of Tags on AWS resources provisioned for Ingresses that belong to IngressClass with this IngressClassParams. + items: + description: Tag defines a AWS Tag on resources. + properties: + key: + description: The key of the tag. + type: string + value: + description: The value of the tag. 
+ type: string + required: + - key + - value + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + name: targetgroupbindings.elbv2.k8s.aws +spec: + group: elbv2.k8s.aws + names: + kind: TargetGroupBinding + listKind: TargetGroupBindingList + plural: targetgroupbindings + singular: targetgroupbinding + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The Kubernetes Service's name + jsonPath: .spec.serviceRef.name + name: SERVICE-NAME + type: string + - description: The Kubernetes Service's port + jsonPath: .spec.serviceRef.port + name: SERVICE-PORT + type: string + - description: The AWS TargetGroup's TargetType + jsonPath: .spec.targetType + name: TARGET-TYPE + type: string + - description: The AWS TargetGroup's Amazon Resource Name + jsonPath: .spec.targetGroupARN + name: ARN + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TargetGroupBinding is the Schema for the TargetGroupBinding API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TargetGroupBindingSpec defines the desired state of TargetGroupBinding + properties: + networking: + description: networking provides the networking setup for ELBV2 LoadBalancer to access targets in TargetGroup. + properties: + ingress: + description: List of ingress rules to allow ELBV2 LoadBalancer to access targets in TargetGroup. + items: + properties: + from: + description: List of peers which should be able to access the targets in TargetGroup. At least one NetworkingPeer should be specified. + items: + description: NetworkingPeer defines the source/destination peer for networking rules. + properties: + ipBlock: + description: IPBlock defines an IPBlock peer. If specified, none of the other fields can be set. + properties: + cidr: + description: CIDR is the network CIDR. Both IPV4 or IPV6 CIDR are accepted. + type: string + required: + - cidr + type: object + securityGroup: + description: SecurityGroup defines a SecurityGroup peer. If specified, none of the other fields can be set. + properties: + groupID: + description: GroupID is the EC2 SecurityGroupID. + type: string + required: + - groupID + type: object + type: object + type: array + ports: + description: List of ports which should be made accessible on the targets in TargetGroup. If ports is empty or unspecified, it defaults to all ports with TCP. + items: + properties: + port: + anyOf: + - type: integer + - type: string + description: The port which traffic must match. 
When NodePort endpoints(instance TargetType) is used, this must be a numerical port. When Port endpoints(ip TargetType) is used, this can be either numerical or named port on pods. if port is unspecified, it defaults to all ports. + x-kubernetes-int-or-string: true + protocol: + description: The protocol which traffic must match. If protocol is unspecified, it defaults to TCP. + enum: + - TCP + - UDP + type: string + type: object + type: array + required: + - from + - ports + type: object + type: array + type: object + serviceRef: + description: serviceRef is a reference to a Kubernetes Service and ServicePort. + properties: + name: + description: Name is the name of the Service. + type: string + port: + anyOf: + - type: integer + - type: string + description: Port is the port of the ServicePort. + x-kubernetes-int-or-string: true + required: + - name + - port + type: object + targetGroupARN: + description: targetGroupARN is the Amazon Resource Name (ARN) for the TargetGroup. + type: string + targetType: + description: targetType is the TargetType of TargetGroup. If unspecified, it will be automatically inferred. + enum: + - instance + - ip + type: string + required: + - serviceRef + - targetGroupARN + type: object + status: + description: TargetGroupBindingStatus defines the observed state of TargetGroupBinding + properties: + observedGeneration: + description: The generation observed by the TargetGroupBinding controller. + format: int64 + type: integer + type: object + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The Kubernetes Service's name + jsonPath: .spec.serviceRef.name + name: SERVICE-NAME + type: string + - description: The Kubernetes Service's port + jsonPath: .spec.serviceRef.port + name: SERVICE-PORT + type: string + - description: The AWS TargetGroup's TargetType + jsonPath: .spec.targetType + name: TARGET-TYPE + type: string + - description: The AWS TargetGroup's Amazon Resource Name + jsonPath: .spec.targetGroupARN + name: ARN + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: TargetGroupBinding is the Schema for the TargetGroupBinding API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TargetGroupBindingSpec defines the desired state of TargetGroupBinding + properties: + networking: + description: networking defines the networking rules to allow ELBV2 LoadBalancer to access targets in TargetGroup. + properties: + ingress: + description: List of ingress rules to allow ELBV2 LoadBalancer to access targets in TargetGroup. + items: + description: NetworkingIngressRule defines a particular set of traffic that is allowed to access TargetGroup's targets. 
+ properties: + from: + description: List of peers which should be able to access the targets in TargetGroup. At least one NetworkingPeer should be specified. + items: + description: NetworkingPeer defines the source/destination peer for networking rules. + properties: + ipBlock: + description: IPBlock defines an IPBlock peer. If specified, none of the other fields can be set. + properties: + cidr: + description: CIDR is the network CIDR. Both IPV4 or IPV6 CIDR are accepted. + type: string + required: + - cidr + type: object + securityGroup: + description: SecurityGroup defines a SecurityGroup peer. If specified, none of the other fields can be set. + properties: + groupID: + description: GroupID is the EC2 SecurityGroupID. + type: string + required: + - groupID + type: object + type: object + type: array + ports: + description: List of ports which should be made accessible on the targets in TargetGroup. If ports is empty or unspecified, it defaults to all ports with TCP. + items: + description: NetworkingPort defines the port and protocol for networking rules. + properties: + port: + anyOf: + - type: integer + - type: string + description: The port which traffic must match. When NodePort endpoints(instance TargetType) is used, this must be a numerical port. When Port endpoints(ip TargetType) is used, this can be either numerical or named port on pods. if port is unspecified, it defaults to all ports. + x-kubernetes-int-or-string: true + protocol: + description: The protocol which traffic must match. If protocol is unspecified, it defaults to TCP. + enum: + - TCP + - UDP + type: string + type: object + type: array + required: + - from + - ports + type: object + type: array + type: object + nodeSelector: + description: node selector for instance type target groups to only register certain nodes + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + serviceRef: + description: serviceRef is a reference to a Kubernetes Service and ServicePort. + properties: + name: + description: Name is the name of the Service. + type: string + port: + anyOf: + - type: integer + - type: string + description: Port is the port of the ServicePort. + x-kubernetes-int-or-string: true + required: + - name + - port + type: object + targetGroupARN: + description: targetGroupARN is the Amazon Resource Name (ARN) for the TargetGroup. 
+ minLength: 1 + type: string + targetType: + description: targetType is the TargetType of TargetGroup. If unspecified, it will be automatically inferred. + enum: + - instance + - ip + type: string + required: + - serviceRef + - targetGroupARN + type: object + status: + description: TargetGroupBindingStatus defines the observed state of TargetGroupBinding + properties: + observedGeneration: + description: The generation observed by the TargetGroupBinding controller. + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/crds/t b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/crds/t new file mode 100644 index 0000000..0d8ab6f --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/crds/t @@ -0,0 +1,2 @@ + +An error occurred (AccessDeniedException) when calling the CreateRepository operation: User: arn:aws:iam::079788916859:user/u-zawac002 is not authorized to perform: ecr:CreateRepository on resource: arn:aws:ecr:us-east-1:079788916859:repository/eks/test1/my-nginx diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/NOTES.txt b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/NOTES.txt new file mode 100644 index 0000000..04e98e0 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/NOTES.txt @@ -0,0 +1 @@ +AWS Load Balancer controller installed! diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/_helpers.tpl b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/_helpers.tpl new file mode 100644 index 0000000..4304085 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/_helpers.tpl @@ -0,0 +1,93 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "aws-load-balancer-controller.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "aws-load-balancer-controller.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "aws-load-balancer-controller.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Chart name prefix for resource names +Strip the "-controller" suffix from the default .Chart.Name if the nameOverride is not specified. 
+This enables using a shorter name for the resources, for example aws-load-balancer-webhook. +*/}} +{{- define "aws-load-balancer-controller.namePrefix" -}} +{{- $defaultNamePrefix := .Chart.Name | trimSuffix "-controller" -}} +{{- default $defaultNamePrefix .Values.nameOverride | trunc 42 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "aws-load-balancer-controller.labels" -}} +helm.sh/chart: {{ include "aws-load-balancer-controller.chart" . }} +{{ include "aws-load-balancer-controller.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "aws-load-balancer-controller.selectorLabels" -}} +app.kubernetes.io/name: {{ include "aws-load-balancer-controller.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "aws-load-balancer-controller.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "aws-load-balancer-controller.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Generate certificates for webhook +*/}} +{{- define "aws-load-balancer-controller.gen-certs" -}} +{{- $namePrefix := ( include "aws-load-balancer-controller.namePrefix" . ) -}} +{{- $altNames := list ( printf "%s-%s.%s" $namePrefix "webhook-service" .Release.Namespace ) ( printf "%s-%s.%s.svc" $namePrefix "webhook-service" .Release.Namespace ) -}} +{{- $ca := genCA "aws-load-balancer-controller-ca" 3650 -}} +{{- $cert := genSignedCert ( include "aws-load-balancer-controller.fullname" . ) nil $altNames 3650 $ca -}} +caCert: {{ $ca.Cert | b64enc }} +clientCert: {{ $cert.Cert | b64enc }} +clientKey: {{ $cert.Key | b64enc }} +{{- end -}} + +{{/* +Convert map to comma separated key=value string +*/}} +{{- define "aws-load-balancer-controller.convert-map-to-csv" -}} +{{- range $key, $value := . -}} {{ $key }}={{ $value }}, {{- end -}} +{{- end -}} diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/deployment.yaml b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/deployment.yaml new file mode 100644 index 0000000..b9ac7b3 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/deployment.yaml @@ -0,0 +1,180 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "aws-load-balancer-controller.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "aws-load-balancer-controller.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "aws-load-balancer-controller.selectorLabels" . | nindent 6 }} + {{- with .Values.updateStrategy }} + strategy: + {{ toYaml . | nindent 4 }} + {{- end }} + template: + metadata: + labels: + {{- include "aws-load-balancer-controller.selectorLabels" . 
| nindent 8 }} + {{- if .Values.podLabels }} + {{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ (split ":" .Values.metricsBindAddr)._1 | default 8080 }}" + {{- if .Values.podAnnotations }} + {{- toYaml .Values.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "aws-load-balancer-controller.serviceAccountName" . }} + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: {{ template "aws-load-balancer-controller.namePrefix" . }}-tls + {{- with .Values.extraVolumes }} + {{ toYaml . | nindent 6 }} + {{- end }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- if .Values.hostNetwork }} + hostNetwork: true + {{- end }} + containers: + - name: {{ .Chart.Name }} + args: + - --cluster-name={{ required "Chart cannot be installed without a valid clusterName!" .Values.clusterName }} + {{- if .Values.ingressClass }} + - --ingress-class={{ .Values.ingressClass }} + {{- end }} + {{- if .Values.region }} + - --aws-region={{ .Values.region }} + {{- end }} + {{- if .Values.vpcId }} + - --aws-vpc-id={{ .Values.vpcId }} + {{- end }} + {{- if .Values.awsMaxRetries }} + - --aws-max-retries={{ .Values.awsMaxRetries }} + {{- end }} + {{- if kindIs "bool" .Values.enablePodReadinessGateInject }} + - --enable-pod-readiness-gate-inject={{ .Values.enablePodReadinessGateInject }} + {{- end }} + {{- if kindIs "bool" .Values.enableShield }} + - --enable-shield={{ .Values.enableShield }} + {{- end }} + {{- if kindIs "bool" .Values.enableWaf }} + - --enable-waf={{ .Values.enableWaf }} + {{- end }} + {{- if kindIs "bool" .Values.enableWafv2 }} + - --enable-wafv2={{ .Values.enableWafv2 }} + {{- end }} + {{- if .Values.metricsBindAddr }} + - --metrics-bind-addr={{ .Values.metricsBindAddr }} + {{- end }} + {{- if .Values.ingressMaxConcurrentReconciles }} + - --ingress-max-concurrent-reconciles={{ .Values.ingressMaxConcurrentReconciles }} + {{- end }} + {{- if .Values.serviceMaxConcurrentReconciles }} + - --service-max-concurrent-reconciles={{ .Values.serviceMaxConcurrentReconciles }} + {{- end }} + {{- if .Values.targetgroupbindingMaxConcurrentReconciles }} + - --targetgroupbinding-max-concurrent-reconciles={{ .Values.targetgroupbindingMaxConcurrentReconciles }} + {{- end }} + {{- if .Values.targetgroupbindingMaxExponentialBackoffDelay }} + - --targetgroupbinding-max-exponential-backoff-delay={{ .Values.targetgroupbindingMaxExponentialBackoffDelay }} + {{- end }} + {{- if .Values.logLevel }} + - --log-level={{ .Values.logLevel }} + {{- end }} + {{- if .Values.webhookBindPort }} + - --webhook-bind-port={{ .Values.webhookBindPort }} + {{- end }} + {{- if .Values.syncPeriod }} + - --sync-period={{ .Values.syncPeriod }} + {{- end }} + {{- if .Values.watchNamespace }} + - --watch-namespace={{ .Values.watchNamespace }} + {{- end }} + {{- if kindIs "bool" .Values.disableIngressClassAnnotation }} + - --disable-ingress-class-annotation={{ .Values.disableIngressClassAnnotation }} + {{- end }} + {{- if kindIs "bool" .Values.disableIngressGroupNameAnnotation }} + - --disable-ingress-group-name-annotation={{ .Values.disableIngressGroupNameAnnotation }} + {{- end }} + {{- if .Values.defaultSSLPolicy }} + - --default-ssl-policy={{ .Values.defaultSSLPolicy }} + {{- end }} + {{- if .Values.externalManagedTags }} + - --external-managed-tags={{ join "," .Values.externalManagedTags }} + {{- 
end }} + {{- if .Values.defaultTags }} + - --default-tags={{ include "aws-load-balancer-controller.convert-map-to-csv" .Values.defaultTags | trimSuffix "," }} + {{- end }} + {{- if .Values.env }} + env: + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + {{- end }} + command: + - /controller + securityContext: + {{- toYaml .Values.securityContext | nindent 10 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + {{- with .Values.extraVolumeMounts }} + {{ toYaml . | nindent 8 }} + {{- end }} + ports: + - name: webhook-server + containerPort: {{ .Values.webhookBindPort | default 9443 }} + protocol: TCP + - name: metrics-server + containerPort: {{ (split ":" .Values.metricsBindAddr)._1 | default 8080 }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 10 }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 10 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: + {{- toYaml .Values.affinity | nindent 8 }} + {{- else }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - {{ include "aws-load-balancer-controller.name" . }} + topologyKey: kubernetes.io/hostname + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/pdb.yaml b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/pdb.yaml new file mode 100644 index 0000000..9e5ff91 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/pdb.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.podDisruptionBudget (gt (int .Values.replicaCount) 1) }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ include "aws-load-balancer-controller.fullname" . }} + labels: + {{- include "aws-load-balancer-controller.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "aws-load-balancer-controller.selectorLabels" . | nindent 6 }} + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/rbac.yaml b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/rbac.yaml new file mode 100644 index 0000000..2b059c2 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/rbac.yaml @@ -0,0 +1,80 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "aws-load-balancer-controller.fullname" . }}-leader-election-role + namespace: {{ .Release.Namespace }} + labels: + {{- include "aws-load-balancer-controller.labels" . 
| nindent 4 }} +rules: +- apiGroups: [""] + resources: [configmaps] + verbs: [create] +- apiGroups: [""] + resources: [configmaps] + resourceNames: [aws-load-balancer-controller-leader] + verbs: [get, patch, update] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "aws-load-balancer-controller.fullname" . }}-leader-election-rolebinding + namespace: {{ .Release.Namespace }} + labels: + {{- include "aws-load-balancer-controller.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "aws-load-balancer-controller.fullname" . }}-leader-election-role +subjects: +- kind: ServiceAccount + name: {{ template "aws-load-balancer-controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "aws-load-balancer-controller.fullname" . }}-role + labels: + {{- include "aws-load-balancer-controller.labels" . | nindent 4 }} +rules: +- apiGroups: ["elbv2.k8s.aws"] + resources: [targetgroupbindings] + verbs: [create, delete, get, list, patch, update, watch] +- apiGroups: ["elbv2.k8s.aws"] + resources: [ingressclassparams] + verbs: [get, list, watch] +- apiGroups: [""] + resources: [events] + verbs: [create, patch] +- apiGroups: [""] + resources: [pods] + verbs: [get, list, watch] +- apiGroups: ["networking.k8s.io"] + resources: [ingressclasses] + verbs: [get, list, watch] +- apiGroups: ["", "extensions", "networking.k8s.io"] + resources: [services, ingresses] + verbs: [get, list, patch, update, watch] +- apiGroups: [""] + resources: [nodes, secrets, namespaces, endpoints] + verbs: [get, list, watch] +- apiGroups: ["elbv2.k8s.aws", "", "extensions", "networking.k8s.io"] + resources: [targetgroupbindings/status, pods/status, services/status, ingresses/status] + verbs: [update, patch] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "aws-load-balancer-controller.fullname" . }}-rolebinding + labels: + {{- include "aws-load-balancer-controller.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "aws-load-balancer-controller.fullname" . }}-role +subjects: +- kind: ServiceAccount + name: {{ template "aws-load-balancer-controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/service.yaml b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/service.yaml new file mode 100644 index 0000000..812f1b1 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "aws-load-balancer-controller.namePrefix" . }}-webhook-service + namespace: {{ .Release.Namespace }} + labels: +{{ include "aws-load-balancer-controller.labels" . | indent 4 }} +spec: + ports: + - port: 443 + targetPort: webhook-server + selector: + {{- include "aws-load-balancer-controller.selectorLabels" . 
| nindent 4 }} diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/serviceaccount.yaml b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/serviceaccount.yaml new file mode 100644 index 0000000..cb82d37 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "aws-load-balancer-controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "aws-load-balancer-controller.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/webhook.yaml b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/webhook.yaml new file mode 100644 index 0000000..ff5d747 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/templates/webhook.yaml @@ -0,0 +1,170 @@ +{{ $tls := fromYaml ( include "aws-load-balancer-controller.gen-certs" . ) }} +--- +{{- if .Capabilities.APIVersions.Has "admissionregistration.k8s.io/v1" }} +apiVersion: admissionregistration.k8s.io/v1 +{{- else }} +apiVersion: admissionregistration.k8s.io/v1beta1 +{{- end }} +kind: MutatingWebhookConfiguration +metadata: +{{- if $.Values.enableCertManager }} + annotations: + cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ template "aws-load-balancer-controller.namePrefix" . }}-serving-cert +{{- end }} + name: {{ include "aws-load-balancer-controller.namePrefix" . }}-webhook + labels: + {{- include "aws-load-balancer-controller.labels" . | nindent 4 }} +webhooks: +- clientConfig: + caBundle: {{ if not $.Values.enableCertManager -}}{{ $tls.caCert }}{{- else -}}Cg=={{ end }} + service: + name: {{ template "aws-load-balancer-controller.namePrefix" . }}-webhook-service + namespace: {{ $.Release.Namespace }} + path: /mutate-v1-pod + failurePolicy: Fail + name: mpod.elbv2.k8s.aws + admissionReviewVersions: + - v1beta1 + namespaceSelector: + matchExpressions: + - key: elbv2.k8s.aws/pod-readiness-gate-inject + operator: In + values: + - enabled + objectSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: NotIn + values: + - {{ include "aws-load-balancer-controller.name" . }} + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + resources: + - pods + sideEffects: None +- clientConfig: + caBundle: {{ if not $.Values.enableCertManager -}}{{ $tls.caCert }}{{- else -}}Cg=={{ end }} + service: + name: {{ template "aws-load-balancer-controller.namePrefix" . 
}}-webhook-service + namespace: {{ $.Release.Namespace }} + path: /mutate-elbv2-k8s-aws-v1beta1-targetgroupbinding + failurePolicy: Fail + name: mtargetgroupbinding.elbv2.k8s.aws + admissionReviewVersions: + - v1beta1 + rules: + - apiGroups: + - elbv2.k8s.aws + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - targetgroupbindings + sideEffects: None +--- +{{- if .Capabilities.APIVersions.Has "admissionregistration.k8s.io/v1" }} +apiVersion: admissionregistration.k8s.io/v1 +{{- else }} +apiVersion: admissionregistration.k8s.io/v1beta1 +{{- end }} +kind: ValidatingWebhookConfiguration +metadata: +{{- if $.Values.enableCertManager }} + annotations: + cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ template "aws-load-balancer-controller.namePrefix" . }}-serving-cert +{{- end }} + name: {{ include "aws-load-balancer-controller.namePrefix" . }}-webhook + labels: + {{- include "aws-load-balancer-controller.labels" . | nindent 4 }} +webhooks: +- clientConfig: + caBundle: {{ if not $.Values.enableCertManager -}}{{ $tls.caCert }}{{- else -}}Cg=={{ end }} + service: + name: {{ template "aws-load-balancer-controller.namePrefix" . }}-webhook-service + namespace: {{ $.Release.Namespace }} + path: /validate-elbv2-k8s-aws-v1beta1-targetgroupbinding + failurePolicy: Fail + name: vtargetgroupbinding.elbv2.k8s.aws + admissionReviewVersions: + - v1beta1 + rules: + - apiGroups: + - elbv2.k8s.aws + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - targetgroupbindings + sideEffects: None +- clientConfig: + caBundle: {{ if not $.Values.enableCertManager -}}{{ $tls.caCert }}{{- else -}}Cg=={{ end }} + service: + name: {{ template "aws-load-balancer-controller.namePrefix" . }}-webhook-service + namespace: {{ $.Release.Namespace }} + path: /validate-networking-v1beta1-ingress + failurePolicy: Fail + matchPolicy: Equivalent + name: vingress.elbv2.k8s.aws + admissionReviewVersions: + - v1beta1 + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + sideEffects: None +--- +{{- if not $.Values.enableCertManager }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "aws-load-balancer-controller.namePrefix" . }}-tls + namespace: {{ .Release.Namespace }} + labels: +{{ include "aws-load-balancer-controller.labels" . | indent 4 }} +type: kubernetes.io/tls +data: + ca.crt: {{ $tls.caCert }} + tls.crt: {{ $tls.clientCert }} + tls.key: {{ $tls.clientKey }} +{{- else }} +apiVersion: cert-manager.io/v1alpha2 +kind: Certificate +metadata: + name: {{ template "aws-load-balancer-controller.namePrefix" . }}-serving-cert + namespace: {{ .Release.Namespace }} + labels: +{{ include "aws-load-balancer-controller.labels" . | indent 4 }} +spec: + dnsNames: + - {{ template "aws-load-balancer-controller.namePrefix" . }}-webhook-service.{{ .Release.Namespace }}.svc + - {{ template "aws-load-balancer-controller.namePrefix" . }}-webhook-service.{{ .Release.Namespace }}.svc.cluster.local + issuerRef: + kind: Issuer + name: {{ template "aws-load-balancer-controller.namePrefix" . }}-selfsigned-issuer + secretName: {{ template "aws-load-balancer-controller.namePrefix" . }}-tls +--- +apiVersion: cert-manager.io/v1alpha2 +kind: Issuer +metadata: + name: {{ template "aws-load-balancer-controller.namePrefix" . }}-selfsigned-issuer + namespace: {{ .Release.Namespace }} + labels: +{{ include "aws-load-balancer-controller.labels" . 
| indent 4 }} +spec: + selfSigned: {} +{{- end }} diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/test.yaml b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/test.yaml new file mode 100644 index 0000000..529303c --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/test.yaml @@ -0,0 +1,181 @@ +# Default values for aws-load-balancer-controller. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 2 + +image: + repository: 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-load-balancer-controller + tag: v2.2.0 + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +# The name of the Kubernetes cluster. A non-empty value is required +clusterName: test-cluster + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + # Automount API credentials for a Service Account. + automountServiceAccountToken: true + +rbac: + # Specifies whether rbac resources should be created + create: true + +podSecurityContext: + fsGroup: 65534 + +securityContext: + # capabilities: + # drop: + # - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + allowPrivilegeEscalation: false + +# Time period for the controller pod to do a graceful shutdown +terminationGracePeriodSeconds: 10 + +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + +# priorityClassName specifies the PriorityClass to indicate the importance of controller pods +# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +priorityClassName: system-cluster-critical + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +podAnnotations: {} + +podLabels: {} + +# Enable cert-manager +enableCertManager: false + +# The ingress class this controller will satisfy. If not specified, controller will match all +# ingresses without ingress class annotation and ingresses of type alb +ingressClass: alb + +# The AWS region for the kubernetes cluster. Set to use KIAM or kube2iam for example. +region: + +# The VPC ID for the Kubernetes cluster. Set this manually when your pods are unable to use the metadata service to determine this automatically +vpcId: + +# Maximum retries for AWS APIs (default 10) +awsMaxRetries: + +# If enabled, targetHealth readiness gate will get injected to the pod spec for the matching endpoint pods (default true) +enablePodReadinessGateInject: + +# Enable Shield addon for ALB (default true) +enableShield: + +# Enable WAF addon for ALB (default true) +enableWaf: + +# Enable WAF V2 addon for ALB (default true) +enableWafv2: + +# Maximum number of concurrently running reconcile loops for ingress (default 3) +ingressMaxConcurrentReconciles: + +# Set the controller log level - info(default), debug (default "info") +logLevel: + +# The address the metric endpoint binds to. (default ":8080") +metricsBindAddr: "" + +# The TCP port the Webhook server binds to. 
(default 9443) +webhookBindPort: + +# Maximum number of concurrently running reconcile loops for service (default 3) +serviceMaxConcurrentReconciles: + +# Maximum number of concurrently running reconcile loops for targetGroupBinding +targetgroupbindingMaxConcurrentReconciles: + +# Period at which the controller forces the repopulation of its local object stores. (default 1h0m0s) +syncPeriod: + +# Namespace the controller watches for updates to Kubernetes objects, If empty, all namespaces are watched. +watchNamespace: + +# disableIngressClassAnnotation disables the usage of kubernetes.io/ingress.class annotation, false by default +disableIngressClassAnnotation: + +# disableIngressGroupNameAnnotation disables the usage of alb.ingress.kubernetes.io/group.name annotation, false by default +disableIngressGroupNameAnnotation: + +# defaultSSLPolicy specifies the default SSL policy to use for TLS/HTTPS listeners +defaultSSLPolicy: + +# Liveness probe configuration for the controller +livenessProbe: + failureThreshold: 2 + httpGet: + path: /healthz + port: 61779 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 10 + +# Environment variables to set for aws-load-balancer-controller pod. +# We strongly discourage programming access credentials in the controller environment. You should setup IRSA or +# comparable solutions like kube2iam, kiam etc instead. +env: + +# Specifies if aws-load-balancer-controller should be started in hostNetwork mode. +# +# This is required if using a custom CNI where the managed control plane nodes are unable to initiate +# network connections to the pods, for example using Calico CNI plugin on EKS. This is not required or +# recommended if using the Amazon VPC CNI plugin. +hostNetwork: false + +# extraVolumeMounts are the additional volume mounts. This enables setting up IRSA on non-EKS Kubernetes cluster +extraVolumeMounts: + - name: aws-iam-token + mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount + readOnly: true + +# extraVolumes for the extraVolumeMounts. Useful to mount a projected service account token for example. +extraVolumes: + - name: aws-iam-token + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + audience: sts.amazonaws.com + expirationSeconds: 86400 + path: token + +# defaultTags are the tags to apply to all AWS resources managed by this controller +defaultTags: + default_tag1: value1 + default_tag2: value2 + +# podDisruptionBudget specifies the disruption budget for the controller pods. +# Disruption budget will be configured only when the replicaCount is greater than 1 +podDisruptionBudget: + maxUnavailable: 1 + +# externalManagedTags is the list of tag keys on AWS resources that will be managed externally +externalManagedTags: [] diff --git a/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/values.yaml b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/values.yaml new file mode 100644 index 0000000..d9d6335 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/charts/aws-load-balancer-controller/values.yaml @@ -0,0 +1,196 @@ +# Default values for aws-load-balancer-controller. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
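+#
+# These defaults are normally overridden per release rather than edited here.
+# A minimal override sketch -- mirroring a subset of what this repo's
+# alb-controller main.tf sets through its helm_release "set" blocks; the
+# cluster name "my-cluster" is a placeholder:
+#
+#   clusterName: my-cluster
+#   serviceAccount:
+#     create: false
+#     name: aws-load-balancer-controller
+#
+# Saved as overrides.yaml, this could be applied with, for example:
+#   helm upgrade --install aws-load-balancer-controller . -n kube-system -f overrides.yaml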
+ +replicaCount: 2 + +image: + repository: 602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-load-balancer-controller + tag: v2.2.3 + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +# The name of the Kubernetes cluster. A non-empty value is required +clusterName: + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + # Automount API credentials for a Service Account. + automountServiceAccountToken: true + +rbac: + # Specifies whether rbac resources should be created + create: true + +podSecurityContext: + fsGroup: 65534 + +securityContext: + # capabilities: + # drop: + # - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + allowPrivilegeEscalation: false + +# Time period for the controller pod to do a graceful shutdown +terminationGracePeriodSeconds: 10 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +# priorityClassName specifies the PriorityClass to indicate the importance of controller pods +# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +priorityClassName: system-cluster-critical + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +updateStrategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 1 + +podAnnotations: {} + +podLabels: {} + +# Enable cert-manager +enableCertManager: false + +# The ingress class this controller will satisfy. If not specified, controller will match all +# ingresses without ingress class annotation and ingresses of type alb +ingressClass: alb + +# The AWS region for the kubernetes cluster. Set to use KIAM or kube2iam for example. +region: + +# The VPC ID for the Kubernetes cluster. Set this manually when your pods are unable to use the metadata service to determine this automatically +vpcId: + +# Maximum retries for AWS APIs (default 10) +awsMaxRetries: + +# If enabled, targetHealth readiness gate will get injected to the pod spec for the matching endpoint pods (default true) +enablePodReadinessGateInject: + +# Enable Shield addon for ALB (default true) +enableShield: + +# Enable WAF addon for ALB (default true) +enableWaf: + +# Enable WAF V2 addon for ALB (default true) +enableWafv2: + +# Maximum number of concurrently running reconcile loops for ingress (default 3) +ingressMaxConcurrentReconciles: + +# Set the controller log level - info(default), debug (default "info") +logLevel: + +# The address the metric endpoint binds to. (default ":8080") +metricsBindAddr: "" + +# The TCP port the Webhook server binds to. 
(default 9443) +webhookBindPort: + +# Maximum number of concurrently running reconcile loops for service (default 3) +serviceMaxConcurrentReconciles: + +# Maximum number of concurrently running reconcile loops for targetGroupBinding +targetgroupbindingMaxConcurrentReconciles: + +# Maximum duration of exponential backoff for targetGroupBinding reconcile failures +targetgroupbindingMaxExponentialBackoffDelay: + +# Period at which the controller forces the repopulation of its local object stores. (default 1h0m0s) +syncPeriod: + +# Namespace the controller watches for updates to Kubernetes objects, If empty, all namespaces are watched. +watchNamespace: + +# disableIngressClassAnnotation disables the usage of kubernetes.io/ingress.class annotation, false by default +disableIngressClassAnnotation: + +# disableIngressGroupNameAnnotation disables the usage of alb.ingress.kubernetes.io/group.name annotation, false by default +disableIngressGroupNameAnnotation: + +# defaultSSLPolicy specifies the default SSL policy to use for TLS/HTTPS listeners +defaultSSLPolicy: + +# Liveness probe configuration for the controller +livenessProbe: + failureThreshold: 2 + httpGet: + path: /healthz + port: 61779 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 10 + +# Environment variables to set for aws-load-balancer-controller pod. +# We strongly discourage programming access credentials in the controller environment. You should setup IRSA or +# comparable solutions like kube2iam, kiam etc instead. +env: + # ENV_1: "" + # ENV_2: "" + +# Specifies if aws-load-balancer-controller should be started in hostNetwork mode. +# +# This is required if using a custom CNI where the managed control plane nodes are unable to initiate +# network connections to the pods, for example using Calico CNI plugin on EKS. This is not required or +# recommended if using the Amazon VPC CNI plugin. +hostNetwork: false + +# extraVolumeMounts are the additional volume mounts. This enables setting up IRSA on non-EKS Kubernetes cluster +extraVolumeMounts: + # - name: aws-iam-token + # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount + # readOnly: true + +# extraVolumes for the extraVolumeMounts. Useful to mount a projected service account token for example. +extraVolumes: + # - name: aws-iam-token + # projected: + # defaultMode: 420 + # sources: + # - serviceAccountToken: + # audience: sts.amazonaws.com + # expirationSeconds: 86400 + # path: token + +# defaultTags are the tags to apply to all AWS resources managed by this controller +defaultTags: {} + # default_tag1: value1 + # default_tag2: value2 + +# podDisruptionBudget specifies the disruption budget for the controller pods. +# Disruption budget will be configured only when the replicaCount is greater than 1 +podDisruptionBudget: {} +# maxUnavailable: 1 + +# externalManagedTags is the list of tag keys on AWS resources that will be managed externally +externalManagedTags: [] diff --git a/examples/established-cluster-examples/alb-controller/crds.tf.0.14.8 b/examples/established-cluster-examples/alb-controller/crds.tf.0.14.8 new file mode 100644 index 0000000..4a5e2da --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/crds.tf.0.14.8 @@ -0,0 +1,663 @@ +# These are the CRDs that need to be loaded for the aws-load-balancer-controller. +# Based upon the crds/crds.yaml file in the chart.
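+#
+# Note: the kubernetes_manifest resource used below requires Terraform 0.14.8
+# or newer -- presumably the reason for this file's .0.14.8 suffix -- and a
+# 2.x release of the hashicorp/kubernetes provider.
+#
+# Once these CRDs are applied, the same resource type can manage the objects
+# they define. A minimal, hypothetical sketch of a TargetGroupBinding that
+# attaches an existing Service to an existing AWS target group (all names and
+# the ARN are placeholders; the required fields follow the v1beta1 schema
+# declared below):
+#
+# resource "kubernetes_manifest" "example-tgb" {
+#   manifest = {
+#     "apiVersion" = "elbv2.k8s.aws/v1beta1"
+#     "kind"       = "TargetGroupBinding"
+#     "metadata" = {
+#       "name"      = "example"
+#       "namespace" = "default"
+#     }
+#     "spec" = {
+#       "serviceRef" = {
+#         "name" = "example-svc"
+#         "port" = 80
+#       }
+#       "targetGroupARN" = "arn:aws:elasticloadbalancing:<region>:<account>:targetgroup/<name>/<id>"
+#     }
+#   }
+# }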
+ + +resource "kubernetes_manifest" "ingress-crd" { + manifest = { + "apiVersion" = "apiextensions.k8s.io/v1" + "kind" = "CustomResourceDefinition" + + "metadata" = { + "annotations" = { + "controller-gen.kubebuilder.io/version" = "v0.5.0" + } + "name" = "ingressclassparams.elbv2.k8s.aws" + } + + "spec" = { + "group" = "elbv2.k8s.aws" + + "names" = { + "kind" = "IngressClassParams" + "listKind" = "IngressClassParamsList" + "plural" = "ingressclassparams" + "singular" = "ingressclassparams" + } + + "scope" = "Cluster" + + "versions" = [ + { + "additionalPrinterColumns" = [ + { + "description" = "The Ingress Group name" + "jsonPath" = ".spec.group.name" + "name" = "GROUP-NAME" + "type" = "string" + }, + { + "description" = "The AWS Load Balancer scheme" + "jsonPath" = ".spec.scheme" + "name" = "SCHEME" + "type" = "string" + }, + { + "description" = "The AWS Load Balancer ipAddressType" + "jsonPath" = ".spec.ipAddressType" + "name" = "IP-ADDRESS-TYPE" + "type" = "string" + }, + { + "jsonPath" = ".metadata.creationTimestamp" + "name" = "AGE" + "type" = "date" + }, + ] + "name" = "v1beta1" + "schema" = { + "openAPIV3Schema" = { + "description" = "IngressClassParams is the Schema for the IngressClassParams API" + "properties" = { + "apiVersion" = { + "description" = "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + "type" = "string" + } + "kind" = { + "description" = "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + "type" = "string" + } + "metadata" = { + "type" = "object" + } + "spec" = { + "description" = "IngressClassParamsSpec defines the desired state of IngressClassParams" + "properties" = { + "group" = { + "description" = "Group defines the IngressGroup for all Ingresses that belong to IngressClass with this IngressClassParams." + "properties" = { + "name" = { + "description" = "Name is the name of IngressGroup." + "type" = "string" + } + } + "required" = [ + "name", + ] + "type" = "object" + } + "ipAddressType" = { + "description" = "IPAddressType defines the ip address type for all Ingresses that belong to IngressClass with this IngressClassParams." + "enum" = [ + "ipv4", + "dualstack", + ] + "type" = "string" + } + "namespaceSelector" = { + "description" = "NamespaceSelector restrict the namespaces of Ingresses that are allowed to specify the IngressClass with this IngressClassParams. * if absent or present but empty, it selects all namespaces." + "properties" = { + "matchExpressions" = { + "description" = "matchExpressions is a list of label selector requirements. The requirements are ANDed." + "items" = { + "description" = "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + "properties" = { + "key" = { + "description" = "key is the label key that the selector applies to." + "type" = "string" + } + "operator" = { + "description" = "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." 
+ "type" = "string" + } + "values" = { + "description" = "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + "items" = { + "type" = "string" + } + "type" = "array" + } + } + "required" = [ + "key", + "operator", + ] + "type" = "object" + } + "type" = "array" + } + "matchLabels" = { + "additionalProperties" = { + "type" = "string" + } + "description" = "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + "type" = "object" + } + } + "type" = "object" + } + "scheme" = { + "description" = "Scheme defines the scheme for all Ingresses that belong to IngressClass with this IngressClassParams." + "enum" = [ + "internal", + "internet-facing", + ] + "type" = "string" + } + "tags" = { + "description" = "Tags defines list of Tags on AWS resources provisioned for Ingresses that belong to IngressClass with this IngressClassParams." + "items" = { + "description" = "Tag defines a AWS Tag on resources." + "properties" = { + "key" = { + "description" = "The key of the tag." + "type" = "string" + } + "value" = { + "description" = "The value of the tag." + "type" = "string" + } + } + "required" = [ + "key", + "value", + ] + "type" = "object" + } + "type" = "array" + } + } + "type" = "object" + } + } + "type" = "object" + } + } + "served" = true + "storage" = true + "subresources" = {} + }, + ] + } + } +} + + +resource "kubernetes_manifest" "target-crd" { + manifest = { + "apiVersion" = "apiextensions.k8s.io/v1" + "kind" = "CustomResourceDefinition" + + "metadata" = { + "annotations" = { + "controller-gen.kubebuilder.io/version" = "v0.5.0" + } + "name" = "targetgroupbindings.elbv2.k8s.aws" + } + + "spec" = { + + "group" = "elbv2.k8s.aws" + + "names" = { + "kind" = "TargetGroupBinding" + "listKind" = "TargetGroupBindingList" + "plural" = "targetgroupbindings" + "singular" = "targetgroupbinding" + } + + "scope" = "Namespaced" + + "versions" = [ + { + "additionalPrinterColumns" = [ + { + "description" = "The Kubernetes Service's name" + "jsonPath" = ".spec.serviceRef.name" + "name" = "SERVICE-NAME" + "type" = "string" + }, + { + "description" = "The Kubernetes Service's port" + "jsonPath" = ".spec.serviceRef.port" + "name" = "SERVICE-PORT" + "type" = "string" + }, + { + "description" = "The AWS TargetGroup's TargetType" + "jsonPath" = ".spec.targetType" + "name" = "TARGET-TYPE" + "type" = "string" + }, + { + "description" = "The AWS TargetGroup's Amazon Resource Name" + "jsonPath" = ".spec.targetGroupARN" + "name" = "ARN" + "priority" = 1 + "type" = "string" + }, + { + "jsonPath" = ".metadata.creationTimestamp" + "name" = "AGE" + "type" = "date" + }, + ] + "name" = "v1alpha1" + "schema" = { + "openAPIV3Schema" = { + "description" = "TargetGroupBinding is the Schema for the TargetGroupBinding API" + "properties" = { + "apiVersion" = { + "description" = "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + "type" = "string" + } + "kind" = { + "description" = "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + "type" = "string" + } + "metadata" = { + "type" = "object" + } + "spec" = { + "description" = "TargetGroupBindingSpec defines the desired state of TargetGroupBinding" + "properties" = { + "networking" = { + "description" = "networking provides the networking setup for ELBV2 LoadBalancer to access targets in TargetGroup." + "properties" = { + "ingress" = { + "description" = "List of ingress rules to allow ELBV2 LoadBalancer to access targets in TargetGroup." + "items" = { + "properties" = { + "from" = { + "description" = "List of peers which should be able to access the targets in TargetGroup. At least one NetworkingPeer should be specified." + "items" = { + "description" = "NetworkingPeer defines the source/destination peer for networking rules." + "properties" = { + "ipBlock" = { + "description" = "IPBlock defines an IPBlock peer. If specified, none of the other fields can be set." + "properties" = { + "cidr" = { + "description" = "CIDR is the network CIDR. Both IPV4 or IPV6 CIDR are accepted." + "type" = "string" + } + } + "required" = [ + "cidr", + ] + "type" = "object" + } + "securityGroup" = { + "description" = "SecurityGroup defines a SecurityGroup peer. If specified, none of the other fields can be set." + "properties" = { + "groupID" = { + "description" = "GroupID is the EC2 SecurityGroupID." + "type" = "string" + } + } + "required" = [ + "groupID", + ] + "type" = "object" + } + } + "type" = "object" + } + "type" = "array" + } + "ports" = { + "description" = "List of ports which should be made accessible on the targets in TargetGroup. If ports is empty or unspecified, it defaults to all ports with TCP." + "items" = { + "properties" = { + "port" = { + "anyOf" = [ + { + "type" = "integer" + }, + { + "type" = "string" + }, + ] + "description" = "The port which traffic must match. When NodePort endpoints(instance TargetType) is used, this must be a numerical port. When Port endpoints(ip TargetType) is used, this can be either numerical or named port on pods. if port is unspecified, it defaults to all ports." + "x-kubernetes-int-or-string" = true + } + "protocol" = { + "description" = "The protocol which traffic must match. If protocol is unspecified, it defaults to TCP." + "enum" = [ + "TCP", + "UDP", + ] + "type" = "string" + } + } + "type" = "object" + } + "type" = "array" + } + } + "required" = [ + "from", + "ports", + ] + "type" = "object" + } + "type" = "array" + } + } + "type" = "object" + } + "serviceRef" = { + "description" = "serviceRef is a reference to a Kubernetes Service and ServicePort." + "properties" = { + "name" = { + "description" = "Name is the name of the Service." + "type" = "string" + } + "port" = { + "anyOf" = [ + { + "type" = "integer" + }, + { + "type" = "string" + }, + ] + "description" = "Port is the port of the ServicePort." + "x-kubernetes-int-or-string" = true + } + } + "required" = [ + "name", + "port", + ] + "type" = "object" + } + "targetGroupARN" = { + "description" = "targetGroupARN is the Amazon Resource Name (ARN) for the TargetGroup." 
+ "type" = "string" + } + "targetType" = { + "description" = "targetType is the TargetType of TargetGroup. If unspecified, it will be automatically inferred." + "enum" = [ + "instance", + "ip", + ] + "type" = "string" + } + } + "required" = [ + "serviceRef", + "targetGroupARN", + ] + "type" = "object" + } + "status" = { + "description" = "TargetGroupBindingStatus defines the observed state of TargetGroupBinding" + "properties" = { + "observedGeneration" = { + "description" = "The generation observed by the TargetGroupBinding controller." + "format" = "int64" + "type" = "integer" + } + } + "type" = "object" + } + } + "type" = "object" + } + } + "served" = true + "storage" = false + "subresources" = { + "status" = {} + } + }, + { + "additionalPrinterColumns" = [ + { + "description" = "The Kubernetes Service's name" + "jsonPath" = ".spec.serviceRef.name" + "name" = "SERVICE-NAME" + "type" = "string" + }, + { + "description" = "The Kubernetes Service's port" + "jsonPath" = ".spec.serviceRef.port" + "name" = "SERVICE-PORT" + "type" = "string" + }, + { + "description" = "The AWS TargetGroup's TargetType" + "jsonPath" = ".spec.targetType" + "name" = "TARGET-TYPE" + "type" = "string" + }, + { + "description" = "The AWS TargetGroup's Amazon Resource Name" + "jsonPath" = ".spec.targetGroupARN" + "name" = "ARN" + "priority" = 1 + "type" = "string" + }, + { + "jsonPath" = ".metadata.creationTimestamp" + "name" = "AGE" + "type" = "date" + }, + ] + "name" = "v1beta1" + "schema" = { + "openAPIV3Schema" = { + "description" = "TargetGroupBinding is the Schema for the TargetGroupBinding API" + "properties" = { + "apiVersion" = { + "description" = "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + "type" = "string" + } + "kind" = { + "description" = "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + "type" = "string" + } + "metadata" = { + "type" = "object" + } + "spec" = { + "description" = "TargetGroupBindingSpec defines the desired state of TargetGroupBinding" + "properties" = { + "networking" = { + "description" = "networking defines the networking rules to allow ELBV2 LoadBalancer to access targets in TargetGroup." + "properties" = { + "ingress" = { + "description" = "List of ingress rules to allow ELBV2 LoadBalancer to access targets in TargetGroup." + "items" = { + "description" = "NetworkingIngressRule defines a particular set of traffic that is allowed to access TargetGroup's targets." + "properties" = { + "from" = { + "description" = "List of peers which should be able to access the targets in TargetGroup. At least one NetworkingPeer should be specified." + "items" = { + "description" = "NetworkingPeer defines the source/destination peer for networking rules." + "properties" = { + "ipBlock" = { + "description" = "IPBlock defines an IPBlock peer. If specified, none of the other fields can be set." + "properties" = { + "cidr" = { + "description" = "CIDR is the network CIDR. Both IPV4 or IPV6 CIDR are accepted." 
+ "type" = "string" + } + } + "required" = [ + "cidr", + ] + "type" = "object" + } + "securityGroup" = { + "description" = "SecurityGroup defines a SecurityGroup peer. If specified, none of the other fields can be set." + "properties" = { + "groupID" = { + "description" = "GroupID is the EC2 SecurityGroupID." + "type" = "string" + } + } + "required" = [ + "groupID", + ] + "type" = "object" + } + } + "type" = "object" + } + "type" = "array" + } + "ports" = { + "description" = "List of ports which should be made accessible on the targets in TargetGroup. If ports is empty or unspecified, it defaults to all ports with TCP." + "items" = { + "description" = "NetworkingPort defines the port and protocol for networking rules." + "properties" = { + "port" = { + "anyOf" = [ + { + "type" = "integer" + }, + { + "type" = "string" + }, + ] + "description" = "The port which traffic must match. When NodePort endpoints(instance TargetType) is used, this must be a numerical port. When Port endpoints(ip TargetType) is used, this can be either numerical or named port on pods. if port is unspecified, it defaults to all ports." + "x-kubernetes-int-or-string" = true + } + "protocol" = { + "description" = "The protocol which traffic must match. If protocol is unspecified, it defaults to TCP." + "enum" = [ + "TCP", + "UDP", + ] + "type" = "string" + } + } + "type" = "object" + } + "type" = "array" + } + } + "required" = [ + "from", + "ports", + ] + "type" = "object" + } + "type" = "array" + } + } + "type" = "object" + } + "nodeSelector" = { + "description" = "node selector for instance type target groups to only register certain nodes" + "properties" = { + "matchExpressions" = { + "description" = "matchExpressions is a list of label selector requirements. The requirements are ANDed." + "items" = { + "description" = "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values." + "properties" = { + "key" = { + "description" = "key is the label key that the selector applies to." + "type" = "string" + } + "operator" = { + "description" = "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist." + "type" = "string" + } + "values" = { + "description" = "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." + "items" = { + "type" = "string" + } + "type" = "array" + } + } + "required" = [ + "key", + "operator", + ] + "type" = "object" + } + "type" = "array" + } + "matchLabels" = { + "additionalProperties" = { + "type" = "string" + } + "description" = "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." + "type" = "object" + } + } + "type" = "object" + } + "serviceRef" = { + "description" = "serviceRef is a reference to a Kubernetes Service and ServicePort." + "properties" = { + "name" = { + "description" = "Name is the name of the Service." + "type" = "string" + } + "port" = { + "anyOf" = [ + { + "type" = "integer" + }, + { + "type" = "string" + }, + ] + "description" = "Port is the port of the ServicePort." 
+ "x-kubernetes-int-or-string" = true + } + } + "required" = [ + "name", + "port", + ] + "type" = "object" + } + "targetGroupARN" = { + "description" = "targetGroupARN is the Amazon Resource Name (ARN) for the TargetGroup." + "minLength" = 1 + "type" = "string" + } + "targetType" = { + "description" = "targetType is the TargetType of TargetGroup. If unspecified, it will be automatically inferred." + "enum" = [ + "instance", + "ip", + ] + "type" = "string" + } + } + "required" = [ + "serviceRef", + "targetGroupARN", + ] + "type" = "object" + } + "status" = { + "description" = "TargetGroupBindingStatus defines the observed state of TargetGroupBinding" + "properties" = { + "observedGeneration" = { + "description" = "The generation observed by the TargetGroupBinding controller." + "format" = "int64" + "type" = "integer" + } + } + "type" = "object" + } + } + "type" = "object" + } + } + "served" = true + "storage" = true + "subresources" = { + "status" = {} + } + }, + ] + } + } +} diff --git a/examples/established-cluster-examples/alb-controller/data.eks.tf b/examples/established-cluster-examples/alb-controller/data.eks.tf new file mode 100644 index 0000000..4cebea9 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/data.eks.tf @@ -0,0 +1,15 @@ +data "aws_eks_cluster" "cluster" { + name = var.cluster_name +} + +data "aws_eks_cluster_auth" "cluster" { + name = var.cluster_name +} + +locals { + aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster +# for main.tf +# aws_eks_cluster = aws_eks_cluster.eks_cluster +# for all subdirectories + aws_eks_cluster = data.aws_eks_cluster.cluster +} diff --git a/examples/established-cluster-examples/alb-controller/ecr.tf b/examples/established-cluster-examples/alb-controller/ecr.tf new file mode 120000 index 0000000..654d0cc --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/ecr.tf @@ -0,0 +1 @@ +../efs/ecr.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/alb-controller/iam_policy.json b/examples/established-cluster-examples/alb-controller/iam_policy.json new file mode 100644 index 0000000..c11ff94 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/iam_policy.json @@ -0,0 +1,207 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "iam:CreateServiceLinkedRole", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeInstances", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeTags", + "ec2:GetCoipPoolUsage", + "ec2:DescribeCoipPools", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeListenerCertificates", + "elasticloadbalancing:DescribeSSLPolicies", + "elasticloadbalancing:DescribeRules", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetGroupAttributes", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:DescribeTags" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "cognito-idp:DescribeUserPoolClient", + "acm:ListCertificates", + "acm:DescribeCertificate", + "iam:ListServerCertificates", + "iam:GetServerCertificate", + "waf-regional:GetWebACL", + "waf-regional:GetWebACLForResource", + "waf-regional:AssociateWebACL", + 
"waf-regional:DisassociateWebACL", + "wafv2:GetWebACL", + "wafv2:GetWebACLForResource", + "wafv2:AssociateWebACL", + "wafv2:DisassociateWebACL", + "shield:GetSubscriptionState", + "shield:DescribeProtection", + "shield:CreateProtection", + "shield:DeleteProtection" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupIngress" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:CreateSecurityGroup" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "ec2:CreateTags" + ], + "Resource": "arn:aws:ec2:*:*:security-group/*", + "Condition": { + "StringEquals": { + "ec2:CreateAction": "CreateSecurityGroup" + }, + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Resource": "arn:aws:ec2:*:*:security-group/*", + "Condition": { + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "true", + "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupIngress", + "ec2:DeleteSecurityGroup" + ], + "Resource": "*", + "Condition": { + "Null": { + "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Resource": "*", + "Condition": { + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:CreateRule", + "elasticloadbalancing:DeleteRule" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:RemoveTags" + ], + "Resource": [ + "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*" + ], + "Condition": { + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "true", + "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:RemoveTags" + ], + "Resource": [ + "arn:aws:elasticloadbalancing:*:*:listener/net/*/*/*", + "arn:aws:elasticloadbalancing:*:*:listener/app/*/*/*", + "arn:aws:elasticloadbalancing:*:*:listener-rule/net/*/*/*", + "arn:aws:elasticloadbalancing:*:*:listener-rule/app/*/*/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:SetIpAddressType", + "elasticloadbalancing:SetSecurityGroups", + "elasticloadbalancing:SetSubnets", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:ModifyTargetGroupAttributes", + "elasticloadbalancing:DeleteTargetGroup" + ], + "Resource": "*", + "Condition": { + "Null": { + "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" + } + } + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:DeregisterTargets" + ], + "Resource": "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*" + }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:SetWebAcl", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:AddListenerCertificates", + 
"elasticloadbalancing:RemoveListenerCertificates", + "elasticloadbalancing:ModifyRule" + ], + "Resource": "*" + } + ] +} diff --git a/examples/established-cluster-examples/alb-controller/locals.tf b/examples/established-cluster-examples/alb-controller/locals.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/locals.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/established-cluster-examples/alb-controller/main.tf b/examples/established-cluster-examples/alb-controller/main.tf new file mode 100644 index 0000000..80e4097 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/main.tf @@ -0,0 +1,61 @@ +locals { + charts = { + "alb-controller" = { + name = "aws-load-balanacer-controller" + repository = "https://aws.github.io/eks-charts" + version = "1.2.6" + use_remote = true + } + "alb-controller-crds" = { + name = "alb-controller-crds" + repository = "" + version = "" + use_remote = false + } + } + + base_tags = { + "eks-cluster-name" = var.cluster_name + "boc:tf_module_version" = local._module_version + "boc:created_by" = "terraform" + } +} + +resource "helm_release" "alb-controller-crds" { + chart = "alb-controller-crds" + name = "alb-controller-crds" + namespace = "kube-system" +# repository = "${path.module}/charts" + repository = local.charts["alb-controller-crds"].use_remote ? local.charts["alb-controller-crds"].repository : "${path.module}/charts" + version = local.charts["alb-controller-crds"].use_remote ? local.charts["alb-controller-crds"].version : null + timeout = 180 +} + +resource "helm_release" "alb-controller" { + chart = "aws-load-balancer-controller" + name = "aws-load-balancer-controller" + namespace = "kube-system" +# repository = "${path.module}/charts" + repository = local.charts["alb-controller"].use_remote ? local.charts["alb-controller"].repository : "${path.module}/charts" + version = local.charts["alb-controller"].use_remote ? 
local.charts["alb-controller"].version : null + timeout = 180 + + depends_on = [ helm_release.alb-controller-crds ] + + set { + name = "image.repository" + value = "${local.ecr}amazon/aws-load-balancer-controller" + } + set { + name = "clusterName" + value = var.cluster_name + } + set { + name = "serviceAccount.create" + value = "false" + } + set { + name = "serviceAccount.name" + value = kubernetes_service_account.alb-controller.metadata[0].name + } +} diff --git a/examples/established-cluster-examples/alb-controller/policy.tf b/examples/established-cluster-examples/alb-controller/policy.tf new file mode 100644 index 0000000..20f77cb --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/policy.tf @@ -0,0 +1,241 @@ +data "aws_iam_policy_document" "alb-iam" { + statement { + effect = "Allow" + actions = [ + "iam:CreateServiceLinkedRole", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeInstances", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeTags", + "ec2:GetCoipPoolUsage", + "ec2:DescribeCoipPools", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeListenerCertificates", + "elasticloadbalancing:DescribeSSLPolicies", + "elasticloadbalancing:DescribeRules", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetGroupAttributes", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:DescribeTags" + ] + resources = ["*"] + } + + statement { + effect = "Allow" + actions = [ + "cognito-idp:DescribeUserPoolClient", + "acm:ListCertificates", + "acm:DescribeCertificate", + "iam:ListServerCertificates", + "iam:GetServerCertificate", + "waf-regional:GetWebACL", + "waf-regional:GetWebACLForResource", + "waf-regional:AssociateWebACL", + "waf-regional:DisassociateWebACL", + "wafv2:GetWebACL", + "wafv2:GetWebACLForResource", + "wafv2:AssociateWebACL", + "wafv2:DisassociateWebACL", + "shield:GetSubscriptionState", + "shield:DescribeProtection", + "shield:CreateProtection", + "shield:DeleteProtection" + ] + resources = ["*"] + } + + statement { + effect = "Allow" + actions = [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupIngress" + ] + resources = ["*"] + } + + statement { + effect = "Allow" + actions = [ + "ec2:CreateSecurityGroup" + ] + resources = ["*"] + } + + statement { + effect = "Allow" + actions = [ + "ec2:CreateTags" + ] + resources = ["arn:aws:ec2:*:*:security-group/*"] + condition { + test = "StringEquals" + variable = "ec2:CreateAction" + values = ["CreateSecurityGroup"] + } + + condition { + test = "Null" + variable = "aws:RequestTag/elbv2.k8s.aws/cluster" + values = ["false"] + } + } + + statement { + effect = "Allow" + actions = [ + "ec2:CreateTags", + "ec2:DeleteTags" + ] + resources = ["arn:aws:ec2:*:*:security-group/*"] + condition { + test = "Null" + variable = "aws:RequestTag/elbv2.k8s.aws/cluster" + values = ["true"] + } + condition { + test = "Null" + variable = "aws:ResourceTag/elbv2.k8s.aws/cluster" + values = ["false"] + } + } + + statement { + effect = "Allow" + actions = [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupIngress", + "ec2:DeleteSecurityGroup" + ] + resources = ["*"] + condition { + test = "Null" + variable = "aws:ResourceTag/elbv2.k8s.aws/cluster" + values 
= ["false"] + } + } + + statement { + effect = "Allow" + actions = [ + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ] + resources = ["*"] + condition { + test = "Null" + variable = "aws:RequestTag/elbv2.k8s.aws/cluster" + values = ["false"] + } + } + + statement { + effect = "Allow" + actions = [ + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:CreateRule", + "elasticloadbalancing:DeleteRule" + ] + resources = ["*"] + } + + statement { + effect = "Allow" + actions = [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:RemoveTags" + ] + resources = [ + "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*" + ] + condition { + test = "Null" + variable = "aws:RequestTag/elbv2.k8s.aws/cluster" + values = ["true"] + } + condition { + test = "Null" + variable = "aws:ResourceTag/elbv2.k8s.aws/cluster" + values = ["false"] + } + } + + statement { + effect = "Allow" + actions = [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:RemoveTags" + ] + resources = [ + "arn:aws:elasticloadbalancing:*:*:listener/net/*/*/*", + "arn:aws:elasticloadbalancing:*:*:listener/app/*/*/*", + "arn:aws:elasticloadbalancing:*:*:listener-rule/net/*/*/*", + "arn:aws:elasticloadbalancing:*:*:listener-rule/app/*/*/*" + ] + } + + statement { + effect = "Allow" + actions = [ + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:SetIpAddressType", + "elasticloadbalancing:SetSecurityGroups", + "elasticloadbalancing:SetSubnets", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:ModifyTargetGroupAttributes", + "elasticloadbalancing:DeleteTargetGroup" + ] + resources = ["*"] + condition { + test = "Null" + variable = "aws:ResourceTag/elbv2.k8s.aws/cluster" + values = ["false"] + } + } + + statement { + effect = "Allow" + actions = [ + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:DeregisterTargets" + ] + resources = ["arn:aws:elasticloadbalancing:*:*:targetgroup/*/*"] + } + + statement { + effect = "Allow" + actions = [ + "elasticloadbalancing:SetWebAcl", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:AddListenerCertificates", + "elasticloadbalancing:RemoveListenerCertificates", + "elasticloadbalancing:ModifyRule" + ] + resources = ["*"] + } +} + +resource "aws_iam_policy" "alb-policy" { + name = format("%v%v-alb", local._prefixes["eks-policy"], var.cluster_name) + path = "/" + description = "Allow kuberentes to manage ALBs for the cluster." 
+ policy = data.aws_iam_policy_document.alb-iam.json + + tags = merge( + local.base_tags, + var.tags, + ) +} + diff --git a/examples/established-cluster-examples/alb-controller/prefixes.tf b/examples/established-cluster-examples/alb-controller/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/alb-controller/providers.tf b/examples/established-cluster-examples/alb-controller/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/alb-controller/role.tf b/examples/established-cluster-examples/alb-controller/role.tf new file mode 100644 index 0000000..5839bc2 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/role.tf @@ -0,0 +1,38 @@ +locals { + oidc = replace(data.aws_eks_cluster.cluster.identity[0].oidc[0].issuer, "https://", "") + account_id = data.aws_caller_identity.current.account_id + principal = format("arn:%v:iam::%v:oidc-provider/%v", data.aws_arn.current.partition, local.account_id, local.oidc) +} + +module "role_alb-controller" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git" + + role_name = format("%v%v-alb-controller", local._prefixes["eks"], var.cluster_name) + role_description = "EKS ALB Controller Role for ${var.cluster_name}" + enable_ldap_creation = false + assume_policy_document = data.aws_iam_policy_document.alb_assume_webidentity.json + + attached_policies = [aws_iam_policy.alb-policy.arn] +} + +data "aws_iam_policy_document" "alb_assume_webidentity" { + statement { + sid = "ALBAssumeRoleWebIdentity" + effect = "Allow" + actions = ["sts:AssumeRoleWithWebIdentity"] + principals { + type = "Federated" + identifiers = [local.principal] + } + condition { + test = "StringEquals" + variable = "${local.oidc}:sub" + values = ["system:serviceaccount:kube-system:aws-load-balancer-controller"] + } + } +} + +output "role_alb-controller_arn" { + description = "Role ARN for EKS ALB Controller Role" + value = module.role_alb-controller.role_arn +} diff --git a/examples/established-cluster-examples/alb-controller/serviceaccount.tf b/examples/established-cluster-examples/alb-controller/serviceaccount.tf new file mode 100644 index 0000000..c0ca1a7 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/serviceaccount.tf @@ -0,0 +1,17 @@ + +resource "kubernetes_service_account" "alb-controller" { + metadata { + name = "aws-load-balancer-controller" + namespace = "kube-system" + + labels = { + "app.kubernetes.io/component" = "controller" + "app.kubernetes.io/name" = "aws-load-balancer-controller" + } + + annotations = { + "eks.amazonaws.com/role-arn" = module.role_alb-controller.role_arn + } + } +} + diff --git a/examples/established-cluster-examples/alb-controller/variables.alb.tf b/examples/established-cluster-examples/alb-controller/variables.alb.tf new file mode 100644 index 0000000..d984bb2 --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/variables.alb.tf @@ -0,0 +1,5 @@ +variable "cluster_name" { + description = "The name of the EKS cluster
into which the aws-load-balancer-controller is to be installed." + type = string +} + diff --git a/examples/established-cluster-examples/alb-controller/variables.vpc.tf b/examples/established-cluster-examples/alb-controller/variables.vpc.tf new file mode 100644 index 0000000..3fa214a --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/variables.vpc.tf @@ -0,0 +1,150 @@ +variable "vpc_name" { + description = "VPC Name including environment (if necessary), excluding vpc{N}" + type = string +} + +variable "vpc_index" { + description = "VPC index number. This is used for NACL rule number calculations." + type = number +} + +variable "vpc_cidr_block" { + description = "VPC CIDR Block" + type = string +} + +variable "vpc_short_name" { + description = "VPC short name component, vpc{index}" + type = string +} + +variable "vpc_environment" { + description = "VPC environment purpose (common, shared, dev, stage, ite, prod)" + type = string + default = "" +} + +variable "vpc_enable_igw" { + description = "Enable AWS Internet Gateway (IGW) on the VPC (true | false[x])" + type = bool + default = false +} + +variable "vpc_enable_nat" { + description = "Enable AWS NAT Gateway on the VPC (true | false[x])" + type = bool + default = false +} + +variable "vpc_enable_vpn" { + description = "Enable AWS VPN Configuration on the VPC (true[x] | false)" + type = bool + default = true +} + +variable "vpc_enable_awsdns" { + description = "Enable AWS DNS on the VPC" + type = bool + default = false +} + +variable "vpn_settings" { + description = "VPN Connection details array of site, bgp_asn_id and ip_address" + type = list(object( + { + site = string + bgp_asn_id = number + ip_address = string + } + )) + default = [] +} + +## +## vpc_index = 1 +## vpc_short_name = "vpc1" +## vpc_environment = "services" +## vpc_full_name = "vpc1-services" +## vpc_cidr_block = "10.197.0.0/19" +## vpc_vpn_dynamic_routing = true +## vpc_enable_igw = true +## vpc_enable_nat = true +## vpc_enable_vpn = true +## +## vpn_connections = { +## hq : { +## "asn_id" : 65510, +## "ip" : "148.129.160.12" +## } +## bcc : { +## "asn_id" : 65511, +## "ip" : "148.129.90.12" +## } +## } +## +## +## variable "dhcp_vpc_domain_name" { +## description = "Domain Name for DHCP Options" +## default = "compute.csp1.census.gov" +## } +## +## variable "vpc_dns_servers" { +## description = "Enterprise DNS Servers" +## # default = ["10.193.0.22", "10.193.2.22" ] +## default = ["148.129.127.22", "148.129.191.22"] +## # add 10.193.0.22, 10.193.2.22 +## } +## +## variable "vpc_ntp_servers" { +## description = "Enterprise NTP Servers" +## default = ["148.129.127.23", "148.129.191.23"] +## } +## +## variable "vpc_cidr_block" { +## description = "VPC CIDR Block" +## } +## +## variable "network_census" { +## description = "Census Subnets" +## type = list +## default = ["148.129.0.0/16", "172.16.0.0/12", "192.168.0.0/16"] +## } +## +## # 10.20/16 -- npc voice devices +## variable "network_census_extra" { +## description = "Extra Census CIDR blocks (smaller 10.x.x.x/mask)" +## type = list +## default = ["10.20.0.0/16"] +## } +## +## variable "network_peers" { +## description = "Census AWS Peer Subnets" +## type = list +## default = ["10.193.0.0/19"] +## # default = [ ] +## } +## +## variable "vpn_connections" { +## description = "VPN Connection Details" +## type = map +## } +## +## variable "vpc_vpn_dynamic_routing" { +## description = "Dynamic routing with BGP (true | false)" +## type = bool +## } +## +## # bits is the extra size of the bits from the subnet, which is
split from the size of the vpc cidr +## # vpc_cidr = /19 +## # cidr_subnets defines values (19 + N) +## # this bits gets you (19 + N + bits) +## # example: /19 vpc cidr, 3 for cidr means a /22 (19+3) and then 2 for bits means /24 +## variable "subnet_maps" { +## description = "Subnet objects" +## type = list(object( +## { +## label = string +## bits = number +## private = bool +## })) +## } diff --git a/examples/established-cluster-examples/alb-controller/version.tf b/examples/established-cluster-examples/alb-controller/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/established-cluster-examples/alb-controller/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/dnsutils/README.md b/examples/established-cluster-examples/dnsutils/README.md new file mode 100644 index 0000000..eb60652 --- /dev/null +++ b/examples/established-cluster-examples/dnsutils/README.md @@ -0,0 +1,10 @@ +https://aws.amazon.com/blogs/containers/introducing-cis-amazon-eks-benchmark/ + + + +https://github.com/aquasecurity/kube-bench + + + +https://raw.githubusercontent.com/aquasecurity/kube-bench/main/job-eks.yaml + diff --git a/examples/established-cluster-examples/dnsutils/copy_image.sh b/examples/established-cluster-examples/dnsutils/copy_image.sh new file mode 120000 index 0000000..534e41c --- /dev/null +++ b/examples/established-cluster-examples/dnsutils/copy_image.sh @@ -0,0 +1 @@ +../common-services/copy_image.sh \ No newline at end of file diff --git a/examples/established-cluster-examples/dnsutils/copy_images.tf b/examples/established-cluster-examples/dnsutils/copy_images.tf new file mode 100644 index 0000000..6cca35d --- /dev/null +++ b/examples/established-cluster-examples/dnsutils/copy_images.tf @@ -0,0 +1,71 @@ +data "aws_ecr_authorization_token" "token" {} + +locals { + account_id = data.aws_caller_identity.current.account_id + repo_parent_name = format("eks/%v", var.cluster_name) + + account_ecr = format("%v.dkr.ecr.%v.amazonaws.com/%v", local.account_id, var.region, local.repo_parent_name) + + images = [ +# { +# name = "dnsutils" +# image = "gcr.io/kubernetes-e2e-test-images/dnsutils" +# tag = "1.3" +# enabled = false +# }, + { + name = "dnsutils" + image = "docker.io/tutum/dnsutils" + tag = "latest" + enabled = true + }, + { + name = "busybox" + image = "docker.io/busybox" + tag = "latest" + enabled = true + }, + { + name = "alpine" + image = "docker.io/alpine" + tag = "latest" + enabled = true + }, + { + name = "alpine-curl" + image = "docker.io/byrnedo/alpine-curl" + tag = "latest" + enabled = true + }, + { + name = "aws-cli" + image = "docker.io/amazon/aws-cli" + tag = "latest" + enabled = true + }, + ] + image_repos = { for image in local.images : image.name => format("%v/%v", local.account_ecr, image.name) } + image_details = { for image in local.images : image.name => { + name = image.name + source_image = image.image + tag = image.tag + repo = local.image_repos[image.name] + dest_image = format("%v:%v",local.image_repos[image.name],image.tag) + } } +} + +resource "null_resource" "copy_images" { + for_each = { for image in local.images : image.name => image if image.enabled} + + provisioner "local-exec" { + command = "${path.module}/copy_image.sh" + environment = { + AWS_PROFILE = var.profile + AWS_REGION = local.region + SOURCE_IMAGE = format("%v:%v", each.value.image, each.value.tag) + DESTINATION_IMAGE = format("%v/%v:%v", local.account_ecr, each.value.name, each.value.tag) + DESTINATION_USERNAME 
= data.aws_ecr_authorization_token.token.user_name + DESTINATION_PASSWORD = data.aws_ecr_authorization_token.token.password + } + } +} diff --git a/examples/established-cluster-examples/dnsutils/data.eks.tf b/examples/established-cluster-examples/dnsutils/data.eks.tf new file mode 100644 index 0000000..4cebea9 --- /dev/null +++ b/examples/established-cluster-examples/dnsutils/data.eks.tf @@ -0,0 +1,15 @@ +data "aws_eks_cluster" "cluster" { + name = var.cluster_name +} + +data "aws_eks_cluster_auth" "cluster" { + name = var.cluster_name +} + +locals { + aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster +# for main.tf +# aws_eks_cluster = aws_eks_cluster.eks_cluster +# for all subdirectories + aws_eks_cluster = data.aws_eks_cluster.cluster +} diff --git a/examples/established-cluster-examples/dnsutils/history.1 b/examples/established-cluster-examples/dnsutils/history.1 new file mode 100644 index 0000000..909885f --- /dev/null +++ b/examples/established-cluster-examples/dnsutils/history.1 @@ -0,0 +1,1000 @@ + 2325 2021/09/01 15:30:44 less README.md + 2326 2021/09/01 15:31:00 tf-plan -target=aws_iam_policy.nlb-policy -target=aws_iam_policy.cloudwatch-policy -target=aws_iam_policy.cluster-admin-policy + 2327 2021/09/01 15:31:17 tf-apply -target=aws_iam_policy.nlb-policy -target=aws_iam_policy.cloudwatch-policy -target=aws_iam_policy.cluster-admin-policy + 2328 2021/09/01 15:31:39 tf-plan + 2329 2021/09/01 15:32:24 ls logs/ + 2330 2021/09/01 15:32:28 less logs/plan.20210901.163052* + 2331 2021/09/01 15:35:04 tf-apply + 2332 2021/09/01 15:52:23 vi main.tf + 2333 2021/09/01 15:52:32 grep KUBECONFIG * + 2334 2021/09/01 15:52:39 vi eks-console-access.tf + 2335 2021/09/01 15:53:11 vi main.tf + 2336 2021/09/01 15:56:00 vi main.tf eks-console-access.tf + 2337 2021/09/01 15:57:03 tf-plan + 2338 2021/09/01 15:57:24 tf-plan less + 2339 2021/09/01 15:57:43 tf-plan less | grep -iE ' (created|replaced|destroyed)' + 2340 2021/09/01 15:57:59 tf-apply + 2341 2021/09/01 15:59:16 ls -al setup/ + 2342 2021/09/01 15:59:18 ls -al setup/kube.config + 2343 2021/09/01 15:59:22 ls setup/kube.config + 2344 2021/09/01 15:59:24 cat setup/kube.config + 2345 2021/09/01 15:59:27 fg + 2346 2021/09/01 16:22:12 ls -al + 2347 2021/09/01 16:22:17 tf-directory-setup.py -l none + 2348 2021/09/01 16:22:19 tf-init + 2349 2021/09/01 16:22:30 tf-directory-setup.py -l s3 + 2350 2021/09/01 16:22:32 tf-plan + 2351 2021/09/01 16:23:12 ls + 2352 2021/09/01 16:23:15 cd aws-auth/ + 2353 2021/09/01 16:23:15 ls + 2354 2021/09/01 16:23:25 tf-directory-setup.py -l none + 2355 2021/09/01 16:23:29 tf-init + 2356 2021/09/01 16:23:40 grep kubectl * + 2357 2021/09/01 16:23:42 vi patch-aws-auth.tf + 2358 2021/09/01 16:24:26 vi patch-aws-auth.tf ../main.tf + 2359 2021/09/01 16:24:57 tf-plan + 2360 2021/09/01 16:25:17 vi patch-aws-auth.tf + 2361 2021/09/01 16:25:45 tf-plan + 2362 2021/09/01 16:26:07 tf-apply + 2363 2021/09/01 16:26:30 ls setup/ + 2364 2021/09/01 16:26:32 cat setup/kube.config + 2365 2021/09/01 16:26:47 cat ../setup/kube.config + 2366 2021/09/01 16:26:55 vi patch-aws-auth.tf + 2367 2021/09/01 16:27:29 tf-apply + 2368 2021/09/01 16:27:55 ls + 2369 2021/09/01 16:28:00 grep cni_vpc ../var*tf + 2370 2021/09/01 16:28:07 diff variables.vpc.tf .. + 2371 2021/09/01 16:28:12 ln -sf ../variables.vpc.tf . 
+ 2372 2021/09/01 16:28:17 ls + 2373 2021/09/01 16:28:29 grpe kubectl patch-aws-auth.tf + 2374 2021/09/01 16:28:32 grep kubectl patch-aws-auth.tf + 2375 2021/09/01 16:28:38 vi patch-aws-auth.tf + 2376 2021/09/01 16:28:44 tf-appy + 2377 2021/09/01 16:28:46 tf-apply + 2378 2021/09/01 16:29:15 tf-directory-setup.py -l s3 + 2379 2021/09/01 16:29:16 git status . + 2380 2021/09/01 16:29:24 git add -A remote* + 2381 2021/09/01 16:29:26 git status . + 2382 2021/09/01 16:29:36 git commit -m'update to use kubeconfig extract' . + 2383 2021/09/01 16:29:37 cd .. + 2384 2021/09/01 16:29:39 git status . + 2385 2021/09/01 16:29:46 git add eniconfig.yaml remote_state.* -A + 2386 2021/09/01 16:29:48 git status . + 2387 2021/09/01 16:30:08 git commit -m'setup for kubeconfig, make it work with cni custom networking' -a + 2388 2021/09/01 16:30:10 git status . + 2389 2021/09/01 16:30:12 cd .. + 2390 2021/09/01 16:30:12 ls + 2391 2021/09/01 16:30:15 cd *3 + 2392 2021/09/01 16:30:16 ls + 2393 2021/09/01 16:30:19 cd eks-test3 + 2394 2021/09/01 16:30:20 ls + 2395 2021/09/01 16:31:23 cat aws-auth/p + 2396 2021/09/01 16:31:24 cat aws-auth/patch-aws-auth.tf + 2397 2021/09/01 16:31:27 cat aws-auth/patch-aws-auth.tf |less + 2398 2021/09/01 16:31:34 cat aws-auth/*tfvars + 2399 2021/09/01 16:32:56 ls + 2400 2021/09/01 16:33:07 cd efs/ + 2401 2021/09/01 16:33:07 ls + 2402 2021/09/01 16:33:09 grep kubectl * + 2403 2021/09/01 16:33:15 tf-directory-setup.py -l none + 2404 2021/09/01 16:33:21 vi main.tf ../main.tf + 2405 2021/09/01 16:34:37 cd .. + 2406 2021/09/01 16:34:39 vi main.tf + 2407 2021/09/01 16:35:02 cp efs/kubeconfig.tf . + 2408 2021/09/01 16:35:03 vi kubeconfig.tf + 2409 2021/09/01 16:35:52 cd aws-auth/ + 2410 2021/09/01 16:35:54 vi patch-aws-auth.tf + 2411 2021/09/01 16:36:06 ln -s ../kubeconfig.tf . + 2412 2021/09/01 16:36:08 cd ../efs/ + 2413 2021/09/01 16:36:10 rm kubeconfig.tf + 2414 2021/09/01 16:36:13 ln -s ../kubeconfig.tf . + 2415 2021/09/01 16:36:13 ls + 2416 2021/09/01 16:36:18 tf-init + 2417 2021/09/01 16:36:29 ls + 2418 2021/09/01 16:36:35 less efs.tf + 2419 2021/09/01 16:37:04 tf-plan + 2420 2021/09/01 16:38:07 ls + 2421 2021/09/01 16:38:31 ls ../remote_state.*s3 + 2422 2021/09/01 16:38:39 ln -s ../remote_state.*s3 . + 2423 2021/09/01 16:38:41 tf-plan + 2424 2021/09/01 16:39:35 grep applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test3 *tf + 2425 2021/09/01 16:39:44 cd .. + 2426 2021/09/01 16:39:45 ls + 2427 2021/09/01 16:39:49 cat outputs. + 2428 2021/09/01 16:39:50 cat outputs.tf + 2429 2021/09/01 16:40:04 tf-appy + 2430 2021/09/01 16:40:11 ls -al *tf + 2431 2021/09/01 16:40:15 tf-appy + 2432 2021/09/01 16:40:17 tf-apply + 2433 2021/09/01 16:41:44 cd efs/ + 2434 2021/09/01 16:41:45 ls + 2435 2021/09/01 16:41:49 vi remote_state.yml + 2436 2021/09/01 16:41:53 ls + 2437 2021/09/01 16:41:55 ls -al + 2438 2021/09/01 16:42:17 rm remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test3.tf.s3 + 2439 2021/09/01 16:42:29 ln -s ../remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test3.tf . 
+ 2440 2021/09/01 16:42:30 tf-plan + 2441 2021/09/01 16:43:15 grep ^res *tf + 2442 2021/09/01 16:43:19 grep ^res *tf|grep policy + 2443 2021/09/01 16:43:28 tf-apply -target=aws_iam_policy.efs-policy + 2444 2021/09/01 16:44:23 tf-plan + 2445 2021/09/01 16:45:41 tf-apply + 2446 2021/09/01 16:47:50 vi main.tf + 2447 2021/09/01 16:47:55 vi main.tf kubeconfig.tf + 2448 2021/09/01 16:48:16 tf-apply + 2449 2021/09/01 16:52:46 less pa + 2450 2021/09/01 16:52:47 ls + 2451 2021/09/01 16:52:49 less main.tf + 2452 2021/09/01 16:53:15 grep subnet_ids * + 2453 2021/09/01 16:53:19 vi efs.tf + 2454 2021/09/01 16:54:12 grep pvc *md + 2455 2021/09/01 16:54:21 kubectl describe pvc efs-test3-claim + 2456 2021/09/01 16:54:29 kubectl --kubeconfig setup/kube.config describe pvc efs-test3-claim + 2457 2021/09/01 16:58:59 cd .. + 2458 2021/09/01 16:59:01 ls + 2459 2021/09/01 16:59:03 vi outputs. + 2460 2021/09/01 16:59:05 vi outputs.tf + 2461 2021/09/01 16:59:38 vi outputs.tf main.tf + 2462 2021/09/01 17:01:02 tf-apply + 2463 2021/09/01 17:01:26 cd efs + 2464 2021/09/01 17:01:26 ls + 2465 2021/09/01 17:01:27 vi main.tf + 2466 2021/09/01 17:01:34 vi efs.tf + 2467 2021/09/01 17:01:45 vi locals.tf + 2468 2021/09/01 17:01:47 vi main.tf + 2469 2021/09/01 17:02:33 vi locals.tf + 2470 2021/09/01 17:02:35 vi efs.tf + 2471 2021/09/01 17:02:45 vi locals.tf + 2472 2021/09/01 17:02:47 vi main.tf + 2473 2021/09/01 17:02:52 tf-plan + 2474 2021/09/01 17:03:45 tf-apply + 2475 2021/09/01 17:07:11 ls + 2476 2021/09/01 17:07:13 vi main.tf + 2477 2021/09/01 17:08:09 grep ^resource main.tf + 2478 2021/09/01 17:08:52 tf-destroy + 2479 2021/09/01 17:09:39 grep subnet_id * + 2480 2021/09/01 17:10:17 history|grep target + 2481 2021/09/01 17:10:20 tf-apply -target=aws_iam_policy.efs-policy + 2482 2021/09/01 17:10:38 tf-apply + 2483 2021/09/01 17:17:22 history|grep kube + 2484 2021/09/01 17:17:27 kubectl --kubeconfig setup/kube.config describe pvc efs-test3-claim + 2485 2021/09/01 17:18:15 git status . + 2486 2021/09/01 17:18:27 git add kubeconfig.tf ../kubeconfig.tf ../aws-auth/kubeconfig.tf + 2487 2021/09/01 17:18:34 git add remote*back* remote*test3* + 2488 2021/09/01 17:31:59 git status . + 2489 2021/09/01 17:32:09 git commit -m'update' . + 2490 2021/09/01 17:32:12 git status + 2491 2021/09/01 17:32:19 cd .. + 2492 2021/09/01 17:32:20 git status . + 2493 2021/09/01 17:32:27 git commit -m'add cni to outputs' . + 2494 2021/09/01 17:32:31 git status . + 2495 2021/09/01 17:32:40 ls efs/setup/ + 2496 2021/09/01 17:32:41 ls setup/ + 2497 2021/09/01 17:32:48 git add setup/*yaml + 2498 2021/09/01 17:32:54 git commit -m'add yaml downloads' . 
+ 2499 2021/09/01 17:32:55 git status + 2500 2021/09/01 17:32:57 git push + 2501 2021/09/01 17:33:16 ls + 2502 2021/09/01 17:33:19 cd sample-nlb/ + 2503 2021/09/01 17:33:19 ls + 2504 2021/09/01 17:33:22 vi remote_state.yml + 2505 2021/09/01 17:33:25 tf-init + 2506 2021/09/01 17:33:31 tf-directory-setup.py -l none + 2507 2021/09/01 17:33:32 tf-init + 2508 2021/09/01 17:33:37 ls + 2509 2021/09/01 17:33:41 grep kubectl * + 2510 2021/09/01 17:33:47 vi main.tf + 2511 2021/09/01 17:33:55 grpw subnet + 2512 2021/09/01 17:33:57 grep subnet * + 2513 2021/09/01 17:34:09 grep remote_state * + 2514 2021/09/01 17:34:18 tf-plan + 2515 2021/09/01 17:34:34 tf-apply + 2516 2021/09/01 17:41:49 ls + 2517 2021/09/01 17:41:50 vi main.tf + 2518 2021/09/02 09:00:49 history|grep kube + 2519 2021/09/02 09:01:17 kubectl --kubeconfig setup/kube.config get pod --all-namespaces -o wide + 2520 2021/09/02 09:01:21 ls setup + 2521 2021/09/02 09:01:28 kubectl --kubeconfig ../setup/kube.config get pod --all-namespaces -o wide + 2522 2021/09/02 09:01:54 cd .. + 2523 2021/09/02 09:01:54 ls + 2524 2021/09/02 09:01:58 cd common-services/ + 2525 2021/09/02 09:01:58 ls + 2526 2021/09/02 09:02:03 vi ca-cert.tf + 2527 2021/09/02 09:02:17 ls + 2528 2021/09/02 09:02:22 cd ../sample-nlb/ + 2529 2021/09/02 09:02:22 ls + 2530 2021/09/02 09:02:26 ls -al *tf + 2531 2021/09/02 09:02:28 tf-directory-setup.py -l s3 + 2532 2021/09/02 09:02:30 git status . + 2533 2021/09/02 09:02:36 git add remote_state.backend.tf remote*nlb* + 2534 2021/09/02 09:02:42 git commit -m'add remote states' . + 2535 2021/09/02 09:02:44 git push + 2536 2021/09/02 09:02:48 cd .. + 2537 2021/09/02 09:04:09 kubectl --kubeconfig setup/kube.config get events --all-namespaces -o wide + 2538 2021/09/02 09:04:16 kubectl --kubeconfig setup/kube.config get event --all-namespaces -o wide + 2539 2021/09/02 09:04:26 kubectl --kubeconfig setup/kube.config get event -o wide + 2540 2021/09/02 09:04:38 kubectl --help --kubeconfig setup/kube.config get event -o wide + 2541 2021/09/02 09:04:44 kubectl --help # --kubeconfig setup/kube.config get event -o wide + 2542 2021/09/02 09:04:54 kubectl api-resources + 2543 2021/09/02 09:05:07 kubectl api-resources --kubeconfig setup/kube.config g + 2544 2021/09/02 09:05:08 kubectl api-resources --kubeconfig setup/kube.config + 2545 2021/09/02 09:05:13 kubectl api-resources --kubeconfig setup/kube.config |sort + 2546 2021/09/02 09:05:35 kubectl --kubeconfig setup/kube.config get eniconfigs -o wide + 2547 2021/09/02 09:06:16 kubectl --kubeconfig setup/kube.config get secrets -o wide + 2548 2021/09/02 09:08:29 cd sample-nlb/ + 2549 2021/09/02 09:08:30 ls + 2550 2021/09/02 09:08:32 vi main.tf + 2551 2021/09/02 09:15:48 cd .. + 2552 2021/09/02 09:15:49 ls + 2553 2021/09/02 09:15:55 cat dns-zone.tf + 2554 2021/09/02 09:16:18 cd common-services/ + 2555 2021/09/02 09:16:18 ls + 2556 2021/09/02 09:16:24 grep dns * + 2557 2021/09/02 09:16:26 grep route53 * + 2558 2021/09/02 09:16:31 less dns.tf + 2559 2021/09/02 09:16:44 ls + 2560 2021/09/02 09:16:48 vi remote_state.yml + 2561 2021/09/02 09:16:50 ls + 2562 2021/09/02 09:16:58 tf-directory-setup.py -l none + 2563 2021/09/02 09:17:09 ln -s ../kubeconfig.tf . + 2564 2021/09/02 09:17:14 grep kubectl * + 2565 2021/09/02 09:17:25 tf-init + 2566 2021/09/02 09:17:40 tf-plan + 2567 2021/09/02 09:18:09 ls + 2568 2021/09/02 09:18:18 ls ../remote*tf + 2569 2021/09/02 09:18:24 ln -s ../remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test3.tf . 
+ 2570 2021/09/02 09:18:26 tf-plan + 2571 2021/09/02 10:00:54 vi main.tf + 2572 2021/09/02 10:01:01 tf-plan + 2573 2021/09/02 10:09:00 vi main.tf + 2574 2021/09/02 10:10:14 grep tls_crt_file *tf + 2575 2021/09/02 10:10:16 grep tls_crt_file *tfvars + 2576 2021/09/02 10:10:22 vi main.tf + 2577 2021/09/02 10:12:51 tf-plan + 2578 2021/09/02 10:12:56 vi main.tf + 2579 2021/09/02 10:13:14 tf-plan + 2580 2021/09/02 10:13:16 vi main.tf + 2581 2021/09/02 10:13:41 tf-plan + 2582 2021/09/02 10:13:48 vi main.tf + 2583 2021/09/02 10:14:18 tf-plan + 2584 2021/09/02 10:14:42 grep tls_ *tfvars + 2585 2021/09/02 10:14:44 vi main.tf + 2586 2021/09/02 10:14:53 tf-plan + 2587 2021/09/02 10:19:56 vi dns.tf + 2588 2021/09/02 10:22:44 tf-plan + 2589 2021/09/02 10:23:22 vi dns.tf + 2590 2021/09/02 10:23:59 tf-plan + 2591 2021/09/02 10:24:21 vi dns.tf + 2592 2021/09/02 10:24:29 tf-plan + 2593 2021/09/02 10:25:55 ls + 2594 2021/09/02 10:25:58 vi ca-cert.tf + 2595 2021/09/02 10:29:51 tf-plan + 2596 2021/09/02 10:31:14 ls + 2597 2021/09/02 10:31:20 ln -sf ../variables.vpc.tf . + 2598 2021/09/02 10:31:22 tf-plan + 2599 2021/09/02 10:37:18 tf-apply + 2600 2021/09/02 10:38:13 tf-apply -auto-approve + 2601 2021/09/02 10:39:54 vi ca-cert.tf + 2602 2021/09/02 10:40:42 tf-apply -auto-approve + 2603 2021/09/02 10:41:13 vi ca-cert.tf + 2604 2021/09/02 10:41:22 ls + 2605 2021/09/02 10:41:26 grep no-certificate *tf + 2606 2021/09/02 10:41:29 vi main.tf + 2607 2021/09/02 10:41:54 terraform console + 2608 2021/09/02 10:43:21 vi main.tf + 2609 2021/09/02 10:46:24 tf-plan + 2610 2021/09/02 10:46:39 vi main.tf + 2611 2021/09/02 10:46:49 tf-plan + 2612 2021/09/02 10:47:29 tf-apply + 2613 2021/09/02 10:47:50 tf-apply -auto-approve + 2614 2021/09/02 10:50:14 history|grep pod + 2615 2021/09/02 10:50:19 kubectl --kubeconfig ../setup/kube.config get pod --all-namespaces -o wide + 2616 2021/09/02 10:51:38 kubectl --kubeconfig ../setup/kube.config get secret --all-namespaces -o wide + 2617 2021/09/02 10:51:48 kubectl --kubeconfig ../setup/kube.config get secret --all-namespaces -o wide|grep -iE "tls|cert" + 2618 2021/09/02 10:52:10 kubectl --kubeconfig ../setup/kube.config get secret --all-namespaces -o wide|grep tls + 2619 2021/09/02 12:52:10 git status . + 2620 2021/09/02 12:52:17 git add remo*back* remote*test3* + 2621 2021/09/02 12:52:19 git status . + 2622 2021/09/02 12:52:22 git add kubeconfig.tf + 2623 2021/09/02 12:52:23 ls certs + 2624 2021/09/02 12:52:31 git add certs/*csr certs/*pub* + 2625 2021/09/02 12:52:34 git status . + 2626 2021/09/02 12:52:44 vi .gitignore + 2627 2021/09/02 12:52:57 git add .gitignore + 2628 2021/09/02 12:53:00 git commit -m'ignore key' . + 2629 2021/09/02 12:53:03 git status . + 2630 2021/09/02 12:53:08 git amdend + 2631 2021/09/02 12:53:11 git amend + 2632 2021/09/02 12:54:02 git commit --amend -m'add remote state, ignore .key, change variables.vpc, add kubeconfig' . + 2633 2021/09/02 12:54:05 git push + 2634 2021/09/02 12:56:43 cd .. + 2635 2021/09/02 12:56:43 ls + 2636 2021/09/02 12:56:45 cd sample-istio/ + 2637 2021/09/02 12:56:45 ls + 2638 2021/09/02 12:56:48 vi remote_state.yml + 2639 2021/09/02 12:56:54 tf-directory-setup.py -l none + 2640 2021/09/02 12:56:56 cd ../common-services/ + 2641 2021/09/02 12:56:56 ls + 2642 2021/09/02 12:57:00 tf-directory-setup.py -l s3 + 2643 2021/09/02 12:57:02 git status . + 2644 2021/09/02 12:57:07 git commit -m'update linkl' . 
+ 2645 2021/09/02 12:57:10 cd ../sample-istio/ + 2646 2021/09/02 12:57:10 ls + 2647 2021/09/02 12:57:16 grep kubectl * + 2648 2021/09/02 12:57:22 ln -s ../kubeconfig.tf . + 2649 2021/09/02 12:57:25 ls -al + 2650 2021/09/02 12:57:30 tf-init + 2651 2021/09/02 12:57:38 ln -sf ../variables.vpc.tf . + 2652 2021/09/02 12:57:41 tf-plan + 2653 2021/09/02 12:57:59 tf-apply + 2654 2021/09/02 13:07:31 history|grep kube + 2655 2021/09/02 13:07:41 kubectl --kubeconfig ../setup/kube.config get events + 2656 2021/09/02 13:07:48 kubectl --kubeconfig ../setup/kube.config get events --all-namespaces -o wide + 2657 2021/09/02 13:08:04 kubectl --kubeconfig ../setup/kube.config get events -n my-nginx -o wide + 2658 2021/09/02 13:08:08 clear + 2659 2021/09/02 13:08:09 kubectl --kubeconfig ../setup/kube.config get events -n my-nginx -o wide + 2660 2021/09/02 13:11:49 ls + 2661 2021/09/02 13:11:56 vi copy_images.tf + 2662 2021/09/02 13:12:45 diff copy_images.tf ../../eks-test2//sample-istio/ + 2663 2021/09/02 13:12:50 ls -al + 2664 2021/09/02 13:13:04 diff copy_image.sh ../../eks-test2/sample-istio/ + 2665 2021/09/02 13:13:07 vi main.tf + 2666 2021/09/02 13:13:18 diff main.tf ../../eks-test2/sample-istio/ + 2667 2021/09/02 13:13:27 grep copy *tf + 2668 2021/09/02 13:13:36 grep copy_images logs/*apply* + 2669 2021/09/02 13:14:20 cd ../../eks-test2 + 2670 2021/09/02 13:14:21 cd sample- + 2671 2021/09/02 13:14:22 cd sample-istio/ + 2672 2021/09/02 13:14:23 ls + 2673 2021/09/02 13:14:25 less logs/*apply* + 2674 2021/09/02 13:15:20 cd ../../eks-test3/sample-istio/ + 2675 2021/09/02 13:15:27 less logs/*apply* + 2676 2021/09/02 13:16:05 vi main.tf + 2677 2021/09/02 13:16:16 grep domian *tf + 2678 2021/09/02 13:16:17 grep domain *tf + 2679 2021/09/02 13:16:20 vi main.tf + 2680 2021/09/02 13:16:26 grep domain *tf + 2681 2021/09/02 13:16:28 vi main.tf + 2682 2021/09/02 13:16:40 grpe domain *tf + 2683 2021/09/02 13:16:42 grpe domain *tfvars + 2684 2021/09/02 13:16:43 grep domain *tfvars + 2685 2021/09/02 13:16:53 cd ../.. + 2686 2021/09/02 13:16:57 cd eks-test2/sample-istio/ + 2687 2021/09/02 13:16:57 grep domain *tfvars + 2688 2021/09/02 13:17:06 cd ../.. + 2689 2021/09/02 13:17:13 cd eks-test3/sample-istio/ + 2690 2021/09/02 13:17:13 ls + 2691 2021/09/02 13:17:21 vi test3.auto.tfvars + 2692 2021/09/02 13:17:31 cat ../../eks-test2/test2.auto.tfvars + 2693 2021/09/02 13:17:38 cat ../../eks-test2/sample-istio/test2.auto.tfvars + 2694 2021/09/02 13:17:44 ls + 2695 2021/09/02 13:17:47 ls *auto* + 2696 2021/09/02 13:17:59 grep domain * + 2697 2021/09/02 13:18:06 vi settings.auto.tfvars + 2698 2021/09/02 13:18:27 ls + 2699 2021/09/02 13:30:33 history|grep kube + 2700 2021/09/02 13:30:40 kubectl --kubeconfig ../setup/kube.config get pod --all-namespaces -o wide + 2701 2021/09/02 13:30:50 kubectl --kubeconfig ../setup/kube.config get service --all-namespaces -o wide + 2702 2021/09/02 13:41:04 cd .. 
+ 2703 2021/09/02 13:41:05 ls + 2704 2021/09/02 13:41:18 grep LoadBalanacer * + 2705 2021/09/02 13:41:21 cd common-services/ + 2706 2021/09/02 13:41:22 grep LoadBalanacer * + 2707 2021/09/02 13:41:25 grep LoadBalanacer */* + 2708 2021/09/02 13:41:27 grep LoadBalanacer */*/* + 2709 2021/09/02 13:41:43 gre pingressgateway + 2710 2021/09/02 13:41:45 grep ingress * + 2711 2021/09/02 13:42:01 grep ingress */* + 2712 2021/09/02 13:42:05 grep ingress */*/* + 2713 2021/09/02 13:42:08 grep ingress */*/*/* + 2714 2021/09/02 13:42:17 vi main.tf + 2715 2021/09/02 13:42:35 grep -i clusterip * + 2716 2021/09/02 13:42:38 grep -i clusterip */ + 2717 2021/09/02 13:42:42 grep -i clusterip */*/* + 2718 2021/09/02 13:42:44 grep -i clusterip */*/*/* + 2719 2021/09/02 13:42:51 vi charts/istio-profile/templates/istiooperator.yaml + 2720 2021/09/02 13:43:55 kubectl --kubeconfig ../setup/kube.config get events -n my-nginx -o wide|grep ecr + 2721 2021/09/02 13:44:54 ls + 2722 2021/09/02 13:44:58 cd ../sample-istio/ + 2723 2021/09/02 13:44:59 ls + 2724 2021/09/02 13:45:01 vi main.tf + 2725 2021/09/02 13:45:19 vi *.tf + 2726 2021/09/02 13:47:06 ls + 2727 2021/09/02 13:47:15 vi variables.sample.tf + 2728 2021/09/02 13:47:19 ls + 2729 2021/09/02 13:47:30 less log/*app* + 2730 2021/09/02 13:47:35 less logs/*app* + 2731 2021/09/02 13:49:37 cd ../../ + 2732 2021/09/02 13:49:42 cd eks-test2/sample-istio/ + 2733 2021/09/02 13:49:47 less logs/*appy* + 2734 2021/09/02 13:49:50 less logs/*apply* + 2735 2021/09/02 13:50:17 cd ../../eks-test3 + 2736 2021/09/02 13:50:22 cd sample-istio/ + 2737 2021/09/02 13:50:25 less logs/*apply* + 2738 2021/09/02 13:50:48 grep login ../*READ* + 2739 2021/09/02 13:50:58 history|grep login + 2740 2021/09/02 13:51:08 pushd ~/docker + 2741 2021/09/02 13:51:08 ls + 2742 2021/09/02 13:51:13 cat pull.sh + 2743 2021/09/02 13:51:23 ls images/ + 2744 2021/09/02 13:51:24 less x + 2745 2021/09/02 13:51:26 ls + 2746 2021/09/02 13:51:33 history|grep export + 2747 2021/09/02 13:51:38 export HTTP_PROXY=http://proxy.tco.census.gov:3128 + 2748 2021/09/02 13:51:43 export ECR_NAME=252960665057.dkr.ecr.us-gov-east-1.amazonaws.com + 2749 2021/09/02 13:51:51 ls + 2750 2021/09/02 13:52:01 cat ~/.aws/config + 2751 2021/09/02 13:52:18 export AWS_PROFILE=252960665057-ma6-gov + 2752 2021/09/02 13:52:42 ecr get-login-password --region us-gov-east-1 > ecr-login.txt + 2753 2021/09/02 13:53:53 aws ecr get-login-password --region us-gov-east-1 > ecr-login.txt + 2754 2021/09/02 13:54:21 skopeo login --username AWS --password $(cat ecr-login.txt ) 252960665057.dkr.ecr.us-gov-east-1.amazonaws.com + 2755 2021/09/02 13:59:15 skopeo inspect + 2756 2021/09/02 13:59:18 [1:43 PM] Dayong Lu (CENSUS/ADSD CTR) + 2757 2021/09/02 13:59:18 + 2758 2021/09/02 13:59:18 Failed to pull image "079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx/nginx:1.21": rpc error: code = + 2759 2021/09/02 13:59:39 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx/nginx:1.21 + 2760 2021/09/02 13:59:56 history|grep login + 2761 2021/09/02 14:00:23 export AWS_PROFILE=252960665057-ma6-gov + 2762 2021/09/02 14:00:31 unexport AWS_PROFILE + 2763 2021/09/02 14:00:37 unset AWS_PROFILE + 2764 2021/09/02 14:01:10 aws --profile 079788916859-do2-cat ecr get-login-password --region us-east-1 > ecr-login.txt + 2765 2021/09/02 14:01:30 skopeo login --username AWS --password $(cat ecr-login.txt ) 079788916859.dkr.ecr.us-east-1.amazonaws.com + 2766 2021/09/02 14:01:38 skopeo inspect 
docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx/nginx:1.21 + 2767 2021/09/02 14:01:45 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test2/nginx/nginx:1.21 + 2768 2021/09/02 14:01:59 popd + 2769 2021/09/02 14:02:12 tf-plan + 2770 2021/09/02 14:03:27 tf-apply + 2771 2021/09/02 15:38:16 ls + 2772 2021/09/02 15:38:18 tf-destroy + 2773 2021/09/02 15:45:26 git status . + 2774 2021/09/02 15:45:40 git add settings.auto.tfvars remote*istio* remote*back* + 2775 2021/09/02 15:45:41 git status . + 2776 2021/09/02 15:45:44 git add kubeconfig.tf + 2777 2021/09/02 15:45:52 git commit -m'add remote state, kubeconfig' . + 2778 2021/09/02 15:45:54 ls + 2779 2021/09/02 15:45:57 git status . + 2780 2021/09/02 15:45:59 tf-directory-setup.py -l s3 + 2781 2021/09/02 15:46:04 git commit -m'change link' . + 2782 2021/09/02 15:46:06 ls -al set + 2783 2021/09/02 15:46:08 ls -al setup/ + 2784 2021/09/02 15:46:09 ls + 2785 2021/09/02 15:46:12 git status . + 2786 2021/09/02 15:46:14 cd .. + 2787 2021/09/02 15:46:14 ls + 2788 2021/09/02 15:46:18 cd sample-nlb/ + 2789 2021/09/02 15:46:18 ls + 2790 2021/09/02 15:46:20 tf-destroy + 2791 2021/09/02 15:58:10 git status . + 2792 2021/09/02 15:58:12 ls + 2793 2021/09/02 15:58:15 cd .. + 2794 2021/09/02 15:58:16 ls + 2795 2021/09/02 15:58:19 cd common-services/ + 2796 2021/09/02 15:58:20 git status . + 2797 2021/09/02 15:58:24 tf-destroy + 2798 2021/09/02 16:09:39 tf-destroy -auto-approve + 2799 2021/09/02 16:35:41 terraform state list + 2800 2021/09/02 16:35:51 cd .. + 2801 2021/09/02 16:35:52 ls + 2802 2021/09/02 16:35:54 cd efs/ + 2803 2021/09/02 16:35:55 lt + 2804 2021/09/02 16:35:57 tf-destroy + 2805 2021/09/02 16:37:30 cd .. + 2806 2021/09/02 16:37:31 ls + 2807 2021/09/02 16:37:36 cd alb-controller/ + 2808 2021/09/02 16:37:37 ls + 2809 2021/09/02 16:37:41 terraform state list + 2810 2021/09/02 16:37:44 cd .. + 2811 2021/09/02 16:37:44 ls + 2812 2021/09/02 16:37:52 cd aws-auth/ + 2813 2021/09/02 16:37:53 ls + 2814 2021/09/02 16:37:55 terraform state list + 2815 2021/09/02 16:38:09 less patch-aws-auth.tf + 2816 2021/09/02 16:38:16 tf-destroy + 2817 2021/09/02 16:38:37 cat setup/config_map.patch.yaml + 2818 2021/09/02 16:38:39 cd .. + 2819 2021/09/02 16:38:40 ls + 2820 2021/09/02 16:38:43 tf-destroy + 2821 2021/09/02 16:49:12 git status . 
+ 2822 2021/09/02 16:49:28 git diff eniconfig.yaml + 2823 2021/09/02 16:49:39 git diff .|less + 2824 2021/09/02 16:49:43 ls + 2825 2021/09/02 16:54:39 mv test3.auto.tfvars settings.auto.tfvars + 2826 2021/09/02 16:54:40 vi set + 2827 2021/09/02 16:54:43 vbi settings.auto.tfvars + 2828 2021/09/02 16:54:45 vi settings.auto.tfvars + 2829 2021/09/02 16:55:21 ls */sett* + 2830 2021/09/02 16:55:26 vi sample-istio/variables.* + 2831 2021/09/02 16:55:48 ls + 2832 2021/09/02 16:55:52 vi variables.vpc.tf + 2833 2021/09/02 16:55:57 ls + 2834 2021/09/02 16:55:58 vi eks-console-access.tf + 2835 2021/09/02 16:55:59 ls + 2836 2021/09/02 16:56:01 vi variables.eks.tf + 2837 2021/09/02 16:56:18 ls + 2838 2021/09/02 16:56:41 terraform state list + 2839 2021/09/02 16:57:24 grep 1.20 * + 2840 2021/09/02 16:57:31 grep 1.20 * -A5 + 2841 2021/09/02 16:57:34 grep 1.20 * -B5 + 2842 2021/09/02 16:57:41 vi settings.auto.tfvars + 2843 2021/09/02 16:57:52 ls + 2844 2021/09/02 16:58:38 tf-state list + 2845 2021/09/02 16:58:51 history|grep target + 2846 2021/09/02 16:58:56 tf-plan -target=aws_iam_policy.nlb-policy -target=aws_iam_policy.cloudwatch-policy -target=aws_iam_policy.cluster-admin-policy + 2847 2021/09/02 16:59:15 tf-apply -target=aws_iam_policy.nlb-policy -target=aws_iam_policy.cloudwatch-policy -target=aws_iam_policy.cluster-admin-policy + 2848 2021/09/02 16:59:40 tf-plan + 2849 2021/09/02 17:10:46 tf-plan less + 2850 2021/09/02 17:17:44 tf-apply + 2851 2021/09/02 17:18:24 tf-apply -auto-approve + 2852 2021/09/02 19:25:37 tf-apply + 2853 2021/09/03 08:36:02 vi main.tf kubeconfig.tf + 2854 2021/09/03 08:37:11 grep aws_eks_cluster */*tf + 2855 2021/09/03 08:37:35 ls -al */provid* + 2856 2021/09/03 08:37:43 less alb-controller/providers.tf + 2857 2021/09/03 08:38:00 less main.tf + 2858 2021/09/03 08:38:09 terraform console + 2859 2021/09/03 08:38:52 vi kubeconfig.tf + 2860 2021/09/03 08:39:26 vi main.tf + 2861 2021/09/03 08:39:55 tf-plan + 2862 2021/09/03 08:40:10 vi kubeconfig.tf + 2863 2021/09/03 08:40:17 tf-plan + 2864 2021/09/03 08:41:05 ls -al */kubeconfig* + 2865 2021/09/03 08:41:20 rm */kubeconfig.tf + 2866 2021/09/03 08:41:24 cp kubeconfig.tf aws-auth/ + 2867 2021/09/03 08:41:27 cp kubeconfig.tf common-services/ + 2868 2021/09/03 08:41:31 cp kubeconfig.tf efs/ + 2869 2021/09/03 08:41:34 cp kubeconfig.tf sample-istio/ + 2870 2021/09/03 08:41:43 vi */kubeconfig.tf + 2871 2021/09/03 08:42:12 tf-apply + 2872 2021/09/03 09:13:54 ls + 2873 2021/09/03 09:13:57 cd aws-auth/ + 2874 2021/09/03 09:13:57 ls + 2875 2021/09/03 09:14:01 tf-init + 2876 2021/09/03 09:14:11 ls ../*auto* + 2877 2021/09/03 09:14:20 ln -s ../settings.auto.tfvars . + 2878 2021/09/03 09:14:23 rm test3.auto.tfvars + 2879 2021/09/03 09:14:30 cat test3.aws-auth.auto.tfvars + 2880 2021/09/03 09:14:45 mv test3.aws-auth.auto.tfvars aws-auth.auto.tvfars + 2881 2021/09/03 09:14:54 mv aws-auth.auto.tvfars aws-auth.auto.tfvars + 2882 2021/09/03 09:14:56 tf-plan + 2883 2021/09/03 09:15:27 grep cluster_version ../*tf + 2884 2021/09/03 09:15:38 ln -s ../variables.eks.tf . 
+ 2885 2021/09/03 09:15:40 tf-plan + 2886 2021/09/03 09:15:48 vi variables.aws-auth.tf + 2887 2021/09/03 09:15:54 tf-plan + 2888 2021/09/03 09:16:23 tf-apply + 2889 2021/09/03 09:17:26 history|grpe kube + 2890 2021/09/03 09:17:28 history|grep kube + 2891 2021/09/03 09:17:43 kubectl --kubeconfig setup/kube.config get pod --all-namespaces -o wide + 2892 2021/09/03 09:17:58 kubectl --kubeconfig setup/kube.config get service --all-namespaces -o wide + 2893 2021/09/03 09:18:07 kubectl --kubeconfig setup/kube.config get events --all-namespaces -o wide + 2894 2021/09/03 09:18:16 kubectl --kubeconfig setup/kube.config get all --all-namespaces -o wide + 2895 2021/09/03 09:18:29 ls + 2896 2021/09/03 09:18:32 cd ../efs/ + 2897 2021/09/03 09:18:32 ls + 2898 2021/09/03 09:18:39 rm test3.auto.tfvars + 2899 2021/09/03 09:18:42 ln -s ../settings.auto.tfvars . + 2900 2021/09/03 09:18:46 ln -s ../variables.eks.tf . + 2901 2021/09/03 09:18:48 vi variables.efs.tf + 2902 2021/09/03 09:18:59 pwd + 2903 2021/09/03 09:19:00 ls + 2904 2021/09/03 09:19:10 vi main.tf efs.tf + 2905 2021/09/03 09:19:18 tf-plan + 2906 2021/09/03 09:35:07 ls + 2907 2021/09/03 09:35:10 vi variables.efs.tf + 2908 2021/09/03 09:35:29 ls + 2909 2021/09/03 09:35:31 tf-plan + 2910 2021/09/03 09:36:28 grep target README* + 2911 2021/09/03 09:36:37 tf-apply -target=aws_iam_policy.efs-policy + 2912 2021/09/03 09:37:26 tf-plan + 2913 2021/09/03 11:22:27 pwd + 2914 2021/09/03 11:22:28 ls + 2915 2021/09/03 11:22:32 tf-apply + 2916 2021/09/03 11:38:40 kubectl --kubeconfig setup/kube.config get all --all-namespaces -o wide + 2917 2021/09/03 11:41:10 ls + 2918 2021/09/03 11:41:12 cd .. + 2919 2021/09/03 11:41:12 ls + 2920 2021/09/03 11:59:57 pwd + 2921 2021/09/03 11:59:57 ls + 2922 2021/09/03 12:00:00 cd common-services/ + 2923 2021/09/03 12:00:01 ls + 2924 2021/09/03 12:00:03 ls -al certs/ + 2925 2021/09/03 12:00:21 rm certs/* + 2926 2021/09/03 12:00:21 ls + 2927 2021/09/03 12:00:25 rm test3.auto.tfvars + 2928 2021/09/03 12:00:30 ln -s ../settings.auto.tfvars . + 2929 2021/09/03 12:00:33 tf-init + 2930 2021/09/03 14:24:57 ls + 2931 2021/09/03 14:24:59 pwd + 2932 2021/09/03 14:25:01 tf-plan + 2933 2021/09/03 14:25:37 tf-apply + 2934 2021/09/03 14:26:07 git status . + 2935 2021/09/03 14:26:08 git status + 2936 2021/09/03 14:26:13 git add settings.auto.tfvars + 2937 2021/09/03 14:26:16 git status + 2938 2021/09/03 14:26:22 cd .. + 2939 2021/09/03 14:26:25 git add */settings.auto* + 2940 2021/09/03 14:26:27 git status + 2941 2021/09/03 14:26:32 git commit -m'add settings' . + 2942 2021/09/03 14:26:35 git push + 2943 2021/09/03 14:26:41 cd common-services/ + 2944 2021/09/03 14:26:43 tf-apply + 2945 2021/09/03 14:27:53 ls + 2946 2021/09/03 14:27:58 ln -s ../variables.eks.tf . + 2947 2021/09/03 14:28:00 cd .. + 2948 2021/09/03 14:28:02 ls -al set + 2949 2021/09/03 14:28:04 gi tadd sett + 2950 2021/09/03 14:28:07 git add settings.auto.tfvars + 2951 2021/09/03 14:28:11 git commit -m'add settings' . + 2952 2021/09/03 14:28:13 git push + 2953 2021/09/03 14:28:19 cd common-services/ + 2954 2021/09/03 14:28:20 tf-plan + 2955 2021/09/03 14:28:26 vi variables.common-services.tf + 2956 2021/09/03 14:28:36 tf-plan + 2957 2021/09/03 14:29:11 cd .. 
+ 2958 2021/09/03 14:29:12 tf-plan + 2959 2021/09/03 14:29:27 terraform force-unlock 7080002d-b733-e956-f8d8-5241cc298832 + 2960 2021/09/03 14:29:38 cd common-services/ + 2961 2021/09/03 14:29:42 tf-apply + 2962 2021/09/03 14:36:59 history|grep kube + 2963 2021/09/03 14:37:03 kubectl --kubeconfig setup/kube.config get all --all-namespaces -o wide + 2964 2021/09/03 15:42:25 cd ../sample-istio/ + 2965 2021/09/03 15:42:25 ls + 2966 2021/09/03 15:42:28 rm test3.auto.tfvars + 2967 2021/09/03 15:42:32 ln -s ../settings.auto.tfvars + 2968 2021/09/03 15:42:37 cat settings.auto.tfvars + 2969 2021/09/03 15:42:43 cat ../settings.auto.tfvars + 2970 2021/09/03 15:42:49 ln -sf ../settings.auto.tfvars + 2971 2021/09/03 15:42:52 ln -s ../variables.eks.tf . + 2972 2021/09/03 15:42:54 tf-plan + 2973 2021/09/03 15:43:01 vi variables.sample.tf + 2974 2021/09/03 15:43:07 tf-plan + 2975 2021/09/03 15:43:43 tf-apply + 2976 2021/09/03 19:54:53 kubectl --kubeconfig setup/kube.config get all --all-namespaces -o wide + 2977 2021/09/03 19:55:21 kubectl --kubeconfig setup/kube.config get all -n my-nginx -o wide + 2978 2021/09/03 19:55:31 kubectl --kubeconfig setup/kube.config get events -n my-nginx -o wide + 2979 2021/09/03 19:55:52 cd ../.. + 2980 2021/09/03 19:55:55 cd eks-test2 + 2981 2021/09/03 19:56:00 cd sample-it + 2982 2021/09/03 19:56:01 cd sample-istio/ + 2983 2021/09/03 19:56:05 kubectl --kubeconfig setup/kube.config get events -n my-nginx -o wide + 2984 2021/09/03 19:56:18 cp ../../eks-test3/sample-istio/kubeconfig.tf . + 2985 2021/09/03 19:56:19 ls + 2986 2021/09/03 19:56:21 tf-plan + 2987 2021/09/03 19:56:36 tf-apply + 2988 2021/09/03 19:56:54 kubectl --kubeconfig setup/kube.config get events -n my-nginx -o wide + 2989 2021/09/03 19:57:12 kubectl --kubeconfig setup/kube.config get all --all-namespaces -o wide + 2990 2021/09/03 19:57:28 kubectl --kubeconfig setup/kube.config get all -n my-nginx -o wide + 2991 2021/09/03 19:57:42 cd .. + 2992 2021/09/03 19:57:46 cd ../eks-test3 + 2993 2021/09/03 19:57:53 cd sample-s + 2994 2021/09/03 19:57:53 ls + 2995 2021/09/03 19:57:55 cd sample-istio/ + 2996 2021/09/03 19:57:58 less logs/*appy* + 2997 2021/09/03 19:58:03 less logs/*apply* + 2998 2021/09/03 19:58:41 grep copy_images ../../eks-test2/sample-istio/logs/*apply* + 2999 2021/09/03 19:59:10 pwd + 3000 2021/09/03 19:59:18 less copy_images.tf + 3001 2021/09/03 20:01:22 grep nginx/nginx ../../eks-test2/sample-istio/logs/*apply* + 3002 2021/09/03 20:01:32 grep helm ../../eks-test2/sample-istio/logs/*apply* + 3003 2021/09/03 20:01:39 pwd + 3004 2021/09/03 20:01:49 diff ../../eks-test2/sample-istio/main.tf . + 3005* 2021/09/03 20:01:51 l + 3006 2021/09/03 20:02:00 diff ../../eks-test2/sample-istio/copy_images.tf . + 3007 2021/09/03 20:02:05 diff ../../eks-test2/sample-istio/copy_image.sh + 3008 2021/09/03 20:02:07 diff ../../eks-test2/sample-istio/copy_image.sh . 
+ 3009 2021/09/03 20:02:18 ls charts/ + 3010 2021/09/03 20:02:21 ls charts/my-nginx/ + 3011 2021/09/03 20:02:34 diff ../../eks-test2/sample-istio/charts/my-nginx/values.yaml charts/my-nginx/ + 3012 2021/09/03 20:02:40 less charts/my-nginx/* + 3013 2021/09/03 20:02:54 pwd + 3014 2021/09/03 20:02:55 cd charts/ + 3015 2021/09/03 20:02:56 ls + 3016 2021/09/03 20:02:58 cd my-nginx/ + 3017 2021/09/03 20:02:58 ls + 3018 2021/09/03 20:03:02 grep test * + 3019 2021/09/03 20:03:04 vi values.yaml + 3020 2021/09/03 20:03:33 cat ../../settings.auto.tfvars + 3021 2021/09/03 20:03:45 vi values.yaml + 3022 2021/09/03 20:04:27 ls templates/ + 3023 2021/09/03 20:04:30 vi templates/* + 3024 2021/09/03 20:04:46 cd .. + 3025 2021/09/03 20:04:51 cd ../eks-test2 + 3026 2021/09/03 20:04:53 cd sample-istio/ + 3027 2021/09/03 20:04:53 ls + 3028 2021/09/03 20:04:54 tf-plan + 3029 2021/09/03 20:05:20 kubectl --kubeconfig setup/kube.config get all -n my-nginx -o wide + 3030 2021/09/03 20:05:39 kubectl --kubeconfig setup/kube.config get service -n my-nginx -o wide + 3031 2021/09/03 20:05:53 kubectl --kubeconfig setup/kube.config get deployment -n my-nginx -o wide + 3032 2021/09/03 20:06:00 pwd + 3033 2021/09/03 20:06:04 cd ../../eks-test2 + 3034 2021/09/03 20:06:06 cd sample- + 3035 2021/09/03 20:06:08 cd ../eks-test2 + 3036 2021/09/03 20:06:09 cd sample-istio/ + 3037 2021/09/03 20:06:10 ls + 3038 2021/09/03 20:06:12 pwd + 3039 2021/09/03 20:06:15 cd ../.. + 3040 2021/09/03 20:06:18 cd eks-test3 + 3041 2021/09/03 20:06:19 cd sample-istio/ + 3042 2021/09/03 20:06:20 ls + 3043 2021/09/03 20:06:23 tf-plan + 3044 2021/09/03 20:07:03 grep ^re copy_images.tf + 3045 2021/09/03 20:07:10 pwd + 3046 2021/09/03 20:07:10 ls + 3047 2021/09/03 20:07:14 less copy_images.tf + 3048 2021/09/03 20:07:37 terraform console + 3049 2021/09/03 20:08:12 #AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) + 3050 2021/09/03 20:08:17 grep copy_image logs/*app* + 3051 2021/09/03 20:09:41 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) SOURCE_IMAGE=public.ecr.aws/nginx/nginx:1.21 DESTINATION_IMAGE=079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx:1.21 DESTINATION_USERNAME=AWS 
DESTINATION_PASSWORD="eyJwYXlsb2FkIjoiNEVCdTVSTlpiWUh1K3NNa050OFd3VisrOURDL3VCYmNCOEhPOUZBdlhXaDlGMWRVVldobWdsQ3VZQ0xHQXpBUVJmZzBWN0JoY0ZrU3NDTldOUmdUemVPZnlIZDJNWTlORFczNCs3K05NVkFYUVhlTU9xRTJxN1dOdE4rU3cyRE5tS3ZyZWswOG5JOTlJWSs4UHRhd1pvQjVNRzNxYWF1ODNrNTJpeEJFQXZ1WGlOb3RMeUEwdHBSeUJGeFdXY3pJYUJ2T3lDODY0UGZ1UjJzMFFKUnJldjYyVFduSEY1eGkxOFVRcFVVZkRYeSsrZkF3TEhBbm5QbE5lMkxCSUM2NTJNQlVRQzRrQVN2elBOeld4dE0vSHl1aWpZQjF1aEZQbTJmVVdJRHVrMXcrOFNJU0lIeDI1S2lGTS9yOFYxWE5pemdZRzJidUpjakZaRGpWa0grclRBQmdSRnlCNjN4S09aUU05YjhSVkt3dEV3Y09hUzBrS2RIMXQvbXNUQXRWM0hEN2JKQVk1TG5pazJ5VE5LTlVTSTRSMDZXMkIrWlYveGdvUjBtSXBGN25oOWZ1SStyM0lTMUpaVUwzYzBFQ0Q5ZWFSejJURmxFd1VyMFJOVkcxNnl4MnVZRnFBTWZ5a2dsYUJ1ZTlPS0VUUzVZVkNlTkFDZXJHZDFNRmgrM1NCZFNJeWpVaWs2T3I3c2dNc3Aydm1jU3c4eDYwdHJLNVRNQm44ejBSRDA1RnBsVFVKS2YrNGlkelkyT1p3M1pqcnlLRjlORnZ4NkphVi9mc0V0eklpQTQybUo4YllkQWFZYmpQVVV3L0pMUGlybXlIdXdOTEFXNVVMeHFzNnJxTGlmSk0yQ082bUlFaTd2VUVobk1IUUNlbDVLa0FwMEpIRm5GWmk1d2pZWVNJbEdTRk4ySDFKaUFObzRlVk8wbURSZ0ZMd0Mrc1dvcW1Qbk0vNStaTGtnQUpUUDJWaWJUVHVhREF6S2N2eWJtQ2xYVmNIZk9rNXZycmdKL2ZhQ1VjK2dNYTIxRW5pUnA0Sm5nZFd2U3FhRHJQRnc0aHlEYUZlK0tVVmlqYjRjRUpFaUhNbVJRV1FCdDc2cExZbnVtdDczTjZPQUFLK2VpZnQ1aEU0MnlqUUF0STRhYWI5VnlvTnJnL01KQW8xc2JhaFpXV0dFK2N1RzdQZlhxNHFRek9mMHF3U1lwS3Yzc0JqNVNVblp6UU82T1dZWS9lSldWdVQrQ1pUdkR0Q21ERE9UZUVwOEVqMmhJT0s3YVp4VHp6USt0SzJlV1hoemhuZnV5YTFkYXJDSFhRV2Zsd25rbVI2RGlZQWdOWGI4VVBwUTFSZXZsa3VGVlFERHdkR1ZldHNSZVB5T21OTlE5dHBYWExhYjcvMkhLOFpOQ0Y2d2FLZ3c9PSIsImRhdGFrZXkiOiJBUUVCQUhod20wWWFJU0plUnRKbTVuMUc2dXFlZWtYdW9YWFBlNVVGY2U5UnE4LzE0d0FBQUg0d2ZBWUpLb1pJaHZjTkFRY0dvRzh3YlFJQkFEQm9CZ2txaGtpRzl3MEJCd0V3SGdZSllJWklBV1VEQkFFdU1CRUVETGlZWjVXUTVpdm1wUkNIcVFJQkVJQTdvZm1DZy9oR29uZmJYSVFIaWRNVmd4M1l2VDl0cnNTVEdKVmJPclBvQWw4dURqc3I0WEZSMVRUelZOaENPclFkS0ZuK21ZdkhMRmdKWGE0PSIsInZlcnNpb24iOiIyIiwidHlwZSI6IkRBVEFfS0VZIiwiZXhwaXJhdGlvbiI6MTYzMDc0MTQyOX0=" bash -x ./copy_image.sh |& tee XX + 3052 2021/09/03 20:10:30 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) aws ecr describe-repositories --region us-east-1 --output json --repository-names eks/test3/nginx + 3053 2021/09/03 20:10:41 get-profile + 3054 2021/09/03 20:10:48 get-region + 3055 2021/09/03 20:10:56 vi ~/.bash.a + 3056 2021/09/03 20:10:59 vi ~/.bash.als + 3057 2021/09/03 20:11:05 cat ~/.bash/aliases + 3058 2021/09/03 20:11:07 ls ~/.bash* + 3059 2021/09/03 20:11:11 ls ~/.bash_aliases + 3060 2021/09/03 20:11:14 cat ~/.bash_aliases + 3061 2021/09/03 20:11:18 pwd + 3062 2021/09/03 20:11:24 alias + 3063 2021/09/03 20:11:31 source ~/.bash_aliases + 3064 2021/09/03 20:11:37 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) SOURCE_IMAGE=public.ecr.aws/nginx/nginx:1.21 DESTINATION_IMAGE=079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx:1.21 DESTINATION_USERNAME=AWS 
DESTINATION_PASSWORD="eyJwYXlsb2FkIjoiNEVCdTVSTlpiWUh1K3NNa050OFd3VisrOURDL3VCYmNCOEhPOUZBdlhXaDlGMWRVVldobWdsQ3VZQ0xHQXpBUVJmZzBWN0JoY0ZrU3NDTldOUmdUemVPZnlIZDJNWTlORFczNCs3K05NVkFYUVhlTU9xRTJxN1dOdE4rU3cyRE5tS3ZyZWswOG5JOTlJWSs4UHRhd1pvQjVNRzNxYWF1ODNrNTJpeEJFQXZ1WGlOb3RMeUEwdHBSeUJGeFdXY3pJYUJ2T3lDODY0UGZ1UjJzMFFKUnJldjYyVFduSEY1eGkxOFVRcFVVZkRYeSsrZkF3TEhBbm5QbE5lMkxCSUM2NTJNQlVRQzRrQVN2elBOeld4dE0vSHl1aWpZQjF1aEZQbTJmVVdJRHVrMXcrOFNJU0lIeDI1S2lGTS9yOFYxWE5pemdZRzJidUpjakZaRGpWa0grclRBQmdSRnlCNjN4S09aUU05YjhSVkt3dEV3Y09hUzBrS2RIMXQvbXNUQXRWM0hEN2JKQVk1TG5pazJ5VE5LTlVTSTRSMDZXMkIrWlYveGdvUjBtSXBGN25oOWZ1SStyM0lTMUpaVUwzYzBFQ0Q5ZWFSejJURmxFd1VyMFJOVkcxNnl4MnVZRnFBTWZ5a2dsYUJ1ZTlPS0VUUzVZVkNlTkFDZXJHZDFNRmgrM1NCZFNJeWpVaWs2T3I3c2dNc3Aydm1jU3c4eDYwdHJLNVRNQm44ejBSRDA1RnBsVFVKS2YrNGlkelkyT1p3M1pqcnlLRjlORnZ4NkphVi9mc0V0eklpQTQybUo4YllkQWFZYmpQVVV3L0pMUGlybXlIdXdOTEFXNVVMeHFzNnJxTGlmSk0yQ082bUlFaTd2VUVobk1IUUNlbDVLa0FwMEpIRm5GWmk1d2pZWVNJbEdTRk4ySDFKaUFObzRlVk8wbURSZ0ZMd0Mrc1dvcW1Qbk0vNStaTGtnQUpUUDJWaWJUVHVhREF6S2N2eWJtQ2xYVmNIZk9rNXZycmdKL2ZhQ1VjK2dNYTIxRW5pUnA0Sm5nZFd2U3FhRHJQRnc0aHlEYUZlK0tVVmlqYjRjRUpFaUhNbVJRV1FCdDc2cExZbnVtdDczTjZPQUFLK2VpZnQ1aEU0MnlqUUF0STRhYWI5VnlvTnJnL01KQW8xc2JhaFpXV0dFK2N1RzdQZlhxNHFRek9mMHF3U1lwS3Yzc0JqNVNVblp6UU82T1dZWS9lSldWdVQrQ1pUdkR0Q21ERE9UZUVwOEVqMmhJT0s3YVp4VHp6USt0SzJlV1hoemhuZnV5YTFkYXJDSFhRV2Zsd25rbVI2RGlZQWdOWGI4VVBwUTFSZXZsa3VGVlFERHdkR1ZldHNSZVB5T21OTlE5dHBYWExhYjcvMkhLOFpOQ0Y2d2FLZ3c9PSIsImRhdGFrZXkiOiJBUUVCQUhod20wWWFJU0plUnRKbTVuMUc2dXFlZWtYdW9YWFBlNVVGY2U5UnE4LzE0d0FBQUg0d2ZBWUpLb1pJaHZjTkFRY0dvRzh3YlFJQkFEQm9CZ2txaGtpRzl3MEJCd0V3SGdZSllJWklBV1VEQkFFdU1CRUVETGlZWjVXUTVpdm1wUkNIcVFJQkVJQTdvZm1DZy9oR29uZmJYSVFIaWRNVmd4M1l2VDl0cnNTVEdKVmJPclBvQWw4dURqc3I0WEZSMVRUelZOaENPclFkS0ZuK21ZdkhMRmdKWGE0PSIsInZlcnNpb24iOiIyIiwidHlwZSI6IkRBVEFfS0VZIiwiZXhwaXJhdGlvbiI6MTYzMDc0MTQyOX0=" bash -x ./copy_image.sh |& tee XX + 3065 2021/09/03 20:12:01 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) aws ecr describe-repositories --region us-east-1 --output json --repository-names eks/test3/nginx + 3066 2021/09/03 20:12:56 skopeo inspect --dest-creds 
AWS:eyJwYXlsb2FkIjoiNEVCdTVSTlpiWUh1K3NNa050OFd3VisrOURDL3VCYmNCOEhPOUZBdlhXaDlGMWRVVldobWdsQ3VZQ0xHQXpBUVJmZzBWN0JoY0ZrU3NDTldOUmdUemVPZnlIZDJNWTlORFczNCs3K05NVkFYUVhlTU9xRTJxN1dOdE4rU3cyRE5tS3ZyZWswOG5JOTlJWSs4UHRhd1pvQjVNRzNxYWF1ODNrNTJpeEJFQXZ1WGlOb3RMeUEwdHBSeUJGeFdXY3pJYUJ2T3lDODY0UGZ1UjJzMFFKUnJldjYyVFduSEY1eGkxOFVRcFVVZkRYeSsrZkF3TEhBbm5QbE5lMkxCSUM2NTJNQlVRQzRrQVN2elBOeld4dE0vSHl1aWpZQjF1aEZQbTJmVVdJRHVrMXcrOFNJU0lIeDI1S2lGTS9yOFYxWE5pemdZRzJidUpjakZaRGpWa0grclRBQmdSRnlCNjN4S09aUU05YjhSVkt3dEV3Y09hUzBrS2RIMXQvbXNUQXRWM0hEN2JKQVk1TG5pazJ5VE5LTlVTSTRSMDZXMkIrWlYveGdvUjBtSXBGN25oOWZ1SStyM0lTMUpaVUwzYzBFQ0Q5ZWFSejJURmxFd1VyMFJOVkcxNnl4MnVZRnFBTWZ5a2dsYUJ1ZTlPS0VUUzVZVkNlTkFDZXJHZDFNRmgrM1NCZFNJeWpVaWs2T3I3c2dNc3Aydm1jU3c4eDYwdHJLNVRNQm44ejBSRDA1RnBsVFVKS2YrNGlkelkyT1p3M1pqcnlLRjlORnZ4NkphVi9mc0V0eklpQTQybUo4YllkQWFZYmpQVVV3L0pMUGlybXlIdXdOTEFXNVVMeHFzNnJxTGlmSk0yQ082bUlFaTd2VUVobk1IUUNlbDVLa0FwMEpIRm5GWmk1d2pZWVNJbEdTRk4ySDFKaUFObzRlVk8wbURSZ0ZMd0Mrc1dvcW1Qbk0vNStaTGtnQUpUUDJWaWJUVHVhREF6S2N2eWJtQ2xYVmNIZk9rNXZycmdKL2ZhQ1VjK2dNYTIxRW5pUnA0Sm5nZFd2U3FhRHJQRnc0aHlEYUZlK0tVVmlqYjRjRUpFaUhNbVJRV1FCdDc2cExZbnVtdDczTjZPQUFLK2VpZnQ1aEU0MnlqUUF0STRhYWI5VnlvTnJnL01KQW8xc2JhaFpXV0dFK2N1RzdQZlhxNHFRek9mMHF3U1lwS3Yzc0JqNVNVblp6UU82T1dZWS9lSldWdVQrQ1pUdkR0Q21ERE9UZUVwOEVqMmhJT0s3YVp4VHp6USt0SzJlV1hoemhuZnV5YTFkYXJDSFhRV2Zsd25rbVI2RGlZQWdOWGI4VVBwUTFSZXZsa3VGVlFERHdkR1ZldHNSZVB5T21OTlE5dHBYWExhYjcvMkhLOFpOQ0Y2d2FLZ3c9PSIsImRhdGFrZXkiOiJBUUVCQUhod20wWWFJU0plUnRKbTVuMUc2dXFlZWtYdW9YWFBlNVVGY2U5UnE4LzE0d0FBQUg0d2ZBWUpLb1pJaHZjTkFRY0dvRzh3YlFJQkFEQm9CZ2txaGtpRzl3MEJCd0V3SGdZSllJWklBV1VEQkFFdU1CRUVETGlZWjVXUTVpdm1wUkNIcVFJQkVJQTdvZm1DZy9oR29uZmJYSVFIaWRNVmd4M1l2VDl0cnNTVEdKVmJPclBvQWw4dURqc3I0WEZSMVRUelZOaENPclFkS0ZuK21ZdkhMRmdKWGE0PSIsInZlcnNpb24iOiIyIiwidHlwZSI6IkRBVEFfS0VZIiwiZXhwaXJhdGlvbiI6MTYzMDc0MTQyOX0= docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx:1.21 + 3067 2021/09/03 20:13:05 ls + 3068 2021/09/03 20:13:07 vi main.tf + 3069 2021/09/03 20:13:22 ls + 3070 2021/09/03 20:13:23 vi locals.tf + 3071 2021/09/03 20:13:25 ls + 3072 2021/09/03 20:13:32 grep repos * + 3073 2021/09/03 20:13:46 grep image_repos *tfvars + 3074 2021/09/03 20:13:47 grep image_repos *tf + 3075 2021/09/03 20:13:56 grep images *tf + 3076 2021/09/03 20:14:03 vi copy_images.tf + 3077 2021/09/03 20:14:58 pwd + 3078 2021/09/03 20:15:01 tf-plan + 3079 2021/09/03 20:15:17 tf-apply + 3080 2021/09/07 08:21:25 git br + 3081 2021/09/07 10:08:50 history|grep login + 3082 2021/09/07 10:08:57 ls ecr* + 3083 2021/09/07 10:09:02 pushd ~/dock + 3084 2021/09/07 10:09:02 ls + 3085 2021/09/07 10:09:04 pushd ~/docker/ + 3086 2021/09/07 10:09:05 ls + 3087 2021/09/07 10:09:07 cat pull.sh + 3088 2021/09/07 10:09:12 cat x + 3089 2021/09/07 10:09:20 history|grep ecr + 3090 2021/09/07 10:09:24 history|grep ecr > Y + 3091 2021/09/07 10:09:25 vi Y + 3092 2021/09/07 10:13:13 mv Y ecr-login.sh + 3093 2021/09/07 10:13:15 chmod 755 ecr-login. + 3094 2021/09/07 10:13:16 chmod 755 ecr-login.sh + 3095 2021/09/07 10:13:22 vi ecr-login.sh + 3096 2021/09/07 10:13:40 cat ecr-login. 
+ 3097 2021/09/07 10:13:42 cat ecr-login.sh + 3098 2021/09/07 10:13:46 # AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) ./ecr-login.sh + 3099 2021/09/07 10:13:50 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) ./ecr-login.sh + 3100 2021/09/07 10:13:52 ls + 3101 2021/09/07 10:19:16 history|grep PRO + 3102 2021/09/07 10:19:27 echo $AWS_PROFILE + 3103 2021/09/07 10:19:35 popd + 3104 2021/09/07 10:19:55 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) ./ecr-login.sh + 3105 2021/09/07 10:19:59 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) ./docker/ecr-login.sh + 3106 2021/09/07 10:20:06 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) $HOME/docker/ecr-login.sh + 3107 2021/09/07 10:20:17 tf-apply less + 3108 2021/09/07 10:20:31 skopeo inspect docker://"079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx" + 3109 2021/09/07 10:20:37 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx + 3110 2021/09/07 10:21:07 grep nginx logs/*app* + 3111 2021/09/07 10:21:12 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx:1.21 + 3112 2021/09/07 10:21:31 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx/nginx:1.21 + 3113 2021/09/07 10:21:36 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test2/nginx/nginx:1.21 + 3114 2021/09/07 10:21:42 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx/nginx:1.21 + 3115 2021/09/07 10:21:50 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx:1.21 + 3116 2021/09/07 10:22:06 ls + 3117 2021/09/07 10:23:01 vi main.tf + 3118 2021/09/07 10:23:15 cd cah + 3119 2021/09/07 10:23:17 cd charts/ + 3120 2021/09/07 10:23:17 la + 3121 2021/09/07 10:23:18 ls + 3122 2021/09/07 10:23:20 cd my-nginx/ + 3123 2021/09/07 10:23:21 ls + 3124 2021/09/07 10:23:22 vi * + 3125 2021/09/07 10:23:54 cd .. + 3126 2021/09/07 10:23:58 cd ../eks-test2 + 3127 2021/09/07 10:23:58 ls + 3128 2021/09/07 10:24:00 cd sample-istio/ + 3129 2021/09/07 10:24:01 l + 3130 2021/09/07 10:24:02 vi main.tf + 3131 2021/09/07 10:24:10 cd charts/ + 3132 2021/09/07 10:24:11 ls + 3133 2021/09/07 10:24:12 cd my-nginx/ + 3134 2021/09/07 10:24:13 ls + 3135 2021/09/07 10:24:15 vi Chart.yaml + 3136 2021/09/07 10:24:17 ls + 3137 2021/09/07 10:24:19 vi values.yaml + 3138 2021/09/07 10:25:16 cd .. + 3139 2021/09/07 10:25:19 less main.tf + 3140 2021/09/07 10:25:39 less logs/*app* + 3141 2021/09/07 10:26:21 vi main.tf + 3142 2021/09/07 10:26:27 ls + 3143 2021/09/07 10:26:33 vi copy_images.tf + 3144 2021/09/07 10:28:28 :q + 3145 2021/09/07 10:43:33 ls + 3146 2021/09/07 10:43:40 vi copy_images.tf + 3147 2021/09/07 10:44:06 grep name.*nginx ../sample-*/*tf + 3148 2021/09/07 10:44:12 ls .. 
+ 3149 2021/09/07 10:44:22 less ../sample-*/*tf + 3150 2021/09/07 10:44:41 less ../sample-*/main* + 3151 2021/09/07 10:44:50 less ../sample-*/copy*tf + 3152 2021/09/07 10:45:16 :q + 3153 2021/09/07 10:45:18 less ../sample-*/copy*tf + 3154 2021/09/07 10:45:54 vi copy_images.tf + 3155 2021/09/07 10:46:03 tf-plan + 3156 2021/09/07 10:47:50 grep ngingx ../sample*/copy*tf + 3157 2021/09/07 10:47:55 grep nginx ../sample*/copy*tf + 3158 2021/09/07 10:48:01 grep nginx ../sample*/copy*tf|grep image + 3159 2021/09/07 10:48:19 grep 'image.*nginx' ../sample*/copy*tf + 3160 2021/09/07 10:59:11 ls + 3161 2021/09/07 10:59:15 vi copy_images.tf + 3162 2021/09/07 10:59:27 tf-plan + 3163 2021/09/07 11:02:47 vi main.tf + 3164 2021/09/07 11:03:06 vi copy_images.tf + 3165 2021/09/07 11:03:47 terraform state list|grep copy_ + 3166 2021/09/07 11:03:56 terraform state list + 3167 2021/09/07 11:04:09 pwd + 3168 2021/09/07 11:04:12 ls + 3169 2021/09/07 11:04:19 pwd + 3170 2021/09/07 11:04:28 vi copy_images.tf + 3171 2021/09/07 11:04:36 cd .. + 3172 2021/09/07 11:04:40 cdeks-test3cd + 3173 2021/09/07 11:04:45 cd ekts-test + 3174 2021/09/07 11:04:47 cd eks-test3 + 3175 2021/09/07 11:04:49 cd sample-istio/ + 3176 2021/09/07 11:04:51 vi copy_images.tf + 3177 2021/09/07 11:04:58 tf-plan + 3178 2021/09/07 11:05:25 vi main.tf + 3179 2021/09/07 11:05:34 tf-plan + 3180 2021/09/07 11:07:20 tf-apply + 3181 2021/09/07 11:08:40 tf-apply -auto-approve + 3182 2021/09/07 11:24:54 sipcalc 100.64.0.0/15 + 3183 2021/09/07 11:31:21 kubecfl --kubconfig setup/kube.config get pods -o wide + 3184 2021/09/07 11:31:24 kubectl --kubconfig setup/kube.config get pods -o wide + 3185 2021/09/07 11:31:30 kubectl --kubeconfig setup/kube.config get pods -o wide + 3186 2021/09/07 11:31:36 kubectl --kubeconfig setup/kube.config get pods -o wide --all-namespaces + 3187 2021/09/07 11:31:55 kubectl --kubeconfig setup/kube.config get events -o wide -n my-nginx + 3188 2021/09/07 11:32:34 vi copy_images.tf + 3189 2021/09/07 11:33:22 grep copy_ logs/*app* + 3190 2021/09/07 11:33:31 vi main.tf + 3191 2021/09/07 11:33:37 vi copy_images.tf + 3192 2021/09/07 11:34:27 tf-plan + 3193 2021/09/07 11:35:07 tf-apply + 3194 2021/09/07 11:37:01 kubectl --kubeconfig setup/kube.config get events -o wide -n my-nginx + 3195 2021/09/07 11:37:15 kubectl --kubeconfig setup/kube.config get all -o wide -n my-nginx + 3196 2021/09/07 11:38:38 kubectl --kubeconfig setup/kube.config get svc -o wide --all-namespaces + 3197 2021/09/07 11:41:19 less main.tf + 3198 2021/09/07 11:41:42 less charts/my-nginx/* + 3199 2021/09/07 11:41:51 cat settings.auto.tfvars + 3200 2021/09/07 11:44:03 kubectl --kubeconfig setup/kube.config get nodes -o wide + 3201 2021/09/07 11:44:09 kubectl --kubeconfig setup/kube.config get pods -o wide + 3202 2021/09/07 11:44:18 kubectl --kubeconfig setup/kube.config get pods -o wide --all-namespaces + 3203 2021/09/07 11:44:24 kubectl --kubeconfig setup/kube.config get pods -o wide --all-namespaces|grep 100.64 + 3204 2021/09/07 12:02:43 kubectl --kubeconfig setup/kube.config get services -o wide --all-namespaces + 3205 2021/09/07 12:11:09 cd .. 
+ 3206 2021/09/07 12:11:10 ls + 3207 2021/09/07 12:11:14 mkdir kube-bench + 3208 2021/09/07 12:11:15 cd kube-bench/ + 3209 2021/09/07 12:11:15 ls + 3210 2021/09/07 12:11:37 vi README.md + 3211 2021/09/07 12:13:49 curl -k -o https://raw.githubusercontent.com/aquasecurity/kube-bench/main/job-eks.yaml + 3212 2021/09/07 12:13:52 curl -k -O https://raw.githubusercontent.com/aquasecurity/kube-bench/main/job-eks.yaml + 3213 2021/09/07 12:13:55 ls -al + 3214 2021/09/07 12:13:59 history|grpe kube + 3215 2021/09/07 12:14:19 kubectl --kubeconfig ../sample-istio/setup/kube.config get pods -o wide + 3216 2021/09/07 12:14:31 kubectl --kubeconfig ../sample-istio/setup/kube.config get pods -o wide --all-namespaces + 3217 2021/09/07 12:14:36 less job-eks.yaml + 3218 2021/09/07 12:15:20 kubectl --kubeconfig ../sample-istio/setup/kube.config apply -f job-eks.yaml + 3219 2021/09/07 12:15:25 kubectl --kubeconfig ../sample-istio/setup/kube.config get pods -o wide --all-namespaces + 3220 2021/09/07 12:15:33 kubectl --kubeconfig ../sample-istio/setup/kube.config get pods -o wide + 3221 2021/09/07 12:15:54 kubectl --kubeconfig ../sample-istio/setup/kube.config get logs kube-bench-bcxpw + 3222 2021/09/07 12:16:04 kubectl --kubeconfig ../sample-istio/setup/kube.config logs kube-bench-bcxpw + 3223 2021/09/07 12:16:11 kubectl --kubeconfig ../sample-istio/setup/kube.config get pods -o wide + 3224 2021/09/07 12:16:18 kubectl --kubeconfig ../sample-istio/setup/kube.config get events -o wide + 3225 2021/09/07 12:16:25 ls + 3226 2021/09/07 12:16:35 cp ../sample-istio/copy_images.tf . + 3227 2021/09/07 12:16:41 cd ../sample-istio/ + 3228 2021/09/07 12:16:42 ls + 3229 2021/09/07 12:16:49 ls -al copy_image.sh + 3230 2021/09/07 12:16:51 cd .. + 3231 2021/09/07 12:16:54 cd kube-bench/ + 3232 2021/09/07 12:16:59 ln -s ../common-services/copy_image.sh . + 3233 2021/09/07 12:17:08 vi copy_images.tf + 3234 2021/09/07 12:17:32 cat *yaml + 3235 2021/09/07 12:17:36 vi copy_images.tf + 3236 2021/09/07 12:18:00 tf-init + 3237 2021/09/07 12:18:07 tf-apply + 3238 2021/09/07 12:18:12 ls + 3239 2021/09/07 12:18:20 cp ../sample-istio/remote_state.yml . + 3240 2021/09/07 12:18:21 vi remote_state.yml + 3241 2021/09/07 12:18:37 tf-directory-setup.py -l none + 3242 2021/09/07 12:18:38 ls + 3243 2021/09/07 12:18:40 setup-new-directory.sh + 3244 2021/09/07 12:18:41 ls + 3245 2021/09/07 12:18:47 tf-init + 3246 2021/09/07 12:18:53 tf-plan + 3247 2021/09/07 12:19:02 cp ../locals.tf . + 3248 2021/09/07 12:19:04 cat locals.tf + 3249 2021/09/07 12:19:06 tf-plan + 3250 2021/09/07 12:19:11 ls + 3251 2021/09/07 12:19:14 ln -s ../settings.auto.tfvars . + 3252 2021/09/07 12:19:17 tf-plan + 3253 2021/09/07 12:19:29 ln -s ../variables.eks.tf . + 3254 2021/09/07 12:19:30 tf-plan + 3255 2021/09/07 12:19:45 ls + 3256 2021/09/07 12:19:54 cd ../sample-istio/ + 3257 2021/09/07 12:19:54 ls + 3258 2021/09/07 12:20:02 cd ../kube-bench/ + 3259 2021/09/07 12:20:06 ln -s ../variables.vpc.tf . 
+ 3260 2021/09/07 12:20:07 tf-plan + 3261 2021/09/07 12:20:21 tf-apply + 3262 2021/09/07 12:22:03 kubectl --kubeconfig ../sample-istio/setup/kube.config get events -o wide + 3263 2021/09/07 12:22:13 kubectl --kubeconfig ../sample-istio/setup/kube.config get pods -o wide + 3264 2021/09/07 12:22:30 kubectl --kubeconfig ../sample-istio/setup/kube.config log kube-bench-bcxpw + 3265 2021/09/07 12:22:59 export KUBECONFIG $(pwd)/../sample-istio/setup/kube.config + 3266 2021/09/07 12:23:08 export KUBECONFIG=$(pwd)/../sample-istio/setup/kube.config + 3267 2021/09/07 12:23:12 kubectl get pods -o wide + 3268 2021/09/07 12:23:26 kubectl describe pod kube-bench + 3269 2021/09/07 12:23:58 env|grep ECR + 3270 2021/09/07 12:24:10 cat ~/docker//ecr-login.sh + 3271 2021/09/07 12:24:17 export ECR_NAME="079788916859.dkr.ecr.us-east-1.amazonaws.com" + 3272 2021/09/07 12:24:39 skopeo inspect docker://$ECR_NAME/aquasec/kube-bench + 3273 2021/09/07 12:24:51 skopeo inspect docker://$ECR_NAME/aquasec/kube-bench:latest + 3274 2021/09/07 12:24:57 echo $ECR_NAME + 3275 2021/09/07 12:25:05 skopeo inspect docker://$ECR_NAME/eks/test3/aquasec/kube-bench:latest + 3276 2021/09/07 12:25:09 skopeo inspect docker://$ECR_NAME/eks/test3/aquasec/kube-bench + 3277 2021/09/07 12:25:16 grep copy logs/*app* + 3278 2021/09/07 12:25:35 cp job-eks.yaml job-eks.yaml.orig + 3279 2021/09/07 12:25:37 vi job-eks.yaml + 3280 2021/09/07 12:25:59 kubectl help + 3281 2021/09/07 12:26:06 kubectl delete pod kube-bench + 3282 2021/09/07 12:26:19 kubectl get pod + 3283 2021/09/07 12:26:26 kubectl delete pod kube-bench-bcxpw + 3284 2021/09/07 12:26:41 kubectl get pod -o wide + 3285 2021/09/07 12:26:45 kubectl get pods -o wide + 3286 2021/09/07 12:26:56 history|grep apply + 3287 2021/09/07 12:27:03 kubectl apply -f job-eks.yaml + 3288 2021/09/07 12:27:21 kubectl delete job kube-bench + 3289 2021/09/07 12:27:28 kubectl get pods -o wide + 3290 2021/09/07 12:27:37 kubectl get pods -o wide --wach + 3291 2021/09/07 12:27:39 kubectl get pods -o wide --watch + 3292 2021/09/07 12:28:53 kubectl get pods -o wide + 3293 2021/09/07 12:28:59 kubectl apply -f job-eks.yaml + 3294 2021/09/07 12:29:03 kubectl get pods -o wide + 3295 2021/09/07 12:29:15 kubectl describe pod kube-bench + 3296 2021/09/07 12:29:27 kubectl get events kube-bench + 3297 2021/09/07 12:29:37 kubectl get events kube-bench-rcr6k + 3298 2021/09/07 12:29:43 kubectl get events + 3299 2021/09/07 12:30:01 vi job-eks.yaml + 3300 2021/09/07 12:30:43 kubectl delete job kube-bench + 3301* 2021/09/07 12:30:49 kubectl descri + 3302 2021/09/07 12:35:16 kubectl log kube-bench + 3303 2021/09/07 12:35:19 kubectl logs kube-bench + 3304 2021/09/07 12:35:31 kubectl get pods -o wide + 3305 2021/09/07 12:35:35 history|grep app + 3306 2021/09/07 12:35:40 kubectl apply -f job-eks.yaml + 3307 2021/09/07 12:35:44 kubectl get pods -o wide + 3308 2021/09/07 12:35:48 history|grep descr + 3309 2021/09/07 12:35:52 kubectl describe pod kube-bench + 3310 2021/09/07 12:36:02 kubectl logs kube-bench + 3311 2021/09/07 12:36:10 kubectl describe pod kube-bench + 3312 2021/09/07 12:58:23 bc + 3313 2021/09/07 14:01:51 kubectl describe pod kube-bench + 3314 2021/09/07 14:02:04 kubectl logs kube-bench + 3315 2021/09/07 14:02:14 kubectl logs kube-bench-89l6b + 3316 2021/09/07 14:02:57 history|grep app + 3317 2021/09/07 14:03:18 kubectl describe pod kube-bench + 3318 2021/09/07 14:14:47 kubectl describe pod kube-bench > pod.txt + 3319 2021/09/07 14:14:57 kubectl logs kube-bench-89l6b > output.log + 3320 2021/09/07 14:14:58 less output 
+ 3321 2021/09/07 14:15:04 less output.log
+ 3322 2021/09/07 14:16:32 kubectl get pods
+ 3323 2021/09/07 14:16:40 kubectl delete -f job-eks.yaml
+ 3324 2021/09/07 14:16:59 history >> history.1
diff --git a/examples/established-cluster-examples/dnsutils/locals.tf b/examples/established-cluster-examples/dnsutils/locals.tf
new file mode 100644
index 0000000..b7b1696
--- /dev/null
+++ b/examples/established-cluster-examples/dnsutils/locals.tf
@@ -0,0 +1,4 @@
+locals {
+  region = var.region
+}
+
diff --git a/examples/established-cluster-examples/dnsutils/main.tf b/examples/established-cluster-examples/dnsutils/main.tf
new file mode 100644
index 0000000..f46eae2
--- /dev/null
+++ b/examples/established-cluster-examples/dnsutils/main.tf
@@ -0,0 +1,100 @@
+resource "kubernetes_namespace" "app" {
+  metadata {
+    name = "utility-apps"
+  }
+}
+
+resource "kubernetes_pod" "busybox" {
+  metadata {
+    name = "busybox"
+    # reference the namespace resource so Terraform creates it before the pods
+    namespace = kubernetes_namespace.app.metadata[0].name
+  }
+  spec {
+    container {
+      name              = "busybox"
+      image             = local.image_details["busybox"].dest_image
+      command           = ["sleep", "3600"]
+      image_pull_policy = "IfNotPresent"
+    }
+    restart_policy = "Always"
+  }
+}
+
+resource "kubernetes_pod" "dnsutils" {
+  metadata {
+    name      = "dnsutils"
+    namespace = kubernetes_namespace.app.metadata[0].name
+  }
+  spec {
+    container {
+      name              = "dnsutils"
+      image             = local.image_details["dnsutils"].dest_image
+      command           = ["sleep", "3600"]
+      image_pull_policy = "IfNotPresent"
+    }
+    restart_policy = "Always"
+  }
+}
+
+resource "kubernetes_pod" "alpine" {
+  metadata {
+    name      = "alpine"
+    namespace = kubernetes_namespace.app.metadata[0].name
+  }
+  spec {
+    container {
+      name              = "alpine"
+      image             = local.image_details["alpine"].dest_image
+      command           = ["sleep", "3600"]
+      image_pull_policy = "IfNotPresent"
+    }
+    restart_policy = "Always"
+  }
+}
+
+resource "kubernetes_pod" "alpine-curl" {
+  metadata {
+    name      = "alpine-curl"
+    namespace = kubernetes_namespace.app.metadata[0].name
+  }
+  spec {
+    container {
+      name              = "alpine-curl"
+      image             = local.image_details["alpine-curl"].dest_image
+      command           = ["sleep", "3600"]
+      image_pull_policy = "IfNotPresent"
+    }
+    restart_policy = "Always"
+  }
+}
+
+resource "kubernetes_pod" "aws-cli" {
+  metadata {
+    name      = "aws-cli"
+    namespace = kubernetes_namespace.app.metadata[0].name
+  }
+  spec {
+    # reference the service account resource so it exists before the pod,
+    # letting the IRSA webhook inject the role credentials
+    service_account_name = kubernetes_service_account.aws-cli.metadata[0].name
+    container {
+      name              = "aws-cli"
+      image             = local.image_details["aws-cli"].dest_image
+      command           = ["sleep", "3600"]
+      image_pull_policy = "IfNotPresent"
+    }
+    restart_policy = "Always"
+  }
+}
+
+# https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html
+
+resource "kubernetes_service_account" "aws-cli" {
+  metadata {
+    name      = "aws-cli"
+    namespace = kubernetes_namespace.app.metadata[0].name
+    annotations = {
+      "eks.amazonaws.com/role-arn" = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.role_irsa-app2_arn
+    }
+  }
+}
+
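Once applied, the utility pods give quick in-cluster probes. Two hypothetical smoke tests, using the pod and namespace names defined above (history.1 runs kubectl with --kubeconfig setup/kube.config; adjust to your kubeconfig):

    # Check cluster DNS from the dnsutils pod
    kubectl -n utility-apps exec dnsutils -- nslookup kubernetes.default

    # Confirm the aws-cli pod assumed the IRSA role from its service account
    kubectl -n utility-apps exec aws-cli -- aws sts get-caller-identity

If the service-account annotation resolved correctly, get-caller-identity reports the assumed role ARN rather than the node instance role.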
diff --git a/examples/established-cluster-examples/dnsutils/output.log b/examples/established-cluster-examples/dnsutils/output.log
new file mode 100644
index 0000000..71ee8cd
--- /dev/null
+++ b/examples/established-cluster-examples/dnsutils/output.log
@@ -0,0 +1,41 @@
+[INFO] 3 Worker Node Security Configuration
+[INFO] 3.1 Worker Node Configuration Files
+[PASS] 3.1.1 Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored)
+[PASS] 3.1.2 Ensure that the proxy kubeconfig file ownership is set to root:root (Scored)
+[PASS] 3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored)
+[PASS] 3.1.4 Ensure that the kubelet configuration file ownership is set to root:root (Scored)
+[INFO] 3.2 Kubelet
+[PASS] 3.2.1 Ensure that the --anonymous-auth argument is set to false (Scored)
+[PASS] 3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored)
+[PASS] 3.2.3 Ensure that the --client-ca-file argument is set as appropriate (Scored)
+[PASS] 3.2.4 Ensure that the --read-only-port argument is set to 0 (Scored)
+[PASS] 3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored)
+[PASS] 3.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Scored)
+[PASS] 3.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Scored)
+[PASS] 3.2.8 Ensure that the --hostname-override argument is not set (Scored)
+[WARN] 3.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Scored)
+[PASS] 3.2.10 Ensure that the --rotate-certificates argument is not set to false (Scored)
+[PASS] 3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored)
+
+== Remediations node ==
+3.2.9 If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
+If using command line arguments, edit the kubelet service file
+/etc/systemd/system/kubelet.service on each worker node and
+set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+Based on your system, restart the kubelet service. For example:
+systemctl daemon-reload
+systemctl restart kubelet.service
+
+
+== Summary node ==
+14 checks PASS
+0 checks FAIL
+1 checks WARN
+0 checks INFO
+
+== Summary total ==
+14 checks PASS
+0 checks FAIL
+1 checks WARN
+0 checks INFO
+
diff --git a/examples/established-cluster-examples/dnsutils/pod.txt b/examples/established-cluster-examples/dnsutils/pod.txt
new file mode 100644
index 0000000..09f3718
--- /dev/null
+++ b/examples/established-cluster-examples/dnsutils/pod.txt
@@ -0,0 +1,71 @@
+Name:         kube-bench-89l6b
+Namespace:    default
+Priority:     0
+Node:         ip-10-194-25-15.ec2.internal/10.194.25.15
+Start Time:   Tue, 07 Sep 2021 12:35:41 -0400
+Labels:       controller-uid=2dc67fe7-a273-4db3-bd09-3bc491dbf681
+              job-name=kube-bench
+Annotations:  kubernetes.io/psp: eks.privileged
+Status:       Succeeded
+IP:           100.64.2.164
+IPs:
+  IP:           100.64.2.164
+Controlled By:  Job/kube-bench
+Containers:
+  kube-bench:
+    Container ID:  docker://8bf8279d9195e122fc396f7399e100e7985af6ef9714259b6a7060c3271cdcee
+    Image:         079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/kube-bench:latest
+    Image ID:      docker-pullable://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/kube-bench@sha256:e02aa2eb58c9a6bee9e2b060684051be14b266f0e9952cadd8f71f32f578b5d7
+    Port:
+    Host Port:
+    Command:
+      kube-bench
+      run
+      --targets
+      node
+      --benchmark
+      eks-1.0
+    State:          Terminated
+      Reason:       Completed
+      Exit Code:    0
+      Started:      Tue, 07 Sep 2021 12:35:44 -0400
+      Finished:     Tue, 07 Sep 2021 12:35:44 -0400
+    Ready:          False
+    Restart Count:  0
+    Environment:
+    Mounts:
+      /etc/kubernetes from etc-kubernetes (ro)
+      /etc/systemd from etc-systemd (ro)
+      /var/lib/kubelet from var-lib-kubelet (ro)
+      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-9mhvq (ro)
+Conditions:
+  Type              Status
+  Initialized       True
+  Ready             False
+  ContainersReady   False
+  PodScheduled      True
+Volumes:
+  var-lib-kubelet:
+    Type:          HostPath (bare host directory volume)
+    Path:          /var/lib/kubelet
+    HostPathType:
+  etc-systemd:
+    Type:          HostPath (bare host directory volume)
+    Path:          /etc/systemd
+    HostPathType:
+  etc-kubernetes:
+    Type:          HostPath (bare host directory volume)
+    Path:          /etc/kubernetes
+ HostPathType:
+ kube-api-access-9mhvq:
+ Type: Projected (a volume that contains injected data from multiple sources)
+ TokenExpirationSeconds: 3607
+ ConfigMapName: kube-root-ca.crt
+ ConfigMapOptional: <nil>
+ DownwardAPI: true
+QoS Class: BestEffort
+Node-Selectors: <none>
+Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
+ node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
+Events: <none>
+
diff --git a/examples/established-cluster-examples/dnsutils/pod.yaml b/examples/established-cluster-examples/dnsutils/pod.yaml
new file mode 100644
index 0000000..9192bf3
--- /dev/null
+++ b/examples/established-cluster-examples/dnsutils/pod.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: dnsutils
+  namespace: default
+spec:
+  containers:
+  - name: dnsutils
+    image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3
+    command:
+    - sleep
+    - "3600"
+    imagePullPolicy: IfNotPresent
+  restartPolicy: Always
diff --git a/examples/established-cluster-examples/dnsutils/prefixes.tf b/examples/established-cluster-examples/dnsutils/prefixes.tf
new file mode 120000
index 0000000..e0bf5ad
--- /dev/null
+++ b/examples/established-cluster-examples/dnsutils/prefixes.tf
@@ -0,0 +1 @@
+../prefixes.tf
\ No newline at end of file
diff --git a/examples/established-cluster-examples/dnsutils/providers.tf b/examples/established-cluster-examples/dnsutils/providers.tf
new file mode 120000
index 0000000..7244d01
--- /dev/null
+++ b/examples/established-cluster-examples/dnsutils/providers.tf
@@ -0,0 +1 @@
+../providers.tf
\ No newline at end of file
diff --git a/examples/established-cluster-examples/dnsutils/variables.eks.tf b/examples/established-cluster-examples/dnsutils/variables.eks.tf
new file mode 120000
index 0000000..7dd95db
--- /dev/null
+++ b/examples/established-cluster-examples/dnsutils/variables.eks.tf
@@ -0,0 +1 @@
+../variables.eks.tf
\ No newline at end of file
diff --git a/examples/established-cluster-examples/dnsutils/variables.vpc.tf b/examples/established-cluster-examples/dnsutils/variables.vpc.tf
new file mode 120000
index 0000000..f672f33
--- /dev/null
+++ b/examples/established-cluster-examples/dnsutils/variables.vpc.tf
@@ -0,0 +1 @@
+../variables.vpc.tf
\ No newline at end of file
diff --git a/examples/established-cluster-examples/dnsutils/version.tf b/examples/established-cluster-examples/dnsutils/version.tf
new file mode 120000
index 0000000..061373c
--- /dev/null
+++ b/examples/established-cluster-examples/dnsutils/version.tf
@@ -0,0 +1 @@
+../version.tf
\ No newline at end of file
diff --git a/examples/established-cluster-examples/kube-bench/README.md b/examples/established-cluster-examples/kube-bench/README.md
new file mode 100644
index 0000000..eb60652
--- /dev/null
+++ b/examples/established-cluster-examples/kube-bench/README.md
@@ -0,0 +1,16 @@
+AWS blog post introducing the CIS Amazon EKS Benchmark:
+https://aws.amazon.com/blogs/containers/introducing-cis-amazon-eks-benchmark/
+
+The kube-bench project:
+https://github.com/aquasecurity/kube-bench
+
+The EKS job manifest used in this example:
+https://raw.githubusercontent.com/aquasecurity/kube-bench/main/job-eks.yaml
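+
+Typical run, assuming kubectl is pointed at the target cluster and the
+kube-bench image has already been copied into ECR by copy_images.tf:
+
+kubectl apply -f job-eks.yaml
+kubectl logs job/kube-bench > output.log
+kubectl delete -f job-eks.yaml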
+
diff --git a/examples/established-cluster-examples/kube-bench/copy_image.sh b/examples/established-cluster-examples/kube-bench/copy_image.sh
new file mode 120000
index 0000000..534e41c
--- /dev/null
+++ b/examples/established-cluster-examples/kube-bench/copy_image.sh
@@ -0,0 +1 @@
+../common-services/copy_image.sh
\ No newline at end of file
diff --git a/examples/established-cluster-examples/kube-bench/copy_images.tf b/examples/established-cluster-examples/kube-bench/copy_images.tf
new file mode 100644
index 0000000..44d7a54
--- /dev/null
+++ b/examples/established-cluster-examples/kube-bench/copy_images.tf
@@ -0,0 +1,36 @@
+data "aws_ecr_authorization_token" "token" {}
+
+locals {
+  account_id       = data.aws_caller_identity.current.account_id
+  repo_parent_name = format("eks/%v", var.cluster_name)
+
+  account_ecr = format("%v.dkr.ecr.%v.amazonaws.com/%v", local.account_id, var.region, local.repo_parent_name)
+
+  images = [
+    # kube-bench
+    {
+      name  = "kube-bench"
+      image = "aquasec/kube-bench"
+      tag   = "latest"
+    },
+  ]
+  image_repos = { for image in local.images : image.name => format("%v/%v", local.account_ecr, image.name) }
+}
+
+# Copy each source image into the cluster's ECR namespace using copy_image.sh.
+resource "null_resource" "copy_images" {
+  for_each = { for image in local.images : image.name => image }
+
+  provisioner "local-exec" {
+    command = "${path.module}/copy_image.sh"
+    environment = {
+      AWS_PROFILE          = var.profile
+      AWS_REGION           = local.region
+      SOURCE_IMAGE         = format("%v:%v", each.value.image, each.value.tag)
+      DESTINATION_IMAGE    = format("%v/%v:%v", local.account_ecr, each.value.name, each.value.tag)
+      DESTINATION_USERNAME = data.aws_ecr_authorization_token.token.user_name
+      DESTINATION_PASSWORD = data.aws_ecr_authorization_token.token.password
+    }
+  }
+}
+
diff --git a/examples/established-cluster-examples/kube-bench/data.eks.tf b/examples/established-cluster-examples/kube-bench/data.eks.tf
new file mode 100644
index 0000000..4cebea9
--- /dev/null
+++ b/examples/established-cluster-examples/kube-bench/data.eks.tf
@@ -0,0 +1,15 @@
+data "aws_eks_cluster" "cluster" {
+  name = var.cluster_name
+}
+
+data "aws_eks_cluster_auth" "cluster" {
+  name = var.cluster_name
+}
+
+locals {
+  aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster
+# for main.tf
+# aws_eks_cluster = aws_eks_cluster.eks_cluster
+# for all subdirectories
+  aws_eks_cluster = data.aws_eks_cluster.cluster
+}
diff --git a/examples/established-cluster-examples/kube-bench/history.1 b/examples/established-cluster-examples/kube-bench/history.1
new file mode 100644
index 0000000..909885f
--- /dev/null
+++ b/examples/established-cluster-examples/kube-bench/history.1
@@ -0,0 +1,1000 @@
+ 2325 2021/09/01 15:30:44 less README.md
+ 2326 2021/09/01 15:31:00 tf-plan -target=aws_iam_policy.nlb-policy -target=aws_iam_policy.cloudwatch-policy -target=aws_iam_policy.cluster-admin-policy
+ 2327 2021/09/01 15:31:17 tf-apply -target=aws_iam_policy.nlb-policy -target=aws_iam_policy.cloudwatch-policy -target=aws_iam_policy.cluster-admin-policy
+ 2328 2021/09/01 15:31:39 tf-plan
+ 2329 2021/09/01 15:32:24 ls logs/
+ 2330 2021/09/01 15:32:28 less logs/plan.20210901.163052*
+ 2331 2021/09/01 15:35:04 tf-apply
+ 2332 2021/09/01 15:52:23 vi main.tf
+ 2333 2021/09/01 15:52:32 grep KUBECONFIG *
+ 2334 2021/09/01 15:52:39 vi eks-console-access.tf
+ 2335 2021/09/01 15:53:11 vi main.tf
+ 2336 2021/09/01 15:56:00 vi main.tf eks-console-access.tf
+ 2337 2021/09/01 15:57:03 tf-plan
+ 2338 2021/09/01 15:57:24 tf-plan less
+ 2339 2021/09/01 15:57:43 tf-plan less | grep -iE ' (created|replaced|destroyed)'
+ 2340 2021/09/01 15:57:59 tf-apply
+ 2341 2021/09/01 15:59:16 ls -al setup/
+ 2342 2021/09/01 15:59:18 ls -al setup/kube.config
+ 2343 2021/09/01 15:59:22 ls setup/kube.config
+ 2344 2021/09/01 15:59:24 cat setup/kube.config
+ 2345 2021/09/01 15:59:27 fg
+ 2346 2021/09/01 16:22:12 ls -al
+ 2347 2021/09/01 16:22:17 tf-directory-setup.py -l none
+ 2348 2021/09/01 16:22:19 tf-init
+ 2349 2021/09/01 16:22:30 tf-directory-setup.py -l s3
+ 2350 2021/09/01 16:22:32 tf-plan
+ 2351 2021/09/01 16:23:12 ls
+ 2352
2021/09/01 16:23:15 cd aws-auth/ + 2353 2021/09/01 16:23:15 ls + 2354 2021/09/01 16:23:25 tf-directory-setup.py -l none + 2355 2021/09/01 16:23:29 tf-init + 2356 2021/09/01 16:23:40 grep kubectl * + 2357 2021/09/01 16:23:42 vi patch-aws-auth.tf + 2358 2021/09/01 16:24:26 vi patch-aws-auth.tf ../main.tf + 2359 2021/09/01 16:24:57 tf-plan + 2360 2021/09/01 16:25:17 vi patch-aws-auth.tf + 2361 2021/09/01 16:25:45 tf-plan + 2362 2021/09/01 16:26:07 tf-apply + 2363 2021/09/01 16:26:30 ls setup/ + 2364 2021/09/01 16:26:32 cat setup/kube.config + 2365 2021/09/01 16:26:47 cat ../setup/kube.config + 2366 2021/09/01 16:26:55 vi patch-aws-auth.tf + 2367 2021/09/01 16:27:29 tf-apply + 2368 2021/09/01 16:27:55 ls + 2369 2021/09/01 16:28:00 grep cni_vpc ../var*tf + 2370 2021/09/01 16:28:07 diff variables.vpc.tf .. + 2371 2021/09/01 16:28:12 ln -sf ../variables.vpc.tf . + 2372 2021/09/01 16:28:17 ls + 2373 2021/09/01 16:28:29 grpe kubectl patch-aws-auth.tf + 2374 2021/09/01 16:28:32 grep kubectl patch-aws-auth.tf + 2375 2021/09/01 16:28:38 vi patch-aws-auth.tf + 2376 2021/09/01 16:28:44 tf-appy + 2377 2021/09/01 16:28:46 tf-apply + 2378 2021/09/01 16:29:15 tf-directory-setup.py -l s3 + 2379 2021/09/01 16:29:16 git status . + 2380 2021/09/01 16:29:24 git add -A remote* + 2381 2021/09/01 16:29:26 git status . + 2382 2021/09/01 16:29:36 git commit -m'update to use kubeconfig extract' . + 2383 2021/09/01 16:29:37 cd .. + 2384 2021/09/01 16:29:39 git status . + 2385 2021/09/01 16:29:46 git add eniconfig.yaml remote_state.* -A + 2386 2021/09/01 16:29:48 git status . + 2387 2021/09/01 16:30:08 git commit -m'setup for kubeconfig, make it work with cni custom networking' -a + 2388 2021/09/01 16:30:10 git status . + 2389 2021/09/01 16:30:12 cd .. + 2390 2021/09/01 16:30:12 ls + 2391 2021/09/01 16:30:15 cd *3 + 2392 2021/09/01 16:30:16 ls + 2393 2021/09/01 16:30:19 cd eks-test3 + 2394 2021/09/01 16:30:20 ls + 2395 2021/09/01 16:31:23 cat aws-auth/p + 2396 2021/09/01 16:31:24 cat aws-auth/patch-aws-auth.tf + 2397 2021/09/01 16:31:27 cat aws-auth/patch-aws-auth.tf |less + 2398 2021/09/01 16:31:34 cat aws-auth/*tfvars + 2399 2021/09/01 16:32:56 ls + 2400 2021/09/01 16:33:07 cd efs/ + 2401 2021/09/01 16:33:07 ls + 2402 2021/09/01 16:33:09 grep kubectl * + 2403 2021/09/01 16:33:15 tf-directory-setup.py -l none + 2404 2021/09/01 16:33:21 vi main.tf ../main.tf + 2405 2021/09/01 16:34:37 cd .. + 2406 2021/09/01 16:34:39 vi main.tf + 2407 2021/09/01 16:35:02 cp efs/kubeconfig.tf . + 2408 2021/09/01 16:35:03 vi kubeconfig.tf + 2409 2021/09/01 16:35:52 cd aws-auth/ + 2410 2021/09/01 16:35:54 vi patch-aws-auth.tf + 2411 2021/09/01 16:36:06 ln -s ../kubeconfig.tf . + 2412 2021/09/01 16:36:08 cd ../efs/ + 2413 2021/09/01 16:36:10 rm kubeconfig.tf + 2414 2021/09/01 16:36:13 ln -s ../kubeconfig.tf . + 2415 2021/09/01 16:36:13 ls + 2416 2021/09/01 16:36:18 tf-init + 2417 2021/09/01 16:36:29 ls + 2418 2021/09/01 16:36:35 less efs.tf + 2419 2021/09/01 16:37:04 tf-plan + 2420 2021/09/01 16:38:07 ls + 2421 2021/09/01 16:38:31 ls ../remote_state.*s3 + 2422 2021/09/01 16:38:39 ln -s ../remote_state.*s3 . + 2423 2021/09/01 16:38:41 tf-plan + 2424 2021/09/01 16:39:35 grep applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test3 *tf + 2425 2021/09/01 16:39:44 cd .. + 2426 2021/09/01 16:39:45 ls + 2427 2021/09/01 16:39:49 cat outputs. 
+ 2428 2021/09/01 16:39:50 cat outputs.tf + 2429 2021/09/01 16:40:04 tf-appy + 2430 2021/09/01 16:40:11 ls -al *tf + 2431 2021/09/01 16:40:15 tf-appy + 2432 2021/09/01 16:40:17 tf-apply + 2433 2021/09/01 16:41:44 cd efs/ + 2434 2021/09/01 16:41:45 ls + 2435 2021/09/01 16:41:49 vi remote_state.yml + 2436 2021/09/01 16:41:53 ls + 2437 2021/09/01 16:41:55 ls -al + 2438 2021/09/01 16:42:17 rm remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test3.tf.s3 + 2439 2021/09/01 16:42:29 ln -s ../remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test3.tf . + 2440 2021/09/01 16:42:30 tf-plan + 2441 2021/09/01 16:43:15 grep ^res *tf + 2442 2021/09/01 16:43:19 grep ^res *tf|grep policy + 2443 2021/09/01 16:43:28 tf-apply -target=aws_iam_policy.efs-policy + 2444 2021/09/01 16:44:23 tf-plan + 2445 2021/09/01 16:45:41 tf-apply + 2446 2021/09/01 16:47:50 vi main.tf + 2447 2021/09/01 16:47:55 vi main.tf kubeconfig.tf + 2448 2021/09/01 16:48:16 tf-apply + 2449 2021/09/01 16:52:46 less pa + 2450 2021/09/01 16:52:47 ls + 2451 2021/09/01 16:52:49 less main.tf + 2452 2021/09/01 16:53:15 grep subnet_ids * + 2453 2021/09/01 16:53:19 vi efs.tf + 2454 2021/09/01 16:54:12 grep pvc *md + 2455 2021/09/01 16:54:21 kubectl describe pvc efs-test3-claim + 2456 2021/09/01 16:54:29 kubectl --kubeconfig setup/kube.config describe pvc efs-test3-claim + 2457 2021/09/01 16:58:59 cd .. + 2458 2021/09/01 16:59:01 ls + 2459 2021/09/01 16:59:03 vi outputs. + 2460 2021/09/01 16:59:05 vi outputs.tf + 2461 2021/09/01 16:59:38 vi outputs.tf main.tf + 2462 2021/09/01 17:01:02 tf-apply + 2463 2021/09/01 17:01:26 cd efs + 2464 2021/09/01 17:01:26 ls + 2465 2021/09/01 17:01:27 vi main.tf + 2466 2021/09/01 17:01:34 vi efs.tf + 2467 2021/09/01 17:01:45 vi locals.tf + 2468 2021/09/01 17:01:47 vi main.tf + 2469 2021/09/01 17:02:33 vi locals.tf + 2470 2021/09/01 17:02:35 vi efs.tf + 2471 2021/09/01 17:02:45 vi locals.tf + 2472 2021/09/01 17:02:47 vi main.tf + 2473 2021/09/01 17:02:52 tf-plan + 2474 2021/09/01 17:03:45 tf-apply + 2475 2021/09/01 17:07:11 ls + 2476 2021/09/01 17:07:13 vi main.tf + 2477 2021/09/01 17:08:09 grep ^resource main.tf + 2478 2021/09/01 17:08:52 tf-destroy + 2479 2021/09/01 17:09:39 grep subnet_id * + 2480 2021/09/01 17:10:17 history|grep target + 2481 2021/09/01 17:10:20 tf-apply -target=aws_iam_policy.efs-policy + 2482 2021/09/01 17:10:38 tf-apply + 2483 2021/09/01 17:17:22 history|grep kube + 2484 2021/09/01 17:17:27 kubectl --kubeconfig setup/kube.config describe pvc efs-test3-claim + 2485 2021/09/01 17:18:15 git status . + 2486 2021/09/01 17:18:27 git add kubeconfig.tf ../kubeconfig.tf ../aws-auth/kubeconfig.tf + 2487 2021/09/01 17:18:34 git add remote*back* remote*test3* + 2488 2021/09/01 17:31:59 git status . + 2489 2021/09/01 17:32:09 git commit -m'update' . + 2490 2021/09/01 17:32:12 git status + 2491 2021/09/01 17:32:19 cd .. + 2492 2021/09/01 17:32:20 git status . + 2493 2021/09/01 17:32:27 git commit -m'add cni to outputs' . + 2494 2021/09/01 17:32:31 git status . + 2495 2021/09/01 17:32:40 ls efs/setup/ + 2496 2021/09/01 17:32:41 ls setup/ + 2497 2021/09/01 17:32:48 git add setup/*yaml + 2498 2021/09/01 17:32:54 git commit -m'add yaml downloads' . 
+ 2499 2021/09/01 17:32:55 git status + 2500 2021/09/01 17:32:57 git push + 2501 2021/09/01 17:33:16 ls + 2502 2021/09/01 17:33:19 cd sample-nlb/ + 2503 2021/09/01 17:33:19 ls + 2504 2021/09/01 17:33:22 vi remote_state.yml + 2505 2021/09/01 17:33:25 tf-init + 2506 2021/09/01 17:33:31 tf-directory-setup.py -l none + 2507 2021/09/01 17:33:32 tf-init + 2508 2021/09/01 17:33:37 ls + 2509 2021/09/01 17:33:41 grep kubectl * + 2510 2021/09/01 17:33:47 vi main.tf + 2511 2021/09/01 17:33:55 grpw subnet + 2512 2021/09/01 17:33:57 grep subnet * + 2513 2021/09/01 17:34:09 grep remote_state * + 2514 2021/09/01 17:34:18 tf-plan + 2515 2021/09/01 17:34:34 tf-apply + 2516 2021/09/01 17:41:49 ls + 2517 2021/09/01 17:41:50 vi main.tf + 2518 2021/09/02 09:00:49 history|grep kube + 2519 2021/09/02 09:01:17 kubectl --kubeconfig setup/kube.config get pod --all-namespaces -o wide + 2520 2021/09/02 09:01:21 ls setup + 2521 2021/09/02 09:01:28 kubectl --kubeconfig ../setup/kube.config get pod --all-namespaces -o wide + 2522 2021/09/02 09:01:54 cd .. + 2523 2021/09/02 09:01:54 ls + 2524 2021/09/02 09:01:58 cd common-services/ + 2525 2021/09/02 09:01:58 ls + 2526 2021/09/02 09:02:03 vi ca-cert.tf + 2527 2021/09/02 09:02:17 ls + 2528 2021/09/02 09:02:22 cd ../sample-nlb/ + 2529 2021/09/02 09:02:22 ls + 2530 2021/09/02 09:02:26 ls -al *tf + 2531 2021/09/02 09:02:28 tf-directory-setup.py -l s3 + 2532 2021/09/02 09:02:30 git status . + 2533 2021/09/02 09:02:36 git add remote_state.backend.tf remote*nlb* + 2534 2021/09/02 09:02:42 git commit -m'add remote states' . + 2535 2021/09/02 09:02:44 git push + 2536 2021/09/02 09:02:48 cd .. + 2537 2021/09/02 09:04:09 kubectl --kubeconfig setup/kube.config get events --all-namespaces -o wide + 2538 2021/09/02 09:04:16 kubectl --kubeconfig setup/kube.config get event --all-namespaces -o wide + 2539 2021/09/02 09:04:26 kubectl --kubeconfig setup/kube.config get event -o wide + 2540 2021/09/02 09:04:38 kubectl --help --kubeconfig setup/kube.config get event -o wide + 2541 2021/09/02 09:04:44 kubectl --help # --kubeconfig setup/kube.config get event -o wide + 2542 2021/09/02 09:04:54 kubectl api-resources + 2543 2021/09/02 09:05:07 kubectl api-resources --kubeconfig setup/kube.config g + 2544 2021/09/02 09:05:08 kubectl api-resources --kubeconfig setup/kube.config + 2545 2021/09/02 09:05:13 kubectl api-resources --kubeconfig setup/kube.config |sort + 2546 2021/09/02 09:05:35 kubectl --kubeconfig setup/kube.config get eniconfigs -o wide + 2547 2021/09/02 09:06:16 kubectl --kubeconfig setup/kube.config get secrets -o wide + 2548 2021/09/02 09:08:29 cd sample-nlb/ + 2549 2021/09/02 09:08:30 ls + 2550 2021/09/02 09:08:32 vi main.tf + 2551 2021/09/02 09:15:48 cd .. + 2552 2021/09/02 09:15:49 ls + 2553 2021/09/02 09:15:55 cat dns-zone.tf + 2554 2021/09/02 09:16:18 cd common-services/ + 2555 2021/09/02 09:16:18 ls + 2556 2021/09/02 09:16:24 grep dns * + 2557 2021/09/02 09:16:26 grep route53 * + 2558 2021/09/02 09:16:31 less dns.tf + 2559 2021/09/02 09:16:44 ls + 2560 2021/09/02 09:16:48 vi remote_state.yml + 2561 2021/09/02 09:16:50 ls + 2562 2021/09/02 09:16:58 tf-directory-setup.py -l none + 2563 2021/09/02 09:17:09 ln -s ../kubeconfig.tf . + 2564 2021/09/02 09:17:14 grep kubectl * + 2565 2021/09/02 09:17:25 tf-init + 2566 2021/09/02 09:17:40 tf-plan + 2567 2021/09/02 09:18:09 ls + 2568 2021/09/02 09:18:18 ls ../remote*tf + 2569 2021/09/02 09:18:24 ln -s ../remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test3.tf . 
+ 2570 2021/09/02 09:18:26 tf-plan + 2571 2021/09/02 10:00:54 vi main.tf + 2572 2021/09/02 10:01:01 tf-plan + 2573 2021/09/02 10:09:00 vi main.tf + 2574 2021/09/02 10:10:14 grep tls_crt_file *tf + 2575 2021/09/02 10:10:16 grep tls_crt_file *tfvars + 2576 2021/09/02 10:10:22 vi main.tf + 2577 2021/09/02 10:12:51 tf-plan + 2578 2021/09/02 10:12:56 vi main.tf + 2579 2021/09/02 10:13:14 tf-plan + 2580 2021/09/02 10:13:16 vi main.tf + 2581 2021/09/02 10:13:41 tf-plan + 2582 2021/09/02 10:13:48 vi main.tf + 2583 2021/09/02 10:14:18 tf-plan + 2584 2021/09/02 10:14:42 grep tls_ *tfvars + 2585 2021/09/02 10:14:44 vi main.tf + 2586 2021/09/02 10:14:53 tf-plan + 2587 2021/09/02 10:19:56 vi dns.tf + 2588 2021/09/02 10:22:44 tf-plan + 2589 2021/09/02 10:23:22 vi dns.tf + 2590 2021/09/02 10:23:59 tf-plan + 2591 2021/09/02 10:24:21 vi dns.tf + 2592 2021/09/02 10:24:29 tf-plan + 2593 2021/09/02 10:25:55 ls + 2594 2021/09/02 10:25:58 vi ca-cert.tf + 2595 2021/09/02 10:29:51 tf-plan + 2596 2021/09/02 10:31:14 ls + 2597 2021/09/02 10:31:20 ln -sf ../variables.vpc.tf . + 2598 2021/09/02 10:31:22 tf-plan + 2599 2021/09/02 10:37:18 tf-apply + 2600 2021/09/02 10:38:13 tf-apply -auto-approve + 2601 2021/09/02 10:39:54 vi ca-cert.tf + 2602 2021/09/02 10:40:42 tf-apply -auto-approve + 2603 2021/09/02 10:41:13 vi ca-cert.tf + 2604 2021/09/02 10:41:22 ls + 2605 2021/09/02 10:41:26 grep no-certificate *tf + 2606 2021/09/02 10:41:29 vi main.tf + 2607 2021/09/02 10:41:54 terraform console + 2608 2021/09/02 10:43:21 vi main.tf + 2609 2021/09/02 10:46:24 tf-plan + 2610 2021/09/02 10:46:39 vi main.tf + 2611 2021/09/02 10:46:49 tf-plan + 2612 2021/09/02 10:47:29 tf-apply + 2613 2021/09/02 10:47:50 tf-apply -auto-approve + 2614 2021/09/02 10:50:14 history|grep pod + 2615 2021/09/02 10:50:19 kubectl --kubeconfig ../setup/kube.config get pod --all-namespaces -o wide + 2616 2021/09/02 10:51:38 kubectl --kubeconfig ../setup/kube.config get secret --all-namespaces -o wide + 2617 2021/09/02 10:51:48 kubectl --kubeconfig ../setup/kube.config get secret --all-namespaces -o wide|grep -iE "tls|cert" + 2618 2021/09/02 10:52:10 kubectl --kubeconfig ../setup/kube.config get secret --all-namespaces -o wide|grep tls + 2619 2021/09/02 12:52:10 git status . + 2620 2021/09/02 12:52:17 git add remo*back* remote*test3* + 2621 2021/09/02 12:52:19 git status . + 2622 2021/09/02 12:52:22 git add kubeconfig.tf + 2623 2021/09/02 12:52:23 ls certs + 2624 2021/09/02 12:52:31 git add certs/*csr certs/*pub* + 2625 2021/09/02 12:52:34 git status . + 2626 2021/09/02 12:52:44 vi .gitignore + 2627 2021/09/02 12:52:57 git add .gitignore + 2628 2021/09/02 12:53:00 git commit -m'ignore key' . + 2629 2021/09/02 12:53:03 git status . + 2630 2021/09/02 12:53:08 git amdend + 2631 2021/09/02 12:53:11 git amend + 2632 2021/09/02 12:54:02 git commit --amend -m'add remote state, ignore .key, change variables.vpc, add kubeconfig' . + 2633 2021/09/02 12:54:05 git push + 2634 2021/09/02 12:56:43 cd .. + 2635 2021/09/02 12:56:43 ls + 2636 2021/09/02 12:56:45 cd sample-istio/ + 2637 2021/09/02 12:56:45 ls + 2638 2021/09/02 12:56:48 vi remote_state.yml + 2639 2021/09/02 12:56:54 tf-directory-setup.py -l none + 2640 2021/09/02 12:56:56 cd ../common-services/ + 2641 2021/09/02 12:56:56 ls + 2642 2021/09/02 12:57:00 tf-directory-setup.py -l s3 + 2643 2021/09/02 12:57:02 git status . + 2644 2021/09/02 12:57:07 git commit -m'update linkl' . 
+ 2645 2021/09/02 12:57:10 cd ../sample-istio/ + 2646 2021/09/02 12:57:10 ls + 2647 2021/09/02 12:57:16 grep kubectl * + 2648 2021/09/02 12:57:22 ln -s ../kubeconfig.tf . + 2649 2021/09/02 12:57:25 ls -al + 2650 2021/09/02 12:57:30 tf-init + 2651 2021/09/02 12:57:38 ln -sf ../variables.vpc.tf . + 2652 2021/09/02 12:57:41 tf-plan + 2653 2021/09/02 12:57:59 tf-apply + 2654 2021/09/02 13:07:31 history|grep kube + 2655 2021/09/02 13:07:41 kubectl --kubeconfig ../setup/kube.config get events + 2656 2021/09/02 13:07:48 kubectl --kubeconfig ../setup/kube.config get events --all-namespaces -o wide + 2657 2021/09/02 13:08:04 kubectl --kubeconfig ../setup/kube.config get events -n my-nginx -o wide + 2658 2021/09/02 13:08:08 clear + 2659 2021/09/02 13:08:09 kubectl --kubeconfig ../setup/kube.config get events -n my-nginx -o wide + 2660 2021/09/02 13:11:49 ls + 2661 2021/09/02 13:11:56 vi copy_images.tf + 2662 2021/09/02 13:12:45 diff copy_images.tf ../../eks-test2//sample-istio/ + 2663 2021/09/02 13:12:50 ls -al + 2664 2021/09/02 13:13:04 diff copy_image.sh ../../eks-test2/sample-istio/ + 2665 2021/09/02 13:13:07 vi main.tf + 2666 2021/09/02 13:13:18 diff main.tf ../../eks-test2/sample-istio/ + 2667 2021/09/02 13:13:27 grep copy *tf + 2668 2021/09/02 13:13:36 grep copy_images logs/*apply* + 2669 2021/09/02 13:14:20 cd ../../eks-test2 + 2670 2021/09/02 13:14:21 cd sample- + 2671 2021/09/02 13:14:22 cd sample-istio/ + 2672 2021/09/02 13:14:23 ls + 2673 2021/09/02 13:14:25 less logs/*apply* + 2674 2021/09/02 13:15:20 cd ../../eks-test3/sample-istio/ + 2675 2021/09/02 13:15:27 less logs/*apply* + 2676 2021/09/02 13:16:05 vi main.tf + 2677 2021/09/02 13:16:16 grep domian *tf + 2678 2021/09/02 13:16:17 grep domain *tf + 2679 2021/09/02 13:16:20 vi main.tf + 2680 2021/09/02 13:16:26 grep domain *tf + 2681 2021/09/02 13:16:28 vi main.tf + 2682 2021/09/02 13:16:40 grpe domain *tf + 2683 2021/09/02 13:16:42 grpe domain *tfvars + 2684 2021/09/02 13:16:43 grep domain *tfvars + 2685 2021/09/02 13:16:53 cd ../.. + 2686 2021/09/02 13:16:57 cd eks-test2/sample-istio/ + 2687 2021/09/02 13:16:57 grep domain *tfvars + 2688 2021/09/02 13:17:06 cd ../.. + 2689 2021/09/02 13:17:13 cd eks-test3/sample-istio/ + 2690 2021/09/02 13:17:13 ls + 2691 2021/09/02 13:17:21 vi test3.auto.tfvars + 2692 2021/09/02 13:17:31 cat ../../eks-test2/test2.auto.tfvars + 2693 2021/09/02 13:17:38 cat ../../eks-test2/sample-istio/test2.auto.tfvars + 2694 2021/09/02 13:17:44 ls + 2695 2021/09/02 13:17:47 ls *auto* + 2696 2021/09/02 13:17:59 grep domain * + 2697 2021/09/02 13:18:06 vi settings.auto.tfvars + 2698 2021/09/02 13:18:27 ls + 2699 2021/09/02 13:30:33 history|grep kube + 2700 2021/09/02 13:30:40 kubectl --kubeconfig ../setup/kube.config get pod --all-namespaces -o wide + 2701 2021/09/02 13:30:50 kubectl --kubeconfig ../setup/kube.config get service --all-namespaces -o wide + 2702 2021/09/02 13:41:04 cd .. 
+ 2703 2021/09/02 13:41:05 ls + 2704 2021/09/02 13:41:18 grep LoadBalanacer * + 2705 2021/09/02 13:41:21 cd common-services/ + 2706 2021/09/02 13:41:22 grep LoadBalanacer * + 2707 2021/09/02 13:41:25 grep LoadBalanacer */* + 2708 2021/09/02 13:41:27 grep LoadBalanacer */*/* + 2709 2021/09/02 13:41:43 gre pingressgateway + 2710 2021/09/02 13:41:45 grep ingress * + 2711 2021/09/02 13:42:01 grep ingress */* + 2712 2021/09/02 13:42:05 grep ingress */*/* + 2713 2021/09/02 13:42:08 grep ingress */*/*/* + 2714 2021/09/02 13:42:17 vi main.tf + 2715 2021/09/02 13:42:35 grep -i clusterip * + 2716 2021/09/02 13:42:38 grep -i clusterip */ + 2717 2021/09/02 13:42:42 grep -i clusterip */*/* + 2718 2021/09/02 13:42:44 grep -i clusterip */*/*/* + 2719 2021/09/02 13:42:51 vi charts/istio-profile/templates/istiooperator.yaml + 2720 2021/09/02 13:43:55 kubectl --kubeconfig ../setup/kube.config get events -n my-nginx -o wide|grep ecr + 2721 2021/09/02 13:44:54 ls + 2722 2021/09/02 13:44:58 cd ../sample-istio/ + 2723 2021/09/02 13:44:59 ls + 2724 2021/09/02 13:45:01 vi main.tf + 2725 2021/09/02 13:45:19 vi *.tf + 2726 2021/09/02 13:47:06 ls + 2727 2021/09/02 13:47:15 vi variables.sample.tf + 2728 2021/09/02 13:47:19 ls + 2729 2021/09/02 13:47:30 less log/*app* + 2730 2021/09/02 13:47:35 less logs/*app* + 2731 2021/09/02 13:49:37 cd ../../ + 2732 2021/09/02 13:49:42 cd eks-test2/sample-istio/ + 2733 2021/09/02 13:49:47 less logs/*appy* + 2734 2021/09/02 13:49:50 less logs/*apply* + 2735 2021/09/02 13:50:17 cd ../../eks-test3 + 2736 2021/09/02 13:50:22 cd sample-istio/ + 2737 2021/09/02 13:50:25 less logs/*apply* + 2738 2021/09/02 13:50:48 grep login ../*READ* + 2739 2021/09/02 13:50:58 history|grep login + 2740 2021/09/02 13:51:08 pushd ~/docker + 2741 2021/09/02 13:51:08 ls + 2742 2021/09/02 13:51:13 cat pull.sh + 2743 2021/09/02 13:51:23 ls images/ + 2744 2021/09/02 13:51:24 less x + 2745 2021/09/02 13:51:26 ls + 2746 2021/09/02 13:51:33 history|grep export + 2747 2021/09/02 13:51:38 export HTTP_PROXY=http://proxy.tco.census.gov:3128 + 2748 2021/09/02 13:51:43 export ECR_NAME=252960665057.dkr.ecr.us-gov-east-1.amazonaws.com + 2749 2021/09/02 13:51:51 ls + 2750 2021/09/02 13:52:01 cat ~/.aws/config + 2751 2021/09/02 13:52:18 export AWS_PROFILE=252960665057-ma6-gov + 2752 2021/09/02 13:52:42 ecr get-login-password --region us-gov-east-1 > ecr-login.txt + 2753 2021/09/02 13:53:53 aws ecr get-login-password --region us-gov-east-1 > ecr-login.txt + 2754 2021/09/02 13:54:21 skopeo login --username AWS --password $(cat ecr-login.txt ) 252960665057.dkr.ecr.us-gov-east-1.amazonaws.com + 2755 2021/09/02 13:59:15 skopeo inspect + 2756 2021/09/02 13:59:18 [1:43 PM] Dayong Lu (CENSUS/ADSD CTR) + 2757 2021/09/02 13:59:18 + 2758 2021/09/02 13:59:18 Failed to pull image "079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx/nginx:1.21": rpc error: code = + 2759 2021/09/02 13:59:39 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx/nginx:1.21 + 2760 2021/09/02 13:59:56 history|grep login + 2761 2021/09/02 14:00:23 export AWS_PROFILE=252960665057-ma6-gov + 2762 2021/09/02 14:00:31 unexport AWS_PROFILE + 2763 2021/09/02 14:00:37 unset AWS_PROFILE + 2764 2021/09/02 14:01:10 aws --profile 079788916859-do2-cat ecr get-login-password --region us-east-1 > ecr-login.txt + 2765 2021/09/02 14:01:30 skopeo login --username AWS --password $(cat ecr-login.txt ) 079788916859.dkr.ecr.us-east-1.amazonaws.com + 2766 2021/09/02 14:01:38 skopeo inspect 
docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx/nginx:1.21 + 2767 2021/09/02 14:01:45 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test2/nginx/nginx:1.21 + 2768 2021/09/02 14:01:59 popd + 2769 2021/09/02 14:02:12 tf-plan + 2770 2021/09/02 14:03:27 tf-apply + 2771 2021/09/02 15:38:16 ls + 2772 2021/09/02 15:38:18 tf-destroy + 2773 2021/09/02 15:45:26 git status . + 2774 2021/09/02 15:45:40 git add settings.auto.tfvars remote*istio* remote*back* + 2775 2021/09/02 15:45:41 git status . + 2776 2021/09/02 15:45:44 git add kubeconfig.tf + 2777 2021/09/02 15:45:52 git commit -m'add remote state, kubeconfig' . + 2778 2021/09/02 15:45:54 ls + 2779 2021/09/02 15:45:57 git status . + 2780 2021/09/02 15:45:59 tf-directory-setup.py -l s3 + 2781 2021/09/02 15:46:04 git commit -m'change link' . + 2782 2021/09/02 15:46:06 ls -al set + 2783 2021/09/02 15:46:08 ls -al setup/ + 2784 2021/09/02 15:46:09 ls + 2785 2021/09/02 15:46:12 git status . + 2786 2021/09/02 15:46:14 cd .. + 2787 2021/09/02 15:46:14 ls + 2788 2021/09/02 15:46:18 cd sample-nlb/ + 2789 2021/09/02 15:46:18 ls + 2790 2021/09/02 15:46:20 tf-destroy + 2791 2021/09/02 15:58:10 git status . + 2792 2021/09/02 15:58:12 ls + 2793 2021/09/02 15:58:15 cd .. + 2794 2021/09/02 15:58:16 ls + 2795 2021/09/02 15:58:19 cd common-services/ + 2796 2021/09/02 15:58:20 git status . + 2797 2021/09/02 15:58:24 tf-destroy + 2798 2021/09/02 16:09:39 tf-destroy -auto-approve + 2799 2021/09/02 16:35:41 terraform state list + 2800 2021/09/02 16:35:51 cd .. + 2801 2021/09/02 16:35:52 ls + 2802 2021/09/02 16:35:54 cd efs/ + 2803 2021/09/02 16:35:55 lt + 2804 2021/09/02 16:35:57 tf-destroy + 2805 2021/09/02 16:37:30 cd .. + 2806 2021/09/02 16:37:31 ls + 2807 2021/09/02 16:37:36 cd alb-controller/ + 2808 2021/09/02 16:37:37 ls + 2809 2021/09/02 16:37:41 terraform state list + 2810 2021/09/02 16:37:44 cd .. + 2811 2021/09/02 16:37:44 ls + 2812 2021/09/02 16:37:52 cd aws-auth/ + 2813 2021/09/02 16:37:53 ls + 2814 2021/09/02 16:37:55 terraform state list + 2815 2021/09/02 16:38:09 less patch-aws-auth.tf + 2816 2021/09/02 16:38:16 tf-destroy + 2817 2021/09/02 16:38:37 cat setup/config_map.patch.yaml + 2818 2021/09/02 16:38:39 cd .. + 2819 2021/09/02 16:38:40 ls + 2820 2021/09/02 16:38:43 tf-destroy + 2821 2021/09/02 16:49:12 git status . 
+ 2822 2021/09/02 16:49:28 git diff eniconfig.yaml + 2823 2021/09/02 16:49:39 git diff .|less + 2824 2021/09/02 16:49:43 ls + 2825 2021/09/02 16:54:39 mv test3.auto.tfvars settings.auto.tfvars + 2826 2021/09/02 16:54:40 vi set + 2827 2021/09/02 16:54:43 vbi settings.auto.tfvars + 2828 2021/09/02 16:54:45 vi settings.auto.tfvars + 2829 2021/09/02 16:55:21 ls */sett* + 2830 2021/09/02 16:55:26 vi sample-istio/variables.* + 2831 2021/09/02 16:55:48 ls + 2832 2021/09/02 16:55:52 vi variables.vpc.tf + 2833 2021/09/02 16:55:57 ls + 2834 2021/09/02 16:55:58 vi eks-console-access.tf + 2835 2021/09/02 16:55:59 ls + 2836 2021/09/02 16:56:01 vi variables.eks.tf + 2837 2021/09/02 16:56:18 ls + 2838 2021/09/02 16:56:41 terraform state list + 2839 2021/09/02 16:57:24 grep 1.20 * + 2840 2021/09/02 16:57:31 grep 1.20 * -A5 + 2841 2021/09/02 16:57:34 grep 1.20 * -B5 + 2842 2021/09/02 16:57:41 vi settings.auto.tfvars + 2843 2021/09/02 16:57:52 ls + 2844 2021/09/02 16:58:38 tf-state list + 2845 2021/09/02 16:58:51 history|grep target + 2846 2021/09/02 16:58:56 tf-plan -target=aws_iam_policy.nlb-policy -target=aws_iam_policy.cloudwatch-policy -target=aws_iam_policy.cluster-admin-policy + 2847 2021/09/02 16:59:15 tf-apply -target=aws_iam_policy.nlb-policy -target=aws_iam_policy.cloudwatch-policy -target=aws_iam_policy.cluster-admin-policy + 2848 2021/09/02 16:59:40 tf-plan + 2849 2021/09/02 17:10:46 tf-plan less + 2850 2021/09/02 17:17:44 tf-apply + 2851 2021/09/02 17:18:24 tf-apply -auto-approve + 2852 2021/09/02 19:25:37 tf-apply + 2853 2021/09/03 08:36:02 vi main.tf kubeconfig.tf + 2854 2021/09/03 08:37:11 grep aws_eks_cluster */*tf + 2855 2021/09/03 08:37:35 ls -al */provid* + 2856 2021/09/03 08:37:43 less alb-controller/providers.tf + 2857 2021/09/03 08:38:00 less main.tf + 2858 2021/09/03 08:38:09 terraform console + 2859 2021/09/03 08:38:52 vi kubeconfig.tf + 2860 2021/09/03 08:39:26 vi main.tf + 2861 2021/09/03 08:39:55 tf-plan + 2862 2021/09/03 08:40:10 vi kubeconfig.tf + 2863 2021/09/03 08:40:17 tf-plan + 2864 2021/09/03 08:41:05 ls -al */kubeconfig* + 2865 2021/09/03 08:41:20 rm */kubeconfig.tf + 2866 2021/09/03 08:41:24 cp kubeconfig.tf aws-auth/ + 2867 2021/09/03 08:41:27 cp kubeconfig.tf common-services/ + 2868 2021/09/03 08:41:31 cp kubeconfig.tf efs/ + 2869 2021/09/03 08:41:34 cp kubeconfig.tf sample-istio/ + 2870 2021/09/03 08:41:43 vi */kubeconfig.tf + 2871 2021/09/03 08:42:12 tf-apply + 2872 2021/09/03 09:13:54 ls + 2873 2021/09/03 09:13:57 cd aws-auth/ + 2874 2021/09/03 09:13:57 ls + 2875 2021/09/03 09:14:01 tf-init + 2876 2021/09/03 09:14:11 ls ../*auto* + 2877 2021/09/03 09:14:20 ln -s ../settings.auto.tfvars . + 2878 2021/09/03 09:14:23 rm test3.auto.tfvars + 2879 2021/09/03 09:14:30 cat test3.aws-auth.auto.tfvars + 2880 2021/09/03 09:14:45 mv test3.aws-auth.auto.tfvars aws-auth.auto.tvfars + 2881 2021/09/03 09:14:54 mv aws-auth.auto.tvfars aws-auth.auto.tfvars + 2882 2021/09/03 09:14:56 tf-plan + 2883 2021/09/03 09:15:27 grep cluster_version ../*tf + 2884 2021/09/03 09:15:38 ln -s ../variables.eks.tf . 
+ 2885 2021/09/03 09:15:40 tf-plan + 2886 2021/09/03 09:15:48 vi variables.aws-auth.tf + 2887 2021/09/03 09:15:54 tf-plan + 2888 2021/09/03 09:16:23 tf-apply + 2889 2021/09/03 09:17:26 history|grpe kube + 2890 2021/09/03 09:17:28 history|grep kube + 2891 2021/09/03 09:17:43 kubectl --kubeconfig setup/kube.config get pod --all-namespaces -o wide + 2892 2021/09/03 09:17:58 kubectl --kubeconfig setup/kube.config get service --all-namespaces -o wide + 2893 2021/09/03 09:18:07 kubectl --kubeconfig setup/kube.config get events --all-namespaces -o wide + 2894 2021/09/03 09:18:16 kubectl --kubeconfig setup/kube.config get all --all-namespaces -o wide + 2895 2021/09/03 09:18:29 ls + 2896 2021/09/03 09:18:32 cd ../efs/ + 2897 2021/09/03 09:18:32 ls + 2898 2021/09/03 09:18:39 rm test3.auto.tfvars + 2899 2021/09/03 09:18:42 ln -s ../settings.auto.tfvars . + 2900 2021/09/03 09:18:46 ln -s ../variables.eks.tf . + 2901 2021/09/03 09:18:48 vi variables.efs.tf + 2902 2021/09/03 09:18:59 pwd + 2903 2021/09/03 09:19:00 ls + 2904 2021/09/03 09:19:10 vi main.tf efs.tf + 2905 2021/09/03 09:19:18 tf-plan + 2906 2021/09/03 09:35:07 ls + 2907 2021/09/03 09:35:10 vi variables.efs.tf + 2908 2021/09/03 09:35:29 ls + 2909 2021/09/03 09:35:31 tf-plan + 2910 2021/09/03 09:36:28 grep target README* + 2911 2021/09/03 09:36:37 tf-apply -target=aws_iam_policy.efs-policy + 2912 2021/09/03 09:37:26 tf-plan + 2913 2021/09/03 11:22:27 pwd + 2914 2021/09/03 11:22:28 ls + 2915 2021/09/03 11:22:32 tf-apply + 2916 2021/09/03 11:38:40 kubectl --kubeconfig setup/kube.config get all --all-namespaces -o wide + 2917 2021/09/03 11:41:10 ls + 2918 2021/09/03 11:41:12 cd .. + 2919 2021/09/03 11:41:12 ls + 2920 2021/09/03 11:59:57 pwd + 2921 2021/09/03 11:59:57 ls + 2922 2021/09/03 12:00:00 cd common-services/ + 2923 2021/09/03 12:00:01 ls + 2924 2021/09/03 12:00:03 ls -al certs/ + 2925 2021/09/03 12:00:21 rm certs/* + 2926 2021/09/03 12:00:21 ls + 2927 2021/09/03 12:00:25 rm test3.auto.tfvars + 2928 2021/09/03 12:00:30 ln -s ../settings.auto.tfvars . + 2929 2021/09/03 12:00:33 tf-init + 2930 2021/09/03 14:24:57 ls + 2931 2021/09/03 14:24:59 pwd + 2932 2021/09/03 14:25:01 tf-plan + 2933 2021/09/03 14:25:37 tf-apply + 2934 2021/09/03 14:26:07 git status . + 2935 2021/09/03 14:26:08 git status + 2936 2021/09/03 14:26:13 git add settings.auto.tfvars + 2937 2021/09/03 14:26:16 git status + 2938 2021/09/03 14:26:22 cd .. + 2939 2021/09/03 14:26:25 git add */settings.auto* + 2940 2021/09/03 14:26:27 git status + 2941 2021/09/03 14:26:32 git commit -m'add settings' . + 2942 2021/09/03 14:26:35 git push + 2943 2021/09/03 14:26:41 cd common-services/ + 2944 2021/09/03 14:26:43 tf-apply + 2945 2021/09/03 14:27:53 ls + 2946 2021/09/03 14:27:58 ln -s ../variables.eks.tf . + 2947 2021/09/03 14:28:00 cd .. + 2948 2021/09/03 14:28:02 ls -al set + 2949 2021/09/03 14:28:04 gi tadd sett + 2950 2021/09/03 14:28:07 git add settings.auto.tfvars + 2951 2021/09/03 14:28:11 git commit -m'add settings' . + 2952 2021/09/03 14:28:13 git push + 2953 2021/09/03 14:28:19 cd common-services/ + 2954 2021/09/03 14:28:20 tf-plan + 2955 2021/09/03 14:28:26 vi variables.common-services.tf + 2956 2021/09/03 14:28:36 tf-plan + 2957 2021/09/03 14:29:11 cd .. 
+ 2958 2021/09/03 14:29:12 tf-plan + 2959 2021/09/03 14:29:27 terraform force-unlock 7080002d-b733-e956-f8d8-5241cc298832 + 2960 2021/09/03 14:29:38 cd common-services/ + 2961 2021/09/03 14:29:42 tf-apply + 2962 2021/09/03 14:36:59 history|grep kube + 2963 2021/09/03 14:37:03 kubectl --kubeconfig setup/kube.config get all --all-namespaces -o wide + 2964 2021/09/03 15:42:25 cd ../sample-istio/ + 2965 2021/09/03 15:42:25 ls + 2966 2021/09/03 15:42:28 rm test3.auto.tfvars + 2967 2021/09/03 15:42:32 ln -s ../settings.auto.tfvars + 2968 2021/09/03 15:42:37 cat settings.auto.tfvars + 2969 2021/09/03 15:42:43 cat ../settings.auto.tfvars + 2970 2021/09/03 15:42:49 ln -sf ../settings.auto.tfvars + 2971 2021/09/03 15:42:52 ln -s ../variables.eks.tf . + 2972 2021/09/03 15:42:54 tf-plan + 2973 2021/09/03 15:43:01 vi variables.sample.tf + 2974 2021/09/03 15:43:07 tf-plan + 2975 2021/09/03 15:43:43 tf-apply + 2976 2021/09/03 19:54:53 kubectl --kubeconfig setup/kube.config get all --all-namespaces -o wide + 2977 2021/09/03 19:55:21 kubectl --kubeconfig setup/kube.config get all -n my-nginx -o wide + 2978 2021/09/03 19:55:31 kubectl --kubeconfig setup/kube.config get events -n my-nginx -o wide + 2979 2021/09/03 19:55:52 cd ../.. + 2980 2021/09/03 19:55:55 cd eks-test2 + 2981 2021/09/03 19:56:00 cd sample-it + 2982 2021/09/03 19:56:01 cd sample-istio/ + 2983 2021/09/03 19:56:05 kubectl --kubeconfig setup/kube.config get events -n my-nginx -o wide + 2984 2021/09/03 19:56:18 cp ../../eks-test3/sample-istio/kubeconfig.tf . + 2985 2021/09/03 19:56:19 ls + 2986 2021/09/03 19:56:21 tf-plan + 2987 2021/09/03 19:56:36 tf-apply + 2988 2021/09/03 19:56:54 kubectl --kubeconfig setup/kube.config get events -n my-nginx -o wide + 2989 2021/09/03 19:57:12 kubectl --kubeconfig setup/kube.config get all --all-namespaces -o wide + 2990 2021/09/03 19:57:28 kubectl --kubeconfig setup/kube.config get all -n my-nginx -o wide + 2991 2021/09/03 19:57:42 cd .. + 2992 2021/09/03 19:57:46 cd ../eks-test3 + 2993 2021/09/03 19:57:53 cd sample-s + 2994 2021/09/03 19:57:53 ls + 2995 2021/09/03 19:57:55 cd sample-istio/ + 2996 2021/09/03 19:57:58 less logs/*appy* + 2997 2021/09/03 19:58:03 less logs/*apply* + 2998 2021/09/03 19:58:41 grep copy_images ../../eks-test2/sample-istio/logs/*apply* + 2999 2021/09/03 19:59:10 pwd + 3000 2021/09/03 19:59:18 less copy_images.tf + 3001 2021/09/03 20:01:22 grep nginx/nginx ../../eks-test2/sample-istio/logs/*apply* + 3002 2021/09/03 20:01:32 grep helm ../../eks-test2/sample-istio/logs/*apply* + 3003 2021/09/03 20:01:39 pwd + 3004 2021/09/03 20:01:49 diff ../../eks-test2/sample-istio/main.tf . + 3005* 2021/09/03 20:01:51 l + 3006 2021/09/03 20:02:00 diff ../../eks-test2/sample-istio/copy_images.tf . + 3007 2021/09/03 20:02:05 diff ../../eks-test2/sample-istio/copy_image.sh + 3008 2021/09/03 20:02:07 diff ../../eks-test2/sample-istio/copy_image.sh . 
+ 3009 2021/09/03 20:02:18 ls charts/ + 3010 2021/09/03 20:02:21 ls charts/my-nginx/ + 3011 2021/09/03 20:02:34 diff ../../eks-test2/sample-istio/charts/my-nginx/values.yaml charts/my-nginx/ + 3012 2021/09/03 20:02:40 less charts/my-nginx/* + 3013 2021/09/03 20:02:54 pwd + 3014 2021/09/03 20:02:55 cd charts/ + 3015 2021/09/03 20:02:56 ls + 3016 2021/09/03 20:02:58 cd my-nginx/ + 3017 2021/09/03 20:02:58 ls + 3018 2021/09/03 20:03:02 grep test * + 3019 2021/09/03 20:03:04 vi values.yaml + 3020 2021/09/03 20:03:33 cat ../../settings.auto.tfvars + 3021 2021/09/03 20:03:45 vi values.yaml + 3022 2021/09/03 20:04:27 ls templates/ + 3023 2021/09/03 20:04:30 vi templates/* + 3024 2021/09/03 20:04:46 cd .. + 3025 2021/09/03 20:04:51 cd ../eks-test2 + 3026 2021/09/03 20:04:53 cd sample-istio/ + 3027 2021/09/03 20:04:53 ls + 3028 2021/09/03 20:04:54 tf-plan + 3029 2021/09/03 20:05:20 kubectl --kubeconfig setup/kube.config get all -n my-nginx -o wide + 3030 2021/09/03 20:05:39 kubectl --kubeconfig setup/kube.config get service -n my-nginx -o wide + 3031 2021/09/03 20:05:53 kubectl --kubeconfig setup/kube.config get deployment -n my-nginx -o wide + 3032 2021/09/03 20:06:00 pwd + 3033 2021/09/03 20:06:04 cd ../../eks-test2 + 3034 2021/09/03 20:06:06 cd sample- + 3035 2021/09/03 20:06:08 cd ../eks-test2 + 3036 2021/09/03 20:06:09 cd sample-istio/ + 3037 2021/09/03 20:06:10 ls + 3038 2021/09/03 20:06:12 pwd + 3039 2021/09/03 20:06:15 cd ../.. + 3040 2021/09/03 20:06:18 cd eks-test3 + 3041 2021/09/03 20:06:19 cd sample-istio/ + 3042 2021/09/03 20:06:20 ls + 3043 2021/09/03 20:06:23 tf-plan + 3044 2021/09/03 20:07:03 grep ^re copy_images.tf + 3045 2021/09/03 20:07:10 pwd + 3046 2021/09/03 20:07:10 ls + 3047 2021/09/03 20:07:14 less copy_images.tf + 3048 2021/09/03 20:07:37 terraform console + 3049 2021/09/03 20:08:12 #AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) + 3050 2021/09/03 20:08:17 grep copy_image logs/*app* + 3051 2021/09/03 20:09:41 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) SOURCE_IMAGE=public.ecr.aws/nginx/nginx:1.21 DESTINATION_IMAGE=079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx:1.21 DESTINATION_USERNAME=AWS 
DESTINATION_PASSWORD="eyJwYXlsb2FkIjoiNEVCdTVSTlpiWUh1K3NNa050OFd3VisrOURDL3VCYmNCOEhPOUZBdlhXaDlGMWRVVldobWdsQ3VZQ0xHQXpBUVJmZzBWN0JoY0ZrU3NDTldOUmdUemVPZnlIZDJNWTlORFczNCs3K05NVkFYUVhlTU9xRTJxN1dOdE4rU3cyRE5tS3ZyZWswOG5JOTlJWSs4UHRhd1pvQjVNRzNxYWF1ODNrNTJpeEJFQXZ1WGlOb3RMeUEwdHBSeUJGeFdXY3pJYUJ2T3lDODY0UGZ1UjJzMFFKUnJldjYyVFduSEY1eGkxOFVRcFVVZkRYeSsrZkF3TEhBbm5QbE5lMkxCSUM2NTJNQlVRQzRrQVN2elBOeld4dE0vSHl1aWpZQjF1aEZQbTJmVVdJRHVrMXcrOFNJU0lIeDI1S2lGTS9yOFYxWE5pemdZRzJidUpjakZaRGpWa0grclRBQmdSRnlCNjN4S09aUU05YjhSVkt3dEV3Y09hUzBrS2RIMXQvbXNUQXRWM0hEN2JKQVk1TG5pazJ5VE5LTlVTSTRSMDZXMkIrWlYveGdvUjBtSXBGN25oOWZ1SStyM0lTMUpaVUwzYzBFQ0Q5ZWFSejJURmxFd1VyMFJOVkcxNnl4MnVZRnFBTWZ5a2dsYUJ1ZTlPS0VUUzVZVkNlTkFDZXJHZDFNRmgrM1NCZFNJeWpVaWs2T3I3c2dNc3Aydm1jU3c4eDYwdHJLNVRNQm44ejBSRDA1RnBsVFVKS2YrNGlkelkyT1p3M1pqcnlLRjlORnZ4NkphVi9mc0V0eklpQTQybUo4YllkQWFZYmpQVVV3L0pMUGlybXlIdXdOTEFXNVVMeHFzNnJxTGlmSk0yQ082bUlFaTd2VUVobk1IUUNlbDVLa0FwMEpIRm5GWmk1d2pZWVNJbEdTRk4ySDFKaUFObzRlVk8wbURSZ0ZMd0Mrc1dvcW1Qbk0vNStaTGtnQUpUUDJWaWJUVHVhREF6S2N2eWJtQ2xYVmNIZk9rNXZycmdKL2ZhQ1VjK2dNYTIxRW5pUnA0Sm5nZFd2U3FhRHJQRnc0aHlEYUZlK0tVVmlqYjRjRUpFaUhNbVJRV1FCdDc2cExZbnVtdDczTjZPQUFLK2VpZnQ1aEU0MnlqUUF0STRhYWI5VnlvTnJnL01KQW8xc2JhaFpXV0dFK2N1RzdQZlhxNHFRek9mMHF3U1lwS3Yzc0JqNVNVblp6UU82T1dZWS9lSldWdVQrQ1pUdkR0Q21ERE9UZUVwOEVqMmhJT0s3YVp4VHp6USt0SzJlV1hoemhuZnV5YTFkYXJDSFhRV2Zsd25rbVI2RGlZQWdOWGI4VVBwUTFSZXZsa3VGVlFERHdkR1ZldHNSZVB5T21OTlE5dHBYWExhYjcvMkhLOFpOQ0Y2d2FLZ3c9PSIsImRhdGFrZXkiOiJBUUVCQUhod20wWWFJU0plUnRKbTVuMUc2dXFlZWtYdW9YWFBlNVVGY2U5UnE4LzE0d0FBQUg0d2ZBWUpLb1pJaHZjTkFRY0dvRzh3YlFJQkFEQm9CZ2txaGtpRzl3MEJCd0V3SGdZSllJWklBV1VEQkFFdU1CRUVETGlZWjVXUTVpdm1wUkNIcVFJQkVJQTdvZm1DZy9oR29uZmJYSVFIaWRNVmd4M1l2VDl0cnNTVEdKVmJPclBvQWw4dURqc3I0WEZSMVRUelZOaENPclFkS0ZuK21ZdkhMRmdKWGE0PSIsInZlcnNpb24iOiIyIiwidHlwZSI6IkRBVEFfS0VZIiwiZXhwaXJhdGlvbiI6MTYzMDc0MTQyOX0=" bash -x ./copy_image.sh |& tee XX + 3052 2021/09/03 20:10:30 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) aws ecr describe-repositories --region us-east-1 --output json --repository-names eks/test3/nginx + 3053 2021/09/03 20:10:41 get-profile + 3054 2021/09/03 20:10:48 get-region + 3055 2021/09/03 20:10:56 vi ~/.bash.a + 3056 2021/09/03 20:10:59 vi ~/.bash.als + 3057 2021/09/03 20:11:05 cat ~/.bash/aliases + 3058 2021/09/03 20:11:07 ls ~/.bash* + 3059 2021/09/03 20:11:11 ls ~/.bash_aliases + 3060 2021/09/03 20:11:14 cat ~/.bash_aliases + 3061 2021/09/03 20:11:18 pwd + 3062 2021/09/03 20:11:24 alias + 3063 2021/09/03 20:11:31 source ~/.bash_aliases + 3064 2021/09/03 20:11:37 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) SOURCE_IMAGE=public.ecr.aws/nginx/nginx:1.21 DESTINATION_IMAGE=079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx:1.21 DESTINATION_USERNAME=AWS 
DESTINATION_PASSWORD="eyJwYXlsb2FkIjoiNEVCdTVSTlpiWUh1K3NNa050OFd3VisrOURDL3VCYmNCOEhPOUZBdlhXaDlGMWRVVldobWdsQ3VZQ0xHQXpBUVJmZzBWN0JoY0ZrU3NDTldOUmdUemVPZnlIZDJNWTlORFczNCs3K05NVkFYUVhlTU9xRTJxN1dOdE4rU3cyRE5tS3ZyZWswOG5JOTlJWSs4UHRhd1pvQjVNRzNxYWF1ODNrNTJpeEJFQXZ1WGlOb3RMeUEwdHBSeUJGeFdXY3pJYUJ2T3lDODY0UGZ1UjJzMFFKUnJldjYyVFduSEY1eGkxOFVRcFVVZkRYeSsrZkF3TEhBbm5QbE5lMkxCSUM2NTJNQlVRQzRrQVN2elBOeld4dE0vSHl1aWpZQjF1aEZQbTJmVVdJRHVrMXcrOFNJU0lIeDI1S2lGTS9yOFYxWE5pemdZRzJidUpjakZaRGpWa0grclRBQmdSRnlCNjN4S09aUU05YjhSVkt3dEV3Y09hUzBrS2RIMXQvbXNUQXRWM0hEN2JKQVk1TG5pazJ5VE5LTlVTSTRSMDZXMkIrWlYveGdvUjBtSXBGN25oOWZ1SStyM0lTMUpaVUwzYzBFQ0Q5ZWFSejJURmxFd1VyMFJOVkcxNnl4MnVZRnFBTWZ5a2dsYUJ1ZTlPS0VUUzVZVkNlTkFDZXJHZDFNRmgrM1NCZFNJeWpVaWs2T3I3c2dNc3Aydm1jU3c4eDYwdHJLNVRNQm44ejBSRDA1RnBsVFVKS2YrNGlkelkyT1p3M1pqcnlLRjlORnZ4NkphVi9mc0V0eklpQTQybUo4YllkQWFZYmpQVVV3L0pMUGlybXlIdXdOTEFXNVVMeHFzNnJxTGlmSk0yQ082bUlFaTd2VUVobk1IUUNlbDVLa0FwMEpIRm5GWmk1d2pZWVNJbEdTRk4ySDFKaUFObzRlVk8wbURSZ0ZMd0Mrc1dvcW1Qbk0vNStaTGtnQUpUUDJWaWJUVHVhREF6S2N2eWJtQ2xYVmNIZk9rNXZycmdKL2ZhQ1VjK2dNYTIxRW5pUnA0Sm5nZFd2U3FhRHJQRnc0aHlEYUZlK0tVVmlqYjRjRUpFaUhNbVJRV1FCdDc2cExZbnVtdDczTjZPQUFLK2VpZnQ1aEU0MnlqUUF0STRhYWI5VnlvTnJnL01KQW8xc2JhaFpXV0dFK2N1RzdQZlhxNHFRek9mMHF3U1lwS3Yzc0JqNVNVblp6UU82T1dZWS9lSldWdVQrQ1pUdkR0Q21ERE9UZUVwOEVqMmhJT0s3YVp4VHp6USt0SzJlV1hoemhuZnV5YTFkYXJDSFhRV2Zsd25rbVI2RGlZQWdOWGI4VVBwUTFSZXZsa3VGVlFERHdkR1ZldHNSZVB5T21OTlE5dHBYWExhYjcvMkhLOFpOQ0Y2d2FLZ3c9PSIsImRhdGFrZXkiOiJBUUVCQUhod20wWWFJU0plUnRKbTVuMUc2dXFlZWtYdW9YWFBlNVVGY2U5UnE4LzE0d0FBQUg0d2ZBWUpLb1pJaHZjTkFRY0dvRzh3YlFJQkFEQm9CZ2txaGtpRzl3MEJCd0V3SGdZSllJWklBV1VEQkFFdU1CRUVETGlZWjVXUTVpdm1wUkNIcVFJQkVJQTdvZm1DZy9oR29uZmJYSVFIaWRNVmd4M1l2VDl0cnNTVEdKVmJPclBvQWw4dURqc3I0WEZSMVRUelZOaENPclFkS0ZuK21ZdkhMRmdKWGE0PSIsInZlcnNpb24iOiIyIiwidHlwZSI6IkRBVEFfS0VZIiwiZXhwaXJhdGlvbiI6MTYzMDc0MTQyOX0=" bash -x ./copy_image.sh |& tee XX + 3065 2021/09/03 20:12:01 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) aws ecr describe-repositories --region us-east-1 --output json --repository-names eks/test3/nginx + 3066 2021/09/03 20:12:56 skopeo inspect --dest-creds 
AWS:eyJwYXlsb2FkIjoiNEVCdTVSTlpiWUh1K3NNa050OFd3VisrOURDL3VCYmNCOEhPOUZBdlhXaDlGMWRVVldobWdsQ3VZQ0xHQXpBUVJmZzBWN0JoY0ZrU3NDTldOUmdUemVPZnlIZDJNWTlORFczNCs3K05NVkFYUVhlTU9xRTJxN1dOdE4rU3cyRE5tS3ZyZWswOG5JOTlJWSs4UHRhd1pvQjVNRzNxYWF1ODNrNTJpeEJFQXZ1WGlOb3RMeUEwdHBSeUJGeFdXY3pJYUJ2T3lDODY0UGZ1UjJzMFFKUnJldjYyVFduSEY1eGkxOFVRcFVVZkRYeSsrZkF3TEhBbm5QbE5lMkxCSUM2NTJNQlVRQzRrQVN2elBOeld4dE0vSHl1aWpZQjF1aEZQbTJmVVdJRHVrMXcrOFNJU0lIeDI1S2lGTS9yOFYxWE5pemdZRzJidUpjakZaRGpWa0grclRBQmdSRnlCNjN4S09aUU05YjhSVkt3dEV3Y09hUzBrS2RIMXQvbXNUQXRWM0hEN2JKQVk1TG5pazJ5VE5LTlVTSTRSMDZXMkIrWlYveGdvUjBtSXBGN25oOWZ1SStyM0lTMUpaVUwzYzBFQ0Q5ZWFSejJURmxFd1VyMFJOVkcxNnl4MnVZRnFBTWZ5a2dsYUJ1ZTlPS0VUUzVZVkNlTkFDZXJHZDFNRmgrM1NCZFNJeWpVaWs2T3I3c2dNc3Aydm1jU3c4eDYwdHJLNVRNQm44ejBSRDA1RnBsVFVKS2YrNGlkelkyT1p3M1pqcnlLRjlORnZ4NkphVi9mc0V0eklpQTQybUo4YllkQWFZYmpQVVV3L0pMUGlybXlIdXdOTEFXNVVMeHFzNnJxTGlmSk0yQ082bUlFaTd2VUVobk1IUUNlbDVLa0FwMEpIRm5GWmk1d2pZWVNJbEdTRk4ySDFKaUFObzRlVk8wbURSZ0ZMd0Mrc1dvcW1Qbk0vNStaTGtnQUpUUDJWaWJUVHVhREF6S2N2eWJtQ2xYVmNIZk9rNXZycmdKL2ZhQ1VjK2dNYTIxRW5pUnA0Sm5nZFd2U3FhRHJQRnc0aHlEYUZlK0tVVmlqYjRjRUpFaUhNbVJRV1FCdDc2cExZbnVtdDczTjZPQUFLK2VpZnQ1aEU0MnlqUUF0STRhYWI5VnlvTnJnL01KQW8xc2JhaFpXV0dFK2N1RzdQZlhxNHFRek9mMHF3U1lwS3Yzc0JqNVNVblp6UU82T1dZWS9lSldWdVQrQ1pUdkR0Q21ERE9UZUVwOEVqMmhJT0s3YVp4VHp6USt0SzJlV1hoemhuZnV5YTFkYXJDSFhRV2Zsd25rbVI2RGlZQWdOWGI4VVBwUTFSZXZsa3VGVlFERHdkR1ZldHNSZVB5T21OTlE5dHBYWExhYjcvMkhLOFpOQ0Y2d2FLZ3c9PSIsImRhdGFrZXkiOiJBUUVCQUhod20wWWFJU0plUnRKbTVuMUc2dXFlZWtYdW9YWFBlNVVGY2U5UnE4LzE0d0FBQUg0d2ZBWUpLb1pJaHZjTkFRY0dvRzh3YlFJQkFEQm9CZ2txaGtpRzl3MEJCd0V3SGdZSllJWklBV1VEQkFFdU1CRUVETGlZWjVXUTVpdm1wUkNIcVFJQkVJQTdvZm1DZy9oR29uZmJYSVFIaWRNVmd4M1l2VDl0cnNTVEdKVmJPclBvQWw4dURqc3I0WEZSMVRUelZOaENPclFkS0ZuK21ZdkhMRmdKWGE0PSIsInZlcnNpb24iOiIyIiwidHlwZSI6IkRBVEFfS0VZIiwiZXhwaXJhdGlvbiI6MTYzMDc0MTQyOX0= docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx:1.21 + 3067 2021/09/03 20:13:05 ls + 3068 2021/09/03 20:13:07 vi main.tf + 3069 2021/09/03 20:13:22 ls + 3070 2021/09/03 20:13:23 vi locals.tf + 3071 2021/09/03 20:13:25 ls + 3072 2021/09/03 20:13:32 grep repos * + 3073 2021/09/03 20:13:46 grep image_repos *tfvars + 3074 2021/09/03 20:13:47 grep image_repos *tf + 3075 2021/09/03 20:13:56 grep images *tf + 3076 2021/09/03 20:14:03 vi copy_images.tf + 3077 2021/09/03 20:14:58 pwd + 3078 2021/09/03 20:15:01 tf-plan + 3079 2021/09/03 20:15:17 tf-apply + 3080 2021/09/07 08:21:25 git br + 3081 2021/09/07 10:08:50 history|grep login + 3082 2021/09/07 10:08:57 ls ecr* + 3083 2021/09/07 10:09:02 pushd ~/dock + 3084 2021/09/07 10:09:02 ls + 3085 2021/09/07 10:09:04 pushd ~/docker/ + 3086 2021/09/07 10:09:05 ls + 3087 2021/09/07 10:09:07 cat pull.sh + 3088 2021/09/07 10:09:12 cat x + 3089 2021/09/07 10:09:20 history|grep ecr + 3090 2021/09/07 10:09:24 history|grep ecr > Y + 3091 2021/09/07 10:09:25 vi Y + 3092 2021/09/07 10:13:13 mv Y ecr-login.sh + 3093 2021/09/07 10:13:15 chmod 755 ecr-login. + 3094 2021/09/07 10:13:16 chmod 755 ecr-login.sh + 3095 2021/09/07 10:13:22 vi ecr-login.sh + 3096 2021/09/07 10:13:40 cat ecr-login. 
+ 3097 2021/09/07 10:13:42 cat ecr-login.sh + 3098 2021/09/07 10:13:46 # AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) ./ecr-login.sh + 3099 2021/09/07 10:13:50 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) ./ecr-login.sh + 3100 2021/09/07 10:13:52 ls + 3101 2021/09/07 10:19:16 history|grep PRO + 3102 2021/09/07 10:19:27 echo $AWS_PROFILE + 3103 2021/09/07 10:19:35 popd + 3104 2021/09/07 10:19:55 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) ./ecr-login.sh + 3105 2021/09/07 10:19:59 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) ./docker/ecr-login.sh + 3106 2021/09/07 10:20:06 AWS_PROFILE=$(get-profile) AWS_REGION=$(get-region) $HOME/docker/ecr-login.sh + 3107 2021/09/07 10:20:17 tf-apply less + 3108 2021/09/07 10:20:31 skopeo inspect docker://"079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx" + 3109 2021/09/07 10:20:37 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx + 3110 2021/09/07 10:21:07 grep nginx logs/*app* + 3111 2021/09/07 10:21:12 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx:1.21 + 3112 2021/09/07 10:21:31 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx/nginx:1.21 + 3113 2021/09/07 10:21:36 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test2/nginx/nginx:1.21 + 3114 2021/09/07 10:21:42 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx/nginx:1.21 + 3115 2021/09/07 10:21:50 skopeo inspect docker://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx:1.21 + 3116 2021/09/07 10:22:06 ls + 3117 2021/09/07 10:23:01 vi main.tf + 3118 2021/09/07 10:23:15 cd cah + 3119 2021/09/07 10:23:17 cd charts/ + 3120 2021/09/07 10:23:17 la + 3121 2021/09/07 10:23:18 ls + 3122 2021/09/07 10:23:20 cd my-nginx/ + 3123 2021/09/07 10:23:21 ls + 3124 2021/09/07 10:23:22 vi * + 3125 2021/09/07 10:23:54 cd .. + 3126 2021/09/07 10:23:58 cd ../eks-test2 + 3127 2021/09/07 10:23:58 ls + 3128 2021/09/07 10:24:00 cd sample-istio/ + 3129 2021/09/07 10:24:01 l + 3130 2021/09/07 10:24:02 vi main.tf + 3131 2021/09/07 10:24:10 cd charts/ + 3132 2021/09/07 10:24:11 ls + 3133 2021/09/07 10:24:12 cd my-nginx/ + 3134 2021/09/07 10:24:13 ls + 3135 2021/09/07 10:24:15 vi Chart.yaml + 3136 2021/09/07 10:24:17 ls + 3137 2021/09/07 10:24:19 vi values.yaml + 3138 2021/09/07 10:25:16 cd .. + 3139 2021/09/07 10:25:19 less main.tf + 3140 2021/09/07 10:25:39 less logs/*app* + 3141 2021/09/07 10:26:21 vi main.tf + 3142 2021/09/07 10:26:27 ls + 3143 2021/09/07 10:26:33 vi copy_images.tf + 3144 2021/09/07 10:28:28 :q + 3145 2021/09/07 10:43:33 ls + 3146 2021/09/07 10:43:40 vi copy_images.tf + 3147 2021/09/07 10:44:06 grep name.*nginx ../sample-*/*tf + 3148 2021/09/07 10:44:12 ls .. 
+ 3149 2021/09/07 10:44:22 less ../sample-*/*tf + 3150 2021/09/07 10:44:41 less ../sample-*/main* + 3151 2021/09/07 10:44:50 less ../sample-*/copy*tf + 3152 2021/09/07 10:45:16 :q + 3153 2021/09/07 10:45:18 less ../sample-*/copy*tf + 3154 2021/09/07 10:45:54 vi copy_images.tf + 3155 2021/09/07 10:46:03 tf-plan + 3156 2021/09/07 10:47:50 grep ngingx ../sample*/copy*tf + 3157 2021/09/07 10:47:55 grep nginx ../sample*/copy*tf + 3158 2021/09/07 10:48:01 grep nginx ../sample*/copy*tf|grep image + 3159 2021/09/07 10:48:19 grep 'image.*nginx' ../sample*/copy*tf + 3160 2021/09/07 10:59:11 ls + 3161 2021/09/07 10:59:15 vi copy_images.tf + 3162 2021/09/07 10:59:27 tf-plan + 3163 2021/09/07 11:02:47 vi main.tf + 3164 2021/09/07 11:03:06 vi copy_images.tf + 3165 2021/09/07 11:03:47 terraform state list|grep copy_ + 3166 2021/09/07 11:03:56 terraform state list + 3167 2021/09/07 11:04:09 pwd + 3168 2021/09/07 11:04:12 ls + 3169 2021/09/07 11:04:19 pwd + 3170 2021/09/07 11:04:28 vi copy_images.tf + 3171 2021/09/07 11:04:36 cd .. + 3172 2021/09/07 11:04:40 cdeks-test3cd + 3173 2021/09/07 11:04:45 cd ekts-test + 3174 2021/09/07 11:04:47 cd eks-test3 + 3175 2021/09/07 11:04:49 cd sample-istio/ + 3176 2021/09/07 11:04:51 vi copy_images.tf + 3177 2021/09/07 11:04:58 tf-plan + 3178 2021/09/07 11:05:25 vi main.tf + 3179 2021/09/07 11:05:34 tf-plan + 3180 2021/09/07 11:07:20 tf-apply + 3181 2021/09/07 11:08:40 tf-apply -auto-approve + 3182 2021/09/07 11:24:54 sipcalc 100.64.0.0/15 + 3183 2021/09/07 11:31:21 kubecfl --kubconfig setup/kube.config get pods -o wide + 3184 2021/09/07 11:31:24 kubectl --kubconfig setup/kube.config get pods -o wide + 3185 2021/09/07 11:31:30 kubectl --kubeconfig setup/kube.config get pods -o wide + 3186 2021/09/07 11:31:36 kubectl --kubeconfig setup/kube.config get pods -o wide --all-namespaces + 3187 2021/09/07 11:31:55 kubectl --kubeconfig setup/kube.config get events -o wide -n my-nginx + 3188 2021/09/07 11:32:34 vi copy_images.tf + 3189 2021/09/07 11:33:22 grep copy_ logs/*app* + 3190 2021/09/07 11:33:31 vi main.tf + 3191 2021/09/07 11:33:37 vi copy_images.tf + 3192 2021/09/07 11:34:27 tf-plan + 3193 2021/09/07 11:35:07 tf-apply + 3194 2021/09/07 11:37:01 kubectl --kubeconfig setup/kube.config get events -o wide -n my-nginx + 3195 2021/09/07 11:37:15 kubectl --kubeconfig setup/kube.config get all -o wide -n my-nginx + 3196 2021/09/07 11:38:38 kubectl --kubeconfig setup/kube.config get svc -o wide --all-namespaces + 3197 2021/09/07 11:41:19 less main.tf + 3198 2021/09/07 11:41:42 less charts/my-nginx/* + 3199 2021/09/07 11:41:51 cat settings.auto.tfvars + 3200 2021/09/07 11:44:03 kubectl --kubeconfig setup/kube.config get nodes -o wide + 3201 2021/09/07 11:44:09 kubectl --kubeconfig setup/kube.config get pods -o wide + 3202 2021/09/07 11:44:18 kubectl --kubeconfig setup/kube.config get pods -o wide --all-namespaces + 3203 2021/09/07 11:44:24 kubectl --kubeconfig setup/kube.config get pods -o wide --all-namespaces|grep 100.64 + 3204 2021/09/07 12:02:43 kubectl --kubeconfig setup/kube.config get services -o wide --all-namespaces + 3205 2021/09/07 12:11:09 cd .. 
+ 3206 2021/09/07 12:11:10 ls + 3207 2021/09/07 12:11:14 mkdir kube-bench + 3208 2021/09/07 12:11:15 cd kube-bench/ + 3209 2021/09/07 12:11:15 ls + 3210 2021/09/07 12:11:37 vi README.md + 3211 2021/09/07 12:13:49 curl -k -o https://raw.githubusercontent.com/aquasecurity/kube-bench/main/job-eks.yaml + 3212 2021/09/07 12:13:52 curl -k -O https://raw.githubusercontent.com/aquasecurity/kube-bench/main/job-eks.yaml + 3213 2021/09/07 12:13:55 ls -al + 3214 2021/09/07 12:13:59 history|grpe kube + 3215 2021/09/07 12:14:19 kubectl --kubeconfig ../sample-istio/setup/kube.config get pods -o wide + 3216 2021/09/07 12:14:31 kubectl --kubeconfig ../sample-istio/setup/kube.config get pods -o wide --all-namespaces + 3217 2021/09/07 12:14:36 less job-eks.yaml + 3218 2021/09/07 12:15:20 kubectl --kubeconfig ../sample-istio/setup/kube.config apply -f job-eks.yaml + 3219 2021/09/07 12:15:25 kubectl --kubeconfig ../sample-istio/setup/kube.config get pods -o wide --all-namespaces + 3220 2021/09/07 12:15:33 kubectl --kubeconfig ../sample-istio/setup/kube.config get pods -o wide + 3221 2021/09/07 12:15:54 kubectl --kubeconfig ../sample-istio/setup/kube.config get logs kube-bench-bcxpw + 3222 2021/09/07 12:16:04 kubectl --kubeconfig ../sample-istio/setup/kube.config logs kube-bench-bcxpw + 3223 2021/09/07 12:16:11 kubectl --kubeconfig ../sample-istio/setup/kube.config get pods -o wide + 3224 2021/09/07 12:16:18 kubectl --kubeconfig ../sample-istio/setup/kube.config get events -o wide + 3225 2021/09/07 12:16:25 ls + 3226 2021/09/07 12:16:35 cp ../sample-istio/copy_images.tf . + 3227 2021/09/07 12:16:41 cd ../sample-istio/ + 3228 2021/09/07 12:16:42 ls + 3229 2021/09/07 12:16:49 ls -al copy_image.sh + 3230 2021/09/07 12:16:51 cd .. + 3231 2021/09/07 12:16:54 cd kube-bench/ + 3232 2021/09/07 12:16:59 ln -s ../common-services/copy_image.sh . + 3233 2021/09/07 12:17:08 vi copy_images.tf + 3234 2021/09/07 12:17:32 cat *yaml + 3235 2021/09/07 12:17:36 vi copy_images.tf + 3236 2021/09/07 12:18:00 tf-init + 3237 2021/09/07 12:18:07 tf-apply + 3238 2021/09/07 12:18:12 ls + 3239 2021/09/07 12:18:20 cp ../sample-istio/remote_state.yml . + 3240 2021/09/07 12:18:21 vi remote_state.yml + 3241 2021/09/07 12:18:37 tf-directory-setup.py -l none + 3242 2021/09/07 12:18:38 ls + 3243 2021/09/07 12:18:40 setup-new-directory.sh + 3244 2021/09/07 12:18:41 ls + 3245 2021/09/07 12:18:47 tf-init + 3246 2021/09/07 12:18:53 tf-plan + 3247 2021/09/07 12:19:02 cp ../locals.tf . + 3248 2021/09/07 12:19:04 cat locals.tf + 3249 2021/09/07 12:19:06 tf-plan + 3250 2021/09/07 12:19:11 ls + 3251 2021/09/07 12:19:14 ln -s ../settings.auto.tfvars . + 3252 2021/09/07 12:19:17 tf-plan + 3253 2021/09/07 12:19:29 ln -s ../variables.eks.tf . + 3254 2021/09/07 12:19:30 tf-plan + 3255 2021/09/07 12:19:45 ls + 3256 2021/09/07 12:19:54 cd ../sample-istio/ + 3257 2021/09/07 12:19:54 ls + 3258 2021/09/07 12:20:02 cd ../kube-bench/ + 3259 2021/09/07 12:20:06 ln -s ../variables.vpc.tf . 
+ 3260 2021/09/07 12:20:07 tf-plan + 3261 2021/09/07 12:20:21 tf-apply + 3262 2021/09/07 12:22:03 kubectl --kubeconfig ../sample-istio/setup/kube.config get events -o wide + 3263 2021/09/07 12:22:13 kubectl --kubeconfig ../sample-istio/setup/kube.config get pods -o wide + 3264 2021/09/07 12:22:30 kubectl --kubeconfig ../sample-istio/setup/kube.config log kube-bench-bcxpw + 3265 2021/09/07 12:22:59 export KUBECONFIG $(pwd)/../sample-istio/setup/kube.config + 3266 2021/09/07 12:23:08 export KUBECONFIG=$(pwd)/../sample-istio/setup/kube.config + 3267 2021/09/07 12:23:12 kubectl get pods -o wide + 3268 2021/09/07 12:23:26 kubectl describe pod kube-bench + 3269 2021/09/07 12:23:58 env|grep ECR + 3270 2021/09/07 12:24:10 cat ~/docker//ecr-login.sh + 3271 2021/09/07 12:24:17 export ECR_NAME="079788916859.dkr.ecr.us-east-1.amazonaws.com" + 3272 2021/09/07 12:24:39 skopeo inspect docker://$ECR_NAME/aquasec/kube-bench + 3273 2021/09/07 12:24:51 skopeo inspect docker://$ECR_NAME/aquasec/kube-bench:latest + 3274 2021/09/07 12:24:57 echo $ECR_NAME + 3275 2021/09/07 12:25:05 skopeo inspect docker://$ECR_NAME/eks/test3/aquasec/kube-bench:latest + 3276 2021/09/07 12:25:09 skopeo inspect docker://$ECR_NAME/eks/test3/aquasec/kube-bench + 3277 2021/09/07 12:25:16 grep copy logs/*app* + 3278 2021/09/07 12:25:35 cp job-eks.yaml job-eks.yaml.orig + 3279 2021/09/07 12:25:37 vi job-eks.yaml + 3280 2021/09/07 12:25:59 kubectl help + 3281 2021/09/07 12:26:06 kubectl delete pod kube-bench + 3282 2021/09/07 12:26:19 kubectl get pod + 3283 2021/09/07 12:26:26 kubectl delete pod kube-bench-bcxpw + 3284 2021/09/07 12:26:41 kubectl get pod -o wide + 3285 2021/09/07 12:26:45 kubectl get pods -o wide + 3286 2021/09/07 12:26:56 history|grep apply + 3287 2021/09/07 12:27:03 kubectl apply -f job-eks.yaml + 3288 2021/09/07 12:27:21 kubectl delete job kube-bench + 3289 2021/09/07 12:27:28 kubectl get pods -o wide + 3290 2021/09/07 12:27:37 kubectl get pods -o wide --wach + 3291 2021/09/07 12:27:39 kubectl get pods -o wide --watch + 3292 2021/09/07 12:28:53 kubectl get pods -o wide + 3293 2021/09/07 12:28:59 kubectl apply -f job-eks.yaml + 3294 2021/09/07 12:29:03 kubectl get pods -o wide + 3295 2021/09/07 12:29:15 kubectl describe pod kube-bench + 3296 2021/09/07 12:29:27 kubectl get events kube-bench + 3297 2021/09/07 12:29:37 kubectl get events kube-bench-rcr6k + 3298 2021/09/07 12:29:43 kubectl get events + 3299 2021/09/07 12:30:01 vi job-eks.yaml + 3300 2021/09/07 12:30:43 kubectl delete job kube-bench + 3301* 2021/09/07 12:30:49 kubectl descri + 3302 2021/09/07 12:35:16 kubectl log kube-bench + 3303 2021/09/07 12:35:19 kubectl logs kube-bench + 3304 2021/09/07 12:35:31 kubectl get pods -o wide + 3305 2021/09/07 12:35:35 history|grep app + 3306 2021/09/07 12:35:40 kubectl apply -f job-eks.yaml + 3307 2021/09/07 12:35:44 kubectl get pods -o wide + 3308 2021/09/07 12:35:48 history|grep descr + 3309 2021/09/07 12:35:52 kubectl describe pod kube-bench + 3310 2021/09/07 12:36:02 kubectl logs kube-bench + 3311 2021/09/07 12:36:10 kubectl describe pod kube-bench + 3312 2021/09/07 12:58:23 bc + 3313 2021/09/07 14:01:51 kubectl describe pod kube-bench + 3314 2021/09/07 14:02:04 kubectl logs kube-bench + 3315 2021/09/07 14:02:14 kubectl logs kube-bench-89l6b + 3316 2021/09/07 14:02:57 history|grep app + 3317 2021/09/07 14:03:18 kubectl describe pod kube-bench + 3318 2021/09/07 14:14:47 kubectl describe pod kube-bench > pod.txt + 3319 2021/09/07 14:14:57 kubectl logs kube-bench-89l6b > output.log + 3320 2021/09/07 14:14:58 less output 
+ 3321 2021/09/07 14:15:04 less output.log + 3322 2021/09/07 14:16:32 kubectl get pods + 3323 2021/09/07 14:16:40 kubectl delete -f job-eks.yaml + 3324 2021/09/07 14:16:59 history >> history.1 diff --git a/examples/established-cluster-examples/kube-bench/job-eks.yaml b/examples/established-cluster-examples/kube-bench/job-eks.yaml new file mode 100644 index 0000000..b5b5c76 --- /dev/null +++ b/examples/established-cluster-examples/kube-bench/job-eks.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + spec: + hostPID: true + containers: + - name: kube-bench + # Push the image to your ECR and then refer to it here + # image: + image: 079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/kube-bench:latest + # To send findings to AWS Security Hub, refer to `job-eks-asff.yaml` instead + command: ["kube-bench", "run", "--targets", "node", "--benchmark", "eks-1.0"] + volumeMounts: + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + readOnly: true + - name: etc-systemd + mountPath: /etc/systemd + readOnly: true + - name: etc-kubernetes + mountPath: /etc/kubernetes + readOnly: true + restartPolicy: Never + volumes: + - name: var-lib-kubelet + hostPath: + path: "/var/lib/kubelet" + - name: etc-systemd + hostPath: + path: "/etc/systemd" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" diff --git a/examples/established-cluster-examples/kube-bench/job-eks.yaml.orig b/examples/established-cluster-examples/kube-bench/job-eks.yaml.orig new file mode 100644 index 0000000..cbad7f2 --- /dev/null +++ b/examples/established-cluster-examples/kube-bench/job-eks.yaml.orig @@ -0,0 +1,37 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + spec: + hostPID: true + containers: + - name: kube-bench + # Push the image to your ECR and then refer to it here + # image: + image: aquasec/kube-bench:latest + # To send findings to AWS Security Hub, refer to `job-eks-asff.yaml` instead + command: ["kube-bench", "run", "--targets", "node", "--benchmark", "eks-1.0"] + volumeMounts: + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + readOnly: true + - name: etc-systemd + mountPath: /etc/systemd + readOnly: true + - name: etc-kubernetes + mountPath: /etc/kubernetes + readOnly: true + restartPolicy: Never + volumes: + - name: var-lib-kubelet + hostPath: + path: "/var/lib/kubelet" + - name: etc-systemd + hostPath: + path: "/etc/systemd" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" diff --git a/examples/established-cluster-examples/kube-bench/locals.tf b/examples/established-cluster-examples/kube-bench/locals.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/established-cluster-examples/kube-bench/locals.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/established-cluster-examples/kube-bench/main.tf b/examples/established-cluster-examples/kube-bench/main.tf new file mode 100644 index 0000000..61cdfed --- /dev/null +++ b/examples/established-cluster-examples/kube-bench/main.tf @@ -0,0 +1,56 @@ +resource "kubernetes_job" "kube-bench" { + metadata { + name = "kube-bench" + } + spec { + template { + metadata { } + spec { + host_pid = true + container { + name = "kube-bench" + image = "079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test4/kube-bench:latest" + # To send findings to AWS Security Hub, refer to `job-eks-asff.yaml` instead + command = ["kube-bench", "run", "--targets", "node", "--benchmark", "eks-1.0"] + + volume_mount { + name = 
"var-lib-kubelet" + mount_path = "/var/lib/kubelet" + read_only = true + } + volume_mount { + name = "etc-systemd" + mount_path = "/etc/systemd" + read_only = true + } + volume_mount { + name = "etc-kubernetes" + mount_path = "/etc/kubernetes" + read_only = true + } + } + restart_policy = "Never" + volume { + name = "var-lib-kubelet" + host_path { + path = "/var/lib/kubelet" + } + } + volume { + name = "etc-systemd" + host_path { + path = "/etc/systemd" + } + } + volume { + name = "etc-kubernetes" + host_path { + path = "/etc/kubernetes" + } + } + } + } +# backoff_limit = 4 + } + wait_for_completion = false +} diff --git a/examples/established-cluster-examples/kube-bench/output.log b/examples/established-cluster-examples/kube-bench/output.log new file mode 100644 index 0000000..71ee8cd --- /dev/null +++ b/examples/established-cluster-examples/kube-bench/output.log @@ -0,0 +1,41 @@ +[INFO] 3 Worker Node Security Configuration +[INFO] 3.1 Worker Node Configuration Files +[PASS] 3.1.1 Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Scored) +[PASS] 3.1.2 Ensure that the proxy kubeconfig file ownership is set to root:root (Scored) +[PASS] 3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Scored) +[PASS] 3.1.4 Ensure that the kubelet configuration file ownership is set to root:root (Scored) +[INFO] 3.2 Kubelet +[PASS] 3.2.1 Ensure that the --anonymous-auth argument is set to false (Scored) +[PASS] 3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Scored) +[PASS] 3.2.3 Ensure that the --client-ca-file argument is set as appropriate (Scored) +[PASS] 3.2.4 Ensure that the --read-only-port argument is set to 0 (Scored) +[PASS] 3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Scored) +[PASS] 3.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Scored) +[PASS] 3.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Scored) +[PASS] 3.2.8 Ensure that the --hostname-override argument is not set (Scored) +[WARN] 3.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Scored) +[PASS] 3.2.10 Ensure that the --rotate-certificates argument is not set to false (Scored) +[PASS] 3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) + +== Remediations node == +3.2.9 If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + + +== Summary node == +14 checks PASS +0 checks FAIL +1 checks WARN +0 checks INFO + +== Summary total == +14 checks PASS +0 checks FAIL +1 checks WARN +0 checks INFO + diff --git a/examples/established-cluster-examples/kube-bench/pod.txt b/examples/established-cluster-examples/kube-bench/pod.txt new file mode 100644 index 0000000..09f3718 --- /dev/null +++ b/examples/established-cluster-examples/kube-bench/pod.txt @@ -0,0 +1,71 @@ +Name: kube-bench-89l6b +Namespace: default +Priority: 0 +Node: ip-10-194-25-15.ec2.internal/10.194.25.15 +Start Time: Tue, 07 Sep 2021 12:35:41 -0400 +Labels: controller-uid=2dc67fe7-a273-4db3-bd09-3bc491dbf681 + job-name=kube-bench +Annotations: kubernetes.io/psp: eks.privileged +Status: Succeeded +IP: 100.64.2.164 +IPs: + IP: 100.64.2.164 +Controlled By: Job/kube-bench +Containers: + kube-bench: + Container ID: docker://8bf8279d9195e122fc396f7399e100e7985af6ef9714259b6a7060c3271cdcee + Image: 079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/kube-bench:latest + Image ID: docker-pullable://079788916859.dkr.ecr.us-east-1.amazonaws.com/eks/test3/kube-bench@sha256:e02aa2eb58c9a6bee9e2b060684051be14b266f0e9952cadd8f71f32f578b5d7 + Port: + Host Port: + Command: + kube-bench + run + --targets + node + --benchmark + eks-1.0 + State: Terminated + Reason: Completed + Exit Code: 0 + Started: Tue, 07 Sep 2021 12:35:44 -0400 + Finished: Tue, 07 Sep 2021 12:35:44 -0400 + Ready: False + Restart Count: 0 + Environment: + Mounts: + /etc/kubernetes from etc-kubernetes (ro) + /etc/systemd from etc-systemd (ro) + /var/lib/kubelet from var-lib-kubelet (ro) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-9mhvq (ro) +Conditions: + Type Status + Initialized True + Ready False + ContainersReady False + PodScheduled True +Volumes: + var-lib-kubelet: + Type: HostPath (bare host directory volume) + Path: /var/lib/kubelet + HostPathType: + etc-systemd: + Type: HostPath (bare host directory volume) + Path: /etc/systemd + HostPathType: + etc-kubernetes: + Type: HostPath (bare host directory volume) + Path: /etc/kubernetes + HostPathType: + kube-api-access-9mhvq: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + ConfigMapOptional: + DownwardAPI: true +QoS Class: BestEffort +Node-Selectors: +Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s +Events: + diff --git a/examples/established-cluster-examples/kube-bench/prefixes.tf b/examples/established-cluster-examples/kube-bench/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/established-cluster-examples/kube-bench/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/kube-bench/providers.tf b/examples/established-cluster-examples/kube-bench/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ b/examples/established-cluster-examples/kube-bench/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/kube-bench/variables.eks.tf b/examples/established-cluster-examples/kube-bench/variables.eks.tf new file mode 120000 index 0000000..7dd95db --- /dev/null +++ b/examples/established-cluster-examples/kube-bench/variables.eks.tf @@ -0,0 +1 @@ +../variables.eks.tf \ No newline at end of file 
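The shell history committed with this example records the full working loop for kube-bench. Condensed, and assuming the kubeconfig written by the sample-istio setup (the kube-bench pod suffix changes on every run), it comes down to:

```console
% export KUBECONFIG=$(pwd)/../sample-istio/setup/kube.config
% kubectl apply -f job-eks.yaml
% kubectl get pods -o wide --watch
% kubectl logs kube-bench-89l6b > output.log
% kubectl describe pod kube-bench > pod.txt
% kubectl delete -f job-eks.yaml
```

The `pod.txt` and `output.log` committed above are the captured results of exactly this sequence.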
diff --git a/examples/established-cluster-examples/kube-bench/variables.vpc.tf b/examples/established-cluster-examples/kube-bench/variables.vpc.tf new file mode 120000 index 0000000..f672f33 --- /dev/null +++ b/examples/established-cluster-examples/kube-bench/variables.vpc.tf @@ -0,0 +1 @@ +../variables.vpc.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/kube-bench/version.tf b/examples/established-cluster-examples/kube-bench/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/established-cluster-examples/kube-bench/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-alb/README.md b/examples/established-cluster-examples/sample-alb/README.md new file mode 100644 index 0000000..ae0b0da --- /dev/null +++ b/examples/established-cluster-examples/sample-alb/README.md @@ -0,0 +1,24 @@ +# Sample app + +Must have tags on subnets for `internal-elb` and `cluster/{cluster_name}` + +```hcl +private_subnets = [ + { + base_cidr = "10.194.24.0/22" + label = "apps" + bits = 2 + private = true + tags = { + "kubernetes.io/cluster/cat-ced-edde-eks-cluster" = "shared" + "kubernetes.io/cluster/test1" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + }, +``` + + + +kubectl -n kube-system logs aws-load-balancer-controller-54fdf64896-qqt6d + + diff --git a/examples/established-cluster-examples/sample-alb/copy_image.sh b/examples/established-cluster-examples/sample-alb/copy_image.sh new file mode 120000 index 0000000..cc5c083 --- /dev/null +++ b/examples/established-cluster-examples/sample-alb/copy_image.sh @@ -0,0 +1 @@ +../efs/copy_image.sh \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-alb/copy_images.tf b/examples/established-cluster-examples/sample-alb/copy_images.tf new file mode 100644 index 0000000..85b694c --- /dev/null +++ b/examples/established-cluster-examples/sample-alb/copy_images.tf @@ -0,0 +1,53 @@ +data "aws_ecr_authorization_token" "token" {} + +locals { + repo_parent_name = format("eks/%v", var.cluster_name) + images = [ + { + image = "nginx/nginx" + tag = var.nginx_tag + }, + ] +} + +#data "aws_ecr_repository" "repository" { +# for_each = { for image in local.images : image.image => image } +# name = format("%v/%v", local.repo_parent_name, each.value.image) +#} + +resource "aws_ecr_repository" "repository" { + for_each = { for image in local.images : image.image => image } + + name = format("%v/%v/%v", local.repo_parent_name, local.app_name, each.value.image) + image_tag_mutability = "IMMUTABLE" + + image_scanning_configuration { + scan_on_push = true + } + + encryption_configuration { + encryption_type = "KMS" + } + + tags = merge( + #local.common_tags, + #local.base_tags, + #var.application_tags, + tomap({ "Name" = format("ecr-eks-%v-%v", var.cluster_name, each.value.image) }), + ) +} + +resource "null_resource" "copy_images" { + for_each = { for image in local.images : image.image => image } + + provisioner "local-exec" { + command = "${path.module}/copy_image.sh" + environment = { + SOURCE_IMAGE = format("%v/%v:%v", local.public_reg, each.value.image, each.value.tag) + DESTINATION_IMAGE = format("%v:%v", aws_ecr_repository.repository[each.key].repository_url, each.value.tag) + DESTINATION_USERNAME = data.aws_ecr_authorization_token.token.user_name + DESTINATION_PASSWORD = data.aws_ecr_authorization_token.token.password + } + } +} + diff --git a/examples/established-cluster-examples/sample-alb/data.eks.tf 
b/examples/established-cluster-examples/sample-alb/data.eks.tf new file mode 100644 index 0000000..4cebea9 --- /dev/null +++ b/examples/established-cluster-examples/sample-alb/data.eks.tf @@ -0,0 +1,15 @@ +data "aws_eks_cluster" "cluster" { + name = var.cluster_name +} + +data "aws_eks_cluster_auth" "cluster" { + name = var.cluster_name +} + +locals { + aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster +# for main.tf +# aws_eks_cluster = aws_eks_cluster.eks_cluster +# for all subdirectories + aws_eks_cluster = data.aws_eks_cluster.cluster +} diff --git a/examples/established-cluster-examples/sample-alb/ecr.tf b/examples/established-cluster-examples/sample-alb/ecr.tf new file mode 120000 index 0000000..654d0cc --- /dev/null +++ b/examples/established-cluster-examples/sample-alb/ecr.tf @@ -0,0 +1 @@ +../efs/ecr.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-alb/locals.tf b/examples/established-cluster-examples/sample-alb/locals.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/established-cluster-examples/sample-alb/locals.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/established-cluster-examples/sample-alb/main.tf b/examples/established-cluster-examples/sample-alb/main.tf new file mode 100644 index 0000000..ef11beb --- /dev/null +++ b/examples/established-cluster-examples/sample-alb/main.tf @@ -0,0 +1,126 @@ +# This is a sample application that deploys an nginx pod and a load balancer +# service as a simple verification that the network routing is working +# properly. + +locals { + app_name = "sample-alb" +} + +resource "kubernetes_namespace" "app" { + metadata { + name = local.app_name + } +} + +resource "kubernetes_service" "app" { + metadata { + name = local.app_name + namespace = kubernetes_namespace.app.metadata[0].name + } + spec { + selector = { + app = kubernetes_deployment.app.metadata[0].labels.app + } + port { + name = "http" + port = 8080 + target_port = 80 + } + + type = "ClusterIP" + } + + timeouts { + create = "1m" + } +} + +resource "kubernetes_ingress" "app" { + depends_on = [kubernetes_service.app] + metadata { + name = local.app_name + namespace = kubernetes_namespace.app.metadata[0].name + + annotations = { + # "service.beta.kubernetes.io/aws-load-balancer-scheme" = "internal" + "service.beta.kubernetes.io/aws-load-balancer-internal" = "true" + "kubernetes.io/ingress.class" = "alb" + } + } + + spec { + rule { + http { + path { + backend { + service_name = local.app_name + service_port = 8080 + } + path = "/" + } + } + } + } +} + +resource "kubernetes_deployment" "app" { + depends_on = [null_resource.copy_images] + metadata { + name = local.app_name + namespace = kubernetes_namespace.app.metadata[0].name + labels = { + app = local.app_name + } + } + spec { + replicas = 3 + + selector { + match_labels = { + app = local.app_name + } + } + + template { + metadata { + labels = { + app = local.app_name + } + } + + spec { + container { + image = format("%v:%v", aws_ecr_repository.repository["nginx/nginx"].repository_url, var.nginx_tag) + name = local.app_name + + resources { + limits = { + cpu = "0.5" + memory = "512Mi" + } + requests = { + cpu = "100m" + memory = "50Mi" + } + } + + liveness_probe { + http_get { + path = "/" + port = 80 + } + + initial_delay_seconds = 30 + period_seconds = 60 + } + } + } + } + } + + timeouts { + create = "2m" + update = "1m" + delete = "2m" + } +} diff --git a/examples/established-cluster-examples/sample-alb/prefixes.tf 
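Once the aws-load-balancer-controller reconciles the ingress defined above, the ALB's DNS name should appear in the ingress status. A quick check, assuming the same kubeconfig conventions as the other examples and the controller deployment name implied by the pod shown in the README:

```console
% kubectl -n sample-alb get ingress sample-alb
% kubectl -n kube-system logs deploy/aws-load-balancer-controller --tail=20
```

One hedge on the annotations block: the `service.beta.kubernetes.io/*` keys are Service-level annotations, so on an Ingress the controller most likely ignores them; the ALB-specific equivalent would be `alb.ingress.kubernetes.io/scheme: internal`, and an internal scheme here may simply be the controller default.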
b/examples/established-cluster-examples/sample-alb/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/established-cluster-examples/sample-alb/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-alb/providers.tf b/examples/established-cluster-examples/sample-alb/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ b/examples/established-cluster-examples/sample-alb/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-alb/variables.sample.tf b/examples/established-cluster-examples/sample-alb/variables.sample.tf new file mode 100644 index 0000000..e322761 --- /dev/null +++ b/examples/established-cluster-examples/sample-alb/variables.sample.tf @@ -0,0 +1,12 @@ +variable "cluster_name" { + description = "The name of the EKS cluster into which the efs-provisioner is to be installed." + type = string +} + +# See https://gallery.ecr.aws/nginx/nginx for the latest tag version. +variable "nginx_tag" { + description = "The version of the nginx image to copy to the account registry." + type = string + default = "1.21" +} + diff --git a/examples/established-cluster-examples/sample-alb/variables.vpc.tf b/examples/established-cluster-examples/sample-alb/variables.vpc.tf new file mode 120000 index 0000000..f672f33 --- /dev/null +++ b/examples/established-cluster-examples/sample-alb/variables.vpc.tf @@ -0,0 +1 @@ +../variables.vpc.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-alb/version.tf b/examples/established-cluster-examples/sample-alb/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/established-cluster-examples/sample-alb/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-elb/README.md b/examples/established-cluster-examples/sample-elb/README.md new file mode 100644 index 0000000..2cf5ca0 --- /dev/null +++ b/examples/established-cluster-examples/sample-elb/README.md @@ -0,0 +1,74 @@ +# Sample app + +```script +% kubectl -n sample describe service sample +Name: sample +Namespace: sample +Labels: +Annotations: service.beta.kubernetes.io/aws-load-balancer-internal: true + service.beta.kubernetes.io/aws-load-balancer-scheme: internal +Selector: app=sample +Type: LoadBalancer +IP Families: +IP: 172.20.93.208 +IPs: 172.20.93.208 +LoadBalancer Ingress: internal-a717e6e95397247fbb6ec512e17bd4f4-227943433.us-east-1.elb.amazonaws.com +Port: http 8080/TCP +TargetPort: 80/TCP +NodePort: http 31758/TCP +Endpoints: 10.194.24.196:80,10.194.25.125:80,10.194.26.98:80 +Session Affinity: None +External Traffic Policy: Cluster +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal EnsuringLoadBalancer 34s service-controller Ensuring load balancer + Normal EnsuredLoadBalancer 31s service-controller Ensured load balancer +``` + +https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer +https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/service/annotations/ + +Docs say to use the scheme. It doesn't actually work. 
With EKS you need the `aws-load-balancer-internal` annotation set to `"true"` instead: + +```hcl + annotations = { +# does not work with eks +# "service.beta.kubernetes.io/aws-load-balancer-scheme" = "internal" + "service.beta.kubernetes.io/aws-load-balancer-internal" = "true" + } +``` + +The subnets must carry tags for `internal-elb` and `cluster/{cluster_name}`: + +```hcl +private_subnets = [ + { + base_cidr = "10.194.24.0/22" + label = "apps" + bits = 2 + private = true + tags = { + "kubernetes.io/cluster/cat-ced-edde-eks-cluster" = "shared" + "kubernetes.io/cluster/test1" = "shared" + "kubernetes.io/role/internal-elb" = 1 + } + }, +``` + +# TO-DO + +1. how to do ALB and NLB +1. how to do tags on ALB (LB and TG) and NLB +1. how to add a name entry to DNS +1. add a dns zone for the cluster +1. add an alias in some other domain to a service entry +1. look at all the various controller options with AWS +1. certificate authority + * vault + * istio (not for prod) + * consul (acm pca, vault, etc.) +1. build a new cluster, documenting all steps +1. automate the tagging of subnets for LB and k8s nodes diff --git a/examples/established-cluster-examples/sample-elb/copy_image.sh b/examples/established-cluster-examples/sample-elb/copy_image.sh new file mode 120000 index 0000000..cc5c083 --- /dev/null +++ b/examples/established-cluster-examples/sample-elb/copy_image.sh @@ -0,0 +1 @@ +../efs/copy_image.sh \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-elb/copy_images.tf b/examples/established-cluster-examples/sample-elb/copy_images.tf new file mode 100644 index 0000000..a136151 --- /dev/null +++ b/examples/established-cluster-examples/sample-elb/copy_images.tf @@ -0,0 +1,48 @@ +data "aws_ecr_authorization_token" "token" {} + +locals { + repo_parent_name = format("eks/%v", var.cluster_name) + images = [ + { + image = "nginx/nginx" + tag = var.nginx_tag + }, + ] +} + +resource "aws_ecr_repository" "repository" { + for_each = { for image in local.images : image.image => image } + + name = format("%v/%v", local.repo_parent_name, each.value.image) + image_tag_mutability = "IMMUTABLE" + + image_scanning_configuration { + scan_on_push = true + } + + encryption_configuration { + encryption_type = "KMS" + } + + tags = merge( + #local.common_tags, + #local.base_tags, + #var.application_tags, + tomap({ "Name" = format("ecr-eks-%v-%v", var.cluster_name, each.value.image) }), + ) +} + +resource "null_resource" "copy_images" { + for_each = { for image in local.images : image.image => image } + + provisioner "local-exec" { + command = "${path.module}/copy_image.sh" + environment = { + SOURCE_IMAGE = format("%v/%v:%v", local.public_reg, each.value.image, each.value.tag) + DESTINATION_IMAGE = format("%v:%v", aws_ecr_repository.repository[each.key].repository_url, each.value.tag) + DESTINATION_USERNAME = data.aws_ecr_authorization_token.token.user_name + DESTINATION_PASSWORD = data.aws_ecr_authorization_token.token.password + } + } +} + diff --git a/examples/established-cluster-examples/sample-elb/data.eks.tf b/examples/established-cluster-examples/sample-elb/data.eks.tf new file mode 100644 index 0000000..4cebea9 --- /dev/null +++ b/examples/established-cluster-examples/sample-elb/data.eks.tf @@ -0,0 +1,15 @@ +data "aws_eks_cluster" "cluster" { + name = var.cluster_name +} + +data "aws_eks_cluster_auth" "cluster" { + name = var.cluster_name +} + +locals { + aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster +# for main.tf +# aws_eks_cluster = aws_eks_cluster.eks_cluster
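+# Aside on copy_images.tf above: copy_image.sh is symlinked from the efs example +# and not reproduced in this section. A minimal sketch of what the provisioner +# expects, using skopeo (which the shell history shows in use against this +# registry), might be: +#   skopeo copy --dest-creds "$DESTINATION_USERNAME:$DESTINATION_PASSWORD" \ +#     "docker://$SOURCE_IMAGE" "docker://$DESTINATION_IMAGE" +# This is an assumption about the script's behavior, not its verbatim contents.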
+# for all subdirectories + aws_eks_cluster = data.aws_eks_cluster.cluster +} diff --git a/examples/established-cluster-examples/sample-elb/ecr.tf b/examples/established-cluster-examples/sample-elb/ecr.tf new file mode 120000 index 0000000..654d0cc --- /dev/null +++ b/examples/established-cluster-examples/sample-elb/ecr.tf @@ -0,0 +1 @@ +../efs/ecr.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-elb/locals.tf b/examples/established-cluster-examples/sample-elb/locals.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/established-cluster-examples/sample-elb/locals.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/established-cluster-examples/sample-elb/main.tf b/examples/established-cluster-examples/sample-elb/main.tf new file mode 100644 index 0000000..b5f3760 --- /dev/null +++ b/examples/established-cluster-examples/sample-elb/main.tf @@ -0,0 +1,103 @@ +# This is a sample application that deploys an nginx pod and a load balancer +# service as a simple verification that the network routing is working +# properly. + +locals { + app_name = "sample-elb" +} + +resource "kubernetes_namespace" "app" { + metadata { + name = local.app_name + } +} + +resource "kubernetes_service" "app" { + metadata { + name = local.app_name + namespace = kubernetes_namespace.app.metadata[0].name + + annotations = { +# "service.beta.kubernetes.io/aws-load-balancer-scheme" = "internal" + "service.beta.kubernetes.io/aws-load-balancer-internal" = "true" + } + } + spec { + selector = { + app = kubernetes_deployment.app.metadata[0].labels.app + } + port { + name = "http" + port = 8080 + target_port = 80 + } + + type = "LoadBalancer" + } + + timeouts { + create = "5m" + } +} + +resource "kubernetes_deployment" "app" { + depends_on = [null_resource.copy_images] + metadata { + name = local.app_name + namespace = kubernetes_namespace.app.metadata[0].name + labels = { + app = local.app_name + } + } + spec { + replicas = 3 + + selector { + match_labels = { + app = local.app_name + } + } + + template { + metadata { + labels = { + app = local.app_name + } + } + + spec { + container { + image = format("%v:%v", aws_ecr_repository.repository["nginx/nginx"].repository_url, var.nginx_tag) + name = local.app_name + + resources { + limits = { + cpu = "0.5" + memory = "512Mi" + } + requests = { + cpu = "100m" + memory = "50Mi" + } + } + + liveness_probe { + http_get { + path = "/" + port = 80 + } + + initial_delay_seconds = 30 + period_seconds = 60 + } + } + } + } + } + + timeouts { + create = "2m" + update = "1m" + delete = "2m" + } +} diff --git a/examples/established-cluster-examples/sample-elb/prefixes.tf b/examples/established-cluster-examples/sample-elb/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/established-cluster-examples/sample-elb/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-elb/providers.tf b/examples/established-cluster-examples/sample-elb/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ b/examples/established-cluster-examples/sample-elb/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-elb/variables.sample.tf b/examples/established-cluster-examples/sample-elb/variables.sample.tf new file mode 100644 index 0000000..e322761 --- /dev/null +++ 
b/examples/established-cluster-examples/sample-elb/variables.sample.tf @@ -0,0 +1,12 @@ +variable "cluster_name" { + description = "The name of the EKS cluster into which the efs-provisioner is to be installed." + type = string +} + +# See https://gallery.ecr.aws/nginx/nginx for the latest tag version. +variable "nginx_tag" { + description = "The version of the nginx image to copy to the account registry." + type = string + default = "1.21" +} + diff --git a/examples/established-cluster-examples/sample-elb/variables.vpc.tf b/examples/established-cluster-examples/sample-elb/variables.vpc.tf new file mode 120000 index 0000000..f672f33 --- /dev/null +++ b/examples/established-cluster-examples/sample-elb/variables.vpc.tf @@ -0,0 +1 @@ +../variables.vpc.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-elb/version.tf b/examples/established-cluster-examples/sample-elb/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/established-cluster-examples/sample-elb/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-istio/README.md b/examples/established-cluster-examples/sample-istio/README.md new file mode 100644 index 0000000..eabe41b --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/README.md @@ -0,0 +1,70 @@ +```console +% kubectl --kubeconfig setup/kube.config get pods -n my-nginx -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +app-my-nginx-7957968965-bwvfc 2/2 Running 0 3m14s 10.188.22.242 ip-10-188-22-168.us-gov-east-1.compute.internal +app-my-nginx-7957968965-p4grl 2/2 Running 0 3m14s 10.188.22.46 ip-10-188-22-117.us-gov-east-1.compute.internal +``` + +```console +% kubectl --kubeconfig setup/kube.config get secrets -n istio-system >> README.md +NAME TYPE DATA AGE +default-token-btk9b kubernetes.io/service-account-token 3 7d18h +istio-ca-secret istio.io/ca-root 5 7d18h +istio-egressgateway-service-account-token-2pgn7 kubernetes.io/service-account-token 3 7d2h +istio-ingressgateway-service-account-token-s6k4s kubernetes.io/service-account-token 3 7d2h +istio-reader-service-account-token-vmqkn kubernetes.io/service-account-token 3 7d2h +istiod-service-account-token-bfrft kubernetes.io/service-account-token 3 7d2h +nginx-cert kubernetes.io/tls 3 3m45s +sh.helm.release.v1.istio-operator.v1 helm.sh/release.v1 1 7d18h +sh.helm.release.v1.istio-peerauthentication.v1 helm.sh/release.v1 1 7d2h +sh.helm.release.v1.istio-profile.v1 helm.sh/release.v1 1 7d2h +``` + +```console +% kubectl --kubeconfig setup/kube.config get secret nginx-cert -n istio-system -o json | jq -r .data[\"tls.crt\"] | base64 -d | openssl x509 -in /dev/stdin -text -noout + +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + d1:51:04:5f:04:5c:33:f3:49:bb:7e:a1:50:72:b5:0e + Signature Algorithm: sha256WithRSAEncryption + Issuer: C = US, O = U.S. 
Census Bureau, OU = eks-adsd-cumulus-dev-vpc2-dice-dev-PKI, CN = pki.adsd-cumulus-dev.dev.dice.census.gov + Validity + Not Before: Oct 5 14:45:01 2021 GMT + Not After : Jan 3 14:45:01 2022 GMT + Subject: O = census.gov + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public-Key: (2048 bit) + Modulus: + 00:b1:b9:ca:c9:d7:a6:7c:a8:93:e0:a9:d1:11:66: + 52:53:c2:3a:3d:00:10:e5:23:9b:69:a4:29:96:aa: + 65:f3:9f:9b:4c:4d:a3:ca:70:3a:34:15:f0:f7:96: + 5a:33:31:04:9a:b5:3c:d5:5e:0c:d0:93:d3:9b:f6: + de:28:1e:10:7b:29:fa:74:ab:71:37:2c:73:b9:f5: + d3:1c:22:fb:3b:fb:9e:9c:99:8b:5a:4d:f2:53:06: + 77:29:e6:56:69:ae:a9:de:74:0a:07:48:8d:60:c9: + 76:35:e2:86:22:e3:37:f6:f0:4a:f8:0e:dd:e7:14: + 20:df:0e:d3:a3:7d:58:44:28:04:8e:a9:bc:7a:1b: + 94:a2:70:8b:cd:69:f5:12:16:40:a1:da:01:a6:3b: + 74:5e:0f:0f:58:6f:8c:56:d1:76:72:42:31:62:40: + 29:86:dc:b3:fa:10:93:e8:52:ad:a7:bc:92:d2:06: + 53:95:8a:5a:5f:3b:42:91:61:9b:89:c8:5a:51:04: + 71:19:da:d3:cf:b3:1d:7c:af:0e:6e:c6:7f:d9:10: + 63:58:3f:fb:c5:d1:77:f6:fa:7e:27:d7:97:5a:75: + bd:f1:7f:75:c5:02:f7:02:89:7a:04:bd:72:d6:36: + a1:90:97:f5:df:43:ba:a8:b2:60:df:f3:e2:39:d2: + cb:7d + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Key Usage: critical + Digital Signature, Key Encipherment + X509v3 Basic Constraints: critical + CA:FALSE + X509v3 Authority Key Identifier: + keyid:B4:6D:A3:84:27:74:73:08:83:A7:61:FE:50:E1:3A:93:2C:05:AF:EF + + X509v3 Subject Alternative Name: + DNS:nginx.adsd-cumulus-dev.dev.dice.census.gov +``` diff --git a/examples/established-cluster-examples/sample-istio/charts/my-nginx/.helmignore b/examples/established-cluster-examples/sample-istio/charts/my-nginx/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/charts/my-nginx/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/established-cluster-examples/sample-istio/charts/my-nginx/Chart.yaml b/examples/established-cluster-examples/sample-istio/charts/my-nginx/Chart.yaml new file mode 100644 index 0000000..39fd536 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/charts/my-nginx/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: my-nginx +description: Sample application deployed behind istio +type: application +version: 0.1.1 +appVersion: "1.0.0" diff --git a/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/_helpers.tpl b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/_helpers.tpl new file mode 100644 index 0000000..8a61c21 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "my-nginx.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
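+For example, with this chart a release named "app" renders "app-my-nginx" (matching the pod names in the README above), while a hypothetical release named "my-nginx-prod" already contains the chart name and would be used as-is, truncated to 63 characters.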
+*/}} +{{- define "my-nginx.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "my-nginx.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "my-nginx.labels" -}} +helm.sh/chart: {{ include "my-nginx.chart" . }} +{{ include "my-nginx.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "my-nginx.selectorLabels" -}} +app.kubernetes.io/name: {{ include "my-nginx.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "my-nginx.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "my-nginx.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/certificate.yaml b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/certificate.yaml new file mode 100644 index 0000000..cbb4093 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/certificate.yaml @@ -0,0 +1,16 @@ +apiVersion: cert-manager.io/v1beta1 +kind: Certificate +metadata: + name: {{ .Values.istio.publicHostname }}-cert + namespace: {{ .Values.istio.namespace }} + labels: {{- include "my-nginx.labels" . | nindent 4 }} +spec: + secretName: '{{ .Values.istio.publicHostname }}-cert' + subject: + organizations: + - census.gov + dnsNames: + - '{{ .Values.istio.publicHostname }}.{{ .Values.istio.publicDomain }}' + issuerRef: + kind: '{{ .Values.istio.issuerType }}' + name: '{{ .Values.istio.issuer }}' diff --git a/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/deployment.yaml b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/deployment.yaml new file mode 100644 index 0000000..f021c04 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/deployment.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "my-nginx.fullname" . }} + labels: + {{- include "my-nginx.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "my-nginx.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "my-nginx.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "my-nginx.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/gateway.yaml b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/gateway.yaml new file mode 100644 index 0000000..b8317dd --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/gateway.yaml @@ -0,0 +1,27 @@ +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + name: {{ .Values.istio.publicHostname }} + namespace: {{ .Values.istio.namespace }} + labels: {{- include "my-nginx.labels" . | nindent 4 }} +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + tls: + httpsRedirect: true + hosts: + - '{{ .Values.istio.publicHostname }}.{{ .Values.istio.publicDomain }}' + - port: + number: 443 + name: https + protocol: HTTPS + tls: + mode: {{ .Values.istio.tlsMode }} + credentialName: "{{ .Values.istio.publicHostname }}-cert" + hosts: + - '{{ .Values.istio.publicHostname }}.{{ .Values.istio.publicDomain }}' diff --git a/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/hpa.yaml b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/hpa.yaml new file mode 100644 index 0000000..426cf2c --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "my-nginx.fullname" . }} + labels: + {{- include "my-nginx.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "my-nginx.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/service.yaml b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/service.yaml new file mode 100644 index 0000000..02ef87e --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "my-nginx.fullname" . }} + labels: + {{- include "my-nginx.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "my-nginx.selectorLabels" . | nindent 4 }} diff --git a/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/serviceaccount.yaml b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/serviceaccount.yaml new file mode 100644 index 0000000..d9e4780 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "my-nginx.serviceAccountName" . }} + labels: + {{- include "my-nginx.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/virtualservice.yaml b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/virtualservice.yaml new file mode 100644 index 0000000..5f54394 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/charts/my-nginx/templates/virtualservice.yaml @@ -0,0 +1,25 @@ +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: {{ .Values.istio.publicHostname }}-routes + namespace: {{ .Values.istio.namespace }} + labels: {{- include "my-nginx.labels" . | nindent 4 }} +spec: + gateways: + - '{{ .Values.istio.publicHostname }}' + hosts: + - '{{ .Values.istio.publicHostname }}.{{ .Values.istio.publicDomain }}' + http: + - name: "nginx-server-route" + match: + - uri: + prefix: "/" + headers: + request: + set: + X-Forwarded-Port: "443" + route: + - destination: + host: {{ include "my-nginx.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + port: + number: 80 diff --git a/examples/established-cluster-examples/sample-istio/charts/my-nginx/values.yaml b/examples/established-cluster-examples/sample-istio/charts/my-nginx/values.yaml new file mode 100644 index 0000000..a76ede5 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/charts/my-nginx/values.yaml @@ -0,0 +1,75 @@ +# Default values for my-nginx. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
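+# Any of these values can be overridden per release; main.tf in this example +# effectively runs the equivalent of the following (the registry and domain +# shown are placeholders, derived in main.tf from the ECR repo map and +# var.domain): +#   helm upgrade --install app ./charts/my-nginx \ +#     --set image.registry=<account>.dkr.ecr.<region>.amazonaws.com/eks/<cluster-name> \ +#     --set istio.publicDomain=<public-domain>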
+ +replicaCount: 2 + +image: + registry: "" + repository: nginx + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "1.21" + +istio: + namespace: istio-system + issuerType: ClusterIssuer + issuer: clusterissuer + publicHostname: nginx + publicDomain: adsd-cumulus-dev.dev.dice.census.gov + tlsMode: SIMPLE + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/examples/established-cluster-examples/sample-istio/copy_image.sh b/examples/established-cluster-examples/sample-istio/copy_image.sh new file mode 120000 index 0000000..534e41c --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/copy_image.sh @@ -0,0 +1 @@ +../common-services/copy_image.sh \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-istio/copy_images.tf b/examples/established-cluster-examples/sample-istio/copy_images.tf new file mode 100644 index 0000000..7bfa0df --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/copy_images.tf @@ -0,0 +1,37 @@ +data "aws_ecr_authorization_token" "token" {} + +locals { + account_id = data.aws_caller_identity.current.account_id + repo_parent_name = format("eks/%v", var.cluster_name) + + account_ecr = format("%v.dkr.ecr.%v.amazonaws.com/%v", local.account_id, var.region, local.repo_parent_name) + + images = [ + # nginx related images: + { + name = "nginx" + full_name = "nginx/nginx" +# image = "public.ecr.aws/nginx/nginx" +# tag = var.nginx_tag + tag = "1.21" + }, + ] + image_repos = { for image in local.images : image.name => format("%v/%v", local.account_ecr, image.name) } +} + +resource "null_resource" "copy_images" { + for_each = { for image in local.images : image.name => image } + + provisioner "local-exec" { + command = "${path.module}/copy_image.sh" + environment = { + AWS_PROFILE = var.profile + AWS_REGION = local.region + SOURCE_IMAGE = format("%v:%v", each.value.image, each.value.tag) + DESTINATION_IMAGE = format("%v/%v:%v", local.account_ecr, each.value.name, each.value.tag) + DESTINATION_USERNAME = data.aws_ecr_authorization_token.token.user_name + DESTINATION_PASSWORD = data.aws_ecr_authorization_token.token.password + } + } +} + diff --git a/examples/established-cluster-examples/sample-istio/data.eks.tf 
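(One wrinkle in the copy_images.tf just above: its `images` entries define only `name`, `full_name`, and `tag`, yet `SOURCE_IMAGE` is built from `each.value.image`, which survives only in the commented-out line, so the plan should fail with an unknown-attribute error. Judging by the commented `public.ecr.aws/nginx/nginx` reference, the intended expression is presumably something like `format("public.ecr.aws/%v:%v", each.value.full_name, each.value.tag)`.)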
b/examples/established-cluster-examples/sample-istio/data.eks.tf new file mode 100644 index 0000000..870e8c6 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/data.eks.tf @@ -0,0 +1,15 @@ +data "aws_eks_cluster" "cluster" { + name = var.cluster_name +} + +data "aws_eks_cluster_auth" "cluster" { + name = var.cluster_name +} + +locals { + aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster + # for main.tf + # aws_eks_cluster = aws_eks_cluster.eks_cluster + # for all subdirectories + aws_eks_cluster = data.aws_eks_cluster.cluster +} diff --git a/examples/established-cluster-examples/sample-istio/ecr-login.txt b/examples/established-cluster-examples/sample-istio/ecr-login.txt new file mode 100644 index 0000000..d490ee0 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/ecr-login.txt @@ -0,0 +1 @@ +eyJwYXlsb2FkIjoibEltSnJXV2JFMU1VYWNrUjBuN3A1bVJRTDFvTUNZaloxMEx2TEN5Rjl1a1IwSmlCbnR5VW0zWHZpTmhXR3h6c0NUMHhIelBvcC9DOWcvVUFTclQ0OUlVRDdjOVYyZEtuS2lHOUFRN2xJdysxUkNzSWg4aCt0MjZ2SjVQVmdIaHprWEZ5aTB1K2JvVTNwcGZqbGxTZjlzU3ZlUHpTZnV6U1dSV083NGgvcFAzb3JYMVl1MlZNNk95QUVZSFhBWXJvbnBHUms1cWJtUkIrallxK2NDaE9jKzhDWkhvRHlHazg3Nlk5Szc0NmpoR0I4eFVPd0dSU3pURFdhNnRDcVNCSlIxYUgveEFvb0N6QjNvYjRlQm1LR0oxeDdnYmdGMDRIS242S2NWZTNpZlNtVzhsRHBkVEZnNWtRUm0wTmZWS2N2c3R0ZkE2SVN4eWRaOURSeTlGSllUcjV3MThYZ0dxTWJ0VEJzc2RaaFh2UGdOUG53VEZhZmdWUE5TYkJ3V0YrbER3TmU5Tk1RRFcwSk5RZHR4aGpYL1lzdXdYdW1tNnFtR3FzZ09FU1A0eWNNWEhQL0t4RkVjZlU4enNlelVGY0NQMllFMGo2aTFvOTF2Q05qTnlrRy9qZ1pqeTZqelkvZ1ZaOHNkbkhud016VkFmdm5mZzI0MEZib2xOa0syMWZVY3JRNDlhd2V4NDh2UDJJMWZJSCtuZ0JvMTVIYytyVGYrSzZiNUxCdlZpVC9CcC9CZUhiZlJyc2I0OEtMR0c2RXVIZkwvL1U0dE44ZW90MkhZYmZpMENDeE1HZDdDZlhaa2pNaW02ekpoOUQ3MXl2TU1UYTZNZzc2QnNIb3VkNmhGU1lPbEVnaFpOeUlQZ0JaMzVCa3FDK1V4RjlTL1QrNGhDcWxWN05iWDdJOXRQdjFpMGFGZHpPa1B1aGdoREV2THpja3FhUHlpL1g2UkpCb0ErcS9NT1c3a2FpMm10b1lzZUhJU0hES214a0p0WHZESWt2cnllZTZzT3FHZnNiVktFVy9ZbVpYdWpyNmRKakZmdWpnbXVsTFVwSUJmVDZhQSt1WTdad2JuQjJLWGNZU3kvOW9JdHBmZm8zRlA2UjZHemwwTE8yK1F0cVlSSHQrUlBzVjgreW05djk4R0xNK3pHRkpBaDgraHpRSVBldXF3anJuNVlBNFY0ckNOK0FwUnRYZlJORElXbENFVVM0Skc0dWdNYzNrMW9YeHoyelNtUTBlcWpKZkhydFhLWUoyaElCSDNNaHRZOXlrSS9peGNOQWlZK0ovdHB5NE5DQXU4N2lxd0NzVnVLOWRBZThEbzRxRncxM05jQ1FFLzdkSnJRbWpBcW9zREE2My9Ec29URGZUTlhMUk9zWGNuNlUwUXRqZitjK2pQb1QwNG5SRUE9PSIsImRhdGFrZXkiOiJBUUVCQUhod20wWWFJU0plUnRKbTVuMUc2dXFlZWtYdW9YWFBlNVVGY2U5UnE4LzE0d0FBQUg0d2ZBWUpLb1pJaHZjTkFRY0dvRzh3YlFJQkFEQm9CZ2txaGtpRzl3MEJCd0V3SGdZSllJWklBV1VEQkFFdU1CRUVERkJza2tmdEZRcWlvNXd6QkFJQkVJQTdvczRsU0t2TDVYbnkxYVdFaWdzTlVSU3hGY1V5TlN0SjMrNXVlL09SSTdwanhWNjJmbmRWODNtYWRrYXJKcVJRQUk4ZXdLR05hZkx3L1N3PSIsInZlcnNpb24iOiIyIiwidHlwZSI6IkRBVEFfS0VZIiwiZXhwaXJhdGlvbiI6MTYzMjE4MDI4N30= diff --git a/examples/established-cluster-examples/sample-istio/kubeconfig.tf b/examples/established-cluster-examples/sample-istio/kubeconfig.tf new file mode 100644 index 0000000..5e386f5 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/kubeconfig.tf @@ -0,0 +1,29 @@ +resource "null_resource" "kubeconfig" { + triggers = { + always_run = timestamp() + } + provisioner "local-exec" { + command = "which kubectl > /dev/null 2>&1; if [ $? 
!= 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" + } + provisioner "local-exec" { + command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'" + } + provisioner "local-exec" { + environment = { + AWS_PROFILE = var.profile + AWS_REGION = local.region + } + command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/kube.config" + } + depends_on = [data.aws_eks_cluster.cluster] +} + +#--- +# call it like +#--- +## provisioner "local-exec" { +## environment = { +## KUBECONFIG = "${path.root}/setup/kube.config" +## } +## command = "kubectli set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true" +## } diff --git a/examples/established-cluster-examples/sample-istio/locals.tf b/examples/established-cluster-examples/sample-istio/locals.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/locals.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/established-cluster-examples/sample-istio/main.tf b/examples/established-cluster-examples/sample-istio/main.tf new file mode 100644 index 0000000..947f0f3 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/main.tf @@ -0,0 +1,49 @@ +# Sample application deployed behind istio. + +locals { + app_name = "my-nginx" +} + +resource "kubernetes_namespace" "app" { + metadata { + name = local.app_name + labels = { + istio-injection = "enabled" + } + } +} + +# Should be configured in DNS, but we're not there yet. +data "kubernetes_service" "istio" { + metadata { + name = "istio-ingressgateway" + namespace = "istio-system" + } +} + +locals { + domain = ( + (length(var.domain) > 0) ? + var.domain : + data.kubernetes_service.istio.status[0].load_balancer[0].ingress[0].hostname + ) +} + +resource "helm_release" "app" { + chart = "my-nginx" + name = "app" + namespace = kubernetes_namespace.app.metadata[0].name + repository = "${path.module}/charts/" + + timeout = 60 + +# use dirname() to get just the path without the image + set { + name = "image.registry" + value = dirname(local.image_repos["nginx"]) + } + set { + name = "istio.publicDomain" + value = local.domain + } +} diff --git a/examples/established-cluster-examples/sample-istio/prefixes.tf b/examples/established-cluster-examples/sample-istio/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-istio/providers.tf b/examples/established-cluster-examples/sample-istio/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/established-cluster-examples/sample-istio/setup/kube.config b/examples/established-cluster-examples/sample-istio/setup/kube.config new file mode 100644 index 0000000..9cc4c76 --- /dev/null +++ b/examples/established-cluster-examples/sample-istio/setup/kube.config @@ -0,0 +1,53 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeE1Ea3dNakl6TXpRME0xb1hEVE14TURnek1USXpNelEwTTFvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTC9rCkIvLzhlZmNVWDBrSk1URlVVVlM4MUxnY01FL1NxWWJNZTluQURYbEJRRTBJcHhrZTZIYTdiNU1WbjQ4VWNlL20KTjF3WHE4T0NkZnJJdnJGTEI1dHdGNzlRakpsOXIyUTZ3M1hESjVpQTBLZW1lOHd6OWhyU2UxYnRFeEdFeHZaUQp4bDJaZ1pEVzZTbTRKOVBRSEVMWDZkRkF4MXpTdDNWUHQ2NkNBOXFTTC82V2pjVW1pSWE0cGx6a00vOUhpR3BCCm0zMm14Qm9EUVdRNE9nVjRLKzlYMm56UEoyQUN1V1JaUXZuRkhoUkhhejAvVENuN3A2Y1lYZngzTkdJZlRrSUIKWWNXUXJzek9mRUVNTGVCYUhsUGpmRlVXUmNkNjdWUEJWZHRhaDU2RGpSRGJ4SE1oTWJMblMvR0lVR2J5UnpBTwpHZ01tcG9pc3l6NXI1SE1UOHprQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZCczdZNmJscHN3WlZtK3FybHZtV3dZRmYxaHVNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFBbFJZcEpWRGxIOHB1d2xaWTdyeWY4SFE5KzAxKzNSV3FVd3lXcVoxQitaRjZWQWhnVQpRZHVDRXlYWW5JTlBEQlhOYlE4dUEzYmFnS0tPZVl5Q2JFem9nREg0U1A0dU95ZFNwdDQ2UEVuenhvV0FHUVFFCllyaGRka2h3UzhHQy9KQzBCRWZ3cGxTNnMyVjdJOE5lTXJONTNuZkkyWHRsbmVjRTNPNmhjZ0taNWJBKzhMMVkKMm5keXh6SkNXRE1TbXptbGRYUXg4MUZxNGgxRWcvN0FJWkdQcFY3Z0R4eWtqSDhXOXp5WUp5eWxJQWRtWXlENApFa3JlaGp5bDhUM1MvN0lMRld3VGJZUXFubWE2aFduR2dXdWxUVE5mQ1lUNmxZNVFsWVU5MitvMFBSODZSdkw3CjJHcVBOZ0p6czZ2WHBKUzdhTTkxUlhHOTZiTitOVzcwMTV0OAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://BD631FD6BAD1D94B6C702C3842043423.gr7.us-east-1.eks.amazonaws.com + name: arn:aws:eks:us-east-1:079788916859:cluster/test3 +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeE1Ea3lOekUwTlRRek1sb1hEVE14TURreU5URTBOVFF6TWxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTkNLCm9USTRrTVEzSWlXaUlhODRFUzFNQ3E0ZEFGVjRHTzhEakU2c21jY0tGbDdPOEdJeHhvQnJ2UndSRmtKWTJrSFIKV24rczVyT1VZV2R6VXU0R1hqTVhCSEJ4MVlSYzNoSEkxNTd0cEpxVmY5ODBYRlNrSHZHQkVyN2wyaXAvWWpndQoyaGdrZ2I1Z1QvTzlRRHh3dDQ4SXEzYTUzTDVaRVJqTGlDeUdlK2ZSNDFGb3JnQW1IenZ5OWNMMjNRM1hlRXZLClVmY29NRXE2aDM5Ynl0c3lFNTgzSjBHRGZpZXVFaDY4Yjg3bndYRmlvSUVKY29ra3pJdXczMlVDYTMrRUE3aGcKUk1tNkxOYVdERTZ5c1dPWk5FSWdHRHlkWnNBcEtSQVV2ZFdGcGFyVXJYZHFSY1FCVmpKMGlXRmtWVGNoR2p0SAp0UFBJZWIwZXBMcDV1d0s3WjNjQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZNN3U4cTZsWTJRTE9Rd2N1bW13UTZYeFdOUDZNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFBbkY2cGxNS3VXaWxPcmZCMDFSeUkxaUc5ajNCY1hIRVZWeFl1R2UwQnJaeDRTMXMxawp4RnZuRW5KNmtPUWc1Rmg4UWdsMXUxLzFpSHR2akF3LzYzYjMzb1N0NC92YitBM2dHazdXMXN3SENSSnAveDFSCi9YaCt6VHBCNUx6Y0hzaGs2bkFQRDJzUUFvalBGOW1iVkRCTVVBSHZpLzV3WDZwSHNpS3M1TnZPOEdtbjQzS1QKS0ZzVFJCNS9sWWdDaEx1S2hTQWg5cERLTExDdmd4L1VnQXpDZ09nS01YTEh1ZXpTV1Q2VGFVOFJsMy9EMHNwRgpVMEcxUUFiWTBxM1psRnVUOUhjYnV6K25pRnN3V2c5TjJJSVByWTJKQWxEZWJvRmpXNHhueWMvM1hWaEF2bkpaClJ3YlpGeVB3VG4vSStPREFCL1RyL3pSQW90OUc5QnBEdEV3KwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://6A8807DB18F49B78F7B884F4F1915640.sk1.us-gov-east-1.eks.amazonaws.com + name: arn:aws-us-gov:eks:us-gov-east-1:252960665057:cluster/adsd-cumulus-dev +contexts: +- context: + cluster: arn:aws:eks:us-east-1:079788916859:cluster/test3 + user: arn:aws:eks:us-east-1:079788916859:cluster/test3 + name: arn:aws:eks:us-east-1:079788916859:cluster/test3 +- context: + cluster: arn:aws-us-gov:eks:us-gov-east-1:252960665057:cluster/adsd-cumulus-dev + user: 
arn:aws-us-gov:eks:us-gov-east-1:252960665057:cluster/adsd-cumulus-dev
+  name: arn:aws-us-gov:eks:us-gov-east-1:252960665057:cluster/adsd-cumulus-dev
+current-context: arn:aws-us-gov:eks:us-gov-east-1:252960665057:cluster/adsd-cumulus-dev
+kind: Config
+preferences: {}
+users:
+- name: arn:aws:eks:us-east-1:079788916859:cluster/test3
+  user:
+    exec:
+      apiVersion: client.authentication.k8s.io/v1alpha1
+      args:
+      - --region
+      - us-east-1
+      - eks
+      - get-token
+      - --cluster-name
+      - test3
+      command: aws
+      env:
+      - name: AWS_PROFILE
+        value: 079788916859-do2-cat
+- name: arn:aws-us-gov:eks:us-gov-east-1:252960665057:cluster/adsd-cumulus-dev
+  user:
+    exec:
+      apiVersion: client.authentication.k8s.io/v1alpha1
+      args:
+      - --region
+      - us-gov-east-1
+      - eks
+      - get-token
+      - --cluster-name
+      - adsd-cumulus-dev
+      command: aws
+      env:
+      - name: AWS_PROFILE
+        value: 252960665057-ma6-gov
diff --git a/examples/established-cluster-examples/sample-istio/variables.eks.tf b/examples/established-cluster-examples/sample-istio/variables.eks.tf
new file mode 120000
index 0000000..7dd95db
--- /dev/null
+++ b/examples/established-cluster-examples/sample-istio/variables.eks.tf
@@ -0,0 +1 @@
+../variables.eks.tf
\ No newline at end of file
diff --git a/examples/established-cluster-examples/sample-istio/variables.sample.tf b/examples/established-cluster-examples/sample-istio/variables.sample.tf
new file mode 100644
index 0000000..624b1a2
--- /dev/null
+++ b/examples/established-cluster-examples/sample-istio/variables.sample.tf
@@ -0,0 +1,5 @@
+variable "nginx_tag" {
+  description = "The version of the nginx image to copy."
+  type        = string
+  default     = "1.21"
+}
diff --git a/examples/established-cluster-examples/sample-istio/version.tf b/examples/established-cluster-examples/sample-istio/version.tf
new file mode 120000
index 0000000..061373c
--- /dev/null
+++ b/examples/established-cluster-examples/sample-istio/version.tf
@@ -0,0 +1 @@
+../version.tf
\ No newline at end of file
diff --git a/examples/established-cluster-examples/sample-nlb/README.md b/examples/established-cluster-examples/sample-nlb/README.md
new file mode 100644
index 0000000..f2d481b
--- /dev/null
+++ b/examples/established-cluster-examples/sample-nlb/README.md
@@ -0,0 +1,57 @@
+# Sample app
+
+```script
+% kubectl -n sample describe service sample
+Name:                     sample
+Namespace:                sample
+Labels:                   <none>
+Annotations:              service.beta.kubernetes.io/aws-load-balancer-internal: true
+                          service.beta.kubernetes.io/aws-load-balancer-scheme: internal
+Selector:                 app=sample
+Type:                     LoadBalancer
+IP Families:              <none>
+IP:                       172.20.93.208
+IPs:                      172.20.93.208
+LoadBalancer Ingress:     internal-a717e6e95397247fbb6ec512e17bd4f4-227943433.us-east-1.elb.amazonaws.com
+Port:                     http  8080/TCP
+TargetPort:               80/TCP
+NodePort:                 http  31758/TCP
+Endpoints:                10.194.24.196:80,10.194.25.125:80,10.194.26.98:80
+Session Affinity:         None
+External Traffic Policy:  Cluster
+Events:
+  Type    Reason                Age   From                Message
+  ----    ------                ----  ----                -------
+  Normal  EnsuringLoadBalancer  34s   service-controller  Ensuring load balancer
+  Normal  EnsuredLoadBalancer   31s   service-controller  Ensured load balancer
+```
+
+https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
+https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/service/annotations/
+
+The docs say to use the scheme annotation; it doesn't actually work here.
The `internal = "true"` annotation is needed instead:
+
+```hcl
+  annotations = {
+# does not work with eks
+#   "service.beta.kubernetes.io/aws-load-balancer-scheme" = "internal"
+    "service.beta.kubernetes.io/aws-load-balancer-internal" = "true"
+  }
+```
+
+The subnets must be tagged with the `internal-elb` role and with `cluster/{cluster_name}`:
+
+```hcl
+private_subnets = [
+  {
+    base_cidr = "10.194.24.0/22"
+    label     = "apps"
+    bits      = 2
+    private   = true
+    tags = {
+      "kubernetes.io/cluster/cat-ced-edde-eks-cluster" = "shared"
+      "kubernetes.io/cluster/test1"                    = "shared"
+      "kubernetes.io/role/internal-elb"                = 1
+    }
+  },
+]
+```
diff --git a/examples/established-cluster-examples/sample-nlb/copy_image.sh b/examples/established-cluster-examples/sample-nlb/copy_image.sh
new file mode 120000
index 0000000..cc5c083
--- /dev/null
+++ b/examples/established-cluster-examples/sample-nlb/copy_image.sh
@@ -0,0 +1 @@
+../efs/copy_image.sh
\ No newline at end of file
diff --git a/examples/established-cluster-examples/sample-nlb/copy_images.tf b/examples/established-cluster-examples/sample-nlb/copy_images.tf
new file mode 100644
index 0000000..bfabb83
--- /dev/null
+++ b/examples/established-cluster-examples/sample-nlb/copy_images.tf
@@ -0,0 +1,48 @@
+data "aws_ecr_authorization_token" "token" {}
+
+locals {
+  repo_parent_name = format("eks/%v", var.cluster_name)
+  images = [
+    {
+      image = "nginx/nginx"
+      tag   = var.nginx_tag
+    },
+  ]
+}
+
+resource "aws_ecr_repository" "repository" {
+  for_each = { for image in local.images : image.image => image }
+
+  name                 = format("%v/%v/%v", local.repo_parent_name, local.app_name, each.value.image)
+  image_tag_mutability = "IMMUTABLE"
+
+  image_scanning_configuration {
+    scan_on_push = true
+  }
+
+  encryption_configuration {
+    encryption_type = "KMS"
+  }
+
+  tags = merge(
+    #local.common_tags,
+    #local.base_tags,
+    #var.application_tags,
+    tomap({ "Name" = format("ecr-eks-%v-%v", var.cluster_name, each.value.image) }),
+  )
+}
+
+resource "null_resource" "copy_images" {
+  for_each = { for image in local.images : image.image => image }
+
+  provisioner "local-exec" {
+    command = "${path.module}/copy_image.sh"
+    environment = {
+      SOURCE_IMAGE         = format("%v/%v:%v", local.public_reg, each.value.image, each.value.tag)
+      DESTINATION_IMAGE    = format("%v:%v", aws_ecr_repository.repository[each.key].repository_url, each.value.tag)
+      DESTINATION_USERNAME = data.aws_ecr_authorization_token.token.user_name
+      DESTINATION_PASSWORD = data.aws_ecr_authorization_token.token.password
+    }
+  }
+}
+
diff --git a/examples/established-cluster-examples/sample-nlb/data.eks.tf b/examples/established-cluster-examples/sample-nlb/data.eks.tf
new file mode 100644
index 0000000..4cebea9
--- /dev/null
+++ b/examples/established-cluster-examples/sample-nlb/data.eks.tf
@@ -0,0 +1,15 @@
+data "aws_eks_cluster" "cluster" {
+  name = var.cluster_name
+}
+
+data "aws_eks_cluster_auth" "cluster" {
+  name = var.cluster_name
+}
+
+locals {
+  aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster
+# for main.tf
+# aws_eks_cluster = aws_eks_cluster.eks_cluster
+# for all subdirectories
+  aws_eks_cluster = data.aws_eks_cluster.cluster
+}
diff --git a/examples/established-cluster-examples/sample-nlb/ecr.tf b/examples/established-cluster-examples/sample-nlb/ecr.tf
new file mode 120000
index 0000000..654d0cc
--- /dev/null
+++ b/examples/established-cluster-examples/sample-nlb/ecr.tf
@@ -0,0 +1 @@
+../efs/ecr.tf
\ No newline at end of file
diff --git a/examples/established-cluster-examples/sample-nlb/locals.tf b/examples/established-cluster-examples/sample-nlb/locals.tf
new file mode 100644
index 0000000..b7b1696
--- /dev/null
+++ b/examples/established-cluster-examples/sample-nlb/locals.tf
@@ -0,0 +1,4 @@
+locals {
+  region = var.region
+}
+
diff --git a/examples/established-cluster-examples/sample-nlb/main.tf b/examples/established-cluster-examples/sample-nlb/main.tf
new file mode 100644
index 0000000..9024fc5
--- /dev/null
+++ b/examples/established-cluster-examples/sample-nlb/main.tf
@@ -0,0 +1,104 @@
+# This is a sample application that deploys an nginx pod and a load balancer
+# service as a simple verification that the network routing is working
+# properly.
+
+locals {
+  app_name = "sample-nlb"
+}
+
+resource "kubernetes_namespace" "app" {
+  metadata {
+    name = local.app_name
+  }
+}
+
+resource "kubernetes_service" "app" {
+  metadata {
+    name      = local.app_name
+    namespace = kubernetes_namespace.app.metadata[0].name
+
+    annotations = {
+#     "service.beta.kubernetes.io/aws-load-balancer-scheme" = "internal"
+      "service.beta.kubernetes.io/aws-load-balancer-internal" = "true"
+      "service.beta.kubernetes.io/aws-load-balancer-type"     = "nlb"
+    }
+  }
+  spec {
+    selector = {
+      app = kubernetes_deployment.app.metadata[0].labels.app
+    }
+    port {
+      name        = "http"
+      port        = 8080
+      target_port = 80
+    }
+
+    type = "LoadBalancer"
+  }
+
+  timeouts {
+    create = "5m"
+  }
+}
+
+resource "kubernetes_deployment" "app" {
+  depends_on = [null_resource.copy_images]
+  metadata {
+    name      = local.app_name
+    namespace = kubernetes_namespace.app.metadata[0].name
+    labels = {
+      app = local.app_name
+    }
+  }
+  spec {
+    replicas = 3
+
+    selector {
+      match_labels = {
+        app = local.app_name
+      }
+    }
+
+    template {
+      metadata {
+        labels = {
+          app = local.app_name
+        }
+      }
+
+      spec {
+        container {
+          image = format("%v:%v", aws_ecr_repository.repository["nginx/nginx"].repository_url, var.nginx_tag)
+          name  = local.app_name
+
+          resources {
+            limits = {
+              cpu    = "0.5"
+              memory = "512Mi"
+            }
+            requests = {
+              cpu    = "100m"
+              memory = "50Mi"
+            }
+          }
+
+          liveness_probe {
+            http_get {
+              path = "/"
+              port = 80
+            }
+
+            initial_delay_seconds = 30
+            period_seconds        = 60
+          }
+        }
+      }
+    }
+  }
+
+  timeouts {
+    create = "2m"
+    update = "1m"
+    delete = "2m"
+  }
+}
diff --git a/examples/established-cluster-examples/sample-nlb/prefixes.tf b/examples/established-cluster-examples/sample-nlb/prefixes.tf
new file mode 120000
index 0000000..e0bf5ad
--- /dev/null
+++ b/examples/established-cluster-examples/sample-nlb/prefixes.tf
@@ -0,0 +1 @@
+../prefixes.tf
\ No newline at end of file
diff --git a/examples/established-cluster-examples/sample-nlb/providers.tf b/examples/established-cluster-examples/sample-nlb/providers.tf
new file mode 120000
index 0000000..7244d01
--- /dev/null
+++ b/examples/established-cluster-examples/sample-nlb/providers.tf
@@ -0,0 +1 @@
+../providers.tf
\ No newline at end of file
diff --git a/examples/established-cluster-examples/sample-nlb/variables.sample.tf b/examples/established-cluster-examples/sample-nlb/variables.sample.tf
new file mode 100644
index 0000000..e322761
--- /dev/null
+++ b/examples/established-cluster-examples/sample-nlb/variables.sample.tf
@@ -0,0 +1,12 @@
+variable "cluster_name" {
+  description = "The name of the EKS cluster into which the sample application is to be installed."
+  type        = string
+}
+
+# See https://gallery.ecr.aws/nginx/nginx for the latest tag version.
+variable "nginx_tag" {
+  description = "The version of the nginx image to copy to the account registry."
+  type        = string
+  default     = "1.21"
+}
+
diff --git a/examples/established-cluster-examples/sample-nlb/variables.vpc.tf b/examples/established-cluster-examples/sample-nlb/variables.vpc.tf
new file mode 120000
index 0000000..f672f33
--- /dev/null
+++ b/examples/established-cluster-examples/sample-nlb/variables.vpc.tf
@@ -0,0 +1 @@
+../variables.vpc.tf
\ No newline at end of file
diff --git a/examples/established-cluster-examples/sample-nlb/version.tf b/examples/established-cluster-examples/sample-nlb/version.tf
new file mode 120000
index 0000000..061373c
--- /dev/null
+++ b/examples/established-cluster-examples/sample-nlb/version.tf
@@ -0,0 +1 @@
+../version.tf
\ No newline at end of file
diff --git a/examples/full-cluster/.gitignore b/examples/full-cluster/.gitignore
new file mode 100644
index 0000000..f416fe8
--- /dev/null
+++ b/examples/full-cluster/.gitignore
@@ -0,0 +1,4 @@
+kube.config
+ecr-login.txt
+setup/ec2-ssh-eks-*
+!setup/ec2-ssh-eks-*.pub
diff --git a/examples/full-cluster/.terraform-docs.yml b/examples/full-cluster/.terraform-docs.yml
new file mode 100644
index 0000000..8391b9d
--- /dev/null
+++ b/examples/full-cluster/.terraform-docs.yml
@@ -0,0 +1,44 @@
+formatter: markdown table
+
+header-from: main.tf
+footer-from: ""
+
+sections:
+## hide: []
+  show:
+    - data-sources
+    - header
+    - footer
+    - inputs
+    - modules
+    - outputs
+    - providers
+    - requirements
+    - resources
+
+output:
+  file: README.md
+  mode: inject
+  template: |-
+    <!-- BEGIN_TF_DOCS -->
+    {{ .Content }}
+    <!-- END_TF_DOCS -->
+
+## output-values:
+##   enabled: false
+##   from: ""
+##
+## sort:
+##   enabled: true
+##   by: name
+##
+## settings:
+##   anchor: true
+##   color: true
+##   default: true
+##   description: false
+##   escape: true
+##   indent: 2
+##   required: true
+##   sensitive: true
+##   type: true
diff --git a/examples/full-cluster/OFF/empty/locals.tf b/examples/full-cluster/OFF/empty/locals.tf
new file mode 100644
index 0000000..b7b1696
--- /dev/null
+++ b/examples/full-cluster/OFF/empty/locals.tf
@@ -0,0 +1,4 @@
+locals {
+  region = var.region
+}
+
diff --git a/examples/full-cluster/OFF/empty/prefixes.tf b/examples/full-cluster/OFF/empty/prefixes.tf
new file mode 120000
index 0000000..e0bf5ad
--- /dev/null
+++ b/examples/full-cluster/OFF/empty/prefixes.tf
@@ -0,0 +1 @@
+../prefixes.tf
\ No newline at end of file
diff --git a/examples/full-cluster/OFF/empty/test.tf b/examples/full-cluster/OFF/empty/test.tf
new file mode 100644
index 0000000..96cd77c
--- /dev/null
+++ b/examples/full-cluster/OFF/empty/test.tf
@@ -0,0 +1,5 @@
+data "aws_ebs_default_kms_key" "current" {}
+
+data "aws_kms_key" "ebs_key" {
+  key_id = data.aws_ebs_default_kms_key.current.key_arn
+}
diff --git a/examples/full-cluster/OFF/empty/version.tf b/examples/full-cluster/OFF/empty/version.tf
new file mode 120000
index 0000000..061373c
--- /dev/null
+++ b/examples/full-cluster/OFF/empty/version.tf
@@ -0,0 +1 @@
+../version.tf
\ No newline at end of file
diff --git a/examples/full-cluster/README.md b/examples/full-cluster/README.md
index e69de29..5bb67ca 100644
--- a/examples/full-cluster/README.md
+++ b/examples/full-cluster/README.md
@@ -0,0 +1,419 @@
+# About
+
+This directory constructs the appropriate resources for an EKS cluster for ADSD Cumulus in the DICE-DEV environment.
+
+# Application Information
+
+* Application: {name of application}
+* Organization: {division}
+* Project: {project}
+* Point of Contact(s): {username list}
+* Creation Date: {yyyy-mm-dd}
+* References:
+  * Requirements: {url}
+  * Remedy Ticket: {number}
+  * Other: {url}
+* Related Configurations:
+  *
{directory-path}
+
+# Application Requirements
+
+# Terraform Directions
+
+There are a number of steps to end up with a cluster.
+
+1. From the main repository, in the same `vpc/{region}/vpc{number}` directory
+   1. [Tag subnets](#subnet-tagging) in the main repository (before creating the nodegroup)
+   1. [Copy variables.vpc.*](#copy-variable-settings) from the main repository in the same `vpc/{region}/vpc{number}`
+   1. Copy the [includes.d structure](#copy-includesd)
+1. In the submodule repository, in the `vpc/{region}/vpc{number}/apps/{clustername}` directory
+   1. Update `settings.auto.tfvars`
+   1. Initialize [Cluster Main](#initialize-cluster-main) directory
+   1. Create [policies](#policies)
+   1. Create [EC2 Keypair](#keypair-creation)
+   1. Finish [cluster setup](#cluster-creation)
+1. Setup [aws-auth](#setup-aws-auth)
+1. Setup [EFS](#setup-efs)
+
+## Post-Setup Tasks
+
+1. Connect DNS zone from on-prem to Route53 Resolvers with a forwarder
+
+## Subnet Tagging
+
+A tag needs to be added to the subnet(s) where the cluster will run. We haven't figured out yet how to incorporate this more
+automatically (though see the sketch at the end of this section).
+
+The file to update is `variables.subnets.auto.tfvars`, in this case `vpc/east/vpc3/variables.subnets.auto.tfvars`:
+
+```hcl
+private_subnets = [
+  { base_cidr = "10.188.18.0/23", label = "private-lb", bits = 2, private = true,
+    tags = { "kubernetes.io/role/internal-elb" = 1 }
+  },
+  { base_cidr = "10.188.17.0/24", label = "endpoints", bits = 2, private = true, tags = {} },
+  { base_cidr = "10.188.20.0/23", label = "db", bits = 2, private = true, tags = {} },
+  { base_cidr = "10.188.22.0/23", label = "apps", bits = 2, private = true, tags = {} },
+  { base_cidr = "10.188.24.0/21", label = "container", bits = 2, private = true,
+    tags = {
+      "kubernetes.io/cluster/org-project-env" = "shared"
+    },
+  }
+# space all used up
+]
+```
+
+We add the tag `"kubernetes.io/cluster/{cluster_name}" = "shared"` in order for the node groups to pick up the
+cluster subnets. This goes on the new `container` subnet.
+
+For creating a service which uses load balancers (ELB, ALB, or NLB), the `"kubernetes.io/role/internal-elb" = 1` tag is also needed.
+This is a single tag for all EKS clusters, not one per cluster, and it should apply to the subnet(s) used for load balancing.
+A separate set of subnets exists for load balancing, with a name including `private-lb`.
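+
+As a hedged sketch of one way to automate the tagging (the subnet ID and tag key here are hypothetical), the AWS provider's `aws_ec2_tag` resource can add a single tag to an already-existing subnet without editing the tfvars:
+
+```hcl
+# Tag an existing subnet in place; replace resource_id with a real
+# subnet ID (or a data source lookup).
+resource "aws_ec2_tag" "cluster_shared" {
+  resource_id = "subnet-0123456789abcdef0"
+  key         = "kubernetes.io/cluster/org-project-env"
+  value       = "shared"
+}
+```
+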
+
+## Copy Variable Settings
+
+We need the `variables.vpc.tf` and `variables.vpc.auto.tfvars` from the main repository. These are not to be modified in
+this submodule.
+
+```shell
+cd MAIN-REPOSITORY
+MAINTOP=$(git rev-parse --show-toplevel)
+cd applications/{APPNAME}
+cd vpc/{region}/vpc{number}
+for f in $(ls $MAINTOP/vpc/{region}/vpc{number}/variables.vpc*)
+do
+  cp $f ./
+done
+```
+
+Replace {region}, {number}, and {APPNAME} with the correct values.
+
+## Copy includes.d
+
+This makes a copy of the entire `MAIN/includes.d` structure in the submodule, for use as soft links to bring in
+application variables for tagging.
+
+```shell
+cd MAIN-REPOSITORY
+MAINTOP=$(git rev-parse --show-toplevel)
+cd applications/{APPNAME}
+rsync -avRWH $MAINTOP/./includes.d ./
+```
+
+Replace {APPNAME} with the correct value.
+
+## Initialize Cluster Main
+
+We need to set up the main directory for the cluster. Be sure `remote_state.yml` is correct. Then:
+
+```shell
+tf-directory-setup.py -l none
+tf-init
+```
+
+## Policies
+
+First, we have to create the two policies. The roles will not get created until they do.
+
+```shell
+TFTARGET=$(grep ^res.*iam_policy *.tf |awk '{print "-target=" $2 "." $3}'|sed -e 's/"//g')
+tf-plan $TFTARGET
+tf-apply $TFTARGET
+unset TFTARGET
+```
+
+## Keypair Creation
+
+We need to create the SSH key, which then allows the public key to be uploaded.
+
+```shell
+tf-plan -target=null_resource.generate_keypair
+tf-apply -target=null_resource.generate_keypair
+
+tf-plan -target=aws_key_pair.cluster_keypair
+tf-apply -target=aws_key_pair.cluster_keypair
+```
+
+## Cluster Creation
+
+Once created, we can run the rest of the code:
+
+```shell
+tf-plan
+tf-apply
+```
+
+Finalize by linking to the remote state file:
+
+```shell
+tf-directory-setup.py -l s3
+```
+
+## Setup aws-auth
+
+Be sure `remote_state.yml` is correct. Examine the `settings.aws-auth.tfvars` and replace any remote state references with the proper
+objects. There is at least one, a `rolearn`. You can get the remote state path with
+
+```shell
+grep ^data remote_state.*{clustername}.tf | awk '{print $1 "." $2 "." $3}' |sed -e 's/"//g'
+```
+
+Next, we set up the remote state files, link to the parent remote state, and initialize terraform.
+
+```shell
+tf-directory-setup.py -l none
+# should only be one file here
+ln -s ../remote_state.applications_apps-adsd-eks_vpc_east_vpc2_apps_*.tf .
+setup-new-directory.sh
+tf-init
+```
+
+Then, we can plan, apply, and finalize:
+
+```shell
+tf-plan
+tf-apply
+tf-directory-setup.py -l s3
+```
+
+## Setup EFS
+
+Be sure `remote_state.yml` is correct. Examine the `main.tf` and replace any remote state references with the proper
+objects. You can find where they are used:
+
+```console
+% grep data.terraform_remote_state *.tf
+main.tf:  vpc_id = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_vpc_id
+main.tf:  subnet_ids = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_subnet_ids
+main.tf:  cluster_worker_sg_id = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_worker_sg_id
+main.tf:  oidc_provider_url = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.oidc_provider_url
+main.tf:  oidc_provider_arn = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.oidc_provider_arn
+```
+
+Find the value to replace these with:
+
+```shell
+grep ^data remote_state.*{clustername}.tf | awk '{print $1 "." $2 "." $3}' |sed -e 's/"//g'
+```
+
+Next, we set up the remote state files, link to the parent remote state, and initialize terraform.
+
+```shell
+tf-directory-setup.py -l none
+# should only be one file here
+ln -s ../remote_state.applications_apps-adsd-eks_vpc_east_vpc2_apps_*.tf .
+setup-new-directory.sh
+```
+
+Then, we have to create the policies. The roles will not get created until they do.
+
+```shell
+TFTARGET=$(grep ^res.*iam_policy *.tf |awk '{print "-target=" $2 "." $3}'|sed -e 's/"//g')
+tf-plan $TFTARGET
+tf-apply $TFTARGET
+unset TFTARGET
+```
+
+Finally, you can apply the rest:
+
+```shell
+tf-plan
+tf-apply
+```
+
+## Common Services
+### Certificate Authority
+
+Set the download to `false`:
+
+```hcl
+# ca-cert.tf
+  ca_cert_download = false
+```
+
+Do the first apply, which generates the key and csr. You'll then need to submit the CSR (directions are generated).
+
+```shell
+tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g')
+```
+
+
+```shell
+# terraform taint null_resource.ca_cert[0]
+# # (wait for submitted cert to be ready)
+tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g')
+tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g')
+```
+
+### Rest of Setup
+
+```shell
+tf-plan
+tf-apply
+tf-directory-setup.py -l s3
+```
+
+## Access to the cluster
+
+There are two ways to access the cluster. One is from the AWS Console and the other is via an IAM account or role.
+
+Cluster access via the console is found in the EKS section, under *clusters*.
+
+For IAM access, one must have IAM account credentials configured in `$HOME/.aws/credentials` and `$HOME/.aws/config`. [Here](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html)
+are the docs, and we have an example below. Region is important; otherwise it defaults to `us-gov-west-1` and the STS call will fail.
+
+```script
+# $HOME/.aws/credentials
+[252960665057-ma6-gov]
+aws_access_key_id = ABCD1234...
+aws_secret_access_key = abcd5678...
+
+# $HOME/.aws/config
+[profile 252960665057-ma6-gov-eks-org-project-env]
+source_profile = 252960665057-ma6-gov
+region = us-gov-east-1
+role_arn = arn:aws-us-gov:iam::252960665057:role/r-eks-org-project-env-cluster-admin
+role_session_name = badra001
+```
+
+With this configuration, using the profile `252960665057-ma6-gov` gives you the normal IAM access:
+
+```console
+% aws --profile 252960665057-ma6-gov sts get-caller-identity
+{
+    "UserId": "AIDATVZNBNXQ5UPHMBGPY",
+    "Account": "252960665057",
+    "Arn": "arn:aws-us-gov:iam::252960665057:user/a-badra001"
+}
+```
+
+Using the other profile will use the source profile (which has to have permission to assume the role), the role arn, and a session
+name mapping it back to your Census username (JBID).
+
+```console
+% aws --profile 252960665057-ma6-gov-eks-org-project-env sts get-caller-identity
+{
+    "UserId": "AROATVZNBNXQ7AV7W2ISZ:badra001",
+    "Account": "252960665057",
+    "Arn": "arn:aws-us-gov:sts::252960665057:assumed-role/r-eks-org-project-env-cluster-admin/badra001"
+}
+```
+
+-----
+OLD LAB SETUP
+-----
+
+# Cluster Setup
+
+## Download Configuration
+
+Now that the cluster is created, we need the `kubectl` command and a downloaded configuration.
+
+* get [kubectl](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html)
+
+```console
+% aws eks --profile $(get-profile) --region $(get-region) update-kubeconfig --name test2 --kubeconfig ./test2.kube.config
+Added new context arn:aws:eks:us-east-1:079788916859:cluster/test2 to /data/git-repos/terraform/079788916859-do2-cat_apps-adsd-eks/vpc/east-1/vpc4/apps/eks-test2/test2.kube.config
+% export KUBECONFIG=$(pwd)/test2.kube.config
+% kubectl get nodes
+NAME                            STATUS   ROLES    AGE   VERSION
+ip-10-194-24-49.ec2.internal    Ready    <none>   24m   v1.20.4-eks-6b7464
+ip-10-194-24-90.ec2.internal    Ready    <none>   24m   v1.20.4-eks-6b7464
+ip-10-194-25-120.ec2.internal   Ready    <none>   24m   v1.20.4-eks-6b7464
+ip-10-194-26-252.ec2.internal   Ready    <none>   24m   v1.20.4-eks-6b7464
+```
+
+## Authentication
+
+### Automated
+
+This is in the `aws-auth` subdirectory.
+
+```shell
+cd aws-auth
+tf-init
+tf-plan
+tf-apply
+```
+
+### Manual
+
+To allow users and roles to manipulate the cluster, we add to the mapRoles or mapUsers sections.
+
+```shell
+kubectl edit -n kube-system configmap/aws-auth
+```
+
+Add sections for `mapRoles`:
+
+```yaml
+  mapRoles: |
+    - rolearn: arn:aws:iam::079788916859:role/r-inf-cloud-admin
+      username: system:node:{{EC2PrivateDNSName}}
+      groups:
+        - system:bootstrappers
+        - system:nodes
+        - eks-console-dashboard-full-access-group
+```
+
+Add sections for `mapUsers`:
+
+```yaml
+  mapUsers: |
+    - userarn: arn:aws:iam::079788916859:user/u-zawac002
+      username: admin
+      groups:
+        - system:masters
+```
+
+We will likely want to do this through templating.
+
+* users
+  * arn:aws:iam::079788916859:user/u-badra001
+  * arn:aws:iam::079788916859:user/u-ashle001
+  * arn:aws:iam::079788916859:user/u-mcgin314
+  * arn:aws:iam::079788916859:user/u-sall0002
+  * arn:aws:iam::079788916859:user/u-zawac002
+* roles
+  * arn:aws:iam::079788916859:role/r-inf-cloud-admin
+  * arn:aws:iam::079788916859:role/r-adsd-cumulus
+  * arn:aws:iam::079788916859:role/r-adsd-eks
+  * arn:aws:iam::079788916859:role/r-adsd-tools
+
+## Adding Cluster Roles for AWS Console
+
+To allow [console access](https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml), we need these steps.
+
+It requires the cluster to be up and the `{clustername}.kube.config` file to exist, along with the environment variable pointing to it.
+
+### Automated
+
+This applies just the full-access cluster role, as the restricted one needs additional configuration.
+
+```shell
+tf-apply -target=null_resource.apply_cluster_roles
+```
+
+### Manual
+
+```shell
+curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml
+curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml
+```
+
+For full console access, we'll use the first one.
+
+```console
+% kubectl apply -f eks-console-full-access.yaml
+clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole created
+clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding created
+```
+
+
+# Details
+
+
+{{ .Content }}
+
diff --git a/examples/full-cluster/ROLES.md b/examples/full-cluster/ROLES.md
new file mode 100644
index 0000000..3880590
--- /dev/null
+++ b/examples/full-cluster/ROLES.md
@@ -0,0 +1,119 @@
+# Roles
+
+There are several types of roles we handle within the EKS cluster.
+
+1. IAM Role for Service Account (IRSA)
+These roles involve an IAM role with a formatted name of r-eks-{cluster}-irsa__{k8snamespace}__{k8suser}. This will
+grant appropriate IAM permissions to a pod. It includes specific conditions for the local OIDC provider mapping to
+system:serviceaccount:{k8snamespace}:{k8suser}. This matters because, without IRSA, the pod inherits the permissions
+of the node group, which grants far too much access to the running pods. These are not mapped into the ConfigMap aws-auth.
+
+A default:default role will exist which grants little to no AWS permissions.
+
+1. Cluster Admin Role
+This role is used for cluster administration. It is of the form r-eks-{cluster}-cluster-admin. It has read access to the
+[EKS Console](https://console.amazonaws-us-gov.com/eks/home).
It has:
+* access to read and write ECR for the specific repositories used for the cluster at /eks/{clustername}
+* access to the EKS API for the cluster
+* can download the kube.config file
+* is mapped with the ConfigMap aws-auth into k8suser admin and k8sgroup system:masters
+* permissions to update the node groups (via cli)
+* others as discovered
+
+Users will use this role through STS:AssumeRole, either with the console or the CLI.
+
+1. Additional Application Roles
+These will be for granting access to clusterroles via namespace and k8suser to IAM or SAML users. They will take the form
+r-eks-{cluster}-{name}, where name should reflect some portion of the namespace and purpose and must not collide with an
+existing role. These will typically not need any AWS access beyond update-kubeconfig or get-token to obtain
+the configuration file. These will require a clusterrole and a clusterrolebinding, and will need a username to go along with them.
+See [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) and [here](https://aws.amazon.com/premiumsupport/knowledge-center/eks-iam-permissions-namespaces/)
+for details about this. The configuration file (yaml) to create this will be stored in GitHub, and ideally it will be created
+through Terraform to make it easy to add these as needed.
+
+Users will use this role through STS:AssumeRole, either with the console or the CLI.
+
+## IRSA Roles
+
+```hcl
+  condition {
+    test     = "StringEquals"
+    variable = "${local.oidc_provider_url}:sub"
+    values   = ["system:serviceaccount:${local.app2_namespace}:${local.app2_name}"]
+  }
+```
+
+* irsa-roles.aws-cli.tf
+* irsa-roles.cumulus.tf
+* irsa-roles.jenkins.tf
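+
+The condition above is the heart of IRSA. As a hedged, minimal sketch (resource names are hypothetical; the real definitions live in the irsa-roles.*.tf files listed above), the trust policy federates the cluster's OIDC provider and scopes it to a single service account:
+
+```hcl
+# Trust policy allowing only one service account to assume the role.
+data "aws_iam_policy_document" "irsa_trust" {
+  statement {
+    actions = ["sts:AssumeRoleWithWebIdentity"]
+
+    principals {
+      type        = "Federated"
+      identifiers = [local.oidc_provider_arn]
+    }
+
+    condition {
+      test     = "StringEquals"
+      variable = "${local.oidc_provider_url}:sub"
+      values   = ["system:serviceaccount:${local.app2_namespace}:${local.app2_name}"]
+    }
+  }
+}
+
+resource "aws_iam_role" "irsa" {
+  name               = "r-eks-${var.cluster_name}-irsa__${local.app2_namespace}__${local.app2_name}"
+  assume_role_policy = data.aws_iam_policy_document.irsa_trust.json
+}
+```
+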
+## Cluster Admin Role
+
+## Additional Application Roles
+
+## cumulus-dba
+## cumulus-deployer
+## cicd-deployer
+
+## jenkins
+
+* Tool: Jenkins
+* Purpose: Used for CICD Pipeline
+  * build images
+  * copy images
+  * deploy pods
+  * deploy services
+  * other things as necessary
+* Source System: VM on-prem
+* AWS Access
+  * IAM Service account tied to the cluster name
+    * s-eks-{cluster}-cicd
+  * permissions to read and write ECR
+    * but NOT eks/{clustername}
+  * permission to eks get-token
+  * permission to eks update-kubeconfig (download the kubeconfig)
+* Kubernetes Access
+  * Username
+    * recommend the same pattern: eks-{cluster}-cicd
+  * Group
+    * group names needed
+  * Permissions
+    * defined in K8S thing ..
+  * Files for configuration of K8S
+    * yml:
+    * tf:
+
+# AWS Commands
+
+```shell
+aws eks get-token --cluster-name {cluster}
+aws eks update-kubeconfig --name {cluster}
+```
+
+## CICD
+
+There are a number of ways to handle the CICD pipeline. How to do it depends in part on whether it runs outside of the cluster or inside of it. The options include:
+
+* a service account for CICD (say, s-adsd-cicd-deployer) with full permissions to ECR and to get the eks config and token, along with k8s permissions through the ConfigMap aws-auth.
+* a role for CICD per cluster, say r-eks-{cluster}-cicd-deployer, with the same permissions as above.
+* These are all account specific, so running CICD across multiple accounts will need multiple IAM accounts and roles.
+* consider some central way of doing this so a CICD can deploy to any cluster in any account in any region.
+* perhaps start with a smaller per-cluster user/role and work towards a better solution later
+
+# TBD
+
+1. Determine how to create a default:default IRSA role which grants little to no AWS permissions (maybe sts get-caller-identity).
+1. Create a module for IRSA
+1. Explore the use of the OIDC integration with Access Manager
+1. Develop a strategy for CICD access
+
+# Links
+
+* [AWS RBAC](https://aws.amazon.com/premiumsupport/knowledge-center/eks-iam-permissions-namespaces/)
+* [K8S RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
+* [Add User Role](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)
+* [OIDC Identity Provider](https://docs.aws.amazon.com/eks/latest/userguide/authenticate-oidc-identity-provider.html)
+* [OIDC with MicroFocus](https://community.microfocus.com/cyberres/accessmanager/w/access_manager_tips/27815/access-amazon-web-services-using-amazon-cognito-for-mobile-applications-and-netiq-access-manager-4-1)
+
+
diff --git a/examples/full-cluster/aws-auth/config_map.aws-auth.yaml.tpl b/examples/full-cluster/aws-auth/config_map.aws-auth.yaml.tpl
new file mode 100644
index 0000000..7c58ada
--- /dev/null
+++ b/examples/full-cluster/aws-auth/config_map.aws-auth.yaml.tpl
@@ -0,0 +1,17 @@
+data:
+%{ if length(roles) > 0 }
+  mapRoles: |
+  %{ for k, v in roles ~}
+    - rolearn: ${v.rolearn}
+      username: ${v.username}
+      groups: ${v.groups}
+  %{ endfor ~}
+%{ endif }
+%{ if length(users) > 0 }
+  mapUsers: |
+  %{ for k, v in users ~}
+    - userarn: ${v.userarn}
+      username: ${v.username}
+      groups: ${v.groups}
+  %{ endfor ~}
+%{ endif }
diff --git a/examples/full-cluster/aws-auth/data.eks.tf b/examples/full-cluster/aws-auth/data.eks.tf
new file mode 100644
index 0000000..870e8c6
--- /dev/null
+++ b/examples/full-cluster/aws-auth/data.eks.tf
@@ -0,0 +1,15 @@
+data "aws_eks_cluster" "cluster" {
+  name = var.cluster_name
+}
+
+data "aws_eks_cluster_auth" "cluster" {
+  name = var.cluster_name
+}
+
+locals {
+  aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster
+  # for main.tf
+  # aws_eks_cluster = aws_eks_cluster.eks_cluster
+  # for all subdirectories
+  aws_eks_cluster = data.aws_eks_cluster.cluster
+}
diff --git a/examples/full-cluster/aws-auth/kubeconfig.tf b/examples/full-cluster/aws-auth/kubeconfig.tf
new file mode 100644
index 0000000..5e386f5
--- /dev/null
+++ b/examples/full-cluster/aws-auth/kubeconfig.tf
@@ -0,0 +1,29 @@
+resource "null_resource" "kubeconfig" {
+  triggers = {
+    always_run = timestamp()
+  }
+  provisioner "local-exec" {
+    command = "which kubectl > /dev/null 2>&1; if [ $? != 0 ]; then echo 'missing kubectl'; exit 1; else exit 0; fi"
+  }
+  provisioner "local-exec" {
+    command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'"
+  }
+  provisioner "local-exec" {
+    environment = {
+      AWS_PROFILE = var.profile
+      AWS_REGION  = local.region
+    }
+    command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/kube.config"
+  }
+  depends_on = [data.aws_eks_cluster.cluster]
+}
+
+#---
+# call it like
+#---
+## provisioner "local-exec" {
+##   environment = {
+##     KUBECONFIG = "${path.root}/setup/kube.config"
+##   }
+##   command = "kubectl set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true"
+## }
diff --git a/examples/full-cluster/aws-auth/patch-aws-auth.tf b/examples/full-cluster/aws-auth/patch-aws-auth.tf
new file mode 100644
index 0000000..88e0bbe
--- /dev/null
+++ b/examples/full-cluster/aws-auth/patch-aws-auth.tf
@@ -0,0 +1,135 @@
+data "kubernetes_config_map" "aws-auth" {
+  metadata {
+    namespace = "kube-system"
+    name      = "aws-auth"
+  }
+}
+
+data "aws_iam_user" "auth_users" {
+  for_each  = toset([for u in local.joined_auth_users : u.aws_username])
+  user_name = each.key
+}
+
+data "aws_iam_role" "auth_roles" {
+  for_each = toset([for r in local.joined_auth_roles : r.aws_rolename])
+  name     = each.key
+}
+
+
+locals {
+  existing_roles_string = lookup(data.kubernetes_config_map.aws-auth.data, "mapRoles", "")
+  existing_users_string = lookup(data.kubernetes_config_map.aws-auth.data, "mapUsers", "")
+
+  existing_roles = local.existing_roles_string != "" ? yamldecode(local.existing_roles_string) : []
+  existing_users = local.existing_users_string != "" ? yamldecode(local.existing_users_string) : []
+
+  joined_auth_users = concat(local.aws_auth_users, var.aws_auth_users)
+  joined_auth_roles = concat(local.aws_auth_roles, var.aws_auth_roles)
+
+  mapped_auth_users = [for u in local.joined_auth_users : {
+    userarn      = data.aws_iam_user.auth_users[u.aws_username].arn
+    aws_username = u.aws_username
+    username     = u.username
+    groups       = u.groups
+  }]
+  mapped_auth_roles = [for u in local.joined_auth_roles : {
+    rolearn      = data.aws_iam_role.auth_roles[u.aws_rolename].arn
+    aws_rolename = u.aws_rolename
+    username     = u.username
+    groups       = u.groups
+  }]
+
+  merged_users = merge(
+    { for user in local.existing_users : user.userarn => user },
+    # { for user in local.aws_auth_users : user.userarn => user },
+    # { for user in var.aws_auth_users : user.userarn => user }
+    { for user in local.mapped_auth_users : user.userarn => user },
+  )
+
+  merged_roles = merge(
+    { for role in local.existing_roles : role.rolearn => role },
+    # { for role in local.aws_auth_roles : role.rolearn => role },
+    # { for role in var.aws_auth_roles : role.rolearn => role }
+    { for role in local.mapped_auth_roles : role.rolearn => role },
+  )
+
+  # patch = yamlencode({
+  #   "data" = {
+  #     "mapUsers" = values(local.merged_users)
+  #     "mapRoles" = values(local.merged_roles)
+  #   }
+  # })
+  patch = <<EOM
+data:
+%{if length(local.merged_roles) > 0~}
+  mapRoles: |
+%{for k, v in local.merged_roles~}
+    - rolearn: ${v.rolearn}
+      username: ${v.username}
+      groups:
+%{for g in v.groups~}
+        - ${g}
+%{endfor~}
+%{endfor~}
+%{endif~}
+%{if length(local.merged_users) > 0~}
+  mapUsers: |
+%{for k, v in local.merged_users~}
+    - userarn: ${v.userarn}
+      username: ${v.username}
+      groups:
+%{for g in v.groups~}
+        - ${g}
+%{endfor~}
+%{endfor~}
+%{endif~}
+EOM
+
+  # patch_t = templatefile("${path.root}/config_map.aws-auth.yaml.tpl",{
+  #   users = values(local.merged_users)
+  #   roles = values(local.merged_roles)
+  #
}) +} + +resource "null_resource" "patch-aws-auth" { + triggers = { + users = join(",", sort(keys(local.merged_users))) + roles = join(",", sort(keys(local.merged_roles))) + } + depends_on = [null_resource.kubeconfig] + # provisioner "local-exec" { + # command = "if [ -z $KUBECONFIG ]; then 'echo missing KUBECONFIG'; exit 1; else exit 0; fi" + # } + # provisioner "local-exec" { + # command = "if [ ! -r $KUBECONFIG ]; then 'echo unreadable KUBECONFIG'; exit 1; else exit 0; fi" + # } + # provisioner "local-exec" { + # command = "which kubectl > /dev/null 2>&1; if [ $? != 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" + # } + provisioner "local-exec" { + command = "test -d setup || mkdir setup" + } + provisioner "local-exec" { + command = "echo '${local.patch}' > setup/config_map.patch.yaml" + } + # provisioner "local-exec" { + # command = "echo '${local.patch_t}' > config_map.patch_t.yaml" + # } + provisioner "local-exec" { + # command = "kubectl patch --type merge -n kube-system configmap/aws-auth -p '${local.patch}'" + command = "kubectl --kubeconfig ${path.root}/setup/kube.config patch --type merge -n kube-system configmap/aws-auth --patch-file setup/config_map.patch.yaml" + } +} + +# output "map" { +# value = data.kubernetes_config_map.aws-auth +# } +# output "map_output" { +# value = { +# "object" = data.kubernetes_config_map.aws-auth +# "existing_users" = local.existing_users +# "existing_roles" = local.existing_roles +# "patch" = local.patch +# "patch_text" = local.patch_t +# } +# } diff --git a/examples/full-cluster/aws-auth/prefixes.tf b/examples/full-cluster/aws-auth/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/full-cluster/aws-auth/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster/aws-auth/providers.tf b/examples/full-cluster/aws-auth/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ b/examples/full-cluster/aws-auth/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/full-cluster/aws-auth/region.tf b/examples/full-cluster/aws-auth/region.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/full-cluster/aws-auth/region.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/full-cluster/aws-auth/settings.aws-auth.tf b/examples/full-cluster/aws-auth/settings.aws-auth.tf new file mode 100644 index 0000000..4d3259d --- /dev/null +++ b/examples/full-cluster/aws-auth/settings.aws-auth.tf @@ -0,0 +1,11 @@ +locals { + aws_auth_users = [] + aws_auth_roles = [ + { + rolearn : "" + aws_rolename : format("%v%v-cluster-admin", local._prefixes["eks-role"], var.cluster_name) + username : "admin" + groups = ["system:masters", "eks-console-dashboard-full-access-group"] + }, + ] +} diff --git a/examples/full-cluster/aws-auth/tf-run.data b/examples/full-cluster/aws-auth/tf-run.data new file mode 100644 index 0000000..44f61ff --- /dev/null +++ b/examples/full-cluster/aws-auth/tf-run.data @@ -0,0 +1,6 @@ +COMMAND tf-directory-setup.py -l none -f +COMMAND setup-new-directory.sh +COMMAND tf-init -upgrade +ALL +COMMAND tf-directory-setup.py -l s3 +STOP cd ../efs and tf-run.sh apply diff --git a/examples/full-cluster/aws-auth/variables.aws-auth.tf b/examples/full-cluster/aws-auth/variables.aws-auth.tf new file mode 100644 index 0000000..05708d5 --- /dev/null +++ b/examples/full-cluster/aws-auth/variables.aws-auth.tf @@ -0,0 +1,23 @@ +# maybe just ignore the ARN entirely and force a read + 
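+# Hedged example for terraform.tfvars (values are hypothetical); leaving
+# userarn empty lets patch-aws-auth.tf look the ARN up from AWS:
+#
+#   aws_auth_users = [
+#     {
+#       userarn      = ""
+#       aws_username = "u-badra001"
+#       username     = "admin"
+#       groups       = ["system:masters"]
+#     },
+#   ]
+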
+variable "aws_auth_users" {
+  description = "A list of objects where each object has userarn, aws_username, username, and groups, where groups is a list of groups to associate with the user. Leaving userarn as an empty string will pull the user ARN from AWS."
+  type = list(object({
+    userarn      = string
+    aws_username = string
+    username     = string
+    groups       = list(string)
+  }))
+  default = []
+}
+
+variable "aws_auth_roles" {
+  description = "A list of objects where each object has rolearn, aws_rolename, username, and groups, where groups is a list of groups to associate with the role. Leaving rolearn as an empty string will pull the role ARN from AWS."
+  type = list(object({
+    rolearn      = string
+    aws_rolename = string
+    username     = string
+    groups       = list(string)
+  }))
+  default = []
+}
diff --git a/examples/full-cluster/aws-auth/variables.eks.tf b/examples/full-cluster/aws-auth/variables.eks.tf
new file mode 120000
index 0000000..7dd95db
--- /dev/null
+++ b/examples/full-cluster/aws-auth/variables.eks.tf
@@ -0,0 +1 @@
+../variables.eks.tf
\ No newline at end of file
diff --git a/examples/full-cluster/aws-auth/variables.vpc.tf b/examples/full-cluster/aws-auth/variables.vpc.tf
new file mode 120000
index 0000000..f672f33
--- /dev/null
+++ b/examples/full-cluster/aws-auth/variables.vpc.tf
@@ -0,0 +1 @@
+../variables.vpc.tf
\ No newline at end of file
diff --git a/examples/full-cluster/aws-auth/version.tf b/examples/full-cluster/aws-auth/version.tf
new file mode 120000
index 0000000..061373c
--- /dev/null
+++ b/examples/full-cluster/aws-auth/version.tf
@@ -0,0 +1 @@
+../version.tf
\ No newline at end of file
diff --git a/examples/full-cluster/bin/copy_image.sh b/examples/full-cluster/bin/copy_image.sh
new file mode 100755
index 0000000..60e8847
--- /dev/null
+++ b/examples/full-cluster/bin/copy_image.sh
@@ -0,0 +1,324 @@
+#!/bin/bash
+
+###############################################################################
+# This script uses skopeo to copy a docker image from one repository to
+# another. The primary intent is to copy the image from a public repository
+# to a private repository.
+###############################################################################
+# Expected environment variables:
+#
+# SOURCE_IMAGE - The image to copy to another location. Example:
+#                paradyme-docker-local.jfrog.io/appetizer:dev
+# SOURCE_INSECURE - Set this to 1 if the source repository is in an insecure
+#                   docker registry. Set it to 0 or leave it unset if the
+#                   docker registry is secure.
+#
+# DESTINATION_IMAGE - The image to copy to another location. Example:
+#                     paradyme-docker-local.jfrog.io/appetizer:dev
+# DESTINATION_INSECURE - Set this to 1 if the destination repository is in
+#                        an insecure docker registry. Set it to 0 or leave it
+#                        unset if the docker registry is secure.
+#
+# When the source repository requires authentication to access, configure
+# these values. Otherwise do not set them.
+#
+# SOURCE_USERNAME - The username to supply for credentialed access to the
+#                   repository. `anthony-zawacki` is an example.
+# SOURCE_PASSWORD - The password to supply for credentialed access to the
+#                   repository. An artifactory API_KEY for example.
+#
+# When the destination repository requires authentication to access, configure
+# these values. Otherwise do not set them.
+#
+# DESTINATION_USERNAME - The username to supply for credentialed access to the
+#                        repository. `anthony-zawacki` is an example.
+# DESTINATION_PASSWORD - The password to supply for credentialed access to the
+#                        repository. The output of:
+#                        `aws ecr get-login-password --region us-east-2` for example.
+#
+# If the destination repository does not exist, the copy_image.sh script will
+# create the repository automatically. In cases where the newly created
+# repository should have a mutable image (perhaps always pushing to a `latest`
+# tag in a development environment), it is possible to configure the
+# repository to allow mutability by configuring this environment variable.
+# Otherwise, do not set it.
+#
+# DESTINATION_MUTABLE - Set this to 1 to create the repository with mutable
+#                       image tags. (Assumed from the usage text below.)
+#
+###############################################################################
+
+ensure_skopeo() {
+  skopeo=$(command -v skopeo)
+  if [[ "$skopeo" == "" ]]; then
+    echo "The required executable, skopeo, was not found."
+    echo "Please install it and ensure it is in the path."
+    return 1
+  fi
+
+  return 0
+}
+
+usage() {
+  local msg="${1}"; shift;
+
+  cat <<EOF
+${msg}
+
+Usage: copy_image.sh [options]
+
+  -src-image <image> (SOURCE_IMAGE) The name of the image to copy to another
+    registry.
+  -src-username <username> (SOURCE_USERNAME) Optional parameter in cases where
+    the source registry requires authentication. Use this username for the
+    credentials.
+  -src-password <password> (SOURCE_PASSWORD) Optional parameter in cases where
+    the source registry requires authentication. Use this password for the
+    credentials.
+  -src-insecure (SOURCE_INSECURE=1) Optional parameter indicates that the
+    source registry is not a secured registry and that tls validation
+    should be disabled for the processing of the image. The default is
+    to assume that the source registry is secured.
+  +src-insecure (SOURCE_INSECURE=0) Optional parameter explicitly indicating
+    that the source registry is secure and TLS must be used to access the
+    registry.
+
+  -dest-image <image> (DESTINATION_IMAGE) The name of the image to use in the
+    destination registry.
+  -dest-username <username> (DESTINATION_USERNAME) Optional parameter in cases
+    where the destination registry requires authentication. Use this
+    username for the credentials.
+  -dest-password <password> (DESTINATION_PASSWORD) Optional parameter in cases
+    where the destination registry requires authentication. Use this
+    password for the credentials.
+  -dest-insecure (DESTINATION_INSECURE=1) Optional parameter indicates that the
+    destination registry is not a secured registry and that tls validation
+    should be disabled for the processing of the image. The default is
+    to assume that the destination registry is secured.
+  +dest-insecure (DESTINATION_INSECURE=0) Optional parameter explicitly
+    indicating that the destination registry is secure and TLS must be
+    used to access the registry.
+  -dest-mutable (DESTINATION_MUTABLE=1) Optional parameter indicates that if
+    creating the ECR repository is required, create it allowing mutable
+    images.
+  +dest-mutable (DESTINATION_MUTABLE=0) Optional parameter explicitly
+    indicating that if creating the ECR repository is required, create it
+    with immutable images.
+
+EOF
+
+  exit 1
+}
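+
+# Example invocation (values are hypothetical; the DESTINATION_USERNAME for
+# ECR is literally "AWS" when paired with get-login-password):
+#
+#   SOURCE_IMAGE=public.ecr.aws/nginx/nginx:1.21 \
+#   DESTINATION_IMAGE=123456789012.dkr.ecr.us-east-1.amazonaws.com/eks/test3/nginx:1.21 \
+#   DESTINATION_USERNAME=AWS \
+#   DESTINATION_PASSWORD=$(aws ecr get-login-password --region us-east-1) \
+#   ./copy_image.sh
+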
+parse_commandline() {
+  local key
+  local positional=()
+
+  while [[ $# -gt 0 ]]; do
+    key="$1"; shift
+
+    case "$key" in
+      -src-image)
+        SOURCE_IMAGE="$1"; shift
+        ;;
+      -src-username)
+        SOURCE_USERNAME="$1"; shift
+        ;;
+      -src-password)
+        SOURCE_PASSWORD="$1"; shift
+        ;;
+      -src-insecure)
+        SOURCE_INSECURE=1
+        ;;
+      +src-insecure)
+        SOURCE_INSECURE=0
+        ;;
+      -dest-image)
+        DESTINATION_IMAGE="$1"; shift
+        ;;
+      -dest-username)
+        DESTINATION_USERNAME="$1"; shift
+        ;;
+      -dest-password)
+        DESTINATION_PASSWORD="$1"; shift
+        ;;
+      -dest-insecure)
+        DESTINATION_INSECURE=1
+        ;;
+      +dest-insecure)
+        DESTINATION_INSECURE=0
+        ;;
+      -dest-mutable)
+        DESTINATION_MUTABLE=1
+        ;;
+      +dest-mutable)
+        DESTINATION_MUTABLE=0
+        ;;
+      *)
+        positional+=("$key")
+        ;;
+    esac
+  done
+
+  if [[ ${#positional[@]} -gt 0 ]]; then
+    usage "Unrecognized parameters: ${positional[*]}"
+  fi
+}
+
+ensure_parameters() {
+  if [[ "$SOURCE_IMAGE" == "" ]]; then
+    usage "Must specify SOURCE_IMAGE"
+  fi
+
+  if [[ "$DESTINATION_IMAGE" == "" ]]; then
+    usage "Must specify DESTINATION_IMAGE"
+  fi
+
+  if [[ "$SOURCE_USERNAME" != "" || "$SOURCE_PASSWORD" != "" ]]; then
+    if [[ "$SOURCE_USERNAME" == "" || "$SOURCE_PASSWORD" == "" ]]; then
+      usage "Must specify both the SOURCE_USERNAME and SOURCE_PASSWORD."
+    fi
+  fi
+
+  if [[ "$DESTINATION_USERNAME" != "" || "$DESTINATION_PASSWORD" != "" ]]; then
+    if [[ "$DESTINATION_USERNAME" == "" || "$DESTINATION_PASSWORD" == "" ]]; then
+      usage "Must specify both the DESTINATION_USERNAME and DESTINATION_PASSWORD."
+    fi
+  fi
+
+  return 0
+}
+
+image_exists() {
+  declare src_creds="$SOURCE_USERNAME:$SOURCE_PASSWORD"
+  declare command=(skopeo inspect --insecure-policy)
+
+  if [[ "$SOURCE_USERNAME" != "" ]]; then
+#   command+=(--src-creds "$src_creds")
+    command+=(--creds "$src_creds")
+  else
+#   command+=(--src-no-creds)
+    command+=(--no-creds)
+  fi
+
+# if [[ "$SOURCE_INSECURE" == "1" ]]; then
+#   command+=(--src-tls-verify=false)
+# else
+#   command+=(--src-tls-verify=true)
+# fi
+
+  command+=("docker://$SOURCE_IMAGE")
+
+  "${command[@]}" > /dev/null 2>&1
+  status=$?
+  echo "* image_exists() status=$status"
+  # return the inspect status: 0 if the image exists, non-zero if not
+  return $status
+}
+
+destination_image_exists() {
+  declare dst_creds="$DESTINATION_USERNAME:$DESTINATION_PASSWORD"
+  declare command=(skopeo inspect --insecure-policy)
+
+  if [[ "$DESTINATION_USERNAME" != "" ]]; then
+#   command+=(--dest-creds "$dst_creds")
+    command+=(--creds "$dst_creds")
+  else
+#   command+=(--dest-no-creds)
+    command+=(--no-creds)
+  fi
+
+# if [[ "$DESTINATION_INSECURE" == "1" ]]; then
+#   command+=(--dest-tls-verify=false)
+# else
+#   command+=(--dest-tls-verify=true)
+# fi
+
+  command+=("docker://$DESTINATION_IMAGE")
+
+  "${command[@]}" > /dev/null 2>&1
+  status=$?
+  echo "* destination_image_exists() status=$status"
+  # return the inspect status: 0 if the image exists, non-zero if not
+  return $status
+} + +copy_image() { + declare src_creds="$SOURCE_USERNAME:$SOURCE_PASSWORD" + declare dest_creds="$DESTINATION_USERNAME:$DESTINATION_PASSWORD" + declare command=(skopeo copy --insecure-policy) + + if [[ "$SOURCE_USERNAME" != "" ]]; then + command+=(--src-creds "$src_creds") + else + command+=(--src-no-creds) + fi + + if [[ "$SOURCE_INSECURE" == "1" ]]; then + command+=(--src-tls-verify=false) + else + command+=(--src-tls-verify=true) + fi + + if [[ "$DESTINATION_USERNAME" != "" ]]; then + command+=(--dest-creds "$dest_creds") + else + command+=(--dest-no-creds) + fi + + if [[ "$DESTINATION_INSECURE" == "1" ]]; then + command+=(--dest-tls-verify=false) + else + command+=(--dest-tls-verify=true) + fi + + command+=("docker://$SOURCE_IMAGE" "docker://$DESTINATION_IMAGE") + + if [[ "$DESTINATION_IMAGE" == *.dkr.ecr.*.amazonaws.com/* ]]; then + echo "ECR registry detected, ensuring repository." + declare repository="${DESTINATION_IMAGE##*.amazonaws.com/}" + repository="${repository%%:*}" + declare region="${DESTINATION_IMAGE%%.amazonaws.com/*}" + region="${region##*.}" + export AWS_PAGER="" + if ! aws ecr describe-repositories \ + --region "$region" \ + --output "json" \ + --repository-names "$repository" \ + > /dev/null 2>&1; then + local mutability="IMMUTABLE" + if [ "$DESTINATION_MUTABLE" == "1" ]; then + mutability="MUTABLE" + fi + echo "creating repository $repository." + aws ecr create-repository \ + --image-tag-mutability "$mutability" \ + --image-scanning-configuration "scanOnPush=true" \ + --encryption-configuration "encryptionType=KMS" \ + --repository-name "$repository" \ + --region "$region" \ + > /dev/null 2>&1 || return $? + else + echo "repository $repository exists." + fi + fi + + echo "Copying $SOURCE_IMAGE" + echo "to $DESTINATION_IMAGE" + + ${command[@]} +} + + +ensure_image() { + ( image_exists && ! destination_image_exists ) || copy_image +} + +main() { + ensure_skopeo && \ + parse_commandline "$@" && \ + ensure_parameters && \ + ensure_image && \ + echo "Done" +} + +return 0 > /dev/null 2>&1 || main "$@" + diff --git a/examples/full-cluster/bin/fix-terminating-namespace.sh b/examples/full-cluster/bin/fix-terminating-namespace.sh new file mode 100755 index 0000000..7282e79 --- /dev/null +++ b/examples/full-cluster/bin/fix-terminating-namespace.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +# fix_terminating_namespace() { +# local -r namespace="${1}"; shift; +# +# kubectl get ns "$namespace" 2>&1 | grep -q Terminating +# +# if [ $? -eq 0 ]; then +# kubectl get namespace "$namespace" -o json | \ +# grep -v '^ "kubernetes"$' | \ +# kubectl replace --raw "/api/v1/namespaces/$namespace/finalize" -f - +# else +# echo "Namespace $namespace not found or not stuck in terminating state." +# fi +# } +# } + +namespace="${1}" +shift; + +kubectl get ns "$namespace" 2>&1 | grep -q Terminating +if [ $? -eq 0 ] +then + kubectl get namespace "$namespace" -o json |\ + grep -v '^ "kubernetes"$' |\ + kubectl replace --raw "/api/v1/namespaces/$namespace/finalize" -f - +else + echo "Namespace $namespace not found or not stuck in terminating state." 
+fi
diff --git a/examples/full-cluster/bin/show-k8s-things.sh b/examples/full-cluster/bin/show-k8s-things.sh
new file mode 100755
index 0000000..c5f6290
--- /dev/null
+++ b/examples/full-cluster/bin/show-k8s-things.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+for f in all clusterrolebindings clusterroles nodes pods pvc pv rolebindings roles sc secrets services
+do
+  echo "kubectl --kubeconfig setup/kube.config get $f --all-namespaces -o wide > OUT.get-$f.txt"
+  kubectl --kubeconfig setup/kube.config get $f --all-namespaces -o wide > OUT.get-$f.txt
+done
diff --git a/examples/full-cluster/cluster-roles/.terraform-docs.yml b/examples/full-cluster/cluster-roles/.terraform-docs.yml
new file mode 100644
index 0000000..8391b9d
--- /dev/null
+++ b/examples/full-cluster/cluster-roles/.terraform-docs.yml
@@ -0,0 +1,44 @@
+formatter: markdown table
+
+header-from: main.tf
+footer-from: ""
+
+sections:
+## hide: []
+  show:
+    - data-sources
+    - header
+    - footer
+    - inputs
+    - modules
+    - outputs
+    - providers
+    - requirements
+    - resources
+
+output:
+  file: README.md
+  mode: inject
+  template: |-
+    <!-- BEGIN_TF_DOCS -->
+    {{ .Content }}
+    <!-- END_TF_DOCS -->
+
+## output-values:
+##   enabled: false
+##   from: ""
+##
+## sort:
+##   enabled: true
+##   by: name
+##
+## settings:
+##   anchor: true
+##   color: true
+##   default: true
+##   description: false
+##   escape: true
+##   indent: 2
+##   required: true
+##   sensitive: true
+##   type: true
diff --git a/examples/full-cluster/cluster-roles/README.md b/examples/full-cluster/cluster-roles/README.md
new file mode 100644
index 0000000..15664f8
--- /dev/null
+++ b/examples/full-cluster/cluster-roles/README.md
@@ -0,0 +1,236 @@
+# About cluster-roles
+
+This directory constructs the resources for roles, permissions and Kubernetes resources
+for the EKS cluster adsd-cumulus-dev.
+
+# Application Information
+
+* Application: EKS adsd-cumulus-dev
+* Organization: ADSD
+* Project: DICE-dev
+* Point of Contact(s): badra001,
+* Creation Date: 2021-10-08
+* References:
+  * Requirements: {url}
+  * Remedy Ticket: {number}
+  * Other: {url}
+* Related Configurations:
+  * {directory-path}
+
+# Application Requirements: EKS Cluster RBAC
+
+To let the CICD pipeline and the DBAs manage the applications and databases that Cumulus needs, three cluster roles need to be created:
+
+1. Deployer Application Role
+2. Deployer Istio System Role
+3. DBA Administrator Role
+
+The CICD deployer is bound to the Deployer roles in the namespaces that CICD manages. Likewise, the DBA admin user only has admin roles for the namespaces they manage.
+
+## Deployer Application Role
+
+This role defines the k8s resources that the CICD pipeline needs to create for application deployment (a Terraform sketch follows the YAML below).
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: deployer-role
+aggregationRule:
+  clusterRoleSelectors:
+    - matchLabels:
+        rbac.authorization.k8s.io/aggregate-to-edit: "true"
+rules:
+  - apiGroups:
+      - cert-manager.io
+      - acme.cert-manager.io
+    resources:
+      - "*"
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - update
+      - patch
+      - delete
+  - apiGroups:
+      - networking.istio.io
+      - security.istio.io
+    resources:
+      - virtualservices
+      - authorizationpolicies
+      - destinationrules
+      - peerauthentications
+      - requestauthentications
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - delete
+      - patch
+
+```
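+
+The repo creates these roles with Terraform rather than by applying YAML. As a hedged, condensed sketch (the resource name is hypothetical; dba-clusterrole.tf shows the full pattern used here), the equivalent `kubernetes_cluster_role` looks like:
+
+```hcl
+resource "kubernetes_cluster_role" "deployer_application" {
+  metadata {
+    name = var.deployer_application_role_name
+  }
+  aggregation_rule {
+    cluster_role_selectors {
+      match_labels = {
+        "rbac.authorization.k8s.io/aggregate-to-edit" = "true"
+      }
+    }
+  }
+  rule {
+    api_groups = ["cert-manager.io", "acme.cert-manager.io"]
+    resources  = ["*"]
+    verbs      = ["get", "list", "watch", "create", "update", "patch", "delete"]
+  }
+  rule {
+    api_groups = ["networking.istio.io", "security.istio.io"]
+    resources  = ["virtualservices", "authorizationpolicies", "destinationrules", "peerauthentications", "requestauthentications"]
+    verbs      = ["get", "list", "watch", "create", "delete", "patch"]
+  }
+}
+```
+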
+## Deployer Istio System Role
+
+This role lets the deployer create gateways and certificates in the istio-system namespace; Istio requires that the TLS
+certificate stay in the same namespace as the istio-ingressgateway.
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: deployer-istiosystem-role
+rules:
+  - apiGroups:
+      - cert-manager.io
+      - acme.cert-manager.io
+    resources:
+      - "*"
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - update
+      - patch
+  - apiGroups:
+      - networking.istio.io
+    resources:
+      - gateways
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - delete
+      - patch
+
+```
+
+## DBA Administrator Role
+
+This is the admin role for the namespace(s) that the DBAs need in order to access and manage the databases.
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: dba-admin-role
+aggregationRule:
+  clusterRoleSelectors:
+    - matchLabels:
+        rbac.authorization.k8s.io/aggregate-to-admin: "true"
+rules:
+  - apiGroups:
+      - cert-manager.io
+      - acme.cert-manager.io
+    resources:
+      - "*"
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - update
+      - patch
+      - delete
+  - apiGroups:
+      - networking.istio.io
+      - security.istio.io
+    resources:
+      - virtualservices
+      - authorizationpolicies
+      - destinationrules
+      - peerauthentications
+      - requestauthentications
+    verbs:
+      - get
+      - list
+      - watch
+      - create
+      - delete
+      - patch
+
+```
+
+# Terraform Directions
+
+
+
+
+# Details
+
+
+account_alias = ""
+account_id = ""
+application_tags = {}
+aws_environment = ""
+census_private_cidr = [
+  "148.129.0.0/16",
+  "172.16.0.0/12",
+  "192.168.0.0/16"
+]
+census_public_cidr = [
+  "148.129.0.0/16"
+]
+cicd_k8s_group_name = "s-eks-adsd-cumulus-dev-cicd-deployer"
+cicd_k8s_user_name = "cicd-deployer"
+cicd_managed_namespaces = [
+  "adsd-cumulus-dev-apps",
+  "adsd-cumulus-dev-addressupdate",
+  "adsd-cumulus-dev-adminmatchrecord",
+  "adsd-cumulus-dev-cbs-apps",
+  "adsd-cumulus-dev-collectionevent",
+  "adsd-cumulus-dev-collectionintervention",
+  "adsd-cumulus-dev-collectionoperation",
+  "adsd-cumulus-dev-collectionresponse",
+  "adsd-cumulus-dev-common",
+  "adsd-cumulus-dev-mft",
+  "adsd-cumulus-dev-monitoring"
+]
+cluster_name = ""
+cluster_version = "1.20"
+dba_admin_rolebinding_name = "dba-admin-rolebinding"
+dba_administrator_role_name = "dba-admin-role"
+dba_k8s_group_name = "s-eks-adsd-cumulus-dev-dba-admin"
+dba_k8s_user_name = "dba-admin"
+dba_managed_namespaces = [
+  "adsd-cumulus-dev-db"
+]
+deployer_application_role_name = "deployer-application-role"
+deployer_application_rolebinding_name = "deployer-application-rolebinding"
+deployer_istiosystem_role_name = "deployer-istiosystem-role"
+domain = ""
+eks_instance_disk_size = 40
+eks_instance_type = "t3.xlarge"
+eks_ng_desire_size = 4
+eks_ng_max_size = 16
+eks_ng_min_size = 4
+eks_vpc_name = "*vpc4*"
+istio_installed_namespace =
"istio-system" +kms_tfstate_key = "k-kms-inf-tfstate" +profile = "" +region = "" +region_map = {} +regions = [] +subnets_name = "*-apps-*" +tag_costallocation = "csvd:infrastructure" +tag_creator = "" +tfstate_bucket = "inf-tfstate-252960665057" +tfstate_bucket_prefix = "inf-tfstate" +tfstate_key_prefix = "ma6-gov" +tfstate_key_suffix = "terraform.tfstate" +tfstate_region = "us-gov-east-1" +tfstate_table = "tf_remote_state" +vpc_dns_servers = [ + "148.129.127.22", + "148.129.191.22" +] +vpc_domain_name = "dice.census.gov" +vpc_full_name = "" +vpc_ntp_servers = [ + "148.129.127.23", + "148.129.191.23" +] + diff --git a/examples/full-cluster/cluster-roles/RESULTS.md b/examples/full-cluster/cluster-roles/RESULTS.md new file mode 100644 index 0000000..5d31a20 --- /dev/null +++ b/examples/full-cluster/cluster-roles/RESULTS.md @@ -0,0 +1,41 @@ +## Cluster Roles + +```console +% kubectl --kubeconfig setup/kube.config get clusterrole -o wide |grep -iE "dba|deployer" +cumulus-dba-role 2021-10-07T14:36:45Z +dba-admin-role 2021-10-13T12:12:33Z +deployer-application-role 2021-10-13T12:12:33Z +deployer-istiosystem-role 2021-10-13T12:12:33Z +deployer-role 2021-10-07T16:37:43Z +``` + +## Role Binding + +```console +% kubectl --kubeconfig setup/kube.config get rolebinding -o wide --all-namespaces |grep -iE "deployer|dba" +adsd-cumulus-dev-addressupdate deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-addressupdate deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-adminmatchrecord deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-adminmatchrecord deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-apps deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-apps deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-cbs-apps deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-cbs-apps deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-collectionevent deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-collectionevent deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-collectionintervention deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-collectionintervention deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-collectionoperation deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-collectionoperation deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-collectionresponse deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-collectionresponse deployer-rolebinding ClusterRole/deployer-role 
5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-common deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-common deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-db cumulus-dba-rolebinding ClusterRole/cumulus-dba-role 5d22h dba-admin cumulus-dba kube-system/dba +adsd-cumulus-dev-db dba-admin-rolebinding ClusterRole/dba-admin-role 56m dba-admin s-eks-adsd-cumulus-dev-dba-admin +adsd-cumulus-dev-mft deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-mft deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +adsd-cumulus-dev-monitoring deployer-application-rolebinding ClusterRole/deployer-application-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +adsd-cumulus-dev-monitoring deployer-rolebinding ClusterRole/deployer-role 5d20h cumulus-deployer kube-system/deployer +istio-system deployer_istiosystem_role_binding ClusterRole/deployer-istiosystem-role 56m cicd-deployer s-eks-adsd-cumulus-dev-cicd-deployer +``` diff --git a/examples/full-cluster/cluster-roles/cm.tf.off b/examples/full-cluster/cluster-roles/cm.tf.off new file mode 100644 index 0000000..f84cb4b --- /dev/null +++ b/examples/full-cluster/cluster-roles/cm.tf.off @@ -0,0 +1,6 @@ +data "kubernetes_config_map" "awsauth" { + metadata { + name = "aws-auth" + namespace = "kube-system" + } +} diff --git a/examples/full-cluster/cluster-roles/data.eks.tf b/examples/full-cluster/cluster-roles/data.eks.tf new file mode 100644 index 0000000..870e8c6 --- /dev/null +++ b/examples/full-cluster/cluster-roles/data.eks.tf @@ -0,0 +1,15 @@ +data "aws_eks_cluster" "cluster" { + name = var.cluster_name +} + +data "aws_eks_cluster_auth" "cluster" { + name = var.cluster_name +} + +locals { + aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster + # for main.tf + # aws_eks_cluster = aws_eks_cluster.eks_cluster + # for all subdirectories + aws_eks_cluster = data.aws_eks_cluster.cluster +} diff --git a/examples/full-cluster/cluster-roles/dba-clusterrole.tf b/examples/full-cluster/cluster-roles/dba-clusterrole.tf new file mode 100644 index 0000000..e60e7b5 --- /dev/null +++ b/examples/full-cluster/cluster-roles/dba-clusterrole.tf @@ -0,0 +1,24 @@ +resource "kubernetes_cluster_role" "dba_administrator_cluster_role" { + metadata { + name = var.dba_administrator_role_name + } + aggregation_rule { + cluster_role_selectors { + match_labels = { + "rbac.authorization.k8s.io/aggregate-to-admin" = "true" + } + } + } + + rule { + api_groups = ["cert-manager.io", "acme.cert-manager.io"] + resources = ["certificates", "challenges", "orders", "certificaterequests", "issuers"] + verbs = ["get", "list", "watch", "create", "update", "patch"] + } + + rule { + verbs = ["get", "list", "watch", "create", "update", "patch"] + api_groups = ["networking.istio.io", "security.istio.io"] + resources = ["virtualservices", "authorizationpolicies", "destinationrules", "peerauthentications", "requestauthentications"] + } +} diff --git a/examples/full-cluster/cluster-roles/dba-rolebinding.tf b/examples/full-cluster/cluster-roles/dba-rolebinding.tf new file mode 100644 index 0000000..64fdb3d --- /dev/null +++ b/examples/full-cluster/cluster-roles/dba-rolebinding.tf @@ -0,0 +1,40 @@ +locals { + dba_managed_namespaces = formatlist("%v-%v", var.cluster_name, var.dba_managed_namespaces) + 
dba_k8s_group_name = format("%v%v-%v", local._prefixes["eks-user"], var.cluster_name, var.dba_k8s_group_name) +} + +resource "kubernetes_namespace" "dba_managed_namespaces" { + for_each = toset(local.dba_managed_namespaces) + metadata { + name = each.key + labels = { + istio-injection = "enabled" + } + } +} + +resource "kubernetes_role_binding" "dba_admin_rolebinding" { +# for_each = toset(local.dba_managed_namespaces) + for_each = kubernetes_namespace.dba_managed_namespaces + + metadata { + name = var.dba_admin_rolebinding_name + namespace = each.key + } + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "ClusterRole" + name = var.dba_administrator_role_name + } + subject { + kind = "User" + name = var.dba_k8s_user_name + api_group = "rbac.authorization.k8s.io" + } + subject { + kind = "Group" + name = local.dba_k8s_group_name + api_group = "rbac.authorization.k8s.io" + } +# depends_on = [kubernetes_namespace.dba_managed_namespaces] +} diff --git a/examples/full-cluster/cluster-roles/dba.iam.tf b/examples/full-cluster/cluster-roles/dba.iam.tf new file mode 100644 index 0000000..22e6780 --- /dev/null +++ b/examples/full-cluster/cluster-roles/dba.iam.tf @@ -0,0 +1,113 @@ +locals { + policy_dba_k8s_group_name = replace(local.dba_k8s_group_name, local._prefixes["eks-user"], local._prefixes["eks-policy"]) + role_dba_k8s_group_name = format("%v%v-%v", local._prefixes["eks"], var.cluster_name, var.dba_k8s_group_name) +} + +module "role_dba_administrator" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git" + + role_name = local.role_dba_k8s_group_name + role_description = "Role for EKS cluster ${var.cluster_name} for access by ${var.dba_k8s_group_name}" + enable_ldap_creation = false + assume_policy_document = data.aws_iam_policy_document.dba_administrator_allow_sts.json + attached_policies = [aws_iam_policy.dba_administrator.arn] + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) +} + +resource "aws_iam_policy" "dba_administrator" { + name = local.policy_dba_k8s_group_name + path = "/" + description = "Policy for EKS ${var.cluster_name} IAM access ${var.dba_k8s_group_name}" + policy = data.aws_iam_policy_document.dba_administrator.json +} + +locals { + dba_administrator_policy_statements = { + ECRRead = { + actions = [ + "ecr:Describe*", + "ecr:Get*", + "ecr:ListImages", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + ] + resources = ["*"] + } + EKSRead = { + actions = [ + "eks:ListClusters", + ] + resources = ["*"] + } + EKSReadMyClusters = { + actions = [ + "eks:DescribeCluster", + "eks:AccessKubernetesApi", + ] + resources = [format(local.common_arn, "eks", format("%v/%v", "cluster", var.cluster_name))] + } + } +} + +data "aws_iam_policy_document" "dba_administrator" { + dynamic "statement" { + for_each = local.dba_administrator_policy_statements + iterator = s + content { + sid = format("%v%vAccess", lookup(s.value, "effect", "Allow"), s.key) + effect = lookup(s.value, "effect", "Allow") + actions = lookup(s.value, "actions", []) + resources = lookup(s.value, "resources", []) + not_resources = lookup(s.value, "not_resources", []) + } + } +} + +# allow anyone in this account to assume the role, if they have the permission to do so +data "aws_iam_policy_document" "dba_administrator_allow_sts" { + statement { + sid = "AllowSTSAssume" + effect = "Allow" + actions = ["sts:AssumeRole"] + principals { + type = "AWS" + identifiers = [ + format(local.iam_arn, "root"), + ] + } + } +} 
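+
+#---
+# a sketch of how a DBA would use this role once it exists; the role ARN comes
+# from the info_dba_administrator output below, the rest are placeholders
+#---
+## aws sts assume-role --role-arn <role_arn from info_dba_administrator> --role-session-name dba-admin
+## aws eks update-kubeconfig --name <cluster_name> --kubeconfig setup/kube.config
+## kubectl --kubeconfig setup/kube.config -n adsd-cumulus-dev-db get pods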
+
+# output "role_dba_administrator_arn" {
+#   description = "DBA Administrator role ARN"
+#   value       = module.role_dba_administrator.role_arn
+# }
+
+module "group_dba_administrator" {
+  source = "git@github.e.it.census.gov:terraform-modules/aws-iam-group.git"
+
+  group_name        = local.role_dba_k8s_group_name
+  attached_policies = [aws_iam_policy.dba_administrator.arn]
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.application_tags,
+  )
+}
+
+output "info_dba_administrator" {
+  description = "DBA Administrator IAM details"
+  value = {
+    role_name  = module.role_dba_administrator.role_name
+    role_arn   = module.role_dba_administrator.role_arn
+    group_name = module.group_dba_administrator.group_name
+    group_arn  = module.group_dba_administrator.group_arn
+  }
+}
diff --git a/examples/full-cluster/cluster-roles/deployer-clusterrole.tf b/examples/full-cluster/cluster-roles/deployer-clusterrole.tf
new file mode 100644
index 0000000..2fa46af
--- /dev/null
+++ b/examples/full-cluster/cluster-roles/deployer-clusterrole.tf
@@ -0,0 +1,41 @@
+resource "kubernetes_cluster_role" "cicd_deployer_istiosystem_cluster_role" {
+  metadata {
+    name = var.deployer_istiosystem_role_name
+  }
+
+  rule {
+    api_groups = ["cert-manager.io", "acme.cert-manager.io"]
+    resources  = ["certificates", "challenges", "orders", "certificaterequests", "issuers"]
+    verbs      = ["get", "list", "watch", "create", "update", "patch"]
+  }
+  rule {
+    verbs      = ["get", "list", "watch", "create", "update", "patch"]
+    api_groups = ["networking.istio.io"]
+    resources  = ["gateways"]
+  }
+}
+
+resource "kubernetes_cluster_role" "cicd_deployer_application_cluster_role" {
+  metadata {
+    name = var.deployer_application_role_name
+  }
+  aggregation_rule {
+    cluster_role_selectors {
+      match_labels = {
+        "rbac.authorization.k8s.io/aggregate-to-edit" = "true"
+      }
+    }
+  }
+
+  rule {
+    api_groups = ["cert-manager.io", "acme.cert-manager.io"]
+    resources  = ["certificates", "challenges", "orders", "certificaterequests", "issuers"]
+    verbs      = ["get", "list", "watch", "create", "update", "patch"]
+  }
+
+  rule {
+    verbs      = ["get", "list", "watch", "create", "update", "patch"]
+    api_groups = ["networking.istio.io", "security.istio.io"]
+    resources  = ["virtualservices", "authorizationpolicies", "destinationrules", "peerauthentications", "requestauthentications"]
+  }
+}
diff --git a/examples/full-cluster/cluster-roles/deployer-rolebinding.tf b/examples/full-cluster/cluster-roles/deployer-rolebinding.tf
new file mode 100644
index 0000000..0d6e7f3
--- /dev/null
+++ b/examples/full-cluster/cluster-roles/deployer-rolebinding.tf
@@ -0,0 +1,64 @@
+resource "kubernetes_role_binding" "deployer_istio_role_binding" {
+  metadata {
+    name      = "deployer_istiosystem_role_binding"
+    namespace = var.istio_installed_namespace
+  }
+  role_ref {
+    api_group = "rbac.authorization.k8s.io"
+    kind      = "ClusterRole"
+    name      = var.deployer_istiosystem_role_name
+  }
+  subject {
+    kind      = "User"
+    name      = var.cicd_k8s_user_name
+    api_group = "rbac.authorization.k8s.io"
+  }
+  subject {
+    kind      = "Group"
+#   name      = format("%v%v-%v", local._prefixes["eks-user"], var.cluster_name, var.cicd_k8s_group_name)
+    name      = local.cicd_k8s_iam_username
+    api_group = "rbac.authorization.k8s.io"
+  }
+}
+
+locals {
+  cicd_managed_namespaces = formatlist("%v-%v", var.cluster_name, var.cicd_managed_namespaces)
+  cicd_k8s_iam_username   = format("%v%v-%v", local._prefixes["eks-user"], var.cluster_name, var.cicd_k8s_group_name)
+  cicd_k8s_group_name     = format("%v%v-%v", local._prefixes["eks"], var.cluster_name,
var.cicd_k8s_group_name) +} + +resource "kubernetes_namespace" "cicd_managed_namespaces" { + for_each = toset(local.cicd_managed_namespaces) + metadata { + name = each.key + labels = { + istio-injection = "enabled" + } + } +} + +resource "kubernetes_role_binding" "deployer_application_rolebinding" { +# for_each = toset(local.cicd_managed_namespaces) + for_each = kubernetes_namespace.cicd_managed_namespaces + + metadata { + name = var.deployer_application_rolebinding_name + namespace = each.key + } + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "ClusterRole" + name = var.deployer_application_role_name + } + subject { + kind = "User" + name = var.cicd_k8s_user_name + api_group = "rbac.authorization.k8s.io" + } + subject { + kind = "Group" + name = local.cicd_k8s_iam_username + api_group = "rbac.authorization.k8s.io" + } +# depends_on = [kubernetes_namespace.cicd_managed_namespaces] +} diff --git a/examples/full-cluster/cluster-roles/deployer.iam.tf b/examples/full-cluster/cluster-roles/deployer.iam.tf new file mode 100644 index 0000000..dfe46f4 --- /dev/null +++ b/examples/full-cluster/cluster-roles/deployer.iam.tf @@ -0,0 +1,132 @@ +locals { + policy_cicd_k8s_group_name = replace(local.cicd_k8s_iam_username, local._prefixes["eks-user"], local._prefixes["eks-policy"]) + iam_policies_cicd = ["p-inf-manage-access-keys"] +} + +data "aws_iam_policy" "cicd_deployer_policies" { + for_each = toset(local.iam_policies_cicd) + name = each.key +} + +module "service_cicd_deployer" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-user.git" + + iam_username = local.cicd_k8s_iam_username + username = "" + email_address = "" + groups = ["g-inf-ip-restriction"] + generate_password = false + service_account = true + enable_sending_mail = false + create_access_keys = false + attached_policies = flatten(concat([for k, v in data.aws_iam_policy.cicd_deployer_policies : v.arn], [aws_iam_policy.cicd_deployer.arn])) + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + ) +} + +resource "aws_iam_policy" "cicd_deployer" { + name = local.policy_cicd_k8s_group_name + path = "/" + description = "Policy for EKS ${var.cluster_name} IAM access ${var.cicd_k8s_group_name}" + policy = data.aws_iam_policy_document.cicd_deployer.json +} + +locals { + cicd_deployer_policy_statements = { + ECRRead = { + actions = [ + "ecr:Describe*", + "ecr:Get*", + "ecr:ListImages", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + ] + resources = ["*"] + } + ECRWrite = { + effect = "Deny" + actions = [ + "ecr:BatchDeleteImage", + "ecr:CompleteLayerUpload", + "ecr:CreateRepository", + "ecr:DeleteRepository", + "ecr:InitiateLayerUpload", + "ecr:PutImage", + "ecr:UploadLayerPart" + ] + # not_resources = [format(local.common_arn, "ecr", format("repository/eks/%v/*", var.cluster_name))] + not_resources = [format(local.common_arn, "ecr", "repository/eks/*")] + } + EKSRead = { + actions = [ + "eks:ListClusters", + ] + resources = ["*"] + } + EKSReadMyClusters = { + actions = [ + "eks:AccessKubernetesApi", + "eks:DescribeCluster", + ] + resources = [format(local.common_arn, "eks", format("%v/%v", "cluster", var.cluster_name))] + } + # IAMRead = { + # actions = [ + # "iam:ListRoles", + # ] + # resources = ["*"] + # } + } +} + +data "aws_iam_policy_document" "cicd_deployer" { + dynamic "statement" { + for_each = local.cicd_deployer_policy_statements + iterator = s + content { + sid = format("%v%vAccess", lookup(s.value, "effect", "Allow"), s.key) + 
effect        = lookup(s.value, "effect", "Allow")
+      actions       = lookup(s.value, "actions", [])
+      resources     = lookup(s.value, "resources", [])
+      not_resources = lookup(s.value, "not_resources", [])
+    }
+  }
+}
+
+# output "service_cicd_deployer_arn" {
+#   description = "CICD Deployer user ARN"
+#   value       = module.service_cicd_deployer.user_arn
+# }
+#
+# output "service_cicd_deployer_username" {
+#   description = "CICD Deployer username"
+#   value       = module.service_cicd_deployer.user_name
+# }
+
+module "group_cicd_deployer" {
+  source = "git@github.e.it.census.gov:terraform-modules/aws-iam-group.git"
+
+  group_name        = local.cicd_k8s_group_name
+  attached_policies = flatten(concat([for k, v in data.aws_iam_policy.cicd_deployer_policies : v.arn], [aws_iam_policy.cicd_deployer.arn]))
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.application_tags,
+  )
+}
+
+output "info_cicd_deployer" {
+  description = "CICD Deployer IAM details"
+  value = {
+    user_name  = module.service_cicd_deployer.user_name
+    user_arn   = module.service_cicd_deployer.user_arn
+    group_name = module.group_cicd_deployer.group_name
+    group_arn  = module.group_cicd_deployer.group_arn
+  }
+}
diff --git a/examples/full-cluster/cluster-roles/kubeconfig.tf b/examples/full-cluster/cluster-roles/kubeconfig.tf
new file mode 100644
index 0000000..5e386f5
--- /dev/null
+++ b/examples/full-cluster/cluster-roles/kubeconfig.tf
@@ -0,0 +1,29 @@
+resource "null_resource" "kubeconfig" {
+  triggers = {
+    always_run = timestamp()
+  }
+  provisioner "local-exec" {
+    command = "which kubectl > /dev/null 2>&1; if [ $? != 0 ]; then echo 'missing kubectl'; exit 1; else exit 0; fi"
+  }
+  provisioner "local-exec" {
+    command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'"
+  }
+  provisioner "local-exec" {
+    environment = {
+      AWS_PROFILE = var.profile
+      AWS_REGION  = local.region
+    }
+    command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/kube.config"
+  }
+  depends_on = [data.aws_eks_cluster.cluster]
+}
+
+#---
+# call it like
+#---
+## provisioner "local-exec" {
+##   environment = {
+##     KUBECONFIG = "${path.root}/setup/kube.config"
+##   }
+##   command = "kubectl set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true"
+## }
diff --git a/examples/full-cluster/cluster-roles/locals.tf b/examples/full-cluster/cluster-roles/locals.tf
new file mode 100644
index 0000000..92d0613
--- /dev/null
+++ b/examples/full-cluster/cluster-roles/locals.tf
@@ -0,0 +1,11 @@
+locals {
+  base_arn   = format("arn:%v:%%v:%v:%v:%%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id)
+  iam_arn    = format("arn:%v:iam::%v:%%v", data.aws_arn.current.partition, data.aws_caller_identity.current.account_id)
+  common_arn = format("arn:%v:%%v:%v:%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id)
+
+  base_tags = {
+    "eks-cluster-name"      = var.cluster_name
+    "boc:tf_module_version" = local._module_version
+    "boc:created_by"        = "terraform"
+  }
+}
diff --git a/examples/full-cluster/cluster-roles/main.tf b/examples/full-cluster/cluster-roles/main.tf
new file mode 100644
index 0000000..ef02738
--- /dev/null
+++ b/examples/full-cluster/cluster-roles/main.tf
@@ -0,0 +1,30 @@
+locals {
+  aws_auth_users = [
+    {
+      userarn      = module.service_cicd_deployer.user_arn
+      aws_username = ""
+      username     = var.cicd_k8s_user_name
+      groups       = [local.cicd_k8s_group_name]
+    },
+  ]
+  aws_auth_roles = [
+    {
+      rolearn : module.role_dba_administrator.role_arn
+      aws_rolename : ""
+      username : var.dba_k8s_user_name
+      groups = [local.dba_k8s_group_name]
+    },
+  ]
+}
+
+module "awsauth_cluster-roles" {
+  source = "git@github.e.it.census.gov:terraform-modules/aws-eks.git//patch-aws-auth"
+
+  region         = local.region
+  profile        = var.profile
+  cluster_name   = var.cluster_name
+  aws_auth_users = local.aws_auth_users
+  aws_auth_roles = local.aws_auth_roles
+
+  keep_temporary_files = false
+}
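+
+#---
+# after an apply, the new user/role mappings should appear in the aws-auth
+# ConfigMap; a quick check (a sketch, using the kubeconfig from kubeconfig.tf):
+#---
+## kubectl --kubeconfig setup/kube.config -n kube-system get configmap aws-auth -o yaml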
diff --git a/examples/full-cluster/cluster-roles/prefixes.tf b/examples/full-cluster/cluster-roles/prefixes.tf
new file mode 120000
index 0000000..e0bf5ad
--- /dev/null
+++ b/examples/full-cluster/cluster-roles/prefixes.tf
@@ -0,0 +1 @@
+../prefixes.tf
\ No newline at end of file
diff --git a/examples/full-cluster/cluster-roles/providers.tf b/examples/full-cluster/cluster-roles/providers.tf
new file mode 120000
index 0000000..7244d01
--- /dev/null
+++ b/examples/full-cluster/cluster-roles/providers.tf
@@ -0,0 +1 @@
+../providers.tf
\ No newline at end of file
diff --git a/examples/full-cluster/cluster-roles/region.tf b/examples/full-cluster/cluster-roles/region.tf
new file mode 100644
index 0000000..b7b1696
--- /dev/null
+++ b/examples/full-cluster/cluster-roles/region.tf
@@ -0,0 +1,4 @@
+locals {
+  region = var.region
+}
+
diff --git a/examples/full-cluster/cluster-roles/remote_state.yml b/examples/full-cluster/cluster-roles/remote_state.yml
new file mode 100644
index 0000000..b1c5141
--- /dev/null
+++ b/examples/full-cluster/cluster-roles/remote_state.yml
@@ -0,0 +1,9 @@
+directory: "applications/apps-adsd-eks/vpc/east/vpc3/apps/eks-adsd-cumulus-qa/cluster-roles"
+profile: "252960665057-ma6-gov"
+bucket: "inf-tfstate-252960665057"
+bucket_region: "us-gov-east-1"
+region: "us-gov-east-1"
+regions: ["us-gov-east-1"]
+account_id: "252960665057"
+account_alias: "ma6-gov"
+aws_environment: "gov"
diff --git a/examples/full-cluster/cluster-roles/variables.eks.tf b/examples/full-cluster/cluster-roles/variables.eks.tf
new file mode 120000
index 0000000..7dd95db
--- /dev/null
+++ b/examples/full-cluster/cluster-roles/variables.eks.tf
@@ -0,0 +1 @@
+../variables.eks.tf
\ No newline at end of file
diff --git a/examples/full-cluster/cluster-roles/variables.tf b/examples/full-cluster/cluster-roles/variables.tf
new file mode 100644
index 0000000..b11041c
--- /dev/null
+++ b/examples/full-cluster/cluster-roles/variables.tf
@@ -0,0 +1,85 @@
+variable "deployer_istiosystem_role_name" {
+  description = "The Kubernetes cluster role name of the CICD Deployer for the istio-system namespace"
+  type        = string
+  default     = "deployer-istiosystem-role"
+}
+
+variable "deployer_application_role_name" {
+  description = "The Kubernetes cluster role name of the CICD Deployer"
+  type        = string
+  default     = "deployer-application-role"
+}
+
+variable "dba_administrator_role_name" {
+  description = "The Kubernetes cluster role name of the DBA Administrator"
+  type        = string
+  default     = "dba-admin-role"
+}
+
+variable "istio_installed_namespace" {
+  description = "Namespace where Istio is installed"
+  type        = string
+  default     = "istio-system"
+}
+
+variable "cicd_k8s_user_name" {
+  description = "The user name of the CICD Deployer"
+  type        = string
+  default     = "cicd-deployer"
+}
+variable "cicd_k8s_group_name" {
+  description = "The group name the CICD Deployer belongs to (excluding the service-account and cluster prefixes)"
+  type        = string
+  default     = "cicd-deployer"
+}
+
+variable "dba_k8s_user_name" {
+  description = "The user name of the DBA Administrator"
+  type        = string
+  default     = "dba-admin"
+}
"dba_k8s_group_name" { + description = "The Group name of dba-admin belongs to (excluding prefix for service account and cluster)" + type = string + default = "dba-admin" +} + +variable "deployer_application_rolebinding_name" { + description = "Role binding name of deployer that binding to role deployer_application_cluster_role" + type = string + default = "deployer-application-rolebinding" +} + +variable "dba_admin_rolebinding_name" { + description = "Role binding name of deployer that binding to role deployer_application_cluster_role" + type = string + default = "dba-admin-rolebinding" +} + +variable "cicd_managed_namespaces" { + description = "Deployer managed namespaces that deploy can create resources in (excluding cluster name prefix)" + type = list + default = [ + "apps", + "addressupdate", + "adminmatchrecord", + "cbs-apps", + "collectionevent", + "collectionintervention", + "collectionoperation", + "collectionresponse", + "common", + "mft", + "monitoring", + ] + +} + +variable "dba_managed_namespaces" { + description = "DBA admin managed namespaces (excluding cluster name prefix)" + type = list + default = [ + "db" + ] + +} diff --git a/examples/full-cluster/cluster-roles/version.tf b/examples/full-cluster/cluster-roles/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/full-cluster/cluster-roles/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git a/examples/full-cluster/common-services/.gitignore b/examples/full-cluster/common-services/.gitignore new file mode 100644 index 0000000..1ae9a3f --- /dev/null +++ b/examples/full-cluster/common-services/.gitignore @@ -0,0 +1 @@ +certs/*.key diff --git a/examples/full-cluster/common-services/README.certs.md b/examples/full-cluster/common-services/README.certs.md new file mode 100644 index 0000000..14fb411 --- /dev/null +++ b/examples/full-cluster/common-services/README.certs.md @@ -0,0 +1,5 @@ +this is a special CA certificate + +command to sign the certificate: +./sign-subordinate-ca-cert.sh pki.adsd-cumulus-dev.dev.dice.census.gov.csr "c=US,o=U.S. 
Census Bureau,OU=PKI,ou=EKS,ou=vpc2-dice-dev,ou=adsd-cumulus-dev,cn=pki.adsd-cumulus-dev.dev.dice.census.gov" 1825 + diff --git a/examples/full-cluster/common-services/README.md b/examples/full-cluster/common-services/README.md new file mode 100644 index 0000000..089cab7 --- /dev/null +++ b/examples/full-cluster/common-services/README.md @@ -0,0 +1,84 @@ +```console +% kubectl -n kube-system get pods -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +aws-load-balancer-controller-54fdf64896-jzwsr 1/1 Running 0 23h 10.194.26.74 ip-10-194-26-252.ec2.internal +aws-load-balancer-controller-54fdf64896-qqt6d 1/1 Running 0 23h 10.194.24.242 ip-10-194-24-49.ec2.internal +aws-node-29kmc 1/1 Running 0 7d1h 10.194.24.90 ip-10-194-24-90.ec2.internal +aws-node-6d8ls 1/1 Running 1 7d1h 10.194.25.120 ip-10-194-25-120.ec2.internal +aws-node-6vrbg 1/1 Running 1 7d1h 10.194.26.252 ip-10-194-26-252.ec2.internal +aws-node-ldgxc 1/1 Running 1 7d1h 10.194.24.49 ip-10-194-24-49.ec2.internal +coredns-65bfc5645f-g86rx 1/1 Running 0 7d2h 10.194.24.207 ip-10-194-24-90.ec2.internal +coredns-65bfc5645f-xj9rl 1/1 Running 0 7d2h 10.194.24.69 ip-10-194-24-90.ec2.internal +efs-csi-controller-65fb886fd4-7slw6 3/3 Running 0 2d21h 10.194.24.90 ip-10-194-24-90.ec2.internal +efs-csi-controller-65fb886fd4-vcf9l 3/3 Running 0 2d21h 10.194.25.120 ip-10-194-25-120.ec2.internal +efs-csi-node-6t6v6 3/3 Running 0 2d21h 10.194.25.120 ip-10-194-25-120.ec2.internal +efs-csi-node-kxqfb 3/3 Running 0 2d21h 10.194.24.49 ip-10-194-24-49.ec2.internal +efs-csi-node-p8hzn 3/3 Running 0 2d21h 10.194.26.252 ip-10-194-26-252.ec2.internal +efs-csi-node-xxq9h 3/3 Running 0 2d21h 10.194.24.90 ip-10-194-24-90.ec2.internal +kube-proxy-78n7f 1/1 Running 0 7d1h 10.194.24.90 ip-10-194-24-90.ec2.internal +kube-proxy-cms7c 1/1 Running 0 7d1h 10.194.24.49 ip-10-194-24-49.ec2.internal +kube-proxy-h2t6n 1/1 Running 0 7d1h 10.194.26.252 ip-10-194-26-252.ec2.internal +kube-proxy-jkxnz 1/1 Running 0 7d1h 10.194.25.120 ip-10-194-25-120.ec2.internal +``` + +```console +% kubectl get pods --all-namespaces -o wide +NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +cert-manager cert-manager-7fcbc79fc5-xwt4s 1/1 Running 0 51m 10.194.24.138 ip-10-194-24-49.ec2.internal +cert-manager cert-manager-cainjector-6b7f4575f4-xpgnc 1/1 Running 0 51m 10.194.24.56 ip-10-194-24-49.ec2.internal +cert-manager cert-manager-webhook-6cd54b96fc-rvld4 1/1 Running 0 51m 10.194.24.170 ip-10-194-24-90.ec2.internal +istio-system istio-egressgateway-7fcc58ddf7-dtx25 1/1 Running 0 95m 10.194.26.120 ip-10-194-26-252.ec2.internal +istio-system istio-ingressgateway-75f76c546b-vx2v6 1/1 Running 0 95m 10.194.24.8 ip-10-194-24-90.ec2.internal +istio-system istiod-85b6f86f94-vqfj2 1/1 Running 0 95m 10.194.25.155 ip-10-194-25-120.ec2.internal +kube-system aws-load-balancer-controller-54fdf64896-jzwsr 1/1 Running 0 23h 10.194.26.74 ip-10-194-26-252.ec2.internal +kube-system aws-load-balancer-controller-54fdf64896-qqt6d 1/1 Running 0 23h 10.194.24.242 ip-10-194-24-49.ec2.internal +kube-system aws-node-29kmc 1/1 Running 0 7d1h 10.194.24.90 ip-10-194-24-90.ec2.internal +kube-system aws-node-6d8ls 1/1 Running 1 7d1h 10.194.25.120 ip-10-194-25-120.ec2.internal +kube-system aws-node-6vrbg 1/1 Running 1 7d1h 10.194.26.252 ip-10-194-26-252.ec2.internal +kube-system aws-node-ldgxc 1/1 Running 1 7d1h 10.194.24.49 ip-10-194-24-49.ec2.internal +kube-system coredns-65bfc5645f-g86rx 1/1 Running 0 7d2h 10.194.24.207 ip-10-194-24-90.ec2.internal +kube-system 
coredns-65bfc5645f-xj9rl                         1/1     Running   0          7d2h    10.194.24.69    ip-10-194-24-90.ec2.internal
+kube-system    efs-csi-controller-65fb886fd4-7slw6              3/3     Running   0          2d21h   10.194.24.90    ip-10-194-24-90.ec2.internal
+kube-system    efs-csi-controller-65fb886fd4-vcf9l              3/3     Running   0          2d21h   10.194.25.120   ip-10-194-25-120.ec2.internal
+kube-system    efs-csi-node-6t6v6                               3/3     Running   0          2d21h   10.194.25.120   ip-10-194-25-120.ec2.internal
+kube-system    efs-csi-node-kxqfb                               3/3     Running   0          2d21h   10.194.24.49    ip-10-194-24-49.ec2.internal
+kube-system    efs-csi-node-p8hzn                               3/3     Running   0          2d21h   10.194.26.252   ip-10-194-26-252.ec2.internal
+kube-system    efs-csi-node-xxq9h                               3/3     Running   0          2d21h   10.194.24.90    ip-10-194-24-90.ec2.internal
+kube-system    kube-proxy-78n7f                                 1/1     Running   0          7d1h    10.194.24.90    ip-10-194-24-90.ec2.internal
+kube-system    kube-proxy-cms7c                                 1/1     Running   0          7d1h    10.194.24.49    ip-10-194-24-49.ec2.internal
+kube-system    kube-proxy-h2t6n                                 1/1     Running   0          7d1h    10.194.26.252   ip-10-194-26-252.ec2.internal
+kube-system    kube-proxy-jkxnz                                 1/1     Running   0          7d1h    10.194.25.120   ip-10-194-25-120.ec2.internal
+operators      istio-operator-7cc8974d48-f2j2m                  1/1     Running   0          14h     10.194.26.211   ip-10-194-26-252.ec2.internal
+sample-alb     sample-alb-8744f54f9-7w4cj                       1/1     Running   0          23h     10.194.25.67    ip-10-194-25-120.ec2.internal
+sample-alb     sample-alb-8744f54f9-gs8f5                       1/1     Running   0          23h     10.194.24.147   ip-10-194-24-49.ec2.internal
+sample-alb     sample-alb-8744f54f9-v6kgr                       1/1     Running   0          23h     10.194.26.168   ip-10-194-26-252.ec2.internal
+sample-elb     sample-elb-69786b5f7d-d7nb4                      1/1     Running   0          2d21h   10.194.26.178   ip-10-194-26-252.ec2.internal
+sample-elb     sample-elb-69786b5f7d-mw7jb                      1/1     Running   0          2d21h   10.194.24.193   ip-10-194-24-49.ec2.internal
+sample-elb     sample-elb-69786b5f7d-tqz2s                      1/1     Running   0          2d21h   10.194.25.96    ip-10-194-25-120.ec2.internal
+sample-nlb     sample-nlb-6cd5769dfb-n8dmd                      1/1     Running   0          2d21h   10.194.25.198   ip-10-194-25-120.ec2.internal
+sample-nlb     sample-nlb-6cd5769dfb-qw8n4                      1/1     Running   0          2d21h   10.194.24.132   ip-10-194-24-49.ec2.internal
+sample-nlb     sample-nlb-6cd5769dfb-t2nhp                      1/1     Running   0          2d21h   10.194.26.18    ip-10-194-26-252.ec2.internal
+```
+
+```console
+% kubectl -n istio-system get secret | grep -iE "ca-secret|tls"
+istio-ca-secret   istio.io/ca-root    5   7d2h
+nginx-cert        kubernetes.io/tls   3   6d20h
+root-secret       kubernetes.io/tls   3   7d14h
+```
+
+```console
+% kubectl get pods --all-namespaces -o wide |grep -i cert
+cert-manager   cert-manager-7fcbc79fc5-xwt4s              1/1   Running   0   7d22h   10.194.24.138   ip-10-194-24-49.ec2.internal
+cert-manager   cert-manager-cainjector-6b7f4575f4-xpgnc   1/1   Running   0   7d22h   10.194.24.56    ip-10-194-24-49.ec2.internal
+cert-manager   cert-manager-webhook-6cd54b96fc-rvld4      1/1   Running   0   7d22h   10.194.24.170   ip-10-194-24-90.ec2.internal
+```
+
+```console
+$ kubectl -n cert-manager get secrets
+NAME          TYPE     DATA   AGE
+ca-key-pair   Opaque   2      5m2s
+...
+$ kubectl get clusterissuer
+NAME            READY   AGE
+clusterissuer   True    5m36s
+```
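+
+Once the `ca-key-pair` secret and the ClusterIssuer are in place, a quick end-to-end check looks something like this (a sketch; resource names are the ones shown above, and the kubeconfig path assumes the setup used elsewhere in these examples):
+
+```console
+% kubectl --kubeconfig setup/kube.config describe clusterissuer clusterissuer
+% kubectl --kubeconfig setup/kube.config get certificates --all-namespaces
+% kubectl --kubeconfig setup/kube.config -n istio-system get secret nginx-cert -o jsonpath='{.type}'
+```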
diff --git a/examples/full-cluster/common-services/ca-cert.tf b/examples/full-cluster/common-services/ca-cert.tf
new file mode 100644
index 0000000..8e1c01b
--- /dev/null
+++ b/examples/full-cluster/common-services/ca-cert.tf
@@ -0,0 +1,119 @@
+# tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g')
+# terraform taint null_resource.ca_cert[0]
+# # (wait for submitted cert to be ready)
+# tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g')
+# tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g')
+
+#---
+# ca
+#---
+locals {
+  ca_dns_name = format("pki.%v.%v", var.cluster_name, var.vpc_domain_name)
+  # ca_ou = format("ou=%v,ou=EKS,ou=%v,ou=PKI",var.cluster_name,var.vpc_full_name)
+  ca_ou            = format("eks-%v-%v-PKI", var.cluster_name, var.vpc_full_name)
+  ca_cert_download = false
+  ca_cert_san      = [local.ca_dns_name]
+
+  ca_key_filename    = format("${path.root}/certs/%v.key", local.ca_dns_name)
+  ca_key_exists      = fileexists(local.ca_key_filename)
+  ca_cert_filename   = format("${path.root}/certs/%v.crt", local.ca_dns_name)
+  ca_cert_exists     = fileexists(local.ca_cert_filename)
+  ca_root_filename   = "${path.root}/certs/ca-root.crt"
+  ca_root_exists     = fileexists(local.ca_root_filename)
+  ca_bundle_contents = local.ca_cert_exists && local.ca_root_exists ? format("%v%v", file(local.ca_cert_filename), file(local.ca_root_filename)) : ""
+  ca_bundle_filename = format("${path.root}/certs/%v.bundle.crt", local.ca_dns_name)
+}
+
+resource "tls_private_key" "ca" {
+  algorithm = "RSA"
+  rsa_bits  = 4096
+}
+
+resource "tls_cert_request" "ca" {
+  key_algorithm   = "RSA"
+  private_key_pem = tls_private_key.ca.private_key_pem
+
+  dns_names = local.ca_cert_san
+  subject {
+    common_name         = local.ca_dns_name
+    organizational_unit = local.ca_ou
+    organization        = "U.S. Census Bureau"
+    country             = "US"
+  }
+}
+
+resource "null_resource" "ca_root_cert" {
+  provisioner "local-exec" {
+    command = "test -d certs || mkdir certs"
+  }
+  provisioner "local-exec" {
+    command = "curl -o ${local.ca_root_filename} http://ca.apps.tco.census.gov/certs/ca"
+  }
+}
+
+resource "null_resource" "ca_files" {
+  triggers = {
+    ca_key_public = sha256(tls_private_key.ca.public_key_pem)
+    ca_csr        = sha256(tls_cert_request.ca.cert_request_pem)
+  }
+
+  # get key
+  provisioner "local-exec" {
+    command = "test -d certs || mkdir certs"
+  }
+  provisioner "local-exec" {
+    command = "echo '${tls_private_key.ca.private_key_pem}' > certs/${local.ca_dns_name}.key"
+  }
+  provisioner "local-exec" {
+    command = "echo '${tls_private_key.ca.public_key_pem}' > certs/${local.ca_dns_name}.public_key"
+  }
+  # get csr
+  provisioner "local-exec" {
+    command = "echo '${tls_cert_request.ca.cert_request_pem}' > certs/${local.ca_dns_name}.csr"
+  }
+
+  # details on how to get the certs
+  provisioner "local-exec" {
+    command = "echo 'add the key file to .gitignore, add it to git-secret, and hide it. Then add the .secret to git'"
+  }
+  provisioner "local-exec" {
+    command = "echo 'now submit file to TCO for signing and return the result as below:\n csr = certs/${local.ca_dns_name}.csr\n cert = certs/${local.ca_dns_name}.crt\n'"
+  }
+  provisioner "local-exec" {
+    command = "echo command = ./sign-subordinate-ca-cert.sh ${local.ca_dns_name}.csr 'c=US,o=U.S. Census Bureau,OU=PKI,ou=EKS,ou=${var.vpc_full_name},ou=${var.cluster_name},cn=${local.ca_dns_name}' 730"
+  }
+  provisioner "local-exec" {
+    command = "echo 'curl -O http://ca.apps.tco.census.gov/certs/server?host=${local.ca_dns_name}&format=crt&download=1'"
+  }
+}
+
+resource "null_resource" "ca_cert" {
+  count = local.ca_cert_download ? 1 : 0
+  # get cert
+  provisioner "local-exec" {
+    command = "curl -o ${path.root}/certs/${local.ca_dns_name}.crt 'http://ca.apps.tco.census.gov/certs/server?host=${local.ca_dns_name}&format=crt&download=1'"
+  }
+}
+
+resource "local_file" "ca_bundle_cert" {
+  count = local.ca_cert_download && local.ca_cert_exists && local.ca_root_exists && length(local.ca_bundle_contents) > 0 ? 1 : 0
+
+  content         = local.ca_bundle_contents
+  filename        = local.ca_bundle_filename
+  file_permission = "0644"
+}
+
+#---
+# once the cert is in place, you can use the ACM certificate something like below
+#---
+## resource "aws_acm_certificate" "ca" {
+##   count             = local.ca_cert_exists ? 1 : 0
+##   private_key       = file("${path.root}/certs/${local.ca_dns_name}.key")
+##   certificate_body  = file("${path.root}/certs/${local.ca_dns_name}.crt")
+##   certificate_chain = file("/etc/pki/tls/certs/cacert.crt")
+##
+##   tags = merge(
+##     local.common_tags,
+##     map("Name", local.ca_dns_name),
+##   )
+## }
diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/.helmignore b/examples/full-cluster/common-services/charts/cluster-autoscaler/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/Chart.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/Chart.yaml
new file mode 100644
index 0000000..0b94b05
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/Chart.yaml
@@ -0,0 +1,20 @@
+apiVersion: v2
+appVersion: 1.21.0
+description: Scales Kubernetes worker nodes within autoscaling groups.
+engine: gotpl
+home: https://github.com/kubernetes/autoscaler
+icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png
+maintainers:
+  - email: e.bailey@sportradar.com
+    name: yurrriq
+  - email: mgoodness@gmail.com
+    name: mgoodness
+  - email: guyjtempleton@googlemail.com
+    name: gjtempleton
+  - email: scott.crooks@gmail.com
+    name: sc250024
+name: cluster-autoscaler
+sources:
+  - https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler
+type: application
+version: 9.10.7
diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/README.md b/examples/full-cluster/common-services/charts/cluster-autoscaler/README.md
new file mode 100644
index 0000000..43bf4bf
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/README.md
@@ -0,0 +1,415 @@
+# cluster-autoscaler
+
+Scales Kubernetes worker nodes within autoscaling groups.
+
+## TL;DR:
+
+```console
+$ helm repo add autoscaler https://kubernetes.github.io/autoscaler
+
+# Method 1 - Using Autodiscovery
+$ helm install my-release autoscaler/cluster-autoscaler \
+--set 'autoDiscovery.clusterName'=<cluster-name>
+
+# Method 2 - Specifying groups manually
+$ helm install my-release autoscaler/cluster-autoscaler \
+--set "autoscalingGroups[0].name=your-asg-name" \
+--set "autoscalingGroups[0].maxSize=10" \
+--set "autoscalingGroups[0].minSize=1"
+```
+
+## Introduction
+
+This chart bootstraps a cluster-autoscaler deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Helm 3+
+- Kubernetes 1.8+
+  - [Older versions](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#releases) may work by overriding the `image`. Cluster autoscaler internally simulates the scheduler and bugs between mismatched versions may be subtle.
+- Azure AKS specific Prerequisites:
+  - Kubernetes 1.10+ with RBAC enabled.
+
+## Previous Helm Chart
+
+The previous `cluster-autoscaler` Helm chart hosted at [helm/charts](https://github.com/helm/charts) has been moved to this repository in accordance with the [Deprecation timeline](https://github.com/helm/charts#deprecation-timeline). Note that a few things have changed between this version and the old version:
+
+- This repository **only** supports Helm chart installations using Helm 3+ since the `apiVersion` on the charts has been marked as `v2`.
+- Previous versions of the Helm chart have not been migrated.
+
+## Migration from 1.X to 9.X+ versions of this Chart
+
+**TL;DR:**
+You should choose to use versions >=9.0.0 of the `cluster-autoscaler` chart published from this repository; previous versions, and the `cluster-autoscaler-chart` with versioning 1.X.X published from this repository, are deprecated.
+
+<details>
+  <summary>Previous versions of this chart - further details</summary>
+
+On initial migration of this chart from the `helm/charts` repository, this chart was renamed from `cluster-autoscaler` to `cluster-autoscaler-chart` due to technical limitations. This affected all `1.X` releases of the chart; version 2.0.0 of this chart exists only to mark the [`cluster-autoscaler-chart` chart](https://artifacthub.io/packages/helm/cluster-autoscaler/cluster-autoscaler-chart) as deprecated.
+
+Releases of the chart from `9.0.0` onwards return the naming of the chart to `cluster-autoscaler` and return to following the versioning established by the chart's previous location at .
+
+To migrate from a 1.X release of the chart to a `9.0.0` or later release, you should first uninstall your `1.X` install of the `cluster-autoscaler-chart` chart, before performing the installation of the new `cluster-autoscaler` chart.
+</details>
+
+## Migration from 9.0 to 9.1
+
+Starting from `9.1.0` the `envFromConfigMap` value is expected to contain the name of a ConfigMap that is used as ref for `envFrom`, similar to `envFromSecret`. If you want to keep the previous behaviour of `envFromConfigMap` you must rename it to `extraEnvConfigMaps`.
+
+## Installing the Chart
+
+**By default, no deployment is created and nothing will autoscale**.
+
+You must provide some minimal configuration, either to specify instance groups or enable auto-discovery. It is not recommended to do both.
+
+Either:
+
+- Set `autoDiscovery.clusterName` and provide additional autodiscovery options if necessary **or**
+- Set static node group configurations for one or more node groups (using `autoscalingGroups` or `autoscalingGroupsnamePrefix`).
+
+To create a valid configuration, follow instructions for your cloud provider:
+
+* [AWS](#aws---using-auto-discovery-of-tagged-instance-groups)
+* [GCE](#gce)
+* [Azure AKS](#azure-aks)
+* [OpenStack Magnum](#openstack-magnum)
+
+### AWS - Using auto-discovery of tagged instance groups
+
+Auto-discovery finds ASGs with tags as below and automatically manages them based on the min and max size specified in the ASG. `cloudProvider=aws` only.
+
+- Tag the ASGs with keys to match `.Values.autoDiscovery.tags`, by default: `k8s.io/cluster-autoscaler/enabled` and `k8s.io/cluster-autoscaler/<cluster-name>`
+- Verify the [IAM Permissions](#aws---iam)
+- Set `autoDiscovery.clusterName=<cluster-name>`
+- Set `awsRegion=<aws-region>`
+- Set (optional) `awsAccessKeyID=<your-access-key-id>` and `awsSecretAccessKey=<your-secret-access-key>` if you want to [use AWS credentials directly instead of an instance role](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)
+
+```console
+$ helm install my-release autoscaler/cluster-autoscaler --set autoDiscovery.clusterName=<cluster-name> --set awsRegion=<aws-region>
+```
+
+Alternatively with your own AWS credentials
+
+```console
+$ helm install my-release autoscaler/cluster-autoscaler --set autoDiscovery.clusterName=<cluster-name> --set awsRegion=<aws-region> --set awsAccessKeyID=<your-access-key-id> --set awsSecretAccessKey=<your-secret-access-key>
+```
+
+#### Specifying groups manually
+
+Without autodiscovery, specify an array of elements each containing an ASG name, min size, and max size. The sizes specified here will be applied to the ASG, assuming IAM permissions are correctly configured.
+
+- Verify the [IAM Permissions](#aws---iam)
+- Either provide a yaml file setting `autoscalingGroups` (see values.yaml) or use `--set` e.g.:
+
+```console
+$ helm install my-release autoscaler/cluster-autoscaler \
+--set "autoscalingGroups[0].name=your-asg-name" \
+--set "autoscalingGroups[0].maxSize=10" \
+--set "autoscalingGroups[0].minSize=1"
+```
+
+#### Auto-discovery
+
+For auto-discovery of instances to work, they must be tagged with the keys in `.Values.autoDiscovery.tags`, which by default are
+`k8s.io/cluster-autoscaler/enabled` and `k8s.io/cluster-autoscaler/<cluster-name>`
+
+The value of the tag does not matter, only the key.
+
+An example kops spec excerpt:
+
+```yaml
+apiVersion: kops/v1alpha2
+kind: Cluster
+metadata:
+  name: my.cluster.internal
+spec:
+  additionalPolicies:
+    node: |
+      [
+        {"Effect":"Allow","Action":["autoscaling:DescribeAutoScalingGroups","autoscaling:DescribeAutoScalingInstances","autoscaling:DescribeLaunchConfigurations","autoscaling:DescribeTags","autoscaling:SetDesiredCapacity","autoscaling:TerminateInstanceInAutoScalingGroup"],"Resource":"*"}
+      ]
+  ...
+---
+apiVersion: kops/v1alpha2
+kind: InstanceGroup
+metadata:
+  labels:
+    kops.k8s.io/cluster: my.cluster.internal
+  name: my-instances
+spec:
+  cloudLabels:
+    k8s.io/cluster-autoscaler/enabled: ""
+    k8s.io/cluster-autoscaler/my.cluster.internal: ""
+  image: kops.io/k8s-1.8-debian-jessie-amd64-hvm-ebs-2018-01-14
+  machineType: r4.large
+  maxSize: 4
+  minSize: 0
+```
+
+In this example you would need to `--set autoDiscovery.clusterName=my.cluster.internal` when installing.
+
+It is not recommended to try to mix this with setting `autoscalingGroups`.
+
+See the [autoscaler AWS documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#auto-discovery-setup) for more discussion of the setup.
+
+### GCE
+
+The following parameters are required:
+
+- `autoDiscovery.clusterName=any-name`
+- `cloud-provider=gce`
+- `autoscalingGroupsnamePrefix[0].name=your-ig-prefix,autoscalingGroupsnamePrefix[0].maxSize=10,autoscalingGroupsnamePrefix[0].minSize=1`
+
+To use Managed Instance Group (MIG) auto-discovery, provide a YAML file setting `autoscalingGroupsnamePrefix` (see values.yaml) or use `--set` when installing the Chart - e.g.
+
+```console
+$ helm install my-release autoscaler/cluster-autoscaler \
+--set "autoscalingGroupsnamePrefix[0].name=your-ig-prefix,autoscalingGroupsnamePrefix[0].maxSize=10,autoscalingGroupsnamePrefix[0].minSize=1" \
+--set autoDiscovery.clusterName=<cluster-name> \
+--set cloudProvider=gce
+```
+
+Note that `your-ig-prefix` should be a _prefix_ matching one or more MIGs, and _not_ the full name of the MIG. For example, to match multiple instance groups - `k8s-node-group-a-standard`, `k8s-node-group-b-gpu`, you would use a prefix of `k8s-node-group-`.
+
+In the event you want to explicitly specify MIGs instead of using auto-discovery, set members of the `autoscalingGroups` array directly - e.g.
+
+```
+# where 'n' is the index, starting at 0
+--set autoscalingGroups[n].name=https://content.googleapis.com/compute/v1/projects/$PROJECTID/zones/$ZONENAME/instanceGroupManagers/$FULL-MIG-NAME,autoscalingGroups[n].maxSize=$MAXSIZE,autoscalingGroups[n].minSize=$MINSIZE
+```
+
+### Azure AKS
+
+The following parameters are required:
+
+- `cloudProvider=azure`
+- `autoscalingGroups[0].name=your-agent-pool,autoscalingGroups[0].maxSize=10,autoscalingGroups[0].minSize=1`
+- `azureClientID: "your-service-principal-app-id"`
+- `azureClientSecret: "your-service-principal-client-secret"`
+- `azureSubscriptionID: "your-azure-subscription-id"`
+- `azureTenantID: "your-azure-tenant-id"`
+- `azureClusterName: "your-aks-cluster-name"`
+- `azureResourceGroup: "your-aks-cluster-resource-group-name"`
+- `azureVMType: "AKS"`
+- `azureNodeResourceGroup: "your-aks-cluster-node-resource-group"`
+
+### OpenStack Magnum
+
+`cloudProvider: magnum` must be set, and then one of
+
+- `magnumClusterName=<cluster name or ID>` and `autoscalingGroups` with the names of node groups and min/max node counts
+- or `autoDiscovery.clusterName=<cluster name or ID>` with one or more `autoDiscovery.roles`.
+
+Additionally, `cloudConfigPath: "/etc/kubernetes/cloud-config"` must be set as this should be the location
+of the cloud-config file on the host.
+
+Example values files can be found [here](../../cluster-autoscaler/cloudprovider/magnum/examples).
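+
+Whichever provider you target, the `--set` flags above can be collected into a values file instead. As a concrete starting point, a minimal `myvalues.yaml` for the AWS auto-discovery method might look like the following (the cluster name and region are placeholders, not values from this repository):
+
+```console
+$ cat > myvalues.yaml <<'EOF'
+cloudProvider: aws
+awsRegion: us-east-1            # placeholder region
+autoDiscovery:
+  clusterName: my-eks-cluster   # placeholder cluster name
+EOF
+```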
+
+Install the chart with
+
+```
+$ helm install my-release autoscaler/cluster-autoscaler -f myvalues.yaml
+```
+
+## Uninstalling the Chart
+
+To uninstall `my-release`:
+
+```console
+$ helm uninstall my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+> **Tip**: List all releases using `helm list` or start clean with `helm uninstall my-release`
+
+## Additional Configuration
+
+### AWS - IAM
+
+The worker running the cluster autoscaler will need access to certain resources and actions:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "autoscaling:DescribeAutoScalingGroups",
+        "autoscaling:DescribeAutoScalingInstances",
+        "autoscaling:DescribeLaunchConfigurations",
+        "autoscaling:DescribeTags",
+        "autoscaling:SetDesiredCapacity",
+        "autoscaling:TerminateInstanceInAutoScalingGroup"
+      ],
+      "Resource": "*"
+    }
+  ]
+}
+```
+
+- `DescribeTags` is required for autodiscovery.
+- `DescribeLaunchConfigurations` is required to scale up an ASG from 0.
+
+If you would like to limit the scope of the Cluster Autoscaler to ***only*** modify ASGs for a particular cluster, use the following policy instead:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "autoscaling:DescribeAutoScalingGroups",
+        "autoscaling:DescribeAutoScalingInstances",
+        "autoscaling:DescribeLaunchConfigurations",
+        "autoscaling:DescribeTags",
+        "ec2:DescribeLaunchTemplateVersions"
+      ],
+      "Resource": "*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "autoscaling:SetDesiredCapacity",
+        "autoscaling:TerminateInstanceInAutoScalingGroup",
+        "autoscaling:UpdateAutoScalingGroup"
+      ],
+      "Resource": [
+        "arn:aws:autoscaling:<aws-region>:<aws-account-id>:autoScalingGroup:*:autoScalingGroupName/node-group-1",
+        "arn:aws:autoscaling:<aws-region>:<aws-account-id>:autoScalingGroup:*:autoScalingGroupName/node-group-2",
+        "arn:aws:autoscaling:<aws-region>:<aws-account-id>:autoScalingGroup:*:autoScalingGroupName/node-group-3"
+      ],
+      "Condition": {
+        "StringEquals": {
+          "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled": "true",
+          "autoscaling:ResourceTag/kubernetes.io/cluster/<cluster-name>": "owned"
+        }
+      }
+    }
+  ]
+}
+```
+
+Make sure to replace the variables `<aws-region>`, `<aws-account-id>`, `<cluster-name>`, and the ARNs of the ASGs where applicable.
+
+### AWS - IAM Roles for Service Accounts (IRSA)
+
+For Kubernetes clusters that use Amazon EKS, the service account can be configured with an IAM role using [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) to avoid needing to grant access to the worker nodes for AWS resources.
+
+In order to accomplish this, you will first need to create a new IAM role with the above-mentioned policies. Take care in [configuring the trust relationship](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html#iam-role-configuration) to restrict access just to the service account used by cluster autoscaler.
+
+Once you have the IAM role configured, you would then need to `--set rbac.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=arn:aws:iam::123456789012:role/MyRoleName` when installing.
+
+## Troubleshooting
+
+The chart will succeed even if the container arguments are incorrect. A few minutes after starting,
+`kubectl logs -l "app=aws-cluster-autoscaler" --tail=50` should loop through something like
+
+```
+polling_autoscaler.go:111] Poll finished
+static_autoscaler.go:97] Starting main loop
+utils.go:435] No pod using affinity / antiaffinity found in cluster, disabling affinity predicate for this loop
+static_autoscaler.go:230] Filtering out schedulables
+```
+
+If not, find a pod that the deployment created and `describe` it, paying close attention to the arguments under `Command`. e.g.:
+
+```
+Containers:
+  cluster-autoscaler:
+    Command:
+      ./cluster-autoscaler
+      --cloud-provider=aws
+# if specifying ASGs manually
+      --nodes=1:10:your-scaling-group-name
+# if using autodiscovery
+      --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/<cluster-name>
+      --v=4
+```
+
+### PodSecurityPolicy
+
+Though enough for the majority of installations, the default PodSecurityPolicy _could_ be too restrictive depending on the specifics of your release. Please make sure to check that the template fits with any customizations made or disable it by setting `rbac.pspEnabled` to `false`.
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| additionalLabels | object | `{}` | Labels to add to each object of the chart. |
+| affinity | object | `{}` | Affinity for pod assignment |
+| autoDiscovery.clusterName | string | `nil` | Enable autodiscovery for `cloudProvider=aws`, for groups matching `autoDiscovery.tags`. Enable autodiscovery for `cloudProvider=gce`, but no MIG tagging required. Enable autodiscovery for `cloudProvider=magnum`, for groups matching `autoDiscovery.roles`. |
+| autoDiscovery.roles | list | `["worker"]` | Magnum node group roles to match. |
+| autoDiscovery.tags | list | `["k8s.io/cluster-autoscaler/enabled","k8s.io/cluster-autoscaler/{{ .Values.autoDiscovery.clusterName }}"]` | ASG tags to match, run through `tpl`. |
+| autoscalingGroups | list | `[]` | For AWS, Azure AKS or Magnum. At least one element is required if not using `autoDiscovery`. For example:<br><pre>- name: asg1<br>  maxSize: 2<br>  minSize: 1<br></pre> |
+| autoscalingGroupsnamePrefix | list | `[]` | For GCE. At least one element is required if not using `autoDiscovery`. For example:<br><pre>- name: ig01<br>  maxSize: 10<br>  minSize: 0<br></pre> |
+| awsAccessKeyID | string | `""` | AWS access key ID ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) |
+| awsRegion | string | `"us-east-1"` | AWS region (required if `cloudProvider=aws`) |
+| awsSecretAccessKey | string | `""` | AWS access secret key ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) |
+| azureClientID | string | `""` | Service Principal ClientID with contributor permission to Cluster and Node ResourceGroup. Required if `cloudProvider=azure` |
+| azureClientSecret | string | `""` | Service Principal ClientSecret with contributor permission to Cluster and Node ResourceGroup. Required if `cloudProvider=azure` |
+| azureClusterName | string | `""` | Azure AKS cluster name. Required if `cloudProvider=azure` |
+| azureNodeResourceGroup | string | `""` | Azure resource group where the cluster's nodes are located, typically set as `MC_<cluster-resource-group>_<cluster-name>_<location>`. Required if `cloudProvider=azure` |
+| azureResourceGroup | string | `""` | Azure resource group in which the cluster is located. Required if `cloudProvider=azure` |
+| azureSubscriptionID | string | `""` | Azure subscription where the resources are located. Required if `cloudProvider=azure` |
+| azureTenantID | string | `""` | Azure tenant where the resources are located. Required if `cloudProvider=azure` |
+| azureUseManagedIdentityExtension | bool | `false` | Whether to use Azure's managed identity extension for credentials. If using MSI, ensure subscription ID and resource group are set. |
+| azureVMType | string | `"AKS"` | Azure VM type. |
+| cloudConfigPath | string | `"/etc/gce.conf"` | Configuration file for cloud provider. |
+| cloudProvider | string | `"aws"` | The cloud provider where the autoscaler runs. Currently only `gce`, `aws`, `azure` and `magnum` are supported. `aws` supported for AWS. `gce` for GCE. `azure` for Azure AKS. `magnum` for OpenStack Magnum. |
+| containerSecurityContext | object | `{}` | [Security context for container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) |
+| dnsPolicy | string | `"ClusterFirst"` | Defaults to `ClusterFirst`. Valid values are: `ClusterFirstWithHostNet`, `ClusterFirst`, `Default` or `None`. If autoscaler does not depend on cluster DNS, recommended to set this to `Default`. |
+| envFromConfigMap | string | `""` | ConfigMap name to use as envFrom. |
+| envFromSecret | string | `""` | Secret name to use as envFrom. |
+| expanderPriorities | object | `{}` | The expanderPriorities is used if `extraArgs.expander` is set to `priority` and expanderPriorities is also set with the priorities. If `extraArgs.expander` is set to `priority`, then expanderPriorities is used to define cluster-autoscaler-priority-expander priorities. See: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/expander/priority/readme.md |
+| extraArgs | object | `{"logtostderr":true,"stderrthreshold":"info","v":4}` | Additional container arguments. Refer to https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca for the full list of cluster autoscaler parameters and their default values. Everything after the first _ will be ignored allowing the use of multi-string arguments. |
+| extraEnv | object | `{}` | Additional container environment variables. |
+| extraEnvConfigMaps | object | `{}` | Additional container environment variables from ConfigMaps. |
+| extraEnvSecrets | object | `{}` | Additional container environment variables from Secrets. |
+| extraVolumeMounts | list | `[]` | Additional volumes to mount. |
+| extraVolumeSecrets | object | `{}` | Additional volumes to mount from Secrets. |
+| extraVolumes | list | `[]` | Additional volumes. |
+| fullnameOverride | string | `""` | String to fully override `cluster-autoscaler.fullname` template. |
+| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
+| image.pullSecrets | list | `[]` | Image pull secrets |
+| image.repository | string | `"k8s.gcr.io/autoscaling/cluster-autoscaler"` | Image repository |
+| image.tag | string | `"v1.21.0"` | Image tag |
+| kubeTargetVersionOverride | string | `""` | Allow overriding the `.Capabilities.KubeVersion.GitVersion` check. Useful for `helm template` commands. |
+| magnumCABundlePath | string | `"/etc/kubernetes/ca-bundle.crt"` | Path to the host's CA bundle, from `ca-file` in the cloud-config file. |
+| magnumClusterName | string | `""` | Cluster name or ID in Magnum. Required if `cloudProvider=magnum` and not setting `autoDiscovery.clusterName`. |
+| nameOverride | string | `""` | String to partially override `cluster-autoscaler.fullname` template (will maintain the release name) |
+| nodeSelector | object | `{}` | Node labels for pod assignment. Ref: https://kubernetes.io/docs/user-guide/node-selection/. |
+| podAnnotations | object | `{}` | Annotations to add to each pod. |
+| podDisruptionBudget | object | `{"maxUnavailable":1}` | Pod disruption budget. |
+| podLabels | object | `{}` | Labels to add to each pod. |
+| priorityClassName | string | `""` | priorityClassName |
+| priorityConfigMapAnnotations | object | `{}` | Annotations to add to `cluster-autoscaler-priority-expander` ConfigMap. |
+| prometheusRule.additionalLabels | object | `{}` | Additional labels to be set in metadata. |
+| prometheusRule.enabled | bool | `false` | If true, creates a Prometheus Operator PrometheusRule. |
+| prometheusRule.interval | string | `nil` | How often rules in the group are evaluated (falls back to `global.evaluation_interval` if not set). |
+| prometheusRule.namespace | string | `"monitoring"` | Namespace which Prometheus is running in. |
+| prometheusRule.rules | list | `[]` | Rules spec template (see https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule). |
+| rbac.create | bool | `true` | If `true`, create and use RBAC resources. |
+| rbac.pspEnabled | bool | `false` | If `true`, creates and uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. Must be used with `rbac.create` set to `true`. |
+| rbac.serviceAccount.annotations | object | `{}` | Additional Service Account annotations. |
+| rbac.serviceAccount.automountServiceAccountToken | bool | `true` | Automount API credentials for a Service Account. |
+| rbac.serviceAccount.create | bool | `true` | If `true` and `rbac.create` is also true, a Service Account will be created. |
+| rbac.serviceAccount.name | string | `""` | The name of the ServiceAccount to use. If not set and create is `true`, a name is generated using the fullname template. |
+| replicaCount | int | `1` | Desired number of pods |
+| resources | object | `{}` | Pod resource requests and limits. |
+| securityContext | object | `{}` | [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) |
+| service.annotations | object | `{}` | Annotations to add to service |
+| service.externalIPs | list | `[]` | List of IP addresses at which the service is available. Ref: https://kubernetes.io/docs/user-guide/services/#external-ips. |
+| service.labels | object | `{}` | Labels to add to service |
+| service.loadBalancerIP | string | `""` | IP address to assign to load balancer (if supported). |
+| service.loadBalancerSourceRanges | list | `[]` | List of IP CIDRs allowed access to load balancer (if supported). |
+| service.portName | string | `"http"` | Name for service port. |
+| service.servicePort | int | `8085` | Service port to expose. |
+| service.type | string | `"ClusterIP"` | Type of service to create. |
+| serviceMonitor.enabled | bool | `false` | If true, creates a Prometheus Operator ServiceMonitor. |
+| serviceMonitor.interval | string | `"10s"` | Interval that Prometheus scrapes Cluster Autoscaler metrics. |
+| serviceMonitor.namespace | string | `"monitoring"` | Namespace which Prometheus is running in. |
+| serviceMonitor.path | string | `"/metrics"` | The path to scrape for metrics; autoscaler exposes `/metrics` (this is standard) |
+| serviceMonitor.selector | object | `{"release":"prometheus-operator"}` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install. |
+| tolerations | list | `[]` | List of node taints to tolerate (requires Kubernetes >= 1.6). |
+| updateStrategy | object | `{}` | [Deployment update strategy](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) |
diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/README.md.gotmpl b/examples/full-cluster/common-services/charts/cluster-autoscaler/README.md.gotmpl
new file mode 100644
index 0000000..dda305c
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/README.md.gotmpl
@@ -0,0 +1,335 @@
+{{ template "chart.header" . }}
+
+{{ template "chart.description" . }}
+
+## TL;DR:
+
+```console
+$ helm repo add autoscaler https://kubernetes.github.io/autoscaler
+
+# Method 1 - Using Autodiscovery
+$ helm install my-release autoscaler/cluster-autoscaler \
+--set 'autoDiscovery.clusterName'=<cluster-name>
+
+# Method 2 - Specifying groups manually
+$ helm install my-release autoscaler/cluster-autoscaler \
+--set "autoscalingGroups[0].name=your-asg-name" \
+--set "autoscalingGroups[0].maxSize=10" \
+--set "autoscalingGroups[0].minSize=1"
+```
+
+## Introduction
+
+This chart bootstraps a cluster-autoscaler deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Helm 3+
+- Kubernetes 1.8+
+  - [Older versions](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#releases) may work by overriding the `image`. Cluster autoscaler internally simulates the scheduler and bugs between mismatched versions may be subtle.
+- Azure AKS specific Prerequisites:
+  - Kubernetes 1.10+ with RBAC enabled.
+
+## Previous Helm Chart
+
+The previous `cluster-autoscaler` Helm chart hosted at [helm/charts](https://github.com/helm/charts) has been moved to this repository in accordance with the [Deprecation timeline](https://github.com/helm/charts#deprecation-timeline).
Note that a few things have changed between this version and the old version:
+
+- This repository **only** supports Helm chart installations using Helm 3+ since the `apiVersion` on the charts has been marked as `v2`.
+- Previous versions of the Helm chart have not been migrated.
+
+## Migration from 1.X to 9.X+ versions of this Chart
+
+**TL;DR:**
+You should choose to use versions >=9.0.0 of the `cluster-autoscaler` chart published from this repository; previous versions, and the `cluster-autoscaler-chart` chart versioned 1.X.X published from this repository, are deprecated.
+
+<details>
+  <summary>Previous versions of this chart - further details</summary>
+
+On initial migration of this chart from the `helm/charts` repository, this chart was renamed from `cluster-autoscaler` to `cluster-autoscaler-chart` due to technical limitations. This affected all `1.X` releases of the chart; version 2.0.0 of this chart exists only to mark the [`cluster-autoscaler-chart` chart](https://artifacthub.io/packages/helm/cluster-autoscaler/cluster-autoscaler-chart) as deprecated.
+
+Releases of the chart from `9.0.0` onwards return the naming of the chart to `cluster-autoscaler` and return to following the versioning established by the chart's previous location at [helm/charts](https://github.com/helm/charts).
+
+To migrate from a 1.X release of the chart to a `9.0.0` or later release, first uninstall your `1.X` install of the `cluster-autoscaler-chart` chart, then install the new `cluster-autoscaler` chart.
+</details>
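+
+For example, a migration could look like the following sketch (the release name `my-release` is illustrative, and the values file is whatever you used for the 1.X install):
+
+```console
+# Remove the deprecated 1.X install of the renamed chart
+$ helm uninstall my-release
+
+# Install the current chart under its restored name
+$ helm install my-release autoscaler/cluster-autoscaler -f myvalues.yaml
+```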
+
+## Migration from 9.0 to 9.1
+
+Starting from `9.1.0` the `envFromConfigMap` value is expected to contain the name of a ConfigMap that is used as a ref for `envFrom`, similar to `envFromSecret`. If you want to keep the previous behaviour of `envFromConfigMap`, you must rename it to `extraEnvConfigMaps`.
+
+## Installing the Chart
+
+**By default, no deployment is created and nothing will autoscale**.
+
+You must provide some minimal configuration, either to specify instance groups or to enable auto-discovery. It is not recommended to do both.
+
+Either:
+
+- Set `autoDiscovery.clusterName` and provide additional autodiscovery options if necessary, **or**
+- Set static node group configurations for one or more node groups (using `autoscalingGroups` or `autoscalingGroupsnamePrefix`).
+
+To create a valid configuration, follow the instructions for your cloud provider:
+
+* [AWS](#aws---using-auto-discovery-of-tagged-instance-groups)
+* [GCE](#gce)
+* [Azure AKS](#azure-aks)
+* [OpenStack Magnum](#openstack-magnum)
+
+### AWS - Using auto-discovery of tagged instance groups
+
+Auto-discovery finds ASGs with tags as below and automatically manages them based on the min and max size specified in the ASG. `cloudProvider=aws` only.
+
+- Tag the ASGs with keys to match `.Values.autoDiscovery.tags`, by default: `k8s.io/cluster-autoscaler/enabled` and `k8s.io/cluster-autoscaler/`
+- Verify the [IAM Permissions](#aws---iam)
+- Set `autoDiscovery.clusterName=`
+- Set `awsRegion=`
+- Optionally, set `awsAccessKeyID=` and `awsSecretAccessKey=` if you want to [use AWS credentials directly instead of an instance role](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)
+
+```console
+$ helm install my-release autoscaler/cluster-autoscaler --set autoDiscovery.clusterName= --set awsRegion=
+```
+
+Alternatively, with your own AWS credentials:
+
+```console
+$ helm install my-release autoscaler/cluster-autoscaler --set autoDiscovery.clusterName= --set awsRegion= --set awsAccessKeyID= --set awsSecretAccessKey=
+```
+
+#### Specifying groups manually
+
+Without autodiscovery, specify an array of elements each containing an ASG name, min size, and max size. The sizes specified here will be applied to the ASG, assuming IAM permissions are correctly configured.
+
+- Verify the [IAM Permissions](#aws---iam)
+- Either provide a yaml file setting `autoscalingGroups` (see values.yaml) or use `--set`, e.g.:
+
+```console
+$ helm install my-release autoscaler/cluster-autoscaler \
+--set "autoscalingGroups[0].name=your-asg-name" \
+--set "autoscalingGroups[0].maxSize=10" \
+--set "autoscalingGroups[0].minSize=1"
+```
+
+#### Auto-discovery
+
+For auto-discovery of instances to work, they must be tagged with the keys in `.Values.autoDiscovery.tags`, which by default are
+`k8s.io/cluster-autoscaler/enabled` and `k8s.io/cluster-autoscaler/`
+
+The value of the tag does not matter, only the key.
+
+An example kops spec excerpt:
+
+```yaml
+apiVersion: kops/v1alpha2
+kind: Cluster
+metadata:
+  name: my.cluster.internal
+spec:
+  additionalPolicies:
+    node: |
+      [
+        {"Effect":"Allow","Action":["autoscaling:DescribeAutoScalingGroups","autoscaling:DescribeAutoScalingInstances","autoscaling:DescribeLaunchConfigurations","autoscaling:DescribeTags","autoscaling:SetDesiredCapacity","autoscaling:TerminateInstanceInAutoScalingGroup"],"Resource":"*"}
+      ]
+  ...
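+# (comment for illustration: the additionalPolicies block above grants worker
+# nodes the autoscaling API actions the autoscaler needs; the InstanceGroup
+# below carries the auto-discovery tags via cloudLabels)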
+---
+apiVersion: kops/v1alpha2
+kind: InstanceGroup
+metadata:
+  labels:
+    kops.k8s.io/cluster: my.cluster.internal
+  name: my-instances
+spec:
+  cloudLabels:
+    k8s.io/cluster-autoscaler/enabled: ""
+    k8s.io/cluster-autoscaler/my.cluster.internal: ""
+  image: kops.io/k8s-1.8-debian-jessie-amd64-hvm-ebs-2018-01-14
+  machineType: r4.large
+  maxSize: 4
+  minSize: 0
+```
+
+In this example you would need to `--set autoDiscovery.clusterName=my.cluster.internal` when installing.
+
+It is not recommended to try to mix this with setting `autoscalingGroups`.
+
+See the [autoscaler AWS documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#auto-discovery-setup) for a fuller discussion of the setup.
+
+### GCE
+
+The following parameters are required:
+
+- `autoDiscovery.clusterName=any-name`
+- `cloudProvider=gce`
+- `autoscalingGroupsnamePrefix[0].name=your-ig-prefix,autoscalingGroupsnamePrefix[0].maxSize=10,autoscalingGroupsnamePrefix[0].minSize=1`
+
+To use Managed Instance Group (MIG) auto-discovery, provide a YAML file setting `autoscalingGroupsnamePrefix` (see values.yaml) or use `--set` when installing the Chart - e.g.
+
+```console
+$ helm install my-release autoscaler/cluster-autoscaler \
+--set "autoscalingGroupsnamePrefix[0].name=your-ig-prefix,autoscalingGroupsnamePrefix[0].maxSize=10,autoscalingGroupsnamePrefix[0].minSize=1" \
+--set autoDiscovery.clusterName= \
+--set cloudProvider=gce
+```
+
+Note that `your-ig-prefix` should be a _prefix_ matching one or more MIGs, and _not_ the full name of the MIG. For example, to match multiple instance groups - `k8s-node-group-a-standard`, `k8s-node-group-b-gpu`, you would use a prefix of `k8s-node-group-`.
+
+In the event you want to explicitly specify MIGs instead of using auto-discovery, set members of the `autoscalingGroups` array directly - e.g.
+
+```
+# where 'n' is the index, starting at 0
+--set autoscalingGroups[n].name=https://content.googleapis.com/compute/v1/projects/$PROJECTID/zones/$ZONENAME/instanceGroupManagers/$FULL-MIG-NAME,autoscalingGroups[n].maxSize=$MAXSIZE,autoscalingGroups[n].minSize=$MINSIZE
+```
+
+### Azure AKS
+
+The following parameters are required:
+
+- `cloudProvider=azure`
+- `autoscalingGroups[0].name=your-agent-pool,autoscalingGroups[0].maxSize=10,autoscalingGroups[0].minSize=1`
+- `azureClientID: "your-service-principal-app-id"`
+- `azureClientSecret: "your-service-principal-client-secret"`
+- `azureSubscriptionID: "your-azure-subscription-id"`
+- `azureTenantID: "your-azure-tenant-id"`
+- `azureClusterName: "your-aks-cluster-name"`
+- `azureResourceGroup: "your-aks-cluster-resource-group-name"`
+- `azureVMType: "AKS"`
+- `azureNodeResourceGroup: "your-aks-cluster-node-resource-group"`
+
+### OpenStack Magnum
+
+`cloudProvider: magnum` must be set, and then one of
+
+- `magnumClusterName=` and `autoscalingGroups` with the names of node groups and min/max node counts
+- or `autoDiscovery.clusterName=` with one or more `autoDiscovery.roles`.
+
+Additionally, `cloudConfigPath: "/etc/kubernetes/cloud-config"` must be set, as this should be the location
+of the cloud-config file on the host.
+
+Example values files can be found [here](../../cluster-autoscaler/cloudprovider/magnum/examples).
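+
+For any of the providers above, the required settings can also be collected into a values file instead of repeated `--set` flags. A minimal sketch for the AWS auto-discovery path (the region and cluster name below are placeholders to replace, not values from this chart):
+
+```yaml
+# myvalues.yaml -- minimal AWS auto-discovery configuration
+cloudProvider: aws
+awsRegion: us-east-1        # your cluster's region
+autoDiscovery:
+  clusterName: my-cluster   # must match the k8s.io/cluster-autoscaler/<name> ASG tag
+```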
+
+Install the chart with
+
+```
+$ helm install my-release autoscaler/cluster-autoscaler -f myvalues.yaml
+```
+
+## Uninstalling the Chart
+
+To uninstall `my-release`:
+
+```console
+$ helm uninstall my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+> **Tip**: List all releases using `helm list` or start clean with `helm uninstall my-release`
+
+## Additional Configuration
+
+### AWS - IAM
+
+The worker running the cluster autoscaler will need access to certain resources and actions:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "autoscaling:DescribeAutoScalingGroups",
+        "autoscaling:DescribeAutoScalingInstances",
+        "autoscaling:DescribeLaunchConfigurations",
+        "autoscaling:DescribeTags",
+        "autoscaling:SetDesiredCapacity",
+        "autoscaling:TerminateInstanceInAutoScalingGroup"
+      ],
+      "Resource": "*"
+    }
+  ]
+}
+```
+
+- `DescribeTags` is required for autodiscovery.
+- `DescribeLaunchConfigurations` is required to scale up an ASG from 0.
+
+If you would like to limit the scope of the Cluster Autoscaler to ***only*** modify ASGs for a particular cluster, use the following policy instead:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "autoscaling:DescribeAutoScalingGroups",
+        "autoscaling:DescribeAutoScalingInstances",
+        "autoscaling:DescribeLaunchConfigurations",
+        "autoscaling:DescribeTags",
+        "ec2:DescribeLaunchTemplateVersions"
+      ],
+      "Resource": "*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "autoscaling:SetDesiredCapacity",
+        "autoscaling:TerminateInstanceInAutoScalingGroup",
+        "autoscaling:UpdateAutoScalingGroup"
+      ],
+      "Resource": [
+        "arn:aws:autoscaling:::autoScalingGroup::autoScalingGroupName/node-group-1",
+        "arn:aws:autoscaling:::autoScalingGroup::autoScalingGroupName/node-group-2",
+        "arn:aws:autoscaling:::autoScalingGroup::autoScalingGroupName/node-group-3"
+      ],
+      "Condition": {
+        "StringEquals": {
+          "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled": "true",
+          "autoscaling:ResourceTag/kubernetes.io/cluster/": "owned"
+        }
+      }
+    }
+  ]
+}
+```
+
+Make sure to fill in the region, account ID, and cluster name where the policy elides them, and to replace the ARNs of the ASGs where applicable.
+
+### AWS - IAM Roles for Service Accounts (IRSA)
+
+For Kubernetes clusters that use Amazon EKS, the service account can be configured with an IAM role using [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) to avoid needing to grant access to the worker nodes for AWS resources.
+
+In order to accomplish this, you will first need to create a new IAM role with the above-mentioned policies. Take care in [configuring the trust relationship](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html#iam-role-configuration) to restrict access just to the service account used by cluster autoscaler.
+
+Once you have the IAM role configured, you would then need to `--set rbac.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=arn:aws:iam::123456789012:role/MyRoleName` when installing.
+
+## Troubleshooting
+
+The chart will succeed even if the container arguments are incorrect.
A few minutes after starting +`kubectl logs -l "app=aws-cluster-autoscaler" --tail=50` should loop through something like + +``` +polling_autoscaler.go:111] Poll finished +static_autoscaler.go:97] Starting main loop +utils.go:435] No pod using affinity / antiaffinity found in cluster, disabling affinity predicate for this loop +static_autoscaler.go:230] Filtering out schedulables +``` + +If not, find a pod that the deployment created and `describe` it, paying close attention to the arguments under `Command`. e.g.: + +``` +Containers: + cluster-autoscaler: + Command: + ./cluster-autoscaler + --cloud-provider=aws +# if specifying ASGs manually + --nodes=1:10:your-scaling-group-name +# if using autodiscovery + --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ + --v=4 +``` + +### PodSecurityPolicy + +Though enough for the majority of installations, the default PodSecurityPolicy _could_ be too restrictive depending on the specifics of your release. Please make sure to check that the template fits with any customizations made or disable it by setting `rbac.pspEnabled` to `false`. + +{{ template "chart.valuesSection" . }} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/NOTES.txt b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/NOTES.txt new file mode 100644 index 0000000..94e211e --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/NOTES.txt @@ -0,0 +1,18 @@ +{{- if or .Values.autoDiscovery.clusterName .Values.autoscalingGroups -}} + +To verify that cluster-autoscaler has started, run: + + kubectl --namespace={{ .Release.Namespace }} get pods -l "app.kubernetes.io/name={{ template "cluster-autoscaler.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" + +{{- else -}} + +############################################################################## +#### ERROR: You must specify values for either #### +#### autoDiscovery.clusterName or autoscalingGroups[] #### +############################################################################## + +The deployment and pod will not be created and the installation is not functional +See README: + open https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler + +{{- end -}} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/_helpers.tpl b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/_helpers.tpl new file mode 100644 index 0000000..0723059 --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/_helpers.tpl @@ -0,0 +1,87 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cluster-autoscaler.name" -}} +{{- default (printf "%s-%s" .Values.cloudProvider .Chart.Name) .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
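+The release name is prefixed to the computed name unless the two are already equal.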
+*/}} +{{- define "cluster-autoscaler.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default (printf "%s-%s" .Values.cloudProvider .Chart.Name) .Values.nameOverride -}} +{{- if ne $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cluster-autoscaler.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return instance and name labels. +*/}} +{{- define "cluster-autoscaler.instance-name" -}} +app.kubernetes.io/instance: {{ .Release.Name | quote }} +app.kubernetes.io/name: {{ include "cluster-autoscaler.name" . | quote }} +{{- end -}} + + +{{/* +Return labels, including instance and name. +*/}} +{{- define "cluster-autoscaler.labels" -}} +{{ include "cluster-autoscaler.instance-name" . }} +app.kubernetes.io/managed-by: {{ .Release.Service | quote }} +helm.sh/chart: {{ include "cluster-autoscaler.chart" . | quote }} +{{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "deployment.apiVersion" -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "<1.9-0" $kubeTargetVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. +*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "<1.10-0" $kubeTargetVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the service account name used by the pod. +*/}} +{{- define "cluster-autoscaler.serviceAccountName" -}} +{{- if .Values.rbac.serviceAccount.create -}} + {{ default (include "cluster-autoscaler.fullname" .) .Values.rbac.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.rbac.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml new file mode 100644 index 0000000..409fbe2 --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/clusterrole.yaml @@ -0,0 +1,150 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . 
}} +rules: + - apiGroups: + - "" + resources: + - events + - endpoints + verbs: + - create + - patch + - apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create + - apiGroups: + - "" + resources: + - pods/status + verbs: + - update + - apiGroups: + - "" + resources: + - endpoints + resourceNames: + - cluster-autoscaler + verbs: + - get + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - watch + - list + - get + - update + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - replicationcontrollers + - persistentvolumeclaims + - persistentvolumes + verbs: + - watch + - list + - get + - apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - watch + - list + - get + - apiGroups: + - batch + - extensions + resources: + - jobs + verbs: + - get + - list + - patch + - watch + - apiGroups: + - extensions + resources: + - replicasets + - daemonsets + verbs: + - watch + - list + - get + - apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - watch + - list + - apiGroups: + - apps + resources: + - daemonsets + - replicasets + - statefulsets + verbs: + - watch + - list + - get + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + - csidrivers + - csistoragecapacities + verbs: + - watch + - list + - get + - apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - coordination.k8s.io + resourceNames: + - cluster-autoscaler + resources: + - leases + verbs: + - get + - update +{{- if .Values.rbac.pspEnabled }} + - apiGroups: + - extensions + - policy + resources: + - podsecuritypolicies + resourceNames: + - {{ template "cluster-autoscaler.fullname" . }} + verbs: + - use +{{- end -}} + +{{- end -}} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..d1e8308 --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "cluster-autoscaler.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "cluster-autoscaler.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/deployment.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/deployment.yaml new file mode 100644 index 0000000..46246f2 --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/deployment.yaml @@ -0,0 +1,256 @@ +{{- if or .Values.autoDiscovery.clusterName .Values.autoscalingGroups }} +{{/* one of the above is required */}} +apiVersion: {{ template "deployment.apiVersion" . }} +kind: Deployment +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: +{{ include "cluster-autoscaler.instance-name" . 
| indent 6 }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 6 }} + {{- end }} +{{- if .Values.updateStrategy }} + strategy: + {{ toYaml .Values.updateStrategy | nindent 4 | trim }} +{{- end }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + labels: +{{ include "cluster-autoscaler.instance-name" . | indent 8 }} + {{- if .Values.additionalLabels }} +{{ toYaml .Values.additionalLabels | indent 8 }} + {{- end }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: "{{ .Values.dnsPolicy }}" + {{- end }} + containers: + - name: {{ template "cluster-autoscaler.name" . }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + command: + - ./cluster-autoscaler + - --cloud-provider={{ .Values.cloudProvider }} + - --namespace={{ .Release.Namespace }} + {{- if .Values.autoscalingGroups }} + {{- range .Values.autoscalingGroups }} + - --nodes={{ .minSize }}:{{ .maxSize }}:{{ .name }} + {{- end }} + {{- end }} + {{- if eq .Values.cloudProvider "aws" }} + {{- if .Values.autoDiscovery.clusterName }} + - --node-group-auto-discovery=asg:tag={{ tpl (join "," .Values.autoDiscovery.tags) . }} + {{- end }} + {{- else if eq .Values.cloudProvider "gce" }} + {{- if .Values.autoscalingGroupsnamePrefix }} + {{- range .Values.autoscalingGroupsnamePrefix }} + - --node-group-auto-discovery=mig:namePrefix={{ .name }},min={{ .minSize }},max={{ .maxSize }} + {{- end }} + {{- end }} + {{- else if eq .Values.cloudProvider "magnum" }} + {{- if .Values.autoDiscovery.clusterName }} + - --cluster-name={{ .Values.autoDiscovery.clusterName }} + - --node-group-auto-discovery=magnum:role={{ tpl (join "," .Values.autoDiscovery.roles) . }} + {{- else }} + - --cluster-name={{ .Values.magnumClusterName }} + {{- end }} + {{- end }} + {{- if eq .Values.cloudProvider "magnum" }} + - --cloud-config={{ .Values.cloudConfigPath }} + {{- end }} + {{- range $key, $value := .Values.extraArgs }} + {{- if not (kindIs "invalid" $value) }} + - --{{ $key | mustRegexFind "^[^_]+" }}={{ $value }} + {{- else }} + - --{{ $key | mustRegexFind "^[^_]+" }} + {{- end }} + {{- end }} + env: + {{- if and (eq .Values.cloudProvider "aws") (ne .Values.awsRegion "") }} + - name: AWS_REGION + value: "{{ .Values.awsRegion }}" + {{- if .Values.awsAccessKeyID }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: AwsAccessKeyId + name: {{ template "cluster-autoscaler.fullname" . }} + {{- end }} + {{- if .Values.awsSecretAccessKey }} + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AwsSecretAccessKey + name: {{ template "cluster-autoscaler.fullname" . }} + {{- end }} + {{- else if eq .Values.cloudProvider "azure" }} + - name: ARM_SUBSCRIPTION_ID + valueFrom: + secretKeyRef: + key: SubscriptionID + name: {{ template "cluster-autoscaler.fullname" . }} + - name: ARM_RESOURCE_GROUP + valueFrom: + secretKeyRef: + key: ResourceGroup + name: {{ template "cluster-autoscaler.fullname" . }} + - name: ARM_VM_TYPE + valueFrom: + secretKeyRef: + key: VMType + name: {{ template "cluster-autoscaler.fullname" . 
}} + {{- if .Values.azureUseManagedIdentityExtension }} + - name: ARM_USE_MANAGED_IDENTITY_EXTENSION + value: "true" + {{- else }} + - name: ARM_TENANT_ID + valueFrom: + secretKeyRef: + key: TenantID + name: {{ template "cluster-autoscaler.fullname" . }} + - name: ARM_CLIENT_ID + valueFrom: + secretKeyRef: + key: ClientID + name: {{ template "cluster-autoscaler.fullname" . }} + - name: ARM_CLIENT_SECRET + valueFrom: + secretKeyRef: + key: ClientSecret + name: {{ template "cluster-autoscaler.fullname" . }} + - name: AZURE_CLUSTER_NAME + valueFrom: + secretKeyRef: + key: ClusterName + name: {{ template "cluster-autoscaler.fullname" . }} + - name: AZURE_NODE_RESOURCE_GROUP + valueFrom: + secretKeyRef: + key: NodeResourceGroup + name: {{ template "cluster-autoscaler.fullname" . }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.extraEnv }} + - name: {{ $key }} + value: "{{ $value }}" + {{- end }} + {{- range $key, $value := .Values.extraEnvConfigMaps }} + - name: {{ $key }} + valueFrom: + configMapKeyRef: + name: {{ default (include "cluster-autoscaler.fullname" $) $value.name }} + key: {{ required "Must specify key!" $value.key }} + {{- end }} + {{- range $key, $value := .Values.extraEnvSecrets }} + - name: {{ $key }} + valueFrom: + secretKeyRef: + name: {{ default (include "cluster-autoscaler.fullname" $) $value.name }} + key: {{ required "Must specify key!" $value.key }} + {{- end }} + {{- if or .Values.envFromSecret .Values.envFromConfigMap }} + envFrom: + {{- if .Values.envFromSecret }} + - secretRef: + name: {{ .Values.envFromSecret }} + {{- end }} + {{- if .Values.envFromConfigMap }} + - configMapRef: + name: {{ .Values.envFromConfigMap }} + {{- end }} + {{- end }} + livenessProbe: + httpGet: + path: /health-check + port: 8085 + ports: + - containerPort: 8085 + resources: +{{ toYaml .Values.resources | indent 12 }} + {{- if .Values.containerSecurityContext }} + securityContext: + {{ toYaml .Values.containerSecurityContext | nindent 12 | trim }} + {{- end }} + {{- if or (eq .Values.cloudProvider "magnum") .Values.extraVolumeSecrets .Values.extraVolumeMounts }} + volumeMounts: + {{- if eq .Values.cloudProvider "magnum" }} + - name: cloudconfig + mountPath: {{ .Values.cloudConfigPath }} + readOnly: true + {{- end }} + {{- if and (eq .Values.cloudProvider "magnum") (.Values.magnumCABundlePath) }} + - name: ca-bundle + mountPath: {{ .Values.magnumCABundlePath }} + readOnly: true + {{- end }} + {{- range $key, $value := .Values.extraVolumeSecrets }} + - name: {{ $key }} + mountPath: {{ required "Must specify mountPath!" $value.mountPath }} + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{ toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + serviceAccountName: {{ template "cluster-autoscaler.serviceAccountName" . 
}} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- if .Values.securityContext }} + securityContext: + {{ toYaml .Values.securityContext | nindent 8 | trim }} + {{- end }} + {{- if or (eq .Values.cloudProvider "magnum") .Values.extraVolumeSecrets .Values.extraVolumes }} + volumes: + {{- if eq .Values.cloudProvider "magnum" }} + - name: cloudconfig + hostPath: + path: {{ .Values.cloudConfigPath }} + {{- end }} + {{- if and (eq .Values.cloudProvider "magnum") (.Values.magnumCABundlePath) }} + - name: ca-bundle + hostPath: + path: {{ .Values.magnumCABundlePath }} + {{- end }} + {{- range $key, $value := .Values.extraVolumeSecrets }} + - name: {{ $key }} + secret: + secretName: {{ default (include "cluster-autoscaler.fullname" $) $value.name }} + {{- if $value.items }} + items: + {{- toYaml $value.items | nindent 14 }} + {{- end }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 10 }} + {{- end }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/pdb.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/pdb.yaml new file mode 100644 index 0000000..da5bd56 --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/pdb.yaml @@ -0,0 +1,15 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . }} +spec: + selector: + matchLabels: +{{ include "cluster-autoscaler.instance-name" . | indent 6 }} +{{- if .Values.podDisruptionBudget }} + {{ toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} +{{- end -}} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..28369bf --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/podsecuritypolicy.yaml @@ -0,0 +1,46 @@ +{{- if .Values.rbac.pspEnabled }} +apiVersion: {{ template "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "cluster-autoscaler.fullname" . }} + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} +spec: + # Prevents running in privileged mode + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'secret' + - 'hostPath' + - 'emptyDir' + - 'projected' + - 'downwardAPI' +{{- if eq .Values.cloudProvider "gce" }} + allowedHostPaths: + - pathPrefix: {{ .Values.cloudConfigPath }} +{{- end }} + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml new file mode 100644 index 0000000..5bb2024 --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/priority-expander-configmap.yaml @@ -0,0 +1,17 @@ +{{- if hasKey .Values.extraArgs "expander" }} +{{- if and (.Values.expanderPriorities) (eq .Values.extraArgs.expander "priority") -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-autoscaler-priority-expander + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + {{- if .Values.priorityConfigMapAnnotations }} + annotations: +{{ toYaml .Values.priorityConfigMapAnnotations | indent 4 }} + {{- end }} +data: + priorities: |- +{{ .Values.expanderPriorities | indent 4 }} +{{- end -}} +{{- end -}} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml new file mode 100644 index 0000000..097c969 --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/prometheusrule.yaml @@ -0,0 +1,15 @@ +{{- if .Values.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "cluster-autoscaler.fullname" . }} + {{- if .Values.prometheusRule.namespace }} + namespace: {{ .Values.prometheusRule.namespace }} + {{- end }} + labels: {{- toYaml .Values.prometheusRule.additionalLabels | nindent 4 }} +spec: + groups: + - name: {{ include "cluster-autoscaler.fullname" . }} + interval: {{ .Values.prometheusRule.interval }} + rules: {{- tpl (toYaml .Values.prometheusRule.rules) . | nindent 8 }} +{{- end }} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/role.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/role.yaml new file mode 100644 index 0000000..c1f226e --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/role.yaml @@ -0,0 +1,46 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . 
}} +rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create +{{- if eq (default "" .Values.extraArgs.expander) "priority" }} + - list + - watch +{{- end }} + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - cluster-autoscaler-status +{{- if eq (default "" .Values.extraArgs.expander) "priority" }} + - cluster-autoscaler-priority-expander +{{- end }} + verbs: + - delete + - get + - update +{{- if eq (default "" .Values.extraArgs.expander) "priority" }} + - watch +{{- end }} +{{- if eq (default "" (index .Values.extraArgs "leader-elect-resource-lock")) "configmaps" }} + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - cluster-autoscaler + verbs: + - get + - update +{{- end }} +{{- end -}} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml new file mode 100644 index 0000000..938bc03 --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/rolebinding.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: +{{ include "cluster-autoscaler.labels" . | indent 4 }} + name: {{ template "cluster-autoscaler.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "cluster-autoscaler.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "cluster-autoscaler.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/secret.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/secret.yaml new file mode 100644 index 0000000..3f0ef09 --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/secret.yaml @@ -0,0 +1,20 @@ +{{- if or (eq .Values.cloudProvider "azure") (and (eq .Values.cloudProvider "aws") (not (has "" (list .Values.awsAccessKeyID .Values.awsSecretAccessKey)))) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "cluster-autoscaler.fullname" . }} +data: +{{- if eq .Values.cloudProvider "azure" }} + ClientID: "{{ .Values.azureClientID | b64enc }}" + ClientSecret: "{{ .Values.azureClientSecret | b64enc }}" + ResourceGroup: "{{ .Values.azureResourceGroup | b64enc }}" + SubscriptionID: "{{ .Values.azureSubscriptionID | b64enc }}" + TenantID: "{{ .Values.azureTenantID | b64enc }}" + VMType: "{{ .Values.azureVMType | b64enc }}" + ClusterName: "{{ .Values.azureClusterName | b64enc }}" + NodeResourceGroup: "{{ .Values.azureNodeResourceGroup | b64enc }}" +{{- else if eq .Values.cloudProvider "aws" }} + AwsAccessKeyId: "{{ .Values.awsAccessKeyID | b64enc }}" + AwsSecretAccessKey: "{{ .Values.awsSecretAccessKey | b64enc }}" +{{- end }} +{{- end }} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/service.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/service.yaml new file mode 100644 index 0000000..dd8903d --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/service.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} + labels: +{{ include "cluster-autoscaler.labels" . 
| indent 4 }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} + name: {{ template "cluster-autoscaler.fullname" . }} +spec: +{{- if .Values.service.clusterIP }} + clusterIP: "{{ .Values.service.clusterIP }}" +{{- end }} +{{- if .Values.service.externalIPs }} + externalIPs: +{{ toYaml .Values.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} +{{- end }} + ports: + - port: {{ .Values.service.servicePort }} + protocol: TCP + targetPort: 8085 + name: {{ .Values.service.portName }} + selector: +{{ include "cluster-autoscaler.instance-name" . | indent 4 }} + type: "{{ .Values.service.type }}" diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml new file mode 100644 index 0000000..a0b9d25 --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/templates/servicemonitor.yaml @@ -0,0 +1,24 @@ +{{ if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "cluster-autoscaler.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- end }} + labels: + {{- range $key, $value := .Values.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + selector: + matchLabels: +{{ include "cluster-autoscaler.instance-name" . | indent 6 }} + endpoints: + - port: {{ .Values.service.portName }} + interval: {{ .Values.serviceMonitor.interval }} + path: {{ .Values.serviceMonitor.path }} + namespaceSelector: + matchNames: + - {{.Release.Namespace}} +{{ end }} diff --git a/examples/full-cluster/common-services/charts/cluster-autoscaler/values.yaml b/examples/full-cluster/common-services/charts/cluster-autoscaler/values.yaml new file mode 100644 index 0000000..aebcb66 --- /dev/null +++ b/examples/full-cluster/common-services/charts/cluster-autoscaler/values.yaml @@ -0,0 +1,339 @@ +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +# affinity -- Affinity for pod assignment +affinity: {} + +autoDiscovery: + # cloudProviders `aws`, `gce` and `magnum` are supported by auto-discovery at this time + # AWS: Set tags as described in https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#auto-discovery-setup + + # autoDiscovery.clusterName -- Enable autodiscovery for `cloudProvider=aws`, for groups matching `autoDiscovery.tags`. + # Enable autodiscovery for `cloudProvider=gce`, but no MIG tagging required. + # Enable autodiscovery for `cloudProvider=magnum`, for groups matching `autoDiscovery.roles`. + clusterName: "adsd-cumulus-dev" + + # autoDiscovery.tags -- ASG tags to match, run through `tpl`. + tags: + - k8s.io/cluster-autoscaler/enabled + - k8s.io/cluster-autoscaler/{{ .Values.autoDiscovery.clusterName }} + # - kubernetes.io/cluster/{{ .Values.autoDiscovery.clusterName }} + + # autoDiscovery.roles -- Magnum node group roles to match. + roles: + - worker + +# autoscalingGroups -- For AWS, Azure AKS or Magnum. At least one element is required if not using `autoDiscovery`. For example: +#
+# - name: asg1
+# maxSize: 2
+# minSize: 1 +#
+autoscalingGroups: [] +# - name: asg1 +# maxSize: 2 +# minSize: 1 +# - name: asg2 +# maxSize: 2 +# minSize: 1 + +# autoscalingGroupsnamePrefix -- For GCE. At least one element is required if not using `autoDiscovery`. For example: +#
+# - name: ig01
+# maxSize: 10
+# minSize: 0 +#
+autoscalingGroupsnamePrefix: [] +# - name: ig01 +# maxSize: 10 +# minSize: 0 +# - name: ig02 +# maxSize: 10 +# minSize: 0 + +# awsAccessKeyID -- AWS access key ID ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) +awsAccessKeyID: "" + +# awsRegion -- AWS region (required if `cloudProvider=aws`) +awsRegion: us-gov-east-1 + +# awsSecretAccessKey -- AWS access secret key ([if AWS user keys used](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials)) +awsSecretAccessKey: "" + +# azureClientID -- Service Principal ClientID with contributor permission to Cluster and Node ResourceGroup. +# Required if `cloudProvider=azure` +azureClientID: "" + +# azureClientSecret -- Service Principal ClientSecret with contributor permission to Cluster and Node ResourceGroup. +# Required if `cloudProvider=azure` +azureClientSecret: "" + +# azureResourceGroup -- Azure resource group that the cluster is located. +# Required if `cloudProvider=azure` +azureResourceGroup: "" + +# azureSubscriptionID -- Azure subscription where the resources are located. +# Required if `cloudProvider=azure` +azureSubscriptionID: "" + +# azureTenantID -- Azure tenant where the resources are located. +# Required if `cloudProvider=azure` +azureTenantID: "" + +# azureVMType -- Azure VM type. +azureVMType: "AKS" + +# azureClusterName -- Azure AKS cluster name. +# Required if `cloudProvider=azure` +azureClusterName: "" + +# azureNodeResourceGroup -- Azure resource group where the cluster's nodes are located, typically set as `MC___`. +# Required if `cloudProvider=azure` +azureNodeResourceGroup: "" + +# azureUseManagedIdentityExtension -- Whether to use Azure's managed identity extension for credentials. If using MSI, ensure subscription ID and resource group are set. +azureUseManagedIdentityExtension: false + +# magnumClusterName -- Cluster name or ID in Magnum. +# Required if `cloudProvider=magnum` and not setting `autoDiscovery.clusterName`. +magnumClusterName: "" + +# magnumCABundlePath -- Path to the host's CA bundle, from `ca-file` in the cloud-config file. +magnumCABundlePath: "/etc/kubernetes/ca-bundle.crt" + +# cloudConfigPath -- Configuration file for cloud provider. +cloudConfigPath: /etc/gce.conf + +# cloudProvider -- The cloud provider where the autoscaler runs. +# Currently only `gce`, `aws`, `azure` and `magnum` are supported. +# `aws` supported for AWS. `gce` for GCE. `azure` for Azure AKS. +# `magnum` for OpenStack Magnum. +cloudProvider: aws + +# containerSecurityContext -- [Security context for container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) +containerSecurityContext: {} + # capabilities: + # drop: + # - ALL + +# dnsPolicy -- Defaults to `ClusterFirst`. Valid values are: +# `ClusterFirstWithHostNet`, `ClusterFirst`, `Default` or `None`. +# If autoscaler does not depend on cluster DNS, recommended to set this to `Default`. +dnsPolicy: ClusterFirst + +## Priorities Expander +# expanderPriorities -- The expanderPriorities is used if `extraArgs.expander` is set to `priority` and expanderPriorities is also set with the priorities. +# If `extraArgs.expander` is set to `priority`, then expanderPriorities is used to define cluster-autoscaler-priority-expander priorities. 
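+# For example (an illustrative sketch only; the tier numbers and regexes below
+# are hypothetical and must match your own node group names, higher numbers win):
+#   expanderPriorities:
+#     10:
+#       - .*-on-demand-.*
+#     50:
+#       - .*-spot-.*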
+# See: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/expander/priority/readme.md +expanderPriorities: {} + +# priorityConfigMapAnnotations -- Annotations to add to `cluster-autoscaler-priority-expander` ConfigMap. +priorityConfigMapAnnotations: {} + # key1: "value1" + # key2: "value2" + +# extraArgs -- Additional container arguments. +# Refer to https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca for the full list of cluster autoscaler +# parameters and their default values. +# Everything after the first _ will be ignored allowing the use of multi-string arguments. +extraArgs: + logtostderr: true + stderrthreshold: info + v: 4 + # write-status-configmap: true + # status-config-map-name: cluster-autoscaler-status + # leader-elect: true + # leader-elect-resource-lock: endpoints + skip-nodes-with-local-storage: true + expander: least-waste + # scale-down-enabled: true + balance-similar-node-groups: true + aws-use-static-instance-list: true + # min-replica-count: 0 + # scale-down-utilization-threshold: 0.5 + # scale-down-non-empty-candidates-count: 30 + # max-node-provision-time: 15m0s + # scan-interval: 10s + # scale-down-delay-after-add: 10m + # scale-down-delay-after-delete: 0s + # scale-down-delay-after-failure: 3m + # scale-down-unneeded-time: 10m + skip-nodes-with-system-pods: false + # balancing-ignore-label_1: first-label-to-ignore + # balancing-ignore-label_2: second-label-to-ignore + +# extraEnv -- Additional container environment variables. +extraEnv: {} + +# extraEnvConfigMaps -- Additional container environment variables from ConfigMaps. +extraEnvConfigMaps: {} + +# extraEnvSecrets -- Additional container environment variables from Secrets. +extraEnvSecrets: {} + +# envFromConfigMap -- ConfigMap name to use as envFrom. +envFromConfigMap: "" + +# envFromSecret -- Secret name to use as envFrom. +envFromSecret: "" + +# extraVolumeSecrets -- Additional volumes to mount from Secrets. +extraVolumeSecrets: {} + # autoscaler-vol: + # mountPath: /data/autoscaler/ + # custom-vol: + # name: custom-secret + # mountPath: /data/custom/ + # items: + # - key: subkey + # path: mypath + +# extraVolumes -- Additional volumes. +extraVolumes: [] + # - name: ssl-certs + # hostPath: + # path: /etc/ssl/certs/ca-bundle.crt + +# extraVolumeMounts -- Additional volumes to mount. +extraVolumeMounts: [] + # - name: ssl-certs + # mountPath: /etc/ssl/certs/ca-certificates.crt + # readOnly: true + +# fullnameOverride -- String to fully override `cluster-autoscaler.fullname` template. +fullnameOverride: "" + +image: + # image.repository -- Image repository + repository: 252960665057.dkr.ecr.us-gov-east-1.amazonaws.com/eks/adsd-cumulus-dev/cluster-autoscaler + # image.tag -- Image tag + tag: v1.21.0 + # image.pullPolicy -- Image pull policy + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # image.pullSecrets -- Image pull secrets + pullSecrets: [] + # - myRegistrKeySecretName + +# kubeTargetVersionOverride -- Allow overriding the `.Capabilities.KubeVersion.GitVersion` check. Useful for `helm template` commands. +kubeTargetVersionOverride: "" + +# nameOverride -- String to partially override `cluster-autoscaler.fullname` template (will maintain the release name) +nameOverride: "" + +# nodeSelector -- Node labels for pod assignment. 
Ref: https://kubernetes.io/docs/user-guide/node-selection/. +nodeSelector: {} + +# podAnnotations -- Annotations to add to each pod. +podAnnotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + +# podDisruptionBudget -- Pod disruption budget. +podDisruptionBudget: + maxUnavailable: 1 + # minAvailable: 2 + +# podLabels -- Labels to add to each pod. +podLabels: {} + +# additionalLabels -- Labels to add to each object of the chart. +additionalLabels: {} + +# priorityClassName -- priorityClassName +priorityClassName: "" + +rbac: + # rbac.create -- If `true`, create and use RBAC resources. + create: true + # rbac.pspEnabled -- If `true`, creates and uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. + # Must be used with `rbac.create` set to `true`. + pspEnabled: false + serviceAccount: + # rbac.serviceAccount.annotations -- Additional Service Account annotations. + annotations: + eks.amazonaws.com/role-arn: "arn:aws:iam::252960665057:role/eks-adsd-cumulus-dev-irsa-kube-system-cluster-autoscaler" + # rbac.serviceAccount.create -- If `true` and `rbac.create` is also true, a Service Account will be created. + create: true + # rbac.serviceAccount.name -- The name of the ServiceAccount to use. If not set and create is `true`, a name is generated using the fullname template. + name: "cluster-autoscaler" + # rbac.serviceAccount.automountServiceAccountToken -- Automount API credentials for a Service Account. + automountServiceAccountToken: true + +# replicaCount -- Desired number of pods +replicaCount: 1 + +# resources -- Pod resource requests and limits. +resources: + limits: + cpu: 100m + memory: 600Mi + requests: + cpu: 100m + memory: 600Mi + +# securityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) +securityContext: {} + # runAsNonRoot: true + # runAsUser: 1001 + # runAsGroup: 1001 + +service: + # service.annotations -- Annotations to add to service + annotations: {} + # service.labels -- Labels to add to service + labels: {} + # service.externalIPs -- List of IP addresses at which the service is available. Ref: https://kubernetes.io/docs/user-guide/services/#external-ips. + externalIPs: [] + + # service.loadBalancerIP -- IP address to assign to load balancer (if supported). + loadBalancerIP: "" + # service.loadBalancerSourceRanges -- List of IP CIDRs allowed access to load balancer (if supported). + loadBalancerSourceRanges: [] + # service.servicePort -- Service port to expose. + servicePort: 8085 + # service.portName -- Name for service port. + portName: http + # service.type -- Type of service to create. + type: ClusterIP + +## Are you using Prometheus Operator? +serviceMonitor: + # serviceMonitor.enabled -- If true, creates a Prometheus Operator ServiceMonitor. + enabled: false + # serviceMonitor.interval -- Interval that Prometheus scrapes Cluster Autoscaler metrics. + interval: 10s + # serviceMonitor.namespace -- Namespace which Prometheus is running in. + namespace: monitoring + ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters) + # serviceMonitor.selector -- Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install. 
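+  # For example, `release: kube-prometheus-stack` if that is your Prometheus Helm release name (illustrative).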
+ selector: + release: prometheus-operator + # serviceMonitor.path -- The path to scrape for metrics; autoscaler exposes `/metrics` (this is standard) + path: /metrics + +## Custom PrometheusRule to be defined +## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart +## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions +prometheusRule: + # prometheusRule.enabled -- If true, creates a Prometheus Operator PrometheusRule. + enabled: false + # prometheusRule.additionalLabels -- Additional labels to be set in metadata. + additionalLabels: {} + # prometheusRule.namespace -- Namespace which Prometheus is running in. + namespace: monitoring + # prometheusRule.interval -- How often rules in the group are evaluated (falls back to `global.evaluation_interval` if not set). + interval: null + # prometheusRule.rules -- Rules spec template (see https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule). + rules: [] + +# tolerations -- List of node taints to tolerate (requires Kubernetes >= 1.6). +tolerations: [] + +# updateStrategy -- [Deployment update strategy](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) +updateStrategy: {} + # rollingUpdate: + # maxSurge: 1 + # maxUnavailable: 0 + # type: RollingUpdate diff --git a/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/.helmignore b/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/Chart.yaml b/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/Chart.yaml new file mode 100644 index 0000000..f428bb8 --- /dev/null +++ b/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: intermediate-certificate-issuer +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. 
They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl new file mode 100644 index 0000000..5f6c44f --- /dev/null +++ b/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "intermediate-certificate-issuer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "intermediate-certificate-issuer.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "intermediate-certificate-issuer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "intermediate-certificate-issuer.labels" -}} +helm.sh/chart: {{ include "intermediate-certificate-issuer.chart" . }} +{{ include "intermediate-certificate-issuer.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "intermediate-certificate-issuer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "intermediate-certificate-issuer.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "intermediate-certificate-issuer.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "intermediate-certificate-issuer.fullname" .) 
.Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
diff --git a/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml b/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml
new file mode 100644
index 0000000..ad99f63
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/templates/ca-key-pair.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ca-key-pair
+  namespace: {{ .Release.Namespace }}
+data:
+  tls.crt: {{ .Values.tls.crt }}
+  tls.key: {{ .Values.tls.key }}
diff --git a/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml b/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml
new file mode 100644
index 0000000..76a3874
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/templates/clusterissuer.yaml
@@ -0,0 +1,7 @@
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: clusterissuer
+spec:
+  ca:
+    secretName: ca-key-pair
diff --git a/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/values.yaml b/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/values.yaml
new file mode 100644
index 0000000..50dfd22
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/intermediate-certificate-issuer/values.yaml
@@ -0,0 +1,6 @@
+tls:
+  # tls.crt contains the issuer's base64 encoded full chain in the correct order:
+  # issuer -> intermediate(s) -> root.
+  crt:
+  # tls.key contains the base64 encoded signing key.
+  key:
diff --git a/examples/full-cluster/common-services/charts/istio-operator/Chart.yaml b/examples/full-cluster/common-services/charts/istio-operator/Chart.yaml
new file mode 100644
index 0000000..7bca5e3
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/istio-operator/Chart.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+name: istio-operator
+version: 1.10.1
+tillerVersion: ">=2.7.2"
+description: Helm chart for deploying Istio operator
+keywords:
+  - istio
+  - operator
+sources:
+  - https://github.com/istio/istio/tree/master/operator
+engine: gotpl
+icon: https://istio.io/latest/favicons/android-192x192.png
diff --git a/examples/full-cluster/common-services/charts/istio-operator/crds/crd-operator.yaml b/examples/full-cluster/common-services/charts/istio-operator/crds/crd-operator.yaml
new file mode 100644
index 0000000..93ac1de
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/istio-operator/crds/crd-operator.yaml
@@ -0,0 +1,48 @@
+# SYNC WITH manifests/charts/base/files
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: istiooperators.install.istio.io
+  labels:
+    release: istio
+spec:
+  conversion:
+    strategy: None
+  group: install.istio.io
+  names:
+    kind: IstioOperator
+    listKind: IstioOperatorList
+    plural: istiooperators
+    singular: istiooperator
+    shortNames:
+    - iop
+    - io
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - description: Istio control plane revision
+      jsonPath: .spec.revision
+      name: Revision
+      type: string
+    - description: IOP current state
+      jsonPath: .status.status
+      name: Status
+      type: string
+    - description: 'CreationTimestamp is a timestamp representing the server time
+      when this object was created.
It is not guaranteed to be set in happens-before + order across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + subresources: + status: {} + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true +--- diff --git a/examples/full-cluster/common-services/charts/istio-operator/files/gen-operator.yaml b/examples/full-cluster/common-services/charts/istio-operator/files/gen-operator.yaml new file mode 100644 index 0000000..e77d5aa --- /dev/null +++ b/examples/full-cluster/common-services/charts/istio-operator/files/gen-operator.yaml @@ -0,0 +1,220 @@ +--- +# Source: istio-operator/templates/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: istio-operator + labels: + istio-operator-managed: Reconcile + istio-injection: disabled +--- +# Source: istio-operator/templates/service_account.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: istio-operator + name: istio-operator +--- +# Source: istio-operator/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: istio-operator +rules: +# istio groups +- apiGroups: + - authentication.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - config.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - install.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - security.istio.io + resources: + - '*' + verbs: + - '*' +# k8s groups +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions.apiextensions.k8s.io + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - apps + - extensions + resources: + - daemonsets + - deployments + - deployments/finalizers + - replicasets + verbs: + - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - update +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - roles + - rolebindings + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - create + - update +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - namespaces + - pods + - pods/proxy + - persistentvolumeclaims + - secrets + - services + - serviceaccounts + verbs: + - '*' +--- +# Source: istio-operator/templates/clusterrole_binding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-operator +subjects: +- kind: ServiceAccount + name: istio-operator + namespace: istio-operator +roleRef: + kind: ClusterRole + name: istio-operator + apiGroup: rbac.authorization.k8s.io +--- +# Source: istio-operator/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + namespace: istio-operator + labels: + name: istio-operator + name: istio-operator +spec: + ports: + 
- name: http-metrics + port: 8383 + targetPort: 8383 + protocol: TCP + selector: + name: istio-operator +--- +# Source: istio-operator/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: istio-operator + name: istio-operator +spec: + replicas: 1 + selector: + matchLabels: + name: istio-operator + template: + metadata: + labels: + name: istio-operator + spec: + serviceAccountName: istio-operator + containers: + - name: istio-operator + image: gcr.io/istio-testing/operator:1.10-dev + command: + - operator + - server + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsGroup: 1337 + runAsUser: 1337 + runAsNonRoot: true + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 50m + memory: 128Mi + env: + - name: WATCH_NAMESPACE + value: "istio-system" + - name: LEADER_ELECTION_NAMESPACE + value: "istio-operator" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "istio-operator" + - name: WAIT_FOR_RESOURCES_TIMEOUT + value: "300s" + - name: REVISION + value: "" diff --git a/examples/full-cluster/common-services/charts/istio-operator/templates/clusterrole.yaml b/examples/full-cluster/common-services/charts/istio-operator/templates/clusterrole.yaml new file mode 100644 index 0000000..4e6bd74 --- /dev/null +++ b/examples/full-cluster/common-services/charts/istio-operator/templates/clusterrole.yaml @@ -0,0 +1,115 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} +rules: +# istio groups +- apiGroups: + - authentication.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - config.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - install.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - security.istio.io + resources: + - '*' + verbs: + - '*' +# k8s groups +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions.apiextensions.k8s.io + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - apps + - extensions + resources: + - daemonsets + - deployments + - deployments/finalizers + - replicasets + verbs: + - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create + - update +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - roles + - rolebindings + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - create + - update +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - namespaces + - pods + - pods/proxy + - persistentvolumeclaims + - secrets + - services + - serviceaccounts + verbs: + - '*' +--- diff --git a/examples/full-cluster/common-services/charts/istio-operator/templates/clusterrole_binding.yaml b/examples/full-cluster/common-services/charts/istio-operator/templates/clusterrole_binding.yaml new file mode 
100644 index 0000000..9b9df7d --- /dev/null +++ b/examples/full-cluster/common-services/charts/istio-operator/templates/clusterrole_binding.yaml @@ -0,0 +1,13 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} +subjects: +- kind: ServiceAccount + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} + namespace: {{.Values.operatorNamespace}} +roleRef: + kind: ClusterRole + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} + apiGroup: rbac.authorization.k8s.io +--- diff --git a/examples/full-cluster/common-services/charts/istio-operator/templates/crds.yaml b/examples/full-cluster/common-services/charts/istio-operator/templates/crds.yaml new file mode 100644 index 0000000..a370365 --- /dev/null +++ b/examples/full-cluster/common-services/charts/istio-operator/templates/crds.yaml @@ -0,0 +1,6 @@ +{{- if .Values.enableCRDTemplates -}} +{{- range $path, $bytes := .Files.Glob "crds/*.yaml" -}} +--- +{{ $.Files.Get $path }} +{{- end -}} +{{- end -}} diff --git a/examples/full-cluster/common-services/charts/istio-operator/templates/deployment.yaml b/examples/full-cluster/common-services/charts/istio-operator/templates/deployment.yaml new file mode 100644 index 0000000..1baaa8d --- /dev/null +++ b/examples/full-cluster/common-services/charts/istio-operator/templates/deployment.yaml @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: {{.Values.operatorNamespace}} + name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} +spec: + replicas: 1 + selector: + matchLabels: + name: istio-operator + template: + metadata: + labels: + name: istio-operator + spec: + serviceAccountName: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} + containers: + - name: istio-operator + image: {{.Values.hub}}/operator:{{.Values.tag}} + command: + - operator + - server + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsGroup: 1337 + runAsUser: 1337 + runAsNonRoot: true + imagePullPolicy: IfNotPresent + resources: +{{ toYaml .Values.operator.resources | trim | indent 12 }} + env: + - name: WATCH_NAMESPACE + value: {{.Values.watchedNamespaces | quote}} + - name: LEADER_ELECTION_NAMESPACE + value: {{.Values.operatorNamespace | quote}} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: {{.Values.operatorNamespace | quote}} + - name: WAIT_FOR_RESOURCES_TIMEOUT + value: {{.Values.waitForResourcesTimeout | quote}} + - name: REVISION + value: {{.Values.revision | quote}} +--- diff --git a/examples/full-cluster/common-services/charts/istio-operator/templates/namespace.yaml b/examples/full-cluster/common-services/charts/istio-operator/templates/namespace.yaml new file mode 100644 index 0000000..31dc5aa --- /dev/null +++ b/examples/full-cluster/common-services/charts/istio-operator/templates/namespace.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: {{.Values.operatorNamespace}} + labels: + istio-operator-managed: Reconcile + istio-injection: disabled +--- diff --git a/examples/full-cluster/common-services/charts/istio-operator/templates/service.yaml b/examples/full-cluster/common-services/charts/istio-operator/templates/service.yaml new file mode 100644 index 
0000000..ab3ed57
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/istio-operator/templates/service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  namespace: {{.Values.operatorNamespace}}
+  labels:
+    name: istio-operator
+  name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }}
+spec:
+  ports:
+  - name: http-metrics
+    port: 8383
+    targetPort: 8383
+    protocol: TCP
+  selector:
+    name: istio-operator
+---
diff --git a/examples/full-cluster/common-services/charts/istio-operator/templates/service_account.yaml b/examples/full-cluster/common-services/charts/istio-operator/templates/service_account.yaml
new file mode 100644
index 0000000..03e9377
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/istio-operator/templates/service_account.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: {{.Values.operatorNamespace}}
+  name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }}
+{{- if .Values.imagePullSecrets }}
+imagePullSecrets:
+{{- range .Values.imagePullSecrets }}
+- name: {{ . }}
+{{- end }}
+{{- end }}
+---
diff --git a/examples/full-cluster/common-services/charts/istio-operator/values.yaml b/examples/full-cluster/common-services/charts/istio-operator/values.yaml
new file mode 100644
index 0000000..39a5bd2
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/istio-operator/values.yaml
@@ -0,0 +1,29 @@
+hub: docker.io/istio
+tag: 1.10.1
+
+# ImagePullSecrets for the operator ServiceAccount: a list of secrets in the same
+# namespace used to pull the operator image. Must be set for any cluster
+# configured with a private docker registry.
+imagePullSecrets: []
+
+operatorNamespace: istio-operator
+
+# Used in place of istioNamespace to support the operator watching multiple namespaces.
+watchedNamespaces: istio-system
+waitForResourcesTimeout: 300s
+
+# Used for helm2 to add the CRDs to templates.
+enableCRDTemplates: false
+
+# revision for the operator resources
+revision: ""
+
+# Operator resource defaults
+operator:
+  resources:
+    limits:
+      cpu: 200m
+      memory: 256Mi
+    requests:
+      cpu: 50m
+      memory: 128Mi
+
diff --git a/examples/full-cluster/common-services/charts/istio-peerauthentication/.helmignore b/examples/full-cluster/common-services/charts/istio-peerauthentication/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/istio-peerauthentication/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/examples/full-cluster/common-services/charts/istio-peerauthentication/Chart.yaml b/examples/full-cluster/common-services/charts/istio-peerauthentication/Chart.yaml
new file mode 100644
index 0000000..5995e6b
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/istio-peerauthentication/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: istio-peerauthentication
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/examples/full-cluster/common-services/charts/istio-peerauthentication/templates/_helpers.tpl b/examples/full-cluster/common-services/charts/istio-peerauthentication/templates/_helpers.tpl new file mode 100644 index 0000000..94c398d --- /dev/null +++ b/examples/full-cluster/common-services/charts/istio-peerauthentication/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "istio-peerauthentication.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "istio-peerauthentication.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "istio-peerauthentication.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "istio-peerauthentication.labels" -}} +helm.sh/chart: {{ include "istio-peerauthentication.chart" . }} +{{ include "istio-peerauthentication.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "istio-peerauthentication.selectorLabels" -}} +app.kubernetes.io/name: {{ include "istio-peerauthentication.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "istio-peerauthentication.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "istio-peerauthentication.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/full-cluster/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml b/examples/full-cluster/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml new file mode 100644 index 0000000..3238311 --- /dev/null +++ b/examples/full-cluster/common-services/charts/istio-peerauthentication/templates/peerauthentication.yaml @@ -0,0 +1,9 @@ +{{ if .Values.requireMutualTLS }} +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: "default" +spec: + mtls: + mode: STRICT +{{ end }} diff --git a/examples/full-cluster/common-services/charts/istio-peerauthentication/values.yaml b/examples/full-cluster/common-services/charts/istio-peerauthentication/values.yaml new file mode 100644 index 0000000..e69de29 diff --git a/examples/full-cluster/common-services/charts/istio-profile/.helmignore b/examples/full-cluster/common-services/charts/istio-profile/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster/common-services/charts/istio-profile/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster/common-services/charts/istio-profile/Chart.yaml b/examples/full-cluster/common-services/charts/istio-profile/Chart.yaml new file mode 100644 index 0000000..fbd07c2 --- /dev/null +++ b/examples/full-cluster/common-services/charts/istio-profile/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: istio-profile +description: Configuration for istio to be picked up by istio's operator. +type: application +version: 0.1.2 +appVersion: "1.10.1" diff --git a/examples/full-cluster/common-services/charts/istio-profile/templates/_helpers.tpl b/examples/full-cluster/common-services/charts/istio-profile/templates/_helpers.tpl new file mode 100644 index 0000000..8a02937 --- /dev/null +++ b/examples/full-cluster/common-services/charts/istio-profile/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "istio-profile.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "istio-profile.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
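+(The "+" in a SemVer build-metadata suffix is replaced with "_" below because
+Kubernetes label values may not contain "+".)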
+*/}} +{{- define "istio-profile.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "istio-profile.labels" -}} +helm.sh/chart: {{ include "istio-profile.chart" . }} +{{ include "istio-profile.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "istio-profile.selectorLabels" -}} +app.kubernetes.io/name: {{ include "istio-profile.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "istio-profile.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "istio-profile.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/full-cluster/common-services/charts/istio-profile/templates/istiooperator.yaml b/examples/full-cluster/common-services/charts/istio-profile/templates/istiooperator.yaml new file mode 100644 index 0000000..5062e95 --- /dev/null +++ b/examples/full-cluster/common-services/charts/istio-profile/templates/istiooperator.yaml @@ -0,0 +1,188 @@ +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +metadata: + name: istio-profile +spec: + hub: {{ .Values.hub | default "docker.io/istio" }} + tag: {{ .Values.tag | default "1.10.1" }} + + meshConfig: +{{- if .Values.envoy.accessLog.enabled }} + accessLogFile: /dev/stdout +{{- end }} +{{- if and .Values.envoy.accessLog.enabled .Values.envoy.accessLog.format }} + accessLogFormat: {{ .Values.envoy.accessLog.format }} +{{- end }} +{{- if and .Values.envoy.accessLog.enabled .Values.envoy.accessLog.encoding }} + accessLogEncoding: {{ .Values.envoy.accessLog.encoding }} +{{- end }} + defaultConfig: + proxyMetadata: {} + enablePrometheusMerge: true + + components: + base: + enabled: true + pilot: + enabled: true + + ingressGateways: + - name: istio-ingressgateway + enabled: true + k8s: + serviceAnnotations: + "service.beta.kubernetes.io/aws-load-balancer-internal": "true" + "service.beta.kubernetes.io/aws-load-balancer-type": "nlb" + + egressGateways: + - name: istio-egressgateway + enabled: {{ .Values.egressGateways.enabled }} + + cni: + enabled: false + + istiodRemote: + enabled: false + + values: + global: + istioNamespace: {{ .Values.namespace }} + istiod: + enableAnalysis: false + logging: + level: "default:info" + logAsJson: false + pilotCertProvider: istiod + jwtPolicy: third-party-jwt + proxy: + image: proxyv2 + clusterDomain: "cluster.local" + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 2000m + memory: 1024Mi + logLevel: warning + componentLogLevel: "misc:error" + privileged: false + enableCoreDump: false + statusPort: 15020 + readinessInitialDelaySeconds: 1 + readinessPeriodSeconds: 2 + readinessFailureThreshold: 30 + includeIPRanges: "*" + excludeIPRanges: {{ default "" .Values.apiserver | quote }} + excludeOutboundPorts: "" + excludeInboundPorts: "" + autoInject: enabled + tracer: "zipkin" + proxy_init: + image: proxyv2 + resources: + limits: + cpu: 2000m + memory: 1024Mi + requests: + cpu: 10m + memory: 10Mi + # Specify image pull policy if default behavior isn't desired. + # Default behavior: latest images will be Always else IfNotPresent. 
+      imagePullPolicy: ""
+      operatorManageWebhooks: false
+      tracer:
+        lightstep: {}
+        zipkin: {}
+        datadog: {}
+        stackdriver: {}
+      imagePullSecrets: []
+      oneNamespace: false
+      defaultNodeSelector: {}
+      configValidation: true
+      multiCluster:
+        enabled: false
+        clusterName: ""
+      omitSidecarInjectorConfigMap: false
+      network: ""
+      defaultResources:
+        requests:
+          cpu: 10m
+      defaultPodDisruptionBudget:
+        enabled: true
+      priorityClassName: ""
+      useMCP: false
+      sds:
+        token:
+          aud: istio-ca
+      sts:
+        servicePort: 0
+      meshNetworks: {}
+      mountMtlsCerts: false
+    base:
+      enableCRDTemplates: false
+      validationURL: ""
+    pilot:
+      autoscaleEnabled: true
+      autoscaleMin: 1
+      autoscaleMax: 5
+      replicaCount: 1
+      image: pilot
+      traceSampling: 1.0
+      env: {}
+      cpu:
+        targetAverageUtilization: 80
+      nodeSelector: {}
+      keepaliveMaxServerConnectionAge: 30m
+      enableProtocolSniffingForOutbound: true
+      enableProtocolSniffingForInbound: true
+      deploymentLabels:
+      configMap: true
+
+    telemetry:
+      enabled: {{ .Values.telemetry.enabled }}
+      v2:
+        enabled: true
+        metadataExchange:
+          wasmEnabled: false
+        prometheus:
+          wasmEnabled: false
+          enabled: true
+        stackdriver:
+          enabled: false
+          logging: false
+          monitoring: false
+          topology: false
+          configOverride: {}
+
+    istiodRemote:
+      injectionURL: ""
+
+    gateways:
+      istio-egressgateway:
+        zvpn: {}
+        env: {}
+        autoscaleEnabled: true
+        type: ClusterIP
+        name: istio-egressgateway
+        secretVolumes:
+        - name: egressgateway-certs
+          secretName: istio-egressgateway-certs
+          mountPath: /etc/istio/egressgateway-certs
+        - name: egressgateway-ca-certs
+          secretName: istio-egressgateway-ca-certs
+          mountPath: /etc/istio/egressgateway-ca-certs
+
+      istio-ingressgateway:
+        autoscaleEnabled: true
+        type: LoadBalancer
+        name: istio-ingressgateway
+        zvpn: {}
+        env: {}
+        secretVolumes:
+        - name: ingressgateway-certs
+          secretName: istio-ingressgateway-certs
+          mountPath: /etc/istio/ingressgateway-certs
+        - name: ingressgateway-ca-certs
+          secretName: istio-ingressgateway-ca-certs
+          mountPath: /etc/istio/ingressgateway-ca-certs
diff --git a/examples/full-cluster/common-services/charts/istio-profile/values.yaml b/examples/full-cluster/common-services/charts/istio-profile/values.yaml
new file mode 100644
index 0000000..9b43fab
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/istio-profile/values.yaml
@@ -0,0 +1,44 @@
+
+namespace: istio-system
+requireMutualTLS: true
+hub: docker.io/istio
+tag: 1.10.1
+apiserver: ""
+
+##############################################################################
+# Observability options:
+##############################################################################
+
+# Controls settings for the envoy proxy that is added as a sidecar
+envoy:
+  # Controls settings related to access logging for the service.
+  accessLog:
+    # When enabled, envoy is configured to log to stdout.
+    enabled: true
+    # Format for the proxy access log. Default value is envoy's format.
+    # Controls the accessLogFormat istio configuration.
+    format:
+    # Encoding for the proxy access log (text or json). Default value is text.
+    # Controls the accessLogEncoding istio configuration.
+    encoding:
+
+# When set to true, istio provides telemetry data to prometheus.
+# False disables collecting telemetry data.
+telemetry:
+  enabled: true
+
+# When set to true, enables tracking of a request through the mesh as it is
+# distributed across multiple services.
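+# (Trace spans are exported through the zipkin tracer configured under
+# values.global.proxy in the IstioOperator template above; values.pilot.traceSampling
+# there controls the sampling rate.)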
+tracing: + enabled: true + +############################################################################## +# Traffic Management options: +############################################################################## + +# Egress gateways allow you to apply Istio features, for example, monitoring +# and route rules, to traffic exiting the mesh. +# When set to true, the egress gateway is created. +egressGateways: + enabled: true + diff --git a/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/.helmignore b/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/Chart.yaml b/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/Chart.yaml new file mode 100644 index 0000000..9cfc3c1 --- /dev/null +++ b/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: self-signed-certificate-issuer +description: A Helm chart for Kubernetes +type: application +version: 0.1.0 +appVersion: "1.0.0" diff --git a/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl new file mode 100644 index 0000000..e62a63b --- /dev/null +++ b/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "self-signed-certificate-issuer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "self-signed-certificate-issuer.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "self-signed-certificate-issuer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "self-signed-certificate-issuer.labels" -}} +helm.sh/chart: {{ include "self-signed-certificate-issuer.chart" . }} +{{ include "self-signed-certificate-issuer.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "self-signed-certificate-issuer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "self-signed-certificate-issuer.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "self-signed-certificate-issuer.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "self-signed-certificate-issuer.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml b/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml new file mode 100644 index 0000000..ab1ee31 --- /dev/null +++ b/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/ca-issuer.yaml @@ -0,0 +1,8 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: clusterissuer +spec: + ca: + secretName: root-secret + diff --git a/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml b/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml new file mode 100644 index 0000000..84e895d --- /dev/null +++ b/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-ca.yaml @@ -0,0 +1,17 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: selfsigned-ca + namespace: {{ .Release.Namespace }} +spec: + isCA: true + commonName: selfsigned-ca + secretName: root-secret + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: selfsigned-issuer + kind: ClusterIssuer + group: cert-manager.io + diff --git a/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml b/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml new file mode 100644 index 0000000..81660bd --- /dev/null +++ b/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/templates/selfsigned-clusterissuer.yaml @@ -0,0 +1,7 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: selfsigned-issuer +spec: + selfSigned: {} + diff --git a/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/values.yaml b/examples/full-cluster/common-services/charts/self-signed-certificate-issuer/values.yaml new file mode 100644 index 0000000..e69de29 diff --git a/examples/full-cluster/common-services/charts/vault-certificate-issuer/.helmignore b/examples/full-cluster/common-services/charts/vault-certificate-issuer/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/examples/full-cluster/common-services/charts/vault-certificate-issuer/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/examples/full-cluster/common-services/charts/vault-certificate-issuer/Chart.yaml b/examples/full-cluster/common-services/charts/vault-certificate-issuer/Chart.yaml new file mode 100644 index 0000000..e179122 --- /dev/null +++ b/examples/full-cluster/common-services/charts/vault-certificate-issuer/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: vault-certificate-issuer +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl b/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl new file mode 100644 index 0000000..a9a1425 --- /dev/null +++ b/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "vault-certificate-issuer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "vault-certificate-issuer.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "vault-certificate-issuer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "vault-certificate-issuer.labels" -}} +helm.sh/chart: {{ include "vault-certificate-issuer.chart" . }} +{{ include "vault-certificate-issuer.selectorLabels" . 
}}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "vault-certificate-issuer.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "vault-certificate-issuer.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "vault-certificate-issuer.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "vault-certificate-issuer.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
diff --git a/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml b/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml
new file mode 100644
index 0000000..8880f1c
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/app-role-issuer.yaml
@@ -0,0 +1,18 @@
+{{ if eq .Values.vault.authentication_type "AppRole" }}
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: clusterissuer
+spec:
+  vault:
+    path: {{ .Values.vault.path }}
+    server: {{ .Values.vault.url }}
+    caBundle: {{ .Values.vault.ca_bundle }}
+    auth:
+      appRole:
+        path: {{ .Values.approle.role_path }}
+        roleId: {{ .Values.approle.role_id }}
+        secretRef:
+          name: cert-manager-vault-approle
+          key: secretId
+{{- end }}
diff --git a/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml b/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml
new file mode 100644
index 0000000..23d58e1
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/app-role-secret.yaml
@@ -0,0 +1,10 @@
+{{ if eq .Values.vault.authentication_type "AppRole" }}
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  name: cert-manager-vault-approle
+  namespace: {{ .Release.Namespace }}
+data:
+  secretId: {{ .Values.approle.secret_id }}
+{{- end }}
diff --git a/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml b/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml
new file mode 100644
index 0000000..f964aed
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/service-account-issuer.yaml
@@ -0,0 +1,20 @@
+{{ if eq .Values.vault.authentication_type "ServiceAccount" }}
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: clusterissuer
+spec:
+  vault:
+    path: {{ .Values.vault.path }}
+    server: {{ .Values.vault.url }}
+    caBundle: {{ .Values.vault.ca_bundle }}
+    auth:
+      kubernetes:
+        role: {{ .Values.serviceAccount.role }}
+{{- if .Values.serviceAccount.mountPath }}
+        path: {{ .Values.serviceAccount.mountPath }}
+{{- end }}
+        secretRef:
+          name: {{ .Values.serviceAccount.secret }}
+          key: token
+{{- end }}
diff --git a/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml b/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml
new file mode 100644
index 0000000..0410d30
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/token-issuer.yaml
@@ -0,0 +1,15 @@
+{{ if eq .Values.vault.authentication_type "Token" }}
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: clusterissuer
+spec:
+  vault:
+    path: {{ .Values.vault.path }}
+    server: {{ .Values.vault.url }}
+    caBundle: {{ .Values.vault.ca_bundle }}
+    auth:
+      tokenSecretRef:
+        name: cert-manager-vault-token
+        key: token
+{{- end }}
diff --git a/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml b/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml
new file mode 100644
index 0000000..35bb13d
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/vault-certificate-issuer/templates/token-secret.yaml
@@ -0,0 +1,10 @@
+{{ if eq .Values.vault.authentication_type "Token" }}
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  name: cert-manager-vault-token
+  namespace: {{ .Release.Namespace }}
+data:
+  token: {{ .Values.token.token }}
+{{- end }}
diff --git a/examples/full-cluster/common-services/charts/vault-certificate-issuer/values.yaml b/examples/full-cluster/common-services/charts/vault-certificate-issuer/values.yaml
new file mode 100644
index 0000000..4cac439
--- /dev/null
+++ b/examples/full-cluster/common-services/charts/vault-certificate-issuer/values.yaml
@@ -0,0 +1,47 @@
+
+# Common settings for all types of authentication
+vault:
+  # the URL at which Vault is reachable.
+  url:
+  # the Vault path that will be used for signing. Note that the path
+  # must use the sign endpoint.
+  path:
+  # an optional field containing a base64 encoded string of the
+  # Certificate Authority used to trust the Vault connection. This is
+  # typically always required when using an https URL.
+  ca_bundle:
+  # the type of authentication to use; must be one of:
+  #   - AppRole
+  #   - Token
+  #   - ServiceAccount
+  authentication_type:
+
+# AppRole authentication type:
+approle:
+  # secret key
+  secret_id:
+  # RoleID of the role to assume
+  role_id:
+  # the app role path
+  role_path:
+
+# Token authentication type:
+token:
+  # a token string that has been generated from one of the many
+  # authentication backends that Vault supports. These tokens have
+  # an expiry and so need to be periodically refreshed. cert-manager
+  # does not refresh these tokens automatically, so another process
+  # must be put in place to do this. The token is stored in the
+  # cert-manager-vault-token secret in the cert-manager namespace.
+  token:
+
+# ServiceAccount authentication type:
+serviceAccount:
+  # the name of the secret associated with the service account in the
+  # cert-manager namespace to use to authenticate with vault
+  secret:
+  # the role which is the Vault role that the Service Account is to assume
+  role:
+  # optional value which is the authentication mount path, defaulting
+  # to kubernetes.
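+  # (e.g. mountPath: kubernetes/my-cluster -- a hypothetical value for a
+  # kubernetes auth method enabled at a non-default path)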
+  mountPath:
diff --git a/examples/full-cluster/common-services/copy_image.sh b/examples/full-cluster/common-services/copy_image.sh
new file mode 120000
index 0000000..889e269
--- /dev/null
+++ b/examples/full-cluster/common-services/copy_image.sh
@@ -0,0 +1 @@
+../bin/copy_image.sh
\ No newline at end of file
diff --git a/examples/full-cluster/common-services/copy_images.tf b/examples/full-cluster/common-services/copy_images.tf
new file mode 100644
index 0000000..9d2a2e1
--- /dev/null
+++ b/examples/full-cluster/common-services/copy_images.tf
@@ -0,0 +1,88 @@
+data "aws_ecr_authorization_token" "token" {}
+
+locals {
+  account_id       = data.aws_caller_identity.current.account_id
+  repo_parent_name = format("eks/%v", var.cluster_name)
+
+  account_ecr_registry = format("%v.dkr.ecr.%v.amazonaws.com", local.account_id, var.region)
+  account_ecr          = format("%v/%v", local.account_ecr_registry, local.repo_parent_name)
+
+  images = [
+    # cert-manager related images:
+    {
+      name    = "cert-manager-controller"
+      image   = "quay.io/jetstack/cert-manager-controller"
+      tag     = var.cert_manager_controller_tag
+      enabled = true
+    },
+    # cluster-autoscaler image:
+    {
+      name    = "cluster-autoscaler"
+      image   = "public.ecr.aws/v0g0y9g5/cluster-autoscaler"
+      tag     = var.cluster_autoscaler_tag
+      enabled = true
+    },
+    # metrics-server image:
+    {
+      name    = "metrics-server"
+      image   = "docker.io/bitnami/metrics-server"
+      tag     = var.metrics_server_tag
+      enabled = true
+    },
+    # more cert-manager related images:
+    {
+      name    = "cert-manager-cainjector"
+      image   = "quay.io/jetstack/cert-manager-cainjector"
+      tag     = var.cert_manager_cainjector_tag
+      enabled = true
+    },
+    {
+      name    = "cert-manager-webhook"
+      image   = "quay.io/jetstack/cert-manager-webhook"
+      tag     = var.cert_manager_webhook_tag
+      enabled = true
+    },
+    # istio related images:
+    {
+      name    = "istio/operator"
+      image   = "docker.io/istio/operator"
+      tag     = var.istio_tag
+      enabled = true
+    },
+    {
+      name    = "istio/pilot"
+      image   = "docker.io/istio/pilot"
+      tag     = var.istio_tag
+      enabled = true
+    },
+    {
+      name    = "istio/proxyv2"
+      image   = "docker.io/istio/proxyv2"
+      tag     = var.istio_tag
+      enabled = true
+    },
+  ]
+  image_repos = { for image in local.images : image.name => format("%v/%v", local.account_ecr, image.name) }
+  image_map = { for image in local.images : image.name =>
+    merge(
+      image,
+      tomap(
+        {
+          "full_path"  = local.image_repos[image.name],
+          "registry"   = local.account_ecr_registry,
+          "repository" = format("%v/%v", local.repo_parent_name, image.name),
+        }
+      )
+    )
+  }
+}
+
+resource "null_resource" "copy_images" {
+  for_each = { for image in local.images : image.name => image if image.enabled }
+
+  provisioner "local-exec" {
+    command = "${path.module}/copy_image.sh"
+    environment = {
+      AWS_PROFILE          = var.profile
+      AWS_REGION           = local.region
+      SOURCE_IMAGE         = format("%v:%v", each.value.image, each.value.tag)
+      DESTINATION_IMAGE    = format("%v/%v:%v", local.account_ecr, each.value.name, each.value.tag)
+      DESTINATION_USERNAME = data.aws_ecr_authorization_token.token.user_name
+      DESTINATION_PASSWORD = data.aws_ecr_authorization_token.token.password
+    }
+  }
+}
+
diff --git a/examples/full-cluster/common-services/data.eks.tf b/examples/full-cluster/common-services/data.eks.tf
new file mode 100644
index 0000000..870e8c6
--- /dev/null
+++ b/examples/full-cluster/common-services/data.eks.tf
@@ -0,0 +1,15 @@
+data "aws_eks_cluster" "cluster" {
+  name = var.cluster_name
+}
+
+data "aws_eks_cluster_auth" "cluster" {
+  name = var.cluster_name
+}
+
+locals {
+  aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster
+  # for main.tf
+  # aws_eks_cluster = aws_eks_cluster.eks_cluster
+  # for all subdirectories
aws_eks_cluster = data.aws_eks_cluster.cluster
+}
diff --git a/examples/full-cluster/common-services/dns.tf b/examples/full-cluster/common-services/dns.tf
new file mode 100644
index 0000000..a8fba0c
--- /dev/null
+++ b/examples/full-cluster/common-services/dns.tf
@@ -0,0 +1,25 @@
+data "kubernetes_service" "istio-ingressgateway" {
+  metadata {
+    name      = "istio-ingressgateway"
+    namespace = "istio-system"
+  }
+}
+
+locals {
+  is_gateway_active = data.kubernetes_service.istio-ingressgateway.status != null
+}
+
+data "aws_lb" "lb" {
+  count = local.is_gateway_active ? 1 : 0
+  # the LB name is the first dash-separated token of the ingress hostname
+  name  = split("-", data.kubernetes_service.istio-ingressgateway.status.0.load_balancer.0.ingress.0.hostname)[0]
+}
+
+resource "aws_route53_record" "istio-ingress" {
+  count   = local.is_gateway_active ? 1 : 0
+  name    = format("*.%v", local.parent_rs.cluster_domain_name)
+  type    = "CNAME"
+  ttl     = 900
+  zone_id = local.parent_rs.cluster_domain_id
+
+  records = [data.aws_lb.lb[0].dns_name]
+}
diff --git a/examples/full-cluster/common-services/kubeconfig.tf b/examples/full-cluster/common-services/kubeconfig.tf
new file mode 100644
index 0000000..5e386f5
--- /dev/null
+++ b/examples/full-cluster/common-services/kubeconfig.tf
@@ -0,0 +1,29 @@
+resource "null_resource" "kubeconfig" {
+  triggers = {
+    always_run = timestamp()
+  }
+  provisioner "local-exec" {
+    command = "which kubectl > /dev/null 2>&1; if [ $? != 0 ]; then echo 'missing kubectl'; exit 1; else exit 0; fi"
+  }
+  provisioner "local-exec" {
+    command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'"
+  }
+  provisioner "local-exec" {
+    environment = {
+      AWS_PROFILE = var.profile
+      AWS_REGION  = local.region
+    }
+    command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/kube.config"
+  }
+  depends_on = [data.aws_eks_cluster.cluster]
+}
+
+#---
+# call it like
+#---
+## provisioner "local-exec" {
+##   environment = {
+##     KUBECONFIG = "${path.root}/setup/kube.config"
+##   }
+##   command = "kubectl set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true"
+## }
diff --git a/examples/full-cluster/common-services/locals.tf b/examples/full-cluster/common-services/locals.tf
new file mode 100644
index 0000000..d1f92d0
--- /dev/null
+++ b/examples/full-cluster/common-services/locals.tf
@@ -0,0 +1,19 @@
+locals {
+  base_tags = {
+    "eks-cluster-name"      = var.cluster_name
+    "boc:tf_module_version" = local._module_version
+    "boc:created_by"        = "terraform"
+  }
+}
+
+# replace TF remote state accordingly in parent_rs with that from the parent directory, and be sure to make the link
+locals {
+  parent_rs = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east_vpc3_apps_eks-adsd-cumulus-qa.outputs
+
+  vpc_id               = local.parent_rs.cluster_vpc_id
+  subnet_ids           = local.parent_rs.cluster_subnet_ids
+  cluster_worker_sg_id = local.parent_rs.cluster_worker_sg_id
+
+  oidc_provider_url = local.parent_rs.oidc_provider_url
+  oidc_provider_arn = local.parent_rs.oidc_provider_arn
+}
diff --git a/examples/full-cluster/common-services/main.tf b/examples/full-cluster/common-services/main.tf
new file mode 100644
index 0000000..45ee939
--- /dev/null
+++ b/examples/full-cluster/common-services/main.tf
@@ -0,0 +1,399 @@
+locals {
+  charts = {
+    "cert-manager" = {
+      name       = "cert-manager"
+      repository = "https://charts.jetstack.io"
+      version    = "v1.4.3"
+      use_remote = true
+    }
+
+    "metrics-server" = {
+      name       = "metrics-server"
+      repository = "https://charts.bitnami.com/bitnami"
+      version    = "5.10.4"
+      use_remote = true
+    }
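+
+    # Each entry's use_remote flag selects between the remote chart
+    # repository above and the local copy vendored under
+    # ${path.module}/charts (see the helm_release resources below).
+
+    # a standard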
chart that is downloaded as part of the istio-bundle. It's not + # available standalone in a repository + # name = "istio-operator" + # these are all custom + # name = "certificate-issuer" + # name = "istio-profile" + } + + base_tags = { + "eks-cluster-name" = var.cluster_name + "boc:tf_module_version" = local._module_version + "boc:created_by" = "terraform" + } +} + +resource "kubernetes_namespace" "cert-manager" { + metadata { + name = "cert-manager" + } +} + +resource "kubernetes_namespace" "istio-system" { + metadata { + name = "istio-system" + } +} + +# Install Metrics-Server +resource "helm_release" "metrics-server" { + chart = "metrics-server" + name = "metrics-server" + namespace = "kube-system" + repository = local.charts["metrics-server"].use_remote ? local.charts["metrics-server"].repository : "${path.module}/charts" + version = local.charts["metrics-server"].use_remote ? local.charts["metrics-server"].version : null + + depends_on = [null_resource.copy_images] + set { + name = "extraArgs.kubelet-preferred-address-types" +# value = "InternalIP,ExternalIP,Hostname" + value = "InternalIP" + } + set { + name = "apiService.create" + value = "true" + } + set { + name = "extraArgs.cert-dir" + value = "/tmp" + } + set { + name = "extraArgs.kubelet-use-node-status-port" + value = "" + } + set { + name = "extraArgs.metric-resolution" + value = "15s" + } +# set { +# name = "extraArgs.kubelet-insecure-tls" +# value = "true" +# } + set { + name = "image.registry" + value = local.account_ecr_registry + } + set { + name = "image.repository" +# value = format("%v/%v", local.repo_parent_name, local.images["metric-server"].name) + value = local.image_map["metrics-server"].repository + } + + set { + name = "image.tag" + value = var.metrics_server_tag + } + + timeout = 180 +} + +resource "helm_release" "cluster-autoscaler" { + chart = "cluster-autoscaler" + name = "cluster-autoscaler" + namespace = "kube-system" + repository = "${path.module}/charts/" + depends_on = [null_resource.copy_images] + set { + name = "image.repository" + value = local.image_repos["cluster-autoscaler"] + } + set { + name = "image.tag" + value = var.cluster_autoscaler_tag + } + set { + name = "autoDiscovery.clusterName" + value = var.cluster_name + } +} + +# Install cert-manager +resource "helm_release" "cert-manager" { + chart = "cert-manager" + name = "cert-manager" + namespace = kubernetes_namespace.cert-manager.metadata[0].name + repository = local.charts["cert-manager"].use_remote ? local.charts["cert-manager"].repository : "${path.module}/charts" + version = local.charts["cert-manager"].use_remote ? local.charts["cert-manager"].version : null + + depends_on = [null_resource.copy_images] + + set { + name = "installCRDs" + value = "true" + } + set { + name = "extraArgs" + value = "{--enable-certificate-owner-ref=true}" + } + + set { + name = "image.repository" + value = local.image_repos["cert-manager-controller"] + } + set { + name = "image.tag" + value = var.cert_manager_controller_tag + } + + set { + name = "cainjector.image.repository" + value = local.image_repos["cert-manager-cainjector"] + } + set { + name = "cainjector.image.tag" + value = var.cert_manager_cainjector_tag + } + + set { + name = "webhook.image.repository" + value = local.image_repos["cert-manager-webhook"] + } + set { + name = "webhook.image.tag" + value = var.cert_manager_webhook_tag + } + + timeout = 180 +} + +# cert-manager reports ready before the cert-manager-webhook pod +# has completely started and is ready to process requests. 
This sleep
+# is set for a completely arbitrary time to allow cert-manager-webhook
+# to finish starting. On slow systems, this may not be long enough,
+# but on t3.xlarge, it works fine.
+resource "time_sleep" "let_cert-manager-webhook_boot" {
+  depends_on = [helm_release.cert-manager]
+
+  create_duration = "19s"
+}
+
+locals {
+  tls_crt_file     = length(var.tls_crt_file) > 0 ? var.tls_crt_file : "certs/${local.ca_dns_name}.bundle.crt"
+  tls_crt_contents = (length(local.tls_crt_file) > 0 && fileexists(local.tls_crt_file)) ? file(local.tls_crt_file) : var.tls_crt_contents
+  tls_crt_b64      = length(local.tls_crt_contents) > 0 ? base64encode(local.tls_crt_contents) : var.tls_crt_b64
+
+  tls_key_file     = length(var.tls_key_file) > 0 ? var.tls_key_file : "certs/${local.ca_dns_name}.key"
+  tls_key_contents = (length(local.tls_key_file) > 0 && fileexists(local.tls_key_file)) ? file(local.tls_key_file) : var.tls_key_contents
+  tls_key_b64      = length(local.tls_key_contents) > 0 ? base64encode(local.tls_key_contents) : var.tls_key_b64
+
+  intermediate_ca = (length(local.tls_crt_b64) > 0) && (length(local.tls_key_b64) > 0)
+
+  vault_ca_bundle_pem_file = var.vault_ca_bundle_pem_file
+  vault_ca_bundle_pem = ((length(local.vault_ca_bundle_pem_file) > 0) ?
+    file(local.vault_ca_bundle_pem_file)
+    : var.vault_ca_bundle_pem)
+  vault_ca_bundle_pem_b64 = ((length(local.vault_ca_bundle_pem) > 0) ?
+    base64encode(local.vault_ca_bundle_pem)
+    : var.vault_ca_bundle_pem_b64)
+
+  vault_ca = ! local.intermediate_ca && length(var.vault_url) > 0
+
+  self_signed_ca = ! local.intermediate_ca && ! local.vault_ca
+
+  defined_ca = (local.self_signed_ca ? 1 : 0) + (local.intermediate_ca ? 1 : 0) + (local.vault_ca ? 1 : 0)
+}
+
+# configure the certificate issuer.
+
+# when self-signed certs requested
+resource "helm_release" "self-signed-certificate-issuer" {
+  count = local.self_signed_ca == true ? 1 : 0
+
+  chart      = "self-signed-certificate-issuer"
+  name       = "certificate-issuer"
+  namespace  = kubernetes_namespace.cert-manager.metadata[0].name
+  repository = "${path.module}/charts/"
+
+  depends_on = [time_sleep.let_cert-manager-webhook_boot]
+
+  # Required because the chart creates "non-standard" kubernetes resources
+  # that use the cert-manager CRDs.
+  disable_openapi_validation = true
+}
+
+# when use of an intermediate CA is requested
+resource "helm_release" "intermediate-certificate-issuer" {
+  count = local.intermediate_ca == true ? 1 : 0
+
+  chart      = "intermediate-certificate-issuer"
+  name       = "certificate-issuer"
+  namespace  = kubernetes_namespace.cert-manager.metadata[0].name
+  repository = "${path.module}/charts/"
+
+  depends_on = [time_sleep.let_cert-manager-webhook_boot]
+
+  # Required because the chart creates "non-standard" kubernetes resources
+  # that use the cert-manager CRDs.
+  disable_openapi_validation = true
+
+  set {
+    name  = "tls.crt"
+    value = local.tls_crt_b64
+  }
+  set {
+    name  = "tls.key"
+    value = local.tls_key_b64
+  }
+}
+
+# when use of vault as a CA is requested
+resource "helm_release" "vault-certificate-issuer" {
+  count = local.vault_ca == true ? 1 : 0
+
+  chart      = "vault-certificate-issuer"
+  name       = "certificate-issuer"
+  namespace  = kubernetes_namespace.cert-manager.metadata[0].name
+  repository = "${path.module}/charts/"
+
+  depends_on = [time_sleep.let_cert-manager-webhook_boot]
+
+  # Required because the chart creates "non-standard" kubernetes resources
+  # that use the cert-manager CRDs.
+  disable_openapi_validation = true
+
+  set {
+    name  = "vault.url"
+    value = var.vault_url
+  }
+  set {
+    name  = "vault.path"
+    value = var.vault_path
+  }
+  set {
+    name  = "vault.ca_bundle"
+    value = local.vault_ca_bundle_pem_b64
+  }
+  set {
+    name  = "vault.authentication_type"
+    value = var.vault_authentication
+  }
+
+  set {
+    name  = "approle.secret_id"
+    value = var.vault_approle_secret_id
+  }
+  set {
+    name  = "approle.role_id"
+    value = var.vault_approle_role_id
+  }
+  set {
+    name  = "approle.role_path"
+    value = var.vault_approle_role_path
+  }
+
+  set {
+    name  = "token.token"
+    value = var.vault_token
+  }
+
+  set {
+    name  = "serviceAccount.serviceAccount"
+    value = var.vault_serviceaccount_sa
+  }
+
+  set {
+    name  = "serviceAccount.role"
+    value = var.vault_serviceaccount_role
+  }
+  set {
+    name  = "serviceAccount.mountPath"
+    value = var.vault_serviceaccount_mountpath
+  }
+}
+
+# installs the istio-operator that will listen for profile configurations to
+# install / configure / modify the istio components.
+resource "helm_release" "istio-operator" {
+  chart      = "istio-operator"
+  name       = "istio-operator"
+  namespace  = kubernetes_namespace.istio-system.metadata[0].name
+  repository = "${path.module}/charts/"
+
+  depends_on = [helm_release.cert-manager]
+
+  set {
+    name  = "hub"
+    value = format("%v/%v", local.account_ecr, "istio")
+  }
+  set {
+    name  = "tag"
+    value = var.istio_tag
+  }
+  set {
+    name  = "operatorNamespace"
+    value = "operators"
+  }
+  set {
+    name  = "watchedNamespaces"
+    value = kubernetes_namespace.istio-system.metadata[0].name
+  }
+
+  timeout = 180
+}
+
+# Need to access the IP address of the apiserver for the next step.
+data "kubernetes_service" "apiserver" {
+  metadata {
+    name = "kubernetes"
+  }
+}
+
+# sets up service mesh
+resource "helm_release" "istio-profile" {
+  chart      = "istio-profile"
+  name       = "istio-profile"
+  namespace  = kubernetes_namespace.istio-system.metadata[0].name
+  repository = "${path.module}/charts/"
+
+  depends_on = [helm_release.istio-operator, null_resource.certificate-issuers]
+
+  set {
+    name  = "hub"
+    value = format("%v/%v", local.account_ecr, "istio")
+  }
+  set {
+    name  = "tag"
+    value = var.istio_tag
+  }
+  # Passes in the API server so it can be excluded from requiring mTLS from
+  # pods that are protected by istio. It already implements SSL.
+  set {
+    name  = "apiserver"
+    value = "${data.kubernetes_service.apiserver.spec[0].cluster_ip}/32"
+  }
+}
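+
+# A sketch of an alternative to the fixed sleeps used in this file: poll for
+# readiness instead of waiting an arbitrary duration (assumes the operator
+# deploys a deployment named "istiod" into istio-system, the istio default):
+## resource "null_resource" "wait_for_istiod" {
+##   depends_on = [helm_release.istio-profile]
+##   provisioner "local-exec" {
+##     environment = {
+##       KUBECONFIG = "${path.root}/setup/kube.config"
+##     }
+##     command = "kubectl -n istio-system wait --for=condition=available deployment/istiod --timeout=180s"
+##   }
+## }
+
+# Creating the istio profile is very quick. Time is needed to allow
+# istio-operator to install the CRDs and deploy istio.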
+resource "time_sleep" "let_istio-operator_install_istio" {
+  depends_on = [helm_release.istio-profile]
+
+  create_duration = "19s"
+}
+
+# Require all pods in the service mesh to use mTLS
+resource "helm_release" "istio-peer-authentication" {
+  chart      = "istio-peerauthentication"
+  name       = "istio-peerauthentication"
+  namespace  = kubernetes_namespace.istio-system.metadata[0].name
+  repository = "${path.module}/charts/"
+
+  depends_on = [time_sleep.let_istio-operator_install_istio]
+}
+
+resource "null_resource" "certificate-issuers" {
+  triggers = {
+    self_signed_ca  = join(",", helm_release.self-signed-certificate-issuer[*].id)
+    intermediate_ca = join(",", helm_release.intermediate-certificate-issuer[*].id)
+    vault_ca        = join(",", helm_release.vault-certificate-issuer[*].id)
+  }
+  provisioner "local-exec" {
+    command = "if [ ${local.defined_ca} -eq 0 ]; then echo 'no-certificate-issuer defined'; exit 1; fi"
+  }
+}
+
diff --git a/examples/full-cluster/common-services/prefixes.tf b/examples/full-cluster/common-services/prefixes.tf
new file mode 120000
index 0000000..e0bf5ad
--- /dev/null
+++ b/examples/full-cluster/common-services/prefixes.tf
@@ -0,0 +1 @@
+../prefixes.tf
\ No newline at end of file
diff --git a/examples/full-cluster/common-services/providers.tf b/examples/full-cluster/common-services/providers.tf
new file mode 120000
index 0000000..7244d01
--- /dev/null
+++ b/examples/full-cluster/common-services/providers.tf
@@ -0,0 +1 @@
+../providers.tf
\ No newline at end of file
diff --git a/examples/full-cluster/common-services/region.tf b/examples/full-cluster/common-services/region.tf
new file mode 100644
index 0000000..b7b1696
--- /dev/null
+++ b/examples/full-cluster/common-services/region.tf
@@ -0,0 +1,4 @@
+locals {
+  region = var.region
+}
+
diff --git a/examples/full-cluster/common-services/remote_state.yml b/examples/full-cluster/common-services/remote_state.yml
new file mode 100644
index 0000000..f314303
--- /dev/null
+++ b/examples/full-cluster/common-services/remote_state.yml
@@ -0,0 +1,9 @@
+directory: "applications/apps-adsd-eks/vpc/east/vpc3/apps/eks-adsd-cumulus-qa/common-services"
+profile: "252960665057-ma6-gov"
+bucket: "inf-tfstate-252960665057"
+bucket_region: "us-gov-east-1"
+region: "us-gov-east-1"
+regions: ["us-gov-east-1"]
+account_id: "252960665057"
+account_alias: "ma6-gov"
+aws_environment: "gov"
diff --git a/examples/full-cluster/common-services/test-cluster-autoscaling.json b/examples/full-cluster/common-services/test-cluster-autoscaling.json
new file mode 100644
index 0000000..ab00596
--- /dev/null
+++ b/examples/full-cluster/common-services/test-cluster-autoscaling.json
@@ -0,0 +1,24 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx-deployment
+spec:
+  selector:
+    matchLabels:
+      app: nginx
+  replicas: 4 # tells the deployment to run 4 pods matching the template
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        resources:
+          requests:
+            cpu: 3
+          limits:
+            cpu: 3
+        image: "252960665057.dkr.ecr.us-gov-east-1.amazonaws.com/eks/adsd-cumulus-dev/nginx:1.21"
+        ports:
+        - containerPort: 80
diff --git a/examples/full-cluster/common-services/tf-run.data b/examples/full-cluster/common-services/tf-run.data
new file mode 100644
index 0000000..63f8c73
--- /dev/null
+++ b/examples/full-cluster/common-services/tf-run.data
@@ -0,0 +1,27 @@
+COMMAND tf-directory-setup.py -l none -f
+COMMAND setup-new-directory.sh
+COMMAND tf-init -upgrade
+tls_private_key.ca
+tls_cert_request.ca
+null_resource.ca_root_cert
+null_resource.ca_files
+null_resource.ca_cert
+local_file.ca_bundle_cert
+COMMAND tf-directory-setup.py -l s3
+
+COMMENT submit certs/*csr using command output listed in apply to TCO for signing
+STOP once that is available, change cert_download to true
+
+COMMAND terraform taint null_resource.ca_cert
+null_resource.ca_root_cert
+null_resource.ca_files
+null_resource.ca_cert
+COMMENT second run is to complete the steps
+null_resource.ca_root_cert
+null_resource.ca_files
+null_resource.ca_cert
+
+ALL
+
+COMMENT run: git-secret add certs/*.key; git-secret hide
+COMMENT be sure to add all files to git, and be sure to commit -a to get .gitsecret/ changes
diff --git a/examples/full-cluster/common-services/variables.common-services.tf b/examples/full-cluster/common-services/variables.common-services.tf
new file mode 100644
index 0000000..14770e6
--- /dev/null
+++ b/examples/full-cluster/common-services/variables.common-services.tf
@@ -0,0 +1,209 @@
+#############################################################################
+# Options for configuring cert-manager to generate certificates for https
+# termination in the cluster:
+#
+# - By not configuring any other method, cert-manager is configured to
+#   generate a private key and a self-signed CA which will be stored in the
+#   root-secret secret in the cert-manager namespace. Certificates are then
+#   signed using this internal CA.
+# - tls_cert / tls_key - intermediate CA - By configuring a tls_cert and
+#   tls_key (either a file, contents, or base64-encoded data; see below),
+#   cert-manager will be configured to create certificates based upon the
+#   intermediate certificate provided.
+# - vault - By configuring information about the vault, cert-manager will be
+#   configured to interact with the vault to create certificates.
+#
+# For fields that ultimately need to be base64 encoded, there are
+# typically three input variables for each field.
+# 1. variable with a path to a file that holds the unencoded data which
+#    will be read by terraform and encoded into a base64 string to be used
+#    as needed. This field has the highest precedence of the three fields.
+# 2. variable with the raw unencoded data which will be encoded into a
+#    base64 string to be used as needed. This field has the second highest
+#    precedence of the three fields.
+# 3. variable with the base64 encoded data ready for use. This field has
+#    the lowest priority of the three fields.
+#############################################################################

+#############################################################################
+# vault
+#
+# To use Vault as the certificate authority for cert-manager, first supply
+# the common configuration elements. Once complete, configure the selected
+# authentication method and fill in the details for that authentication type.
+#############################################################################
+variable "vault_url" {
+  description = "URL to the vault server."
+  type        = string
+  default     = ""
+}
+
+variable "vault_path" {
+  description = "Path is the Vault path that will be used for signing. Note that the path must use the sign endpoint."
+  type        = string
+  default     = ""
+}
+
+variable "vault_ca_bundle_pem_file" {
+  description = "Path to the pem file that holds the CA bundle containing the Certificate Authority to trust the Vault connection. This is typically always required when using an https URL."
+  type        = string
+  default     = ""
+}
+
+variable "vault_ca_bundle_pem" {
+  description = "Contents of the pem file holding the CA bundle containing the Certificate Authority to trust the Vault connection. This is typically always required when using an https URL."
+  type        = string
+  default     = ""
+}
+
+variable "vault_ca_bundle_pem_b64" {
+  description = "Base64 encoded contents of the pem file holding the CA bundle containing the Certificate Authority to trust the Vault connection. This is typically always required when using an https URL."
+  type        = string
+  default     = ""
+}
+
+variable "vault_authentication" {
+  description = "How to authenticate with the vault. This value must be blank when not using vault, or one of 'AppRole', 'Token', or 'ServiceAccount'."
+  type        = string
+  default     = ""
+}
+
+#############################################################################
+# for AppRole authentication
+variable "vault_approle_secret_id" {
+  description = "The vault SecretID for the AppRole. This is stored in the vault secret in the cert-manager namespace."
+  type        = string
+  default     = ""
+  # sensitive = true
+}
+
+variable "vault_approle_role_id" {
+  description = "The vault RoleId for cert-manager to assume."
+  type        = string
+  default     = ""
+}
+
+variable "vault_approle_role_path" {
+  description = "The vault app role path for the role for cert-manager to assume."
+  type        = string
+  default     = ""
+}
+
+#############################################################################
+# for Token authentication
+variable "vault_token" {
+  description = "The vault token that cert-manager should use to authenticate with vault. Note that tokens expire, and the token must be refreshed manually. This token is stored in the vault secret in the cert-manager namespace."
+  type        = string
+  default     = ""
+  # sensitive = true
+}
+
+#############################################################################
+# for ServiceAccount authentication
+variable "vault_serviceaccount_sa" {
+  description = "The name of the service account in the cert-manager namespace to use to access the token to communicate with vault."
+  type        = string
+  default     = ""
+}
+
+variable "vault_serviceaccount_role" {
+  description = "The role cert-manager is to assume."
+  type        = string
+  default     = ""
+}
+
+variable "vault_serviceaccount_mountpath" {
+  description = "The location to mount the secret into the filesystem. Defaults to kubernetes."
+  type        = string
+  default     = ""
+}
+
+#############################################################################
+# tls_cert / tls_key - intermediate CA
+#
+# To use an intermediate CA, configure two of these fields with correct
+# values, which configures cert-manager to sign cert requests with an
+# intermediate key.
+#
+# Input can be the file, file contents, or base64 encoded file contents to
+# allow chaining the output of a module that can generate an intermediate CA
+# to the input of this script. Depending on how the intermediate CA is
+# generated, pass the output as input in whichever form is easiest.
+#
+# See https://cert-manager.io/docs/configuration/ca/
+#############################################################################
+
+variable "tls_crt_file" {
+  description = "Path to the file that holds the tls.crt representing the issuer's full chain in the correct order (issuer -> intermediate(s) -> root). When left blank, cert-manager is configured with a self-signed CA."
+  type        = string
+  default     = ""
+}
+
+variable "tls_key_file" {
+  description = "Path to the file that holds the signing private key. When left blank, cert-manager is configured with a self-signed CA."
+  type        = string
+  default     = ""
+}
+
+variable "tls_crt_contents" {
+  description = "The contents of the file that holds the tls.crt representing the issuer's full chain in the correct order (issuer -> intermediate(s) -> root). When left blank, cert-manager is configured with a self-signed CA."
+  type        = string
+  default     = ""
+}
+
+variable "tls_key_contents" {
+  description = "The contents of the file that holds the signing private key. When left blank, cert-manager is configured with a self-signed CA."
+  type        = string
+  default     = ""
+}
+
+variable "tls_crt_b64" {
+  description = "The base64 encoded contents of the file that holds the tls.crt representing the issuer's full chain in the correct order (issuer -> intermediate(s) -> root). When left blank, cert-manager is configured with a self-signed CA."
+  type        = string
+  default     = ""
+}
+
+variable "tls_key_b64" {
+  description = "The base64 encoded contents of the file that holds the signing private key. When left blank, cert-manager is configured with a self-signed CA."
+  type        = string
+  default     = ""
+}
+
+# See the readme `Updating the cert-manager chart` to find these values.
+variable "cert_manager_controller_tag" {
+  description = "Which tag of public.ecr.aws/eks-anywhere/jetstack/cert-manager-controller"
+  type        = string
+  default     = "v1.4.3"
+}
+
+variable "cluster_autoscaler_tag" {
+  description = "Image tag of public.ecr.aws/v0g0y9g5/cluster-autoscaler"
+  type        = string
+  default     = "v1.21.0"
+}
+
+variable "metrics_server_tag" {
+  description = "Which tag of metrics-server"
+  type        = string
+  default     = "0.5.0-debian-10-r83"
+}
+
+variable "cert_manager_cainjector_tag" {
+  description = "Which tag of public.ecr.aws/eks-anywhere/jetstack/cert-manager-cainjector"
+  type        = string
+  default     = "v1.4.3"
+}
+
+variable "cert_manager_webhook_tag" {
+  description = "Which tag of public.ecr.aws/eks-anywhere/jetstack/cert-manager-webhook"
+  type        = string
+  default     = "v1.4.3"
+}
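+
+# For illustration only, a hypothetical terraform.tfvars selecting the
+# intermediate-CA issuer (file inputs take precedence over the *_contents
+# inputs, which in turn take precedence over the *_b64 inputs; leaving all
+# tls_* and vault_* inputs blank falls back to the self-signed issuer):
+## tls_crt_file = "certs/my-intermediate.bundle.crt"
+## tls_key_file = "certs/my-intermediate.key"
+
+# See the readme `Updating the istio chart` to find these values.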
+variable "istio_tag" { + description = "The version of istio to install" + type = string + default = "1.10.1" +} + diff --git a/examples/full-cluster/common-services/variables.eks.tf b/examples/full-cluster/common-services/variables.eks.tf new file mode 120000 index 0000000..7dd95db --- /dev/null +++ b/examples/full-cluster/common-services/variables.eks.tf @@ -0,0 +1 @@ +../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster/common-services/variables.vpc.tf b/examples/full-cluster/common-services/variables.vpc.tf new file mode 120000 index 0000000..f672f33 --- /dev/null +++ b/examples/full-cluster/common-services/variables.vpc.tf @@ -0,0 +1 @@ +../variables.vpc.tf \ No newline at end of file diff --git a/examples/full-cluster/common-services/version.tf b/examples/full-cluster/common-services/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/full-cluster/common-services/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git a/examples/full-cluster/data.eks.tf b/examples/full-cluster/data.eks.tf new file mode 100644 index 0000000..9452be6 --- /dev/null +++ b/examples/full-cluster/data.eks.tf @@ -0,0 +1,18 @@ +locals { + aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster +# for main.tf + aws_eks_cluster = aws_eks_cluster.eks_cluster +# for all subdirectories +## aws_eks_cluster = data.aws_eks_cluster.cluster +} + +data "aws_eks_cluster_auth" "cluster" { + name = var.cluster_name +} + +#--- +# for all subdirectories only +#--- +## data "aws_eks_cluster" "cluster" { +## name = var.cluster_name +## } diff --git a/examples/full-cluster/dns-zone.tf b/examples/full-cluster/dns-zone.tf new file mode 100644 index 0000000..e26e584 --- /dev/null +++ b/examples/full-cluster/dns-zone.tf @@ -0,0 +1,42 @@ +locals { + cluster_domain_name = format("%v.%v", var.cluster_name, var.vpc_domain_name) + cluster_domain_description = format("%v EKS Cluster DNS Zone", var.cluster_name) +} + +resource "aws_route53_zone" "cluster_domain" { + name = local.cluster_domain_name + comment = local.cluster_domain_description + force_destroy = false + + vpc { + vpc_id = data.aws_vpc.eks_vpc.id + vpc_region = local.region + } + + # lifecycle { + # ignore_changes + # } + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + tomap({ "Name" = local.cluster_domain_name }), + ) +} + +output "cluster_domain_name" { + description = "DNS Zone Name" + value = local.cluster_domain_name +} + +output "cluster_domain_id" { + description = "DNS Zone ID" + value = aws_route53_zone.cluster_domain.zone_id +} + +output "cluster_domain_ns" { + description = "DNS Zone Nameservers" + value = aws_route53_zone.cluster_domain.name_servers +} diff --git a/examples/full-cluster/ebs-encryption.tf b/examples/full-cluster/ebs-encryption.tf new file mode 100644 index 0000000..7243a3d --- /dev/null +++ b/examples/full-cluster/ebs-encryption.tf @@ -0,0 +1,81 @@ +resource "kubernetes_storage_class" "ebs_encrypted" { + metadata { + name = "gp2-encrypted" + annotations = { + "storageclass.kubernetes.io/is-default-class" = "true" + } + } + parameters = { + fsType = "ext4" + type = "gp2" + encrypted = "true" +# kms_key_id = data.aws_kms_key.ebs_key.arn + kmsKeyId = data.aws_kms_key.ebs_key.arn + } + storage_provisioner = "kubernetes.io/aws-ebs" + reclaim_policy = "Delete" + volume_binding_mode = "Immediate" + allow_volume_expansion = "true" +} + +# run once. 
This deletes the default storage class created by eks called 'gp2' +# vs trying to patch it + +resource "null_resource" "delete_default_sc" { + triggers = { + id = kubernetes_storage_class.ebs_encrypted.id + } + depends_on = [null_resource.kubeconfig] + provisioner "local-exec" { + command = "kubectl --kubeconfig ${path.root}/setup/kube.config delete sc gp2" + } +} + +## { +## "apiVersion": "storage.k8s.io/v1", +## "kind": "StorageClass", +## "metadata": { +## "annotations": { +## "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"storage.k8s.io/v1\",\"kind\":\"StorageClass\",\"metadata\":{\"annotations\":{\"storageclass.kubernetes.io/is-default-class\":\"true\"},\"name\":\"gp2\"},\"parameters\":{\"fsType\":\"ext4\",\"type\":\"gp2\"},\"provisioner\":\"kubernetes.io/aws-ebs\",\"volumeBindingMode\":\"WaitForFirstConsumer\"}\n", +## "storageclass.kubernetes.io/is-default-class": "true" +## }, +## "creationTimestamp": "2021-09-20T16:10:48Z", +## "managedFields": [ +## { +## "apiVersion": "storage.k8s.io/v1", +## "fieldsType": "FieldsV1", +## "fieldsV1": { +## "f:metadata": { +## "f:annotations": { +## ".": {}, +## "f:kubectl.kubernetes.io/last-applied-configuration": {}, +## "f:storageclass.kubernetes.io/is-default-class": {} +## } +## }, +## "f:parameters": { +## ".": {}, +## "f:fsType": {}, +## "f:type": {} +## }, +## "f:provisioner": {}, +## "f:reclaimPolicy": {}, +## "f:volumeBindingMode": {} +## }, +## "manager": "kubectl-client-side-apply", +## "operation": "Update", +## "time": "2021-09-20T16:10:48Z" +## } +## ], +## "name": "gp2", +## "resourceVersion": "253", +## "uid": "5768ea51-ae73-450e-b0de-38a07be0a5d3" +## }, +## "parameters": { +## "fsType": "ext4", +## "type": "gp2" +## }, +## "provisioner": "kubernetes.io/aws-ebs", +## "reclaimPolicy": "Delete", +## "volumeBindingMode": "WaitForFirstConsumer" + +## } diff --git a/examples/full-cluster/ec2-keypair.tf b/examples/full-cluster/ec2-keypair.tf new file mode 100644 index 0000000..e47db54 --- /dev/null +++ b/examples/full-cluster/ec2-keypair.tf @@ -0,0 +1,36 @@ +locals { + keypair_name = format("ec2-ssh-%v%v", local._prefixes["eks"], var.cluster_name) +} + +# two-step process to create +# terraform apply -target=null_resource.generate_keypair +# terraform apply +# when done, add to git +# cd setup +# echo inf-ec2-keypair >> .gitignore +# git-secret add inf-ec2-keypair +# git-secret hide +# git add inf-ec2-keypair.{pub,secret} +# git commit -m'add ec2-keypair: inf-ec2-keypair' inf-ec2-keypair.{pub,secret} .gitignore + +# inf-keypair +resource "null_resource" "generate_keypair" { + provisioner "local-exec" { + command = "test -d setup || mkdir setup" + } + provisioner "local-exec" { + working_dir = "./setup" + command = "ssh-keygen -f ${local.keypair_name} -N '' -t rsa -b 2048 -C '${local.keypair_name}@${var.cluster_name}.${var.vpc_domain_name}'" + } +} + +resource "aws_key_pair" "cluster_keypair" { + key_name = local.keypair_name + public_key = file("setup/${local.keypair_name}.pub") + depends_on = [null_resource.generate_keypair] +} + +output "cluster_keypair" { + description = "EC2 keypair for EKS Cluster" + value = aws_key_pair.cluster_keypair.key_name +} diff --git a/examples/full-cluster/efs/README.efs.md b/examples/full-cluster/efs/README.efs.md new file mode 100644 index 0000000..14039bd --- /dev/null +++ b/examples/full-cluster/efs/README.efs.md @@ -0,0 +1,81 @@ +# eks-efs + +A standard EKS cluster only provides the `gp2` storage class, which is an EBS based persistent volume. 
+`gp2` can only be used with ReadWriteOnce persistent volumes.
+If an application requires ReadOnlyMany or ReadWriteMany, a different type of persistent volume is required.
+The eks-efs module installs an efs-provisioner in the cluster with a storage class of `efs` which allows all types of persistent volumes.
+
+## Parameters
+
+| Name | Description |
+| ---- | ----------- |
+| region | The AWS region that the EKS cluster is located in. |
+| cluster_name | The name of the cluster in which efs-provisioner will be installed. |
+| subnet_ids | A list of subnets inside the VPC. Used for EFS mount points. |
+| security_groups | Security groups for all worker management. |
+| aws_efs_csi_driver_version | Which version of the aws-efs-csi-driver helm chart to use. Currently defaults to 2.1.4. |
+| external_provisioner_tag | Which tag of public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner to use. Currently defaults to v2.1.1-eks-1-18-2. |
+| livenessprobe_tag | Which tag of public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe to use. Currently defaults to v2.2.0-eks-1-18-2. |
+| node_driver_registrar_tag | Which tag of public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar to use. Currently defaults to v2.1.0-eks-1-18-2. |
+
+## Updating the aws-efs-csi-driver chart
+
+When using a private VPC, the helm chart cannot be downloaded from "https://kubernetes-sigs.github.io/aws-efs-csi-driver/" during installation.
+A local copy of the chart is maintained within the terraform script.
+The latest version of the helm chart can be found by looking at https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/charts/aws-efs-csi-driver/Chart.yaml and checking the `version:` tag (not the `appVersion` tag).
+To update this helm chart to the latest version, the procedure is to:
+
+```shell
+cd charts
+helm repo add aws-efs-csi-driver https://kubernetes-sigs.github.io/aws-efs-csi-driver/
+helm repo update
+rm -fr aws-efs-csi-driver
+helm pull aws-efs-csi-driver/aws-efs-csi-driver --untar
+```
+
+After completing these steps, be sure to examine aws-efs-csi-driver/values.yaml and confirm that the tags listed for the sidecar images match the tags assigned by default in input.tf.
+For example, the values.yaml file:
+
+```yaml
+sidecars:
+  livenessProbe:
+    image:
+      repository: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe
+      tag: v2.2.0-eks-1-18-2
+      pullPolicy: IfNotPresent
+    resources: {}
+  nodeDriverRegistrar:
+    image:
+      repository: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar
+      tag: v2.1.0-eks-1-18-2
+      pullPolicy: IfNotPresent
+    resources: {}
+  csiProvisioner:
+    image:
+      repository: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner
+      tag: v2.1.1-eks-1-18-2
+      pullPolicy: IfNotPresent
+    resources: {}
+```
+
+Entries in input.tf:
+
+```hcl
+variable "livenessprobe_tag" {
+  description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe to use."
+  default     = "v2.2.0-eks-1-18-2"
+}
+
+variable "node_driver_registrar_tag" {
+  description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar to use."
+  default     = "v2.1.0-eks-1-18-2"
+}
+
+variable "external_provisioner_tag" {
+  description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner to use."
+  default     = "v2.1.1-eks-1-18-2"
+}
+```
diff --git a/examples/full-cluster/efs/README.md b/examples/full-cluster/efs/README.md
new file mode 100644
index 0000000..5b68efd
--- /dev/null
+++ b/examples/full-cluster/efs/README.md
@@ -0,0 +1,167 @@
+# EFS
+
+This sets up the needed EFS resources for persistent volumes. See [this](README.efs.md) for more details.
+
+## Links
+
+https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html
+https://github.com/kubernetes-sigs/aws-efs-csi-driver
+https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/433
+https://github.com/hashicorp/terraform-provider-kubernetes/issues/723#issuecomment-679423792
+https://dev.to/vidyasagarmsc/update-multiple-lines-in-a-yaml-file-49fb
+
+## Initialize
+
+* Proxy setup
+
+A proxy is needed because the system may not have direct access to the `registry.terraform.io` site,
+and if access is indirect, it may not be able to handle a proxy redirect. You may not need to use this, but if you get
+errors from the `tf-init`, this is the first thing to set up.
+
+```shell
+export HTTP_PROXY=http://proxy.tco.census.gov:3128
+export HTTPS_PROXY=http://proxy.tco.census.gov:3128
+```
+
+## Terraform Automated
+
+A `tf-run.data` file exists here, so the simplest way to implement this is with the `tf-run.sh` script.
+
+* copy the `remote_state.yml` from the parent and update `directory` to be the current directory
+* run the `tf-run.sh`
+
+```console
+% tf-run.sh apply
+```
+
+* example of the `tf-run.sh` steps
+
+This is part of a larger cluster configuration, so at the end of the run it indicates another directory
+to visit when done.
+
+```console
+% tf-run.sh list
+* running action=plan
+* START: tf-run.sh v1.1.2 start=1636558187 end= logfile=logs/run.plan.20211110.1636558187.log (not-created)
+* reading from tf-run.data
+* read 7 entries from tf-run.data
+> list
+** START: start=1636558187
+* 1 COMMAND> tf-directory-setup.py -l none -f
+* 2 COMMAND> setup-new-directory.sh
+* 3 COMMAND> tf-init -upgrade
+* 4 POLICY> (*.tf) aws_iam_policy.efs-policy
+* 4 tf-plan -target=aws_iam_policy.efs-policy
+* 5 tf-plan
+* 6 COMMAND> tf-directory-setup.py -l s3
+* 7 STOP> cd ../common-services and tf-run.sh apply
+** END: start=1636558187 end=1636558187 elapsed=0 logfile=logs/run.plan.20211110.1636558187.log (not-created)
+```
+
+It is highly recommended to use the `tf-run.sh` approach.
+
+## Terraform Manual
+
+First, copy the `remote_state.yml` from the parent and update `directory` to be the current directory.
+
+```shell
+tf-directory-setup.py -l none
+setup-new-directory.sh
+tf-init
+```
+
+* Apply the EFS policy first (before the role)
+
+```shell
+tf-apply -target=aws_iam_policy.efs-policy
+```
+
+* Apply the rest
+
+This must be done from a system with the skopeo command, so RHEL8+.
+
+To use the local install, the efs/charts/ directory
+must be populated with the expected code (see [README.efs.md](README.efs.md)) outside of terraform,
+much like the .tf files are created; a sketch of pinning to that local copy follows the commands below.
+Currently, as the box we run this from has internet access,
+we can deploy by pulling the helm charts from the internet.
+
+```shell
+tf-apply
+tf-directory-setup.py -l s3
+```
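+
+To pin the deployment to the vendored copy instead of the remote repository, a sketch of the
+change in main.tf (the chart entry shown mirrors the actual one in this directory; only the
+`use_remote` flag flips):
+
+```hcl
+charts = {
+  "efs-provisioner" = {
+    name       = "aws-efs-csi-driver"
+    repository = "https://kubernetes-sigs.github.io/aws-efs-csi-driver"
+    version    = "2.1.4"
+    use_remote = false # resolve the chart from ${path.module}/charts instead
+  }
+}
+```
+
+## Post Setup Examination
+
+This shows what was set up (look at the `efs-csi-*` pods). Your `kubectl` configuration file
+needs to be set up (one is extracted in `setup/kube.config` as part of this configuration).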
+
+```console
+% kubectl --kubeconfig setup/kube.config get pods -n kube-system
+NAME                                  READY   STATUS    RESTARTS   AGE
+aws-node-j6n6z                        1/1     Running   1          27h
+aws-node-nmgqm                        1/1     Running   1          27h
+aws-node-t5ggn                        1/1     Running   1          27h
+aws-node-vxlvw                        1/1     Running   0          27h
+coredns-65bfc5645f-254kx              1/1     Running   0          29h
+coredns-65bfc5645f-zpvld              1/1     Running   0          29h
+efs-csi-controller-7c88dbd56d-chdkt   3/3     Running   0          3m36s
+efs-csi-controller-7c88dbd56d-hsws7   3/3     Running   0          3m36s
+efs-csi-node-4gjdh                    3/3     Running   0          3m36s
+efs-csi-node-g49r7                    3/3     Running   0          3m36s
+efs-csi-node-hq6q9                    3/3     Running   0          3m36s
+efs-csi-node-lcdmd                    3/3     Running   0          3m36s
+kube-proxy-dp9zl                      1/1     Running   0          27h
+kube-proxy-n9l75                      1/1     Running   0          27h
+kube-proxy-qrv2w                      1/1     Running   0          27h
+kube-proxy-zssvb                      1/1     Running   0          27h
+```
+
+* Create PV
+
+**Automated**
+
+Use the `persistent-volume.tf`, which is set up by default and should happen
+as part of the final apply above.
+
+**Manual**
+
+```yaml
+# pvc.yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: efs-test3-claim
+spec:
+  accessModes:
+    - ReadWriteMany
+  volumeMode: Filesystem
+  resources:
+    requests:
+      storage: 25Gi
+  storageClassName: efs
+```
+
+```console
+% kubectl get pv
+No resources found
+% kubectl get pvc
+No resources found in default namespace.
+% kubectl apply -f pvc.yaml
+persistentvolumeclaim/efs-test3-claim created
+% kubectl get pvc
+NAME              STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+efs-test3-claim   Pending                                      efs            39s
+```
+
+**Examine**
+
+```shell
+kubectl --kubeconfig setup/kube.config describe pvc efs-test3-claim
+```
+
+To patch to make it work with the regional STS endpoint (this is handled in the TF code):
+
+```shell
+kubectl --kubeconfig setup/kube.config -n kube-system set env deployment/efs-csi-controller AWS_STS_REGIONAL_ENDPOINTS=regional
+```
diff --git a/examples/full-cluster/efs/copy_image.sh b/examples/full-cluster/efs/copy_image.sh
new file mode 120000
index 0000000..889e269
--- /dev/null
+++ b/examples/full-cluster/efs/copy_image.sh
@@ -0,0 +1 @@
+../bin/copy_image.sh
\ No newline at end of file
diff --git a/examples/full-cluster/efs/copy_images.tf b/examples/full-cluster/efs/copy_images.tf
new file mode 100644
index 0000000..bf89085
--- /dev/null
+++ b/examples/full-cluster/efs/copy_images.tf
@@ -0,0 +1,57 @@
+
+data "aws_ecr_authorization_token" "token" {}
+
+locals {
+  repo_parent_name = format("eks/%v", var.cluster_name)
+  images = [
+    {
+      image = "external-provisioner"
+      tag   = var.external_provisioner_tag
+    },
+    {
+      image = "livenessprobe"
+      tag   = var.livenessprobe_tag
+    },
+    {
+      image = "node-driver-registrar"
+      tag   = var.node_driver_registrar_tag
+    },
+  ]
+}
+
+resource "aws_ecr_repository" "repository" {
+  for_each = { for image in local.images : image.image => image }
+
+  name                 = format("%v/%v", local.repo_parent_name, each.value.image)
+  image_tag_mutability = "IMMUTABLE"
+
+  image_scanning_configuration {
+    scan_on_push = true
+  }
+
+  encryption_configuration {
+    encryption_type = "KMS"
+  }
+
+  tags = merge(
+    #local.common_tags,
+    #local.base_tags,
+    #var.application_tags,
+    tomap({ "Name" = format("ecr-eks-%v-%v", var.cluster_name, each.value.image) }),
+  )
+}
+
+resource "null_resource" "copy_images" {
+  for_each = { for image in local.images : image.image => image }
+
+  provisioner "local-exec" {
+    command = "${path.module}/copy_image.sh"
+    environment = {
+      SOURCE_IMAGE      = format("%v/%v:%v", local.src_reg, each.value.image, each.value.tag)
+      DESTINATION_IMAGE = format("%v:%v", aws_ecr_repository.repository[each.key].repository_url,
each.value.tag) + DESTINATION_USERNAME = data.aws_ecr_authorization_token.token.user_name + DESTINATION_PASSWORD = data.aws_ecr_authorization_token.token.password + } + } +} + diff --git a/examples/full-cluster/efs/data.eks.tf b/examples/full-cluster/efs/data.eks.tf new file mode 100644 index 0000000..870e8c6 --- /dev/null +++ b/examples/full-cluster/efs/data.eks.tf @@ -0,0 +1,15 @@ +data "aws_eks_cluster" "cluster" { + name = var.cluster_name +} + +data "aws_eks_cluster_auth" "cluster" { + name = var.cluster_name +} + +locals { + aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster + # for main.tf + # aws_eks_cluster = aws_eks_cluster.eks_cluster + # for all subdirectories + aws_eks_cluster = data.aws_eks_cluster.cluster +} diff --git a/examples/full-cluster/efs/ecr.tf b/examples/full-cluster/efs/ecr.tf new file mode 100644 index 0000000..228a775 --- /dev/null +++ b/examples/full-cluster/efs/ecr.tf @@ -0,0 +1,57 @@ + +# Populated from: +# https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html + +data "aws_caller_identity" "whoami" {} + +locals { + af_south_1 = (var.region == "af-south-1" ? "877085696533.dkr.ecr.af-south-1.amazonaws.com/" : "") + af = local.af_south_1 + + ap_east_1 = var.region == "ap-east-1" ? "800184023465.dkr.ecr.ap-east-1.amazonaws.com/" : "" + ap_northeast_1 = var.region == "ap-northeast-1" ? "602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/" : "" + ap_northeast_2 = var.region == "ap-northeast-2" ? "602401143452.dkr.ecr.ap-northeast-2.amazonaws.com/" : "" + ap_northeast_3 = var.region == "ap-northeast-3" ? "602401143452.dkr.ecr.ap-northeast-3.amazonaws.com/" : "" + ap_south_1 = var.region == "ap-south-1" ? "602401143452.dkr.ecr.ap-south-1.amazonaws.com/" : "" + ap_southeast_1 = var.region == "ap-southeast-1" ? "602401143452.dkr.ecr.ap-southeast-1.amazonaws.com/" : "" + ap_southeast_2 = var.region == "ap-southeast-2" ? "602401143452.dkr.ecr.ap-southeast-2.amazonaws.com/" : "" + ap_1 = "${local.ap_east_1}${local.ap_northeast_1}${local.ap_northeast_2}${local.ap_northeast_3}${local.ap_south_1}" + ap_2 = "${local.ap_southeast_1}${local.ap_southeast_2}" + ap = "${local.ap_1}${local.ap_2}" + + ca_central_1 = var.region == "ca-central-1" ? "602401143452.dkr.ecr.ca-central-1.amazonaws.com/" : "" + ca = local.ca_central_1 + + cn_north_1 = var.region == "cn-north-1" ? "918309763551.dkr.ecr.cn-north-1.amazonaws.com.cn/" : "" + cn_northwest_1 = var.region == "cn-northwest-1" ? "961992271922.dkr.ecr.cn-northwest-1.amazonaws.com.cn/" : "" + cn = "${local.cn_north_1}${local.cn_northwest_1}" + + eu_central_1 = var.region == "eu-central-1" ? "602401143452.dkr.ecr.eu-central-1.amazonaws.com/" : "" + eu_north_1 = var.region == "eu-north-1" ? "602401143452.dkr.ecr.eu-north-1.amazonaws.com/" : "" + eu_south_1 = var.region == "eu-south-1" ? "590381155156.dkr.ecr.eu-south-1.amazonaws.com/" : "" + eu_west_1 = var.region == "eu-west-1" ? "602401143452.dkr.ecr.eu-west-1.amazonaws.com/" : "" + eu_west_2 = var.region == "eu-west-2" ? "602401143452.dkr.ecr.eu-west-2.amazonaws.com/" : "" + eu_west_3 = var.region == "eu-west-3" ? "602401143452.dkr.ecr.eu-west-3.amazonaws.com/" : "" + eu = "${local.eu_central_1}${local.eu_north_1}${local.eu_south_1}${local.eu_west_1}${local.eu_west_2}${local.eu_west_3}" + + me_south_1 = var.region == "me-south-1" ? "558608220178.dkr.ecr.me-south-1.amazonaws.com/" : "" + me = local.me_south_1 + + sa_east_1 = var.region == "sa-east-1" ? 
"602401143452.dkr.ecr.sa-east-1.amazonaws.com/" : "" + sa = local.sa_east_1 + + us_east_1 = var.region == "us-east-1" ? "602401143452.dkr.ecr.us-east-1.amazonaws.com/" : "" + us_east_2 = var.region == "us-east-2" ? "602401143452.dkr.ecr.us-east-2.amazonaws.com/" : "" + us_gov_east_1 = var.region == "us-gov-east-1" ? "151742754352.dkr.ecr.us-gov-east-1.amazonaws.com/" : "" + us_gov_west_1 = var.region == "us-gov-west-1" ? "013241004608.dkr.ecr.us-gov-west-1.amazonaws.com/" : "" + us_west_1 = var.region == "us-west-1" ? "602401143452.dkr.ecr.us-west-1.amazonaws.com/" : "" + us_west_2 = var.region == "us-west-2" ? "602401143452.dkr.ecr.us-west-2.amazonaws.com/" : "" + us = "${local.us_east_1}${local.us_east_2}${local.us_gov_east_1}${local.us_gov_west_1}${local.us_west_1}${local.us_west_2}" + + ecr = "${local.af}${local.ap}${local.ca}${local.cn}${local.eu}${local.me}${local.sa}${local.us}" + + + public_reg = "public.ecr.aws" + src_reg = format("%v/eks-distro/kubernetes-csi", local.public_reg) + account_ecr = "${data.aws_caller_identity.whoami.account_id}.dkr.ecr.${var.region}.amazonaws.com/${var.cluster_name}" +} diff --git a/examples/full-cluster/efs/efs.tf b/examples/full-cluster/efs/efs.tf new file mode 100644 index 0000000..9fb5563 --- /dev/null +++ b/examples/full-cluster/efs/efs.tf @@ -0,0 +1,26 @@ +# Create an Amazon EFS file system for the EKS cluster. +# Step 4a: Create a file system. +# Step 4b: Create mount targets. +module "efs" { + source = "git@github.e.it.census.gov:terraform-modules/aws-efs.git" + + name = var.cluster_name + vpc_id = local.vpc_id + subnet_ids = local.subnet_ids + security_groups = [local.cluster_worker_sg_id] + ## subnet_ids = local.cni_subnet_ids + ## security_groups = [local.cluster_cni_worker_sg_id] + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + tomap({ "efs.csi.aws.com/cluster" = "true" }), + ) +} + +# look at efs module. Add +# efs_tags +# kms_tags +# moint_point_tags +# or use the override tags thing diff --git a/examples/full-cluster/efs/kubeconfig.tf b/examples/full-cluster/efs/kubeconfig.tf new file mode 100644 index 0000000..5e386f5 --- /dev/null +++ b/examples/full-cluster/efs/kubeconfig.tf @@ -0,0 +1,29 @@ +resource "null_resource" "kubeconfig" { + triggers = { + always_run = timestamp() + } + provisioner "local-exec" { + command = "which kubectl > /dev/null 2>&1; if [ $? 
+  }
+  provisioner "local-exec" {
+    command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'"
+  }
+  provisioner "local-exec" {
+    environment = {
+      AWS_PROFILE = var.profile
+      AWS_REGION  = local.region
+    }
+    command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/kube.config"
+  }
+  depends_on = [data.aws_eks_cluster.cluster]
+}
+
+#---
+# call it like
+#---
+## provisioner "local-exec" {
+##   environment = {
+##     KUBECONFIG = "${path.root}/setup/kube.config"
+##   }
+##   command = "kubectl set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true"
+## }
diff --git a/examples/full-cluster/efs/locals.tf b/examples/full-cluster/efs/locals.tf
new file mode 100644
index 0000000..3ad38f5
--- /dev/null
+++ b/examples/full-cluster/efs/locals.tf
@@ -0,0 +1,19 @@
+locals {
+  base_tags = {
+    "eks-cluster-name"      = var.cluster_name
+    "boc:tf_module_version" = local._module_version
+    "boc:created_by"        = "terraform"
+  }
+}
+
+# replace TF remote state accordingly in parent_rs with that from the parent directory, and be sure to make the link
+locals {
+  parent_rs = data.terraform_remote_state.{vpc-state-path}_{application-state-path}-eks-{cluster-name}.outputs
+
+  vpc_id               = local.parent_rs.cluster_vpc_id
+  subnet_ids           = local.parent_rs.cluster_subnet_ids
+  cluster_worker_sg_id = local.parent_rs.cluster_worker_sg_id
+
+  oidc_provider_url = local.parent_rs.oidc_provider_url
+  oidc_provider_arn = local.parent_rs.oidc_provider_arn
+}
diff --git a/examples/full-cluster/efs/main.tf b/examples/full-cluster/efs/main.tf
new file mode 100644
index 0000000..446e3b9
--- /dev/null
+++ b/examples/full-cluster/efs/main.tf
@@ -0,0 +1,125 @@
+# Most of this file references the AWS documentation to install the
+# Amazon EFS CSI driver. This documentation is found here:
+# https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html
+
+
+## data "tls_certificate" "certs" {
+##   url = data.aws_eks_cluster.cluster.identity[0].oidc[0].issuer
+## }
+
+locals {
+  charts = {
+    "efs-provisioner" = {
+      name       = "aws-efs-csi-driver"
+      repository = "https://kubernetes-sigs.github.io/aws-efs-csi-driver"
+      version    = "2.1.4"
+      use_remote = true
+    }
+  }
+}
+
+# Create an IAM policy and role
+# Step 1b:
+# Contents of the policy are found here:
+# https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/docs/iam-policy-example.json
+#
+# See policy.tf
+
+# Create an IAM policy and role
+# Step 2b:
+#
+# See role.tf
+
+#resource "null_resource" "helm_charts" {
+#  for_each = toset(local.charts)
+#  provisioner "local-exec" {
+#    command = "test -d ${path.module}/charts/${each.key} || mkdir -p ${path.module}/charts/${each.key}"
+#  }
+#}
+
+# Install the Amazon EFS driver
+# Step 3:
+# See the readme `Updating the aws-efs-csi-driver chart` on updating this chart.
+resource "helm_release" "efs-provisioner" {
+  depends_on = [null_resource.copy_images]
+
+  chart     = "aws-efs-csi-driver"
+  name      = "efs-provisioner"
+  namespace = "kube-system"
+  # repository = "${path.module}/charts"
+  repository = local.charts["efs-provisioner"].use_remote ? local.charts["efs-provisioner"].repository : "${path.module}/charts"
+  version    = local.charts["efs-provisioner"].use_remote ?
local.charts["efs-provisioner"].version : null + recreate_pods = true + timeout = 300 + set { + name = "image.repository" + value = "${local.ecr}eks/aws-efs-csi-driver" + } + set { + name = "sidecars.livenessProbe.image.repository" + value = aws_ecr_repository.repository["livenessprobe"].repository_url + } + set { + name = "sidecars.livenessProbe.image.tag" + value = var.livenessprobe_tag + } + set { + name = "sidecars.nodeDriverRegistrar.image.repository" + value = aws_ecr_repository.repository["node-driver-registrar"].repository_url + } + set { + name = "sidecars.nodeDriverRegistrar.image.tag" + value = var.node_driver_registrar_tag + } + set { + name = "sidecars.csiProvisioner.image.repository" + value = aws_ecr_repository.repository["external-provisioner"].repository_url + } + set { + name = "sidecars.csiProvisioner.image.tag" + value = var.external_provisioner_tag + } + set { + name = "controller.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" + # value = aws_iam_role.cluster_efs_role.arn + value = module.role_efs-driver.role_arn + } +} + +# The efs-provisioner defaults to using sts.amazonaws.com which resolves to +# a public IP address. The cluster cannot access it. However, this issue: +# https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/433 +# was resolved with the ability to use an environment variable to tell the +# provisioner to use the regional sts instead. The problem is that the +# helm chart does not have a provision to set this. So instead, after the +# provisioner is deployed, patch the deployment: +resource "null_resource" "patch-efs-provisioner-for-regional-sts" { + depends_on = [helm_release.efs-provisioner] + provisioner "local-exec" { + environment = { + KUBECONFIG = "${path.root}/setup/kube.config" + } + command = "kubectl -n kube-system set env deployment/efs-csi-controller AWS_STS_REGIONAL_ENDPOINTS=regional" + } +} + +# Create an Amazon EFS file system for the EKS cluster. +# Step 4a: Create a file system. +# Step 4b: Create mount targets. + +# Create a default storage class. 
+resource "kubernetes_storage_class" "efs-sc" {
+  depends_on = [module.efs]
+
+  metadata {
+    name = "efs"
+  }
+  storage_provisioner = "efs.csi.aws.com"
+  parameters = {
+    provisioningMode = "efs-ap"
+    fileSystemId     = module.efs.id
+    directoryPerms   = "700"
+  }
+  mount_options = ["tls"]
+}
+
diff --git a/examples/full-cluster/efs/persistent-volume.tf b/examples/full-cluster/efs/persistent-volume.tf
new file mode 100644
index 0000000..7ff0766
--- /dev/null
+++ b/examples/full-cluster/efs/persistent-volume.tf
@@ -0,0 +1,19 @@
+resource "kubernetes_persistent_volume_claim" "cluster-base-efs" {
+  metadata {
+    name = format("%v%v-%v", local._prefixes["eks"], var.cluster_name, "base-claim")
+    # namespace = kubernetes_namespace.cicd_namespace.metadata[0].name
+  }
+  wait_until_bound = false
+  spec {
+    access_modes = ["ReadWriteMany"]
+    # capacity = {
+    #   storage = "25Gi"
+    # }
+    resources {
+      requests = {
+        storage = "25Gi"
+      }
+    }
+    storage_class_name = "efs"
+  }
+}
diff --git a/examples/full-cluster/efs/policy.tf b/examples/full-cluster/efs/policy.tf
new file mode 100644
index 0000000..4ec462d
--- /dev/null
+++ b/examples/full-cluster/efs/policy.tf
@@ -0,0 +1,55 @@
+# apply policy before creating role
+# tf-apply -target=aws_iam_policy.efs-policy
+
+resource "aws_iam_policy" "efs-policy" {
+  name        = format("%v%v-efs-driver", local._prefixes["eks-policy"], var.cluster_name)
+  path        = "/"
+  description = "Allow configuration of the EFS"
+  policy      = data.aws_iam_policy_document.efs-policy.json
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.application_tags,
+    tomap({ "Name" = format("%v%v-efs-driver", local._prefixes["eks-policy"], var.cluster_name) }),
+  )
+}
+
+# TBD: refine resources to limit only to eks configurations
+data "aws_iam_policy_document" "efs-policy" {
+  statement {
+    sid       = "EKSEFSDescribe"
+    effect    = "Allow"
+    resources = ["*"]
+    actions = [
+      "elasticfilesystem:DescribeAccessPoints",
+      "elasticfilesystem:DescribeFileSystems",
+    ]
+  }
+  statement {
+    sid       = "EKSEFSCreateAccessPoint"
+    effect    = "Allow"
+    resources = ["*"]
+    actions = [
+      "elasticfilesystem:CreateAccessPoint"
+    ]
+    condition {
+      test     = "StringLike"
+      variable = "aws:RequestTag/efs.csi.aws.com/cluster"
+      values   = ["true"]
+    }
+  }
+  statement {
+    sid       = "EKSEFSDeleteAccessPoint"
+    effect    = "Allow"
+    resources = ["*"]
+    actions = [
+      "elasticfilesystem:DeleteAccessPoint"
+    ]
+    condition {
+      test     = "StringLike"
+      variable = "aws:ResourceTag/efs.csi.aws.com/cluster"
+      values   = ["true"]
+    }
+  }
+}
diff --git a/examples/full-cluster/efs/prefixes.tf b/examples/full-cluster/efs/prefixes.tf
new file mode 120000
index 0000000..e0bf5ad
--- /dev/null
+++ b/examples/full-cluster/efs/prefixes.tf
@@ -0,0 +1 @@
+../prefixes.tf
\ No newline at end of file
diff --git a/examples/full-cluster/efs/providers.tf b/examples/full-cluster/efs/providers.tf
new file mode 120000
index 0000000..7244d01
--- /dev/null
+++ b/examples/full-cluster/efs/providers.tf
@@ -0,0 +1 @@
+../providers.tf
\ No newline at end of file
diff --git a/examples/full-cluster/efs/region.tf b/examples/full-cluster/efs/region.tf
new file mode 100644
index 0000000..b7b1696
--- /dev/null
+++ b/examples/full-cluster/efs/region.tf
@@ -0,0 +1,4 @@
+locals {
+  region = var.region
+}
+
diff --git a/examples/full-cluster/efs/role.tf b/examples/full-cluster/efs/role.tf
new file mode 100644
index 0000000..d70f981
--- /dev/null
+++ b/examples/full-cluster/efs/role.tf
@@ -0,0 +1,48 @@
+#---
+# cluster
+#---
+locals {
+  # oidc =
replace(data.aws_eks_cluster.cluster.identity[0].oidc[0].issuer, "https://", "") + account_id = data.aws_caller_identity.current.account_id + principal = format("arn:%v:iam::%v:oidc-provider/%v", data.aws_arn.current.partition, local.account_id, local.oidc_provider_url) +} + +# create: aws_iam_policy.efs-policy first +module "role_efs-driver" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git" + + role_name = format("%v%v-efs-driver", local._prefixes["eks"], var.cluster_name) + role_description = "EKS EFS Driver Role for ${var.cluster_name}" + enable_ldap_creation = false + assume_policy_document = data.aws_iam_policy_document.efs_assume_webidentity.json + attached_policies = [aws_iam_policy.efs-policy.arn] + + tags = merge( + local.base_tags, + local.common_tags, + var.application_tags, + tomap({ "Name" = format("%v%v-efs-driver", local._prefixes["eks-role"], var.cluster_name) }), + ) +} + +data "aws_iam_policy_document" "efs_assume_webidentity" { + statement { + sid = "EFSAssumeRoleWebIdentity" + effect = "Allow" + actions = ["sts:AssumeRoleWithWebIdentity"] + principals { + type = "Federated" + identifiers = [local.principal] + } + condition { + test = "StringEquals" + variable = "${local.oidc_provider_url}:sub" + values = ["system:serviceaccount:kube-system:efs-csi-controller-sa"] + } + } +} + +output "role_efs-driver_arn" { + description = "Role ARN for EKS EFS Driver Role" + value = module.role_efs-driver.role_arn +} diff --git a/examples/full-cluster/efs/tf-run.data b/examples/full-cluster/efs/tf-run.data new file mode 100644 index 0000000..8bb6677 --- /dev/null +++ b/examples/full-cluster/efs/tf-run.data @@ -0,0 +1,7 @@ +COMMAND tf-directory-setup.py -l none -f +COMMAND setup-new-directory.sh +COMMAND tf-init -upgrade +POLICY +ALL +COMMAND tf-directory-setup.py -l s3 +STOP cd ../common-services and tf-run.sh apply diff --git a/examples/full-cluster/efs/variables.efs.tf b/examples/full-cluster/efs/variables.efs.tf new file mode 100644 index 0000000..0e2acb6 --- /dev/null +++ b/examples/full-cluster/efs/variables.efs.tf @@ -0,0 +1,37 @@ +# variable "eks_vpc_name" { +# description = "Define the VPC name that will be used by this cluster" +# type = string +# default = "*vpc4*" +# } +# +# variable "subnets_name" { +# description = "Define the name of the subnets to be used by this cluster" +# type = string +# default = "*-apps-*" +# } + +variable "cluster_worker_sg_id" { + description = "Security group for all worker management." + type = string + default = "" +} + +# See the readme `Updating the aws-efs-csi-driver chart` to find these values. 
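+#
+# (A hedged sketch of one way to look them up, assuming helm is installed and
+# the upstream chart repo published by kubernetes-sigs is reachable:)
+#
+# ```shell
+# helm repo add aws-efs-csi-driver https://kubernetes-sigs.github.io/aws-efs-csi-driver/
+# helm repo update
+# helm show values aws-efs-csi-driver/aws-efs-csi-driver | grep -A 3 'image:'
+# ```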
+variable "livenessprobe_tag" { + description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/liveness" + type = string + default = "v2.2.0-eks-1-18-2" +} + +variable "node_driver_registrar_tag" { + description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/node-dri" + type = string + default = "v2.1.0-eks-1-18-2" +} + +variable "external_provisioner_tag" { + description = "Which tag of public.ecr.aws/eks-distro/kubernetes-csi/external" + type = string + default = "v2.1.1-eks-1-18-2" +} + diff --git a/examples/full-cluster/efs/variables.eks.tf b/examples/full-cluster/efs/variables.eks.tf new file mode 120000 index 0000000..7dd95db --- /dev/null +++ b/examples/full-cluster/efs/variables.eks.tf @@ -0,0 +1 @@ +../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster/efs/variables.vpc.tf b/examples/full-cluster/efs/variables.vpc.tf new file mode 120000 index 0000000..f672f33 --- /dev/null +++ b/examples/full-cluster/efs/variables.vpc.tf @@ -0,0 +1 @@ +../variables.vpc.tf \ No newline at end of file diff --git a/examples/full-cluster/efs/version.tf b/examples/full-cluster/efs/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/full-cluster/efs/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git a/examples/full-cluster/eks-console-access.tf b/examples/full-cluster/eks-console-access.tf new file mode 100644 index 0000000..f590c3f --- /dev/null +++ b/examples/full-cluster/eks-console-access.tf @@ -0,0 +1,70 @@ +# ```shell +# curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml +# curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml +# ``` +# +# For full console, we'll use the first one. +# +# ```console +# % kubectl apply -f eks-console-full-access.yaml +# clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole created +# clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding created +# ``` + +locals { + cluster_roles = [ + { + name = "eks-console-full-access" + url = "https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml" + enabled = true + }, + { + name = "eks-console-restricted-access" + url = "https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml" + enabled = false + }, + ] + cluster_roles_map = { for cr in local.cluster_roles : cr.name => cr } +} + + +data "http" "cluster_roles" { + for_each = local.cluster_roles_map + url = each.value.url +} + +resource "null_resource" "cluster_roles" { + for_each = local.cluster_roles_map + triggers = { + roles = join(",", [each.key, each.value.url]) + } + provisioner "local-exec" { + command = "test -d setup || mkdir setup" + } + provisioner "local-exec" { + command = "echo '${data.http.cluster_roles[each.key].body}' > setup/${each.value.name}.yaml" + } +} + +resource "null_resource" "apply_cluster_roles" { + for_each = { for k, v in local.cluster_roles_map : k => v if v.enabled } + triggers = { + roles = join(",", [each.key, each.value.url]) + } + depends_on = [null_resource.kubeconfig] + # provisioner "local-exec" { + # command = "if [ -z $KUBECONFIG ]; then 'echo missing KUBECONFIG'; exit 1; else exit 0; fi" + # } + # provisioner "local-exec" { + # command = "if [ ! -r $KUBECONFIG ]; then 'echo unreadable KUBECONFIG'; exit 1; else exit 0; fi" + # } + # provisioner "local-exec" { + # command = "which kubectl > /dev/null 2>&1; if [ $? 
!= 0 ]; then 'echo missing kubectl'; exit 1; else exit 0; fi" + # } + provisioner "local-exec" { + environment = { + KUBECONFIG = "${path.root}/setup/kube.config" + } + command = "kubectl apply -f setup/${each.value.name}.yaml" + } +} diff --git a/examples/full-cluster/group.tf b/examples/full-cluster/group.tf new file mode 100644 index 0000000..cdffce9 --- /dev/null +++ b/examples/full-cluster/group.tf @@ -0,0 +1,13 @@ +module "group_cluster-admin" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-group.git" + + group_name = format("%v%v-cluster-admin", local._prefixes["eks"], var.cluster_name) + attached_policies = [aws_iam_policy.cluster-admin-policy.arn, aws_iam_policy.cluster-admin_assume_policy.arn] + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + ) +} diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/data.eks.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/data.eks.tf new file mode 120000 index 0000000..bc5a403 --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/data.eks.tf @@ -0,0 +1 @@ +../data.eks.tf \ No newline at end of file diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/irsa-roles.autoscale.tf.off b/examples/full-cluster/irsa-roles/cluster-autoscaler/irsa-roles.autoscale.tf.off new file mode 100644 index 0000000..8199a2e --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/irsa-roles.autoscale.tf.off @@ -0,0 +1,63 @@ +data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRoleWithWebIdentity"] + effect = "Allow" + + condition { + test = "StringEquals" + variable = "${local.oidc_provider_url}:sub" + values = ["system:serviceaccount:${var.namespace}:${var.name}"] + } + + principals { + identifiers = [local.oidc_provider_arn] + type = "Federated" + } + } +} + +data "aws_iam_policy_document" "app_policy1"{ + statement { + sid = "ClusterAutoscaler" + effect = "Allow" + actions = [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:DescribeLaunchTemplateVersions" + ] + resources = ["*"] + } +} + +resource "aws_iam_policy" "app_policy1" { + name = format("%v%v-%v-%v-policy1", local._prefixes["eks-policy"], var.cluster_name, var.namespace, var.name) + path = "/" + policy = data.aws_iam_policy_document.app_policy1.json + +} + +module "app_role" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git" + + role_name = format("%v%v-irsa-%v-%v", local._prefixes["eks"], var.cluster_name, var.namespace, var.name) + role_description = "EKS IAM Role for ${var.cluster_name} for service account ${var.namespace}:${var.name}" + enable_ldap_creation = false + assume_policy_document = data.aws_iam_policy_document.assume_role_policy.json + attached_policies = [aws_iam_policy.app_policy1.arn] + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + ) +} + +output "app_role_arn" { + description = "ARN of IAM Role for Service account for cluster-autoscaler" + value = module.app_role.role_arn +} diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/locals.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/locals.tf new file mode 100644 index 0000000..d1f92d0 --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/locals.tf @@ 
-0,0 +1,19 @@ +locals { + base_tags = { + "eks-cluster-name" = var.cluster_name + "boc:tf_module_version" = local._module_version + "boc:created_by" = "terraform" + } +} + +# replace TF remote state accordingly in parent_rs with that from the parent directory, and be sure to make the link +locals { + parent_rs = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east_vpc3_apps_eks-adsd-cumulus-qa.outputs + + vpc_id = local.parent_rs.cluster_vpc_id + subnet_ids = local.parent_rs.cluster_subnet_ids + cluster_worker_sg_id = local.parent_rs.cluster_worker_sg_id + + oidc_provider_url = local.parent_rs.oidc_provider_url + oidc_provider_arn = local.parent_rs.oidc_provider_arn +} diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/policy.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/policy.tf new file mode 100644 index 0000000..da92e08 --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/policy.tf @@ -0,0 +1,23 @@ +data "aws_iam_policy_document" "app_policy1" { + statement { + sid = "ClusterAutoscaler" + effect = "Allow" + actions = [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:DescribeLaunchTemplateVersions" + ] + resources = ["*"] + } +} + +resource "aws_iam_policy" "app_policy1" { + name = format("%v%v-%v__%v__%v", local._prefixes["eks-policy"], var.cluster_name, "p1", var.namespace, var.name) + path = "/" + policy = data.aws_iam_policy_document.app_policy1.json + +} diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/prefixes.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/prefixes.tf new file mode 120000 index 0000000..e0bf5ad --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/prefixes.tf @@ -0,0 +1 @@ +../prefixes.tf \ No newline at end of file diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/providers.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/providers.tf new file mode 120000 index 0000000..7244d01 --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/providers.tf @@ -0,0 +1 @@ +../providers.tf \ No newline at end of file diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/region.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/region.tf new file mode 100644 index 0000000..f617506 --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/region.tf @@ -0,0 +1,3 @@ +locals { + region = var.region +} diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/remote_state.yml b/examples/full-cluster/irsa-roles/cluster-autoscaler/remote_state.yml new file mode 100644 index 0000000..8b2a0b7 --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/remote_state.yml @@ -0,0 +1,9 @@ +directory: "applications/apps-adsd-eks/vpc/east/vpc3/apps/eks-adsd-cumulus-qa/irsa-roles/cluster-autoscaler" +profile: "252960665057-ma6-gov" +bucket: "inf-tfstate-252960665057" +bucket_region: "us-gov-east-1" +region: "us-gov-east-1" +regions: ["us-gov-east-1"] +account_id: "252960665057" +account_alias: "ma6-gov" +aws_environment: "gov" diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/role.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/role.tf new file mode 100644 index 0000000..d8847b6 --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/role.tf @@ -0,0 +1,39 @@ 
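+# This file wires an IAM role to the service account named by var.namespace /
+# var.name (cluster-autoscaler here) via IRSA, using the OIDC trust condition
+# below. (A hedged post-apply check; the role and service-account names depend
+# on what the apply actually produced, so the ones below are illustrative:)
+#
+# ```shell
+# aws iam get-role --role-name <the role created below> \
+#   --query 'Role.AssumeRolePolicyDocument'
+# kubectl -n kube-system get sa cluster-autoscaler \
+#   -o jsonpath='{.metadata.annotations.eks\.amazonaws\.com/role-arn}'
+# ```
+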
+data "aws_iam_policy_document" "assume_role_policy" { + statement { + actions = ["sts:AssumeRoleWithWebIdentity"] + effect = "Allow" + + condition { + test = "StringEquals" + variable = "${local.oidc_provider_url}:sub" + values = ["system:serviceaccount:${var.namespace}:${var.name}"] + } + + principals { + identifiers = [local.oidc_provider_arn] + type = "Federated" + } + } +} + +module "app_role" { + source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git" + + role_name = format("%v%v-irsa__%v__%v", local._prefixes["eks"], var.cluster_name, var.namespace, var.name) + role_description = "EKS IAM Role for ${var.cluster_name} for service account ${var.namespace}:${var.name}" + enable_ldap_creation = false + assume_policy_document = data.aws_iam_policy_document.assume_role_policy.json + attached_policies = [aws_iam_policy.app_policy1.arn] + + tags = merge( + local.base_tags, + local.common_tags, + var.tags, + var.application_tags, + ) +} + +output "app_role_arn" { + description = "ARN of IAM Role for Service account for cluster-autoscaler" + value = module.app_role.role_arn +} diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/service_account.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/service_account.tf new file mode 100644 index 0000000..2a0d9e0 --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/service_account.tf @@ -0,0 +1,11 @@ +# https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html + +resource "kubernetes_service_account" "app" { + metadata { + name = var.name + namespace = var.namespace + annotations = { + "eks.amazonaws.com/role-arn" = module.app_role.role_arn + } + } +} diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/tf-run.data b/examples/full-cluster/irsa-roles/cluster-autoscaler/tf-run.data new file mode 100644 index 0000000..336f6a5 --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/tf-run.data @@ -0,0 +1,6 @@ +COMMAND tf-directory-setup.py -l none +COMMAND setup-new-directory.sh +COMMAND tf-init -upgrade +POLICY +ALL +COMMAND tf-directory-setup.py -l s3 diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/variables.eks.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/variables.eks.tf new file mode 120000 index 0000000..7dd95db --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/variables.eks.tf @@ -0,0 +1 @@ +../variables.eks.tf \ No newline at end of file diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/variables.irsa.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/variables.irsa.tf new file mode 120000 index 0000000..840e7bb --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/variables.irsa.tf @@ -0,0 +1 @@ +../variables.irsa.tf \ No newline at end of file diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/variables.tags.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/variables.tags.tf new file mode 120000 index 0000000..2622118 --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/variables.tags.tf @@ -0,0 +1 @@ +../variables.tags.tf \ No newline at end of file diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/version.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/version.tf new file mode 120000 index 0000000..061373c --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/version.tf @@ -0,0 +1 @@ +../version.tf \ No newline at end of file diff --git 
a/examples/full-cluster/irsa-roles/data.eks.tf b/examples/full-cluster/irsa-roles/data.eks.tf
new file mode 100644
index 0000000..870e8c6
--- /dev/null
+++ b/examples/full-cluster/irsa-roles/data.eks.tf
@@ -0,0 +1,15 @@
+data "aws_eks_cluster" "cluster" {
+  name = var.cluster_name
+}
+
+data "aws_eks_cluster_auth" "cluster" {
+  name = var.cluster_name
+}
+
+locals {
+  aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster
+  # for main.tf
+  # aws_eks_cluster = aws_eks_cluster.eks_cluster
+  # for all subdirectories
+  aws_eks_cluster = data.aws_eks_cluster.cluster
+}
diff --git a/examples/full-cluster/irsa-roles/prefixes.tf b/examples/full-cluster/irsa-roles/prefixes.tf
new file mode 120000
index 0000000..e0bf5ad
--- /dev/null
+++ b/examples/full-cluster/irsa-roles/prefixes.tf
@@ -0,0 +1 @@
+../prefixes.tf
\ No newline at end of file
diff --git a/examples/full-cluster/irsa-roles/providers.tf b/examples/full-cluster/irsa-roles/providers.tf
new file mode 120000
index 0000000..7244d01
--- /dev/null
+++ b/examples/full-cluster/irsa-roles/providers.tf
@@ -0,0 +1 @@
+../providers.tf
\ No newline at end of file
diff --git a/examples/full-cluster/irsa-roles/remote_state.yml b/examples/full-cluster/irsa-roles/remote_state.yml
new file mode 100644
index 0000000..7af0a5e
--- /dev/null
+++ b/examples/full-cluster/irsa-roles/remote_state.yml
@@ -0,0 +1,9 @@
+directory: "applications/apps-adsd-eks/vpc/east/vpc3/apps/eks-adsd-cumulus-qa/irsa-roles"
+profile: "252960665057-ma6-gov"
+bucket: "inf-tfstate-252960665057"
+bucket_region: "us-gov-east-1"
+region: "us-gov-east-1"
+regions: ["us-gov-east-1"]
+account_id: "252960665057"
+account_alias: "ma6-gov"
+aws_environment: "gov"
diff --git a/examples/full-cluster/irsa-roles/tf-run.data b/examples/full-cluster/irsa-roles/tf-run.data
new file mode 100644
index 0000000..151331f
--- /dev/null
+++ b/examples/full-cluster/irsa-roles/tf-run.data
@@ -0,0 +1,7 @@
+COMMAND tf-directory-setup.py -l none -f
+COMMAND setup-new-directory.sh
+COMMAND tf-init -upgrade
+ALL
+COMMAND tf-directory-setup.py -l s3
+
+COMMENT cd cluster-roles and tf-run.sh apply
diff --git a/examples/full-cluster/irsa-roles/variables.eks.tf b/examples/full-cluster/irsa-roles/variables.eks.tf
new file mode 120000
index 0000000..7dd95db
--- /dev/null
+++ b/examples/full-cluster/irsa-roles/variables.eks.tf
@@ -0,0 +1 @@
+../variables.eks.tf
\ No newline at end of file
diff --git a/examples/full-cluster/irsa-roles/variables.irsa.tf b/examples/full-cluster/irsa-roles/variables.irsa.tf
new file mode 100644
index 0000000..75e3046
--- /dev/null
+++ b/examples/full-cluster/irsa-roles/variables.irsa.tf
@@ -0,0 +1,9 @@
+variable "namespace" {
+  description = "K8S namespace for IAM Role for Service Account (per-pod)"
+  type        = string
+}
+
+variable "name" {
+  description = "K8S service account name for IAM Role for Service Account (per-pod)"
+  type        = string
+}
diff --git a/examples/full-cluster/irsa-roles/variables.tags.tf b/examples/full-cluster/irsa-roles/variables.tags.tf
new file mode 120000
index 0000000..2622118
--- /dev/null
+++ b/examples/full-cluster/irsa-roles/variables.tags.tf
@@ -0,0 +1 @@
+../variables.tags.tf
\ No newline at end of file
diff --git a/examples/full-cluster/irsa-roles/version.tf b/examples/full-cluster/irsa-roles/version.tf
new file mode 120000
index 0000000..061373c
--- /dev/null
+++ b/examples/full-cluster/irsa-roles/version.tf
@@ -0,0 +1 @@
+../version.tf
\ No newline at end of file
diff --git a/examples/full-cluster/kubeconfig.tf b/examples/full-cluster/kubeconfig.tf
new file mode 100644
index 0000000..5a6333e
--- /dev/null
+++ b/examples/full-cluster/kubeconfig.tf
@@ -0,0 +1,29 @@
+resource "null_resource" "kubeconfig" {
+  triggers = {
+    always_run = timestamp()
+  }
+  provisioner "local-exec" {
+    command = "which kubectl > /dev/null 2>&1; if [ $? != 0 ]; then echo 'missing kubectl'; exit 1; else exit 0; fi"
+  }
+  provisioner "local-exec" {
+    command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'"
+  }
+  provisioner "local-exec" {
+    environment = {
+      AWS_PROFILE = var.profile
+      AWS_REGION  = local.region
+    }
+    command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/kube.config"
+  }
+  depends_on = [aws_eks_cluster.eks_cluster]
+}
+
+#---
+# call it like this:
+#---
+## provisioner "local-exec" {
+##   environment = {
+##     KUBECONFIG = "${path.root}/setup/kube.config"
+##   }
+##   command = "kubectl set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true"
+## }
diff --git a/examples/full-cluster/locals.tf b/examples/full-cluster/locals.tf
new file mode 100644
index 0000000..b7b1696
--- /dev/null
+++ b/examples/full-cluster/locals.tf
@@ -0,0 +1,4 @@
+locals {
+  region = var.region
+}
+
diff --git a/examples/full-cluster/main.tf b/examples/full-cluster/main.tf
new file mode 100644
index 0000000..14e6936
--- /dev/null
+++ b/examples/full-cluster/main.tf
@@ -0,0 +1,219 @@
+data "aws_vpc" "eks_vpc" {
+  filter {
+    name   = "tag:Name"
+    values = [var.eks_vpc_name]
+  }
+}
+
+data "aws_subnet_ids" "subnets" {
+  vpc_id = data.aws_vpc.eks_vpc.id
+  filter {
+    name   = "tag:Name"
+    values = [var.subnets_name]
+  }
+}
+
+data "aws_subnet" "subnets" {
+  for_each = toset(data.aws_subnet_ids.subnets.ids)
+  id       = each.key
+}
+
+data "aws_ebs_default_kms_key" "current" {}
+
+data "aws_kms_key" "ebs_key" {
+  key_id = data.aws_ebs_default_kms_key.current.key_arn
+}
+
+# in ew, need to exclude us-east-1e for now, as it lacks sufficient resources to establish the cluster
+locals {
+  vpc_id         = data.aws_vpc.eks_vpc.id
+  vpc_cidr_block = data.aws_vpc.eks_vpc.cidr_block
+  subnets        = [for k, v in data.aws_subnet.subnets : v.id if length(regexall("us-east-1e", v.availability_zone)) == 0]
+  s3_base_arn    = format("arn:%v:%v:::%%v", data.aws_arn.current.partition, "s3")
+
+  base_tags = {
+    "eks-cluster-name"      = var.cluster_name
+    "boc:tf_module_version" = local._module_version
+    "boc:created_by"        = "terraform"
+  }
+
+# https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html
+  autoscale_tags = {
+    (format("k8s.io/cluster-autoscaler/%v", var.cluster_name)) = "owned"
+    "k8s.io/cluster-autoscaler/enabled"                        = "TRUE"
+  }
+
+}
+
+resource "aws_eks_cluster" "eks_cluster" {
+  name                      = var.cluster_name
+  version                   = var.cluster_version
+  role_arn                  = module.role_eks-cluster.role_arn
+  enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]
+
+  vpc_config {
+    subnet_ids              = local.subnets
+    security_group_ids      = [aws_security_group.additional_eks_cluster_sg.id]
+    endpoint_private_access = true
+    endpoint_public_access  = true
+    public_access_cidrs     = var.census_public_cidr
+  }
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.tags,
+    var.application_tags,
+  )
+
+  # Ensure that IAM Role permissions are created before and deleted after EKS Cluster handling.
+  # Otherwise, EKS will not be able to properly delete EKS managed EC2 infrastructure such as Security Groups.
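+  # (A hedged example of that ordering with the plain terraform CLI, though
+  # this repo normally drives applies through its tf-run wrappers:)
+  #
+  # ```shell
+  # terraform apply -target=module.role_eks-cluster -target=module.role_eks-nodegroup
+  # terraform apply
+  # ```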
+  depends_on = [
+    module.role_eks-cluster,
+    module.role_eks-nodegroup
+  ]
+}
+
+resource "aws_eks_node_group" "eks-nodegroup" {
+  cluster_name    = aws_eks_cluster.eks_cluster.name
+  node_group_name = format("%v%v-nodegroup", local._prefixes["eks"], var.cluster_name)
+  node_role_arn   = module.role_eks-nodegroup.role_arn
+  subnet_ids      = local.subnets
+  # instance_types = [var.eks_instance_type]
+  # disk_size      = var.eks_instance_disk_size
+
+  scaling_config {
+    desired_size = var.eks_ng_desire_size
+    max_size     = var.eks_ng_max_size
+    min_size     = var.eks_ng_min_size
+  }
+
+  launch_template {
+    id      = aws_launch_template.eks-nodegroup.id
+    version = aws_launch_template.eks-nodegroup.latest_version
+  }
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.tags,
+    var.application_tags,
+    local.autoscale_tags,
+  )
+
+
+  # Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling.
+  # Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces.
+  depends_on = [
+    module.role_eks-cluster,
+    module.role_eks-nodegroup,
+  ]
+}
+
+#---
+# Launch Template with AMI
+#---
+#data "aws_ssm_parameter" "cluster" {
+#  name = "/aws/service/eks/optimized-ami/${aws_eks_cluster.eks_cluster.version}/amazon-linux-2/recommended/image_id"
+#}
+
+#data "aws_launch_template" "cluster" {
+#  name = aws_launch_template.cluster.name
+#
+#  depends_on = [aws_launch_template.cluster]
+#}
+
+locals {
+  launch_template_tags = {
+    "Name"                                                 = format("%v%v-nodegroup-instance-name", local._prefixes["eks"], var.cluster_name)
+    (format("kubernetes.io/cluster/%v", var.cluster_name)) = "owned"
+  }
+}
+
+resource "aws_launch_template" "eks-nodegroup" {
+  instance_type          = var.eks_instance_type
+  name                   = format("%v%v-launch-template", local._prefixes["eks"], var.cluster_name)
+  update_default_version = true
+  key_name               = aws_key_pair.cluster_keypair.key_name
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.tags,
+    var.application_tags,
+  )
+
+  tag_specifications {
+    resource_type = "instance"
+
+    tags = merge(
+      local.base_tags,
+      tomap({ "boc:created_by" = "eks-launch-template" }),
+      local.common_tags,
+      local.launch_template_tags,
+      var.tags,
+    )
+  }
+
+  tag_specifications {
+    resource_type = "volume"
+
+    tags = merge(
+      local.base_tags,
+      tomap({ "boc:created_by" = "eks-launch-template" }),
+      local.common_tags,
+      var.tags,
+      var.application_tags,
+    )
+  }
+
+  tag_specifications {
+    resource_type = "network-interface"
+
+    tags = merge(
+      local.base_tags,
+      tomap({ "boc:created_by" = "eks-launch-template" }),
+      local.common_tags,
+      var.tags,
+      var.application_tags,
+    )
+  }
+
+  # tag_specifications {
+  #   resource_type = "snapshot"
+  #
+  #   tags = merge(
+  #     local.base_tags,
+  #     tomap({ "boc:created_by" = "eks-launch-template" }),
+  #     local.common_tags,
+  #     var.tags,
+  #   )
+  # }
+
+  block_device_mappings {
+    device_name = "/dev/xvda"
+
+    ebs {
+      volume_size           = var.eks_instance_disk_size
+      delete_on_termination = true
+      encrypted             = true
+      # kms_key_id = data.aws_kms_key.ebs_key.arn
+      # kms_key_id = data.aws_ebs_default_kms_key.current.key_arn
+      kms_key_id = data.aws_kms_key.ebs_key.arn
+    }
+  }
+
+  user_data = base64encode(local.eks-node-private-userdata)
+}
+
+#### User data for worker launch
+
+locals {
+  eks-node-private-userdata = templatefile(
+    "${path.module}/templates/node-private-userdata.tmpl", {
+      endpoint     = aws_eks_cluster.eks_cluster.endpoint
+      cluster_ca   = aws_eks_cluster.eks_cluster.certificate_authority[0].data
+      cluster_name = var.cluster_name
+    }
+  )
+}
diff --git a/examples/full-cluster/oidc.tf b/examples/full-cluster/oidc.tf
new file mode 100644
index 0000000..311b99d
--- /dev/null
+++ b/examples/full-cluster/oidc.tf
@@ -0,0 +1,32 @@
+# Most of this file references the AWS documentation to install the
+# Amazon EFS CSI driver. This documentation is found here:
+# https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html
+
+data "tls_certificate" "certs" {
+  url = aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer
+}
+
+# Create the oidc provider for the service account. This is a prerequisite
+# for using the EFS CSI Driver:
+# https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html
+
+resource "aws_iam_openid_connect_provider" "oidc" {
+  client_id_list  = ["sts.amazonaws.com"]
+  thumbprint_list = [data.tls_certificate.certs.certificates[0].sha1_fingerprint]
+  url             = aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer
+}
+
+locals {
+  oidc_provider_url = replace(aws_iam_openid_connect_provider.oidc.url, "https://", "")
+  oidc_provider_arn = aws_iam_openid_connect_provider.oidc.arn
+}
+
+output "oidc_provider_url" {
+  description = "OpenID Connect provider URL"
+  value       = local.oidc_provider_url
+}
+
+output "oidc_provider_arn" {
+  description = "OpenID Connect provider ARN"
+  value       = local.oidc_provider_arn
+}
diff --git a/examples/full-cluster/outputs.tf b/examples/full-cluster/outputs.tf
new file mode 100644
index 0000000..9fa1e23
--- /dev/null
+++ b/examples/full-cluster/outputs.tf
@@ -0,0 +1,58 @@
+#output "cluster" {
+#  description = "Full EKS Cluster object output"
+#  value       = aws_eks_cluster.eks_cluster
+#}
+
+output "cluster_name" {
+  description = "The name of the cluster that was created."
+  value       = aws_eks_cluster.eks_cluster.name
+}
+
+output "cluster_endpoint" {
+  description = "The endpoint used to reach the Kubernetes API server."
+  value       = aws_eks_cluster.eks_cluster.endpoint
+}
+
+output "cluster_certificate_authority_data" {
+  description = "Certificate data required to successfully communicate with the Kubernetes API server."
+  value       = aws_eks_cluster.eks_cluster.certificate_authority[0].data
+}
+
+output "cluster_auth_token" {
+  description = "The token required to authenticate with the cluster."
+#  value       = data.aws_eks_cluster_auth.eks_auth.token
+  value       = local.aws_eks_cluster_auth.token
+  sensitive   = true
+}
+
+output "cluster_worker_sg_id" {
+  description = "Security group id attached to the cluster worker nodes."
+  value       = aws_security_group.all_worker_mgmt.id
+}
+
+output "cluster_sg_id" {
+  description = "Security group id attached to the cluster control plane."
+  value       = aws_security_group.additional_eks_cluster_sg.id
+}
+
+output "cluster_subnet_ids" {
+  description = "Subnet IDs used to create the cluster"
+  value       = local.subnets
+}
+
+output "cluster_vpc_id" {
+  description = "VPC ID on which the cluster was created"
+  value       = local.vpc_id
+}
+
+## # secondary subnets
+## output "cluster_cni_subnet_ids" {
+##   description = "Subnet IDs used to create the cluster on the CNI custom network."
+##   value       = local.cni_subnets
+## }
+##
+## output "cluster_cni_custom_sg_id" {
+##   description = "Security group id attached to the cluster worker nodes for CNI custom networking."
+##   value       = aws_security_group.cni_custom_sg.id
+## }
+##
diff --git a/examples/full-cluster/policy.tf b/examples/full-cluster/policy.tf
new file mode 100644
index 0000000..efa06b0
--- /dev/null
+++ b/examples/full-cluster/policy.tf
@@ -0,0 +1,186 @@
+resource "aws_iam_policy" "nlb-policy" {
+  name        = format("%v%v-nlb", local._prefixes["eks-policy"], var.cluster_name)
+  path        = "/"
+  description = "Allow configuration of load balancers (ELB/NLB)"
+  policy      = data.aws_iam_policy_document.nlb-policy.json
+
+  tags = merge(
+    local.base_tags,
+    var.tags,
+    var.application_tags,
+  )
+}
+
+# Q: why CreateSecurityGroup
+# TBD: refine resources to limit only to eks configurations
+data "aws_iam_policy_document" "nlb-policy" {
+  statement {
+    sid    = "EKSNLBConfiguration"
+    effect = "Allow"
+    actions = [
+      "elasticloadbalancing:*",
+      "ec2:CreateSecurityGroup",
+      "ec2:Describe*",
+    ]
+    resources = ["*"]
+  }
+}
+
+resource "aws_iam_policy" "cloudwatch-policy" {
+  name        = format("%v%v-cloudwatch", local._prefixes["eks-policy"], var.cluster_name)
+  path        = "/"
+  description = "Allow sending metric data to CloudWatch"
+  policy      = data.aws_iam_policy_document.cloudwatch-policy.json
+
+  tags = merge(
+    local.base_tags,
+    var.tags,
+    var.application_tags,
+  )
+}
+
+# TBD: refine resources to limit only to eks configurations
+data "aws_iam_policy_document" "cloudwatch-policy" {
+  statement {
+    sid    = "EKSCloudwatchMetrics"
+    effect = "Allow"
+    actions = [
+      "cloudwatch:PutMetricData",
+    ]
+    resources = ["*"]
+  }
+}
+
+#---
+# cluster admin policy
+#---
+resource "aws_iam_policy" "cluster-admin-policy" {
+  name        = format("%v%v-cluster-admin", local._prefixes["eks-policy"], var.cluster_name)
+  path        = "/"
+  description = "Allow for administration of the cluster ${var.cluster_name} using AWS resources"
+  policy      = data.aws_iam_policy_document.cluster-admin-policy.json
+
+  tags = merge(
+    local.base_tags,
+    var.tags,
+    var.application_tags,
+  )
+}
+
+data "aws_iam_policy_document" "cluster-admin-policy" {
+  dynamic "statement" {
+    for_each = local.admin_policy_statements
+    iterator = s
+    content {
+      sid       = format("%v%vAccess", lookup(s.value, "effect", "Allow"), s.key)
+      effect    = lookup(s.value, "effect", "Allow")
+      actions   = lookup(s.value, "actions", [])
+      resources = lookup(s.value, "resources", [])
+    }
+  }
+}
+
+locals {
+  base_arn      = format("arn:%v:%%v:%v:%v:%%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id)
+  iam_arn       = format("arn:%v:iam::%v:%%v", data.aws_arn.current.partition, data.aws_caller_identity.current.account_id)
+  common_arn    = format("arn:%v:%%v:%v:%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id)
+  eks_resources = ["cluster", "addon", "nodegroup", "identityproviderconfig"]
+
+  admin_policy_statements = {
+    ECRRead = {
+      actions = [
+        "ecr:Describe*",
+        "ecr:Get*",
+        "ecr:ListImages",
+        "ecr:BatchGetImage",
+        "ecr:BatchCheckLayerAvailability",
+        "ecr:GetDownloadUrlForLayer",
+      ]
+      resources = ["*"]
+    }
+    ECRWrite = {
+      actions = [
+        "ecr:BatchDeleteImage",
+        "ecr:CompleteLayerUpload",
+        "ecr:CreateRepository",
+        "ecr:DeleteRepository",
+        "ecr:InitiateLayerUpload",
+        "ecr:PutImage",
+        "ecr:UploadLayerPart"
+      ]
+      resources = [format(local.common_arn, "ecr", format("repository/eks/%v/*", var.cluster_name))]
+    }
+    EKSRead = {
+      actions = [
+        "eks:ListClusters",
+        "eks:ListAddons",
+        "eks:ListNodegroups",
+        "eks:DescribeCluster",
+        "eks:DescribeAddon*",
+        "eks:DescribeNodegroup",
+      ]
+      resources = [
+ format(local.common_arn, "eks", "cluster/*"), + format(local.common_arn, "eks", "addon/*"), + format(local.common_arn, "eks", "addons/*"), + format(local.common_arn, "eks", "/addons/*"), + format(local.common_arn, "eks", "nodegroup/*"), + ] + } + IAMRead = { + actions = [ + "iam:ListRoles", + ] + resources = ["*"] + } + SSMGet = { + actions = [ + "ssm:GetParameter", + ] + resources = [ + format("arn:%v:%v:%v:%v:%v", data.aws_arn.current.partition, "ssm", data.aws_region.current.name, "", "parameter/aws/service/eks/*") + ] + } + EKSReadMyClusters = { + actions = [ + "eks:List*", + "eks:Read*", + "eks:Describe*", + "eks:AccessKubernetesApi", + ] + resources = flatten(concat( + list(format(local.common_arn, "eks", format("/clusters/%v/addons", var.cluster_name))), + [for r in local.eks_resources : list( + format(local.common_arn, "eks", format("%v/%v", r, var.cluster_name)), + format(local.common_arn, "eks", format("%v/%v/*", r, var.cluster_name)) + )])) + } + } +} + + +#--- +# cluster admin assume policy +#--- +resource "aws_iam_policy" "cluster-admin_assume_policy" { + name = format("%v%v-cluster-admin-assume", local._prefixes["eks-policy"], var.cluster_name) + path = "/" + description = "Allow for assume role to the cluster-admin role for ${var.cluster_name}" + policy = data.aws_iam_policy_document.cluster-admin_assume_policy.json + + tags = merge( + local.base_tags, + var.tags, + var.application_tags, + tomap({ "Name" = format("%v%v-cluster-admin-assume", local._prefixes["eks-policy"], var.cluster_name)}), + ) +} + +data "aws_iam_policy_document" "cluster-admin_assume_policy" { + statement { + sid = "AllowSTSAssumeClusterAdminRole" + effect = "Allow" + actions = ["sts:AssumeRole"] + resources = [ module.role_cluster-admin.role_arn ] + } +} diff --git a/examples/full-cluster/prefixes.tf b/examples/full-cluster/prefixes.tf new file mode 100644 index 0000000..03303f1 --- /dev/null +++ b/examples/full-cluster/prefixes.tf @@ -0,0 +1,34 @@ +locals { + _prefixes = { + "efs" = "v-efs-" + "s3" = "v-s3-" + "ebs" = "v-ebs-" + "kms" = "k-kms-" + "role" = "r-" + "policy" = "p-" + "group" = "g-" + "security-group" = "" # "sg-" + # VPC + "vpc" = "" + "dhcp-options" = "" + "vpc-peer" = "vpcp-" + "route-table" = "route-" + "subnet" = "" + "vpc-endpoint" = "vpce-" + "elastic-ip" = "eip-" + "nat-gateway" = "nat-" + "internet-gateway" = "igw-" + "network-acl" = "nacl-" + "customer-gateway" = "cgw-" + "vpn-gateway" = "vpcg-" + "vpn-connection" = "vpn_" + "log-group" = "lg-" + "log-stream" = "lgs-" + # EKS + "eks" = "eks-" + "eks-user" = "s-eks-" + "eks-role" = "r-eks-" + "eks-policy" = "p-eks-" + "eks-security-group" = "eks-" # "sg-eks-" + } +} diff --git a/examples/full-cluster/providers.tf b/examples/full-cluster/providers.tf new file mode 100644 index 0000000..815e4c1 --- /dev/null +++ b/examples/full-cluster/providers.tf @@ -0,0 +1,19 @@ +terraform { + required_version = ">= 0.12.31" +} + +provider "kubernetes" { + host = local.aws_eks_cluster.endpoint + + cluster_ca_certificate = base64decode(local.aws_eks_cluster.certificate_authority[0].data) + token = local.aws_eks_cluster_auth.token +} + +provider "helm" { + kubernetes { + host = local.aws_eks_cluster.endpoint + + cluster_ca_certificate = base64decode(local.aws_eks_cluster.certificate_authority[0].data) + token = local.aws_eks_cluster_auth.token + } +} diff --git a/examples/full-cluster/role.tf b/examples/full-cluster/role.tf new file mode 100644 index 0000000..15d17f6 --- /dev/null +++ b/examples/full-cluster/role.tf @@ -0,0 +1,162 @@ +#--- +# 
cluster
+#---
+locals {
+  cluster_managed_policy_list = [
+    "AmazonEKSClusterPolicy",
+    "AmazonEC2FullAccess",
+    "CloudWatchLogsFullAccess",
+  ]
+  cluster_managed_policies = [for p in data.aws_iam_policy.cluster_managed_policies : p.arn]
+}
+
+data "aws_iam_policy" "cluster_managed_policies" {
+  for_each = toset(local.cluster_managed_policy_list)
+  name     = each.key
+}
+
+# this needs the two policies nlb-policy and cloudwatch-policy created first
+
+module "role_eks-cluster" {
+  source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git"
+
+  role_name              = format("%v%v-cluster", local._prefixes["eks"], var.cluster_name)
+  role_description       = "EKS Cluster Role for ${var.cluster_name}"
+  enable_ldap_creation   = false
+  assume_policy_document = data.aws_iam_policy_document.eks_assume.json
+  attached_policies      = concat([aws_iam_policy.nlb-policy.arn, aws_iam_policy.cloudwatch-policy.arn], local.cluster_managed_policies)
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.tags,
+    var.application_tags,
+  )
+}
+
+data "aws_iam_policy_document" "eks_assume" {
+  statement {
+    sid     = "EKSAssumeRole"
+    effect  = "Allow"
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "Service"
+      identifiers = ["eks.amazonaws.com"]
+    }
+  }
+}
+
+output "role_eks-cluster_arn" {
+  description = "Role ARN for EKS Cluster Role"
+  value       = module.role_eks-cluster.role_arn
+}
+
+#---
+# nodegroup
+#---
+locals {
+  nodegroup_managed_policy_list = [
+    "AmazonEKSWorkerNodePolicy",
+    "AmazonEKS_CNI_Policy",
+    "AmazonEC2ContainerRegistryPowerUser",
+    "AmazonEC2ContainerRegistryReadOnly",
+    "CloudWatchLogsFullAccess",
+    "AmazonS3FullAccess",
+  ]
+  nodegroup_managed_policies = [for p in data.aws_iam_policy.nodegroup_managed_policies : p.arn]
+}
+
+data "aws_iam_policy" "nodegroup_managed_policies" {
+  for_each = toset(local.nodegroup_managed_policy_list)
+  name     = each.key
+}
+
+module "role_eks-nodegroup" {
+  source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git"
+
+  role_name              = format("%v%v-nodegroup", local._prefixes["eks"], var.cluster_name)
+  role_description       = "EKS Nodegroup Role for ${var.cluster_name}"
+  enable_ldap_creation   = false
+  assume_policy_document = data.aws_iam_policy_document.ec2_assume.json
+  attached_policies      = concat(local.nodegroup_managed_policies)
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.tags,
+    var.application_tags,
+  )
+}
+
+#---
+# STS: ec2 assume
+#---
+data "aws_iam_policy_document" "ec2_assume" {
+  statement {
+    sid     = "EKSAssumeRole"
+    effect  = "Allow"
+    actions = ["sts:AssumeRole"]
+
+    principals {
+      type        = "Service"
+      identifiers = ["ec2.amazonaws.com"]
+    }
+  }
+}
+
+output "role_eks-nodegroup-role_arn" {
+  description = "Role ARN for EKS Cluster Nodegroup Role"
+  value       = module.role_eks-nodegroup.role_arn
+}
+
+#---
+# cluster-admin
+#---
+module "role_cluster-admin" {
+  source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git"
+
+  role_name              = format("%v%v-cluster-admin", local._prefixes["eks"], var.cluster_name)
+  role_description       = "SAML EKS cluster admin Role for ${var.cluster_name}"
+  enable_ldap_creation   = false
+  assume_policy_document = data.aws_iam_policy_document.allow_sts.json
+# assume_policy_document = data.aws_iam_policy_document.cluster-admin_combined.json
+  attached_policies      = [aws_iam_policy.cluster-admin-policy.arn]
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.tags,
+    var.application_tags,
+  )
+}
+
+output "role_cluster-admin-role_arn" {
+  description = "Role ARN for EKS Cluster Admin Role"
+  value       = module.role_cluster-admin.role_arn
+}
+
+# data "aws_iam_policy_document" "empty" {}
+
+data "aws_iam_policy_document" "allow_sts" {
+  statement {
+    sid     = "AllowSTSAssume"
+    effect  = "Allow"
+    actions = ["sts:AssumeRole"]
+    principals {
+      type = "AWS"
+      identifiers = [
+        format(local.iam_arn, "root"),
+      ]
+    }
+  }
+}
+
+# data "aws_iam_policy_document" "cluster-admin_combined" {
+#   source_policy_documents = [
+#     data.aws_iam_policy_document.allow_sts.json,
+#     data.aws_iam_policy_document.saml_assume.json,
+#   ]
+# }
+#
diff --git a/examples/full-cluster/saml.tf b/examples/full-cluster/saml.tf
new file mode 100644
index 0000000..cc86aa9
--- /dev/null
+++ b/examples/full-cluster/saml.tf
@@ -0,0 +1,26 @@
+# Because we can't link into remote state from the parent account, we construct
+# the SAML provider ARN here; there is also no data source for SAML providers.
+
+locals {
+  saml_provider_arn = format(local.common_arn, "iam", "saml-provider/Census_TCO_IDMS")
+  saml_url          = var.aws_environment == "gov" ? "https://signin.amazonaws-us-gov.com/saml" : "https://signin.aws.amazon.com/saml"
+}
+
+data "aws_iam_policy_document" "saml_assume" {
+  statement {
+    sid     = "SAMLFederationCensusIdP"
+    effect  = "Allow"
+    actions = ["sts:AssumeRoleWithSAML"]
+
+    principals {
+      type        = "Federated"
+      identifiers = [local.saml_provider_arn]
+    }
+
+    condition {
+      test     = "StringEquals"
+      variable = "SAML:aud"
+      values   = [local.saml_url]
+    }
+  }
+}
diff --git a/examples/full-cluster/securitygroup.tf b/examples/full-cluster/securitygroup.tf
new file mode 100644
index 0000000..70a3c10
--- /dev/null
+++ b/examples/full-cluster/securitygroup.tf
@@ -0,0 +1,88 @@
+resource "aws_security_group" "additional_eks_cluster_sg" {
+  name = format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name)
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.tags,
+    var.application_tags,
+    tomap({ "Name" = format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name) }),
+  )
+
+  vpc_id = data.aws_vpc.eks_vpc.id
+
+  ingress {
+    from_port = 0
+    to_port   = 0
+    protocol  = "-1"
+
+    security_groups = [
+      aws_security_group.all_worker_mgmt.id,
+      ## aws_security_group.cni_custom_sg.id
+    ]
+  }
+
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+}
+
+resource "aws_security_group" "all_worker_mgmt" {
+  name = format("%v%v-all-worker-mgmt", local._prefixes["eks-security-group"], var.cluster_name)
+
+  tags = merge(
+    local.base_tags,
+    local.common_tags,
+    var.tags,
+    var.application_tags,
+    tomap({ "Name" = format("%v%v-all-worker-mgmt", local._prefixes["eks-security-group"], var.cluster_name) }),
+  )
+
+  vpc_id = data.aws_vpc.eks_vpc.id
+
+  ingress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = [local.vpc_cidr_block]
+  }
+
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+}
+
+## resource "aws_security_group" "cni_custom_sg" {
+##   name = format("%v%v-cni-custom-networking", local._prefixes["eks-security-group"], var.cluster_name)
+##
+##   tags = merge(
+##     local.base_tags,
+##     local.common_tags,
+##     var.tags,
+##   )
+##
+##   vpc_id = data.aws_vpc.eks_vpc.id
+##
+##   ingress {
+##     from_port = 0
+##     to_port   = 0
+##     protocol  = -1
+##     cidr_blocks = [
+##       local.vpc_cidr_block,
+##       var.cni_vpc_cidr_block,
+##     ]
+##   }
+##
+##   egress {
+##     from_port   = 0
+##     to_port     = 0
+##     protocol    = "-1"
+##     cidr_blocks = ["0.0.0.0/0"]
+##   }
+## }
diff --git a/examples/full-cluster/settings.auto.tfvars.example b/examples/full-cluster/settings.auto.tfvars.example
new file mode 100644
index 0000000..b73fe44
--- /dev/null
+++ b/examples/full-cluster/settings.auto.tfvars.example
@@ -0,0 +1,10 @@
+cluster_name           = "org-project-env"
+cluster_version        = "1.21"
+region                 = "us-gov-east-1"
+domain                 = "org-project-env.env.domain.census.gov"
+eks_instance_disk_size = 40
+eks_vpc_name           = "*vpcshortname*"
+eks_instance_type      = "t3.xlarge"
+eks_ng_desire_size     = 3
+eks_ng_max_size        = 15
+eks_ng_min_size        = 3
diff --git a/examples/full-cluster/setup-env.sh b/examples/full-cluster/setup-env.sh
new file mode 100644
index 0000000..641465f
--- /dev/null
+++ b/examples/full-cluster/setup-env.sh
@@ -0,0 +1,6 @@
+
+export AWS_PROFILE=252960665057-ma6-gov
+export ECR_NAME="252960665057.dkr.ecr.us-gov-east-1.amazonaws.com"
+export HTTP_PROXY=http://proxy.tco.census.gov:3128
+export HTTPS_PROXY=http://proxy.tco.census.gov:3128
+export NO_PROXY=.census.gov
diff --git a/examples/full-cluster/templates/node-private-userdata.tmpl b/examples/full-cluster/templates/node-private-userdata.tmpl
new file mode 100644
index 0000000..0770f07
--- /dev/null
+++ b/examples/full-cluster/templates/node-private-userdata.tmpl
@@ -0,0 +1,9 @@
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="==MYBOUNDARY=="
+
+--==MYBOUNDARY==
+Content-Type: text/x-shellscript; charset="us-ascii"
+#!/bin/bash -xe
+sudo /etc/eks/bootstrap.sh --apiserver-endpoint "$endpoint" --b64-cluster-ca "$cluster_ca" "$cluster_name"
+--==MYBOUNDARY==--\
+
diff --git a/examples/full-cluster/tf-run.data b/examples/full-cluster/tf-run.data
new file mode 100644
index 0000000..43c6b27
--- /dev/null
+++ b/examples/full-cluster/tf-run.data
@@ -0,0 +1,23 @@
+COMMENT make sure the private-lb subnet and container subnets are tagged properly (see README.md)
+STOP then continue at step 3
+COMMAND tf-directory-setup.py -l none -f
+COMMAND setup-new-directory.sh
+COMMAND tf-init -upgrade
+
+POLICY
+
+COMMENT EC2 key pairs
+null_resource.generate_keypair
+aws_key_pair.cluster_keypair
+COMMAND tf-directory-setup.py -l s3
+
+ALL
+
+COMMENT cd aws-auth and tf-run.sh apply
+STOP
+COMMENT cd efs and tf-run.sh apply
+STOP
+COMMENT cd irsa-roles and tf-run.sh apply
+STOP
+COMMENT cd common-services and tf-run.sh apply
+STOP
diff --git a/examples/full-cluster/variables.eks.tf b/examples/full-cluster/variables.eks.tf
new file mode 100644
index 0000000..b6ba4ca
--- /dev/null
+++ b/examples/full-cluster/variables.eks.tf
@@ -0,0 +1,58 @@
+variable "eks_vpc_name" {
+  description = "Define the VPC name that will be used by this cluster"
+  type        = string
+  default     = "*UNKNOWN*"
+}
+
+variable "subnets_name" {
+  description = "Define the name of the subnets to be used by this cluster"
+  type        = string
+  default     = "*-container-*"
+}
+
+variable "cluster_name" {
+  description = "EKS cluster name component used throughout the EKS cluster describing its purpose (ex: dice-dev)"
+  type        = string
+  default     = null
+}
+
+variable "cluster_version" {
+  description = "The EKS version number, see https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html"
+  type        = string
+  default     = "1.21"
+}
+
+variable "eks_instance_type" {
+  description = "EKS worker node instance type"
+  type        = string
+  default     = "t3.xlarge"
+}
+variable "eks_ng_desire_size" {
+  description = "Node Group desired size, default is 4"
+  type        = number
+  default     = 4
+}
+
+variable "eks_ng_min_size" {
+  description = "Node Group minimum size, default is 4"
+  type        = number
+  default     = 4
+}
+
+variable "eks_ng_max_size" {
+  description = "Node Group maximum size, default is 16"
+  type        = number
+  default     = 16
+}
+
+variable "eks_instance_disk_size" {
+  description = "The size of the disk in gigabytes"
+  type        = number
+  default     = 40
+}
+
+variable "domain" {
+  description = "The DNS domain name of the cluster. Defaults to empty which causes the sample application to use the domain assigned to the load balancer of the istio ingress gateway."
+  type        = string
+  default     = ""
+}
diff --git a/examples/full-cluster/variables.tags.tf b/examples/full-cluster/variables.tags.tf
new file mode 100644
index 0000000..6e2a62e
--- /dev/null
+++ b/examples/full-cluster/variables.tags.tf
@@ -0,0 +1,9 @@
+# this exists in CAT, but not in other accounts. At some point, remove this file and all references to
+# var.tags
+
+variable "tags" {
+  description = "AWS Tags to apply to appropriate resources."
+  type        = map(string)
+  default     = {}
+}
+
diff --git a/examples/full-cluster/variables.vpc.tf b/examples/full-cluster/variables.vpc.tf
new file mode 100644
index 0000000..9126406
--- /dev/null
+++ b/examples/full-cluster/variables.vpc.tf
@@ -0,0 +1,61 @@
+variable "vpc_name" {
+  description = "VPC Name including environment (if necessary), excluding vpc{N}"
+  type        = string
+}
+
+variable "vpc_index" {
+  description = "VPC index number. This is used for NACL rule number calculations."
+  type        = number
+}
+
+variable "vpc_cidr_block" {
+  description = "VPC CIDR Block"
+  type        = string
+}
+
+variable "vpc_short_name" {
+  description = "VPC short name component, vpc{index}"
+  type        = string
+}
+
+variable "vpc_environment" {
+  description = "VPC environment purpose (common, shared, dev, stage, ite, prod)"
+  type        = string
+  default     = ""
+}
+
+variable "vpc_enable_igw" {
+  description = "Enable AWS Internet Gateway (IGW) on the VPC (true | false[x])"
+  type        = bool
+  default     = false
+}
+
+variable "vpc_enable_nat" {
+  description = "Enable AWS NAT Gateway on the VPC (true | false[x])"
+  type        = bool
+  default     = false
+}
+
+variable "vpc_enable_vpn" {
+  description = "Enable AWS VPN Configuration on the VPC (true[x] | false)"
+  type        = bool
+  default     = true
+}
+
+variable "vpc_enable_awsdns" {
+  description = "Enable AWS DNS on the VPC"
+  type        = bool
+  default     = false
+}
+
+variable "vpn_settings" {
+  description = "VPN Connection details array of site, bgp_asn_id and ip_address"
+  type = list(object(
+    {
+      site       = string
+      bgp_asn_id = number
+      ip_address = string
+    }
+  ))
+  default = []
+}
diff --git a/examples/full-cluster/version.tf b/examples/full-cluster/version.tf
new file mode 100644
index 0000000..fa416a0
--- /dev/null
+++ b/examples/full-cluster/version.tf
@@ -0,0 +1,4 @@
+locals {
+  _module_version = "0.9.0"
+}
+