diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6f20ddd..4c1c0f3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -10,6 +10,7 @@ repos: exclude: version.tf - id: terraform_tflint args: [ "--args=--config=__GIT_WORKING_DIR__/.tflint.hcl"] + exclude: examples - repo: https://github.com/pre-commit/pre-commit-hooks rev: v3.4.0 hooks: diff --git a/examples/full-cluster/README.md b/examples/full-cluster/README.md index 72fa9ac..9eea4e2 100644 --- a/examples/full-cluster/README.md +++ b/examples/full-cluster/README.md @@ -1,495 +1,115 @@ -# EKS Full Cluster Example - -There are a number of steps to end up with a cluster. - -1. From the main repository, in the same `vpc/{region}/vpc{number}` directory - 1. [Tag subnets](#subnet-tagging) in the main repository (before creating the nodegroup) - 1. [Copy variables.vpc.*](#copy-variable-settings) from the main repository in the same `vpc/{region}/vpc{number}` - 1. Copy the [includes.d structure](#copy-includesd) -1. In the submodule repository, in the `vpc/{region}/vpc{number}/apps/{clustername}` directory - 1. Update `settings.auto.tfvars` - 1. Initialize [Cluster Main](#initialize-cluster-main) directory - 1. Create [policies](#policies) - 1. Create [EC2 Keypair](#keypair-creation) - 1. Finish [cluster setup](#cluster-creation) -1. Setup [aws-auth](#setup-aws-auth) -1. Setup [EFS](#setup-efs) - -## Post-Setup Tasks - -1. Connect DNS zone from on-prem to Route53 Resolvers with a forwarder - -## Subnet Tagging - -A tag needs to be added to the subnet(s) where the cluster will run. We haven't yet figured out how to incorporate this more -automatically. - -The file to update is `variables.subnets.auto.tfvars`, in this case `vpc/east/vpc3/variables.subnets.auto.tfvars`: - -```hcl -private_subnets = [ - { base_cidr = "10.188.18.0/23", label = "private-lb", bits = 2, private = true, - tags = { "kubernetes.io/role/internal-elb" = 1 } - }, - { base_cidr = "10.188.17.0/24", label = "endpoints", bits = 2, private = true, tags = {} }, - { base_cidr = "10.188.20.0/23", label = "db", bits = 2, private = true, tags = {} }, - { base_cidr = "10.188.22.0/23", label = "apps", bits = 2, private = true, tags = {} }, - { base_cidr = "10.188.24.0/21", label = "container", bits = 2, private = true, - tags = { - "kubernetes.io/cluster/org-project-env" = "shared" - }, - } -# space all used up -] -``` - -We add the tag `"kubernetes.io/cluster/{cluster_name}" = "shared"` in order for the node groups to pick up the -cluster subnets. This is on the new `container` subnet. - -For creating a service which uses load balancers (ELB, ALB, or NLB), the tag -`"kubernetes.io/role/internal-elb" = 1` is needed. This is a single tag for all EKS clusters, not one per cluster, and it should apply -to the subnet(s) used for load balancing. A separate set of subnets exists for load balancing, with a name including `private-lb`. - - -## Copy Variable Settings when in a submodule repo - -We need the `variables.vpc.tf` and `variables.vpc.auto.tfvars` from the main repository. These are not to be modified in -this submodule. - -```shell -cd MAIN-REPOSITORY -MAINTOP=$(git rev-parse --show-toplevel) -cd applications/{APPNAME} -cd vpc/{region}/vpc{number} -for f in $(ls $MAINTOP/vpc/{region}/vpc{number}/variables.vpc*) - do - cp $f ./ -done -``` - -Replace {region} and {number} and {APPNAME} with the correct values.
- -## Link Variable Settings when in the main account repo - -Link these files from the `vpc/{region}/vpc{number}/` directory: - -* variables.vpc.tf -* variables.vpc.auto.tfvars - -## Copy includes.d when in a submodule repo - -This makes a copy of the entire `MAIN/includes.d` structure in the submodule, for use as soft links to bring in -application variables for tagging. - -```shell -cd MAIN-REPOSITORY -MAINTOP=$(git rev-parse --show-toplevel) -cd applications/{APPNAME} -rsync -avRWH $MAINTOP/./includes.d ./ -``` - -Replace {APPNAME} with the correct value. - -## Link includes.d when in the main account repo - -If there is an existing `MAIN/includes.d/` path for the specific application variables you wish to apply, -make a link to it as appropriate. - -## Update the settings.auto.tfvars file - -Set the appropriate values in the `settings.auto.tfvars` file. An example starter file is at `settings.auto.tfvars.example`. - -Here is a sample file: - -```hcl -cluster_name = "org-project-env" -cluster_version = "1.21" -region = "us-gov-east-1" -domain = "org-project-env.env.domain.census.gov" -eks_instance_disk_size = 40 -eks_vpc_name = "*vpcshortname*" -eks_instance_type = "t3.xlarge" -eks_ng_desire_size = 3 -eks_ng_max_size = 15 -eks_ng_min_size = 3 -``` - -You need to change these values: - -* cluster_name: put in the proper org, project, and environment. Cluster names should not be replicated across the environment. -These are tracked in the repo cloud-information/aws/documentation/containers/ (fix link). -* region: include the correct region. This really is a duplicate of the `region` variable, so it may be removed in the future. -* domain: this is the domain name of the cluster, consisting of the cluster name and the proper domain name for the environment/VPC. -* eks_vpc_name: replace *vpcshortname* with the appropriate vpc name. This is used to find the vpc ID. This will be fixed at a later date. - -All the others are subject to your configuration. They are a good starting point, but can vary. - -# Terraform Automated Setup - -A `tf-run.data` file exists here, so the simplest way to implement is with the `tf-run.sh` script. - -* copy the `remote_state.yml` from the parent and update `directory` to be the current directory -* run `tf-run.sh` - -```console -% tf-run.sh apply -``` - -* example of the `tf-run.sh` steps - -This is part of a larger cluster configuration, so at the end of the run it indicates another directory -to visit when done.
- -```console -% tf-run.sh list -* running action=plan -* START: tf-run.sh v1.1.2 start=1636562594 end= logfile=logs/run.plan.20211110.1636562594.log (not-created) -* reading from tf-run.data -* read 22 entries from tf-run.data -> list -** START: start=1636562594 -* 1 COMMENT> make sure the private-lb subnet and container subnets are tagged properly (see README.md) -* 2 STOP> then continue with at step 3 -* 3 COMMAND> tf-directory-setup.py -l none -f -* 4 COMMAND> setup-new-directory.sh -* 5 COMMAND> tf-init -upgrade -* 6 POLICY> (*.tf) aws_iam_policy.nlb-policy aws_iam_policy.cloudwatch-policy aws_iam_policy.cluster-admin-policy aws_iam_policy.cluster-admin_assume_policy -* 6 tf-plan -target=aws_iam_policy.nlb-policy -target=aws_iam_policy.cloudwatch-policy -target=aws_iam_policy.cluster-admin-policy -target=aws_iam_policy.cluster-admin_assume_policy -* 7 COMMENT> EC2 key pairs -* 8 tf-plan -target=null_resource.generate_keypair -* 9 tf-plan -target=aws_key_pair.cluster_keypair -* 10 COMMAND> tf-directory-setup.py -l s3 -* 11 COMMENT> be sure to add the setup/ec2-ssh-eks-{cluster} to git-secret, git-secret hide, add the setup/*secret and setup/*pub got git, and commit the entirety of the change -* 12 tf-plan -* 13 COMMENT> setup the includes.d/parent_rs.tf according to the includes.d/README -* 14 STOP> -* 15 COMMENT> cd aws-auth and tf-run.sh apply -* 16 STOP> -* 17 COMMENT> cd efs and tf-run.sh apply -* 18 STOP> -* 19 COMMENT> cd irsa-roles and tf-run.sh apply -* 20 STOP> -* 21 COMMENT> cd common-services and tf-run.sh apply -* 22 STOP> -** END: start=1636562594 end=1636562594 elapsed=0 logfile=logs/run.plan.20211110.1636562594.log (not-created) -``` - -It is highly recommended to use the `tf-run.sh` approach. This has a number of stopping points along the way with comments telling you what to do. -It also directs you to the subdirectories to visit to complete the setup. - -# Terraform Manual Setup - -## Initialize Cluster Main - -We need to setup the main directory for the cluster. Be sure `remote_state.yml` is correct. Then: - -```shell -tf-directory-setup.py -l none -tf-init -``` - -## Policies - -First, we have to create the policies. The roles will not get created until they exist. - -```shell -TFTARGET=$(grep ^res.*iam_policy *.tf |awk '{print "-target=" $2 "." $3}'|sed -e 's/"//g') -tf-plan $TFTARGET -tf-apply $TFTARGET -unset TFTARGET -``` - -## Keypair Creation - -We need to create the SSH key, which then allows for the public key to be uploaded. - -```shell -tf-plan -target=null_resource.generate_keypair -tf-apply -target=null_resource.generate_keypair - -tf-plan -target=aws_key_pair.cluster_keypair -tf-apply -target=aws_key_pair.cluster_keypair -``` - -## Cluster Creation - -Once created, we can run the rest of the code: - -```shell -tf-plan -tf-apply -``` - -Finalize by linking to the remote state file: - -```shell -tf-directory-setup.py -l s3 -``` - -## Setup aws-auth - -Be sure `remote_state.yml` is correct. Examine the `settings.aws-auth.tfvars` and replace any remote state references with the proper -objects. There is at least one, a `rolearn`. You can get the remote state path with - -```shell -grep ^data remote_state.*{clustername}.tf | awk '{print $1 "." $2 "." $3}' |sed -e 's/"//g' -``` - -Next, we setup the remote state files, link to the parent remote state, and initialize terraform. - -```shell -tf-directory-setup.py -l none -# should only be one file here -ln -s ../remote_state.applications_apps-adsd-eks_vpc_east_vpc2_apps_*.tf .
setup-new-directory.sh -tf-init -``` - -Then, we can plan, apply, and finalize: - -```shell -tf-plan -tf-apply -tf-directory-setup.py -l s3 -``` - -## Setup EFS - -Be sure `remote_state.yml` is correct. Examine the `main.tf` and replace any remote state references with the proper -objects. You can find where they are used: - -```console -% grep data.terraform_remote_state *.tf -main.tf: vpc_id = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_vpc_id -main.tf: subnet_ids = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_subnet_ids -main.tf: cluster_worker_sg_id = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_worker_sg_id -main.tf: oidc_provider_url = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.oidc_provider_url -main.tf: oidc_provider_arn = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.oidc_provider_arn -``` - -Find the value to replace these with: - -```shell -grep ^data remote_state.*{clustername}.tf | awk '{print $1 "." $2 "." $3}' |sed -e 's/"//g' -``` - -Next, we setup the remote state files, link to the parent remote state, and initialize terraform. - -```shell -tf-directory-setup.py -l none -# should only be one file here -ln -s ../remote_state.applications_apps-adsd-eks_vpc_east_vpc2_apps_*.tf . -setup-new-directory.sh -``` - -Then, we have to create the policies. The roles will not get created until they exist. - -```shell -TFTARGET=$(grep ^res.*iam_policy *.tf |awk '{print "-target=" $2 "." $3}'|sed -e 's/"//g') -tf-plan $TFTARGET -tf-apply $TFTARGET -unset TFTARGET -``` - -Finally, you can apply the rest: - - -```shell -tf-plan -tf-apply -``` - -## Common Services -### Certificate Authority - -Set the download to `false` - -```shell -# ca-cert.tf - ca_cert_download = false -``` - -Do the first apply, which generates the key and CSR. You'll then need to submit the CSR. (directions generated) - -```shell -tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g') -``` - - -```shell -# terraform taint null_resource.ca_cert[0] -# # (wait for submitted cert to be ready) -tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g') -tf-apply $(grep ^[rd] ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g') -``` - -### Rest of Setup - -```shell -tf-plan -tf-apply -tf-directory-setup.py -l s3 -``` - -## Access to the cluster - -There are two ways to access the cluster. One is from the AWS Console and the other is via the IAM account or role. - -Cluster access via the console is found in the EKS section, under *clusters*. - -For IAM access, one must have IAM account credentials configured in `$HOME/.aws/credentials` and `$HOME/.aws/config`. [Here](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html) -are the docs, and we have an example below. Region is important, otherwise it defaults to `us-gov-west-1` and the STS call will fail. - -```script -# $HOME/.aws/credentials -[252960665057-ma6-gov] -aws_access_key_id = ABCD1234... -aws_secret_access_key = abcd5678...
- -# $HOME/.aws/config -[profile 252960665057-ma6-gov-eks-org-project-env] -source_profile = 252960665057-ma6-gov -region = us-gov-east-1 -role_arn = arn:aws-us-gov:iam::252960665057:role/r-eks-org-project-env-cluster-admin -role_session_name = badra001 -``` - -With this configuration, using the profile `252960665057-ma6-gov` gives you the normal IAM access - -```console -% aws --profile 252960665057-ma6-gov sts get-caller-identity -{ - "UserId": "AIDATVZNBNXQ5UPHMBGPY", - "Account": "252960665057", - "Arn": "arn:aws-us-gov:iam::252960665057:user/a-badra001" -} -``` - -Using the other profile will use the source profile (which has to have permission to assume the role), the role arn, and a session -name mapping it back to your Census username (JBID). - -```console -% aws --profile 252960665057-ma6-gov-eks-org-project-env sts get-caller-identity -{ - "UserId": "AROATVZNBNXQ7AV7W2ISZ:badra001", - "Account": "252960665057", - "Arn": "arn:aws-us-gov:sts::252960665057:assumed-role/r-eks-org-project-env-cluster-admin/badra001" -} -``` - ------ -OLD LAB SETUP ------ - -# Cluster Setup - -## Download Configuration - -Now that the cluster is created, we need the `kubectl` command and to download the configuration. - -* get [kubectl](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) - -```console -% aws eks --profile $(get-profile) --region $(get-region) update-kubeconfig --name test2 --kubeconfig ./test2.kube.config -Added new context arn:aws:eks:us-east-1:079788916859:cluster/test2 to /data/git-repos/terraform/079788916859-do2-cat_apps-adsd-eks/vpc/east-1/vpc4/apps/eks-test2/test2.kube.config -% export KUBECONFIG=$(pwd)/test2.kube.config -% kubectl get nodes -NAME STATUS ROLES AGE VERSION -ip-10-194-24-49.ec2.internal Ready 24m v1.20.4-eks-6b7464 -ip-10-194-24-90.ec2.internal Ready 24m v1.20.4-eks-6b7464 -ip-10-194-25-120.ec2.internal Ready 24m v1.20.4-eks-6b7464 -ip-10-194-26-252.ec2.internal Ready 24m v1.20.4-eks-6b7464 -``` - -## Authentication - -### Automated - -This is in the `aws-auth` subdirectory. - -```shell -cd aws-auth -tf-init -tf-plan -tf-apply -``` - -### Manual - -To allow users and roles to manipulate the cluster, we add to `mapRoles` or `mapUsers`. - -```shell -kubectl edit -n kube-system configmap/aws-auth -``` - -Add sections for `mapRoles`: - -```yaml - mapRoles: | - - rolearn: arn:aws:iam::079788916859:role/r-inf-cloud-admin - username: system:node:{{EC2PrivateDNSName}} - groups: - - system:bootstrappers - - system:nodes - - eks-console-dashboard-full-access-group -``` - -Add sections for `mapUsers`: - -```yaml - mapUsers: | - - userarn: arn:aws:iam::079788916859:user/u-zawac002 - username: admin - groups: - - system:masters -``` - -We will likely want to do this through templating. - -* users - * arn:aws:iam::079788916859:user/u-badra001 - * arn:aws:iam::079788916859:user/u-ashle001 - * arn:aws:iam::079788916859:user/u-mcgin314 - * arn:aws:iam::079788916859:user/u-sall0002 - * arn:aws:iam::079788916859:user/u-zawac002 -* roles - * arn:aws:iam::079788916859:role/r-inf-cloud-admin - * arn:aws:iam::079788916859:role/r-adsd-cumulus - * arn:aws:iam::079788916859:role/r-adsd-eks - * arn:aws:iam::079788916859:role/r-adsd-tools - -## Adding Cluster Roles for AWS Console - -To allow [console access](https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml), we need these steps. - -It requires the cluster to be up and the `{clustername}.kube.config` file to exist along with the environment variable pointing to it.
- -### Automated - -This applies just the full-access cluster role, as the restricted one needs additional configuration. - -```shell -tf-apply -target=null_resource.apply_cluster_roles -``` - -### Manual - -```shell -curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml -curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml -``` - -For full console, we'll use the first one. - -```console -% kubectl apply -f eks-console-full-access.yaml -clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole created -clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding created -``` - - -# Details - -{{ .Content }} - +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.12.31 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | n/a | +| [http](#provider\_http) | n/a | +| [kubernetes](#provider\_kubernetes) | n/a | +| [null](#provider\_null) | n/a | +| [tls](#provider\_tls) | n/a | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [group\_cluster-admin](#module\_group\_cluster-admin) | git@github.e.it.census.gov:terraform-modules/aws-iam-group.git | n/a | +| [role\_cluster-admin](#module\_role\_cluster-admin) | git@github.e.it.census.gov:terraform-modules/aws-iam-role.git | n/a | +| [role\_eks-cluster](#module\_role\_eks-cluster) | git@github.e.it.census.gov:terraform-modules/aws-iam-role.git | n/a | +| [role\_eks-nodegroup](#module\_role\_eks-nodegroup) | git@github.e.it.census.gov:terraform-modules/aws-iam-role.git | n/a | + +## Resources + +| Name | Type | +|------|------| +| [aws_eks_cluster.eks_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource | +| [aws_eks_node_group.eks-nodegroup](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource | +| [aws_iam_openid_connect_provider.oidc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource | +| [aws_iam_policy.cloudwatch-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.cluster-admin-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.cluster-admin_assume_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_policy.nlb-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_key_pair.cluster_keypair](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource | +| [aws_launch_template.eks-nodegroup](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | +| [aws_route53_zone.cluster_domain](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_zone) | resource | +| [aws_security_group.additional_eks_cluster_sg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_security_group.all_worker_mgmt](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| 
[kubernetes_storage_class.ebs_encrypted](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/storage_class) | resource | +| [null_resource.apply_cluster_roles](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.cluster_roles](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.delete_default_sc](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.generate_keypair](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.kubeconfig](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [aws_ebs_default_kms_key.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ebs_default_kms_key) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_iam_policy.cluster_managed_policies](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy) | data source | +| [aws_iam_policy.nodegroup_managed_policies](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy) | data source | +| [aws_iam_policy_document.allow_sts](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.cloudwatch-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.cluster-admin-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.cluster-admin_assume_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.ec2_assume](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.eks_assume](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.nlb-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.saml_assume](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_kms_key.ebs_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/kms_key) | data source | +| [aws_subnet.subnets](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet) | data source | +| [aws_subnet_ids.subnets](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet_ids) | data source | +| [aws_vpc.eks_vpc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) | data source | +| [http_http.cluster_roles](https://registry.terraform.io/providers/hashicorp/http/latest/docs/data-sources/http) | data source | +| [tls_certificate.certs](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source | + +## Inputs + +| Name | Description | Type | Default | 
Required | +|------|-------------|------|---------|:--------:| +| [cluster\_name](#input\_cluster\_name) | EKS cluster name component used throughout the EKS cluster describing its purpose (ex: dice-dev) | `string` | `null` | no | +| [cluster\_version](#input\_cluster\_version) | The EKS version number, see https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html | `string` | `"1.21"` | no | +| [domain](#input\_domain) | The DNS domain name of the cluster. Defaults to empty which causes the sample application to use the domain assigned to the load balancer of the istio ingress gateway. | `string` | `""` | no | +| [eks\_instance\_disk\_size](#input\_eks\_instance\_disk\_size) | The size of the disk in gigabytes | `number` | `40` | no | +| [eks\_instance\_type](#input\_eks\_instance\_type) | EKS worker node instance type | `string` | `"t3.xlarge"` | no | +| [eks\_ng\_desire\_size](#input\_eks\_ng\_desire\_size) | Node Group desired size | `number` | `4` | no | +| [eks\_ng\_max\_size](#input\_eks\_ng\_max\_size) | Node Group maximum size | `number` | `16` | no | +| [eks\_ng\_min\_size](#input\_eks\_ng\_min\_size) | Node Group minimum size | `number` | `4` | no | +| [eks\_vpc\_name](#input\_eks\_vpc\_name) | Define the VPC name that will be used by this cluster | `string` | `"*UNKNOWN*"` | no | +| [subnets\_name](#input\_subnets\_name) | Define the name of the subnets to be used by this cluster | `string` | `"*-container-*"` | no | +| [tags](#input\_tags) | AWS Tags to apply to appropriate resources. | `map(string)` | `{}` | no | +| [vpc\_cidr\_block](#input\_vpc\_cidr\_block) | VPC CIDR Block | `string` | n/a | yes | +| [vpc\_enable\_awsdns](#input\_vpc\_enable\_awsdns) | Enable AWS DNS on the VPC | `bool` | `false` | no | +| [vpc\_enable\_igw](#input\_vpc\_enable\_igw) | Enable AWS Internet Gateway (IGW) on the VPC (true \| false[x]) | `bool` | `false` | no | +| [vpc\_enable\_nat](#input\_vpc\_enable\_nat) | Enable AWS NAT Gateway on the VPC (true \| false[x]) | `bool` | `false` | no | +| [vpc\_enable\_vpn](#input\_vpc\_enable\_vpn) | Enable AWS VPN Configuration on the VPC (true[x] \| false) | `bool` | `true` | no | +| [vpc\_environment](#input\_vpc\_environment) | VPC environment purpose (common, shared, dev, stage, ite, prod) | `string` | `""` | no | +| [vpc\_index](#input\_vpc\_index) | VPC index number. This is used for NACL rule number calculations. | `number` | n/a | yes | +| [vpc\_name](#input\_vpc\_name) | VPC Name including environment (if necessary), excluding vpc{N} | `string` | n/a | yes | +| [vpc\_short\_name](#input\_vpc\_short\_name) | VPC short name component, vpc{index} | `string` | n/a | yes | +| [vpn\_settings](#input\_vpn\_settings) | VPN Connection details array of site, bgp\_asn\_id and ip\_address |
<pre>list(object(<br>  {<br>    site = string<br>    bgp_asn_id = number<br>    ip_address = string<br>  }<br>))</pre>
| `[]` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [cluster\_auth\_token](#output\_cluster\_auth\_token) | The token required to authenticate with the cluster. | +| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Certificate data required to successfully communicate with the Kubernetes API server. | +| [cluster\_domain\_id](#output\_cluster\_domain\_id) | DNS Zone ID | +| [cluster\_domain\_name](#output\_cluster\_domain\_name) | DNS Zone Name | +| [cluster\_domain\_ns](#output\_cluster\_domain\_ns) | DNS Zone Nameservers | +| [cluster\_endpoint](#output\_cluster\_endpoint) | The endpoint used to reach the Kubernetes API server. | +| [cluster\_keypair](#output\_cluster\_keypair) | EC2 keypair for EKS Cluster | +| [cluster\_name](#output\_cluster\_name) | The name of the cluster that was created. | +| [cluster\_sg\_id](#output\_cluster\_sg\_id) | Security group ids attached to the cluster control plane. | +| [cluster\_subnet\_ids](#output\_cluster\_subnet\_ids) | Subnet IDs used to create the cluster | +| [cluster\_vpc\_id](#output\_cluster\_vpc\_id) | VPC IDs on which the cluster was created | +| [cluster\_worker\_sg\_id](#output\_cluster\_worker\_sg\_id) | Security group ids attached to the cluster worker nodes. | +| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | OpenID Connector provider ARN | +| [oidc\_provider\_url](#output\_oidc\_provider\_url) | OpenID Connector provider URL | +| [role\_cluster-admin-role\_arn](#output\_role\_cluster-admin-role\_arn) | Role ARN for EKS Cluster Admin Role | +| [role\_eks-cluster\_arn](#output\_role\_eks-cluster\_arn) | Role ARN for EKS Cluster Role | +| [role\_eks-nodegroup-role\_arn](#output\_role\_eks-nodegroup-role\_arn) | Role ARN for EKS Cluster Nodegroup Role | + \ No newline at end of file diff --git a/examples/full-cluster/aws-auth/tf-run.data b/examples/full-cluster/aws-auth/tf-run.data index 44f61ff..ce77f28 100644 --- a/examples/full-cluster/aws-auth/tf-run.data +++ b/examples/full-cluster/aws-auth/tf-run.data @@ -1,3 +1,4 @@ +REMOTE-STATE COMMAND tf-directory-setup.py -l none -f COMMAND setup-new-directory.sh COMMAND tf-init -upgrade diff --git a/examples/full-cluster/cluster-roles/remote_state.yml b/examples/full-cluster/cluster-roles/remote_state.yml deleted file mode 100644 index b1c5141..0000000 --- a/examples/full-cluster/cluster-roles/remote_state.yml +++ /dev/null @@ -1,9 +0,0 @@ -directory: "applications/apps-adsd-eks/vpc/east/vpc3/apps/eks-adsd-cumulus-qa/cluster-roles" -profile: "252960665057-ma6-gov" -bucket: "inf-tfstate-252960665057" -bucket_region: "us-gov-east-1" -region: "us-gov-east-1" -regions: ["us-gov-east-1"] -account_id: "252960665057" -account_alias: "ma6-gov" -aws_environment: "gov" diff --git a/examples/full-cluster/cluster-roles/tf-run.data b/examples/full-cluster/cluster-roles/tf-run.data new file mode 100644 index 0000000..5d91871 --- /dev/null +++ b/examples/full-cluster/cluster-roles/tf-run.data @@ -0,0 +1,10 @@ +REMOTE-STATE +STOP only run this after the cluster roles represented here have been setup in K8S +COMMAND tf-directory-setup.py -l none -f +COMMAND setup-new-directory.sh +COMMAND tf-init -upgrade +POLICY +ALL +COMMAND tf-directory-setup.py -l s3 + +COMMENT cd ../ and continue diff --git a/examples/full-cluster/common-services/tf-run.data b/examples/full-cluster/common-services/tf-run.data index 63f8c73..25472df 100644 --- a/examples/full-cluster/common-services/tf-run.data +++ 
b/examples/full-cluster/common-services/tf-run.data @@ -1,3 +1,4 @@ +REMOTE-STATE COMMAND tf-directory-setup.py -l none -f COMMAND setup-new-directory.sh COMMAND tf-init -upgrade diff --git a/examples/full-cluster/data.eks-main.tf b/examples/full-cluster/data.eks-main.tf deleted file mode 120000 index a3addd9..0000000 --- a/examples/full-cluster/data.eks-main.tf +++ /dev/null @@ -1 +0,0 @@ -includes.d/data.eks-main.tf \ No newline at end of file diff --git a/examples/full-cluster/data.eks-main.tf b/examples/full-cluster/data.eks-main.tf new file mode 100644 index 0000000..7ead28b --- /dev/null +++ b/examples/full-cluster/data.eks-main.tf @@ -0,0 +1,18 @@ +locals { + aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster + # for main.tf + aws_eks_cluster = aws_eks_cluster.eks_cluster + # for all subdirectories + ## aws_eks_cluster = data.aws_eks_cluster.cluster +} + +data "aws_eks_cluster_auth" "cluster" { + name = var.cluster_name +} + +#--- +# for all subdirectories only +#--- +## data "aws_eks_cluster" "cluster" { +## name = var.cluster_name +## } diff --git a/examples/full-cluster/ebs-encryption.tf b/examples/full-cluster/ebs-encryption.tf index 7243a3d..7890df5 100644 --- a/examples/full-cluster/ebs-encryption.tf +++ b/examples/full-cluster/ebs-encryption.tf @@ -7,9 +7,9 @@ resource "kubernetes_storage_class" "ebs_encrypted" { } parameters = { fsType = "ext4" - type = "gp2" - encrypted = "true" -# kms_key_id = data.aws_kms_key.ebs_key.arn + type = "gp2" + encrypted = "true" + # kms_key_id = data.aws_kms_key.ebs_key.arn kmsKeyId = data.aws_kms_key.ebs_key.arn } storage_provisioner = "kubernetes.io/aws-ebs" diff --git a/examples/full-cluster/efs/tf-run.data b/examples/full-cluster/efs/tf-run.data index 8bb6677..c778fc1 100644 --- a/examples/full-cluster/efs/tf-run.data +++ b/examples/full-cluster/efs/tf-run.data @@ -1,7 +1,8 @@ +REMOTE-STATE COMMAND tf-directory-setup.py -l none -f COMMAND setup-new-directory.sh COMMAND tf-init -upgrade POLICY ALL COMMAND tf-directory-setup.py -l s3 -STOP cd ../common-services and tf-run.sh apply +STOP cd ../irsa-roles and tf-run.sh apply diff --git a/examples/full-cluster/includes.d/README.md b/examples/full-cluster/includes.d/README.md index b34ca3f..97c168f 100644 --- a/examples/full-cluster/includes.d/README.md +++ b/examples/full-cluster/includes.d/README.md @@ -1,10 +1,30 @@ -# Includes.d +## Requirements -## parent_rs.tf +No requirements. -Update this with the proper remote state path, as pulled from the application directory for the cluster in the -main cluster directory. This is used throughout the cluster components. +## Providers -```hcl - parent_rs = data.terraform_remote_state.{vpc-state-path}_{application-state-path}-eks-{cluster-name}.outputs -``` +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | n/a | +| [null](#provider\_null) | n/a | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [null_resource.kubeconfig](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | + +## Inputs + +No inputs. + +## Outputs + +No outputs. 
diff --git a/examples/full-cluster/includes.d/data.eks-main.tf b/examples/full-cluster/includes.d/data.eks-main.tf index 9452be6..7ead28b 100644 --- a/examples/full-cluster/includes.d/data.eks-main.tf +++ b/examples/full-cluster/includes.d/data.eks-main.tf @@ -1,9 +1,9 @@ locals { aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster -# for main.tf + # for main.tf aws_eks_cluster = aws_eks_cluster.eks_cluster -# for all subdirectories -## aws_eks_cluster = data.aws_eks_cluster.cluster + # for all subdirectories + ## aws_eks_cluster = data.aws_eks_cluster.cluster } data "aws_eks_cluster_auth" "cluster" { diff --git a/examples/full-cluster/includes.d/parent_rs.tf b/examples/full-cluster/includes.d/parent_rs.tf index 5ccae16..7d4b782 100644 --- a/examples/full-cluster/includes.d/parent_rs.tf +++ b/examples/full-cluster/includes.d/parent_rs.tf @@ -1,4 +1,4 @@ # replace TF remote state accordingly in parent_rs with that from the parent directory, and be sure to make the link locals { - parent_rs = data.terraform_remote_state.{vpc-state-path}_{application-state-path}-eks-{cluster-name}.outputs + parent_rs = data.terraform_remote_state.vpc-state-path_application-state-path-eks-cluster-name.outputs } diff --git a/examples/full-cluster/irsa-roles/README.md b/examples/full-cluster/irsa-roles/README.md index 6915c05..5995413 100644 --- a/examples/full-cluster/irsa-roles/README.md +++ b/examples/full-cluster/irsa-roles/README.md @@ -1,64 +1,44 @@ -# irsa-roles +## Requirements -This is a directory under which actual IRSA role subdirectories exist. No resources are created here. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.12.31 | -See the directories to follow the directions contained within: ## Providers -* cluster-autoscaler +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | n/a | -## Setup Steps ## Modules -First, copy the `remote_state.yml` from the parent and update `directory` to be the current directory. +No modules. -## Terraform Automated ## Resources -A `tf-run.data` file exists here, so the simplest way to implement is with the `tf-run.sh` script. +| Name | Type | +|------|------| +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | -* copy the `remote_state.yml` from the parent and update `directory` to be the current directory -* run the tf-run.sh ## Inputs -```console -% tf-run.sh apply -``` +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [cluster\_name](#input\_cluster\_name) | EKS cluster name component used throughout the EKS cluster describing its purpose (ex: dice-dev) | `string` | `null` | no | +| [cluster\_version](#input\_cluster\_version) | The EKS version number, see https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html | `string` | `"1.21"` | no | +| [domain](#input\_domain) | The DNS domain name of the cluster. Defaults to empty which causes the sample application to use the domain assigned to the load balancer of the istio ingress gateway. 
| `string` | `""` | no | +| [eks\_instance\_disk\_size](#input\_eks\_instance\_disk\_size) | The size of the disk in gigabytes | `number` | `40` | no | +| [eks\_instance\_type](#input\_eks\_instance\_type) | EKS worker node instance type | `string` | `"t3.xlarge"` | no | +| [eks\_ng\_desire\_size](#input\_eks\_ng\_desire\_size) | Node Group desired size | `number` | `4` | no | +| [eks\_ng\_max\_size](#input\_eks\_ng\_max\_size) | Node Group maximum size | `number` | `16` | no | +| [eks\_ng\_min\_size](#input\_eks\_ng\_min\_size) | Node Group minimum size | `number` | `4` | no | +| [eks\_vpc\_name](#input\_eks\_vpc\_name) | Define the VPC name that will be used by this cluster | `string` | `"*UNKNOWN*"` | no | +| [name](#input\_name) | K8S service names for IAM Role for Service Account (per-pod) | `string` | n/a | yes | +| [namespace](#input\_namespace) | K8S namespace for IAM Role for Service Account (per-pod) | `string` | n/a | yes | +| [subnets\_name](#input\_subnets\_name) | Define the name of the subnets to be used by this cluster | `string` | `"*-container-*"` | no | +| [tags](#input\_tags) | AWS Tags to apply to appropriate resources. | `map(string)` | `{}` | no | -* example of the `tf-run.sh` steps +## Outputs -This is part of a larger cluster configuration, so at the end of the run it indicates another directory -to visit when done. - -```console -% tf-run.sh list -* running action=plan -* START: tf-run.sh v1.1.2 start=1636562881 end= logfile=logs/run.plan.20211110.1636562881.log (not-created) -* reading from tf-run.data -* read 6 entries from tf-run.data -> list -** START: start=1636562881 -* 1 COMMAND> tf-directory-setup.py -l none -f -* 2 COMMAND> setup-new-directory.sh -* 3 COMMAND> tf-init -upgrade -* 4 tf-plan -* 5 COMMAND> tf-directory-setup.py -l s3 -* 6 COMMENT> cd cluster-roles and tf-run.sh apply -** END: start=1636562881 end=1636562881 elapsed=0 logfile=logs/run.plan.20211110.1636562881.log (not-created) -``` - -It is highly recommended to use the `tf-run.sh` approach. - -## Terraform Manual - -* setup - -```shell -tf-directory-setup.py -l none -setup-new-directory.sh -tf-init -``` - -* Apply the rest - -```shell -tf-plan -tf-apply -tf-directory-setup.py -l s3 -``` +No outputs. diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/README.md index bc949cb..bfe2fa5 100644 --- a/examples/full-cluster/irsa-roles/cluster-autoscaler/README.md +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/README.md @@ -1,66 +1,53 @@ -# irsa-roles: cluster-autoscaler - -This sets up the needed IAM roles for service accounts for the cluster autoscaler. - - -## Setup - -First, copy the `remote_state.yml` from the parent and update `directory` to be the current directory. - -## Terraform Automated - -A `tf-run.data` file exists here, so the simplest way to implement is with the `tf-run.sh` script. - -```console -% tf-run.sh apply -``` - -* example of the `tf-run.sh` steps - -This is part of a larger cluster configuration, so at the end of the run it indicates another directory -to visit when done. 
- -```console -% tf-run.sh list -* running action=plan -* START: tf-run.sh v1.1.2 start=1636561755 end= logfile=logs/run.plan.20211110.1636561755.log (not-created) -* reading from tf-run.data -* read 6 entries from tf-run.data -> list -** START: start=1636561755 -* 1 COMMAND> tf-directory-setup.py -l none -* 2 COMMAND> setup-new-directory.sh -* 3 COMMAND> tf-init -upgrade -* 4 POLICY> (*.tf) aws_iam_policy.app_policy1 -* 4 tf-plan -target=aws_iam_policy.app_policy1 -* 5 tf-plan -* 6 COMMAND> tf-directory-setup.py -l s3 -** END: start=1636561755 end=1636561755 elapsed=0 logfile=logs/run.plan.20211110.1636561755.log (not-created) -``` - -It is highly recommended to use the `tf-run.sh` approach. - -## Terraform Manual - -```shell -tf-directory-setup.py -l none -setup-new-directory.sh -tf-init -``` - -* Apply the policies - -```shell -tf-plan -target=aws_iam_policy.app_policy1 -tf-apply -target=aws_iam_policy.app_policy1 -``` - -* Apply the rest - -```shell -tf-plan -tf-apply -tf-directory-setup.py -l s3 -``` - -## Post Setup Examination +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.12.31 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | n/a | +| [kubernetes](#provider\_kubernetes) | n/a | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [app\_role](#module\_app\_role) | git@github.e.it.census.gov:terraform-modules/aws-iam-role.git | n/a | + +## Resources + +| Name | Type | +|------|------| +| [aws_iam_policy.app_policy1](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [kubernetes_service_account.app](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account) | resource | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_iam_policy_document.app_policy1](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [cluster\_name](#input\_cluster\_name) | EKS cluster name component used throughout the EKS cluster describing its purpose (ex: dice-dev) | `string` | `null` | no | +| [cluster\_version](#input\_cluster\_version) | The EKS version number, see https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html | `string` | `"1.21"` | no | +| [domain](#input\_domain) | The DNS domain name of the cluster. Defaults to empty which causes the sample application to use the domain assigned to the load balancer of the istio ingress gateway. 
| `string` | `""` | no | +| [eks\_instance\_disk\_size](#input\_eks\_instance\_disk\_size) | The size of the disk in gigabytes | `number` | `40` | no | +| [eks\_instance\_type](#input\_eks\_instance\_type) | EKS worker node instance type | `string` | `"t3.xlarge"` | no | +| [eks\_ng\_desire\_size](#input\_eks\_ng\_desire\_size) | Node Group desired size | `number` | `4` | no | +| [eks\_ng\_max\_size](#input\_eks\_ng\_max\_size) | Node Group maximum size | `number` | `16` | no | +| [eks\_ng\_min\_size](#input\_eks\_ng\_min\_size) | Node Group minimum size | `number` | `4` | no | +| [eks\_vpc\_name](#input\_eks\_vpc\_name) | Define the VPC name that will be used by this cluster | `string` | `"*UNKNOWN*"` | no | +| [name](#input\_name) | K8S service names for IAM Role for Service Account (per-pod) | `string` | n/a | yes | +| [namespace](#input\_namespace) | K8S namespace for IAM Role for Service Account (per-pod) | `string` | n/a | yes | +| [subnets\_name](#input\_subnets\_name) | Define the name of the subnets to be used by this cluster | `string` | `"*-container-*"` | no | +| [tags](#input\_tags) | AWS Tags to apply to appropriate resources. | `map(string)` | `{}` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [app\_role\_arn](#output\_app\_role\_arn) | ARN of IAM Role for Service account for cluster-autoscaler | diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/data.eks-subdirectory.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/data.eks-subdirectory.tf new file mode 120000 index 0000000..05ab52d --- /dev/null +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/data.eks-subdirectory.tf @@ -0,0 +1 @@ +../data.eks-subdirectory.tf \ No newline at end of file diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/data.eks.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/data.eks.tf deleted file mode 120000 index bc5a403..0000000 --- a/examples/full-cluster/irsa-roles/cluster-autoscaler/data.eks.tf +++ /dev/null @@ -1 +0,0 @@ -../data.eks.tf \ No newline at end of file diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/locals.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/locals.tf index 3042080..4b9ae5a 100644 --- a/examples/full-cluster/irsa-roles/cluster-autoscaler/locals.tf +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/locals.tf @@ -12,6 +12,6 @@ locals { subnet_ids = local.parent_rs.cluster_subnet_ids cluster_worker_sg_id = local.parent_rs.cluster_worker_sg_id - oidc_provider_url = local.parent_rs.oidc_provider_url - oidc_provider_arn = local.parent_rs.oidc_provider_arn + oidc_provider_url = local.parent_rs.oidc_provider_url + oidc_provider_arn = local.parent_rs.oidc_provider_arn } diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/region.tf b/examples/full-cluster/irsa-roles/cluster-autoscaler/region.tf index f617506..b7b1696 100644 --- a/examples/full-cluster/irsa-roles/cluster-autoscaler/region.tf +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/region.tf @@ -1,3 +1,4 @@ locals { region = var.region } + diff --git a/examples/full-cluster/irsa-roles/cluster-autoscaler/tf-run.data b/examples/full-cluster/irsa-roles/cluster-autoscaler/tf-run.data index 336f6a5..b7371bc 100644 --- a/examples/full-cluster/irsa-roles/cluster-autoscaler/tf-run.data +++ b/examples/full-cluster/irsa-roles/cluster-autoscaler/tf-run.data @@ -1,3 +1,4 @@ +REMOTE-STATE COMMAND tf-directory-setup.py -l none COMMAND setup-new-directory.sh
COMMAND tf-init -upgrade diff --git a/examples/full-cluster/locals.tf b/examples/full-cluster/irsa-roles/region.tf similarity index 100% rename from examples/full-cluster/locals.tf rename to examples/full-cluster/irsa-roles/region.tf diff --git a/examples/full-cluster/irsa-roles/tf-run.data b/examples/full-cluster/irsa-roles/tf-run.data index 151331f..eecc8ab 100644 --- a/examples/full-cluster/irsa-roles/tf-run.data +++ b/examples/full-cluster/irsa-roles/tf-run.data @@ -1,3 +1,4 @@ +REMOTE-STATE COMMAND tf-directory-setup.py -l none -f COMMAND setup-new-directory.sh COMMAND tf-init -upgrade diff --git a/examples/full-cluster/kubeconfig.eks-main.tf b/examples/full-cluster/kubeconfig.eks-main.tf deleted file mode 120000 index e3f8503..0000000 --- a/examples/full-cluster/kubeconfig.eks-main.tf +++ /dev/null @@ -1 +0,0 @@ -includes.d/kubeconfig.eks-main.tf \ No newline at end of file diff --git a/examples/full-cluster/kubeconfig.eks-main.tf b/examples/full-cluster/kubeconfig.eks-main.tf new file mode 100644 index 0000000..5a6333e --- /dev/null +++ b/examples/full-cluster/kubeconfig.eks-main.tf @@ -0,0 +1,29 @@ +resource "null_resource" "kubeconfig" { + triggers = { + always_run = timestamp() + } + provisioner "local-exec" { + command = "which kubectl > /dev/null 2>&1; if [ $? != 0 ]; then echo 'missing kubectl'; exit 1; else exit 0; fi" + } + provisioner "local-exec" { + command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'" + } + provisioner "local-exec" { + environment = { + AWS_PROFILE = var.profile + AWS_REGION = local.region + } + command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/kube.config" + } + depends_on = [aws_eks_cluster.eks_cluster] +} + +#--- +# call it like +#--- +## provisioner "local-exec" { +## environment = { +## KUBECONFIG = "${path.root}/setup/kube.config" +## } +## command = "kubectl set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true" +## } diff --git a/examples/full-cluster/main.tf b/examples/full-cluster/main.tf index 14e6936..d1c801b 100644 --- a/examples/full-cluster/main.tf +++ b/examples/full-cluster/main.tf @@ -29,7 +29,7 @@ locals { vpc_id = data.aws_vpc.eks_vpc.id vpc_cidr_block = data.aws_vpc.eks_vpc.cidr_block subnets = [for k, v in data.aws_subnet.subnets : v.id if length(regexall("us-east-1e", v.availability_zone)) == 0] - s3_base_arn = format("arn:%v:%v:::%%v", data.aws_arn.current.partition, "s3") + s3_base_arn = format("arn:%v:%v:::%%v", data.aws_arn.current.partition, "s3") base_tags = { "eks-cluster-name" = var.cluster_name @@ -37,10 +37,10 @@ "boc:created_by" = "terraform" } -# https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html + # https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html autoscale_tags = { - format("k8s.io/cluster-autoscaler/%v",var.cluster_name) = "owned" - "k8s.io/cluster-autoscaler/enabled" = "TRUE" + format("k8s.io/cluster-autoscaler/%v", var.cluster_name) = "owned" + "k8s.io/cluster-autoscaler/enabled" = "TRUE" } } diff --git a/examples/full-cluster/outputs.tf b/examples/full-cluster/outputs.tf index 9fa1e23..e95c90d 100644 --- a/examples/full-cluster/outputs.tf +++ b/examples/full-cluster/outputs.tf @@ -20,9 +20,9 @@ output "cluster_certificate_authority_data" { output "cluster_auth_token" { description = "The token required to authenticate with the cluster."
-# value = data.aws_eks_cluster_auth.eks_auth.token - value = local.aws_eks_cluster_auth.token - sensitive = true + # value = data.aws_eks_cluster_auth.eks_auth.token + value = local.aws_eks_cluster_auth.token + sensitive = true } output "cluster_worker_sg_id" { diff --git a/examples/full-cluster/policy.tf b/examples/full-cluster/policy.tf index efa06b0..ac9e414 100644 --- a/examples/full-cluster/policy.tf +++ b/examples/full-cluster/policy.tf @@ -172,7 +172,7 @@ resource "aws_iam_policy" "cluster-admin_assume_policy" { local.base_tags, var.tags, var.application_tags, - tomap({ "Name" = format("%v%v-cluster-admin-assume", local._prefixes["eks-policy"], var.cluster_name)}), + tomap({ "Name" = format("%v%v-cluster-admin-assume", local._prefixes["eks-policy"], var.cluster_name) }), ) } @@ -181,6 +181,6 @@ data "aws_iam_policy_document" "cluster-admin_assume_policy" { sid = "AllowSTSAssumeClusterAdminRole" effect = "Allow" actions = ["sts:AssumeRole"] - resources = [ module.role_cluster-admin.role_arn ] + resources = [module.role_cluster-admin.role_arn] } } diff --git a/examples/full-cluster/region.tf b/examples/full-cluster/region.tf new file mode 100644 index 0000000..b7b1696 --- /dev/null +++ b/examples/full-cluster/region.tf @@ -0,0 +1,4 @@ +locals { + region = var.region +} + diff --git a/examples/full-cluster/role.tf b/examples/full-cluster/role.tf index 15d17f6..7d0db79 100644 --- a/examples/full-cluster/role.tf +++ b/examples/full-cluster/role.tf @@ -121,8 +121,8 @@ module "role_cluster-admin" { role_description = "SAML EKS cluster admin Role for ${var.cluster_name}" enable_ldap_creation = false assume_policy_document = data.aws_iam_policy_document.allow_sts.json -# assume_policy_document = data.aws_iam_policy_document.cluster-admin_combined.json - attached_policies = [aws_iam_policy.cluster-admin-policy.arn] + # assume_policy_document = data.aws_iam_policy_document.cluster-admin_combined.json + attached_policies = [aws_iam_policy.cluster-admin-policy.arn] tags = merge( local.base_tags, diff --git a/examples/full-cluster/saml.tf b/examples/full-cluster/saml.tf index cc86aa9..22c1f74 100644 --- a/examples/full-cluster/saml.tf +++ b/examples/full-cluster/saml.tf @@ -2,8 +2,8 @@ # also, there is no data source for saml provider locals { - saml_provider_arn = format(local.common_arn,"iam","saml-provider/Census_TCO_IDMS") - saml_url = var.aws_environment == "gov" ? "https://signin.amazonaws-us-gov.com/saml" : "https://signin.aws.amazon.com/saml" + saml_provider_arn = format(local.common_arn, "iam", "saml-provider/Census_TCO_IDMS") + saml_url = var.aws_environment == "gov" ? 
"https://signin.amazonaws-us-gov.com/saml" : "https://signin.aws.amazon.com/saml" } data "aws_iam_policy_document" "saml_assume" { diff --git a/examples/full-cluster/securitygroup.tf b/examples/full-cluster/securitygroup.tf index 70a3c10..8c6e880 100644 --- a/examples/full-cluster/securitygroup.tf +++ b/examples/full-cluster/securitygroup.tf @@ -6,7 +6,7 @@ resource "aws_security_group" "additional_eks_cluster_sg" { local.common_tags, var.tags, var.application_tags, - tomap({"Name"= format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name) }), + tomap({ "Name" = format("%v%v-cluster", local._prefixes["eks-security-group"], var.cluster_name) }), ) vpc_id = data.aws_vpc.eks_vpc.id @@ -38,7 +38,7 @@ resource "aws_security_group" "all_worker_mgmt" { local.common_tags, var.tags, var.application_tags, - tomap({"Name" = format("%v%v-all-worker-mgmt", local._prefixes["eks-security-group"], var.cluster_name)}), + tomap({ "Name" = format("%v%v-all-worker-mgmt", local._prefixes["eks-security-group"], var.cluster_name) }), ) vpc_id = data.aws_vpc.eks_vpc.id diff --git a/examples/full-cluster/tf-run.data b/examples/full-cluster/tf-run.data index 0f9370b..0baeaa9 100644 --- a/examples/full-cluster/tf-run.data +++ b/examples/full-cluster/tf-run.data @@ -1,5 +1,6 @@ +REMOTE-STATE COMMENT make sure the private-lb subnet and container subnets are tagged properly (see README.md) -STOP then continue with at step 3 +STOP then continue with at step 4 COMMAND tf-directory-setup.py -l none -f COMMAND setup-new-directory.sh COMMAND tf-init -upgrade