From 44e0a86554c53867e40890f38614c13b22227a1d Mon Sep 17 00:00:00 2001
From: badra001
Date: Wed, 10 Nov 2021 15:55:12 -0500
Subject: [PATCH] restore README.md

---
 examples/full-cluster/.terraform-docs.yml     |  44 --
 examples/full-cluster/README.md               | 608 ++++++++++++++----
 .../cluster-roles/.terraform-docs.yml         |  44 --
 examples/full-cluster/cluster-roles/README.md |   2 +
 4 files changed, 496 insertions(+), 202 deletions(-)
 delete mode 100644 examples/full-cluster/.terraform-docs.yml
 delete mode 100644 examples/full-cluster/cluster-roles/.terraform-docs.yml

diff --git a/examples/full-cluster/.terraform-docs.yml b/examples/full-cluster/.terraform-docs.yml
deleted file mode 100644
index 8391b9d..0000000
--- a/examples/full-cluster/.terraform-docs.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-formatter: markdown table
-
-header-from: main.tf
-footer-from: ""
-
-sections:
-## hide: []
-  show:
-    - data-sources
-    - header
-    - footer
-    - inputs
-    - modules
-    - outputs
-    - providers
-    - requirements
-    - resources
-
-output:
-  file: README.md
-  mode: inject
-  template: |-
-    <!-- BEGIN_TF_DOCS -->
-    {{ .Content }}
-    <!-- END_TF_DOCS -->
-
-## output-values:
-##   enabled: false
-##   from: ""
-##
-## sort:
-##   enabled: true
-##   by: name
-##
-## settings:
-##   anchor: true
-##   color: true
-##   default: true
-##   description: false
-##   escape: true
-##   indent: 2
-##   required: true
-##   sensitive: true
-##   type: true

diff --git a/examples/full-cluster/README.md b/examples/full-cluster/README.md
index 9eea4e2..ac6e9d1 100644
--- a/examples/full-cluster/README.md
+++ b/examples/full-cluster/README.md
@@ -1,115 +1,495 @@
+# EKS Full Cluster Example
+
+There are a number of steps to end up with a running cluster.
+
+1. From the main repository, in the `vpc/{region}/vpc{number}` directory
+   1. [Tag subnets](#subnet-tagging) in the main repository (before creating the nodegroup)
+   1. [Copy variables.vpc.*](#copy-variable-settings) from the main repository into the same `vpc/{region}/vpc{number}` directory
+   1. Copy the [includes.d structure](#copy-includesd)
+1. In the submodule repository, in the `vpc/{region}/vpc{number}/apps/{clustername}` directory
+   1. Update `settings.auto.tfvars`
+   1. Initialize the [Cluster Main](#initialize-cluster-main) directory
+   1. Create [policies](#policies)
+   1. Create the [EC2 Keypair](#keypair-creation)
+   1. Finish [cluster setup](#cluster-creation)
+1. Set up [aws-auth](#setup-aws-auth)
+1. Set up [EFS](#setup-efs)
+
+## Post-Setup Tasks
+
+1. Connect the DNS zone from on-prem to Route53 Resolvers with a forwarder
+
+## Subnet Tagging
+
+A tag needs to be added to the subnet(s) where the cluster will run. We haven't yet figured out how to incorporate
+this step more automatically.
+
+The file to update is `variables.subnets.auto.tfvars`, in this case `vpc/east/vpc3/variables.subnets.auto.tfvars`:
+
+```hcl
+private_subnets = [
+  { base_cidr = "10.188.18.0/23", label = "private-lb", bits = 2, private = true,
+    tags = { "kubernetes.io/role/internal-elb" = 1 }
+  },
+  { base_cidr = "10.188.17.0/24", label = "endpoints", bits = 2, private = true, tags = {} },
+  { base_cidr = "10.188.20.0/23", label = "db", bits = 2, private = true, tags = {} },
+  { base_cidr = "10.188.22.0/23", label = "apps", bits = 2, private = true, tags = {} },
+  { base_cidr = "10.188.24.0/21", label = "container", bits = 2, private = true,
+    tags = {
+      "kubernetes.io/cluster/org-project-env" = "shared"
+    },
+  }
+# space all used up
+]
+```
+
+We add the tag `"kubernetes.io/cluster/{cluster_name}" = "shared"` so that the node groups pick up the
+cluster subnets. This tag goes on the new `container` subnet.
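+
+As a quick sanity check (a sketch, not part of the original runbook; the cluster name, and any `--profile` or
+`--region` flags you normally pass, are assumptions here), you can confirm the tag landed on the container subnets:
+
+```shell
+# list the subnets carrying the cluster tag; substitute your cluster name
+aws ec2 describe-subnets \
+  --filters "Name=tag:kubernetes.io/cluster/org-project-env,Values=shared" \
+  --query 'Subnets[].[SubnetId,CidrBlock]' --output table
+```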
+
+For creating a service which uses load balancers (ELB, ALB, or NLB), the `"kubernetes.io/role/internal-elb" = 1`
+tag shown on the `private-lb` subnet above is needed. There is only one such tag for all of EKS, not one per
+cluster, and it should apply to the subnet(s) used for load balancing. A separate set of subnets exists for load
+balancing, with a name including `private-lb`.
+
+
+## Copy Variable Settings when in a submodule repo
+
+We need the `variables.vpc.tf` and `variables.vpc.auto.tfvars` from the main repository. These are not to be modified in
+this submodule.
+
+```shell
+cd MAIN-REPOSITORY
+MAINTOP=$(git rev-parse --show-toplevel)
+cd applications/{APPNAME}
+cd vpc/{region}/vpc{number}
+for f in $MAINTOP/vpc/{region}/vpc{number}/variables.vpc*
+do
+  cp $f ./
+done
+```
+
+Replace {region}, {number}, and {APPNAME} with the correct values.
+
+## Link Variable Settings when in the main account repo
+
+Link these files from the `vpc/{region}/vpc{number}/` directory:
+
+* variables.vpc.tf
+* variables.vpc.auto.tfvars
+
+## Copy includes.d when in a submodule repo
+
+This makes a copy of the entire `MAIN/includes.d` structure in the submodule, for use as soft links to bring in
+application variables for tagging.
+
+```shell
+cd MAIN-REPOSITORY
+MAINTOP=$(git rev-parse --show-toplevel)
+cd applications/{APPNAME}
+rsync -avRWH $MAINTOP/./includes.d ./
+```
+
+Replace {APPNAME} with the correct value.
+
+## Link includes.d when in the main account repo
+
+If there is an existing `MAIN/includes.d/` path for the specific application variables you wish to apply,
+make a link to it as appropriate.
+
+## Update the settings.auto.tfvars file
+
+Set the appropriate values in the `settings.auto.tfvars` file. An example starter file is at `settings.auto.tfvars.example`.
+
+Here is a sample file:
+
+```hcl
+cluster_name = "org-project-env"
+cluster_version = "1.21"
+region = "us-gov-east-1"
+domain = "org-project-env.env.domain.census.gov"
+eks_instance_disk_size = 40
+eks_vpc_name = "*vpcshortname*"
+eks_instance_type = "t3.xlarge"
+eks_ng_desire_size = 3
+eks_ng_max_size = 15
+eks_ng_min_size = 3
+```
+
+You need to change these values:
+
+* cluster_name: put in the proper org, project, and environment. Cluster names should not be replicated across the
+environment. These are tracked in the repo cloud-information/aws/documentation/containers/ (fix link).
+* region: include the correct region. This duplicates a `region` value defined elsewhere, so it may be removed in the future.
+* domain: this is the domain name of the cluster, consisting of the cluster name and the proper domain name for the environment/VPC.
+* eks_vpc_name: replace *vpcshortname* with the appropriate vpc name. This is used to find the vpc ID. This will be fixed at a later date.
+
+All the other values are good starting points, but can vary with your configuration.
+
+# Terraform Automated Setup
+
+A `tf-run.data` file exists here, so the simplest way to implement the cluster is with the `tf-run.sh` script.
+
+* copy the `remote_state.yml` from the parent and update `directory` to be the current directory (see the sketch below)
+* run `tf-run.sh`
+
+```console
+% tf-run.sh apply
+```
+
+This is part of a larger cluster configuration, so at the end of the run it indicates another directory
+to visit when done.
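+
+A minimal sketch of the `remote_state.yml` edit. Only the `directory` key is described in this document, and the
+value shown is a hypothetical path; copy the real file from the parent and adjust `directory` only.
+
+```yaml
+# remote_state.yml (sketch; the rest of the file comes from the parent directory)
+directory: vpc/{region}/vpc{number}/apps/{clustername}
+```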
+
+An example of the `tf-run.sh` steps:
+
+```console
+% tf-run.sh list
+* running action=plan
+* START: tf-run.sh v1.1.2 start=1636562594 end= logfile=logs/run.plan.20211110.1636562594.log (not-created)
+* reading from tf-run.data
+* read 22 entries from tf-run.data
+> list
+** START: start=1636562594
+* 1 COMMENT> make sure the private-lb subnet and container subnets are tagged properly (see README.md)
+* 2 STOP> then continue with at step 3
+* 3 COMMAND> tf-directory-setup.py -l none -f
+* 4 COMMAND> setup-new-directory.sh
+* 5 COMMAND> tf-init -upgrade
+* 6 POLICY> (*.tf) aws_iam_policy.nlb-policy aws_iam_policy.cloudwatch-policy aws_iam_policy.cluster-admin-policy aws_iam_policy.cluster-admin_assume_policy
+* 6 tf-plan -target=aws_iam_policy.nlb-policy -target=aws_iam_policy.cloudwatch-policy -target=aws_iam_policy.cluster-admin-policy -target=aws_iam_policy.cluster-admin_assume_policy
+* 7 COMMENT> EC2 key pairs
+* 8 tf-plan -target=null_resource.generate_keypair
+* 9 tf-plan -target=aws_key_pair.cluster_keypair
+* 10 COMMAND> tf-directory-setup.py -l s3
+* 11 COMMENT> be sure to add the setup/ec2-ssh-eks-{cluster} to git-secret, git-secret hide, add the setup/*secret and setup/*pub got git, and commit the entirety of the change
+* 12 tf-plan
+* 13 COMMENT> setup the includes.d/parent_rs.tf according to the includes.d/README
+* 14 STOP>
+* 15 COMMENT> cd aws-auth and tf-run.sh apply
+* 16 STOP>
+* 17 COMMENT> cd efs and tf-run.sh apply
+* 18 STOP>
+* 19 COMMENT> cd irsa-roles and tf-run.sh apply
+* 20 STOP>
+* 21 COMMENT> cd common-services and tf-run.sh apply
+* 22 STOP>
+** END: start=1636562594 end=1636562594 elapsed=0 logfile=logs/run.plan.20211110.1636562594.log (not-created)
+```
+
+Using the `tf-run.sh` approach is highly recommended. It has a number of stopping points along the way, with
+comments telling you what to do, and it directs you to the subdirectories you must visit to complete the setup.
+
+# Terraform Manual Setup
+
+## Initialize Cluster Main
+
+We need to set up the main directory for the cluster. Be sure `remote_state.yml` is correct. Then:
+
+```shell
+tf-directory-setup.py -l none
+tf-init
+```
+
+## Policies
+
+First, we have to create the two policies. The roles will not get created until the policies exist.
+
+```shell
+TFTARGET=$(grep '^res.*iam_policy' *.tf |awk '{print "-target=" $2 "." $3}'|sed -e 's/"//g')
+tf-plan $TFTARGET
+tf-apply $TFTARGET
+unset TFTARGET
+```
+
+## Keypair Creation
+
+We need to create the SSH key, which then allows the public key to be uploaded.
+
+```shell
+tf-plan -target=null_resource.generate_keypair
+tf-apply -target=null_resource.generate_keypair
+
+tf-plan -target=aws_key_pair.cluster_keypair
+tf-apply -target=aws_key_pair.cluster_keypair
+```
+
+## Cluster Creation
+
+Once created, we can run the rest of the code:
+
+```shell
+tf-plan
+tf-apply
+```
+
+Finalize by linking to the remote state file:
+
+```shell
+tf-directory-setup.py -l s3
+```
+
+## Setup aws-auth
+
+Be sure `remote_state.yml` is correct. Examine the `settings.aws-auth.tfvars` and replace any remote state references with the proper
+objects. There is at least one, a `rolearn`. You can get the remote state path with:
+
+```shell
+grep ^data remote_state.*{clustername}.tf | awk '{print $1 "." $2 "." $3}' |sed -e 's/"//g'
+```
+
+Next, we set up the remote state files, link to the parent remote state, and initialize terraform.
+
+```shell
+tf-directory-setup.py -l none
+# should only be one file here
+ln -s ../remote_state.applications_apps-adsd-eks_vpc_east_vpc2_apps_*.tf .
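+# verify the link: exactly one remote_state file should have matched the glob above
+# (this ls check is a suggested addition, not part of the original runbook)
+ls -l remote_state.*.tf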
+setup-new-directory.sh
+tf-init
+```
+
+Then, we can plan, apply, and finalize:
+
+```shell
+tf-plan
+tf-apply
+tf-directory-setup.py -l s3
+```
+
+## Setup EFS
+
+Be sure `remote_state.yml` is correct. Examine the `main.tf` and replace any remote state references with the proper
+objects. You can find where they are used:
+
+```console
+% grep data.terraform_remote_state *.tf
+main.tf:  vpc_id = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_vpc_id
+main.tf:  subnet_ids = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_subnet_ids
+main.tf:  cluster_worker_sg_id = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.cluster_worker_sg_id
+main.tf:  oidc_provider_url = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.oidc_provider_url
+main.tf:  oidc_provider_arn = data.terraform_remote_state.applications_apps-adsd-eks_vpc_east-1_vpc4_apps_eks-test4.outputs.oidc_provider_arn
+```
+
+Find the value to replace these with:
+
+```shell
+grep ^data remote_state.*{clustername}.tf | awk '{print $1 "." $2 "." $3}' |sed -e 's/"//g'
+```
+
+Next, we set up the remote state files, link to the parent remote state, and initialize terraform.
+
+```shell
+tf-directory-setup.py -l none
+# should only be one file here
+ln -s ../remote_state.applications_apps-adsd-eks_vpc_east_vpc2_apps_*.tf .
+setup-new-directory.sh
+```
+
+Then, we have to create the policies. The roles will not get created until the policies exist.
+
+```shell
+TFTARGET=$(grep '^res.*iam_policy' *.tf |awk '{print "-target=" $2 "." $3}'|sed -e 's/"//g')
+tf-plan $TFTARGET
+tf-apply $TFTARGET
+unset TFTARGET
+```
+
+Finally, you can apply the rest:
+
+```shell
+tf-plan
+tf-apply
+```
+
+## Common Services
+
+### Certificate Authority
+
+Set the download flag to `false` in `ca-cert.tf`:
+
+```hcl
+# ca-cert.tf
+  ca_cert_download = false
+```
+
+Do the first apply, which generates the key and CSR. You'll then need to submit the CSR (directions are generated).
+
+```shell
+tf-apply $(grep '^[rd]' ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g')
+```
+
+Once the submitted certificate is ready, taint the certificate resource and apply twice more:
+
+```shell
+# terraform taint null_resource.ca_cert[0]
+# # (wait for submitted cert to be ready)
+tf-apply $(grep '^[rd]' ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g')
+tf-apply $(grep '^[rd]' ca-cert.tf |awk '{print "-target=" $2 "." $3}' |sed -e 's/"//g')
+```
+
+### Rest of Setup
+
+```shell
+tf-plan
+tf-apply
+tf-directory-setup.py -l s3
+```
+
+## Access to the cluster
+
+There are two ways to access the cluster: from the AWS Console, or via an IAM account or role.
+
+Cluster access via the console is found in the EKS section, under *Clusters*.
+
+For IAM access, one must have IAM account credentials configured in `$HOME/.aws/credentials` and `$HOME/.aws/config`. [Here](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html)
+are the docs, and we have an example below. Region is important; otherwise it defaults to `us-gov-west-1` and the STS call will fail.
+
+```ini
+# $HOME/.aws/credentials
+[252960665057-ma6-gov]
+aws_access_key_id = ABCD1234...
+aws_secret_access_key = abcd5678...
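+# (the two key values above are truncated placeholders; substitute your own IAM access key pair)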
+
+# $HOME/.aws/config
+[profile 252960665057-ma6-gov-eks-org-project-env]
+source_profile = 252960665057-ma6-gov
+region = us-gov-east-1
+role_arn = arn:aws-us-gov:iam::252960665057:role/r-eks-org-project-env-cluster-admin
+role_session_name = badra001
+```
+
+With this configuration, using the profile `252960665057-ma6-gov` gives you the normal IAM access:
+
+```console
+% aws --profile 252960665057-ma6-gov sts get-caller-identity
+{
+    "UserId": "AIDATVZNBNXQ5UPHMBGPY",
+    "Account": "252960665057",
+    "Arn": "arn:aws-us-gov:iam::252960665057:user/a-badra001"
+}
+```
+
+Using the other profile will use the source profile (which has to have permission to assume the role), the role
+ARN, and a session name mapping it back to your Census username (JBID).
+
+```console
+% aws --profile 252960665057-ma6-gov-eks-org-project-env sts get-caller-identity
+{
+    "UserId": "AROATVZNBNXQ7AV7W2ISZ:badra001",
+    "Account": "252960665057",
+    "Arn": "arn:aws-us-gov:sts::252960665057:assumed-role/r-eks-org-project-env-cluster-admin/badra001"
+}
+```
+
+-----
+OLD LAB SETUP
+-----
+
+# Cluster Setup
+
+## Download Configuration
+
+Now that the cluster is created, we need the `kubectl` command, and we need to download the cluster configuration.
+
+* get [kubectl](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html)
+
+```console
+% aws eks --profile $(get-profile) --region $(get-region) update-kubeconfig --name test2 --kubeconfig ./test2.kube.config
+Added new context arn:aws:eks:us-east-1:079788916859:cluster/test2 to /data/git-repos/terraform/079788916859-do2-cat_apps-adsd-eks/vpc/east-1/vpc4/apps/eks-test2/test2.kube.config
+% export KUBECONFIG=$(pwd)/test2.kube.config
+% kubectl get nodes
+NAME                            STATUS   ROLES    AGE   VERSION
+ip-10-194-24-49.ec2.internal    Ready    <none>   24m   v1.20.4-eks-6b7464
+ip-10-194-24-90.ec2.internal    Ready    <none>   24m   v1.20.4-eks-6b7464
+ip-10-194-25-120.ec2.internal   Ready    <none>   24m   v1.20.4-eks-6b7464
+ip-10-194-26-252.ec2.internal   Ready    <none>   24m   v1.20.4-eks-6b7464
+```
+
+## Authentication
+
+### Automated
+
+This is in the `aws-auth` subdirectory.
+
+```shell
+cd aws-auth
+tf-init
+tf-plan
+tf-apply
+```
+
+### Manual
+
+To allow users and roles to manipulate the cluster, we add entries to `mapRoles` or `mapUsers`.
+
+```shell
+kubectl edit -n kube-system configmap/aws-auth
+```
+
+Add sections for `mapRoles`:
+
+```yaml
+  mapRoles: |
+    - rolearn: arn:aws:iam::079788916859:role/r-inf-cloud-admin
+      username: system:node:{{EC2PrivateDNSName}}
+      groups:
+        - system:bootstrappers
+        - system:nodes
+        - eks-console-dashboard-full-access-group
+```
+
+Add sections for `mapUsers`:
+
+```yaml
+  mapUsers: |
+    - userarn: arn:aws:iam::079788916859:user/u-zawac002
+      username: admin
+      groups:
+        - system:masters
+```
+
+We will likely want to do this through templating.
+
+* users
+  * arn:aws:iam::079788916859:user/u-badra001
+  * arn:aws:iam::079788916859:user/u-ashle001
+  * arn:aws:iam::079788916859:user/u-mcgin314
+  * arn:aws:iam::079788916859:user/u-sall0002
+  * arn:aws:iam::079788916859:user/u-zawac002
+* roles
+  * arn:aws:iam::079788916859:role/r-inf-cloud-admin
+  * arn:aws:iam::079788916859:role/r-adsd-cumulus
+  * arn:aws:iam::079788916859:role/r-adsd-eks
+  * arn:aws:iam::079788916859:role/r-adsd-tools
+
+## Adding Cluster Roles for AWS Console
+
+To allow [console access](https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml), we need these steps.
+
+This requires the cluster to be up and the `{clustername}.kube.config` file to exist, along with the `KUBECONFIG`
+environment variable pointing to it.
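+
+For example (a sketch following the Download Configuration section above; substitute your cluster name for
+{clustername}):
+
+```shell
+# point kubectl at the kubeconfig generated by update-kubeconfig
+export KUBECONFIG=$(pwd)/{clustername}.kube.config
+kubectl get nodes   # quick check that the cluster answers
+```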
+
+### Automated
+
+This applies just the full-access cluster role, as the restricted one needs additional configuration.
+
+```shell
+tf-apply -target=null_resource.apply_cluster_roles
+```
+
+### Manual
+
+```shell
+curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml
+curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml
+```
+
+For full console access, we'll use the first one.
+
+```console
+% kubectl apply -f eks-console-full-access.yaml
+clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole created
+clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding created
+```
+
+
+# Details
+
-## Requirements
-
-| Name | Version |
-|------|---------|
-| [terraform](#requirement\_terraform) | >= 0.12.31 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| [aws](#provider\_aws) | n/a |
-| [http](#provider\_http) | n/a |
-| [kubernetes](#provider\_kubernetes) | n/a |
-| [null](#provider\_null) | n/a |
-| [tls](#provider\_tls) | n/a |
-
-## Modules
-
-| Name | Source | Version |
-|------|--------|---------|
-| [group\_cluster-admin](#module\_group\_cluster-admin) | git@github.e.it.census.gov:terraform-modules/aws-iam-group.git | n/a |
-| [role\_cluster-admin](#module\_role\_cluster-admin) | git@github.e.it.census.gov:terraform-modules/aws-iam-role.git | n/a |
-| [role\_eks-cluster](#module\_role\_eks-cluster) | git@github.e.it.census.gov:terraform-modules/aws-iam-role.git | n/a |
-| [role\_eks-nodegroup](#module\_role\_eks-nodegroup) | git@github.e.it.census.gov:terraform-modules/aws-iam-role.git | n/a |
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [aws_eks_cluster.eks_cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource |
-| [aws_eks_node_group.eks-nodegroup](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group) | resource |
-| [aws_iam_openid_connect_provider.oidc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_openid_connect_provider) | resource |
-| [aws_iam_policy.cloudwatch-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_policy.cluster-admin-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_policy.cluster-admin_assume_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_iam_policy.nlb-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
-| [aws_key_pair.cluster_keypair](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
-| [aws_launch_template.eks-nodegroup](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
-| [aws_route53_zone.cluster_domain](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route53_zone) | resource |
-| [aws_security_group.additional_eks_cluster_sg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [aws_security_group.all_worker_mgmt](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
-| [kubernetes_storage_class.ebs_encrypted](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/storage_class) | resource |
-| [null_resource.apply_cluster_roles](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
-| [null_resource.cluster_roles](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
-| [null_resource.delete_default_sc](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
-| [null_resource.generate_keypair](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
-| [null_resource.kubeconfig](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
-| [aws_ebs_default_kms_key.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ebs_default_kms_key) | data source |
-| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
-| [aws_iam_policy.cluster_managed_policies](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy) | data source |
-| [aws_iam_policy.nodegroup_managed_policies](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy) | data source |
-| [aws_iam_policy_document.allow_sts](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.cloudwatch-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.cluster-admin-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.cluster-admin_assume_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.ec2_assume](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.eks_assume](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.nlb-policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_iam_policy_document.saml_assume](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
-| [aws_kms_key.ebs_key](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/kms_key) | data source |
-| [aws_subnet.subnets](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet) | data source |
-| [aws_subnet_ids.subnets](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet_ids) | data source |
-| [aws_vpc.eks_vpc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) | data source |
-| [http_http.cluster_roles](https://registry.terraform.io/providers/hashicorp/http/latest/docs/data-sources/http) | data source |
-| [tls_certificate.certs](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/data-sources/certificate) | data source |
-
-## Inputs
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| [cluster\_name](#input\_cluster\_name) | EKS cluster name name component used through out the EKS cluster describing its purpose (ex: dice-dev) | `string` | `null` | no |
-| [cluster\_version](#input\_cluster\_version) | The EKS version number, see https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html | `string` | `"1.21"` | no |
-| [domain](#input\_domain) | The DNS domain name of the cluster. Defaults to empty which causes the sample application to use the domain assigned to the load balancer of the istio ingress gateway. | `string` | `""` | no |
-| [eks\_instance\_disk\_size](#input\_eks\_instance\_disk\_size) | The size of the disk in gigabytes | `number` | `40` | no |
-| [eks\_instance\_type](#input\_eks\_instance\_type) | EKS worker node instance type | `string` | `"t3.xlarge"` | no |
-| [eks\_ng\_desire\_size](#input\_eks\_ng\_desire\_size) | Node Group desire size, default is 1 | `number` | `4` | no |
-| [eks\_ng\_max\_size](#input\_eks\_ng\_max\_size) | Node Group maximum size, default is 10 | `number` | `16` | no |
-| [eks\_ng\_min\_size](#input\_eks\_ng\_min\_size) | Node Group minimum size, default is 1 | `number` | `4` | no |
-| [eks\_vpc\_name](#input\_eks\_vpc\_name) | Define the VPC name that will be used by this cluster | `string` | `"*UNKNOWN*"` | no |
-| [subnets\_name](#input\_subnets\_name) | Define the name of the subnets to be used by this cluster | `string` | `"*-container-*"` | no |
-| [tags](#input\_tags) | AWS Tags to apply to appropriate resources. | `map(string)` | `{}` | no |
-| [vpc\_cidr\_block](#input\_vpc\_cidr\_block) | VPC CIDR Block | `string` | n/a | yes |
-| [vpc\_enable\_awsdns](#input\_vpc\_enable\_awsdns) | Enable AWS DNS on the VPC | `bool` | `false` | no |
-| [vpc\_enable\_igw](#input\_vpc\_enable\_igw) | Enable AWS Internet Gateway (IGW) on the VPC (true \| false[x]) | `bool` | `false` | no |
-| [vpc\_enable\_nat](#input\_vpc\_enable\_nat) | Enable AWS NAT Gateway on the VPC (true \| false[x]) | `bool` | `false` | no |
-| [vpc\_enable\_vpn](#input\_vpc\_enable\_vpn) | Enable AWS VPN Configuration on the VPC (true[x] \| false) | `bool` | `true` | no |
-| [vpc\_environment](#input\_vpc\_environment) | VPC environment purpose (common, shared, dev, stage, ite, prod) | `string` | `""` | no |
-| [vpc\_index](#input\_vpc\_index) | VPC index number. This used for NACL rule number caculations. | `number` | n/a | yes |
-| [vpc\_name](#input\_vpc\_name) | VPC Name including environment (if necessary), excluding vpc{N} | `string` | n/a | yes |
-| [vpc\_short\_name](#input\_vpc\_short\_name) | VPC short name component, vpc{index} | `string` | n/a | yes |
-| [vpn\_settings](#input\_vpn\_settings) | VPN Connection details array of site, bgp\_asn\_id and ip\_address | <pre>list(object(<br>    {<br>      site = string<br>      bgp_asn_id = number<br>      ip_address = string<br>    }<br>  ))</pre> | `[]` | no |
-
-## Outputs
-
-| Name | Description |
-|------|-------------|
-| [cluster\_auth\_token](#output\_cluster\_auth\_token) | The token required to authenticate with the cluster. |
-| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Certificate data required to successfully communicate with the Kubernetes API server. |
-| [cluster\_domain\_id](#output\_cluster\_domain\_id) | DNS Zone ID |
-| [cluster\_domain\_name](#output\_cluster\_domain\_name) | DNS Zone Name |
-| [cluster\_domain\_ns](#output\_cluster\_domain\_ns) | DNS Zone Nameservers |
-| [cluster\_endpoint](#output\_cluster\_endpoint) | The endpoint used to reach the Kubernetes API server. |
-| [cluster\_keypair](#output\_cluster\_keypair) | EC2 keypair for EKS Cluster |
-| [cluster\_name](#output\_cluster\_name) | The name of the cluster that was created. |
-| [cluster\_sg\_id](#output\_cluster\_sg\_id) | Security group ids attached to the cluster control plane. |
-| [cluster\_subnet\_ids](#output\_cluster\_subnet\_ids) | Subnet IDs used to create the cluster |
-| [cluster\_vpc\_id](#output\_cluster\_vpc\_id) | VPC IDs on which the cluster was created |
-| [cluster\_worker\_sg\_id](#output\_cluster\_worker\_sg\_id) | Security group ids attached to the cluster worker nodes. |
-| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | OpenID Connector provider ARN |
-| [oidc\_provider\_url](#output\_oidc\_provider\_url) | OpenID Connector provider URL |
-| [role\_cluster-admin-role\_arn](#output\_role\_cluster-admin-role\_arn) | Role ARN for EKS Cluster Admin Role |
-| [role\_eks-cluster\_arn](#output\_role\_eks-cluster\_arn) | Role ARN for EKS Cluster Role |
-| [role\_eks-nodegroup-role\_arn](#output\_role\_eks-nodegroup-role\_arn) | Role ARN for EKS Cluster Nodegroup Role |
-
\ No newline at end of file
+<!-- BEGIN_TF_DOCS -->
+<!-- END_TF_DOCS -->

diff --git a/examples/full-cluster/cluster-roles/.terraform-docs.yml b/examples/full-cluster/cluster-roles/.terraform-docs.yml
deleted file mode 100644
index 8391b9d..0000000
--- a/examples/full-cluster/cluster-roles/.terraform-docs.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-formatter: markdown table
-
-header-from: main.tf
-footer-from: ""
-
-sections:
-## hide: []
-  show:
-    - data-sources
-    - header
-    - footer
-    - inputs
-    - modules
-    - outputs
-    - providers
-    - requirements
-    - resources
-
-output:
-  file: README.md
-  mode: inject
-  template: |-
-    <!-- BEGIN_TF_DOCS -->
-    {{ .Content }}
-    <!-- END_TF_DOCS -->
-
-## output-values:
-##   enabled: false
-##   from: ""
-##
-## sort:
-##   enabled: true
-##   by: name
-##
-## settings:
-##   anchor: true
-##   color: true
-##   default: true
-##   description: false
-##   escape: true
-##   indent: 2
-##   required: true
-##   sensitive: true
-##   type: true

diff --git a/examples/full-cluster/cluster-roles/README.md b/examples/full-cluster/cluster-roles/README.md
index 15664f8..eae6d1d 100644
--- a/examples/full-cluster/cluster-roles/README.md
+++ b/examples/full-cluster/cluster-roles/README.md
@@ -233,4 +233,6 @@
 vpc_ntp_servers = [
 "148.129.127.23",
 "148.129.191.23"
 ]
+<!-- BEGIN_TF_DOCS -->
+<!-- END_TF_DOCS -->