diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ce3418..48c832e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,5 @@ # Versions -* v1.0.0 -- {{ yyyy-mm-dd }} - - initial creation +* v1.0.0 -- 2021-10-14 + - patch-aws-auth module creation diff --git a/common/prefixes.tf b/common/prefixes.tf index d2ee1fe..5b4d144 100644 --- a/common/prefixes.tf +++ b/common/prefixes.tf @@ -24,5 +24,11 @@ locals { "vpn-connection" = "vpn_" "log-group" = "lg-" "log-stream" = "lgs-" + # EKS + "eks" = "eks-" + "eks-user" = "s-eks-" + "eks-role" = "r-eks-" + "eks-policy" = "p-eks-" + "eks-security-group" = "eks-" } } diff --git a/common/variables.awscli.tf b/common/variables.awscli.tf new file mode 100644 index 0000000..69ec329 --- /dev/null +++ b/common/variables.awscli.tf @@ -0,0 +1,10 @@ +variable "region" { + description = "AWS region (default: pull from current running provider)" + type = string + default = "" +} + +variable "profile" { + description = "AWS config profile. This is needed because we call the AWSCLI." 
+ type = string +} diff --git a/common/variables.eks.tf b/common/variables.eks.tf new file mode 100644 index 0000000..fd44b5f --- /dev/null +++ b/common/variables.eks.tf @@ -0,0 +1,60 @@ +## variable "vpc_name" { +## description = "Define the VPC name that will be used by this cluster to find the VPC ID" +## type = string +## # default = "" +## } +## +## variable "subnets_name" { +## description = "Define the name of the subnets to be used by this cluster" +## type = string +## # default = "" +## } + +variable "cluster_name" { + description = "EKS cluster name name component used through out the EKS cluster describing its purpose (ex: dice-dev)" + type = string + default = null +} + +variable "cluster_version" { + description = "The EKS Kubernetes version number, see https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html" + type = string + default = "1.21" +} + +variable "instance_type" { + description = "EKS worker node instance type (default: t3.xlarge)" + type = string + default = "t3.xlarge" +} + +variable "nodegroup_desired_size" { + description = "EKS Nodegroup desire size (default: 1)" + type = number + default = 1 +} + +variable "nodegroup_minumum_size" { + description = "EKS Nodegroup minimum size (default: 1)" + type = number + default = 1 +} + +variable "nodegroup_maximum_size" { + description = "EKs Nodegroup maximum size (default: 16)" + type = number + default = 16 +} + +variable "nodegroup_instance_disk_size" { + description = "The size of EKS nodegroup EBS disk in gigabytes (default: 40)" + type = number + default = 40 +} + +variable "domain" { + description = "The DNS domain name of the cluster. Defaults to empty which causes the sample application to use the domain assigned to the load balancer of the istio ingress gateway." 
+ type = string + default = "" +} + diff --git a/common/version.tf b/common/version.tf index a0cd862..fa2705b 100644 --- a/common/version.tf +++ b/common/version.tf @@ -1,3 +1,3 @@ locals { - _module_version = "0.0.0" + _module_version = "1.0.0" } diff --git a/patch-aws-auth/README.md b/patch-aws-auth/README.md new file mode 100644 index 0000000..469d317 --- /dev/null +++ b/patch-aws-auth/README.md @@ -0,0 +1,96 @@ +# About patch-aws-auth +This allows to add IAM roles and IAM users to the `aws-auth ConfigMap`, to tie IAM resources into +Kubernetes (k8s) users and group permissions. + +# Example variable usage + +```hcl +# settings.auto.tfvars +aws_auth_users = [ + { + userarn = "" + aws_username = "a-ashle001" + username = "admin" + groups = ["system:masters", "eks-console-dashboard-full-access-group"] + }, +] +aws_auth_roles = [ + { + rolearn : "" + aws_rolename : "r-inf-cloud-admin" + username : "admin" + groups = ["eks-console-dashboard-full-access-group"] + }, +] +``` + +```hcl +# patch-aws-auth.tf +module "awsauth_base_users" { + source = "git@github.e.it.census.gov:terraform-modules/aws-eks.git//patch-aws-auth?ref=feature-patch-aws-auth" + + profile = "123456789012-ma6-gov" + region = "us-gov-east-1" + cluster_name = "adsd-cumulus-dev" + aws_auth_users = var.aws_auth_users + aws_auth_roles = var.aws_auth_roles + # optional + keep_temporary_files = false +} +``` + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.12.31 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | n/a | +| [kubernetes](#provider\_kubernetes) | n/a | +| [null](#provider\_null) | n/a | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [null_resource.kubeconfig](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.patch-aws-auth](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.remove_temporary_files](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [aws_arn.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/arn) | data source | +| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_iam_role.auth_roles](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_role) | data source | +| [aws_iam_user.auth_users](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_user) | data source | +| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | +| [kubernetes_config_map.aws-auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/data-sources/config_map) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_auth\_roles](#input\_aws\_auth\_roles) | A list of objects where each object has rolearn, aws\_rolename, (k8s) username, and (k8s) groups, where groups is a list of groups to associate with the role. Leaving rolearn as an empty string will pull the role ARN from AWS. |
list(object({
rolearn = string
aws_rolename = string
username = string
groups = list(string)
})) | `[]` | no |
+| [aws\_auth\_users](#input\_aws\_auth\_users) | A list of objects where each object has userarn, aws\_username, (k8s) username, and (k8s) groups, where groups is a list of groups to associate with the user. Leaving userarn as an empty string will pull the user ARN from AWS. | list(object({
userarn = string
aws_username = string
username = string
groups = list(string)
})) | `[]` | no |
+| [cluster\_name](#input\_cluster\_name) | EKS cluster name component used throughout the EKS cluster describing its purpose (ex: dice-dev) | `string` | `null` | no |
+| [cluster\_version](#input\_cluster\_version) | The EKS Kubernetes version number, see https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html | `string` | `"1.21"` | no |
+| [domain](#input\_domain) | The DNS domain name of the cluster. Defaults to empty which causes the sample application to use the domain assigned to the load balancer of the istio ingress gateway. | `string` | `""` | no |
+| [instance\_type](#input\_instance\_type) | EKS worker node instance type (default: t3.xlarge) | `string` | `"t3.xlarge"` | no |
+| [keep\_temporary\_files](#input\_keep\_temporary\_files) | This module creates temporary files in setup/patch-aws-auth.TIMESTAMP.*. This flag determines whether to keep or remove them (default: false) | `bool` | `false` | no |
+| [nodegroup\_desired\_size](#input\_nodegroup\_desired\_size) | EKS Nodegroup desired size (default: 1) | `number` | `1` | no |
+| [nodegroup\_instance\_disk\_size](#input\_nodegroup\_instance\_disk\_size) | The size of EKS nodegroup EBS disk in gigabytes (default: 40) | `number` | `40` | no |
+| [nodegroup\_maximum\_size](#input\_nodegroup\_maximum\_size) | EKS Nodegroup maximum size (default: 16) | `number` | `16` | no |
+| [nodegroup\_minumum\_size](#input\_nodegroup\_minumum\_size) | EKS Nodegroup minimum size (default: 1) | `number` | `1` | no |
+| [profile](#input\_profile) | AWS config profile. This is needed because we call the AWSCLI. | `string` | n/a | yes |
+| [region](#input\_region) | AWS region (default: pull from current running provider) | `string` | `""` | no |
+
+## Outputs
+
+No outputs.
diff --git a/patch-aws-auth/data.eks.tf b/patch-aws-auth/data.eks.tf
new file mode 100644
index 0000000..408d60b
--- /dev/null
+++ b/patch-aws-auth/data.eks.tf
@@ -0,0 +1,7 @@
+data "aws_eks_cluster" "cluster" {
+ name = var.cluster_name
+}
+
+data "aws_eks_cluster_auth" "cluster" {
+ name = var.cluster_name
+}
diff --git a/patch-aws-auth/data.tf b/patch-aws-auth/data.tf
new file mode 120000
index 0000000..995624d
--- /dev/null
+++ b/patch-aws-auth/data.tf
@@ -0,0 +1 @@
+../common/data.tf
\ No newline at end of file
diff --git a/patch-aws-auth/defaults.tf b/patch-aws-auth/defaults.tf
new file mode 120000
index 0000000..a5556ac
--- /dev/null
+++ b/patch-aws-auth/defaults.tf
@@ -0,0 +1 @@
+../common/defaults.tf
\ No newline at end of file
diff --git a/patch-aws-auth/examples/settings.aws-auth.tf b/patch-aws-auth/examples/settings.aws-auth.tf
new file mode 100644
index 0000000..02cc2f2
--- /dev/null
+++ b/patch-aws-auth/examples/settings.aws-auth.tf
@@ -0,0 +1,12 @@
+locals {
+ aws_auth_users = []
+ aws_auth_roles = [
+ {
+ # rolearn: data.terraform_remote_state.applications_apps-adsd-eks_vpc_east_vpc2_apps_eks-adsd-cumulus-dev.outputs.role_cluster-admin-role_arn
+ rolearn : ""
+ aws_rolename : format("%v%v-cluster-admin", local._prefixes["eks-role"], var.cluster_name)
+ username : "admin"
+ groups = ["system:masters", "eks-console-dashboard-full-access-group"]
+ },
+ ]
+}
diff --git a/patch-aws-auth/examples/variables.aws-auth.tf b/patch-aws-auth/examples/variables.aws-auth.tf
new file mode 100644
index 0000000..d43c508
--- /dev/null
+++ b/patch-aws-auth/examples/variables.aws-auth.tf
@@ -0,0 +1,23 @@
+# maybe just ignore the ARN entirely and force a read
+
+variable "aws_auth_users" {
+ description = "A list of objects where each object has userarn, aws_username, (k8s) username, and (k8s) groups, where groups is a list of groups to associate with the user. Leaving userarn as an empty string will pull the user ARN from AWS."
+ type = list(object({
+ userarn = string
+ aws_username = string
+ username = string
+ groups = list(string)
+ }))
+ default = []
+}
+
+variable "aws_auth_roles" {
+ description = "A list of objects where each object has rolearn, aws_rolename, (k8s) username, and (k8s) groups, where groups is a list of groups to associate with the role. Leaving rolearn as an empty string will pull the role ARN from AWS."
+ type = list(object({
+ rolearn = string
+ aws_rolename = string
+ username = string
+ groups = list(string)
+ }))
+ default = []
+}
diff --git a/patch-aws-auth/kubeconfig.tf b/patch-aws-auth/kubeconfig.tf
new file mode 100644
index 0000000..df93dc3
--- /dev/null
+++ b/patch-aws-auth/kubeconfig.tf
@@ -0,0 +1,35 @@
+# establish kubeconfig file needed for kubectl patch command
+# requires kubectl command in the path
+
+resource "null_resource" "kubeconfig" {
+ triggers = {
+ always_run = timestamp()
+ }
+ provisioner "local-exec" {
+ command = "which kubectl > /dev/null 2>&1; if [ $? != 0 ]; then echo 'missing kubectl'; exit 1; else exit 0; fi"
+ }
+ provisioner "local-exec" {
+ command = "which aws > /dev/null 2>&1; if [ $? != 0 ]; then echo 'missing aws-cli (v2)'; exit 1; else exit 0; fi"
+ }
+ provisioner "local-exec" {
+ command = "test -d '${path.root}/setup' || mkdir '${path.root}/setup'"
+ }
+ provisioner "local-exec" {
+ environment = {
+ AWS_PROFILE = var.profile
+ AWS_REGION = local.region
+ }
+ command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/setup/${local.tmp_filename_prefix}.kube.config"
+ }
+ depends_on = [data.aws_eks_cluster.cluster]
+}
+
+#---
+# call it like
+#---
+## provisioner "local-exec" {
+## environment = {
+## KUBECONFIG = "${path.root}/setup/kube.config"
+## }
+## command = "kubectl set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=true"
+## }
diff --git a/patch-aws-auth/locals.tf b/patch-aws-auth/locals.tf
new file mode 100644
index 0000000..b55ae7b
--- /dev/null
+++ b/patch-aws-auth/locals.tf
@@ -0,0 +1,8 @@
+locals {
+ region = var.region == "" ? data.aws_region.current.name : var.region
+ aws_eks_cluster_auth = data.aws_eks_cluster_auth.cluster
+ aws_eks_cluster = data.aws_eks_cluster.cluster
+
+ tmp_filename_prefix = format("patch-aws-auth.%v", timestamp())
+}
+
diff --git a/patch-aws-auth/main.tf b/patch-aws-auth/main.tf
new file mode 100644
index 0000000..cc31aab
--- /dev/null
+++ b/patch-aws-auth/main.tf
@@ -0,0 +1,182 @@
+/*
+* # About patch-aws-auth
+* This allows to add IAM roles and IAM users to the `aws-auth ConfigMap`, to tie IAM resources into
+* Kubernetes (k8s) users and group permissions.
+*
+* # Example variable usage
+*
+* ```hcl
+* # settings.auto.tfvars
+* aws_auth_users = [
+* {
+* userarn = ""
+* aws_username = "a-ashle001"
+* username = "admin"
+* groups = ["system:masters", "eks-console-dashboard-full-access-group"]
+* },
+* ]
+* aws_auth_roles = [
+* {
+* rolearn : ""
+* aws_rolename : "r-inf-cloud-admin"
+* username : "admin"
+* groups = ["eks-console-dashboard-full-access-group"]
+* },
+* ]
+* ```
+*
+* ```hcl
+* # patch-aws-auth.tf
+* module "awsauth_base_users" {
+* source = "git@github.e.it.census.gov:terraform-modules/aws-eks.git//patch-aws-auth?ref=feature-patch-aws-auth"
+*
+* profile = "123456789012-ma6-gov"
+* region = "us-gov-east-1"
+* cluster_name = "adsd-cumulus-dev"
+* aws_auth_users = var.aws_auth_users
+* aws_auth_roles = var.aws_auth_roles
+* # optional
+* keep_temporary_files = false
+* }
+* ```
+*/
+
+
+# pull in current configmap aws-auth
+data "kubernetes_config_map" "aws-auth" {
+ metadata {
+ name = "aws-auth"
+ namespace = "kube-system"
+ }
+}
+
+# map users without ARNs to arns
+data "aws_iam_user" "auth_users" {
+ for_each = toset([for u in local.joined_auth_users : u.aws_username if u.aws_username != ""])
+ user_name = each.key
+}
+
+# map roles without ARNs to arns
+data "aws_iam_role" "auth_roles" {
+ for_each = toset([for r in local.joined_auth_roles : r.aws_rolename if r.aws_rolename != ""])
+ name = each.key
+}
+
+locals {
+ existing_roles_string = lookup(data.kubernetes_config_map.aws-auth.data, "mapRoles", "")
+ existing_users_string = lookup(data.kubernetes_config_map.aws-auth.data, "mapUsers", "")
+
+ existing_roles = local.existing_roles_string != "" ? yamldecode(local.existing_roles_string) : []
+ existing_users = local.existing_users_string != "" ? yamldecode(local.existing_users_string) : []
+
+ # joined_auth_users = concat(local.aws_auth_users, var.aws_auth_users)
+ # joined_auth_roles = concat(local.aws_auth_roles, var.aws_auth_roles)
+ joined_auth_users = var.aws_auth_users
+ joined_auth_roles = var.aws_auth_roles
+
+ mapped_auth_users = [for u in local.joined_auth_users : {
+ userarn = u.aws_username != "" ? data.aws_iam_user.auth_users[u.aws_username].arn : u.userarn
+ aws_username = u.aws_username
+ username = u.username
+ groups = u.groups
+ }]
+ mapped_auth_roles = [for u in local.joined_auth_roles : {
+ rolearn = u.aws_rolename != "" ? data.aws_iam_role.auth_roles[u.aws_rolename].arn : u.rolearn
+ aws_rolename = u.aws_rolename
+ username = u.username
+ groups = u.groups
+ }]
+
+ merged_users = merge(
+ { for user in local.existing_users : user.userarn => user },
+ # { for user in local.aws_auth_users : user.userarn => user },
+ # { for user in var.aws_auth_users : user.userarn => user }
+ { for user in local.mapped_auth_users : user.userarn => user },
+ )
+
+ merged_roles = merge(
+ { for role in local.existing_roles : role.rolearn => role },
+ # { for role in local.aws_auth_roles : role.rolearn => role },
+ # { for role in var.aws_auth_roles : role.rolearn => role }
+ { for role in local.mapped_auth_roles : role.rolearn => role },
+ )
+
+ # patch = yamlencode({
+ # "data" = {
+ # "mapUsers" = values(local.merged_users)
+ # "mapRoles" = values(local.merged_roles)
+ # }
+ # })
+ patch = <