diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 7c1cae9..0000000
--- a/.gitignore
+++ /dev/null
@@ -1,38 +0,0 @@
-# Local .terraform directories
-**/.terraform/*
-
-# terraform lock file.
-**/.terraform.lock.hcl
-
-# .tfstate files
-*.tfstate
-*.tfstate.*
-
-# Crash log files
-crash.log
-crash.*.log
-
-# Exclude all .tfvars files, which are likely to contain sensitive data,
-# such as password, private keys, and other secrets. These should not be
-# part of version control as they are data points which are potentially
-# sensitive and subject to change depending on the environment.
-*.tfvars
-*.tfvars.json
-
-# Ignore override files as they are usually used to override resources
-# locally and so are not checked in
-override.tf
-override.tf.json
-*_override.tf
-*_override.tf.json
-
-# Include override files you do wish to add to version control using negated pattern
-# !example_override.tf
-
-# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
-# example: *tfplan*
-
-# Ignore CLI configuration files
-.terraformrc
-terraform.rc
-
diff --git a/main.tf b/main.tf
index 20aec65..1ab58d7 100644
--- a/main.tf
+++ b/main.tf
@@ -55,20 +55,20 @@ locals {
   # This below is just an example; in practice we'd notionally be creating a role (or multiple) specific to the cluster and setting
   # policy to allow the cluster users to assume said role, but we need to spend some time parsing exactly which permissions we plan
   # to hand out to these clusters.
-  # access_entries = {
-  #   inf-admin-t2 = {
-  #     principal_arn     = "arn:aws-us-gov:iam::224384469011:role/aws-reserved/sso.amazonaws.com/us-gov-east-1/AWSReservedSSO_inf-admin-t2_f3912d726991bbfa"
-  #     kubernetes_groups = []
-  #     policy_associations = {
-  #       admin = {
-  #         policy_arn = "arn:aws-us-gov:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
-  #         access_scope = {
-  #           type = "cluster"
-  #         }
-  #       }
-  #     }
-  #   }
-  # }
+  access_entries = {
+    inf-admin-t2 = {
+      principal_arn     = "arn:aws-us-gov:iam::224384469011:role/aws-reserved/sso.amazonaws.com/us-gov-east-1/AWSReservedSSO_inf-admin-t2_f3912d726991bbfa"
+      kubernetes_groups = []
+      policy_associations = {
+        admin = {
+          policy_arn = "arn:aws-us-gov:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
+          access_scope = {
+            type = "cluster"
+          }
+        }
+      }
+    }
+  }
 }
 
 module "cluster" {
@@ -79,7 +79,7 @@ module "cluster" {
   cluster_version                           = var.cluster_version
   cluster_endpoint_public_access            = var.cluster_endpoint_public_access
   enable_cluster_creator_admin_permissions  = var.enable_cluster_creator_admin_permissions
-  # access_entries = local.access_entries
+  access_entries = local.access_entries
 
   cluster_enabled_log_types = [
     "audit",
@@ -166,6 +166,18 @@ resource "aws_security_group_rule" "allow_sidecar_injection" {
   source_security_group_id = module.cluster.cluster_primary_security_group_id
 }
+
+################################################################
+# Update kubeconfig after the cluster is complete
+################################################################
+resource "null_resource" "kube_config_create" {
+  depends_on = [module.cluster]
+  provisioner "local-exec" {
+    interpreter = ["/bin/bash", "-c"]
+    command     = "aws eks --region ${var.region} update-kubeconfig --name ${module.cluster.cluster_name} --profile=${var.profile} && export KUBE_CONFIG_PATH=~/.kube/config && export KUBERNETES_MASTER=~/.kube/config"
+  }
+}
+
 # resource "kubernetes_namespace" "operators" {
 #   depends_on = [
 #     module.cluster.eks_managed_node_groups,
 #   ]
@@ -175,4 +187,3 @@ resource "aws_security_group_rule" "allow_sidecar_injection" {
 #     name = var.operators_ns
 #   }
 # }
-
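
A note on the `access_entries` block this change enables: the comment above it describes the intended end state (a cluster-specific role, with policy letting cluster users assume it) but no code exists for it yet. The sketch below is one possible shape for that, not the settled design; the role name, the trust-policy principal, and the assumption that `var.cluster_name` exists are all hypothetical.

```hcl
# Hypothetical cluster-scoped admin role. The trust principal is a
# placeholder for whichever SSO permission-set roles should be allowed
# to assume the role once the permissions question is settled.
data "aws_iam_policy_document" "cluster_admin_trust" {
  statement {
    actions = ["sts:AssumeRole"]
    principals {
      type        = "AWS"
      identifiers = ["arn:aws-us-gov:iam::224384469011:root"] # placeholder principal
    }
  }
}

resource "aws_iam_role" "cluster_admin" {
  name               = "${var.cluster_name}-admin" # hypothetical naming scheme
  assume_role_policy = data.aws_iam_policy_document.cluster_admin_trust.json
}
```

The `principal_arn` in `local.access_entries` would then point at `aws_iam_role.cluster_admin.arn` instead of the hard-coded `AWSReservedSSO_inf-admin-t2` role ARN.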
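Separately, two caveats on the new `null_resource`: without a `triggers` block it runs its provisioner only when first created, so the kubeconfig will not refresh if the cluster is later replaced, and the two `export`s at the end of the command live only in that provisioner's shell, so they configure nothing outside it. A possible variant keyed to the cluster endpoint, assuming the module exposes the `cluster_endpoint` output that the upstream terraform-aws-modules/eks module provides:

```hcl
# Sketch: re-run update-kubeconfig whenever the cluster endpoint changes
# (i.e. on cluster replacement), not only on first create.
resource "null_resource" "kube_config_create" {
  triggers = {
    cluster_endpoint = module.cluster.cluster_endpoint # assumed module output
  }

  provisioner "local-exec" {
    interpreter = ["/bin/bash", "-c"]
    command     = "aws eks --region ${var.region} update-kubeconfig --name ${module.cluster.cluster_name} --profile=${var.profile}"
  }
}
```

Because `triggers` references a module output, Terraform orders this resource after the cluster implicitly, which makes the explicit `depends_on` unnecessary.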