Skip to content

Commit

Permalink
Everything except cluster_admin related items due to ldap_provider.
Browse files Browse the repository at this point in the history
  • Loading branch information
zawac002 committed Sep 1, 2023
1 parent c412af3 commit fbdca87
Show file tree
Hide file tree
Showing 17 changed files with 789 additions and 144 deletions.
17 changes: 17 additions & 0 deletions aws_data.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Identity and region lookups for the account Terraform is running in.
data "aws_caller_identity" "current" {}

data "aws_region" "current" {}

# Parse the caller's own ARN so its partition (aws / aws-us-gov / aws-cn)
# can be reused when composing ARNs in the locals below.
data "aws_arn" "current" {
arn = data.aws_caller_identity.current.arn
}

locals {
# ARN templates for the current partition/region/account. "%%v" escapes a
# literal "%v" so each result is itself a format() template for later use:
#   base_arn:   format(local.base_arn, service, resource_type, resource)
#   iam_arn:    format(local.iam_arn, resource)   # IAM is global: no region
#   common_arn: format(local.common_arn, service, resource)
base_arn = format("arn:%v:%%v:%v:%v:%%v:%%v", data.aws_arn.current.partition, data.aws_region.current.name, data.aws_caller_identity.current.account_id)
iam_arn = format("arn:%v:iam::%v:%%v", data.aws_arn.current.partition, data.aws_caller_identity.current.account_id)
common_arn = format("arn:%v:%%v:%v:%v:%%v",
data.aws_arn.current.partition,
data.aws_region.current.name,
data.aws_caller_identity.current.account_id)

}
12 changes: 12 additions & 0 deletions cluster_admin_group.tf.disabled
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# IAM group holding the cluster-admin users for this EKS cluster. Members
# receive the admin policy plus permission to assume the cluster-admin role.
module "group_cluster-admin" {
  source = "git@github.e.it.census.gov:terraform-modules/aws-iam-group.git"

  group_name = "${local._prefixes["eks"]}${var.cluster_name}-cluster-admin"
  attached_policies = [
    aws_iam_policy.cluster-admin-policy.arn,
    aws_iam_policy.cluster-admin_assume_policy.arn,
  ]

  tags = merge(local.base_tags, var.tags)
}

129 changes: 129 additions & 0 deletions cluster_admin_policies.tf.disabled
Original file line number Diff line number Diff line change
@@ -0,0 +1,129 @@
#---
# cluster admin policy
#---
locals {
  # EKS ARN resource types that get per-cluster access in EKSReadMyClusters.
  eks_resources = ["cluster", "addon", "nodegroup", "identityproviderconfig"]

  # Statements rendered into the cluster-admin policy document below. The
  # map key becomes part of the statement Sid; "effect" defaults to "Allow".
  admin_policy_statements = {
    # Read-only access to any ECR repository (image pulls, metadata).
    ECRRead = {
      actions = [
        "ecr:Describe*",
        "ecr:Get*",
        "ecr:ListImages",
        "ecr:BatchGetImage",
        "ecr:BatchCheckLayerAvailability",
        "ecr:GetDownloadUrlForLayer",
      ]
      resources = ["*"]
    }
    # Push/delete access limited to this cluster's own "eks/<cluster>/..."
    # repositories.
    ECRWrite = {
      actions = [
        "ecr:BatchDeleteImage",
        "ecr:CompleteLayerUpload",
        "ecr:CreateRepository",
        "ecr:DeleteRepository",
        "ecr:InitiateLayerUpload",
        "ecr:PutImage",
        "ecr:UploadLayerPart"
      ]
      # Fixed: the pattern previously ended in "/* " (trailing space),
      # which can never match a real ECR repository ARN, silently denying
      # all ECR write actions.
      resources = [format(local.common_arn, "ecr", format("repository/eks/%v/*", var.cluster_name))]
    }
    # List/describe for all clusters in this account/region.
    EKSRead = {
      actions = [
        "eks:ListClusters",
        "eks:ListAddons",
        "eks:ListNodegroups",
        "eks:DescribeCluster",
        "eks:DescribeAddon*",
        "eks:DescribeNodegroup",
      ]
      # NOTE(review): "addon/*", "addons/*" and "/addons/*" all appear —
      # presumably covering ARN-shape variations seen in practice. Confirm
      # which form the EKS API actually emits and drop the rest.
      resources = [
        format(local.common_arn, "eks", "cluster/*"),
        format(local.common_arn, "eks", "addon/*"),
        format(local.common_arn, "eks", "addons/*"),
        format(local.common_arn, "eks", "/addons/*"),
        format(local.common_arn, "eks", "nodegroup/*"),
      ]
    }
    # Needed by the console / tooling to enumerate roles.
    IAMRead = {
      actions = [
        "iam:ListRoles",
      ]
      resources = ["*"]
    }
    # AWS-published public EKS SSM parameters (e.g. optimized-AMI lookups).
    # Public parameters carry no account id, hence the empty account field.
    SSMGet = {
      actions = [
        "ssm:GetParameter",
      ]
      resources = [
        format("arn:%v:%v:%v:%v:%v", data.aws_arn.current.partition, "ssm", data.aws_region.current.name, "", "parameter/aws/service/eks/*")
      ]
    }
    # Full read plus Kubernetes API access, scoped to this cluster only.
    EKSReadMyClusters = {
      actions = [
        "eks:List*",
        "eks:Read*",
        "eks:Describe*",
        "eks:AccessKubernetesApi",
      ]
      # One "/clusters/<name>/addons" entry plus, for each resource type in
      # local.eks_resources, both "<type>/<name>" and "<type>/<name>/*".
      resources = flatten(concat(
        tolist([format(local.common_arn, "eks", format("/clusters/%v/addons", var.cluster_name))]),
        [for r in local.eks_resources : tolist([
          format(local.common_arn, "eks", format("%v/%v", r, var.cluster_name)),
          format(local.common_arn, "eks", format("%v/%v/*", r, var.cluster_name))
        ])]))
    }
  }
}

# Render local.admin_policy_statements into an IAM policy document: one
# statement per map entry, with Sid "<Effect><Key>Access".
data "aws_iam_policy_document" "cluster-admin-policy" {
  dynamic "statement" {
    for_each = local.admin_policy_statements
    iterator = stmt

    content {
      sid       = format("%v%vAccess", lookup(stmt.value, "effect", "Allow"), stmt.key)
      effect    = lookup(stmt.value, "effect", "Allow")
      actions   = lookup(stmt.value, "actions", [])
      resources = lookup(stmt.value, "resources", [])
    }
  }
}

# Managed policy granting AWS-side administration of this EKS cluster;
# attached to the cluster-admin group and role.
resource "aws_iam_policy" "cluster-admin-policy" {
name = format("%v%v-cluster-admin", local._prefixes["eks-policy"], var.cluster_name)
path = "/"
description = "Allow for administration of the cluster ${var.cluster_name} using AWS resources"
policy = data.aws_iam_policy_document.cluster-admin-policy.json

# NOTE(review): unlike cluster-admin_assume_policy, these tags omit
# var.application_tags and a Name tag — confirm whether that is intentional.
tags = merge(
local.base_tags,
var.tags,
)
}

#---
# cluster admin assume policy
#---
# Managed policy that lets group members sts:AssumeRole into the
# cluster-admin role created in cluster_admin_roles.
resource "aws_iam_policy" "cluster-admin_assume_policy" {
name = format("%v%v-cluster-admin-assume", local._prefixes["eks-policy"], var.cluster_name)
path = "/"
description = "Allow for assume role to the cluster-admin role for ${var.cluster_name}"
policy = data.aws_iam_policy_document.cluster-admin_assume_policy.json

tags = merge(
local.base_tags,
var.tags,
var.application_tags,
tomap({ "Name" = format("%v%v-cluster-admin-assume", local._prefixes["eks-policy"], var.cluster_name) }),
)
}

# Single-statement document: allow assuming exactly the cluster-admin role.
data "aws_iam_policy_document" "cluster-admin_assume_policy" {
statement {
sid = "AllowSTSAssumeClusterAdminRole"
effect = "Allow"
actions = ["sts:AssumeRole"]
resources = [module.role_cluster-admin.role_arn]
}
}

26 changes: 26 additions & 0 deletions cluster_admin_roles.tf.disabled
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
#---
# cluster-admin
#---
# IAM role assumed by cluster admins for this EKS cluster. LDAP creation is
# disabled here (the commit message notes ldap_provider issues); the assume
# policy is the shared allow_sts document for now, with the combined
# cluster-admin document left commented out until it can be re-enabled.
module "role_cluster-admin" {
source = "git@github.e.it.census.gov:terraform-modules/aws-iam-role.git"

role_name = format("%v%v-cluster-admin", local._prefixes["eks"], var.cluster_name)
role_description = "SAML EKS cluster admin Role for ${var.cluster_name}"
enable_ldap_creation = false
assume_policy_document = data.aws_iam_policy_document.allow_sts.json
# assume_policy_document = data.aws_iam_policy_document.cluster-admin_combined.json
attached_policies = [aws_iam_policy.cluster-admin-policy.arn]

tags = merge(
local.base_tags,
local.common_tags,
var.tags,
var.application_tags,
)
}

# Exposed so callers can reference the role (e.g. in aws-auth mappings).
output "role_cluster-admin-role_arn" {
description = "Role ARN for EKS Cluster Admin Role"
value = module.role_cluster-admin.role_arn
}

78 changes: 78 additions & 0 deletions cluster_autoscaler.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
locals {
# https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html
# Tags required for cluster-autoscaler auto-discovery of this cluster's ASGs.
# NOTE(review): AWS docs show the enabled value as "true"; confirm "TRUE"
# is accepted (auto-discovery is documented as matching on tag keys).
autoscale_tags = {
format("k8s.io/cluster-autoscaler/%v", var.cluster_name) = "owned"
"k8s.io/cluster-autoscaler/enabled" = "TRUE"
}

# ASG backing the "on_demand" managed node group, used below for tagging.
ng_asg_name = module.cluster.eks_managed_node_groups["on_demand"].node_group_resources[0].autoscaling_groups[0].name
}

# Label the on-demand ASG's nodes so the autoscaler can see their capacity
# type even when the group is scaled to zero.
resource "aws_autoscaling_group_tag" "on-demand" {
autoscaling_group_name = local.ng_asg_name
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/eks.amazonaws.com/capacityType"
value = "ON_DEMAND"
propagate_at_launch = true
}
}

# Look up kube-system (rather than hard-coding the string) so the helm
# release below implicitly waits for the cluster/node groups to exist.
data "kubernetes_namespace" "kube-system" {
depends_on = [
module.cluster.eks_managed_node_groups,
]

metadata {
name = "kube-system"
}
}

# Deploy cluster-autoscaler via its upstream chart, pointed at the image
# mirrored into ECR by module.images (copy_images.tf).
resource "helm_release" "cluster-autoscaler" {
depends_on = [
module.images,
module.cluster.eks_managed_node_groups,
]

chart = "cluster-autoscaler"
name = "cluster-autoscaler"
version = var.cluster_autoscaler_chart_version
namespace = data.kubernetes_namespace.kube-system.metadata[0].name
repository = "https://kubernetes.github.io/autoscaler"

# Pull from the mirrored (internal ECR) registry, not registry.k8s.io.
set {
name = "image.repository"
value = format("%v/%v",
module.images.images[local.autoscaler_key].dest_registry,
module.images.images[local.autoscaler_key].dest_repository
)
}
set {
name = "image.tag"
value = module.images.images[local.autoscaler_key].tag
}
set {
name = "autoDiscovery.clusterName"
value = var.cluster_name
}
set {
name = "awsRegion"
value = var.region
}

set {
name = "rbac.serviceAccount.name"
value = "cluster-autoscaler"
}

# IRSA: bind the service account to the autoscaler's IAM role. The escaped
# dots keep Helm from splitting the annotation key on ".".
set {
name = "rbac.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
value = module.cluster_autoscaler_irsa_role.iam_role_arn
}

# NOTE(review): create=false means the service account must exist already
# (presumably created by the IRSA module) — verify, or the pod won't start.
set {
name = "rbac.serviceAccount.create"
value = "false"
}
}


35 changes: 35 additions & 0 deletions copy_images.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
locals {
# Key used to look up the mirrored autoscaler image in module.images output.
autoscaler_key = format("%v#%v", "cluster-autoscaler", var.cluster_autoscaler_tag)

# Images to mirror from public registries into internal ECR.
image_config = [
{
enabled = true
dest_path = null
name = "cluster-autoscaler"
source_image = "autoscaling/cluster-autoscaler"
source_registry = "registry.k8s.io"
source_tag = null
tag = var.cluster_autoscaler_tag
},
]
}

# Copy the images in local.image_config into this account's ECR so nodes
# never pull directly from public registries.
module "images" {
source = "git@github.e.it.census.gov:terraform-modules/aws-ecr-copy-images.git"

profile = var.profile
application_name = var.cluster_name
image_config = local.image_config
tags = {}

### optional
## account_alias = ""
## account_id = ""
## destination_password = ""
## destination_username = ""
## override_prefixes = {}
## region = ""
## source_password = ""
## source_username = ""
}

55 changes: 55 additions & 0 deletions eks_console_access.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
# ```shell
# curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml
# curl -O https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml
# ```
#
# For full console, we'll use the first one.
#
# ```console
# % kubectl apply -f eks-console-full-access.yaml
# clusterrole.rbac.authorization.k8s.io/eks-console-dashboard-full-access-clusterrole created
# clusterrolebinding.rbac.authorization.k8s.io/eks-console-dashboard-full-access-binding created
# ```

locals {
# AWS-published RBAC bundles that grant the EKS console access to cluster
# resources; only entries with enabled = true are applied.
cluster_roles = [
{
name = "eks-console-full-access"
url = "https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml"
enabled = true
},
{
name = "eks-console-restricted-access"
url = "https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-restricted-access.yaml"
enabled = false
},
]
# Keyed by name for use as for_each below.
cluster_roles_map = { for cr in local.cluster_roles : cr.name => cr }
}


# Fetch the RBAC manifest for each enabled console-access bundle. Fixed:
# previously every entry was fetched, including disabled ones whose bodies
# were never consumed downstream.
data "http" "cluster_roles" {
  for_each = { for k, v in local.cluster_roles_map : k => v if v.enabled }

  url = each.value.url
}

# Split each fetched multi-document YAML bundle into individual manifests.
data "kubectl_file_documents" "access_documents" {
for_each = { for k, v in local.cluster_roles_map : k => v if v.enabled }

# NOTE(review): "body" is deprecated in hashicorp/http v3+ in favor of
# "response_body" — confirm the pinned provider version before switching.
content = data.http.cluster_roles[each.key].body
}

locals {
  # Flatten every manifest from the enabled console-access bundles into a
  # single list for kubectl_manifest below. access_documents is already
  # filtered to enabled entries, so no extra condition is needed here.
  all_access_documents = flatten([
    for docs in data.kubectl_file_documents.access_documents : [
      for doc in docs.manifests : doc
    ]
  ])
}

# Apply every manifest from the enabled bundles to the cluster.
# NOTE(review): count over a computed list means toggling a bundle reorders
# indices and churns resources — consider for_each keyed by manifest id.
resource "kubectl_manifest" "deploy_cluster_roles" {
count = length(local.all_access_documents)

yaml_body = local.all_access_documents[count.index]
}

Loading

0 comments on commit fbdca87

Please sign in to comment.