diff --git a/examples/efk/.gitignore b/examples/efk/.gitignore
new file mode 100644
index 0000000..2f97b74
--- /dev/null
+++ b/examples/efk/.gitignore
@@ -0,0 +1 @@
+kube.config
diff --git a/examples/efk/config_map_data/apache-log-parser.fluentd.conf b/examples/efk/config_map_data/apache-log-parser.fluentd.conf
new file mode 100644
index 0000000..f90befe
--- /dev/null
+++ b/examples/efk/config_map_data/apache-log-parser.fluentd.conf
@@ -0,0 +1,46 @@
+# Ignore fluentd own events
+
+ @type null
+
+
+# HTTP input for the liveness and readiness probes
+
+ @type http
+ port 9880
+
+
+# Throw the healthcheck to the standard output instead of forwarding it
+
+ @type stdout
+
+
+# Get the logs from the containers running in the cluster
+# This block parses logs using an expression valid for the Apache log format
+# Update this depending on your application log format
+
+ @type tail
+ path /var/log/containers/*.log
+ pos_file /opt/bitnami/fluentd/logs/buffers/fluentd-docker.pos
+ tag www.log
+
+ @type regexp
+ expression /^(?<host>[^ ]*) [^ ]* (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^ ]*) +\S*)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)")?$/
+
+
+# Forward all logs to the aggregators
+
+ @type forward
+
+ host fluentd-0.fluentd-headless.logging.svc.cluster.local
+ port 24224
+
+
+
+ @type file
+ path /opt/bitnami/fluentd/logs/buffers/logs.buffer
+ flush_thread_count 2
+ flush_interval 5s
+
+
diff --git a/examples/efk/config_map_data/elasticsearch-output.fluentd.conf b/examples/efk/config_map_data/elasticsearch-output.fluentd.conf
new file mode 100644
index 0000000..edb978a
--- /dev/null
+++ b/examples/efk/config_map_data/elasticsearch-output.fluentd.conf
@@ -0,0 +1,38 @@
+# Ignore fluentd own events
+
+ @type null
+
+
+# TCP input to receive logs from the forwarders
+
+ @type forward
+ bind 0.0.0.0
+ port 24224
+
+
+# HTTP input for the liveness and readiness probes
+
+ @type http
+ bind 0.0.0.0
+ port 9880
+
+
+# Throw the healthcheck to the standard output instead of forwarding it
+
+ @type stdout
+
+
+# Send the logs to Elasticsearch
+
+ @type elasticsearch
+ include_tag_key true
+ host "#{ENV['ELASTICSEARCH_HOST']}"
+ port "#{ENV['ELASTICSEARCH_PORT']}"
+ logstash_format true
+
+ @type file
+ path /opt/bitnami/fluentd/logs/buffers/logs.buffer
+ flush_thread_count 2
+ flush_interval 5s
+
+
diff --git a/examples/efk/copy_images.tf.old b/examples/efk/copy_images.tf.old
deleted file mode 100644
index 0fa4a50..0000000
--- a/examples/efk/copy_images.tf.old
+++ /dev/null
@@ -1,43 +0,0 @@
-data "aws_ecr_authorization_token" "token" {}
-
-locals {
- account_id = data.aws_caller_identity.current.account_id
- repo_parent_name = format("eks/%v", var.cluster_name)
-
- account_ecr = format("%v.dkr.ecr.%v.amazonaws.com/%v", local.account_id, var.region, local.repo_parent_name)
-
- images = [
- # logging stack related images:
- {
- name = "elastic/elasticsearch"
- image = "docker.elastic.co/elasticsearch/elasticsearch"
- tag = var.elasticsearch_tag
- },
- {
- name = "elastic/kibana"
- image = "docker.elastic.co/kibana/kibana"
- tag = var.kibana_tag
- },
- {
- name = "fluent/fluentd-kubernetes-daemonset"
- image = "docker.io/fluent/fluentd-kubernetes-daemonset"
- tag = var.fluentd_tag
- },
- ]
- image_repos = { for image in local.images : image.name => format("%v/%v", local.account_ecr, image.name) }
-}
-
-resource "null_resource" "copy_images" {
- for_each = { for image in local.images : image.name => image }
-
- provisioner "local-exec" {
- command = "${path.module}/copy_image.sh"
- environment = {
- SOURCE_IMAGE = format("%v:%v", each.value.image, each.value.tag)
- DESTINATION_IMAGE = format("%v/%v:%v", local.account_ecr, each.value.name, each.value.tag)
- DESTINATION_USERNAME = data.aws_ecr_authorization_token.token.user_name
- DESTINATION_PASSWORD = data.aws_ecr_authorization_token.token.password
- }
- }
-}
-
diff --git a/examples/efk/main.tf b/examples/efk/main.tf
index 91adba4..1d7c631 100644
--- a/examples/efk/main.tf
+++ b/examples/efk/main.tf
@@ -1,3 +1,5 @@
+resource "null_resource" "eks_cluster" { }
+
resource "kubernetes_namespace" "logging" {
# depends_on = [null_resource.copy_images]
depends_on = [module.images]
@@ -63,7 +65,7 @@ resource "helm_release" "elasticsearch" {
set {
name = "image"
# value = local.image_repos["elastic/elasticsearch"]
- value = format("%v/%v", module.images.images[each.value.image_key].dest_registry, module.images.images[each.value.image_key].dest_registry)
+ value = format("%v/%v", module.images.images[each.value.image_key].dest_registry, module.images.images[each.value.image_key].dest_repository)
}
set {
name = "imageTag"
@@ -95,7 +97,8 @@ resource "helm_release" "elasticsearch" {
# value = "false"
# }
- timeout = 300
+# timeout = 300
+ timeout = 600
}
resource "helm_release" "kibana" {
@@ -110,7 +113,7 @@ resource "helm_release" "kibana" {
set {
name = "image"
# value = local.image_repos["elastic/kibana"]
- value = format("%v/%v", module.images.images[each.value.image_key].dest_registry, module.images.images[each.value.image_key].dest_registry)
+ value = format("%v/%v", module.images.images[each.value.image_key].dest_registry, module.images.images[each.value.image_key].dest_repository)
}
set {
name = "imageTag"
@@ -152,7 +155,8 @@ resource "helm_release" "kibana" {
# value = "false"
# }
- timeout = 180
+# timeout = 180
+ timeout = 300
}
resource "helm_release" "fluentd" {
@@ -166,14 +170,15 @@ resource "helm_release" "fluentd" {
set {
name = "image.repository"
- value = format("%v/%v", module.images.images[each.value.image_key].dest_registry, module.images.images[each.value.image_key].dest_registry)
+ value = format("%v/%v", module.images.images[each.value.image_key].dest_registry, module.images.images[each.value.image_key].dest_repository)
}
set {
name = "image.tag"
value = each.value.image_tag
}
- timeout = 180
+# timeout = 180
+ timeout = 300
}
@@ -184,46 +189,7 @@ resource "kubernetes_config_map" "elasticsearch-output" {
}
data = {
- "fluentd.conf" = <
- @type null
-
-
- # TCP input to receive logs from the forwarders
-
- @type forward
- bind 0.0.0.0
- port 24224
-
-
- # HTTP input for the liveness and readiness probes
-
- @type http
- bind 0.0.0.0
- port 9880
-
-
- # Throw the healthcheck to the standard output instead of forwarding it
-
- @type stdout
-
-
- # Send the logs to the standard output
-
- @type elasticsearch
- include_tag_key true
- host "#{ENV['ELASTICSEARCH_HOST']}"
- port "#{ENV['ELASTICSEARCH_PORT']}"
- logstash_format true
-
- @type file
- path /opt/bitnami/fluentd/logs/buffers/logs.buffer
- flush_thread_count 2
- flush_interval 5s
-
-
-EOF
+ "fluentd.conf" = file(format("%v/config_map_data/%v.fluentd.conf",path.root,"elasticsearch-output"))
}
}
@@ -234,53 +200,6 @@ resource "kubernetes_config_map" "apache-log-parser" {
}
data = {
- "fluentd.conf" = <
- @type null
-
-
- # HTTP input for the liveness and readiness probes
-
- @type http
- port 9880
-
-
- # Throw the healthcheck to the standard output instead of forwarding it
-
- @type stdout
-
-
- # Get the logs from the containers running in the cluster
- # This block parses logs using an expression valid for the Apache log format
- # Update this depending on your application log format
-
- @type tail
- path /var/log/containers/*.log
- pos_file /opt/bitnami/fluentd/logs/buffers/fluentd-docker.pos
- tag www.log
-
- @type regexp
- expression /^(?<host>[^ ]*) [^ ]* (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^ ]*) +\S*)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)")?$/
-
-
- # Forward all logs to the aggregators
-
- @type forward
-
- host fluentd-0.fluentd-headless.logging.svc.cluster.local
- port 24224
-
-
-
- @type file
- path /opt/bitnami/fluentd/logs/buffers/logs.buffer
- flush_thread_count 2
- flush_interval 5s
-
-
-EOF
+ "fluentd.conf" = file(format("%v/config_map_data/%v.fluentd.conf",path.root,"apache-log-parser"))
}
}
diff --git a/examples/efk/outputs.tf b/examples/efk/outputs.tf
new file mode 100644
index 0000000..6045174
--- /dev/null
+++ b/examples/efk/outputs.tf
@@ -0,0 +1,4 @@
+output "cluster_endpoint" {
+ description = "The endpoint used to reach the Kubernetes API server"
+ value = data.aws_eks_cluster.cluster.endpoint
+}
diff --git a/examples/efk/providers.tf b/examples/efk/providers.tf
deleted file mode 100644
index 830cf67..0000000
--- a/examples/efk/providers.tf
+++ /dev/null
@@ -1,34 +0,0 @@
-terraform {
- required_version = ">= 0.12.31"
-}
-
-data "aws_caller_identity" "current" {}
-
-data "aws_eks_cluster" "cluster" {
- name = var.cluster_name
-}
-
-data "aws_eks_cluster_auth" "cluster" {
- name = var.cluster_name
-}
-
-provider "aws" {
- region = var.region
- profile = var.profile
-}
-
-provider "kubernetes" {
- host = data.aws_eks_cluster.cluster.endpoint
-
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
-}
-
-provider "helm" {
- kubernetes {
- host = data.aws_eks_cluster.cluster.endpoint
-
- cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
- token = data.aws_eks_cluster_auth.cluster.token
- }
-}
diff --git a/examples/efk/region.tf b/examples/efk/region.tf
new file mode 100644
index 0000000..b7b1696
--- /dev/null
+++ b/examples/efk/region.tf
@@ -0,0 +1,4 @@
+locals {
+ region = var.region
+}
+
diff --git a/examples/efk/tf-run.data b/examples/efk/tf-run.data
new file mode 100644
index 0000000..41120ce
--- /dev/null
+++ b/examples/efk/tf-run.data
@@ -0,0 +1,17 @@
+VERSION 1.0.5
+REMOTE-STATE
+COMMAND tf-directory-setup.py -l none -f
+COMMAND setup-new-directory.sh
+COMMAND tf-init -upgrade
+COMMAND ln -sf ../settings.auto.tfvars
+COMMAND ln -sf ../variables.vpc.auto.tfvars .
+COMMAND ln -sf ../variables.vpc.tf
+data.aws_eks_cluster.cluster null_resource.eks_cluster
+COMMAND tf-directory-setup.py -l s3
+COMMAND tf-output cluster_endpoint
+
+COMMENT Be sure to set the appropriate proxy configuration for pulling images
+STOP Add to environment variable NO_PROXY the DNS name of the cluster_endpoint shown in the tf-output above because the EKS API will not work through the proxy
+
+module.images
+ALL
diff --git a/examples/efk/variables.eks.tf b/examples/efk/variables.eks.tf
deleted file mode 100644
index ec7b0d9..0000000
--- a/examples/efk/variables.eks.tf
+++ /dev/null
@@ -1,15 +0,0 @@
-variable "region" {
- description = "AWS region"
- type = string
-}
-
-variable "profile" {
- description = "AWS config profile"
- type = string
- default = ""
-}
-
-variable "cluster_name" {
- description = "The name of the EKS cluster into which cert-manager is to be installed."
- type = string
-}