Skip to content

Commit

Permalink
update
Browse files Browse the repository at this point in the history
  • Loading branch information
badra001 committed Sep 16, 2022
1 parent 8922a7d commit 906899d
Show file tree
Hide file tree
Showing 10 changed files with 123 additions and 186 deletions.
1 change: 1 addition & 0 deletions examples/efk/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
kube.config
46 changes: 46 additions & 0 deletions examples/efk/config_map_data/apache-log-parser.fluentd.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
# Ignore fluentd own events
<match fluent.**>
@type null
</match>

# HTTP input for the liveness and readiness probes
<source>
@type http
port 9880
</source>

# Throw the healthcheck to the standard output instead of forwarding it
<match fluentd.healthcheck>
@type stdout
</match>

# Get the logs from the containers running in the cluster
# This block parses logs using an expression valid for the Apache log format
# Update this depending on your application log format
<source>
@type tail
path /var/log/containers/*.log
pos_file /opt/bitnami/fluentd/logs/buffers/fluentd-docker.pos
tag www.log
<parse>
@type regexp
# NOTE: the request field is delimited by a plain double quote in Apache logs.
# The previous pattern used \\" (a heredoc double-escaping artifact), which in a
# standalone conf file matches a literal backslash + quote and never matches
# real Apache access-log lines.
expression /^(?<host>[^ ]*) [^ ]* (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^ ]*) +\S*)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)")?$/
time_format %d/%b/%Y:%H:%M:%S %z
</parse>
</source>

# Forward all logs to the aggregators
<match **>
@type forward
<server>
host fluentd-0.fluentd-headless.logging.svc.cluster.local
port 24224
</server>

# Buffer to local file so forwarding survives short aggregator outages
<buffer>
@type file
path /opt/bitnami/fluentd/logs/buffers/logs.buffer
flush_thread_count 2
flush_interval 5s
</buffer>
</match>
38 changes: 38 additions & 0 deletions examples/efk/config_map_data/elasticsearch-output.fluentd.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# Ignore fluentd own events
<match fluent.**>
@type null
</match>

# TCP input to receive logs from the forwarders
<source>
@type forward
bind 0.0.0.0
port 24224
</source>

# HTTP input for the liveness and readiness probes
<source>
@type http
bind 0.0.0.0
port 9880
</source>

# Throw the healthcheck to the standard output instead of forwarding it
<match fluentd.healthcheck>
@type stdout
</match>

# Send all remaining logs to Elasticsearch (host/port come from the
# ELASTICSEARCH_HOST / ELASTICSEARCH_PORT environment variables)
<match **>
@type elasticsearch
include_tag_key true
host "#{ENV['ELASTICSEARCH_HOST']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format true
# Buffer to local file so delivery survives short Elasticsearch outages
<buffer>
@type file
path /opt/bitnami/fluentd/logs/buffers/logs.buffer
flush_thread_count 2
flush_interval 5s
</buffer>
</match>
43 changes: 0 additions & 43 deletions examples/efk/copy_images.tf.old

This file was deleted.

107 changes: 13 additions & 94 deletions examples/efk/main.tf
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
resource "null_resource" "eks_cluster" { }

resource "kubernetes_namespace" "logging" {
# depends_on = [null_resource.copy_images]
depends_on = [module.images]
Expand Down Expand Up @@ -63,7 +65,7 @@ resource "helm_release" "elasticsearch" {
set {
name = "image"
# value = local.image_repos["elastic/elasticsearch"]
value = format("%v/%v", module.images.images[each.value.image_key].dest_registry, module.images.images[each.value.image_key].dest_registry)
value = format("%v/%v", module.images.images[each.value.image_key].dest_registry, module.images.images[each.value.image_key].dest_repository)
}
set {
name = "imageTag"
Expand Down Expand Up @@ -95,7 +97,8 @@ resource "helm_release" "elasticsearch" {
# value = "false"
# }

timeout = 300
# timeout = 300
timeout = 600
}

resource "helm_release" "kibana" {
Expand All @@ -110,7 +113,7 @@ resource "helm_release" "kibana" {
set {
name = "image"
# value = local.image_repos["elastic/kibana"]
value = format("%v/%v", module.images.images[each.value.image_key].dest_registry, module.images.images[each.value.image_key].dest_registry)
value = format("%v/%v", module.images.images[each.value.image_key].dest_registry, module.images.images[each.value.image_key].dest_repository)
}
set {
name = "imageTag"
Expand Down Expand Up @@ -152,7 +155,8 @@ resource "helm_release" "kibana" {
# value = "false"
# }

timeout = 180
# timeout = 180
timeout = 300
}

resource "helm_release" "fluentd" {
Expand All @@ -166,14 +170,15 @@ resource "helm_release" "fluentd" {

set {
name = "image.repository"
value = format("%v/%v", module.images.images[each.value.image_key].dest_registry, module.images.images[each.value.image_key].dest_registry)
value = format("%v/%v", module.images.images[each.value.image_key].dest_registry, module.images.images[each.value.image_key].dest_repository)
}
set {
name = "image.tag"
value = each.value.image_tag
}

timeout = 180
# timeout = 180
timeout = 300
}


Expand All @@ -184,46 +189,7 @@ resource "kubernetes_config_map" "elasticsearch-output" {
}

data = {
"fluentd.conf" = <<EOF
# Ignore fluentd own events
<match fluent.**>
@type null
</match>
# TCP input to receive logs from the forwarders
<source>
@type forward
bind 0.0.0.0
port 24224
</source>
# HTTP input for the liveness and readiness probes
<source>
@type http
bind 0.0.0.0
port 9880
</source>
# Throw the healthcheck to the standard output instead of forwarding it
<match fluentd.healthcheck>
@type stdout
</match>
# Send the logs to the standard output
<match **>
@type elasticsearch
include_tag_key true
host "#{ENV['ELASTICSEARCH_HOST']}"
port "#{ENV['ELASTICSEARCH_PORT']}"
logstash_format true
<buffer>
@type file
path /opt/bitnami/fluentd/logs/buffers/logs.buffer
flush_thread_count 2
flush_interval 5s
</buffer>
</match>
EOF
"fluentd.conf" = file(format("%v/config_map_data/%v.fluentd.conf",path.root,"elasticsearch-output"))
}
}

Expand All @@ -234,53 +200,6 @@ resource "kubernetes_config_map" "apache-log-parser" {
}

data = {
"fluentd.conf" = <<EOF
# Ignore fluentd own events
<match fluent.**>
@type null
</match>
# HTTP input for the liveness and readiness probes
<source>
@type http
port 9880
</source>
# Throw the healthcheck to the standard output instead of forwarding it
<match fluentd.healthcheck>
@type stdout
</match>
# Get the logs from the containers running in the cluster
# This block parses logs using an expression valid for the Apache log format
# Update this depending on your application log format
<source>
@type tail
path /var/log/containers/*.log
pos_file /opt/bitnami/fluentd/logs/buffers/fluentd-docker.pos
tag www.log
<parse>
@type regexp
expression /^(?<host>[^ ]*) [^ ]* (?<user>[^ ]*) \[(?<time>[^\]]*)\] \\"(?<method>\S+)(?: +(?<path>[^ ]*) +\S*)?\\" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)")?$/
time_format %d/%b/%Y:%H:%M:%S %z
</parse>
</source>
# Forward all logs to the aggregators
<match **>
@type forward
<server>
host fluentd-0.fluentd-headless.logging.svc.cluster.local
port 24224
</server>
<buffer>
@type file
path /opt/bitnami/fluentd/logs/buffers/logs.buffer
flush_thread_count 2
flush_interval 5s
</buffer>
</match>
EOF
"fluentd.conf" = file(format("%v/config_map_data/%v.fluentd.conf",path.root,"apache-log-parser"))
}
}
4 changes: 4 additions & 0 deletions examples/efk/outputs.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
output "cluster_endpoint" {
description = "The endpoint used to reach the Kubernetes API server"
value = data.aws_eks_cluster.cluster.endpoint
}
34 changes: 0 additions & 34 deletions examples/efk/providers.tf

This file was deleted.

4 changes: 4 additions & 0 deletions examples/efk/region.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
locals {
region = var.region
}

17 changes: 17 additions & 0 deletions examples/efk/tf-run.data
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
VERSION 1.0.5
REMOTE-STATE
COMMAND tf-directory-setup.py -l none -f
COMMAND setup-new-directory.sh
COMMAND tf-init -upgrade
COMMAND ln -sf ../settings.auto.tfvars
COMMAND ln -sf ../variables.vpc.auto.tfvars .
COMMAND ln -sf ../variables.vpc.tf
data.aws_eks_cluster.cluster null_resource.eks_cluster
COMMAND tf-directory-setup.py -l s3
COMMAND tf-output cluster_endpoint

COMMENT Be sure to set the appropriate proxy configuration for pulling images
STOP Add to environment variable NO_PROXY the DNS name of the cluster_endpoint shown in the tf-output above because the EKS API will not work through the proxy

module.images
ALL
15 changes: 0 additions & 15 deletions examples/efk/variables.eks.tf

This file was deleted.

0 comments on commit 906899d

Please sign in to comment.