Day 81 Fluentd & FluentBit
[Images: two existing screenshots replaced (850 KiB → 32 KiB, 364 KiB → 16 KiB); Days/Images/Day81_Monitoring3.png (12 KiB) and Days/Images/Day81_Monitoring4.png (44 KiB) added]
@ -1,13 +0,0 @@ (containers-fluent.conf, removed)
<source>
  @type tail
  format json
  read_from_head true
  tag docker.log
  path /fluentd/log/containers/*/*-json.log
  pos_file /tmp/container-logs.pos
</source>

# <match docker.log>
#   @type file
#   path /output/docker.log
# </match>
@ -1,26 +0,0 @@ (elastic-fluent.conf, removed)
# where to send http logs
<match http-*.log>
  @type elasticsearch
  host elasticsearch
  port 9200
  index_name fluentd-http
  type_name fluentd
</match>

# where to send file logs
<match file-myapp.log>
  @type elasticsearch
  host elasticsearch
  port 9200
  index_name fluentd-file
  type_name fluentd
</match>

# where to send docker logs
<match docker.log>
  @type elasticsearch
  host elasticsearch
  port 9200
  index_name fluentd-docker
  type_name fluentd
</match>
@ -1,20 +0,0 @@ (file-fluent.conf, removed)
<source>
  @type tail
  format json
  read_from_head true
  tag file-myapp.log
  path /fluentd/log/files/example-log.log
  pos_file /tmp/example-log.log.pos
</source>

<filter file-myapp.log>
  @type record_transformer
  <record>
    host_param "#{Socket.gethostname}"
  </record>
</filter>

# <match file-myapp.log>
#   @type file
#   path /output/file-myapp.log
# </match>
@ -1,16 +0,0 @@ (fluent.conf, removed)
################################################################
# This source reads the tail of a file
@include file-fluent.conf

################################################################
# This source gets incoming logs over HTTP
@include http-fluent.conf

################################################################
# This source gets all logs from the local docker host
@include containers-fluent.conf

################################################################
# Send all logs to elasticsearch
@include elastic-fluent.conf
@ -1,19 +0,0 @@ (http-fluent.conf, removed)
<source>
  @type http
  port 9880
  bind 0.0.0.0
  body_size_limit 32m
  keepalive_timeout 10s
</source>

<filter http-*.log>
  @type record_transformer
  <record>
    host_param "#{Socket.gethostname}"
  </record>
</filter>

# <match http-*.log>
#   @type file
#   path /output/http.log
# </match>
@ -1,49 +0,0 @@ (docker-compose.yaml, removed)
version: "3"
services:
  fluentd:
    container_name: fluentd
    user: root
    build:
      context: .
    image: fluentd
    volumes:
      - /var/lib/docker/containers:/fluentd/log/containers # Example: reading docker logs
      - ./file:/fluentd/log/files/ # Example: reading logs from a file
      - ./configurations:/fluentd/etc/
      - ./logs:/output/ # Example: Fluentd will collect logs and store them here for the demo
    logging:
      driver: "local"
  # This app sends logs to Fluentd via HTTP
  http-myapp:
    container_name: http-myapp
    image: alpine
    volumes:
      - ./http:/app
    command: [ /bin/sh, -c, "apk add --no-cache curl && chmod +x /app/app.sh && ./app/app.sh" ]
  # This app writes logs to a local file
  file-myapp:
    container_name: file-myapp
    image: alpine
    volumes:
      - ./file:/app
    command: [ /bin/sh, -c, "chmod +x /app/app.sh && ./app/app.sh" ]
  elasticsearch: # port 9200
    image: elasticsearch:7.9.1
    container_name: elasticsearch
    environment:
      - node.name=elasticsearch
      - cluster.initial_master_nodes=elasticsearch
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
  kibana:
    image: kibana:7.9.1
    container_name: kibana
    ports:
      - "5601:5601"
    environment:
      ELASTICSEARCH_URL: http://elasticsearch:9200
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
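To bring up the full demo stack defined by this compose file, the usual Compose invocation applies (a usage sketch; the introduction readme further down brings services up individually):

```
docker-compose up -d
```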
@ -1,4 +0,0 @@ (Dockerfile, removed)
FROM fluent/fluentd:v1.11-debian

USER root
RUN gem install fluent-plugin-elasticsearch
@ -1,7 +0,0 @@ (file/app.sh, removed)
#!/bin/sh
while true
do
  echo "Writing log to a file"
  echo '{"app":"file-myapp"}' >> /app/example-log.log
  sleep 5
done
@ -1,244 +0,0 @@ (file/example-log.log, removed)
This is a log
{"app":"file-myapp"}
[... the same JSON line repeated for the remainder of the 244-line file ...]
@ -1,7 +0,0 @@ (http/app.sh, removed)
#!/bin/sh
while true
do
  echo "Sending logs to FluentD"
  curl -X POST -d 'json={"foo":"bar"}' http://fluentd:9880/http-myapp.log
  sleep 5
done
@ -1,2 +0,0 @@ (readme in the logs folder, removed)
Fluentd will collect all container logs and write them in this folder.
@ -1,28 +0,0 @@ (introduction readme, removed)
# Introduction to Fluentd

## Collecting logs from files

To read logs from a file, we need an application that writes logs to a file. Let's start one:

```
cd monitoring\logging\fluentd\introduction\

docker-compose up -d file-myapp
```

To collect the logs, let's start Fluentd:

```
docker-compose up -d fluentd
```

## Collecting logs over HTTP (incoming)

```
cd monitoring\logging\fluentd\introduction\

docker-compose up -d http-myapp
```
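Once `http-myapp` is up, you can also fire a one-off event at Fluentd's HTTP input yourself. A minimal sketch, reusing the demo's own curl call and assuming the containers share the Compose network (port 9880 is not published to the host):

```
docker exec http-myapp curl -s -X POST -d 'json={"foo":"bar"}' http://fluentd:9880/http-myapp.log
```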
Days/day81.md (170 changes)
@ -2,71 +2,141 @@
Another data collector that I wanted to explore as part of this observability section was [Fluentd](https://docs.fluentd.org/), an open-source unified logging layer.

Fluentd treats logs as JSON where possible. It has four key features that make it suitable for building clean, reliable logging pipelines:
Unified Logging with JSON: Fluentd tries to structure data as JSON as much as possible. This allows Fluentd to unify all facets of processing log data: collecting, filtering, buffering, and outputting logs across multiple sources and destinations. The downstream data processing is much easier with JSON, since it has enough structure to be accessible without forcing rigid schemas.

Pluggable Architecture: Fluentd has a flexible plugin system that allows the community to extend its functionality. Over 300 community-contributed plugins connect dozens of data sources to dozens of data outputs, manipulating the data as needed. By using plugins, you can make better use of your logs right away.

Minimum Resources Required: A data collector should be lightweight so that it runs comfortably on a busy machine. Fluentd is written in a combination of C and Ruby, and requires minimal system resources. The vanilla instance runs on 30-40MB of memory and can process 13,000 events/second/core.

Built-in Reliability: Data loss should never happen. Fluentd supports memory- and file-based buffering to prevent inter-node data loss. Fluentd also supports robust failover and can be set up for high availability.

[Installing Fluentd](https://docs.fluentd.org/quickstart#step-1-installing-fluentd)
## Collecting logs from files

### How do apps log data?

We all have applications that write to a `.log` file; Fluentd has the ability to read logs from a file with the configuration we have set, which we will cover shortly. Below you can see me bringing up the two containers: `docker-compose up -d file-myapp` and then `docker-compose up -d fluentd`.

Apps typically log in one of these ways:

- Write to files: `.log` files (difficult to analyse without a tool and at scale)
- Log directly to a database (each application must be configured with the correct format)
- Via third-party applications (NodeJS, NGINX, PostgreSQL)

This is why we want a unified logging layer.
Fluentd accepts all three logging data types shown above and gives us the ability to collect, process, and send those logs to a destination, for example Elasticsearch, MongoDB, or Kafka.

Any data, from any data source, can be sent to Fluentd and then forwarded to any destination; Fluentd is not tied to any particular source or destination.

In my research of Fluentd I kept stumbling across Fluent Bit as another option, and it looks like if you are looking to deploy a logging tool into your Kubernetes environment, Fluent Bit gives you that capability, even though Fluentd can also be deployed to containers as well as servers.

[Fluentd & Fluent Bit](https://docs.fluentbit.io/manual/about/fluentd-and-fluent-bit)
Fluentd and Fluent Bit use input plugins to transform incoming data into their internal format, and output plugins to send it to whatever the output target is, such as Elasticsearch.

We can also use tags and matches between configurations, as the sketch below shows.
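A minimal sketch of that tag-based routing (the tag name is illustrative, the paths are borrowed from the demo configuration above, and the stdout output is just for demonstration): a source emits events under a tag, and a `<match>` block picks them up by pattern.

```
<source>
  @type tail
  format json
  tag myapp.log
  path /fluentd/log/files/example-log.log
  pos_file /tmp/example-log.log.pos
</source>

# any event whose tag matches myapp.* is printed to stdout
<match myapp.*>
  @type stdout
</match>
```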
I cannot see a good reason for using Fluentd here, and it seems that Fluent Bit is the best way to get started, although they can be used together in some architectures.
### Fluent Bit in Kubernetes

Fluent Bit in Kubernetes is deployed as a DaemonSet, which means it runs on each node in the cluster. Each Fluent Bit pod then reads the logs of every container on its node and gathers all of the logs available. It also gathers metadata from the Kubernetes API server.

Kubernetes annotations can be used within the configuration YAML of our applications, as in the sketch below.
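A hedged example of such an annotation (the pod name and image are illustrative): Fluent Bit's `kubernetes` filter recognises `fluentbit.io/parser`, which tells it which pre-defined parser to apply to this pod's logs.

```
apiVersion: v1
kind: Pod
metadata:
  name: http-myapp              # illustrative name
  annotations:
    fluentbit.io/parser: json   # apply the pre-defined json parser to this pod's logs
spec:
  containers:
    - name: app
      image: alpine
```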
First of all, we can add the Fluent helm repository with `helm repo add fluent https://fluent.github.io/helm-charts` and then install using the `helm install fluent-bit fluent/fluent-bit` command.
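As a block (with `helm repo update` added as the usual refresh step):

```
helm repo add fluent https://fluent.github.io/helm-charts
helm repo update
helm install fluent-bit fluent/fluent-bit
```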

If you watch [Introduction to Fluentd: Collect logs and send almost anywhere](https://www.youtube.com/watch?v=Gp0-7oVOtPw&t=447s) from "That DevOps Guy" (some amazing content that has been linked throughout this whole challenge), you will see that I am using his example here.

With the container `file-myapp` there is a script inside that appends to an example log file. You can see this happening below.
In my cluster I am also running Prometheus in my default namespace (for test purposes). We need to make sure our fluent-bit pod is up and running; we can do this using `kubectl get all | grep fluent`, which will show us the running pod, service and DaemonSet that we mentioned earlier.

![](Images/Day81_Monitoring1.png)
The script that we have running inside of our `file-myapp` container looks like this (first block below):

So that Fluent Bit knows where to get logs from, we have a configuration file; in this Kubernetes deployment of Fluent Bit, the configuration lives in a ConfigMap.

![](Images/Day81_Monitoring2.png)

That ConfigMap will look something like the second block below:
```
#!/bin/sh
while true
do
  echo "Writing log to a file"
  echo '{"app":"file-myapp"}' >> /app/example-log.log
  sleep 5
done
```

```
Name:         fluent-bit
Namespace:    default
Labels:       app.kubernetes.io/instance=fluent-bit
              app.kubernetes.io/managed-by=Helm
              app.kubernetes.io/name=fluent-bit
              app.kubernetes.io/version=1.8.14
              helm.sh/chart=fluent-bit-0.19.21
Annotations:  meta.helm.sh/release-name: fluent-bit
              meta.helm.sh/release-namespace: default

Data
====
custom_parsers.conf:
----
[PARSER]
    Name docker_no_time
    Format json
    Time_Keep Off
    Time_Key time
    Time_Format %Y-%m-%dT%H:%M:%S.%L

fluent-bit.conf:
----
[SERVICE]
    Daemon Off
    Flush 1
    Log_Level info
    Parsers_File parsers.conf
    Parsers_File custom_parsers.conf
    HTTP_Server On
    HTTP_Listen 0.0.0.0
    HTTP_Port 2020
    Health_Check On

[INPUT]
    Name tail
    Path /var/log/containers/*.log
    multiline.parser docker, cri
    Tag kube.*
    Mem_Buf_Limit 5MB
    Skip_Long_Lines On

[INPUT]
    Name systemd
    Tag host.*
    Systemd_Filter _SYSTEMD_UNIT=kubelet.service
    Read_From_Tail On

[FILTER]
    Name kubernetes
    Match kube.*
    Merge_Log On
    Keep_Log Off
    K8S-Logging.Parser On
    K8S-Logging.Exclude On

[OUTPUT]
    Name es
    Match kube.*
    Host elasticsearch-master
    Logstash_Format On
    Retry_Limit False

[OUTPUT]
    Name es
    Match host.*
    Host elasticsearch-master
    Logstash_Format On
    Logstash_Prefix node
    Retry_Limit False

Events:  <none>
```
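That output has the shape of a `kubectl describe` on the ConfigMap, so presumably something like:

```
kubectl describe configmap fluent-bit
```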
We are using the [tail plugin](https://docs.fluentd.org/input/tail) within Fluentd, which allows us to read events from the tail of text files, similar to the `tail -F` command.
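As a rough shell analogy (illustrative only), the tail input behaves like continuously following a file, while additionally remembering its read position in the pos_file across restarts:

```
tail -F /fluentd/log/files/example-log.log
```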
We can also use the [HTTP plugin](https://docs.fluentd.org/input/http), which allows us to send events through HTTP requests. This is what we will see with the `http-myapp` container in the repository. This container runs a similar shell script to generate logs:
```
#!/bin/sh
while true
do
  echo "Sending logs to FluentD"
  curl -X POST -d 'json={"foo":"bar"}' http://fluentd:9880/http-myapp.log
  sleep 5
done
```
### Container logging
We are going to be using docker-compose to bring up a small demo environment that demonstrates Fluentd. The compose file brings up the following containers:

- `fluentd`
- `http-myapp`
- `file-myapp`
- `elasticsearch`
- `kibana`
We can now port-forward our pod to our localhost to ensure that we have connectivity. First, get the name of your pod with `kubectl get pods | grep fluent`, then use `kubectl port-forward fluent-bit-8kvl4 2020:2020` and open a web browser to http://localhost:2020/.
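As a block (your pod name suffix will differ from `8kvl4`):

```
kubectl get pods | grep fluent
kubectl port-forward fluent-bit-8kvl4 2020:2020
# in another terminal: Fluent Bit's built-in HTTP server (HTTP_Port 2020 above) should respond
curl http://localhost:2020/
```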

I also found this really great Medium article covering more about [Fluent Bit](https://medium.com/kubernetes-tutorials/exporting-kubernetes-logs-to-elasticsearch-using-fluent-bit-758e8de606af).

## Resources
@ -82,6 +152,8 @@
- [Log Management what DevOps need to know](https://devops.com/log-management-what-devops-teams-need-to-know/)
- [What is ELK Stack?](https://www.youtube.com/watch?v=4X0WLg05ASw)
- [Fluentd simply explained](https://www.youtube.com/watch?v=5ofsNyHZwWE&t=14s)
- [Fluent Bit explained | Fluent Bit vs Fluentd](https://www.youtube.com/watch?v=B2IS-XS-cc0)

See you on [Day 82](day82.md)