04-4-日志收集
1. 日志收集方式
Kubernetes的业务Pod日志有两种输出方式:一种是直接打到标准输出或者标准错误,第二种是将日志写到特定目录下的文件中。针对这两种不同场景,提供了不同的容器日志收集思路。
1.1. Kubernetes日志收集思路
1.1.1. 使用节点代理收集日志
在各个Node节点上以Deamonset方式部署log-agent-pod,将宿主机上的日志目录挂载到log-agent-pod里面,由log-agent-pod将日志发送出去。常用的log-agent是通过Fluentd实现的。
1.1.2. 使用边车模式收集日志
在每个业务Pod中,启动一个辅助容器,将主容器的日志目录挂载到辅助容器中,将主容器刷盘的日志推送出去。这种模式下,辅助容器必须要在主容器之前启动,避免主容器丢日志。常用的辅助容器是filebeat实现的。
1.1.3. 日志输出方式的转换
- 标准输出-->写文件:command >/logs/stdout.log 2>/logs/stderr.log
- 写文件-->标准输出:tail -f xxx.log
1.2. 拓扑图
本实验采用辅助容器(边车模式)收集日志到ELK集群系统,本实验先将日志写到Kafka集群,然后再通过logstash消费Kafka中的数据,并写入Elasticsearch,这种方式是异步的,虽然消息的及时性不是特别高,但是性能比直接写Elasticsearch高很多。
2. 准备业务APP
2.1. 准备Tomcat基础镜像
2.1.1. 准备Tomcat
[root@hdss7-200 src]# wget http://mirrors.tuna.tsinghua.edu.cn/apache/tomcat/tomcat-8/v8.5.50/bin/apache-tomcat-8.5.50.tar.gz
[root@hdss7-200 src]# tar -xf apache-tomcat-8.5.50.tar.gz -C docker_files/tomcat/
[root@hdss7-200 src]# cd docker_files/tomcat/
[root@hdss7-200 tomcat]# rm -fr apache-tomcat-8.5.50/webapps/*
# (原文此处与 filebeat 镜像 Dockerfile 片段拼接错乱, 其 RUN 行结尾为: "... filebeat /bin && rm -fr /opt/filebeat*")
COPY docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]
[root@hdss7-200 filebeat]# cat entrypoint.sh
#!/bin/bash
# Sidecar entrypoint: render a filebeat config from env vars, then exec filebeat.
#   ENV        - deploy environment (dev/fat/...), becomes part of the kafka topic
#   PROJ_NAME  - project name, becomes part of the kafka topic
#   MULTILINE  - regex matching the FIRST line of a log record (multiline merge)
#   KAFKA_ADDR - broker list; each address must itself be quoted,
#                e.g. '"10.4.7.11:9092","10.4.7.12:9092"'
ENV=${ENV:-"dev"}
PROJ_NAME=${PROJ_NAME:-"no-define"}
# NOTE(fix): original default was "^d{2}" (missing backslash); log records are
# expected to start with a two-digit timestamp field, hence \d{2}.
MULTILINE=${MULTILINE:-'^\d{2}'}
KAFKA_ADDR=${KAFKA_ADDR:-'"10.4.7.11:9092"'}
# Unquoted EOF: ${PROJ_NAME}/$MULTILINE/$KAFKA_ADDR/$ENV are expanded here;
# %{[topic]} contains no "$" and is resolved per-event by filebeat itself.
cat > /etc/filebeat.yaml << EOF
filebeat.inputs:
- type: log
  fields_under_root: true
  fields:
    topic: logm-${PROJ_NAME}
  # multiline logs (e.g. java stack traces), merged before shipping
  paths:
    - /logm/*.log
    - /logm/*/*.log
    - /logm/*/*/*.log
  scan_frequency: 120s
  max_bytes: 10485760
  multiline.pattern: '$MULTILINE'
  multiline.negate: true
  multiline.match: after
  multiline.max_lines: 100
- type: log
  fields_under_root: true
  fields:
    topic: logu-${PROJ_NAME}
  # single-line logs, shipped as-is
  paths:
    - /logu/*.log
    - /logu/*/*.log
    - /logu/*/*/*.log
    - /logu/*/*/*/*.log
output.kafka:
  hosts: [${KAFKA_ADDR}]
  topic: k8s-fb-$ENV-%{[topic]}
  version: 2.0.0
  required_acks: 0
  max_message_bytes: 10485760
EOF
set -xe
# No argument: run filebeat with the generated config; otherwise run the
# given command (debug hook). exec replaces PID 1 so signals reach the child.
if [[ "$1" == "" ]]; then
  exec filebeat -c /etc/filebeat.yaml
else
  exec "$@"
fi
[root@hdss7-200 filebeat]# chmod +x entrypoint.sh
[root@hdss7-200 filebeat]# docker image build . -t harbor.od.com/public/filebeat:v7.4.0
[root@hdss7-200 filebeat]# docker image push harbor.od.com/public/filebeat:v7.4.0
4.2. 运行dubbo消费者
# 本deployment修改的是 apollo 章节中 dev 环境下 dubbo-demo-consumer 服务
# 需要apollo、生产者启动
# reference: https://www.yuque.com/duduniao/ww8pmw/pvwdlq#CPLkT
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dubbo-demo-consumer
  namespace: dev
  labels:
    name: dubbo-demo-consumer
spec:
  replicas: 1
  selector:
    matchLabels:
      name: dubbo-demo-consumer
  template:
    metadata:
      labels:
        app: dubbo-demo-consumer
        name: dubbo-demo-consumer
    spec:
      containers:
      # main container: tomcat app writing logs into the shared "logm" volume
      - name: dubbo-demo-consumer
        image: harbor.od.com/app/dubbo-demo-consumer:tomcat_20200209_1149
        ports:
        - containerPort: 8080
          protocol: TCP
        env:
        - name: C_OPTS
          value: -Denv=dev -Dapollo.meta=http://config-dev.od.com
        volumeMounts:
        - mountPath: /opt/tomcat/logs
          name: logm
      # sidecar: ships the shared log dir (mounted at /logm) to kafka
      - name: filebeat
        image: harbor.od.com/public/filebeat:v7.4.0
        env:
        - name: ENV
          value: dev
        - name: PROJ_NAME
          value: dubbo-demo-consumer
        - name: KAFKA_ADDR
          value: '"10.4.7.11:9092"'
        volumeMounts:
        - mountPath: /logm
          name: logm
      volumes:
      - emptyDir: {}   # pod-lifetime scratch volume shared by both containers
        name: logm
      imagePullSecrets:
      - name: harbor
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dev/dubbo-demo-consumer/deployment.yaml
2.1.1. 准备Tomcat
[root@hdss7-200 src]# wget http://mirrors.tuna.tsinghua.edu.cn/apache/tomcat/tomcat-8/v8.5.50/bin/apache-tomcat-8.5.50.tar.gz
[root@hdss7-200 src]# tar -xf apache-tomcat-8.5.50.tar.gz -C docker_files/tomcat/
[root@hdss7-200 src]# cd docker_files/tomcat/
[root@hdss7-200 tomcat]# rm -fr apache-tomcat-8.5.50/webapps/*
# (原文此处与 filebeat 镜像 Dockerfile 片段拼接错乱, 其 RUN 行结尾为: "... filebeat /bin && rm -fr /opt/filebeat*")
COPY docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]
[root@hdss7-200 filebeat]# cat entrypoint.sh
#!/bin/bash
# Sidecar entrypoint: render a filebeat config from env vars, then exec filebeat.
#   ENV        - deploy environment (dev/fat/...), becomes part of the kafka topic
#   PROJ_NAME  - project name, becomes part of the kafka topic
#   MULTILINE  - regex matching the FIRST line of a log record (multiline merge)
#   KAFKA_ADDR - broker list; each address must itself be quoted,
#                e.g. '"10.4.7.11:9092","10.4.7.12:9092"'
ENV=${ENV:-"dev"}
PROJ_NAME=${PROJ_NAME:-"no-define"}
# NOTE(fix): original default was "^d{2}" (missing backslash); log records are
# expected to start with a two-digit timestamp field, hence \d{2}.
MULTILINE=${MULTILINE:-'^\d{2}'}
KAFKA_ADDR=${KAFKA_ADDR:-'"10.4.7.11:9092"'}
# Unquoted EOF: ${PROJ_NAME}/$MULTILINE/$KAFKA_ADDR/$ENV are expanded here;
# %{[topic]} contains no "$" and is resolved per-event by filebeat itself.
cat > /etc/filebeat.yaml << EOF
filebeat.inputs:
- type: log
  fields_under_root: true
  fields:
    topic: logm-${PROJ_NAME}
  # multiline logs (e.g. java stack traces), merged before shipping
  paths:
    - /logm/*.log
    - /logm/*/*.log
    - /logm/*/*/*.log
  scan_frequency: 120s
  max_bytes: 10485760
  multiline.pattern: '$MULTILINE'
  multiline.negate: true
  multiline.match: after
  multiline.max_lines: 100
- type: log
  fields_under_root: true
  fields:
    topic: logu-${PROJ_NAME}
  # single-line logs, shipped as-is
  paths:
    - /logu/*.log
    - /logu/*/*.log
    - /logu/*/*/*.log
    - /logu/*/*/*/*.log
output.kafka:
  hosts: [${KAFKA_ADDR}]
  topic: k8s-fb-$ENV-%{[topic]}
  version: 2.0.0
  required_acks: 0
  max_message_bytes: 10485760
EOF
set -xe
# No argument: run filebeat with the generated config; otherwise run the
# given command (debug hook). exec replaces PID 1 so signals reach the child.
if [[ "$1" == "" ]]; then
  exec filebeat -c /etc/filebeat.yaml
else
  exec "$@"
fi
[root@hdss7-200 filebeat]# chmod +x entrypoint.sh
[root@hdss7-200 filebeat]# docker image build . -t harbor.od.com/public/filebeat:v7.4.0
[root@hdss7-200 filebeat]# docker image push harbor.od.com/public/filebeat:v7.4.0
4.2. 运行dubbo消费者
# 本deployment修改的是 apollo 章节中 dev 环境下 dubbo-demo-consumer 服务
# 需要apollo、生产者启动
# reference: https://www.yuque.com/duduniao/ww8pmw/pvwdlq#CPLkT
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dubbo-demo-consumer
  namespace: dev
  labels:
    name: dubbo-demo-consumer
spec:
  replicas: 1
  selector:
    matchLabels:
      name: dubbo-demo-consumer
  template:
    metadata:
      labels:
        app: dubbo-demo-consumer
        name: dubbo-demo-consumer
    spec:
      containers:
      # main container: tomcat app writing logs into the shared "logm" volume
      - name: dubbo-demo-consumer
        image: harbor.od.com/app/dubbo-demo-consumer:tomcat_20200209_1149
        ports:
        - containerPort: 8080
          protocol: TCP
        env:
        - name: C_OPTS
          value: -Denv=dev -Dapollo.meta=http://config-dev.od.com
        volumeMounts:
        - mountPath: /opt/tomcat/logs
          name: logm
      # sidecar: ships the shared log dir (mounted at /logm) to kafka
      - name: filebeat
        image: harbor.od.com/public/filebeat:v7.4.0
        env:
        - name: ENV
          value: dev
        - name: PROJ_NAME
          value: dubbo-demo-consumer
        - name: KAFKA_ADDR
          value: '"10.4.7.11:9092"'
        volumeMounts:
        - mountPath: /logm
          name: logm
      volumes:
      - emptyDir: {}   # pod-lifetime scratch volume shared by both containers
        name: logm
      imagePullSecrets:
      - name: harbor
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dev/dubbo-demo-consumer/deployment.yaml
# 本deployment修改的是 apollo 章节中 dev 环境下 dubbo-demo-consumer 服务
# 需要apollo、生产者启动
# reference: https://www.yuque.com/duduniao/ww8pmw/pvwdlq#CPLkT
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dubbo-demo-consumer
  namespace: dev
  labels:
    name: dubbo-demo-consumer
spec:
  replicas: 1
  selector:
    matchLabels:
      name: dubbo-demo-consumer
  template:
    metadata:
      labels:
        app: dubbo-demo-consumer
        name: dubbo-demo-consumer
    spec:
      containers:
      # main container: tomcat app writing logs into the shared "logm" volume
      - name: dubbo-demo-consumer
        image: harbor.od.com/app/dubbo-demo-consumer:tomcat_20200209_1149
        ports:
        - containerPort: 8080
          protocol: TCP
        env:
        - name: C_OPTS
          value: -Denv=dev -Dapollo.meta=http://config-dev.od.com
        volumeMounts:
        - mountPath: /opt/tomcat/logs
          name: logm
      # sidecar: ships the shared log dir (mounted at /logm) to kafka
      - name: filebeat
        image: harbor.od.com/public/filebeat:v7.4.0
        env:
        - name: ENV
          value: dev
        - name: PROJ_NAME
          value: dubbo-demo-consumer
        - name: KAFKA_ADDR
          value: '"10.4.7.11:9092"'
        volumeMounts:
        - mountPath: /logm
          name: logm
      volumes:
      - emptyDir: {}   # pod-lifetime scratch volume shared by both containers
        name: logm
      imagePullSecrets:
      - name: harbor
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dev/dubbo-demo-consumer/deployment.yaml
5. 部署logstash
5.1. 制作logstash镜像
[root@hdss7-200 ~]# docker image pull logstash:6.8.3
[root@hdss7-200 ~]# docker image tag logstash:6.8.3 harbor.od.com/public/logstash:v6.8.3
[root@hdss7-200 ~]# docker image push harbor.od.com/public/logstash:v6.8.3
5.2. 启动logstash
# 启动logstash,可以交付到k8s中,相关配置采用configmap方式挂载
# k8s在不同项目的名称空间中,可以创建不同的logstash,制定不同的索引
[root@hdss7-200 ~]# cat /etc/logstash/logstash-dev.conf
# Consume k8s-fb-dev-* topics from kafka, parse the filebeat JSON payload,
# and write to elasticsearch.
input {
  kafka {
    bootstrap_servers => "10.4.7.11:9092"
    client_id => "10.4.7.200"
    consumer_threads => 4
    group_id => "k8s_dev"
    topics_pattern => "k8s-fb-dev-.*"
  }
}
filter {
  json {
    source => "message"
  }
}
output {
  elasticsearch {
    hosts => ["10.4.7.12:9200"]
    # NOTE(fix): original was %{+YYYY.MM.DD} — Joda "DD" means day-of-YEAR,
    # and the indices actually shown later in this document (k8s-dev-2020.02)
    # are monthly, so use %{+YYYY.MM}.
    index => "k8s-dev-%{+YYYY.MM}"
  }
}
[root@hdss7-200 ~]# docker run -d --name logstash-dev -v /etc/logstash:/etc/logstash harbor.od.com/public/logstash:v6.8.3 -f /etc/logstash/logstash-dev.conf
024fa2bda8157710212a860a68ca869ffc09fef706907ca63e8b920db394cade
[root@hdss7-200 ~]# docker image pull logstash:6.8.3
[root@hdss7-200 ~]# docker image tag logstash:6.8.3 harbor.od.com/public/logstash:v6.8.3
[root@hdss7-200 ~]# docker image push harbor.od.com/public/logstash:v6.8.3
5.2. 启动logstash
# 启动logstash,可以交付到k8s中,相关配置采用configmap方式挂载
# k8s在不同项目的名称空间中,可以创建不同的logstash,制定不同的索引
[root@hdss7-200 ~]# cat /etc/logstash/logstash-dev.conf
# Consume k8s-fb-dev-* topics from kafka, parse the filebeat JSON payload,
# and write to elasticsearch.
input {
  kafka {
    bootstrap_servers => "10.4.7.11:9092"
    client_id => "10.4.7.200"
    consumer_threads => 4
    group_id => "k8s_dev"
    topics_pattern => "k8s-fb-dev-.*"
  }
}
filter {
  json {
    source => "message"
  }
}
output {
  elasticsearch {
    hosts => ["10.4.7.12:9200"]
    # NOTE(fix): original was %{+YYYY.MM.DD} — Joda "DD" means day-of-YEAR,
    # and the indices actually shown later in this document (k8s-dev-2020.02)
    # are monthly, so use %{+YYYY.MM}.
    index => "k8s-dev-%{+YYYY.MM}"
  }
}
[root@hdss7-200 ~]# docker run -d --name logstash-dev -v /etc/logstash:/etc/logstash harbor.od.com/public/logstash:v6.8.3 -f /etc/logstash/logstash-dev.conf
024fa2bda8157710212a860a68ca869ffc09fef706907ca63e8b920db394cade
6. Kibana部署
6.1. 准备镜像
[root@hdss7-200 ~]# docker pull kibana:6.8.3
[root@hdss7-200 ~]# docker image tag kibana:6.8.3 harbor.od.com/public/kibana:v6.8.3
[root@hdss7-200 ~]# docker image push harbor.od.com/public/kibana:v6.8.3
6.2. 准备资源配置清单
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: infra
  labels:
    name: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kibana
  template:
    metadata:
      labels:
        app: kibana
        name: kibana
    spec:
      containers:
      - name: kibana
        image: harbor.od.com/public/kibana:v6.8.3
        ports:
        - containerPort: 5601   # kibana web UI
          protocol: TCP
        env:
        - name: ELASTICSEARCH_URL   # backing elasticsearch instance
          value: http://10.4.7.12:9200
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: infra
spec:
  ports:
  - protocol: TCP
    port: 80          # cluster-facing port
    targetPort: 5601  # kibana container port
  selector:
    app: kibana
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kibana
  namespace: infra
spec:
  rules:
  - host: kibana.od.com   # DNS record added in od.com zone below
    http:
      paths:
      - path: /
        backend:
          serviceName: kibana
          servicePort: 80
6.3. 应用资源配置清单
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/devops/ELK/kibana/deployment.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/devops/ELK/kibana/ingress.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/devops/ELK/kibana/service.yaml
[root@hdss7-11 ~]# vim /var/named/od.com.zone
......
kibana A 10.4.7.10
[root@hdss7-11 ~]# systemctl restart named
6.4. 配置Kibana索引分组
[root@hdss7-200 ~]# docker pull kibana:6.8.3
[root@hdss7-200 ~]# docker image tag kibana:6.8.3 harbor.od.com/public/kibana:v6.8.3
[root@hdss7-200 ~]# docker image push harbor.od.com/public/kibana:v6.8.3
6.2. 准备资源配置清单
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: infra
  labels:
    name: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kibana
  template:
    metadata:
      labels:
        app: kibana
        name: kibana
    spec:
      containers:
      - name: kibana
        image: harbor.od.com/public/kibana:v6.8.3
        ports:
        - containerPort: 5601   # kibana web UI
          protocol: TCP
        env:
        - name: ELASTICSEARCH_URL   # backing elasticsearch instance
          value: http://10.4.7.12:9200
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: infra
spec:
  ports:
  - protocol: TCP
    port: 80          # cluster-facing port
    targetPort: 5601  # kibana container port
  selector:
    app: kibana
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kibana
  namespace: infra
spec:
  rules:
  - host: kibana.od.com   # DNS record added in od.com zone below
    http:
      paths:
      - path: /
        backend:
          serviceName: kibana
          servicePort: 80
6.3. 应用资源配置清单
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/devops/ELK/kibana/deployment.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/devops/ELK/kibana/ingress.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/devops/ELK/kibana/service.yaml
[root@hdss7-11 ~]# vim /var/named/od.com.zone
......
kibana A 10.4.7.10
[root@hdss7-11 ~]# systemctl restart named
6.4. 配置Kibana索引分组
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/devops/ELK/kibana/deployment.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/devops/ELK/kibana/ingress.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/devops/ELK/kibana/service.yaml
[root@hdss7-11 ~]# vim /var/named/od.com.zone
......
kibana             A    10.4.7.10
[root@hdss7-11 ~]# systemctl restart named
6.4. 配置Kibana索引分组
6.5. 多环境收日志
# 环境准备分为两步
# 1. 启动测试环境(fat)中dubbo-demo-consumer(tomcat)版本pod,参考4.2
# 2. 启动收集fat日志的logstash
[root@hdss7-200 ~]# docker run -d --name logstash-fat -v /etc/logstash:/etc/logstash harbor.od.com/public/logstash:v6.8.3 -f /etc/logstash/logstash-fat.conf
[root@hdss7-21 ~]# curl http://10.4.7.12:9200/_cat/indices?v
health status index           uuid                   pri rep docs.count docs.deleted store.size pri.store.size
green  open   k8s-dev-2020.02 rReG1iadTqulQhzGfPH6RQ 5   0   137        0            488.3kb    488.3kb
green  open   k8s-fat-2020.02 8TaKkfvCQzeDgSOba2-R4Q 5   0   8          0            63kb       63kb
......



