# ES-Logstash-Kibana deployment for log viewing
- Download the Docker images
Image sources: Kibana, Logstash, and Elasticsearch on Docker Hub.
Following common practice, keep all three components on the same version:
```bash
docker pull logstash:6.8.22
docker pull elasticsearch:6.8.22
docker pull kibana:6.8.22
docker tag logstash:6.8.22 harbor.yfklife.cn/devops/logstash:6.8.22
docker tag kibana:6.8.22 harbor.yfklife.cn/devops/kibana:6.8.22
docker push harbor.yfklife.cn/devops/logstash:6.8.22
docker push harbor.yfklife.cn/devops/kibana:6.8.22
```
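A quick optional check (not part of the original steps) that the three images are present locally with the unified tag before pushing:

```bash
# List the locally pulled/tagged 6.8.22 images to confirm the versions line up
docker images | grep 6.8.22
```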
# Deploy ES 6.8.22
Host: 192.168.14.12:9200
- ES 6 single-node configuration
```
[root@hdss14-12 ~]# cd /opt/elasticsearch/config
[root@hdss14-12 config]# grep -Ev "^$|#" elasticsearch.yml
cluster.name: my-application
node.name: 192.168.14.12
path.data: /opt/elasticsearch/data
path.logs: /opt/elasticsearch/logs
bootstrap.memory_lock: false
network.host: 192.168.14.12
http.port: 9200
```
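With the config in place, a quick sanity check (a minimal sketch, assuming ES has already been started on this host) that the node answers on the configured address and reports its health:

```bash
# Basic node info, then cluster health; with a single node and 0-replica indices the status should be green
curl http://192.168.14.12:9200
curl http://192.168.14.12:9200/_cluster/health?pretty
```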
- Single-node deployment: set replicas to 0 so the indices report green
```bash
curl -H "Content-Type:application/json" -XPUT http://192.168.14.12:9200/_template/k8s -d '{
  "template": "k8s*",
  "index_patterns": ["k8s*"],
  "settings": {
    "number_of_shards": 5,
    "number_of_replicas": 0
  }
}'
```
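To confirm the template was stored, it can be read back (a small check, not from the original article):

```bash
# Show the stored template and the settings it will apply to new k8s* indices
curl http://192.168.14.12:9200/_template/k8s?pretty
```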
- Scheduled cleanup of old indices
```bash
#!/bin/bash
# Get the year.month from about 35 days ago (i.e. last month) and delete those indices
curl -s http://192.168.14.12:9200/_cat/indices?v | grep $(date -d "-35day" +%Y.%m) | awk '{print $3}' > es_indices_overdue.txt
while read line
do
  curl -X DELETE http://192.168.14.12:9200/${line}
done < es_indices_overdue.txt
```
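The script is meant to run periodically; one way is a daily cron entry. A hedged sketch, assuming the script is saved as /opt/scripts/es_indices_clean.sh (the path is an assumption, adjust to wherever you keep it):

```bash
# Append a daily 02:30 cron job for the cleanup script (script path is assumed)
crontab -l 2>/dev/null | { cat; echo '30 2 * * * /bin/bash /opt/scripts/es_indices_clean.sh >/dev/null 2>&1'; } | crontab -
```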
# Deploy Logstash 6.8.22
Host: 192.168.14.21
- Kafka deployment
- Configure Logstash
```bash
test -d /opt/logstash || mkdir /opt/logstash && cd /opt/logstash
vi logstash-test.conf
```

```
input {
  kafka {
    bootstrap_servers => "192.168.14.21:9092"
    client_id => "192.168.14.21"        # this host
    consumer_threads => 4
    group_id => "k8s_test"              # consumer group for the test environment
    topics_pattern => "k8s-fb-test-.*"  # only consume topics starting with k8s-fb-test
  }
}
filter {
  json {
    source => "message"
  }
}
output {
  elasticsearch {
    hosts => ["192.168.14.12:9200"]
    index => "k8s-test-%{+YYYY.MM.DD}"  # note: DD is Joda day-of-year, which yields names like k8s-test-2022.07.191; use dd for day-of-month
  }
}
```
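Before starting the long-running container, the pipeline syntax can optionally be validated with Logstash's --config.test_and_exit flag (a sketch, not part of the original steps):

```bash
# Validate the pipeline config and exit; prints "Configuration OK" on success
docker run --rm -v /opt/logstash:/etc/logstash harbor.yfklife.cn/devops/logstash:6.8.22 \
  -f /etc/logstash/logstash-test.conf --config.test_and_exit
```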
- Run and verify
```bash
# start a test container
cd /opt/logstash
docker run -d --restart=always --name logstash_test -v `pwd`:/etc/logstash harbor.yfklife.cn/devops/logstash:6.8.22 -f /etc/logstash/logstash-test.conf

# start a prod container
cp logstash-test.conf logstash-prod.conf && sed -i 's#test#prod#g' logstash-prod.conf
docker run -d --restart=always --name logstash_prod -v `pwd`:/etc/logstash harbor.yfklife.cn/devops/logstash:6.8.22 -f /etc/logstash/logstash-prod.conf

docker ps | grep logstash
docker logs -f --tail 10 logstash_test
```
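If the containers start but nothing shows up in ES, it can help to check on the Kafka side that the consumer groups registered and are keeping up (a hedged sketch; the kafka-consumer-groups.sh location depends on how Kafka was installed on 192.168.14.21):

```bash
# List consumer groups, then show partition offsets/lag for the test group
kafka-consumer-groups.sh --bootstrap-server 192.168.14.21:9092 --list
kafka-consumer-groups.sh --bootstrap-server 192.168.14.21:9092 --describe --group k8s_test
```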
Refresh the zlt front-end project to generate some logs, then check ES:

```
[root@hdss14-21 efk]# curl http://192.168.14.12:9200/_cat/indices?v
health status index                uuid                   pri rep docs.count docs.deleted store.size pri.store.size
green  open   k8s-test-2022.07.191 VLLWesa2SEuy7UgsNBqxTw   5   0         38            0    141.8kb        141.8kb
```
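Optionally pull one document back from the new index to confirm real log events (not just an empty index) are arriving:

```bash
# Fetch a single document from any k8s-test-* index
curl -s 'http://192.168.14.12:9200/k8s-test-*/_search?size=1&pretty'
```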
# Deploy Kibana 6.8.22
Host: the K8s cluster
- kibana-svc-deployment
vi kibana-svc-deployment.yml
```yaml
kind: Service
apiVersion: v1
metadata:
  name: kibana
  namespace: devops
spec:
  ports:
    - protocol: TCP
      port: 5601
      targetPort: 5601
  selector:
    app: kibana
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: kibana
  namespace: devops
  labels:
    name: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kibana
  template:
    metadata:
      labels:
        app: kibana
        name: kibana
    spec:
      containers:
        - name: kibana
          image: harbor.yfklife.cn/devops/kibana:6.8.22
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 5601
              protocol: TCP
          env:
            - name: ELASTICSEARCH_URL
              value: http://192.168.14.12:9200
      imagePullSecrets:
        - name: harbor
      securityContext:
        runAsUser: 0
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 1
  revisionHistoryLimit: 7
  progressDeadlineSeconds: 600
```
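Once applied (see the apply step below), the Service can be checked without the Ingress by port-forwarding it locally (a quick sketch, not from the original article):

```bash
# Forward local 5601 to the kibana Service, then browse http://127.0.0.1:5601
kubectl -n devops port-forward svc/kibana 5601:5601
```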
- kibana-ingress
vi kibana-ingress.yml
```yaml
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: kibana
  namespace: devops
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`kibana.yfklife.cn`)
      kind: Rule
      services:
        - kind: Service
          name: kibana
          port: 5601
```
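After the route is applied, it can be exercised without DNS by sending the Host header to a node where traefik's web entryPoint listens (the node IP below is an assumption, adjust to your cluster):

```bash
# Expect an HTTP 200/302 from Kibana once the route and pod are ready
NODE_IP=192.168.14.11   # assumption: any node exposing traefik's "web" entryPoint on port 80
curl -s -o /dev/null -w "%{http_code}\n" -H "Host: kibana.yfklife.cn" http://${NODE_IP}/
```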
- Apply the manifests
```bash
kubectl create ns devops
kubectl apply -f kibana-svc-deployment.yml
kubectl apply -f kibana-ingress.yml
```
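A quick status check after applying (not in the original article):

```bash
# Confirm the Deployment, Service and pod are up, then tail the Kibana logs
kubectl -n devops get deploy,svc,pods -o wide
kubectl -n devops logs deploy/kibana --tail=20
```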
# Configure the Kibana dashboard
Add a DNS record for the domain, then open http://kibana.yfklife.cn
Check cluster status under Monitoring.
- Add an index pattern (Management)
- View the logs
Tip: for a different project, change the env value from test to prod in the earlier zlt-gateway-dp-svc.yml, then create a matching index pattern.
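If preferred, the index pattern can also be created through Kibana's saved objects API instead of the UI. A hedged sketch (the pattern id, title, and time field below are assumptions for the test environment):

```bash
# Create an index pattern k8s-test-* with @timestamp as the time field (Kibana 6.x saved objects API)
curl -X POST "http://kibana.yfklife.cn/api/saved_objects/index-pattern/k8s-test" \
  -H "kbn-xsrf: true" -H "Content-Type: application/json" \
  -d '{"attributes":{"title":"k8s-test-*","timeFieldName":"@timestamp"}}'
```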