k8s部署elk+filebeat+logstash+kafka集群(三)logstash部署
k8s 部署 logstash,用于消费 kafka 中的日志数据。
logstash采用statefulset或者deployment部署均可,本文采用deployment
过滤条件根据实际环境的日志结构自行配置
configmap配置文件
# cat logstash-cm.yaml
# ConfigMap holding the Logstash pipeline definition; the Deployment mounts it
# as /usr/share/logstash/config/logstash.conf via subPath.
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-configmap
  namespace: elk
  labels:
    app: logstash-cm
data:
  logstash.conf: |-
    # Input 1: consume the three topics from the Kafka (KRaft) cluster.
    input {
      kafka {
        bootstrap_servers => ["kafka-kraft-statefulset-0.kafka-kraft-svc:9091,kafka-kraft-statefulset-1.kafka-kraft-svc:9091,kafka-kraft-statefulset-2.kafka-kraft-svc:9091"]
        client_id => "k8s_logstash"
        group_id => "k8s"
        auto_offset_reset => "latest"
        consumer_threads => 3
        # FIX: was false. The output section below routes on
        # [@metadata][kafka][topic]; that field is only populated when
        # decorate_events is enabled, so with false the nginx-error-log
        # branch could never match.
        decorate_events => true
        topics => ["k8s","nginx-access-log","nginx-error-log"]
        codec => "json"
      }
    }
    # Input 2: accept direct pushes from filebeat (exposed via NodePort 30044).
    input {
      beats {
        host => "0.0.0.0"
        port => 5044
        codec => json
      }
    }
    filter {
      #json {
      #  source => "msg"
      #  remove_field => [ "msg" ]
      #}
      # Parse nginx access lines into structured fields and use the request
      # timestamp as the event time.
      if "nginx-access" in [tags] {
        grok {
          match => { "message" => "%{IPORHOST:clientip} --- .* \[%{HTTPDATE:timestamp}\]\s(?<method>\S+)\s(?<uri>\/(\S)+)\s(?<protocol>(HTTP|HTTPS)\S+)\s-\s(?<response>\d+)\s" }
        }
        date {
          match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
          target => "@timestamp"
        }
        mutate {
          remove_field => ["agent","event","host","@version","cloud","input","ecs","container","offset"]
        }
      }
      # Strip noisy beats/k8s metadata from cluster logs.
      if [fields][log_topic] == "k8s" {
        mutate {
          remove_field => ["log","agent","ecs","kubernetes","host","container"]
        }
      }
    }
    output {
      # Console output so consumed events are visible during bring-up; this
      # makes the container's own log volume very large — remove it once the
      # pipeline is verified.
      # FIX: the original declared codec twice (rubydebug, then json) in this
      # one stdout block; duplicate settings in a single plugin block are a
      # configuration error. rubydebug kept for readability.
      stdout {
        codec => rubydebug
      }
      if "k8s" in [tags] {
        elasticsearch {
          hosts => ["es-svc:9200"]
          index => "k8slog-%{+YYYY.MM.dd}"
        }
      }
      if "nginx-access" in [tags] {
        elasticsearch {
          hosts => ["es-svc:9200"]
          index => "nginx-access-%{+YYYY.MM.dd}"
        }
      }
      # Requires decorate_events (see kafka input above).
      if [@metadata][kafka][topic] == "nginx-error-log" {
        elasticsearch {
          hosts => ["es-svc:9200"]
          index => "nginx-error-%{+YYYY.MM.dd}"
        }
      }
    }
部署logstash
# cat logstash.yaml
# logstash.yml配置文件
# Settings file for the Logstash process itself; mounted as
# /usr/share/logstash/config/logstash.yml via subPath.
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-yml
  namespace: elk
  labels:
    type: logstash
data:
  logstash.yml: |-
    # Bind the monitoring/metrics HTTP API on all interfaces.
    http.host: "0.0.0.0"
    xpack.monitoring.elasticsearch.hosts: http://es-svc:9200
    # NOTE(review): legacy internal monitoring is deprecated in the 8.x
    # stack; Metricbeat-based stack monitoring is the recommended path —
    # confirm before relying on this long-term.
    xpack.monitoring.enabled: true
---
# logstash-svc:如果不经过 kafka,可以通过 NodePort 30044 让集群外的 filebeat 直接向 logstash 推送数据
apiVersion: v1
kind: Service
metadata:
  name: logstash-svc
  namespace: elk
  labels:
    app: logstash
spec:
  type: NodePort
  # NOTE(review): pinning a clusterIP makes the apply fail if the address is
  # already allocated; drop this line unless a fixed IP is truly required.
  clusterIP: 10.96.100.101
  selector:
    app: logstash-8.1.0
  ports:
    # Forwards to the container port named "logport" (beats input, 5044);
    # nodePort 30044 is for filebeat instances outside the cluster.
    - name: logstashport
      port: 5044
      targetPort: logport
      protocol: TCP
      nodePort: 30044
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash-deployment
  namespace: elk
spec:
  replicas: 2
  selector:
    matchLabels:
      app: logstash-8.1.0
  template:
    metadata:
      labels:
        app: logstash-8.1.0
    spec:
      containers:
        - name: logstash
          image: 3.127.33.174:8443/elk/logstash:8.1.0
          imagePullPolicy: IfNotPresent
          env:
            # FIX: was ES_JAVA_OPTS, which the Logstash launcher ignores
            # (that variable is for Elasticsearch). Logstash reads
            # LS_JAVA_OPTS for JVM heap settings.
            - name: LS_JAVA_OPTS
              value: "-Xms512m -Xmx512m"
            - name: TZ
              value: Asia/Shanghai
          ports:
            - name: logport
              containerPort: 5044
              protocol: TCP
          # Run the pipeline mounted from the logstash-configmap ConfigMap.
          command: ["logstash", "-f", "/usr/share/logstash/config/logstash.conf"]
          resources:
            limits:
              cpu: 1000m
              # NOTE(review): 8Gi limit vs a 512m JVM heap is a large gap —
              # tune these two values together.
              memory: 8192Mi
            requests:
              cpu: 500m
              memory: 2000Mi
          volumeMounts:
            - name: logstash-config
              mountPath: /usr/share/logstash/config/logstash.conf
              subPath: logstash.conf
            - name: timezone
              mountPath: /etc/localtime
            - name: logstash-yml
              mountPath: /usr/share/logstash/config/logstash.yml
              subPath: logstash.yml
      volumes:
        - name: logstash-config
          configMap:
            name: logstash-configmap
            # defaultMode: 0644
        - name: timezone
          hostPath:
            path: /etc/localtime
        - name: logstash-yml
          configMap:
            name: logstash-yml
            # defaultMode: 0644
      tolerations:
        # FIX: the original used key "node-role.kubernetes.io" with
        # operator Equal / value "master", which never matches the standard
        # control-plane taint — its key is "node-role.kubernetes.io/master"
        # with an empty value, so match by key existence.
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
          operator: Exists
        # Blanket toleration of any NoSchedule taint (kept from original;
        # note it already subsumes the one above).
        - effect: NoSchedule
          operator: Exists
      affinity:
        nodeAffinity:
          # Soft preference for the labelled nodes; scheduling elsewhere is
          # still allowed.
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              preference:
                matchExpressions:
                  - key: node
                    operator: In
                    values:
                      - master1
                      - node1
查看容器kubectl get pods -n elk | grep logstash
查看容器日志是否消费到数据 kubectl logs -f --tail 20 <logstash-pod-name> -n elk(Pod 名称以 kubectl get pods 的实际输出为准)
至此elk搭建完毕
更多推荐
已为社区贡献2条内容
所有评论(0)