ES Deployment

Create the data directories

mkdir -pv /data/elasticsearch/{config,data,logs}
chown 1000 /data/elasticsearch/{data,logs}

Modify the host configuration

vim /etc/sysctl.conf
add:
vm.max_map_count=655360
sysctl -p

vim /etc/security/limits.conf
add:
* soft memlock unlimited
* hard memlock unlimited
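
To confirm the changes took effect (the memlock limit only applies to new login sessions):

sysctl vm.max_map_count    # expect 655360
ulimit -l                  # expect "unlimited" in a new session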

Configuration file

cat > /data/elasticsearch/config/elasticsearch.yml << 'EOF'
cluster.name: git_es_cluster
node.name: node-3
network.host: 3.1.101.35
http.port: 9200
bootstrap.memory_lock: true

# Allow cross-origin (CORS) access
http.cors.enabled: true
http.cors.allow-origin: "*"

http.cors.allow-headers: Authorization

# Cluster
node.master: true
transport.tcp.port: 9300
discovery.seed_hosts: ["3.1.101.33", "3.1.101.34", "3.1.101.35"]
cluster.initial_master_nodes: ["node-1","node-2","node-3"]

# X-Pack
xpack.security.enabled: false
xpack.security.authc.accept_default_password: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/elastic-certificates.p12
EOF


chown 1000 /data/elasticsearch/config/*

Formula for discovery.zen.minimum_master_nodes: number of nodes / 2 + 1
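
For example, with a 3-node cluster: 3 / 2 + 1 = 2 (integer division), so at least two master-eligible nodes must agree before a master is elected. Note that in Elasticsearch 7.x this setting no longer has any effect; cluster bootstrap is controlled by cluster.initial_master_nodes as configured above.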

Auto-generate passwords:
elasticsearch-setup-passwords auto

Set custom passwords:
elasticsearch-setup-passwords interactive
elasticsearch-setup-passwords interactive -EELASTIC_PASSWORD="123456"
Generate the certificate:
elasticsearch-certutil ca -out config/elastic-certificates.p12 -pass ""
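
With the docker deployment below, these tools are run from bin/ inside the container. A rough sequence (container name es as in the compose file; the generated .p12 must end up in /data/elasticsearch/config/ on every node):

docker exec -it es bin/elasticsearch-certutil ca -out config/elastic-certificates.p12 -pass ""
docker cp es:/usr/share/elasticsearch/config/elastic-certificates.p12 /data/elasticsearch/config/
docker exec -it es bin/elasticsearch-setup-passwords auto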

es-head login
http://3.1.101.33:9200/?auth_user=elastic&auth_password=elastic123456

docker-compose orchestration

version: "3"
services:
es:
container_name: es
image: elasticsearch:7.11.1
network_mode: host
restart: always
volumes:
- /etc/localtime:/etc/localtime
- /data/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
- /data/elasticsearch/logs:/usr/share/elasticsearch/logs
- /data/elasticsearch/data:/usr/share/elasticsearch/data
- /data/elasticsearch/config/elastic-certificates.p12:/usr/share/elasticsearch/config/elastic-certificates.p12
environment:
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms2G -Xmx2G"
ulimits:
memlock:
soft: -1
hard: -1

es-admin:
container_name: es-admin
image: mobz/elasticsearch-head:5
ports:
- "9201:9100"

Deploy es-head only on node-1; node-2 and node-3 only need the es service.
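
Once every node is up, the cluster can be verified from any host (IPs as used in this article):

docker-compose up -d
curl -s http://3.1.101.33:9200/_cluster/health?pretty    # expect "number_of_nodes" : 3 and "status" : "green"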

Logstash Deployment

Create the data directories

mkdir -pv /data/logstash/{config,pipeline,data,logs}
chown 1000 /data/logstash/{config,pipeline,data,logs}

Configuration files

logstash.yml

cat > /data/logstash/config/logstash.yml << 'EOF'
node.name: logstash-node1
http.host: "0.0.0.0"
path.data: data
path.logs: /usr/share/logstash/logs
path.config: /usr/share/logstash/pipeline/
config.reload.automatic: true
config.test_and_exit: false
pipeline.id: main
pipeline.batch.size: 125
xpack.monitoring.enabled: false
xpack.monitoring.elasticsearch.hosts: [ "http://3.1.101.33:9200","http://3.1.101.34:9200","http://3.1.101.35:9200" ]
EOF

git_logstash.conf

pipeline/git_logstash.conf

cat > /data/logstash/pipeline/git_logstash.conf << 'EOF'
## The multiline plugin can also be used for other stack-trace-style output, such as Linux kernel logs.
input {
  ##stdin { }
  kafka {
    ## error-log-<service name>
    topics_pattern => "topic-test-.*"
    bootstrap_servers => "3.1.101.33:9092,3.1.101.34:9092,3.1.101.35:9092"
    codec => json
    consumer_threads => 1
    decorate_events => true
    #auto_offset_reset => "latest"
    group_id => "test-elk-group"
  }
}

filter {
  ruby {
    ## timezone conversion
    code => "event.set('index_time',event.timestamp.time.localtime.strftime('%Y-%m-%d'))"
  }
  grok {
    ## pattern
    ## match => ["message", "\[%{NOTSPACE:currentDateTime}\] \[%{DATA:traceInfo}\] \[%{NOTSPACE:level}\] \[%{NOTSPACE:class}\] \[%{DATA:hostName}\] \[%{DATA:ip}\] \[%{DATA:applicationName}\] \[%{DATA:location}\] \[%{DATA:messageInfo}\] ## (\'\'|%{QUOTEDSTRING:throwable})"]
    match => ["message", "\[%{NOTSPACE:currentDateTime}\] \[%{NOTSPACE:level}\] \[%{DATA:traceInfo}\] \[%{NOTSPACE:class}\] \[%{DATA:hostName}\] \[%{DATA:ip}\] \[%{DATA:applicationName}\] \[%{DATA:location}\] \[%{DATA:messageInfo}\] ## (\'\'|%{QUOTEDSTRING:throwable})"]
  }
}


## test output to the console:
output {
  stdout { codec => rubydebug }
}


## elasticsearch:
output {
  elasticsearch {
    ## ES addresses
    hosts => ["3.1.101.33:9200","3.1.101.34:9200","3.1.101.35:9200"]
    ## username and password
    ## user => "elastic"
    ## password => "123456"
    ## Index name; anything after a leading + is treated as a date format, e.g.:
    ## javalog-app-service-2019.01.23
    ## index => "app-log-%{[fields][logbiz]}-%{index_time}"
    index => "index-test-topic-%{index_time}"
    ## Sniff the cluster node IPs (usually true); see http://ip:9200/_nodes/http?pretty
    ## sniffing load-balances log delivery across the ES cluster
    sniffing => true
    ## Logstash ships a default mapping template; allow it to be overwritten
    template_overwrite => true
  }
}
EOF
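
The pipeline syntax can be checked before starting the service, for example with a throwaway container (a sketch assuming the paths above):

docker run --rm \
  -v /data/logstash/pipeline:/usr/share/logstash/pipeline \
  logstash:7.11.1 \
  logstash -f /usr/share/logstash/pipeline/git_logstash.conf --config.test_and_exit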

Input from Beats

## input from Beats
input {
  beats {
    port => 5044
  }
}

## output to elasticsearch
output {
  elasticsearch {
    ## ES addresses
    hosts => ["192.168.2.111:9200","192.168.2.112:9200","192.168.2.113:9200"]
    ## username and password
    ## user => "elastic"
    ## password => "elastic123456"
    index => "index-filebeat-%{index_time}"
  }
}
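
Note that %{index_time} in the index name is not produced by the beats input itself; it assumes this pipeline also carries a filter like the ruby snippet from git_logstash.conf above:

filter {
  ruby {
    ## derive a yyyy-MM-dd field from the event timestamp for use in the index name
    code => "event.set('index_time',event.timestamp.time.localtime.strftime('%Y-%m-%d'))"
  }
}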

Input from Kafka

## input from kafka
input {
  # https://www.elastic.co/guide/en/logstash/current/plugins-inputs-kafka.html
  # consume messages from Kafka
  kafka {
    # broker list; can be changed dynamically without restarting logstash
    bootstrap_servers => "192.168.2.111:9092,192.168.2.112:9092,192.168.2.113:9092"

    # offset commit interval, default 5000 ms
    auto_commit_interval_ms => 5000

    # consumer group
    group_id => "logstash"

    # consumer client id
    client_id => "es-1"

    # consumer threads (default 1); keep this equal to the number of consumed partitions,
    # otherwise the extra threads sit idle
    consumer_threads => 1

    # if Kafka has no initial offset, or the offset is out of range, reset to the latest offset
    auto_offset_reset => "latest"

    # topics, array format, multiple topics allowed
    topics => ["nginx-access-topic"]

    # custom field
    add_field => {"logstash" => "192.168.2.111"}
    codec => json { charset => "UTF-8" }
  }
}

## output to elasticsearch
output {
  elasticsearch {
    ## ES addresses
    hosts => ["192.168.2.111:9200","192.168.2.112:9200","192.168.2.113:9200"]
    ## username and password
    ## user => "elastic"
    ## password => "elastic123456"
    index => "index-nginx-access"
  }
}

docker-compose orchestration

version: "3"
services:
logstash:
container_name: logstash
image: logstash:7.11.1
restart: always
ports:
- "5044:5044"
volumes:
- /data/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
- /data/logstash/data:/usr/share/logstash/data
- /data/logstash/pipeline:/usr/share/logstash/pipeline
- /data/logstash/logs:/usr/share/logstash/logs
environment:
LS_JAVA_OPTS: "-Xmx2G -Xms2G"

Kibana Deployment

Create the data directories

mkdir -pv /data/kibana/{config,logs}
chown 1000 /data/kibana/{config,logs}

Configuration file

cat > /data/kibana/config/kibana.yml << 'EOF'
#
# ** THIS IS AN AUTO-GENERATED FILE **
#

# Default Kibana configuration for docker target
server.name: kibana
server.port: 5601
server.host: "0"
elasticsearch.hosts: [ "http://3.1.101.33:9200","http://3.1.101.34:9200","http://3.1.101.35:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN"

xpack.security.enabled: false
xpack.security.encryptionKey: "fhjskloppd678ehkdfdlliverpoolfcr"
elasticsearch.username: "elastic"
elasticsearch.password: "elastic123456"
EOF
To enable X-Pack security on the Kibana side instead, the relevant settings are:

elasticsearch.username: "elastic"
elasticsearch.password: "elastic"
xpack.security.enabled: true
xpack.security.encryptionKey: "4297f44b13955235245b2497399d7a93"

docker-compose orchestration

version: "3"
services:
kibana:
container_name: kibana
image: kibana:7.11.1
restart: always
ports:
- "5601:5601"
volumes:
- /data/kibana/config/kibana.yml:/opt/kibana/config/kibana.yml
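
After starting the container, Kibana's status endpoint is a convenient readiness check (run from the host where Kibana is deployed):

docker-compose up -d
curl -s http://localhost:5601/api/status    # returns JSON once Kibana is up; the overall state should be "green"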

Filebeat Deployment

Create the data directories

mkdir -pv /data/filebeat/{config,data,logs}

Configuration file

Output to Kafka

###################### Filebeat Configuration Example #########################
filebeat.idle_timeout: 2s
filebeat.name: lb-m
filebeat.spool_size: 50000
#----------------------------------input from nginx access_log--------------------------------#
filebeat.inputs:
- type: log
  paths:
    - /data/nginx/logs/kibana-*.log
  fields:
    topic: nginx-access-topic
    log_source: nginx-01
  enabled: true
  backoff: 1s
  backoff_factor: 2
  close_inactive: 1h
  encoding: plain
  harvester_buffer_size: 262144
  max_backoff: 10s
  max_bytes: 10485760
  scan_frequency: 5s
  tail_lines: true
#----------------------------------input from nginx error_log--------------------------------#
- type: log
  paths:
    - /data/nginx/logs/error.log*
  fields:
    topic: nginx-error-topic
    log_source: nginx-02
  enabled: true
  backoff: 1s
  backoff_factor: 2
  close_inactive: 1h
  encoding: plain
  harvester_buffer_size: 262144
  max_backoff: 10s
  max_bytes: 10485760
  scan_frequency: 5s
  tail_lines: true
#----------------------------------Kafka output--------------------------------#
output.kafka:
  version: "1.0.1"
  enabled: true
  hosts: ['192.168.2.111:9092','192.168.2.112:9092','192.168.2.113:9092']
  topic: '%{[fields.topic]}'
  required_acks: 1             # default
  compression: gzip            # default
  compression_level: 0         # 0 = no compression; compression costs CPU
  max_message_bytes: 1000000   # default
  codec.format:
    string: '%{[message]} %{[fields.log_source]}'
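
The file can be syntax-checked before the container is wired into docker-compose (a sketch assuming the host path above):

docker run --rm \
  -v /data/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml \
  elastic/filebeat:7.11.1 \
  filebeat test config -c /usr/share/filebeat/filebeat.yml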

docker-compose orchestration

version: "3"
services:
filebeat:
container_name: filebeat
image: elastic/filebeat:7.11.1
user: root
restart: always
volumes:
- /data/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml
- /data/filebeat/data:/usr/share/filebeat/data/registry
- /data/nginx/logs:/data/nginx/logs
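
After starting the service, the Filebeat logs show whether the harvesters started and whether events are being published to Kafka:

docker-compose up -d
docker logs -f filebeat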