EFK 本地部署笔记（Elasticsearch / Filebeat / Kibana / Logstash + ElastAlert2）

# Kernel parameters required by Elasticsearch (mmap count, file handles)
# plus general network tuning. Edit /etc/sysctl.conf, append the settings
# below, then apply with `sysctl -p`.
vi /etc/sysctl.conf
# Append the following parameters
vm.max_map_count=655360
net.ipv4.icmp_echo_ignore_broadcasts = 1
net.ipv4.conf.all.rp_filter = 1
fs.file-max = 6815744
fs.aio-max-nr = 1048576
kernel.shmall = 2097152
kernel.shmmax = 2147483648
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
# net.ipv4.tcp_tw_recycle was removed in Linux 4.12 and is known to break
# connections from clients behind NAT — do NOT enable it.
#net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1

# Apply the configuration and verify it takes effect:
sysctl -p

# Resource limits for the Elasticsearch/Logstash processes: raise the
# open-file (nofile) and process (nproc) limits for all users.
vim /etc/security/limits.conf
# Append/modify the following entries
* soft nofile 65536
* hard nofile 131072
* soft nproc 65536
* hard nproc 131072

# Install Elasticsearch from the official 8.x RPM repository
rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

# Quote the here-doc delimiter ('EOF') so the shell performs no variable or
# command expansion inside the repo file contents.
cat > /etc/yum.repos.d/elasticsearch.repo << 'EOF'
[elasticsearch]
name=Elasticsearch repository for 8.x packages
baseurl=https://artifacts.elastic.co/packages/8.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=0
autorefresh=1
type=rpm-md
EOF

# The repo is disabled by default (enabled=0); enable it per transaction.
sudo yum install --enablerepo=elasticsearch elasticsearch

--------------------------- Security autoconfiguration information --------------------

Authentication and authorization are enabled.
TLS for the transport and HTTP layers is enabled and configured.

The generated password for the elastic built-in superuser is : <redacted — do not record real credentials in notes; rotate with elasticsearch-reset-password>

If this node should join an existing cluster, you can reconfigure this with
'/usr/share/elasticsearch/bin/elasticsearch-reconfigure-node --enrollment-token <token-here>'
after creating an enrollment token on your existing cluster.

You can complete the following actions at any time:

Reset the password of the elastic built-in superuser with
'/usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic'.

Generate an enrollment token for Kibana instances with
 '/usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana'.

Generate an enrollment token for Elasticsearch nodes with
'/usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s node'.

export ELASTIC_PASSWORD="your_password"
-----------------------------------------------------------------------------------------
# Reconfigure a node to join an existing cluster.
# When Elasticsearch is installed, the installer configures a single-node
# cluster by default. To have a node join an existing cluster instead,
# generate an enrollment token on an existing node before starting the new
# node for the first time.
# On any node of the existing cluster, generate a node enrollment token:

/usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s node
# Copy the enrollment token, which is printed to the terminal.
# On the new Elasticsearch node, pass the token to the elasticsearch-reconfigure-node tool:

/usr/share/elasticsearch/bin/elasticsearch-reconfigure-node --enrollment-token <enrollment-token>
# Elasticsearch is now configured to join the existing cluster.

# /etc/elasticsearch/elasticsearch.yml — single-node configuration with
# HTTP-layer security deliberately relaxed (internal network; see note below).
network.host: 10.255.251.24
node.name: jtbakserver
# Enable security features
#xpack.security.enabled: true
xpack.security.enabled: false

#xpack.security.enrollment.enabled: true
xpack.security.enrollment.enabled: false

# Enable encryption for HTTP API client connections, such as Kibana, Logstash, and Agents
xpack.security.http.ssl:
  #enabled: true
  enabled: false
  keystore.path: certs/http.p12

# Enable encryption and mutual authentication between cluster nodes
xpack.security.transport.ssl:
  #enabled: true
  enabled: true
  verification_mode: certificate
  keystore.path: certs/transport.p12
  truststore.path: certs/transport.p12
# Create a new cluster with the current node only
# Additional nodes can still join the cluster later
cluster.initial_master_nodes: ["jtbakserver"]

# SSL and authentication disabled on the HTTP layer; internal IP only.
# NOTE(review): anyone who can reach 10.255.251.24:9200 has full, unauthenticated access.

# Start Elasticsearch via systemd
sudo /bin/systemctl daemon-reload
sudo /bin/systemctl enable elasticsearch.service

# If the Elasticsearch keystore is password protected, systemd must be given
# the keystore password through a local file referenced by the
# ES_KEYSTORE_PASSPHRASE_FILE environment variable. Create the file with
# restrictive permissions from the start (umask) rather than chmod'ing it
# after the secret is already on disk, and delete it once Elasticsearch is
# up and running.
(umask 077; echo "keystore_password" > /path/to/my_pwd_file.tmp)
chmod 600 /path/to/my_pwd_file.tmp
sudo systemctl set-environment ES_KEYSTORE_PASSPHRASE_FILE=/path/to/my_pwd_file.tmp
sudo systemctl start elasticsearch.service

# By default the Elasticsearch service does not log to the systemd journal.
# To enable journal logging, remove the --quiet option from the ExecStart
# line of the unit file (e.g. `sudo systemctl edit --full elasticsearch.service`).
sudo journalctl -f
# List journal entries for the elasticsearch service:
sudo journalctl --unit elasticsearch
# List journal entries for the elasticsearch service starting from a given time:
sudo journalctl --unit elasticsearch --since  "2016-10-30 18:17:16"

# Smoke test: cluster health. Quote the URL so `?v` is never glob-expanded
# against files in the current directory.
# curl -k -u elastic 'https://10.255.251.24:9200/_cat/health?v'  # with auth + TLS enabled

curl -k 'http://10.255.251.24:9200/_cat/health?v'

### Kibana install
# Option 1: install from the yum repo configured above
sudo yum install --enablerepo=elasticsearch kibana

# Option 2: download the RPM, verify its SHA-512 checksum, then install
wget https://artifacts.elastic.co/downloads/kibana/kibana-8.14.1-x86_64.rpm
wget https://artifacts.elastic.co/downloads/kibana/kibana-8.14.1-x86_64.rpm.sha512
shasum -a 512 -c kibana-8.14.1-x86_64.rpm.sha512 
sudo rpm --install kibana-8.14.1-x86_64.rpm

# /etc/kibana/kibana.yml settings:
server.host: "10.255.251.24"
elasticsearch.hosts: ["http://10.255.251.24:9200"]
i18n.locale: "zh-CN"

systemctl enable kibana.service
systemctl start kibana.service

### Install Logstash
sudo rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

# Same repo file as for Elasticsearch. The quoted delimiter ('EOF')
# prevents any shell expansion inside the here-doc.
cat > /etc/yum.repos.d/elasticsearch.repo << 'EOF'
[elasticsearch]
name=Elasticsearch repository for 8.x packages
baseurl=https://artifacts.elastic.co/packages/8.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=0
autorefresh=1
type=rpm-md
EOF

# Option 1: install from the repo
sudo yum install logstash --enablerepo=elasticsearch

# Option 2: internal mirror of the RPM
wget https://home.vimll.com:9999/download/elk/logstash-8.14.1-x86_64.rpm --no-check-certificate
yum install logstash-8.14.1-x86_64.rpm

# Option 3: official download
wget https://artifacts.elastic.co/downloads/logstash/logstash-8.14.1-x86_64.rpm --no-check-certificate

yum install logstash-8.14.1-x86_64.rpm

# Smoke test: echo stdin back to stdout through Logstash
logstash -e 'input { stdin {}} output { stdout {} }'

## # Binary (tarball) install — legacy notes, kept commented out for reference
## wget https://artifacts.elastic.co/downloads/logstash/logstash-8.14.1-linux-x86_64.tar.gz ## --no-check-certificate
## 
## # Generate a startup script
## system-install takes the absolute path of the startup.options file: with an
## RPM install it lives at /etc/logstash/startup.options; with a tarball install
## it is under the config directory of the extraction folder.
## The init type must also be passed: sysv on CentOS 6, systemd on CentOS 7.
## 
## /usr/share/logstash/bin/system-install /etc/logstash/startup.options sysv
## 
## # Successfully created system startup script for Logstash
## 
## vim /etc/init.d/logstash
## #user="logstash"
## user="root"
## #group="logstash"
## group="root"
## 
## vim /usr/share/logstash/config/logstash.yml
## node.name: 10.255.254.83
## path.config: /etc/logstash/conf.d
## 
## mkdir /etc/logstash/conf.d -p
## mkdir /opt/logstash
## 
## /etc/init.d/logstash start
## 
## useradd logstash
## /usr/share/logstash/bin/system-install # create the systemd unit
## mv  -n  /usr/share/logstash/config/* /etc/logstash
## chown -R logstash:logstash /usr/share/logstash/data
## # Same purpose as the data chown above: logstash.yml sets
## # path.data: /var/lib/logstash, which fails without ownership.
## chown -R  logstash:logstash /var/lib/logstash 
## chown -R logstash:logstash /var/log/logstash # path used by last_run_metadata_path; otherwise errors like: org## /jruby/rubyio.java:1237:in `sysopen',org/jruby/rubyio.java:3800:in `write'
## # Edit the jvm.options file:
## # set the I/O temp directory, otherwise logstash stops processing with:
## # (loaderror) could not load ffi provider: (notimplementederror) ffi not available: null
## -Djava.io.tmpdir=/opt/logstash/tmp
## chown -R logstash:logstash /opt/logstash
## chmod 775 -R /opt/logstash

# cat /etc/logstash/conf.d/log.conf
# Pipeline: receive events from Filebeat on port 5044 and index them into
# Elasticsearch, one index per beat type per day.
input {
  beats {
    port => 5044
  }
}

output {
  elasticsearch {
    hosts => ["http://10.255.251.24:9200"]
    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
  }
}

# List indices to confirm events arrive (URL quoted so `?v` is not glob-expanded).
curl -k 'http://10.255.251.24:9200/_cat/indices?v'

# Install Filebeat
sudo yum install filebeat --enablerepo=elasticsearch

# RPM install — use the same 8.x release as the rest of the stack.
# (The filebeat-5.6.16 RPM previously noted here is incompatible with
# Elasticsearch 8 and with the 8.14.2 tarball used below.)
curl -k -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-8.14.2-x86_64.rpm
sudo rpm -vi filebeat-8.14.2-x86_64.rpm

# Tarball install
curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-8.14.2-linux-x86_64.tar.gz
tar xzvf filebeat-8.14.2-linux-x86_64.tar.gz
==========================================================================
# systemd unit for a tarball-installed Filebeat: binary, data and logs live
# in the extracted directory; the configuration is read from /etc/filebeat.
[Unit]
Description=Filebeat sends log files to Logstash or directly to Elasticsearch.
Documentation=https://www.elastic.co/beats/filebeat
Wants=network-online.target
After=network-online.target

[Service]

UMask=0027
Environment="GODEBUG='madvdontneed=1'"
Environment="BEAT_LOG_OPTS="
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/filebeat.yml"
Environment="BEAT_PATH_OPTS=--path.home /usr/local/src/filebeat-8.14.2-linux-x86_64 --path.config /etc/filebeat --path.data /usr/local/src/filebeat-8.14.2-linux-x86_64 --path.logs /usr/local/src/filebeat-8.14.2-linux-x86_64"
ExecStart=/usr/local/src/filebeat-8.14.2-linux-x86_64/filebeat --environment systemd $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always

[Install]
WantedBy=multi-user.target
==============================================================================

# Per-host filebeat.yml fragments (log paths + host name + logstash output).
# NOTE(review): these snippets are partial — each assumes the surrounding
# `filebeat.inputs:` / `- type: log` / `enabled: true` stanza shown in the
# complete 10.2.128.24 example further down; confirm against the live files.
# 10.255.254.83
vim /etc/filebeat/filebeat.yml

  paths:
    - /home/smartbike/smartbike/message-server-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/smartbike/smartbike/message-server-0.0.1-SNAPSHOT/logs/um_info.log
    - /data/smartbike/backup/20231124/bike-jms-0.0.1-SNAPSHOT/logs/um_error.log
    - /data/smartbike/backup/20231124/bike-jms-0.0.1-SNAPSHOT/logs/um_info.log
    - /data/smartbike/backup/20231124/bike-access-0.0.1-SNAPSHOT/logs/um_error.log
    - /data/smartbike/backup/20231124/bike-access-0.0.1-SNAPSHOT/logs/um_info.log
    - /home/smartbike/smartbike/message-cluster-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/smartbike/smartbike/message-cluster-0.0.1-SNAPSHOT/logs/um_info.log
    - /home/activemq/apache-activemq-5.10.1/data/activemq.log

name: 10.255.254.83

output.logstash:
  hosts: ["10.255.251.24:5044"]

# 10.255.254.84
vim /etc/filebeat/filebeat.yml

  paths:
    - /home/smartbike/smartbike/message-server-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/smartbike/smartbike/message-server-0.0.1-SNAPSHOT/logs/um_info.log
    - /home/smartbike/smartbike/bike-access-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/smartbike/smartbike/bike-access-0.0.1-SNAPSHOT/logs/um_info.log
    - /home/smartbike/smartbike/bike-jms-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/smartbike/smartbike/bike-jms-0.0.1-SNAPSHOT/logs/um_info.log

name: 10.255.254.84

output.logstash:
  hosts: ["10.255.251.24:5044"]

# 10.255.254.87
vim /etc/filebeat/filebeat.yml

  paths:
    - /home/smartbike/bike-ba-web-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/smartbike/bike-ba-web-0.0.1-SNAPSHOT/logs/um_info.log
    - /home/smartbike/bike-access-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/smartbike/bike-access-0.0.1-SNAPSHOT/logs/um_info.log
    - /home/smartbike/bike-jms-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/smartbike/bike-jms-0.0.1-SNAPSHOT/logs/um_info.log
    - /home/smartbike/message-server-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/smartbike/message-server-0.0.1-SNAPSHOT/logs/um_info.log

name: 10.255.254.87

output.logstash:
  hosts: ["10.255.251.24:5044"]

# 10.255.254.64
vim /etc/filebeat/filebeat.yml

  paths:
    - /home/smartbike/smartbike/message-server-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/smartbike/smartbike/message-server-0.0.1-SNAPSHOT/logs/um_info.log
    - /home/smartbike/smartbike/bike-jms-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/smartbike/smartbike/bike-jms-0.0.1-SNAPSHOT/logs/um_info.log
    - /home/smartbike/smartbike/bike-access-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/smartbike/smartbike/bike-access-0.0.1-SNAPSHOT/logs/um_info.log

name: 10.255.254.64

output.logstash:
  hosts: ["10.255.251.24:5044"]

# 10.255.250.79

  paths:
    - /home/hzbike/bike-ba-web-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/hzbike/bike-ba-web-0.0.1-SNAPSHOT/logs/um_info.log

name: 10.255.250.79

output.logstash:
  hosts: ["10.255.251.24:5044"]

# 10.2.128.24   Xiaoshan site — complete filebeat.yml example
vim /etc/filebeat/filebeat.yml
# NOTE(review): the `log` input type is deprecated in Filebeat 8.x in favour
# of `filestream`; it still works in 8.14 but plan a migration.
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /home/H3C/20230905-new/bike-ba-web-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/H3C/20230905-new/bike-ba-web-0.0.1-SNAPSHOT/logs/um_info.log
    - /home/H3C/20230905-new/bike-access-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/H3C/20230905-new/bike-access-0.0.1-SNAPSHOT/logs/um_info.log
    - /home/H3C/20230905-new/bike-jms-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/H3C/20230905-new/bike-jms-0.0.1-SNAPSHOT/logs/um_info.log
    - /home/H3C/20230905-new/message-server-0.0.1-SNAPSHOT/logs/um_error.log
    - /home/H3C/20230905-new/message-server-0.0.1-SNAPSHOT/logs/um_info.log

name: 10.2.128.24

output.logstash:
  hosts: ["10.255.251.24:5044"]

# ElastAlert2 — rule-based alerting on the ingested logs (run via Docker)
# cat /etc/yum.repos.d/docker-ce.repo
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

yum install docker-ce

systemctl enable --now docker

docker pull d.vimll.com:9888/root/plulic/jertel/elastalert2:latest

===========================================
# cat /usr/local/src/elastalert2/elastalert.yaml
# Global ElastAlert2 configuration: rules directory, query interval,
# target Elasticsearch node, and the index used to persist alert state.
rules_folder: /opt/elastalert/rules
run_every:
  seconds: 30

buffer_time:
  minutes: 15

es_host: 10.255.251.24
es_port: 9200

writeback_index: elastalert_status

alert_time_limit:
  days: 2

=======================================
# cat email_auth.yaml
# SMTP credentials for the email alerter (referenced via smtp_auth_file).
user: "xxxxxxxx@xxxxxx.com"
password: "xxxxxxxxx"

=======================================

# cat /usr/local/src/elastalert2/rules/hz-log.yaml
# Frequency rule: alert when at least 50 matching error events arrive within
# one minute on the filebeat-hz-log index; re-alert at most every 10 minutes.
name: "hz-log"
type: "frequency"
index: "filebeat-hz-log"

#aggregation:
#  schedule:
#    '* * * * *'

is_enabled: true
num_events: 50
timeframe:
  minutes: 1
realert:
  minutes: 10
#exponential_realert:
#  minutes: 10
silenced: false
timestamp_field: "@timestamp"
timestamp_type: "iso"
use_strftime_index: false

use_local_time: true
query_timezone: "Asia/Shanghai"

buffer_time:
  minutes: 10

alert_subject: "ElastAlert: 杭州线下日志告警!!!"

alert_text_type: alert_text_only
alert_text: |
  << 杭州线下日志告警 >>
  索引名称: {0}
  时间: {1}
  日志路径: {2}
  10分钟错误日志数: {3}
  规则匹配次数: {4}
  日志信息: {5}
  告警节点: {6}
  日志ID: {7}
# Field names per Filebeat 8.x / ECS: the 5.x/6.x fields `source` and
# `beat.name` no longer exist in events shipped by Filebeat 8.14; the ECS
# equivalents are `log.file.path` and `host.name`.
# NOTE(review): confirm against an actual indexed document from this pipeline.
alert_text_args:
  - "_index"
  - "@timestamp"
  - log.file.path
  - num_hits
  - num_matches
  - message
  - host.name
  - "_id"

filter:
  - query:
      query_string:
        query: "\"Response Message Routing Error\" OR UMRouteInfoManager"

alert:
  - "email"

email:
- "xxxx@xxxxxxxxxxxx.com"

smtp_auth_file: /opt/elastalert/email_auth.yaml
smtp_host: smtp.exmail.qq.com
smtp_port: 465
smtp_ssl: true
user: xxxxxxxx@xxxxxx.com
from_addr: xxxxxxxx@xxxxxx.com
email_reply_to: xxxxxxxx@xxxxxx.com

# Run ElastAlert2 in Docker: mount the global config, the SMTP credentials
# file and the rules directory; trailing --verbose is passed to elastalert.
docker run -d --name elastalert -e TZ=Asia/Shanghai --restart=always -v /usr/local/src/elastalert2/elastalert.yaml:/opt/elastalert/config.yaml -v /usr/local/src/elastalert2/email_auth.yaml:/opt/elastalert/email_auth.yaml -v /usr/local/src/elastalert2/rules:/opt/elastalert/rules d.vimll.com:9888/root/plulic/jertel/elastalert2:latest --verbose

# Open a shell inside the container for troubleshooting
docker exec -it elastalert bash

# Follow the container log, starting from the last 100 lines
docker logs -f elastalert -n 100