Visualizing Logs with an ELKB Log Analysis System

Posted in linux

Basic pipeline and packages

filebeat(client) -> kafka && zookeeper -> hangout -> elasticsearch -> kibana

elasticsearch-5.6.4.tar.gz
filebeat-5.6.4-linux-x86_64.tar.gz
hangout-dist-0.3.0-release-bin.zip
jdk-8u73-linux-x64.tar.gz
kafka_2.12-1.1.0.tgz
kibana-5.6.4-linux-x86_64.tar.gz
zookeeper-3.4.12.tar.gz

Preparing the environment

System environment

Distributor ID: CentOS
Description:    CentOS Linux release 7.4.1708 (Core) 
Release:    7.4.1708
Codename:   Core

System pre-configuration

vim /etc/sysctl.conf
fs.file-max=65536 # raise the system-wide limit on open file handles
vm.max_map_count = 262144 # max memory map areas per process, required by Elasticsearch
vim /etc/security/limits.conf
* soft nofile 65535
* hard nofile 131072
* soft nproc 2048
* hard nproc 4096
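
The changes above do not take effect on their own: sysctl settings must be reloaded, and the limits.conf values only apply to new login sessions. A quick way to apply and verify them:

sysctl -p # reload /etc/sysctl.conf
sysctl vm.max_map_count # Elasticsearch requires at least 262144
ulimit -n # run in a fresh login shell; should report 65535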

Starting directory

[root@localhost src]# pwd
/usr/local/src
[root@localhost src]# ls
elasticsearch-5.6.4.tar.gz          hangout-dist-0.3.0-release-bin.zip  kafka_2.12-1.1.0.tgz              zookeeper-3.4.12.tar.gz
filebeat-5.6.4-linux-x86_64.tar.gz  jdk-8u73-linux-x64.tar.gz           kibana-5.6.4-linux-x86_64.tar.gz

Install the JDK

[root@localhost src]# tar zxvf jdk-8u73-linux-x64.tar.gz
[root@localhost src]# mv jdk1.8.0_73 /usr/local/
[root@localhost src]# ln -s /usr/local/jdk1.8.0_73 /usr/local/jdk
[root@localhost local]# vim /etc/profile
export JAVA_HOME=/usr/local/jdk
export CLASSPATH=$JAVA_HOME/lib/
PATH=$PATH:$JAVA_HOME/bin
export PATH JAVA_HOME CLASSPATH
[root@localhost local]# source /etc/profile
[root@localhost local]# java -version
java version "1.8.0_73"
Java(TM) SE Runtime Environment (build 1.8.0_73-b02)
Java HotSpot(TM) 64-Bit Server VM (build 25.73-b02, mixed mode)

Install Elasticsearch

[root@localhost src]# tar zxvf elasticsearch-5.6.4.tar.gz
[root@localhost src]# mv elasticsearch-5.6.4 /usr/local/elasticsearch
[root@localhost src]# cd /usr/local/elasticsearch/
[root@localhost elasticsearch]# mkdir -p /data/es
[root@localhost elasticsearch]# mkdir -p /var/log/es
[root@localhost elasticsearch]# egrep -v "^#" config/elasticsearch.yml # settings changed from the defaults
node.name: node-1
path.data: /data/es
path.logs: /var/log/es
network.host: 192.168.213.131
http.port: 9200

[root@localhost elasticsearch]# groupadd es # create a user and group and give them ownership of the directories; Elasticsearch will not run as root
[root@localhost elasticsearch]# useradd -g es es
[root@localhost elasticsearch]# echo "liuhonghe" | passwd --stdin es
Changing password for user es.
passwd: all authentication tokens updated successfully.
[root@localhost elasticsearch]# chown -R es:es /data/es/
[root@localhost elasticsearch]# chown -R es:es /var/log/es
[root@localhost elasticsearch]# chown -R es:es /usr/local/elasticsearch

[root@localhost elasticsearch]# su es -c "/usr/local/elasticsearch/bin/elasticsearch -d" # start; the next command stops it
[root@localhost elasticsearch]# ps -ef | grep elasticsearch | grep -v grep | awk '{print $2}' | xargs kill -9
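
Once started, Elasticsearch should answer on port 9200 within a few seconds. A quick health check against the host configured above:

[root@localhost elasticsearch]# curl 'http://192.168.213.131:9200/' # basic node and version info
[root@localhost elasticsearch]# curl 'http://192.168.213.131:9200/_cluster/health?pretty' # status should be green or yellow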

Install Kibana

[root@localhost src]# tar zxvf kibana-5.6.4-linux-x86_64.tar.gz
[root@localhost src]# mv kibana-5.6.4-linux-x86_64 /usr/local/kibana
[root@localhost src]# cd /usr/local/kibana/
[root@localhost kibana]# egrep -v '^#|^$' config/kibana.yml 
server.port: 5601
server.host: "192.168.213.131" # this host's IP
elasticsearch.url: "http://192.168.213.131:9200" # the Elasticsearch endpoint

[root@localhost kibana]# /usr/local/kibana/bin/kibana > /dev/null 2>&1 & # start; the next command stops it
[root@localhost kibana]# ps -ef | grep kibana | grep -v grep | awk '{print $2}' | xargs kill -9
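
Kibana exposes a status API, so it can be checked without opening a browser:

[root@localhost kibana]# curl -s 'http://192.168.213.131:5601/api/status' | head -c 200 # returns JSON while Kibana is healthy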

Install ZooKeeper

[root@localhost src]# tar zxvf zookeeper-3.4.12.tar.gz
[root@localhost src]# mv zookeeper-3.4.12 /usr/local/zookeeper
[root@localhost src]# cd /usr/local/zookeeper/
[root@localhost zookeeper]# cp conf/zoo_sample.cfg conf/zoo.cfg
[root@localhost zookeeper]# egrep -v "^#|^$" conf/zoo.cfg 
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/data/zookeeper
clientPort=2181
server.1=192.168.213.131:2888:3888 # 2888 = quorum port, 3888 = leader-election port
server.2=192.168.213.132:2888:3888 # add one server.N line per ZooKeeper node
[root@localhost zookeeper]# mkdir -p /data/zookeeper
[root@localhost zookeeper]# echo 1 > /data/zookeeper/myid # must match the server.1 entry above

[root@localhost zookeeper]# /usr/local/zookeeper/bin/zkServer.sh start # start
[root@localhost zookeeper]# /usr/local/zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Mode: standalone # standalone because only one node is actually running here
[root@localhost zookeeper]# /usr/local/zookeeper/bin/zkServer.sh stop
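
ZooKeeper also answers four-letter commands on its client port, which makes health checks easy to script (assuming nc is installed):

[root@localhost zookeeper]# echo ruok | nc 192.168.213.131 2181 # prints "imok" when the server is healthy
imok
[root@localhost zookeeper]# echo stat | nc 192.168.213.131 2181 | grep Mode # standalone, leader or follower
Mode: standalone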

Install Kafka

[root@localhost src]# tar zxvf kafka_2.12-1.1.0.tgz
[root@localhost src]# mv kafka_2.12-1.1.0 /usr/local/kafka
[root@localhost src]# cd /usr/local/kafka/
[root@localhost kafka]# mkdir -p /var/log/kafka-logs
[root@localhost kafka]# egrep -v "^#|^$" config/server.properties 
broker.id=1 # must be unique; change this on each additional broker
listeners=PLAINTEXT://192.168.213.131:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/var/log/kafka-logs
num.partitions=6
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=1
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.213.131:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
[root@localhost kafka]# /usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties # start; the next command stops it
[root@localhost kafka]# /usr/local/kafka/bin/kafka-server-stop.sh
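
After the broker starts it should be listening on 9092 and registered in ZooKeeper; both are easy to confirm (zookeeper-shell.sh ships with Kafka):

[root@localhost kafka]# ss -lnt | grep 9092 # broker listening on its PLAINTEXT port
[root@localhost kafka]# /usr/local/kafka/bin/zookeeper-shell.sh 192.168.213.131:2181 ls /brokers/ids # should show [1]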

Create a topic

[root@localhost kafka]# /usr/local/kafka/bin/kafka-topics.sh --create --zookeeper 192.168.213.131:2181 --replication-factor 1 --partitions 2 --topic test
Created topic "test".
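
--describe shows how the partitions were placed:

[root@localhost kafka]# /usr/local/kafka/bin/kafka-topics.sh --describe --zookeeper 192.168.213.131:2181 --topic test
Topic:test	PartitionCount:2	ReplicationFactor:1	Configs:
	Topic: test	Partition: 0	Leader: 1	Replicas: 1	Isr: 1
	Topic: test	Partition: 1	Leader: 1	Replicas: 1	Isr: 1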

Consume messages on 192.168.213.131

[root@localhost kafka]# /usr/local/kafka/bin/kafka-console-consumer.sh --zookeeper 192.168.213.131:2181 --topic test --from-beginning

Produce messages on 192.168.213.131

[root@localhost kafka]# /usr/local/kafka/bin/kafka-console-producer.sh --broker-list 192.168.213.131:9092 --topic test # type messages here; if they show up in the consumer terminal, Kafka is working
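
The console producer reads stdin, so the same round trip can be scripted rather than typed by hand:

[root@localhost kafka]# echo "hello kafka" | /usr/local/kafka/bin/kafka-console-producer.sh --broker-list 192.168.213.131:9092 --topic test
[root@localhost kafka]# /usr/local/kafka/bin/kafka-console-consumer.sh --zookeeper 192.168.213.131:2181 --topic test --from-beginning --max-messages 1 # prints one message, then exits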

Install hangout

https://github.com/childe/hangout

[root@localhost src]# unzip hangout-dist-0.3.0-release-bin.zip -d /usr/local/hangout
[root@localhost src]# cd /usr/local/hangout/
[root@localhost hangout]# vim conf/hangout_server.conf
inputs:
    - NewKafka:
        codec: json
        topic:
            test: 2
        consumer_settings:
            bootstrap.servers: 192.168.213.131:9092
            value.deserializer: org.apache.kafka.common.serialization.StringDeserializer
            key.deserializer: org.apache.kafka.common.serialization.StringDeserializer
            group.id: lb-nginx-logs
outputs:
    - Elasticsearch:
        cluster: elasticsearch
        hosts:
            - 192.168.213.131
        index: 'logstash-%{type}-%{+YYYY.MM.dd}'
        index_type: logs
        bulk_actions: 20000
        bulk_size: 15
        flush_interval: 10
        concurrent_requests: 0
        timezone: "Asia/Shanghai"
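
hangout also supports a filters section between inputs and outputs (Grok, Date, Add and others are listed in the project README). As an untested sketch only, a Grok filter pulling a timestamp and level out of the raw message field might look like:

filters:
    - Grok:
        src: message
        match:
            - '^(?<logtime>\S+) \[(?<level>\w+)\] %{GREEDYDATA:msg}'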

[root@localhost hangout]# /usr/local/hangout/bin/hangout -f /usr/local/hangout/conf/hangout_server.conf > /dev/null 2>&1 & # start; the next command stops it
[root@localhost hangout]# ps -ef | grep hangout | grep -v grep | awk '{print $2}' | xargs kill -9
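
Once events are flowing, hangout creates daily indices in Elasticsearch, which the _cat API will list:

[root@localhost hangout]# curl 'http://192.168.213.131:9200/_cat/indices?v' # look for logstash-<type>-YYYY.MM.dd entries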

Install Filebeat

[root@localhost src]# tar zxvf filebeat-5.6.4-linux-x86_64.tar.gz # use the 5.x release, not 6.x: 6.x removed document_type
[root@localhost src]# mv filebeat-5.6.4-linux-x86_64 /usr/local/filebeat
[root@localhost src]# cd /usr/local/filebeat/
[root@localhost filebeat]# vim filebeat.yml
filebeat.prospectors:
- input_type: log
  paths:
    - /root/tmp.log
  document_type: locallog # used for the Kibana index pattern: logstash-locallog*
  tags: ["locallog"]
  tail_files: true
output.kafka:
  hosts: ["192.168.213.131:9092"]
  topic: 'test'
  client_id: "filebeat"
  partition.round_robin:
    reachable_only: false
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000
[root@localhost filebeat]# /usr/local/filebeat/filebeat -c /usr/local/filebeat/filebeat.yml > /dev/null 2>&1 & # start; the next command stops it
[root@localhost filebeat]# ps -ef | grep filebeat | grep -v grep | awk '{print $2}' | xargs kill -9
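
With every component running, a simple end-to-end smoke test is to append a line to the watched file and search for it in Elasticsearch (the field and index names follow from the configs above; allow for hangout's 10-second flush_interval):

[root@localhost filebeat]# echo "elkb smoke test" >> /root/tmp.log # filebeat is tailing this file
[root@localhost filebeat]# sleep 15
[root@localhost filebeat]# curl 'http://192.168.213.131:9200/logstash-locallog-*/_search?q=message:smoke&pretty' # the new line should come back as a hit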