最新Centos7.6 部署ELK日志分析系统

下载elasticsearch 创建elk用户并授权 useradd elk chown -R elk:elk /home/elk/elasticsearch chown -R elk:elk /home/elk/elasticsearch1 chown -R elk:elk /home/elk/elasticsearch2 mkdir -p /home/eladata mkdir -p /var/log/elk chown -R elk:elk /home/eladata chown -R elk:elk /var/log/elk 主节点master elasticsearch解压,修改配置文件 /home/elk/elasticsearch/config [root@localhost config]# grep -v "^#" elasticsearch.yml cluster.name: my-application node.name: node0 node.master: true node.attr.rack: r1 node.max_local_storage_nodes: 3 path.data: /home/eladata path.logs: /var/log/elk http.cors.enabled: true http.cors.allow-origin: "*" network.host: 192.168.1.70 http.port: 9200 transport.tcp.port: 9301 discovery.zen.minimum_master_nodes: 1 cluster.initial_master_nodes: ["node0"] 手动启动命令 su elk -l -c '/home/elk/elasticsearch/bin/elasticsearch -d' 启动文件 elasticsearch.service [root@localhost system]# pwd /lib/systemd/system [root@localhost system]# cat elasticsearch.service [Unit] Description=Elasticsearch Documentation=http://www.elastic.co Wants=network-online.target After=network-online.target [Service] RuntimeDirectory=elasticsearch PrivateTmp=true Environment=ES_HOME=/home/elk/elasticsearch Environment=ES_PATH_CONF=/home/elk/elasticsearch/config Environment=PID_DIR=/var/run/elasticsearch EnvironmentFile=-/etc/sysconfig/elasticsearch WorkingDirectory=/home/elk/elasticsearch User=elk Group=elk ExecStart=/home/elk/elasticsearch/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet StandardOutput=journal StandardError=inherit LimitNOFILE=65536 LimitNPROC=4096 LimitAS=infinity LimitFSIZE=infinity TimeoutStopSec=0 KillSignal=SIGTERM KillMode=process SendSIGKILL=no SuccessExitStatus=143 [Install] WantedBy=multi-user.target [root@localhost system]# Node1节点 /home/elk/elasticsearch1/config [root@localhost config]# grep -v "^#" elasticsearch.yml cluster.name: my-application node.name: node1 node.master: 
false node.attr.rack: r1 node.max_local_storage_nodes: 3 path.data: /home/eladata path.logs: /var/log/elk http.cors.enabled: true http.cors.allow-origin: "*" network.host: 192.168.1.70 transport.tcp.port: 9303 http.port: 9302 discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"] [root@localhost config]# 手动启动命令 su elk -l -c '/home/elk/elasticsearch1/bin/elasticsearch -d' 启动文件 elasticsearch1.service [root@localhost system]# pwd /lib/systemd/system [root@localhost system]# cat elasticsearch1.service [Unit] Description=Elasticsearch Documentation=http://www.elastic.co Wants=network-online.target After=network-online.target [Service] RuntimeDirectory=elasticsearch1 PrivateTmp=true Environment=ES_HOME=/home/elk/elasticsearch1 Environment=ES_PATH_CONF=/home/elk/elasticsearch1/config Environment=PID_DIR=/var/run/elasticsearch EnvironmentFile=-/etc/sysconfig/elasticsearch WorkingDirectory=/home/elk/elasticsearch1 User=elk Group=elk ExecStart=/home/elk/elasticsearch1/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet StandardOutput=journal StandardError=inherit LimitNOFILE=65536 LimitNPROC=4096 LimitAS=infinity LimitFSIZE=infinity TimeoutStopSec=0 KillSignal=SIGTERM KillMode=process SendSIGKILL=no SuccessExitStatus=143 [Install] WantedBy=multi-user.target [root@localhost system]# Node2节点 /home/elk/elasticsearch2/config [root@localhost config]# grep -v "^#" elasticsearch.yml cluster.name: my-application node.name: node2 node.attr.rack: r1 node.master: false node.max_local_storage_nodes: 3 path.data: /home/eladata path.logs: /var/log/elk http.cors.enabled: true http.cors.allow-origin: "*" network.host: 192.168.1.70 http.port: 9203 transport.tcp.port: 9304 discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"] discovery.zen.minimum_master_nodes: 1 [root@localhost config]# 手动启动命令 su elk -l -c '/home/elk/elasticsearch2/bin/elasticsearch -d' 启动文件 
elasticsearch2.service [root@localhost system]# pwd /lib/systemd/system [root@localhost system]# cat elasticsearch2.service [Unit] Description=Elasticsearch Documentation=http://www.elastic.co Wants=network-online.target After=network-online.target [Service] RuntimeDirectory=elasticsearch2 PrivateTmp=true Environment=ES_HOME=/home/elk/elasticsearch2 Environment=ES_PATH_CONF=/home/elk/elasticsearch2/config Environment=PID_DIR=/var/run/elasticsearch EnvironmentFile=-/etc/sysconfig/elasticsearch WorkingDirectory=/home/elk/elasticsearch2 User=elk Group=elk ExecStart=/home/elk/elasticsearch2/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet StandardOutput=journal StandardError=inherit LimitNOFILE=65536 LimitNPROC=4096 LimitAS=infinity LimitFSIZE=infinity TimeoutStopSec=0 KillSignal=SIGTERM KillMode=process SendSIGKILL=no SuccessExitStatus=143 [Install] WantedBy=multi-user.target [root@localhost system]# 下载logstash 目录如下,默认配置即可 [root@localhost logstash]# pwd /home/elk/logstash [root@localhost logstash]# 手动启动命令 ./logstash -f ../dev.conf nohup ./logstash -f ../dev.conf & 下载kibana 配置文件如下 [root@localhost config]# pwd /home/elk/kibana/config [root@localhost config]# grep -v "^#" kibana.yml server.host: "192.168.1.70" elasticsearch.hosts: ["http://192.168.1.70:9200"] kibana.index: ".kibana" i18n.locale: "zh-CN" 手动启动命令 ./kibana nohup ./kibana & kibana启动文件 [root@localhost system]# pwd /lib/systemd/system [root@localhost system]# cat kibana.service [Unit] Description=Kibana Server Manager [Service] ExecStart=/home/elk/kibana/bin/kibana [Install] WantedBy=multi-user.target [root@localhost system]# 端口为:5601 访问:192.168.1.70:5601 安装Elasticsearch-head yum install git npm git clone https://github.com/mobz/elasticsearch-head.git [root@localhost elasticsearch-head]# pwd /home/elk/elasticsearch-head [root@localhost elasticsearch-head]# 启动 npm install npm 
run start nohup npm run start & curl -XPUT '192.168.1.70:9200/book' 访问192.168.1.70:9100 即可访问 下载kafka 修改配置文件如下 [root@localhost config]# pwd /home/elk/kafka/config [root@localhost config]# grep -v "^#" server.properties broker.id=0 listeners=PLAINTEXT://192.168.1.70:9092 num.network.threads=3 num.io.threads=8 socket.send.buffer.bytes=102400 socket.receive.buffer.bytes=102400 socket.request.max.bytes=104857600 log.dirs=/var/log/kafka-logs num.partitions=1 num.recovery.threads.per.data.dir=1 offsets.topic.replication.factor=1 transaction.state.log.replication.factor=1 transaction.state.log.min.isr=1 log.retention.hours=168 log.segment.bytes=1073741824 log.retention.check.interval.ms=300000 zookeeper.connect=localhost:2181 zookeeper.connection.timeout.ms=6000 group.initial.rebalance.delay.ms=0 delete.topic.enable=true [root@localhost config]# kafka配置启动zookeeper 手动启动方式 [root@localhost bin]# pwd /home/elk/kafka/bin [root@localhost bin]# ./zookeeper-server-start.sh ../config/zookeeper.properties systemctl 启动zookeeper [root@localhost system]# pwd /lib/systemd/system [root@localhost system]# cat zookeeper.service [Service] Type=forking SyslogIdentifier=zookeeper Restart=always RestartSec=0s ExecStart=/home/elk/kafka/bin/zookeeper-server-start.sh -daemon /home/elk/kafka/config/zookeeper.properties ExecStop=/home/elk/kafka/bin/zookeeper-server-stop.sh [root@localhost system]# 启动kafka服务 手动启动方式 ./kafka-server-start.sh ../config/server.properties systemctl 启动kafka [root@localhost system]# pwd /lib/systemd/system [root@localhost system]# cat kafka.service [Unit] Description=Apache kafka After=network.target [Service] Type=simple Restart=always RestartSec=0s ExecStart=/home/elk/kafka/bin/kafka-server-start.sh /home/elk/kafka/config/server.properties ExecStop=/home/elk/kafka/bin/kafka-server-stop.sh [root@localhost system]# 测试kafka 新建一个名字为test的topic ./kafka-topics.sh --create 
--zookeeper 192.168.1.70:2181 --replication-factor 1 --partitions 1 --topic test 查看kafka中的topic ./kafka-topics.sh --list --zookeeper 192.168.1.70:2181 往kafka topic为test中 生产消息 ./kafka-console-producer.sh --broker-list 192.168.1.70:9092 --topic test 在kafka topic为test中 消费消息 ./kafka-console-consumer.sh --bootstrap-server 192.168.1.70:9092 --topic test --from-beginning

生产端发送的消息,消费端能够接收到,即表示 kafka 工作正常

目标机器安装filebeat

内容版权声明:除非注明,否则皆为本站原创文章。

转载注明出处:https://www.heiqu.com/wpzxzg.html