Start the service (run it in the background):
grunt server & // must be run from /home/stt/elasticsearch-head, because my grunt is not installed globally
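If the head page comes up but cannot connect to Elasticsearch, a quick sanity check (assuming head's default port 9100 and Elasticsearch on 9200; note that for ES 5.x the head UI also needs http.cors.enabled: true and http.cors.allow-origin: "*" set in elasticsearch.yml):
curl -s http://127.0.0.1:9100/ | head -n 5    # the head UI is being served
curl -s http://127.0.0.1:9200/                # Elasticsearch answers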
## Install Logstash (install it on the server whose logs you want to read; Logstash reads your logs and ships them to Elasticsearch):
It also needs a Java 1.8 environment.
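A quick way to verify the Java environment (a sketch assuming a Debian/Ubuntu host, to match the apt usage later in this guide):
java -version                              # should report version 1.8.x
apt-get install openjdk-8-jre-headless     # one way to install it if it is missing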
tar xf logstash-5.3.0.tar.gz
mkdir /work/opt
mv logstash-5.3.0 /work/opt/
cd /work/opt/
vim /work/opt/logstash-5.3.0/conf/central.conf # (handles logs read via the file input; this is just a simple example, to be expanded as I keep learning)
input {
    file {
        path => "/tmp/*.log"
    }
}
output {
    elasticsearch {
        hosts => "192.168.8.116:9200"
        index => "nginx-access"
    }
    stdout {
        codec => rubydebug
    }
}
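Before starting it for real, Logstash 5.x can validate the configuration file first, then you can launch it in the background the same way as the other services here:
/work/opt/logstash-5.3.0/bin/logstash -f /work/opt/logstash-5.3.0/conf/central.conf --config.test_and_exit
nohup /work/opt/logstash-5.3.0/bin/logstash -f /work/opt/logstash-5.3.0/conf/central.conf &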
## Install Kibana:
Unpack the tarball:
tar zxf kibana-5.1.1-linux-x86_64.tar.gz -C /home/stt/server/
vim config/kibana.yml // edit the following settings
server.port: 5601 // just uncomment this line; keep the default port
server.host: "0.0.0.0" // listen on all addresses so other machines can also reach this Kibana
elasticsearch.url: "http://127.0.0.1:9200" // adjust this URL to however Elasticsearch is actually reached in your setup
Start the service (run it in the background):
/home/stt/server/kibana-5.1.1-linux-x86_64/bin/kibana &
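To confirm it is up, Kibana 5.x exposes a status endpoint (adjust host and port to your setup):
curl -s http://127.0.0.1:5601/api/status | head -c 200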
Install nginx as a reverse proxy:
apt-get install nginx
The nginx reverse proxy configuration file is as follows:
## Filename: kibana.conf
upstream backend {
    server 172.31.6.155:5601;
}
server {
    listen 80;
    server_name kibana.lvnian.co;
    access_log /tmp/kibana-access.log;
    error_log /tmp/kibana-error.log;
    location / {
        # set the Host header and the client's real address so the backend can see the real client IP
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        # disable proxy buffering
        proxy_buffering off;
        # address of the proxied backend (the upstream block defined above)
        proxy_pass http://backend;
    }
}
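To enable it (a sketch assuming a Debian-style layout where /etc/nginx/conf.d/*.conf is included by nginx.conf; adjust the path for your distribution):
cp kibana.conf /etc/nginx/conf.d/
nginx -t && nginx -s reload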
Below is the Logstash configuration file that reads nginx's access and error logs and ships them to Elasticsearch.
Run it with the following command:
nohup /work/opt/logstash-5.3.0/bin/logstash -f /work/opt/logstash-5.3.0/conf/elk-nginx-log.conf &
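Note that the access-log branch of the filter below splits each log line on "|", so the nginx access log must be written in a matching pipe-separated format. A sketch of such a log_format, in the same field order the ruby filter expects (the format name "elk" is made up here; adapt it to your actual nginx config):
log_format elk '$http_x_forwarded_for|$time_local|$request|$status|$body_bytes_sent|'
               '$request_body|$content_length|$http_referer|$http_user_agent|'
               '$http_cookie|$remote_addr|$hostname|$upstream_addr|'
               '$upstream_response_time|$request_time';
access_log /data/log/nginx/access.log elk;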
Filename: elk-nginx-log.conf
input {
    file {
        path => "/data/log/nginx/*.log"
        start_position => "beginning"
    }
}
filter {
    if [path] =~ "access" {
        mutate { replace => { type => "nginx_access" } }
        # split the pipe-separated access-log line into named fields
        ruby {
            init => "@kname = ['http_x_forwarded_for','time_local','request','status','body_bytes_sent','request_body','content_length','http_referer','http_user_agent','http_cookie','remote_addr','hostname','upstream_addr','upstream_response_time','request_time']"
            code => "
                new_event = LogStash::Event.new(Hash[@kname.zip(event.get('message').split('|'))])
                new_event.remove('@timestamp')
                event.append(new_event)
            "
        }
        if [request] {
            # split "GET /path HTTP/1.1" into method, uri and protocol version
            ruby {
                init => "@kname = ['method','uri','verb']"
                code => "
                    new_event = LogStash::Event.new(Hash[@kname.zip(event.get('request').split(' '))])
                    new_event.remove('@timestamp')
                    event.append(new_event)
                "
            }
            if [uri] {
                # split the URI into path and query string
                ruby {
                    init => "@kname = ['url_path','url_args']"
                    code => "
                        new_event = LogStash::Event.new(Hash[@kname.zip(event.get('uri').split('?'))])
                        new_event.remove('@timestamp')
                        event.append(new_event)
                    "
                }
                # expand the query string into url_* fields
                kv {
                    prefix => "url_"
                    source => "url_args"
                    field_split => "& "
                    remove_field => [ "url_args","uri","request" ]
                }
            }
        }
        mutate {
            convert => [
                "body_bytes_sent" , "integer",
                "content_length", "integer",
                "upstream_response_time", "float",
                "request_time", "float"
            ]
        }
        date {
            # nginx $time_local uses a 24-hour clock, so HH rather than hh
            match => [ "time_local", "dd/MMM/yyyy:HH:mm:ss Z" ]
            locale => "en"
        }
    }
    else if [path] =~ "error" {
        mutate { replace => { type => "nginx_error" } }
        # parse the fixed error-log prefix: timestamp, level, and message
        grok {
            match => { "message" => "(?<datetime>\d\d\d\d/\d\d/\d\d \d\d:\d\d:\d\d) \[(?<errtype>\w+)\] \S+: \*\d+ (?<errmsg>[^,]+), (?<errinfo>.*)$" }
        }
        mutate {
            rename => [ "host", "fromhost" ]
            gsub => [ "errmsg", "too large body: \d+ bytes", "too large body" ]
        }
        if [errinfo] {
            # turn the trailing "key: value, key: value" pairs into fields
            ruby {
                code => "
                    new_event = LogStash::Event.new(Hash[event.get('errinfo').split(', ').map{|l| l.split(': ')}])
                    new_event.remove('@timestamp')
                    event.append(new_event)
                "
            }
        }
        grok {
            # NGX_URIPARAM is a custom pattern expected under patterns_dir
            # match => { "request" => '"%{WORD:verb} %{URIPATH:urlpath}(?:\?%{NGX_URIPARAM:urlparam})?(?: HTTP/%{NUMBER:httpversion})"' }
            match => { "request" => '"%{WORD:verb} %{URIPATH:urlpath}?(?: HTTP/%{NUMBER:httpversion})"' }
            patterns_dir => ["/etc/logstash/patterns"]
            # remove_field => [ "message", "errinfo", "request" ]
        }
    }
    else {
        mutate { replace => { type => "random_logs" } }
    }
}
output {
    elasticsearch {
        hosts => "172.31.1.79:9200"
        #index => "logstash-nginx"
        index => "logstash-%{type}-%{+YYYY.MM.dd}"
        document_type => "%{type}"
        flush_size => 20000
        idle_flush_time => 10
        sniffing => true
        template_overwrite => true
    }
    # stdout {
    #     codec => rubydebug
    # }
}
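Once events start flowing, you can confirm that the daily indices are being created on the Elasticsearch side:
curl 'http://172.31.1.79:9200/_cat/indices?v' | grep logstash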