# cat /etc/issue
CentOS release 6.5 (Final)
> vim /etc/elasticsearch/elasticsearch.yml # ---------------------------------- Network ----------------------------------- # # Set the bind address to a specific IP (IPv4 or IPv6): # # network.host: 127.0.0.1 network.host: 192.168.20.50 # # Set a custom port for HTTP: # http.port: 9200 ... bootstrap.system_call_filter: false
注:使用本地 IP(127.0.0.1)时,Elasticsearch 进入 dev mode,只能从本机访问,只显示警告;绑定到非 loopback 地址后会执行 bootstrap checks,检查不通过则无法启动。
ERROR: bootstrap checks failed max file descriptors [65535] for elasticsearch process likely too low, increase to at least [65536] memory locking requested for elasticsearch process but memory is not locked max number of threads [1024] for user [jason] likely too low, increase to at least [2048] max virtual memory areas vm.max_map_count [65530] likely too low, increase to at least [262144] system call filters failed to install; check the logs and fix your configuration or disable system call filters at your own risk
需要针对这些参数进行设置:
> vim /etc/security/limits.conf ... elasticsearch soft nofile 65536 # 针对 max file descriptors,soft 和 hard 都需设置 elasticsearch hard nofile 65536 elasticsearch soft nproc 2048 # 针对 max number of threads(注意:此处的用户名应与实际运行 Elasticsearch 进程的用户一致,前面错误信息中显示的是 [jason]) > vim /etc/sysctl.conf ... vm.max_map_count=262144 # 针对 max virtual memory areas,修改后执行 sysctl -p 使其生效 > vim /etc/elasticsearch/elasticsearch.yml ... bootstrap.system_call_filter: false # 针对 system call filters failed to install, 参见 https://www.elastic.co/guide/en/elasticsearch/reference/current/system-call-filter-check.html
sudo chkconfig --add elasticsearch # configure Elasticsearch to start automatically when the system boots up sudo -i service elasticsearch start sudo -i service elasticsearch stop
日志: /var/log/elasticsearch/
rpm -vi logstash-5.2.0.rpm
这个例子里使用 Filebeat 将测试用的 Apache web log 作为 Logstash 的输入,解析并写入数据到 Elasticsearch 中。
> vim /etc/logstash/conf.d/first-pipeline.conf input { beats { port => "5043" } } filter { grok { match => { "message" => "%{COMBINEDAPACHELOG}"} } geoip { source => "clientip" } } output { elasticsearch { hosts => [ "192.168.20.50:9200" ] index => "testlog-%{+YYYY.MM.dd}" } }
grok
可以解析未结构化的日志数据。Grok filter pattern 测试网站:http://grokdebug.herokuapp.com/ 。其中 %{COMBINEDAPACHELOG} 展开后的完整 pattern 为:
%{IPORHOST:clientip} %{USER:ident} %{USER:auth} \[%{HTTPDATE:timestamp}\] "(?:%{WORD:verb} %{NOTSPACE:request} (?: HTTP/%{NUMBER:httpversion})?|%{DATA:rawrequest})" %{NUMBER:response} (?:%{NUMBER:bytes}|-) %{QS:referrer} %{QS:agent}
启动:
sudo initctl start logstash # 作为服务运行(适用于使用 Upstart 的系统)
curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-5.2.0-x86_64.rpm sudo rpm -vi filebeat-5.2.0-x86_64.rpm
> vim /etc/filebeat/filebeat.yml filebeat.prospectors: - input_type: log paths: - /var/log/logstash-tutorial.log # 之前下载的测试文件 #- /var/log/*.log #- c:\programdata\elasticsearch\logs\* ... #----------------------------- Logstash output -------------------------------- output.logstash: # The Logstash hosts #hosts: ["localhost:5044"] hosts: ["localhost:5043"]
curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-5.2.0-x86_64.rpm sudo rpm -vi filebeat-5.2.0-x86_64.rpm sudo /etc/init.d/filebeat start
> vim /etc/kibana/kibana.yml server.host: "192.168.20.50" elasticsearch.url: "http://192.168.20.50:9200"
> sudo chkconfig --add kibana # 设置自动启动 > sudo -i service kibana start > sudo -i service kibana stop
echo '1.1.1.3 - - [04/Jan/2015:05:13:42 +0000] "GET /test.png HTTP/1.1" 200 203023 "http://test.com/" "Mozilla/5.0"' >> /var/log/logstash-tutorial.log
原文:http://www.cnblogs.com/jasonxuli/p/6397244.html