
Elasticsearch, Logstash, and Kibana Cluster Deployment Guide for Linux (Version 6.5.4)

I. Elasticsearch Deployment

Elasticsearch, Logstash, and Kibana cannot be run as root, yet Linux limits the number of files and threads a non-root account may open concurrently, so those limits must be raised before deployment.

1. Raise the open-file limits by editing the system file:

# vim /etc/security/limits.conf

* soft nofile 65536

* hard nofile 65536

* soft nproc 2048

* hard nproc 4096
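
These limits take effect only on a new login session. After logging back in as the service user, a quick check that they applied:

$ ulimit -n

$ ulimit -u

ulimit -n should report 65536; ulimit -u reports the process limit governed by the nproc values here and in step 2.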

2. Adjust the process limit by editing the system file:

# vim /etc/security/limits.d/20-nproc.conf

*          soft    nproc     4096

root       soft    nproc     unlimited

3. Adjust virtual memory and the maximum number of file handles by editing the system file:

# vim /etc/sysctl.conf

vm.max_map_count = 262144

fs.file-max = 655360

# sysctl -p
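
sysctl -p applies the new values immediately; confirm they took effect:

# sysctl vm.max_map_count fs.file-max

Both values should echo back as configured above (262144 and 655360).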

4. Install the JDK and update the environment profile:

# mkdir /usr/local/java

# tar -zxf jdk-8u201-linux-x64.tar.gz -C /usr/local/java/

# vim /etc/profile

export JAVA_HOME=/usr/local/java/jdk1.8.0_201

export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar

export PATH=$JAVA_HOME/bin:$HOME/bin:$HOME/.local/bin:$PATH

# source /etc/profile
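
Verify the JDK is on the PATH; the version string should show 1.8.0_201:

# java -version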

5. Create the elasticsearch directory and extract Elasticsearch:

# mkdir /usr/local/elasticsearch

# tar -zxf elasticsearch-6.5.4.tar.gz -C /usr/local/elasticsearch

# mkdir  /usr/local/elasticsearch/elasticsearch-6.5.4/data

Review the effective (non-comment) configuration:

# grep '^[a-zA-Z]' /usr/local/elasticsearch/elasticsearch-6.5.4/config/elasticsearch.yml

Map the node hostnames in the hosts file:

# vim /etc/hosts

192.168.2.123 elasticsearch1

192.168.2.222 elasticsearch2

192.168.2.221 elasticsearch3
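
Optionally confirm the names resolve on every host:

# getent hosts elasticsearch1 elasticsearch2 elasticsearch3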

6. Create the service user and grant ownership:

# useradd elasticsearch

# chown -R elasticsearch:elasticsearch /usr/local/elasticsearch/

7. Switch to the elasticsearch user, adjust the configuration file, and start the Elasticsearch service:

# su - elasticsearch

$ /usr/local/elasticsearch/elasticsearch-6.5.4/bin/elasticsearch -d

Check that the node responds after startup:

# curl -X GET "192.168.2.123:19200/"

View the current document count:

# curl -i -XGET 'http://192.168.2.123:19200/_count?pretty'

To display index and shard information graphically, install the elasticsearch-head plugin (covered in Part IV).

Cluster node status:

Note: a status of green means healthy, yellow means warning, and red means failure.

# curl http://192.168.2.123:19200/_cluster/health?pretty

View detailed cluster state:

# curl '192.168.2.123:19200/_cluster/state?pretty'

The same information is available in a browser at http://192.168.2.123:19200/_cluster/state?pretty
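
The standard _cat endpoints provide a more compact, human-readable view of the same information:

# curl '192.168.2.123:19200/_cat/nodes?v'

# curl '192.168.2.123:19200/_cat/health?v'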

Master node configuration file (elasticsearch.yml):

# ======================== Elasticsearch Configuration =========================

# Nodes that share this cluster name join the same cluster

cluster.name: elasticsearch-cluster

# The node's hostname

node.name: elasticsearch1

# This node is master-eligible

node.master: true

# This node does not store data

node.data: false

# Local IP address and HTTP port

network.host: 192.168.2.123

http.port: 19200

# Data storage path

path.data: /usr/local/elasticsearch/elasticsearch-6.5.4/data

# Log path

path.logs: /usr/local/elasticsearch/elasticsearch-6.5.4/logs

# Lock memory so it cannot be swapped out (set to false here)

bootstrap.memory_lock: false

bootstrap.system_call_filter: false

http.cors.enabled: true

http.cors.allow-origin: "*"

# Configure unicast discovery

discovery.zen.ping.unicast.hosts: ["192.168.2.222","192.168.2.123","192.168.2.221"]

discovery.zen.minimum_master_nodes: 1
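
Note: to avoid split brain, discovery.zen.minimum_master_nodes should equal (master-eligible nodes / 2) + 1. In this layout only elasticsearch1 is master-eligible, so 1 is correct; if all three nodes were made master-eligible, the value should be raised to 2.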

Data (slave) node configuration files (elasticsearch2 and elasticsearch3):

# ======================== Elasticsearch Configuration =========================

cluster.name: elasticsearch-cluster

node.name: elasticsearch2

path.data: /usr/local/elasticsearch/elasticsearch-6.5.4/data

path.logs: /usr/local/elasticsearch/elasticsearch-6.5.4/logs

node.master: false

node.data: true

network.host: 192.168.2.222

http.port: 19200

bootstrap.memory_lock: false

bootstrap.system_call_filter: false

http.cors.enabled: true

http.cors.allow-origin: "*"

discovery.zen.minimum_master_nodes: 1

discovery.zen.ping.unicast.hosts: ["192.168.2.123","192.168.2.222","192.168.2.221"]

# ======================== Elasticsearch Configuration =========================

cluster.name: elasticsearch-cluster

node.name: elasticsearch3

path.data: /usr/local/elasticsearch/elasticsearch-6.5.4/data

path.logs: /usr/local/elasticsearch/elasticsearch-6.5.4/logs

node.master: false

node.data: true

network.host: 192.168.2.221

http.port: 19200

bootstrap.memory_lock: false

bootstrap.system_call_filter: false

http.cors.enabled: true

http.cors.allow-origin: "*"

discovery.zen.minimum_master_nodes: 1

discovery.zen.ping.unicast.hosts: ["192.168.2.123","192.168.2.222","192.168.2.221"]

II. Logstash Deployment

1. Extract and install the filebeat-6.5.4 service (log collector):

# tar -zxf filebeat-6.5.4-linux-x86_64.tar.gz -C /usr/local/elasticsearch/

# mv  /usr/local/elasticsearch/filebeat-6.5.4-linux-x86_64/ /usr/local/elasticsearch/filebeat-6.5.4

Start the service:

# cd /usr/local/elasticsearch/filebeat-6.5.4 && nohup ./filebeat -c filebeat.yml &

Review the effective (non-comment) configuration:

# grep "^\s*[^# \t].*$" /usr/local/elasticsearch/filebeat-6.5.4/filebeat.yml

2. Extract and install the Logstash service:

# tar -zxf logstash-6.5.4.tar.gz -C /usr/local/elasticsearch/

# cd /usr/local/elasticsearch/logstash-6.5.4/bin && ./logstash -f ../config/logstash.conf &

Test the Logstash configuration file (the -t flag validates syntax without starting the pipeline):

# ./logstash -t  -f /usr/local/elasticsearch/logstash-6.5.4/config/logstash.conf

logstash.conf configuration file:

# Sample Logstash configuration for creating a simple

# Beats -> Logstash -> Elasticsearch pipeline.

#input {

# beats {

# port => 5044

# }

#}

input {
  kafka {
    bootstrap_servers => "192.168.2.123:19092,192.168.2.222:19092"
    topics => "test"
    codec => "json"
    #client_id => "test"
    #group_id => "test"
    #auto_offset_reset => "latest"   # start consuming from the latest offset
    #consumer_threads => 5
    #decorate_events => true         # adds the current topic, offset, group, partition, etc. to the message
    #topics => ["test","loge"]       # array type; multiple topics can be configured
    #type => "bhy"                   # attribute common to all plugins; especially useful with multiple inputs
  }
}

output {
  #if [fields][tag] == "test" {
  elasticsearch {
    hosts => ["http://192.168.2.123:19200"]
    index => "test-%{+YYYY-MM-dd}"
    codec => "json"
    #index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
    #user => "elastic"
    #password => "changeme"
  }
  #}
}
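
Once Filebeat, Kafka, and Logstash are all running, daily indices named test-YYYY-MM-dd should begin to appear; verify with:

# curl '192.168.2.123:19200/_cat/indices?v'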

III. Kibana Deployment

1. Edit the configuration file and start the Kibana service:

# tar -zxf kibana-6.5.4-linux-x86_64.tar.gz -C /usr/local/elasticsearch/

# mv /usr/local/elasticsearch/kibana-6.5.4-linux-x86_64 /usr/local/elasticsearch/kibana-6.5.4

# nohup /usr/local/elasticsearch/kibana-6.5.4/bin/kibana  > /dev/null 2>&1 &

Access Kibana in a browser at http://192.168.2.123:15601
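
Kibana 6.x also exposes a status endpoint, handy for confirming the service is up from the command line:

# curl -s http://192.168.2.123:15601/api/status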

Configuration file (kibana.yml):

# Kibana is served by a back end server. This setting specifies the port to use.

server.port: 15601

# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.

# The default is 'localhost', which usually means remote machines will not be able to connect.

# To allow connections from remote users, set this parameter to a non-loopback address.

server.host: "192.168.2.123"

# Enables you to specify a path to mount Kibana at if you are running behind a proxy.

# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath

# from requests it receives, and to prevent a deprecation warning at startup.

# This setting cannot end in a slash.

#server.basePath: ""

# Specifies whether Kibana should rewrite requests that are prefixed with

# `server.basePath` or require that they are rewritten by your reverse proxy.

# This setting was effectively always `false` before Kibana 6.3 and will

# default to `true` starting in Kibana 7.0.

#server.rewriteBasePath: false

# The maximum payload size in bytes for incoming server requests.

#server.maxPayloadBytes: 1048576

# The Kibana server's name. This is used for display purposes.

server.name: "192.168.2.123"

# The URL of the Elasticsearch instance to use for all your queries.

elasticsearch.url: "http://192.168.2.123:19200"

# When this setting's value is true Kibana uses the hostname specified in the server.host

# setting. When the value of this setting is false, Kibana uses the hostname of the host

# that connects to this Kibana instance.

#elasticsearch.preserveHost: true

# Kibana uses an index in Elasticsearch to store saved searches, visualizations and

# dashboards. Kibana creates a new index if the index doesn't already exist.

kibana.index: ".newkibana"

IV. elasticsearch-head Deployment

Install and configure the Docker service:

# yum -y install git docker

Start the Docker service:

# systemctl start docker

Enable Docker to start at boot:

# systemctl enable docker

Search for elasticsearch-head container images:

# docker search elasticsearch-head

Pull the elasticsearch-head base container image:

# docker pull docker.io/alivv/elasticsearch-head

Re-tag the image:

# docker tag docker.io/alivv/elasticsearch-head:latest elasticsearch-head:v1

List the downloaded container images:

# docker images

Deploy the elasticsearch-head service:

# docker run -d -p 19100:9100 --name elasticsearch-head --hostname elasticsearch-head --restart always elasticsearch-head:v1
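
After the container starts, open http://192.168.2.123:19100 in a browser (assuming head runs on this host) and enter http://192.168.2.123:19200 in the connection box; the http.cors.* settings enabled in elasticsearch.yml earlier are what allow the head UI to query the cluster. A quick reachability check:

# curl -I http://127.0.0.1:19100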

Reposted from: https://www.cnblogs.com/devops-docker/p/11506704.html
