filebeat + logstash + zookeeper + kafka + es + kibana


192.168.192.161-163   es cluster + kibana       (ports 9200, 5601)

192.168.192.164-166   zookeeper + kafka cluster (ports 2181, 2888, 3888 + 9092)

192.168.192.167       logstash


https://www.apache.org/dyn/closer.lua/zookeeper/zookeeper-3.5.9/apache-zookeeper-3.5.9-bin.tar.gz

apt install openjdk-8-jdk -y
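
The closer.lua link above is only a mirror selector; a minimal sketch of fetching and unpacking the tarball under /apps (the paths match the shell prompts below), assuming the archive.apache.org mirror:

# layout assumed by the rest of this walkthrough: binaries in /apps, data in /data/zookeeper
mkdir -p /apps /data/zookeeper
cd /apps
wget https://archive.apache.org/dist/zookeeper/zookeeper-3.5.9/apache-zookeeper-3.5.9-bin.tar.gz
tar xf apache-zookeeper-3.5.9-bin.tar.gz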

root@ubuntu20:/apps/apache-zookeeper-3.5.9-bin/conf# cat zoo.cfg 
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/data/zookeeper
clientPort=2181
maxClientCnxns=128
autopurge.snapRetainCount=3
autopurge.purgeInterval=1

server.1=192.168.192.164:2888:3888
server.2=192.168.192.165:2888:3888
server.3=192.168.192.166:2888:3888

Each node writes its own id, matching its server.N entry above:
echo 1 > /data/zookeeper/myid   # on 192.168.192.164
echo 2 > /data/zookeeper/myid   # on 192.168.192.165
echo 3 > /data/zookeeper/myid   # on 192.168.192.166
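
Start ZooKeeper on all three nodes, then check each node's role:

/apps/apache-zookeeper-3.5.9-bin/bin/zkServer.sh start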

 

root@ubuntu20:/apps/apache-zookeeper-3.5.9-bin/bin# ./zkServer.sh status
/usr/bin/java
ZooKeeper JMX enabled by default
Using config: /apps/apache-zookeeper-3.5.9-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: leader
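
The same command on the other two nodes should report Mode: follower; one leader plus two followers means the quorum formed correctly.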

 

https://www.apache.org/dyn/closer.cgi?path=/kafka/2.4.1/kafka_2.13-2.4.1.tgz

root@ubuntu20:/apps/kafka_2.13-2.4.1/config# vi server.properties
broker.id=1          # 2 on 192.168.192.165, 3 on 192.168.192.166
listeners=PLAINTEXT://192.168.192.164:9092   # each node listens on its own IP (.165, .166 on the other nodes)
#num.network.threads=3
#num.io.threads=8
log.dirs=/data/kafka-logs
zookeeper.connect=192.168.192.164:2181,192.168.192.165:2181,192.168.192.166:2181
/apps/kafka/bin/kafka-server-start.sh -daemon /apps/kafka/config/server.properties
root@ubuntu20:~# ss -tnl | grep 9092
LISTEN               0                    50                                 [::ffff:192.168.192.165]:9092                                          *:*

https://www.elastic.co/guide/en/beats/filebeat/7.6/kafka-output.html

  

root@slave002:/opt/k8s-data/dockerfile/web/chuan/tomcat-app1# cat run_tomcat.sh
/usr/share/filebeat/bin/filebeat -e -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat &
su - nginx -c "/apps/tomcat/bin/catalina.sh start"
tail -f /etc/hosts
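
Inside the container this script backgrounds filebeat, starts tomcat as the nginx user, and uses tail -f to keep a foreground process alive so the container does not exit.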

Create topic-test1 with 3 partitions and a replication factor of 2, so that each partition is replicated onto two brokers: if one broker goes down, the messages can still be fetched from the other replica.

./kafka-topics.sh --create --zookeeper 192.168.192.164:2181 --topic topic-test1 --replication-factor 2 --partitions 3 
./kafka-topics.sh --list --zookeeper 192.168.192.164:2181
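
To confirm the partition and replica placement, --describe shows the leader and replica set per partition:

./kafka-topics.sh --describe --zookeeper 192.168.192.164:2181 --topic topic-test1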

#kafka architecture

broker: one kafka server is one broker; a cluster is made up of multiple brokers, and a single broker can host multiple topics.

topic: can be thought of as a queue; producers and consumers both address a topic. It is analogous to a table name in a database or an index in ES. Physically, messages of different topics can be stored separately.

partition: for scalability, a very large topic can be spread across multiple brokers (i.e. servers). A topic is split into one or more partitions, each of which is an ordered queue. Kafka only guarantees ordering of records within a partition, not across the different partitions of a topic. Every topic has at least one partition; when a producer sends data, a partition is chosen according to the assignment strategy and the message is appended to the tail of that partition's queue.

Why partition: it makes scaling out across the cluster easy, since each partition can be sized to the machine it lives on and a topic can consist of many partitions, so the cluster as a whole can handle data of any size. It also improves concurrency, because reads and writes happen per partition.
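
A quick way to see a topic in action is the pair of console clients shipped with kafka; any broker can serve as the entry point (topic name taken from the example above):

# type a few test messages, one per line
./kafka-console-producer.sh --broker-list 192.168.192.164:9092 --topic topic-test1
# read everything back from the beginning, via a different broker
./kafka-console-consumer.sh --bootstrap-server 192.168.192.165:9092 --topic topic-test1 --from-beginning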

 

[root@chuan-tomcat-app1-deployment-5cb9f7bf6b-q6gr5 filebeat]# cat filebeat.yml 
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /apps/tomcat/logs/catalina.out
  fields:
    type: tomcat-catalina
- type: log
  enabled: true
  paths:
    - /apps/tomcat/logs/localhost_access_log.*.txt
  fields:
    type: tomcat-accesslog
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.kibana:

output.kafka:
  hosts: ["192.168.192.164:9092"]
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000
  topic: "topic-test1"

#output.redis:
#  hosts: ["192.168.192.152:36379"]
#  key: "k8s-chuan-app1"
#  db: 1
#  timeout: 5
#  password: "123456"
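
The two custom fields.type values (tomcat-catalina and tomcat-accesslog) are what the logstash output conditionals further down match on to route each stream to its own ES index.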

 

[root@chuan-tomcat-app1-deployment-5cb9f7bf6b-q6gr5 filebeat]# ps -ef|grep filebeat
root         143      83  0 14:19 pts/0    00:00:00 /usr/share/filebeat/bin/filebeat -e -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat
root         179     153  0 14:23 pts/1    00:00:00 grep --color=auto filebeat

https://www.kafkatool.com/download.html


#logstash

 文档 https://www.elastic.co/guide/en/logstash/7.6/plugins-inputs-kafka.html

1. Install the JDK

2. dpkg -i logstash-7.6.2.deb

/usr/share/logstash/bin/logstash -f kafka-to-es.conf  -t
Config Validation Result: OK
/usr/share/logstash/bin/logstash -f kafka-to-es.conf
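
-t only validates the config. Since logstash was installed from the deb package, it can also run as a service once the config sits in /etc/logstash/conf.d (assuming the packaged systemd unit): systemctl start logstash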

  

root@ubuntu20:/etc/logstash/conf.d# cat kafka-to-es.conf 
input {
  kafka {
    bootstrap_servers => "192.168.192.164:9092,192.168.192.165:9092,192.168.192.166:9092"
    topics => ["topic-test1"]
    codec => "json"
  }
}

output {
  if [fields][type] == "tomcat-accesslog" {
    elasticsearch {
      hosts => ["192.168.192.161:9200","192.168.192.162:9200"]
      index => "chuan-accesslog-%{+YYYY.MM.dd}"
    }
  }

  if [fields][type] == "tomcat-catalina" {
    elasticsearch {
      hosts => ["192.168.192.161:9200","192.168.192.162:9200"]
      index => "chuan-catalinalog-%{+YYYY.MM.dd}"
    }
  }

  stdout { 
    codec => rubydebug
  }
}
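
Once events flow through the pipeline, the daily indices should appear in ES; a quick check against any ES node:

curl -s 'http://192.168.192.161:9200/_cat/indices?v' | grep chuan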

 

 #kibana

dpkg -i kibana-7.6.2-amd64.deb

vi /etc/kibana/kibana.yml
server.port: 5601
server.host: "192.168.192.161"
elasticsearch.hosts: ["http://192.168.192.161:9200"]
i18n.locale: "zh-CN"
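
Restart the service to pick up the config, then browse to http://192.168.192.161:5601:

systemctl restart kibana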

root@ubuntu20:~# ss -tnl|grep 5601
LISTEN 0 511 192.168.192.161:5601 0.0.0.0:*

