Initial version of the Ansible deployment playbooks for each component
@@ -0,0 +1,15 @@
version: '2'

services:
  kafka:
    image: {{ image_name }}:{{ image_tag }}
    restart: always
    container_name: {{ container_name }}
    environment:
      KAFKA_JVM_MEM: "{{ kafka.java_opt }}"
    volumes:
      - "{{ deploy_dir }}/{{ container_name }}/config/server.properties:/opt/{{ component_version }}/config/server.properties"
      - "{{ deploy_dir }}/{{ container_name }}/kafka-logs:/opt/{{ component_version }}/kafka-logs"
      - "{{ deploy_dir }}/{{ container_name }}/logs:/opt/{{ component_version }}/logs"
    network_mode: "host"
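The compose template above is rendered by Ansible and pulls its values from playbook variables (image_name, image_tag, container_name, deploy_dir, component_version and the kafka dict). A minimal group_vars sketch with purely illustrative values, not taken from this commit, could look like:

# group_vars/kafka.yml -- illustrative values only, assumed for this sketch
image_name: kafka
image_tag: "3.4.1"
container_name: kafka
deploy_dir: /data/deploy
component_version: kafka_2.13-3.4.1
kafka:
  java_opt: "-Xms1g -Xmx1g"
  log.retention.hours: 72
  log.retention.bytes: -1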
@@ -0,0 +1,21 @@
version: '3.3'

services:
  kafka-exporter:
    image: kafka_exporter:v2.0
    container_name: kafka_exporter
    ports:
      - 9982:9308
    restart: always
    command:
      - --kafka.server={{ inventory_hostname }}:9094
      - --sasl.username=admin
      - --sasl.password=galaxy2019
      - --sasl.mechanism=plain
      - --sasl.enabled
    networks:
      olap:
        ipv4_address: 172.20.88.7
networks:
  olap:
    external: true
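The exporter joins a pre-existing external Docker network named olap at a fixed address. A sketch of an Ansible task that could create that network beforehand, assuming the community.docker collection is available (the subnet is an assumption chosen so that 172.20.88.7 falls inside it):

# Assumed task: ensure the external "olap" network exists before compose runs
- name: Create olap docker network
  community.docker.docker_network:
    name: olap
    ipam_config:
      - subnet: 172.20.88.0/24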
@@ -0,0 +1,60 @@
#!/bin/bash

LOCAL_IP={{ inventory_hostname }}:9094

ZK_SERVER={% for dev_info in groups.zookeeper -%}
{% if loop.last -%}
{{dev_info}}:2181/kafka
{%- else %}
{{dev_info}}:2181,
{%- endif %}
{%- endfor %}

KAFKA_SERVER={% for dev_info in groups.kafka -%}
{% if loop.last -%}
{{dev_info}}:9092
{%- else %}
{{dev_info}}:9092,
{%- endif %}
{%- endfor %}

PARTITIONS={{ groups.kafka | length }}

case $1 in
    producer)
        kafka-console-producer.sh --producer.config $KAFKA_HOME/config/producer.properties --broker-list $LOCAL_IP --topic $2
        ;;
    consumer)
        kafka-console-consumer.sh --consumer.config $KAFKA_HOME/config/consumer.properties --bootstrap-server $LOCAL_IP --topic $2
        ;;
    consumer-begin)
        kafka-console-consumer.sh --consumer.config $KAFKA_HOME/config/consumer.properties --from-beginning --bootstrap-server $LOCAL_IP --topic $2
        ;;
    create)
        kafka-topics.sh --create --bootstrap-server $KAFKA_SERVER --replication-factor 1 --partitions $PARTITIONS --topic $2
        ;;
    delete)
        kafka-topics.sh --delete --bootstrap-server $KAFKA_SERVER --topic $2
        ;;
    list)
        kafka-topics.sh --list --bootstrap-server $KAFKA_SERVER
        ;;
    groups)
        kafka-consumer-groups.sh --all-groups --all-topics --list --bootstrap-server $KAFKA_SERVER
        ;;
    group)
        kafka-consumer-groups.sh --bootstrap-server $KAFKA_SERVER --describe --group $2
        ;;
    election-leader)
        kafka-leader-election.sh --bootstrap-server $KAFKA_SERVER --all-topic-partitions --election-type PREFERRED
        ;;
    *)
        echo 'Usage: kafka-operation.sh {producer|consumer|consumer-begin|create|delete} {topic-name}'
        echo 'Status: kafka-operation.sh {list|groups}'
        echo 'Status: kafka-operation.sh {group} {group name}'
        echo 'Maintenance: kafka-operation.sh {election-leader}'
        ;;
esac
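The helper script above is a Jinja2 template; a sketch of a role task that might render it onto each broker host follows. The template file name and destination path are assumptions, used only to illustrate how the role applies its templates:

# Hypothetical task -- file name and destination are assumptions, not from this commit
- name: Render the Kafka operation helper script
  ansible.builtin.template:
    src: kafka-operation.sh.j2
    dest: "{{ deploy_dir }}/{{ container_name }}/kafka-operation.sh"
    mode: "0755"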
Apache Kafka/3.4.1/kafka/role/templates/kafka.sh.j2 (new file, 3 lines)
@@ -0,0 +1,3 @@
# kafka
export KAFKA_HOME=/usr/local/{{ component_version }}
export PATH=$KAFKA_HOME/bin:$PATH
Apache Kafka/3.4.1/kafka/role/templates/server.properties.j2 (new file, 190 lines)
@@ -0,0 +1,190 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# see kafka.server.KafkaConfig for additional details and defaults

############################# Server Basics #############################
listeners=SASL_PLAINTEXT://{{ inventory_hostname }}:9094,PLAINTEXT://{{ inventory_hostname }}:9092,SSL://{{ inventory_hostname }}:9095
advertised.listeners=SASL_PLAINTEXT://{{ inventory_hostname }}:9094,PLAINTEXT://{{ inventory_hostname }}:9092,SSL://{{ inventory_hostname }}:9095
ssl.keystore.location=/opt/{{ component_version }}/config/keystore.jks
ssl.keystore.password=galaxy2019
ssl.key.password=galaxy2019
ssl.truststore.location=/opt/{{ component_version }}/config/truststore.jks
ssl.truststore.password=galaxy2019
#ssl.client.auth=required
ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1
ssl.keystore.type=JKS
ssl.truststore.type=JKS

# Since Kafka 2.0.x, ssl.endpoint.identification.algorithm defaults to HTTPS, i.e. hostname verification is required.
# To skip hostname verification, set ssl.endpoint.identification.algorithm to an empty value, as below.
ssl.endpoint.identification.algorithm=

# Protocol used for inter-broker traffic; the default is security.inter.broker.protocol=PLAINTEXT
security.inter.broker.protocol=SASL_PLAINTEXT

# SASL configuration
sasl.mechanism.inter.broker.protocol=PLAIN
sasl.enabled.mechanisms=PLAIN

# The id of the broker. This must be set to a unique integer for each broker.
broker.id={{ groups['kafka'].index(inventory_hostname) + 1 }}

############################# Socket Server Settings #############################
# Allow topics to be deleted directly
delete.topic.enable=true

# Allow automatic topic creation
auto.create.topics.enable=false

# Log cleanup policy: delete expired segments
log.cleanup.policy=delete

# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3

# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=10485760

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=10485760

# The maximum size of a request that the socket server will accept (protection against OOM)
#socket.request.max.bytes=2147483600
socket.request.max.bytes=104857600

# The maximum size of a message body, in bytes
message.max.bytes=10485760

# Maximum amount of data fetched per replica fetch request, in bytes
replica.fetch.max.bytes=20485760

############################# Log Basics #############################

# A comma separated list of directories under which to store log files
log.dirs=/opt/{{ component_version }}/kafka-logs

# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1

############################# Internal Topic Settings #############################
{% if groups.kafka | length >= 3 %}
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended to ensure availability, such as 3.
offsets.topic.replication.factor=3

# Replication factor for the transaction state topic (set higher to ensure availability).
# Internal topic creation will fail until the cluster size meets this replication factor requirement.
transaction.state.log.replication.factor=3

# Overrides min.insync.replicas for the transaction state topic.
transaction.state.log.min.isr=2
{% elif groups.kafka | length == 1 %}
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended to ensure availability, such as 3.
offsets.topic.replication.factor=1

# Replication factor for the transaction state topic (set higher to ensure availability).
# Internal topic creation will fail until the cluster size meets this replication factor requirement.
transaction.state.log.replication.factor=1

# Overrides min.insync.replicas for the transaction state topic.
transaction.state.log.min.isr=1
{% endif %}

# Whether replicas outside the ISR may be elected leader.
unclean.leader.election.enable=true

# If a partition's preferred leader goes down, allow it to reclaim leadership once the broker recovers.
auto.leader.rebalance.enable=true

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
#    1. Durability: Unflushed data may be lost if you are not using replication.
#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.

# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours={{ kafka['log.retention.hours'] }}

# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
log.retention.bytes={{ kafka['log.retention.bytes'] }}

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000

############################# Zookeeper #############################

# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect={% for dev_info in groups.zookeeper -%}
{% if loop.last -%}
{{dev_info}}:2181/kafka
{%- else %}
{{dev_info}}:2181,
{%- endif %}
{%- endfor %}

# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=60000

# ZooKeeper session timeout in ms
zookeeper.session.timeout.ms=60000

# Set zookeeper client to use secure ACLs
zookeeper.set.acl=false

############################# Group Coordinator Settings #############################

# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
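Both the helper script and server.properties build their ZooKeeper and broker lists from the zookeeper and kafka inventory groups, and broker.id comes from each host's position in the kafka group. A hypothetical YAML inventory sketch (host addresses are placeholders, not from this commit) showing the groups those expressions rely on:

# Hypothetical inventory -- with these hosts, broker.id renders as 1, 2, 3 and
# zookeeper.connect renders as 10.0.0.1:2181,10.0.0.2:2181,10.0.0.3:2181/kafka
all:
  children:
    kafka:
      hosts:
        10.0.0.11:
        10.0.0.12:
        10.0.0.13:
    zookeeper:
      hosts:
        10.0.0.1:
        10.0.0.2:
        10.0.0.3: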