Commit the initial version of the Ansible deployment playbooks for each component

qidaijie
2024-01-18 15:35:33 +08:00
parent f0bd05d565
commit 0cc392df5c
262 changed files with 15927 additions and 0 deletions

View File

@@ -0,0 +1,5 @@
[zookeeper]
192.168.45.102
[kafka]
192.168.45.102
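Both groups point at the same host, so this inventory describes a single-node layout with ZooKeeper and Kafka co-located on 192.168.45.102. Connectivity to both groups can be checked with an ad-hoc ping before running the plays (a minimal sketch; the inventory file name hosts.ini is an assumption):

# hosts.ini is a placeholder for this inventory file's actual name
ansible -i hosts.ini zookeeper -m ping
ansible -i hosts.ini kafka -m ping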

View File

@@ -0,0 +1,7 @@
- hosts: kafka
  remote_user: root
  roles:
    - role
  vars_files:
    - role/vars/main.yml

View File

@@ -0,0 +1,13 @@
#The default installation location
deploy_dir: /data/olap
#The default data storage location, used for storing application data, logs and configuration files
data_dir: /data/olap
kafka:
  #JVM memory options for the Kafka broker.
  java_opt: '-Xmx1024m -Xms1024m'
  #The minimum age of a log file to be eligible for deletion due to age
  log.retention.hours: 168
  #A size-based retention policy for logs, in bytes (10737418240 = 10 GiB)
  log.retention.bytes: 10737418240
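These values are rendered into server.properties by the role template further below; the dotted keys are read there with bracket access, e.g. kafka['log.retention.hours']. After a deployment, the rendered values can be confirmed on the target host (an illustrative check, not part of the role):

# path assumes the default deploy_dir=/data/olap and container_name=kafka
grep -E 'log\.retention\.(hours|bytes)' /data/olap/kafka/config/server.properties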

View File

@@ -0,0 +1,38 @@
- name: Loading Image
  docker_image:
    name: '{{ image_name }}'
    tag: '{{ image_tag }}'
    load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
    source: load
    force_tag: yes
    force_source: yes
    timeout: 300
- name: Stop Container
  docker_container:
    name: '{{ container_name }}'
    state: absent
- name: Start Container
  docker_compose:
    project_src: '{{ deploy_dir }}/{{ container_name }}/'
- name: Removing Image
  docker_image:
    name: '{{ image_name }}'
    tag: '{{ image_tag }}'
    state: absent
- name: Loading Exporter Image
  docker_image:
    name: 'kafka_exporter'
    tag: 'v2.0'
    load_path: '{{ deploy_dir }}/{{ container_name }}/monitor/kafka_exporter-v2.0.tar'
    source: load
    force_tag: yes
    force_source: yes
    timeout: 300
- name: Start Exporter Container
  docker_compose:
    project_src: '{{ deploy_dir }}/{{ container_name }}/monitor/'
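The handlers load the saved images and (re)create the broker and exporter containers from the generated compose files. After a run, the result can be inspected from the Docker host with standard commands (illustrative only):

docker ps --filter name=kafka
docker images | grep kafka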

View File

@@ -0,0 +1,72 @@
- name: Setting node_nums variable
  set_fact: node_nums="{{ groups.kafka | length }}"
- block:
    - name: Check the ZooKeeper status
      shell: netstat -anlp | egrep "2181" | grep LISTEN | wc -l
      register: port_out
      delegate_to: "{{ groups.zookeeper[0] }}"
    - name: Terminate execution
      fail:
        msg: "Port 2181 on the ZooKeeper node is not listening. The ZooKeeper service may be abnormal"
      run_once: true
      delegate_to: 127.0.0.1
      when: port_out.stdout != '1'
- name: Creating directory
  file:
    state: directory
    path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
  with_items:
    - { dir: 'kafka-logs' }
    - { dir: 'config' }
    - { dir: 'logs' }
    - { dir: 'monitor' }
- name: Unpack {{ component_version }}.tgz to /usr/local/
  unarchive:
    src: 'files/{{ component_version }}.tgz'
    dest: '/usr/local/'
- name: Copying Kafka operation scripts
  template:
    src: '{{ item.src }}'
    dest: '{{ item.dest }}'
    mode: 0755
  with_items:
    - { src: 'kafka-operation.sh.j2', dest: '/usr/local/{{ component_version }}/bin/kafka-operation.sh' }
    - { src: 'kafka.sh.j2', dest: '/etc/profile.d/kafka.sh' }
- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
  copy:
    src: 'files/{{ image_name }}-{{ image_tag }}.tar'
    dest: '{{ deploy_dir }}/{{ container_name }}/'
    force: true
  notify:
    - Loading Image
- name: Copying exporter image to {{ deploy_dir }}/{{ container_name }}/monitor
  copy:
    src: 'files/kafka_exporter-v2.0.tar'
    dest: '{{ deploy_dir }}/{{ container_name }}/monitor/'
    force: true
  notify:
    - Loading Exporter Image
- name: Copying Kafka config files
  template:
    src: '{{ item.src }}'
    dest: '{{ item.dest }}'
    mode: 0644
  with_items:
    - { src: 'server.properties.j2', dest: '{{ deploy_dir }}/{{ container_name }}/config/server.properties' }
    - { src: 'docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml' }
    - { src: 'docker-compose_exporter.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/monitor/docker-compose.yml' }
  notify:
    - Start Container
    - Start Exporter Container
- meta: flush_handlers

View File

@@ -0,0 +1,10 @@
- block:
    - include: uninstall.yml
    - include: deploy.yml
    - include: status-check.yml
  when: (operation) == "install"
- block:
    - include: uninstall.yml
  when: (operation) == "uninstall"

View File

@@ -0,0 +1,17 @@
- name: Waiting for Kafka to start, 60s
  shell: sleep 60
- name: Check if the Kafka process is running
  shell: ps -ef | grep -v grep | grep kafka.Kafka | wc -l
  register: process_out
- name: Check if the Kafka ports are listening
  shell: netstat -anlp | egrep "9092|9094|9095" | grep LISTEN | wc -l
  register: port_out
- name: Terminate execution
  fail:
    msg: "Kafka on node {{ inventory_hostname }} is not started. Please check"
  run_once: true
  delegate_to: 127.0.0.1
  when: process_out.stdout != '1' or port_out.stdout != '3'
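If the play fails here, the same checks can be repeated manually on a broker node; the exporter's published port 9982 comes from the exporter compose template further below (illustrative commands):

ps -ef | grep -v grep | grep kafka.Kafka
netstat -anlp | egrep "9092|9094|9095" | grep LISTEN
# assumes the exporter exposes the usual kafka_brokers gauge
curl -s http://127.0.0.1:9982/metrics | grep -m1 kafka_brokers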

View File

@@ -0,0 +1,39 @@
- block:
    - name: Stopping and removing {{ container_name }} container
      docker_container:
        name: '{{ container_name }}'
        state: absent
    - name: Removing old {{ image_name }} image
      docker_image:
        name: '{{ image_name }}'
        tag: '{{ image_tag }}'
        state: absent
    - name: Stopping and removing exporter container
      docker_container:
        name: 'kafka_exporter'
        state: absent
    - name: Removing old exporter image
      docker_image:
        name: 'kafka_exporter'
        tag: 'v2.0'
        state: absent
    - name: Deleting old {{ deploy_dir }}/{{ container_name }}
      file:
        path: '{{ deploy_dir }}/{{ container_name }}'
        state: absent
    - name: Checking whether ZooKeeper still has Kafka znodes
      shell: "docker exec -it zookeeper zkCli.sh ls / | grep kafka | wc -l"
      run_once: true
      delegate_to: "{{ groups.zookeeper[0] }}"
      register: has_zknode
    - name: Deleting Kafka znodes in ZooKeeper
      shell: "docker exec -it zookeeper zkCli.sh rmr /kafka"
      run_once: true
      delegate_to: "{{ groups.zookeeper[0] }}"
      when: has_zknode.stdout >= '1'
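Whether the /kafka znode is actually gone after an uninstall can be confirmed with the same zkCli.sh call the tasks above use, run on the first ZooKeeper host:

docker exec -it zookeeper zkCli.sh ls /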

View File

@@ -0,0 +1,15 @@
version: '2'
services:
  kafka:
    image: {{ image_name }}:{{ image_tag }}
    restart: always
    container_name: {{ container_name }}
    environment:
      KAFKA_JVM_MEM: "{{ kafka.java_opt }}"
    volumes:
      - "{{ deploy_dir }}/{{ container_name }}/config/server.properties:/opt/{{ component_version }}/config/server.properties"
      - "{{ deploy_dir }}/{{ container_name }}/kafka-logs:/opt/{{ component_version }}/kafka-logs"
      - "{{ deploy_dir }}/{{ container_name }}/logs:/opt/{{ component_version }}/logs"
    network_mode: "host"
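The rendered file is written to {{ deploy_dir }}/{{ container_name }}/docker-compose.yml and brought up by the Start Container handler; broker logs can then be tailed directly from the host (assuming container_name keeps its default value of kafka):

docker logs --tail 50 -f kafka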

View File

@@ -0,0 +1,21 @@
version: '3.3'
services:
  kafka-exporter:
    image: kafka_exporter:v2.0
    container_name: kafka_exporter
    ports:
      - 9982:9308
    restart: always
    command:
      - --kafka.server={{ inventory_hostname }}:9094
      - --sasl.username=admin
      - --sasl.password=galaxy2019
      - --sasl.mechanism=plain
      - --sasl.enabled
    networks:
      olap:
        ipv4_address: 172.20.88.7
networks:
  olap:
    external: true
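The exporter joins the external olap network with a fixed address, so that network must already exist on the host; nothing in this role creates it. If it is missing, it could be created manually along these lines (a sketch; the subnet is only inferred from the 172.20.88.7 address above):

# subnet is an assumption derived from ipv4_address 172.20.88.7
docker network create --driver bridge --subnet 172.20.88.0/24 olap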

View File

@@ -0,0 +1,60 @@
#!/bin/bash
LOCAL_IP={{ inventory_hostname }}:9094
ZK_SERVER={% for dev_info in groups.zookeeper -%}
{% if loop.last -%}
{{dev_info}}:2181/kafka
{%- else %}
{{dev_info}}:2181,
{%- endif %}
{%- endfor %}
KAFKA_SERVER={% for dev_info in groups.kafka -%}
{% if loop.last -%}
{{dev_info}}:9092
{%- else %}
{{dev_info}}:9092,
{%- endif %}
{%- endfor %}
PARTITIONS={{groups.kafka|length}}
case $1 in
  producer)
    kafka-console-producer.sh --producer.config $KAFKA_HOME/config/producer.properties --broker-list $LOCAL_IP --topic $2
    ;;
  consumer)
    kafka-console-consumer.sh --consumer.config $KAFKA_HOME/config/consumer.properties --bootstrap-server $LOCAL_IP --topic $2
    ;;
  consumer-begin)
    kafka-console-consumer.sh --consumer.config $KAFKA_HOME/config/consumer.properties --from-beginning --bootstrap-server $LOCAL_IP --topic $2
    ;;
  create)
    kafka-topics.sh --create --bootstrap-server $KAFKA_SERVER --replication-factor 1 --partitions $PARTITIONS --topic $2
    ;;
  delete)
    kafka-topics.sh --delete --bootstrap-server $KAFKA_SERVER --topic $2
    ;;
  list)
    kafka-topics.sh --list --bootstrap-server $KAFKA_SERVER
    ;;
  groups)
    kafka-consumer-groups.sh --all-groups --all-topics --list --bootstrap-server $KAFKA_SERVER
    ;;
  group)
    kafka-consumer-groups.sh --bootstrap-server $KAFKA_SERVER --describe --group $2
    ;;
  election-leader)
    kafka-leader-election.sh --bootstrap-server $KAFKA_SERVER --all-topic-partitions --election-type PREFERRED
    ;;
  *)
    echo 'Usage: kafka-operation.sh {producer|consumer|consumer-begin|create|delete} {topic-name}'
    echo 'Status: kafka-operation.sh {list|groups}'
    echo 'Status: kafka-operation.sh {group} {group name}'
    echo 'Maintenance: kafka-operation.sh {election-leader}'
esac
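Once the profile script below puts $KAFKA_HOME/bin on the PATH, the wrapper covers routine topic work, for example (the topic name is arbitrary):

kafka-operation.sh create demo-topic
kafka-operation.sh producer demo-topic
kafka-operation.sh consumer-begin demo-topic
kafka-operation.sh list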

View File

@@ -0,0 +1,3 @@
#kafka
export KAFKA_HOME=/usr/local/{{ component_version }}
export PATH=$KAFKA_HOME/bin:$PATH

View File

@@ -0,0 +1,190 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults
############################# Server Basics #############################
listeners=SASL_PLAINTEXT://{{ inventory_hostname }}:9094,PLAINTEXT://{{ inventory_hostname }}:9092,SSL://{{ inventory_hostname }}:9095
advertised.listeners=SASL_PLAINTEXT://{{ inventory_hostname }}:9094,PLAINTEXT://{{ inventory_hostname }}:9092,SSL://{{ inventory_hostname }}:9095
ssl.keystore.location=/opt/{{ component_version }}/config/keystore.jks
ssl.keystore.password=galaxy2019
ssl.key.password=galaxy2019
ssl.truststore.location=/opt/{{ component_version }}/config/truststore.jks
ssl.truststore.password=galaxy2019
#ssl.client.auth=required
ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1
ssl.keystore.type=JKS
ssl.truststore.type=JKS
# Since Kafka 2.0.x, ssl.endpoint.identification.algorithm defaults to HTTPS, i.e. hostname verification is enabled
# If hostname verification is not needed, set it to an empty value: ssl.endpoint.identification.algorithm=
ssl.endpoint.identification.algorithm=
# Use a secured protocol for inter-broker traffic as well; the default is security.inter.broker.protocol=PLAINTEXT
security.inter.broker.protocol=SASL_PLAINTEXT
#SASL configuration
sasl.mechanism.inter.broker.protocol=PLAIN
sasl.enabled.mechanisms=PLAIN
# The id of the broker. This must be set to a unique integer for each broker.
broker.id={{ groups['kafka'].index(inventory_hostname) +1 }}
############################# Socket Server Settings #############################
#Whether topics can be deleted directly
delete.topic.enable=true
#Whether topics may be created automatically
auto.create.topics.enable=false
#Enable the periodic log deletion policy
log.cleanup.policy=delete
# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3
# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=10485760
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=10485760
# The maximum size of a request that the socket server will accept (protection against OOM)
#socket.request.max.bytes=2147483600
socket.request.max.bytes=104857600
#The maximum size of a message body, in bytes.
message.max.bytes=10485760
#Maximum amount of data a replica fetches in each request
replica.fetch.max.bytes=20485760
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=/opt/{{ component_version }}/kafka-logs
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings #############################
{% if groups.kafka | length >= 3 %}
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1, such as 3, is recommended to ensure availability.
offsets.topic.replication.factor=3
#Replication factor for the transaction state topic (set higher to ensure availability). Internal topic creation will fail until the cluster size meets this replication factor requirement
transaction.state.log.replication.factor=3
#Overrides min.insync.replicas for the transaction topic; while min.insync.replicas defaults to 1, this setting defaults to 2
transaction.state.log.min.isr=2
{% elif groups.kafka | length == 1 %}
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1, such as 3, is recommended to ensure availability.
offsets.topic.replication.factor=1
#Replication factor for the transaction state topic (set higher to ensure availability). Internal topic creation will fail until the cluster size meets this replication factor requirement
transaction.state.log.replication.factor=1
#Overrides min.insync.replicas for the transaction topic; while min.insync.replicas defaults to 1, this setting defaults to 2
transaction.state.log.min.isr=1
{% endif %}
#Whether replicas not in the ISR may be elected leader.
unclean.leader.election.enable=true
#If a partition's leader goes down, the original broker can take back leadership after it recovers
auto.leader.rebalance.enable=true
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours={{ kafka['log.retention.hours'] }}
# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
log.retention.bytes={{ kafka['log.retention.bytes'] }}
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect={% for dev_info in groups.zookeeper -%}
{% if loop.last -%}
{{dev_info}}:2181/kafka
{%- else %}
{{dev_info}}:2181,
{%- endif %}
{%- endfor %}
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=60000
#ZooKeeper session timeout
zookeeper.session.timeout.ms=60000
#Set zookeeper client to use secure ACLs
zookeeper.set.acl=false
############################# Group Coordinator Settings #############################
# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
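The template wires up three listeners: PLAINTEXT on 9092, SASL_PLAINTEXT on 9094 and SSL on 9095. A quick handshake test against the SSL listener can be run from any node (illustrative; it uses the inventory host from above and one of the TLS versions enabled here):

openssl s_client -connect 192.168.45.102:9095 -tls1_2 </dev/null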

View File

@@ -0,0 +1,23 @@
#Image name
image_name: kafka
#Image tag
image_tag: 2.12-3.4.1
#Container name
container_name: kafka
#Component version
component_version: kafka_2.12-3.4.1
#Backup directory
backup_path: "{{ deploy_dir }}/backup/platform/{{ old_version }}/{{ container_name }}"
#Files to back up
backup_items:
  - "{{ deploy_dir }}/{{ container_name }}/config"
  - "{{ deploy_dir }}/{{ container_name }}/docker-compose.yml"
#sha256 of the image corresponding to each platform version
version_sha256_items:
  "1.0.0": "d91a3183b1f625ab57829db7ffb51f53671e4c2b4c19d3b8511dbb8601593611"