Initial version of the Ansible deployment playbooks for each component

This commit is contained in:
qidaijie
2024-01-18 15:35:33 +08:00
parent f0bd05d565
commit 0cc392df5c
262 changed files with 15927 additions and 0 deletions

View File

@@ -0,0 +1,8 @@
[zookeeper]
192.168.45.102
[hdfs]
[hbase]
192.168.45.102
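Note: the groups in this inventory (zookeeper, hdfs, hbase) are what the tasks below resolve through groups.zookeeper, groups.hdfs and groups.hbase; cluster mode additionally expects at least three hbase hosts and a non-empty hdfs group. As a hedged illustration only, a three-node layout in Ansible's YAML inventory format could look like the sketch below (the 192.168.45.101-103 addresses are placeholders, not hosts from this commit):

# Hypothetical YAML-format inventory sketch; IPs are placeholders.
all:
  children:
    zookeeper:
      hosts:
        192.168.45.101:
        192.168.45.102:
        192.168.45.103:
    hdfs:
      hosts:
        192.168.45.101:
        192.168.45.102:
        192.168.45.103:
    hbase:
      hosts:
        192.168.45.101:
        192.168.45.102:
        192.168.45.103: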

View File

@@ -0,0 +1,7 @@
- hosts: hbase
  remote_user: root
  roles:
    - role
  vars_files:
    - role/vars/main.yml

View File

@@ -0,0 +1,22 @@
# The default installation location
deploy_dir: /data/olap
# The default data storage location, used for storing application data, logs and configuration files
data_dir: /data/olap
hbase:
  common:
    # The HBase resource isolation (RSGroup) function is used to group tables for storage.
    enable_rsgroup: true
  hmaster:
    # JVM memory options of the HBase HMaster.
    java_opt: '-Xmx1024m -Xms1024m'
  regionserver:
    # JVM memory options of the HBase HRegionServer.
    java_opt: '-Xmx1024m -Xms1024m -Xmn128m'
    # The number of threads the region server keeps open to serve requests to tables. It should generally be set to (number of cores - 1).
    hbase.regionserver.handler.count: 40
    # If any one of a column family's HStoreFiles has grown to exceed this value, the hosting HRegion is split in two.
    hbase.hregion.max.filesize: 10737418240
    # The memory used by all read caches (bucket cache), expressed in MB.
    hbase.bucketcache.size: 100
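Because Ansible replaces dictionary variables rather than merging them by default, overriding any one of these keys outside the role means restating the whole hbase dictionary. A minimal sketch of such an override, assuming it is placed in a hypothetical group_vars/hbase.yml (all values below are illustrative, not defaults from this commit):

# Hypothetical group_vars/hbase.yml override; values are examples only.
deploy_dir: /data1/olap
data_dir: /data1/olap
hbase:
  common:
    enable_rsgroup: false
  hmaster:
    java_opt: '-Xmx4096m -Xms4096m'
  regionserver:
    java_opt: '-Xmx16384m -Xms16384m -Xmn2048m'
    hbase.regionserver.handler.count: 64
    hbase.hregion.max.filesize: 10737418240
    hbase.bucketcache.size: 4096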

Binary file not shown.

View File

@@ -0,0 +1,27 @@
- name: Loading Image
  docker_image:
    name: '{{ image_name }}'
    tag: '{{ image_tag }}'
    load_path: '{{ deploy_dir }}/{{ container_name }}/{{ image_name }}-{{ image_tag }}.tar'
    source: load
    force_tag: yes
    force_source: yes
    timeout: 300
- name: Stop Container
  docker_container:
    name: '{{ item }}'
    state: absent
  with_items:
    - 'HMaster'
    - 'HRegionServer'
- name: Start Container
  docker_compose:
    project_src: '{{ deploy_dir }}/{{ container_name }}/'
- name: Removing Image
  docker_image:
    name: '{{ image_name }}'
    tag: '{{ image_tag }}'
    state: absent

View File

@@ -0,0 +1,88 @@
- name: Setting node_nums variable
  set_fact: node_nums="{{ groups.hbase | length }}"
- name: To terminate execution
  fail:
    msg: "HBase cluster mode requires at least 3 nodes; please check configurations/hosts -> hbase"
  when: node_nums | int < 3
- name: Checking Hadoop DataNode status
  shell: source /etc/profile && hadoop dfsadmin -report | grep "Live datanodes" | grep -E -o "[0-9]"
  async: 10
  register: datanode_out
  run_once: true
  delegate_to: "{{ groups.hdfs[0] }}"
- name: Checking Hadoop NameNode status
  shell: source /etc/profile && hadoop dfsadmin -report | grep 50010 | wc -l
  async: 10
  register: namenode_out
  run_once: true
  delegate_to: "{{ groups.hdfs[0] }}"
- name: To terminate execution
  fail:
    msg: "Hadoop dependency check failed; verify that the Hadoop cluster is healthy"
  when: datanode_out.stdout | int <= 1 and namenode_out.stdout | int <= 1
- name: Creating directory
  file:
    state: directory
    path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
  with_items:
    - { dir: 'logs' }
    - { dir: 'data' }
    - { dir: 'conf' }
    - { dir: 'init' }
- name: Unarchiving phoenix and conf
  unarchive:
    src: 'files/{{ item.file_name }}'
    dest: '{{ deploy_dir }}/{{ container_name }}/'
    force: yes
  with_items:
    - { file_name: 'phoenix-hbase-2.2-5.1.2-bin.tar' }
    - { file_name: 'conf.zip' }
- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
  copy:
    src: 'files/{{ image_name }}-{{ image_tag }}.tar'
    dest: '{{ deploy_dir }}/{{ container_name }}/'
    force: true
  notify:
    - Loading Image
- name: Fetching Hadoop config files to /tmp
  ansible.builtin.fetch:
    src: "{{ deploy_dir }}/hadoop-2.7.1/etc/hadoop/{{ item.filename }}"
    dest: "/tmp/"
    flat: yes
  loop: "{{ hadoop_config_files }}"
  run_once: true
  delegate_to: "{{ groups.hdfs[0] }}"
- name: Copying Hadoop config files to other nodes
  ansible.builtin.copy:
    src: "/tmp/{{ item.filename }}"
    dest: "{{ deploy_dir }}/{{ container_name }}/conf/"
  loop: "{{ hadoop_config_files }}"
- name: Copying HBase config files
  template:
    src: '{{ item.src }}'
    dest: '{{ item.dest }}'
    mode: '{{ item.mode }}'
  with_items:
    - { src: 'hbase-site.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/hbase-site.xml', mode: '0644' }
    - { src: 'startsql.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/startsql.sh', mode: '0755' }
    - { src: 'hbase-site.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/hbase-site.xml', mode: '0644' }
    - { src: 'regionservers.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/regionservers', mode: '0644' }
    - { src: 'backup-masters.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/backup-masters', mode: '0644' }
    - { src: 'hbase-env.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/hbase-env.sh', mode: '0755' }
    - { src: 'rsgroup.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/init/rsgroup.sh', mode: '0755' }
    - { src: 'docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml', mode: '0644' }
  notify:
    - Loading Image
    - Start Container
- meta: flush_handlers

View File

@@ -0,0 +1,44 @@
- name: Creating directory
  file:
    state: directory
    path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
  with_items:
    - { dir: 'logs' }
    - { dir: 'data' }
    - { dir: 'conf' }
    - { dir: 'init' }
- name: Unarchiving phoenix and conf
  unarchive:
    src: 'files/{{ item.file_name }}'
    dest: '{{ deploy_dir }}/{{ container_name }}/'
    force: yes
  with_items:
    - { file_name: 'phoenix-hbase-2.2-5.1.2-bin.tar' }
    - { file_name: 'conf.zip' }
- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
  copy:
    src: 'files/{{ image_name }}-{{ image_tag }}.tar'
    dest: '{{ deploy_dir }}/{{ container_name }}/'
    force: true
  notify:
    - Loading Image
- name: Copying HBase config files
  template:
    src: '{{ item.src }}'
    dest: '{{ item.dest }}'
    mode: '{{ item.mode }}'
  with_items:
    - { src: 'hbase-site.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/hbase-site.xml', mode: '0644' }
    - { src: 'startsql.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/startsql.sh', mode: '0755' }
    - { src: 'hbase-site.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/hbase-site.xml', mode: '0644' }
    - { src: 'regionservers.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/regionservers', mode: '0644' }
    - { src: 'hbase-env.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/hbase-env.sh', mode: '0755' }
    - { src: 'docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml', mode: '0644' }
  notify:
    - Loading Image
    - Start Container
- meta: flush_handlers

View File

@@ -0,0 +1,11 @@
- block:
    - include: uninstall.yml
    - include: "{{ playbook_name }}"
      vars:
        playbook_name: "{{ 'deploy-cluster.yml' if groups.hbase | length > 1 else 'deploy-standalone.yml' }}"
    - include: status-check.yml
  when: operation == "install"
- block:
    - include: uninstall.yml
  when: operation == "uninstall"
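The operation variable is not defined inside the role, so it has to be supplied at run time, for example with -e operation=install on the ansible-playbook command line or through an extra-vars file. A minimal sketch of such a vars file (the file name ops.yml is hypothetical, not part of this commit):

# Hypothetical extra-vars file, e.g. ops.yml, passed via --extra-vars "@ops.yml".
# The role's main.yml only reacts to the values install and uninstall.
operation: install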

View File

@@ -0,0 +1,47 @@
- name: Creating directory
  file:
    state: directory
    path: '{{ deploy_dir }}/{{ container_name }}/{{ item.dir }}'
  with_items:
    - { dir: 'logs' }
    - { dir: 'data' }
    - { dir: 'conf' }
    - { dir: 'init' }
- name: Copying conf to {{ deploy_dir }}/{{ container_name }}/
  copy:
    src: 'files/conf'
    dest: '{{ deploy_dir }}/{{ container_name }}/'
    force: true
- name: Unarchiving phoenix
  unarchive:
    src: '{{ role_path }}/../../../software-packages/phoenix-hbase-2.2-5.1.2-bin.tar'
    dest: '{{ deploy_dir }}/{{ container_name }}/'
    force: yes
- name: Copying image to {{ deploy_dir }}/{{ container_name }}/
  copy:
    src: '{{ role_path }}/../../../software-packages/{{ image_name }}-{{ image_tag }}.tar'
    dest: '{{ deploy_dir }}/{{ container_name }}/'
    force: true
  notify:
    - Loading Image
- name: Copying HBase config files
  template:
    src: '{{ item.src }}'
    dest: '{{ item.dest }}'
    mode: '{{ item.mode }}'
  with_items:
    - { src: 'hbase-site.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/hbase-site.xml', mode: '0644' }
    - { src: 'startsql.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/startsql.sh', mode: '0755' }
    - { src: 'hbase-site.xml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/hbase-site.xml', mode: '0644' }
    - { src: 'regionservers.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/regionservers', mode: '0644' }
    - { src: 'hbase-env.sh.j2', dest: '{{ deploy_dir }}/{{ container_name }}/conf/hbase-env.sh', mode: '0755' }
    - { src: 'docker-compose.yml.j2', dest: '{{ deploy_dir }}/{{ container_name }}/docker-compose.yml', mode: '0644' }
  notify:
    - Loading Image
    - Start Container
- meta: flush_handlers

View File

@@ -0,0 +1,31 @@
- block:
    - name: Stopping and removing container
      docker_container:
        name: '{{ item }}'
        state: absent
      with_items:
        - 'HMaster'
        - 'HRegionServer'
    - name: Removing old {{ image_name }} image
      docker_image:
        name: '{{ image_name }}'
        tag: '{{ image_tag }}'
        state: absent
    - name: Deleting old {{ deploy_dir }}/{{ container_name }}
      file:
        path: '{{ deploy_dir }}/{{ container_name }}'
        state: absent
    - name: Checking whether ZooKeeper holds HBase znodes
      shell: "docker exec -it zookeeper zkCli.sh ls / | grep hbase | wc -l"
      run_once: true
      delegate_to: "{{ groups.zookeeper[0] }}"
      register: has_zknode
    - name: Deleting HBase znodes in ZooKeeper
      shell: "docker exec -it zookeeper zkCli.sh rmr /hbase"
      run_once: true
      delegate_to: "{{ groups.zookeeper[0] }}"
      when: has_zknode.stdout | int >= 1

View File

@@ -0,0 +1,36 @@
- name: Waiting for HBase to start, 10s
  shell: sleep 10
- block:
    - name: Check the HBase Master node status
      shell: ps -ef | grep "org.apache.hadoop.hbase.master.HMaster" | grep -v grep | wc -l
      register: check_master
    - name: To terminate execution
      fail:
        msg: "HBase did not start properly on node {{ inventory_hostname }}; please keep the logs for troubleshooting, path: {{ deploy_dir }}/{{ container_name }}/logs"
      run_once: true
      delegate_to: 127.0.0.1
      when: check_master.stdout != '1'
  when: inventory_hostname in groups['hbase'][0:3]
- block:
    - name: Check the HBase HRegionServer node status
      shell: ps -ef | egrep "org.apache.hadoop.hbase.regionserver.HRegionServer" | grep -v grep | wc -l
      register: check_region
    - name: To terminate execution
      fail:
        msg: "HBase did not start properly on node {{ inventory_hostname }}; please keep the logs for troubleshooting, path: {{ deploy_dir }}/{{ container_name }}/logs"
      run_once: true
      delegate_to: 127.0.0.1
      when: check_region.stdout != '1'
- name: Initializing phoenix
  shell: cd {{ deploy_dir }}/{{ container_name }}/phoenix-hbase-2.2-5.1.2-bin/bin/ && ./startsql.sh
- name: Enable RsGroup
  shell: cd {{ deploy_dir }}/{{ container_name }}/init/ && ./rsgroup.sh | grep ERROR | egrep -v "already exists|Target RSGroup important is same as source|Source RSGroup important is same as target"
  register: result
  failed_when: "'ERROR' in result.stdout"
  when: hbase.common.enable_rsgroup
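The fixed 10-second sleep at the top of this file only gives the containers a head start; if the HMaster is slow to come up the process checks can still fail. A retry-based variant of the master check, sketched below with Ansible's until/retries/delay keywords, is one possible hardening and is not part of this commit:

# Hypothetical retrying version of the HMaster process check (not in this commit).
- name: Check the HBase Master node status
  shell: ps -ef | grep "org.apache.hadoop.hbase.master.HMaster" | grep -v grep | wc -l
  register: check_master
  retries: 12
  delay: 5
  until: check_master.stdout == '1'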

View File

@@ -0,0 +1,45 @@
- block:
    - name: Stopping and removing container
      docker_container:
        name: '{{ item }}'
        state: absent
      with_items:
        - 'HMaster'
        - 'HRegionServer'
    - name: Removing old {{ image_name }} image
      docker_image:
        name: '{{ image_name }}'
        tag: '{{ image_tag }}'
        state: absent
    - name: Deleting old {{ deploy_dir }}/{{ container_name }}
      file:
        path: '{{ deploy_dir }}/{{ container_name }}'
        state: absent
    - name: Checking whether ZooKeeper holds HBase znodes
      shell: "docker exec -it zookeeper zkCli.sh ls / | grep hbase | wc -l"
      run_once: true
      delegate_to: "{{ groups.zookeeper[0] }}"
      register: has_zknode
    - name: Deleting HBase znodes in ZooKeeper
      shell: "docker exec -it zookeeper zkCli.sh rmr /hbase"
      run_once: true
      delegate_to: "{{ groups.zookeeper[0] }}"
      when: has_zknode.stdout | int >= 1
- block:
    - name: Checking whether HDFS has an hbase folder
      shell: source /etc/profile && hdfs dfs -ls / | grep hbase | wc -l
      register: folder_exists
      run_once: true
      delegate_to: "{{ groups.hdfs[0] }}"
    - name: Deleting the HBase data folder in HDFS
      shell: source /etc/profile && hadoop fs -rm -r /hbase
      run_once: true
      delegate_to: "{{ groups.hdfs[0] }}"
      when: folder_exists.stdout | int >= 1
  when: groups.hbase | length > 1

View File

@@ -0,0 +1,2 @@
{{ groups.hbase[1] }}
{{ groups.hbase[2] }}

View File

@@ -0,0 +1,45 @@
version: "3"
services:
{% if inventory_hostname in groups['hbase'][0:3] %}
hmaster:
image: {{ image_name }}:{{ image_tag }}
restart: always
container_name: HMaster
hostname: {{ansible_hostname}}
environment:
MODE: master
volumes:
- "{{ deploy_dir }}/{{ container_name }}/data:/opt/hbase-2.2.3/data"
- "{{ deploy_dir }}/{{ container_name }}/logs:/opt/hbase-2.2.3/logs"
- "{{ deploy_dir }}/{{ container_name }}/conf:/opt/hbase-2.2.3/conf"
network_mode: "host"
regionserver:
image: {{ image_name }}:{{ image_tag }}
restart: always
container_name: HRegionServer
hostname: {{ansible_hostname}}
environment:
MODE: regionserver
volumes:
- "{{ deploy_dir }}/{{ container_name }}/data:/opt/hbase-2.2.3/data"
- "{{ deploy_dir }}/{{ container_name }}/logs:/opt/hbase-2.2.3/logs"
- "{{ deploy_dir }}/{{ container_name }}/conf:/opt/hbase-2.2.3/conf"
network_mode: "host"
depends_on:
- hmaster
{% else %}
regionserver:
image: {{ image_name }}:{{ image_tag }}
restart: always
container_name: HRegionServer
hostname: {{ansible_hostname}}
environment:
MODE: regionserver
volumes:
- "{{ deploy_dir }}/{{ container_name }}/data:/opt/hbase-2.2.3/data"
- "{{ deploy_dir }}/{{ container_name }}/logs:/opt/hbase-2.2.3/logs"
- "{{ deploy_dir }}/{{ container_name }}/conf:/opt/hbase-2.2.3/conf"
network_mode: "host"
{% endif %}
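For reference, rendering this template on one of the first three hbase hosts with the role defaults (image hbase:2.2.3-alp-2, deploy_dir /data/olap, container_name hbase) would produce roughly the compose file sketched below; the hostname node01 is a placeholder:

# Hypothetical rendered output for one of the first three hbase hosts.
version: "3"
services:
  hmaster:
    image: hbase:2.2.3-alp-2
    restart: always
    container_name: HMaster
    hostname: node01
    environment:
      MODE: master
    volumes:
      - "/data/olap/hbase/data:/opt/hbase-2.2.3/data"
      - "/data/olap/hbase/logs:/opt/hbase-2.2.3/logs"
      - "/data/olap/hbase/conf:/opt/hbase-2.2.3/conf"
    network_mode: "host"
  regionserver:
    image: hbase:2.2.3-alp-2
    restart: always
    container_name: HRegionServer
    hostname: node01
    environment:
      MODE: regionserver
    volumes:
      - "/data/olap/hbase/data:/opt/hbase-2.2.3/data"
      - "/data/olap/hbase/logs:/opt/hbase-2.2.3/logs"
      - "/data/olap/hbase/conf:/opt/hbase-2.2.3/conf"
    network_mode: "host"
    depends_on:
      - hmaster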

View File

@@ -0,0 +1,143 @@
#
#/**
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
# Set environment variables here.
# This script sets variables multiple times over the course of starting an hbase process,
# so try to keep things idempotent unless you want to take an even deeper look
# into the startup scripts (bin/hbase, etc.)
# The java implementation to use. Java 1.7+ required.
export JAVA_HOME=/opt/jdk1.8.0_202
# Extra Java CLASSPATH elements. Optional.
# export HBASE_CLASSPATH=
# The maximum amount of heap to use. Default is left to JVM default.
#export HBASE_HEAPSIZE={heap}
# Uncomment below if you intend to use off heap cache. For example, to allocate 8G of
# offheap, set the value to "8G".
#export HBASE_OFFHEAPSIZE=5G
# Extra Java runtime options.
# Below are what we set by default. May only work with SUN JVM.
# For more on why as well as other possible settings,
# see http://wiki.apache.org/hadoop/PerformanceTuning
export HBASE_OPTS="-XX:+UseConcMarkSweepGC "
# Configure PermSize. Only needed in JDK7. You can safely remove it for JDK8+
export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS {{ hbase.regionserver.java_opt }} -Xss256k -XX:MetaspaceSize=512m -XX:MaxMetaspaceSize=512m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:-DisableExplicitGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/opt/{{ component_version }}/logs/gc-regionserver-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/opt/{{ component_version }}/logs/"
export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE {{ hbase.hmaster.java_opt }} -Xss256k -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=128m -XX:SurvivorRatio=2 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:MaxTenuringThreshold=15 -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1 -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:-DisableExplicitGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:/opt/{{ component_version }}/logs/gc-master-%t.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/opt/{{ component_version }}/logs/ -javaagent:/opt/{{ component_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9907:/opt/{{ component_version }}/monitor/hbase.yaml"
export HBASE_REGIONSERVER_JMX_OPTS="$HBASE_JMX_BASE -javaagent:/opt/{{ component_version }}/monitor/jmx_prometheus_javaagent-0.12.0.jar=9908:/opt/{{ component_version }}/monitor/hbase.yaml"
# Uncomment one of the below three options to enable java garbage collection logging for the server-side processes.
# This enables basic gc logging to the .out file.
# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
# This enables basic gc logging to its own file.
# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"
# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"
# Uncomment one of the below three options to enable java garbage collection logging for the client processes.
# This enables basic gc logging to the .out file.
# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
# This enables basic gc logging to its own file.
# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH>"
# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+.
# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR .
# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:<FILE-PATH> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M"
# See the package documentation for org.apache.hadoop.hbase.io.hfile for other configurations
# needed setting up off-heap block caching.
# Uncomment and adjust to enable JMX exporting
# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
# NOTE: HBase provides an alternative JMX implementation to fix the random ports issue, please see JMX
# section in HBase Reference Guide for instructions.
# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10101"
# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10102"
# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
# export HBASE_REST_OPTS="$HBASE_REST_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10105"
# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers
# Uncomment and adjust to keep all the Region Server pages mapped to be memory resident
#HBASE_REGIONSERVER_MLOCK=true
#HBASE_REGIONSERVER_UID="hbase"
# File naming hosts on which backup HMaster will run. $HBASE_HOME/conf/backup-masters by default.
# export HBASE_BACKUP_MASTERS=${HBASE_HOME}/conf/backup-masters
# Extra ssh options. Empty by default.
# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
# Where log files are stored. $HBASE_HOME/logs by default.
export HBASE_LOG_DIR=/opt/{{ component_version }}/logs
# Enable remote JDWP debugging of major HBase processes. Meant for Core Developers
# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070"
# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071"
# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072"
# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8073"
# A string representing this instance of hbase. $USER by default.
# export HBASE_IDENT_STRING=$USER
# The scheduling priority for daemon processes. See 'man nice'.
# export HBASE_NICENESS=10
# The directory where pid files are stored. /tmp by default.
export HBASE_PID_DIR=/opt/{{ component_version }}/pids
# Seconds to sleep between slave commands. Unset by default. This
# can be useful in large clusters, where, e.g., slave rsyncs can
# otherwise arrive faster than the master can service them.
# export HBASE_SLAVE_SLEEP=0.1
# Tell HBase whether it should manage its own instance of ZooKeeper or not.
export HBASE_MANAGES_ZK=false
# The default log rolling policy is RFA, where the log file is rolled as per the size defined for the
# RFA appender. Please refer to the log4j.properties file to see more details on this appender.
# In case one needs to do log rolling on a date change, one should set the environment property
# HBASE_ROOT_LOGGER to "<DESIRED_LOG LEVEL>,DRFA".
# For example:
#HBASE_ROOT_LOGGER=INFO,DRFA
HBASE_ROOT_LOGGER=ERROR,DRFA
# The reason for changing default to RFA is to avoid the boundary case of filling out disk space as
# DRFA doesn't put any cap on the log size. Please refer to HBase-5655 for more context.

View File

@@ -0,0 +1,274 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-->
<configuration>
{% if groups.hbase | length > 1 %}
<property>
<name>hbase.rootdir</name>
<value>hdfs://ns1/hbase</value>
</property>
{% elif groups.hbase | length == 1 %}
<property>
<name>hbase.rootdir</name>
<value>/opt/hbase-2.2.3/data</value>
</property>
{% endif %}
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
{% if groups.hbase | length > 1 %}
<property>
<name>hbase.zookeeper.quorum</name>
{% for dev_info in groups.zookeeper -%}
{% if loop.last -%}
{{dev_info}}</value>
{% elif loop.first %}
<value>{{dev_info}},
{%- else %}
{{dev_info}},
{%- endif %}
{%- endfor %}
</property>
{% elif groups.hbase | length == 1 %}
<property>
<name>hbase.zookeeper.quorum</name>
<value>{{inventory_hostname}}</value>
</property>
{% endif %}
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>2181</value>
</property>
<property>
<name>hbase.master.info.port</name>
<value>60010</value>
</property>
<property>
<name>hbase.server.keyvalue.maxsize</name>
<value>5368709120</value>
</property>
<property>
<name>zookeeper.znode.parent</name>
<value>/hbase</value>
</property>
<property>
<name>hbase.rpc.timeout</name>
<value>300000</value>
</property>
<property>
<name>zookeeper.session.timeout</name>
<value>300000</value>
</property>
<!-- Files smaller than this value will be merged during mob compaction -->
<property>
<name>hbase.mob.compaction.mergeable.threshold</name>
<value>1342177280</value>
</property>
<property>
<name>hbase.mob.file.cache.size</name>
<value>1000</value>
</property>
<!-- Eviction period (seconds) of the mob file cache -->
<property>
<name>hbase.mob.cache.evict.period</name>
<value>3600</value>
</property>
<!-- Ratio of files kept in the mob cache after eviction; eviction runs when the cache exceeds hbase.mob.file.cache.size -->
<property>
<name>hbase.mob.cache.evict.remain.ratio</name>
<value>0.5f</value>
</property>
<!-- Enable MOB (requires HFile format version 3) -->
<property>
<name>hfile.format.version</name>
<value>3</value>
</property>
<property>
<name>hbase.hregion.memstore.flush.size</name>
<value>534217728</value>
</property>
<!-- Number of flush threads -->
<property>
<name>hbase.hstore.flusher.count</name>
<value>8</value>
</property>
<property>
<name>hbase.regionserver.global.memstore.size.lower.limit</name>
<value>0.95</value>
</property>
<property>
<name>hbase.regionserver.global.memstore.size</name>
<value>0.45</value>
</property>
<property>
<name>hfile.block.cache.size</name>
<value>0.3</value>
</property>
<property>
<name>hbase.hregion.memstore.block.multiplier</name>
<value>10</value>
</property>
<property>
<name>hbase.ipc.server.max.callqueue.length</name>
<value>1073741824</value>
</property>
<property>
<name>hbase.regionserver.handler.count</name>
<value>{{ hbase.regionserver['hbase.regionserver.handler.count'] }}</value>
<description>Count of RPC Listener instances spun up on RegionServers.
Same property is used by the Master for count of master handlers.</description>
</property>
<property>
<name>hbase.zookeeper.property.maxClientCnxns</name>
<value>1000</value>
</property>
<property>
<name>hbase.ipc.max.request.size</name>
<value>1173741824</value>
</property>
<property>
<name>hbase.hstore.blockingWaitTime</name>
<value>30000</value>
</property>
<property>
<name>hbase.hstore.blockingStoreFiles</name>
<value>100</value>
</property>
<!-- Split parameters -->
<property>
<name>hbase.hregion.max.filesize</name>
<value>{{ hbase.regionserver['hbase.hregion.max.filesize'] }}</value>
</property>
<property>
<name>hbase.regionserver.regionSplitLimit</name>
<value>1000</value>
</property>
<!-- phoenix -->
<property>
<name>phoenix.schema.isNamespaceMappingEnabled</name>
<value>true</value>
</property>
<property>
<name>phoenix.schema.mapSystemTablesToNamespace</name>
<value>true</value>
</property>
<!-- RsGroup -->
<property>
<name>hbase.coprocessor.master.classes</name>
<value>org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint</value>
</property>
<property>
<name>hbase.master.loadbalancer.class</name>
<value>org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer</value>
</property>
<!-- Balance regions automatically on a per-table basis -->
<property>
<name>hbase.master.loadbalance.bytable</name>
<value>true</value>
</property>
<property>
<name>hbase.bucketcache.ioengine</name>
<value>offheap</value>
</property>
<property>
<name>hbase.bucketcache.size</name>
<value>{{ hbase.regionserver['hbase.bucketcache.size'] }}</value>
</property>
<!-- A compaction is triggered when the number of storefiles exceeds this value -->
<property>
<name>hbase.hstore.compactionThreshold</name>
<value>5</value>
</property>
<property>
<name>hbase.hstore.compaction.min</name>
<value>5</value>
</property>
<!-- Maximum number of storefiles selected for a single compaction -->
<property>
<name>hbase.hstore.compaction.max</name>
<value>20</value>
</property>
<property>
<name>hbase.hstore.compaction.min.size</name>
<value>134217728</value>
</property>
<property>
<name>hbase.hstore.compaction.max.size</name>
<value>10737418240</value>
</property>
<property>
<name>hbase.regionserver.thread.compaction.small</name>
<value>5</value>
</property>
<property>
<name>hbase.regionserver.thread.compaction.large</name>
<value>5</value>
</property>
<property>
<name>hbase.hregion.majorcompaction</name>
<value>604800000</value>
</property>
</configuration>

View File

@@ -0,0 +1,3 @@
{% for dev_info in groups.hbase %}
{{dev_info}}
{% endfor %}

View File

@@ -0,0 +1,23 @@
#!/bin/bash
source /etc/profile
docker exec -it hbase hbase shell <<EOF
add_rsgroup 'important'
move_servers_rsgroup 'important',['{{ hostvars[groups.hbase[0]]['ansible_hostname'] }}:16020']
move_servers_rsgroup 'important',['{{ hostvars[groups.hbase[1]]['ansible_hostname'] }}:16020']
flush 'tsg:report_result'
move_tables_rsgroup 'important',['tsg:report_result']
flush 'tsg_galaxy:job_result'
move_tables_rsgroup 'important',['tsg_galaxy:job_result']
EOF

View File

@@ -0,0 +1,23 @@
#!/bin/bash
source /etc/profile
BASE_DIR=$(cd $(dirname $0); pwd)
{% for dev_info in groups.zookeeper -%}
{% if loop.last -%}
{{dev_info}}
{% elif loop.first %}
ZK_SERVER={{dev_info}},
{%- else %}
{{dev_info}},
{%- endif %}
{%- endfor %}
cd $BASE_DIR
exec python sqlline.py $ZK_SERVER <<EOF
!quit
EOF

View File

@@ -0,0 +1,15 @@
# Image name
image_name: hbase
# Image tag
image_tag: 2.2.3-alp-2
# Container name
container_name: hbase
# Component version
component_version: hbase-2.2.3
hadoop_config_files:
  - { filename: 'hdfs-site.xml' }
  - { filename: 'core-site.xml' }