#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Extensions specified in the load list will be loaded by Druid.
# The quickstart defaults use local fs for deep storage and local Derby for the
# metadata store - neither is recommended for production. This template
# overrides both below (MySQL for metadata; HDFS deep storage on multi-node clusters).

# If you specify `druid.extensions.loadList=[]`, Druid won't load any extension from the file system.
# If you don't specify `druid.extensions.loadList`, Druid will load all the extensions under the root extension directory.
# More info: https://druid.apache.org/docs/latest/operations/including-extensions.html
druid.extensions.loadList=["druid-hdfs-storage", "druid-kafka-indexing-service", "druid-datasketches", "druid-multi-stage-query", "mysql-metadata-storage", "druid-hlld", "druid-hdrhistogram"]
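# Note: druid-hlld and druid-hdrhistogram are not part of the core Druid
# distribution; they are assumed here to be custom/third-party extensions that
# must already be installed under the extensions directory on every node.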

# If you have a different version of Hadoop, place your Hadoop client jar files in your hadoop-dependencies directory
# and uncomment the line below to point to your directory.
#druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies
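# Hadoop client jars can be fetched with Druid's bundled pull-deps tool, e.g.
# (coordinates are illustrative - adjust to your Hadoop version):
#   java -classpath "lib/*" org.apache.druid.cli.Main tools pull-deps -h "org.apache.hadoop:hadoop-client:2.8.5"
# which lays them out under hadoop-dependencies/hadoop-client/<version>/.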

#
# Hostname
#
druid.host={{ inventory_hostname }}
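# druid.host is the advertised hostname other Druid processes use to reach this
# node, so inventory_hostname must resolve (or be routable) from every other host.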

#
# Logging
#

# Log all runtime properties on startup; set to false to suppress them:
druid.startup.logging.logProperties=true

#
# Zookeeper
#

druid.zk.service.host={{ druid.common['druid.zk.service.host'] }}
druid.zk.paths.base=/druid
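# druid.zk.service.host accepts a comma-separated connection string for a
# ZooKeeper ensemble, e.g. (hostnames illustrative):
#   zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181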

#
# Metadata storage
#

# For Derby server on your Druid Coordinator (only viable in a cluster with a single Coordinator, no fail-over):
#druid.metadata.storage.type=derby
#druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/var/druid/metadata.db;create=true
#druid.metadata.storage.connector.host=localhost
#druid.metadata.storage.connector.port=1527

# For MySQL (make sure to include the MySQL JDBC driver on the classpath):
druid.metadata.storage.type=mysql
druid.metadata.storage.connector.connectURI={{ druid.common['druid.metadata.storage.connector.connectURI'] }}
druid.metadata.storage.connector.user=root
druid.metadata.storage.connector.password={{ druid.common['druid.metadata.storage.connector.password'] }}
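# The target database must exist before startup; per the MySQL metadata storage
# docs it should be created with a utf8 character set, e.g.:
#   CREATE DATABASE druid DEFAULT CHARACTER SET utf8mb4;
# Connecting as root works, but a dedicated least-privileged user is preferable.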

# For PostgreSQL:
#druid.metadata.storage.type=postgresql
#druid.metadata.storage.connector.connectURI=jdbc:postgresql://db.example.com:5432/druid
#druid.metadata.storage.connector.user=...
#druid.metadata.storage.connector.password=...

#
# Deep storage
#

# For local disk (only viable in a cluster if this is a network mount):
{% if groups.druid | length == 1 %}
druid.storage.type=local
druid.storage.storageDirectory=var/druid/segments
{% else %}
# For HDFS (multi-node clusters):
druid.storage.type=hdfs
druid.storage.storageDirectory=/druid/segments
{% endif %}
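# Note: var/druid/segments above is relative to the Druid process working
# directory; an absolute path is often safer for service-managed deployments.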

# For S3:
#druid.storage.type=s3
#druid.storage.bucket=your-bucket
#druid.storage.baseKey=druid/segments
#druid.s3.accessKey=...
#druid.s3.secretKey=...

#
# Indexing service logs
#

# For local disk (only viable in a cluster if this is a network mount):
{% if groups.druid | length == 1 %}
druid.indexer.logs.type=file
druid.indexer.logs.directory=var/druid/indexing-logs
{% else %}
# For HDFS (multi-node clusters):
druid.indexer.logs.type=hdfs
druid.indexer.logs.directory=/druid/indexing-logs
{% endif %}

druid.indexer.logs.kill.enabled=true
druid.indexer.logs.kill.durationToRetain=604800000
druid.indexer.logs.kill.delay=21600000
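# Above: durationToRetain is 604800000 ms = 7 days of retained task logs, and
# kill.delay is 21600000 ms, so the cleanup job runs every 6 hours.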

# For S3:
#druid.indexer.logs.type=s3
#druid.indexer.logs.s3Bucket=your-bucket
#druid.indexer.logs.s3Prefix=druid/indexing-logs

#
# Service discovery
#

druid.selectors.indexing.serviceName=druid/overlord
druid.selectors.coordinator.serviceName=druid/coordinator
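# These must match the druid.service names configured on the Overlord and
# Coordinator processes (druid/overlord and druid/coordinator are the defaults).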

#
# Monitoring
#

druid.monitoring.monitors=["org.apache.druid.java.util.metrics.JvmMonitor"]
druid.emitter=http
druid.emitter.logging.logLevel=info
druid.emitter.http.recipientBaseUrl=http://{{ inventory_hostname }}:9903
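# With druid.emitter=http, metric events are POSTed as JSON to recipientBaseUrl
# (assumed here to be a collector listening on port 9903 of each host); the
# druid.emitter.logging.* setting above only takes effect if the emitter is
# switched back to "logging".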

# Storage type of double columns.
# Omitting this will cause doubles to be indexed as floats at the storage layer:
druid.indexing.doubleStorage=double

#
# Security
#
druid.server.hiddenProperties=["druid.s3.accessKey", "druid.s3.secretKey", "druid.metadata.storage.connector.password", "password", "key", "token", "pwd"]
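# Properties whose names contain any of these strings are redacted from the
# /status/properties endpoint output.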

#
# SQL
#
druid.sql.enable=true
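# Enables the Druid SQL layer, served over HTTP at /druid/v2/sql and via the
# Avatica JDBC driver.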

#
# Lookups
#
druid.lookup.enableLookupSyncOnStartup=false
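# Skipping lookup synchronization with the Coordinator at startup speeds up
# process start; appropriate when no lookups are configured.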

# Plan SQL queries containing exact distinct aggregations using grouping sets:
druid.sql.planner.useGroupingSetForExactDistinct=true
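# This applies when queries compute exact (non-approximate) distinct counts;
# grouping sets let Druid plan such aggregations within a single query.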

# Expression processing config
druid.expressions.useStrictBooleans=true
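# With strict booleans, expression logical operators evaluate to LONG 1/0
# values instead of coercing booleans through doubles.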

# Http client
druid.global.http.eagerInitialization=false

# Set to false to store and query data in SQL-compatible mode. When set to true
# (legacy mode), null values are stored as '' for string columns and 0 for
# numeric columns.
druid.generic.useDefaultValueForNull=false
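# In SQL-compatible mode, NULL is distinct from '' and 0: e.g. COUNT(col)
# skips NULLs, and filters must use IS NULL to match them.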