#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# NOTICE: If the following config has special characters in the variable `.*[]^${}\+?|()@#&`, Please escape, for example, `[` escape to `\[`

# postgresql or mysql
dbtype="mysql"

# db config
# db address and port
dbhost="192.168.xx.xx:3306"

# db username
username="xx"

# db password
# NOTICE: if there are special characters, please use the \ to escape, for example, `[` escape to `\[`
password="xx"

# database name
dbname="dolphinscheduler"

# zk cluster
zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"

# zk root directory
zkRoot="/dolphinscheduler"

# Note: the target installation path for dolphinscheduler, please not config as the same as the current path (pwd)
installPath="/data1_1T/dolphinscheduler"

# deployment user
# Note: the deployment user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled, the root directory needs to be created by itself
deployUser="dolphinscheduler"

# alert config
# alert plugin dir
# Note: find and load the Alert Plugin Jar from this dir.
alertPluginDir="/data1_1T/dolphinscheduler/lib/plugin/alert"

# user data local directory path, please make sure the directory exists and have read write permissions
dataBasedirPath="/tmp/dolphinscheduler"

# resource storage type: HDFS, S3, NONE
resourceStorageType="NONE"

# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
resourceUploadPath="/dolphinscheduler"

# if resourceStorageType is HDFS,defaultFS write namenode address,HA you need to put core-site.xml and hdfs-site.xml in the conf directory.
# if S3,write S3 address,HA,for example :s3a://dolphinscheduler,
# Note,s3 be sure to create the root directory /dolphinscheduler
defaultFS="hdfs://mycluster:8020"

# if resourceStorageType is S3, the following three configuration is required, otherwise please ignore
s3Endpoint="http://192.168.xx.xx:9010"
s3AccessKey="xxxxxxxxxx"
s3SecretKey="xxxxxxxxxx"

# resourcemanager port, the default value is 8088 if not specified
resourceManagerHttpAddressPort="8088"

# if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
yarnHaIps="192.168.xx.xx,192.168.xx.xx"

# if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
singleYarnIp="yarnIp1"

# who have permissions to create directory under HDFS/S3 root path
# Note: if kerberos is enabled, please config hdfsRootUser=
hdfsRootUser="hdfs"

# kerberos config
# whether kerberos starts, if kerberos starts, following four items need to config, otherwise please ignore
kerberosStartUp="false"
# kdc krb5 config file path
krb5ConfPath="$installPath/conf/krb5.conf"
# keytab username
keytabUserName="hdfs-mycluster@ESZ.COM"
# username keytab path
keytabPath="$installPath/conf/hdfs.headless.keytab"
# kerberos expire time, the unit is hour
kerberosExpireTime="2"

# use sudo or not
sudoEnable="true"

# worker tenant auto create
workerTenantAutoCreate="false"

# api server port
apiServerPort="12345"

# install hosts
# Note: install the scheduled hostname list. If it is pseudo-distributed, just write a pseudo-distributed hostname
ips="ds1,ds2,ds3,ds4,ds5"

# ssh port, default 22
# Note: if ssh port is not default, modify here
sshPort="22"

# run master machine
# Note: list of hosts hostname for deploying master
masters="ds1,ds2"

# run worker machine
# note: need to write the worker group name of each worker, the default value is "default"
workers="ds1:default,ds2:default,ds3:default,ds4:default,ds5:default"

# run alert machine
# note: list of machine hostnames for deploying alert server
alertServer="ds3"

# run api machine
# note: list of machine hostnames for deploying api server
apiServers="ds1"