#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# NOTICE: if a value below contains any of the special characters `.*[]^${}\+?|()@#&`, please escape it, for example escape `[` to `\[`

# postgresql or mysql
dbtype="mysql"

# db config
# db address and port
dbhost="192.168.xx.xx:3306"

# db username
username="xx"

# db password
# NOTICE: if the password contains special characters, use `\` to escape them, for example escape `[` to `\[`
password="xx"

# database name
dbname="dolphinscheduler"
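# An illustrative sketch of the escaping rule above (hypothetical value, not a
# real credential): a literal password of p[a$s would be written as
# password="p\[a\$s"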
# zk cluster
zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"

# zk root directory
zkRoot="/dolphinscheduler"

# Note: the target installation path for dolphinscheduler; do not set it to the current path (pwd)
installPath="/data1_1T/dolphinscheduler"

# deployment user
# Note: the deployment user needs sudo privileges and permission to operate hdfs; if hdfs is enabled, the user must create the root directory themselves
deployUser="dolphinscheduler"

# alert config
# alert plugin dir
# Note: the Alert plugin jars are found and loaded from this dir
alertPluginDir="/data1_1T/dolphinscheduler/lib/plugin/alert"

# user data local directory path; please make sure the directory exists and has read/write permissions
dataBasedirPath="/tmp/dolphinscheduler"

# resource storage type: HDFS, S3, NONE
resourceStorageType="NONE"

# resource storage path on HDFS/S3; resource files will be stored under this path. Please make sure the directory exists on hdfs and has read/write permissions; "/dolphinscheduler" is recommended
resourceUploadPath="/dolphinscheduler"

# if resourceStorageType is HDFS, set defaultFS to the namenode address; for HDFS HA, also put core-site.xml and hdfs-site.xml in the conf directory.
# if S3, set the S3 address, for example: s3a://dolphinscheduler
# Note: for S3, be sure to create the root directory /dolphinscheduler
defaultFS="hdfs://mycluster:8020"

# if resourceStorageType is S3, the following three configurations are required; otherwise ignore them
s3Endpoint="http://192.168.xx.xx:9010"
s3AccessKey="xxxxxxxxxx"
s3SecretKey="xxxxxxxxxx"

# resourcemanager port; the default value is 8088 if not specified
resourceManagerHttpAddressPort="8088"

# if resourcemanager HA is enabled, set the HA IPs; if resourcemanager is single, keep this value empty
yarnHaIps="192.168.xx.xx,192.168.xx.xx"

# if resourcemanager HA is enabled or resourcemanager is not used, keep the default value; if resourcemanager is single, just replace ds1 with the actual resourcemanager hostname
singleYarnIp="yarnIp1"

# the user with permission to create directories under the HDFS/S3 root path
# Note: if kerberos is enabled, leave it empty: hdfsRootUser=
hdfsRootUser="hdfs"

# kerberos config
# whether kerberos is enabled; if so, the following four items need to be configured, otherwise ignore them
kerberosStartUp="false"
# kdc krb5 config file path
krb5ConfPath="$installPath/conf/krb5.conf"
# keytab username
keytabUserName="hdfs-mycluster@ESZ.COM"
# username keytab path
keytabPath="$installPath/conf/hdfs.headless.keytab"
# kerberos expire time, in hours
kerberosExpireTime="2"

# use sudo or not
sudoEnable="true"

# worker tenant auto create
workerTenantAutoCreate="false"

# api server port
apiServerPort="12345"

# install hosts
# Note: the hostname list of the machines to install on; for pseudo-distributed mode, just write one hostname
ips="ds1,ds2,ds3,ds4,ds5"

# ssh port, default 22
# Note: if the ssh port is not the default, modify it here
sshPort="22"

# run master machine
# Note: list of hostnames for deploying the master servers
masters="ds1,ds2"

# run worker machine
# Note: each worker needs a worker group name; the default value is "default"
workers="ds1:default,ds2:default,ds3:default,ds4:default,ds5:default"

# run alert machine
# Note: hostname of the machine for deploying the alert server
alertServer="ds3"

# run api machine
# Note: list of machine hostnames for deploying the api server
apiServers="ds1"
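# Deployment sketch (an assumption about the standard DolphinScheduler release
# layout, where install.sh ships alongside conf/config/install_config.conf):
# after filling in the values above, run the installer from the distribution
# root as the deployUser, who needs passwordless ssh to every host in `ips`:
#
#   sh install.sh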