From 4d2ddf17e3038af3baf912028ffd78facff80e6b Mon Sep 17 00:00:00 2001
From: wangxuewei10
Date: Thu, 5 Sep 2019 16:03:59 +0800
Subject: [PATCH] splitter support etc

---
 config/splitter_example.conf | 57 ++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)

diff --git a/config/splitter_example.conf b/config/splitter_example.conf
index 3430643..3382f9a 100644
--- a/config/splitter_example.conf
+++ b/config/splitter_example.conf
@@ -439,6 +439,63 @@ max_idle_conns = 100
 sql_log_file = /var/log/bds-splitter/doge-sql.log
 debug = false
 
+# =============================== etc ==================================
+[etc]
+# whether the etc data splitter is enabled
+enable = true
+# whether writing to the database is enabled
+database_enable = true
+# database worker buffer size
+database_worker_buffer = 8192
+# number of database workers
+database_worker_number = 1
+# maximum number of blocks requested from the chain per call; before block 400000 this can be set higher, e.g. 300
+max_batch_block = 100
+# address of the etc full node
+endpoint = http://[etc full node IP/domain]:[etc full node port]
+# path to the etc data validation (JSON schema) file
+json_schema_file = /etc/bds-splitter/schema/etc.json
+# whether etc data validation is enabled
+json_schema_validation_enable = false
+
+# etc cron job configuration
+[cron.etc]
+update_meta_expr = @every 1m
+
+# etc kafka configuration
+[kafka.etc]
+enable = true
+topic = etc
+# kafka client identifier
+client_id = etc-client-1
+# kafka consumer group identifier
+group_id = etc-group
+# address of the kafka service
+broker_list = [kafka service IP/domain]:[kafka service port]
+buffer_size = 1000
+return_errors = true
+
+# etc database configuration
+[database.etc]
+# database type: mssql for SQL Server, postgres for PostgreSQL
+type = postgres
+# database host address
+host = [database service IP/domain]
+# database port
+port = [database service port]
+# database name; must be initialized in advance for creating tables and importing data
+database = [database name]
+# database user account
+user = [database user]
+# database user password
+password = [database password]
+timezone = Asia/Shanghai
+max_open_conns = 500
+max_idle_conns = 100
+sql_log_file = /var/log/bds-splitter/etc-sql.log
+debug = false
+
+
 # =============================== log ==================================
 # normal log configuration
 [logging_normal]
-- 
GitLab
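
For reference, a filled-in version of the new [kafka.etc] and [database.etc] blocks might look like the sketch below. The broker address, host, port, database name, and credentials are hypothetical placeholder values chosen for illustration, not values taken from the patch; only the keys and defaults come from the config above.

    [kafka.etc]
    enable = true
    topic = etc
    client_id = etc-client-1
    group_id = etc-group
    # hypothetical broker address; replace with your own kafka endpoint
    broker_list = kafka-1.example.internal:9092
    buffer_size = 1000
    return_errors = true

    [database.etc]
    type = postgres
    # hypothetical connection settings; replace with your own PostgreSQL instance
    host = pg.example.internal
    port = 5432
    database = bds
    user = bds_user
    password = change_me
    timezone = Asia/Shanghai
    max_open_conns = 500
    max_idle_conns = 100
    sql_log_file = /var/log/bds-splitter/etc-sql.log
    debug = false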