From 264391191941ae8cb8f2279d6b0a7c21227e2308 Mon Sep 17 00:00:00 2001
From: wangxuewei10
Date: Fri, 30 Aug 2019 13:25:37 +0800
Subject: [PATCH] splitter support doge

---
 config/splitter_example.conf | 60 ++++++++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --git a/config/splitter_example.conf b/config/splitter_example.conf
index 3a47dee..3430643 100644
--- a/config/splitter_example.conf
+++ b/config/splitter_example.conf
@@ -379,6 +379,66 @@ max_idle_conns = 100
 sql_log_file = /var/log/bds-splitter/tron-sql.log
 debug = false
 
+# =============================== doge ==================================
+[doge]
+#whether to enable the doge data splitter
+enable = true
+#whether to enable the database
+database_enable = true
+#database worker buffer size
+database_worker_buffer = 8192
+#number of database workers
+database_worker_number = 1
+#maximum number of blocks requested from the chain at once; before block 400000 this can be set higher, e.g. 300
+max_batch_block = 30
+#address of the doge full node
+endpoint = http://[doge full node ip/domain]:[doge full node rpc port]
+#username configured on the doge full node
+user = [rpc username]
+#password configured on the doge full node
+password = [rpc password]
+#path of the doge data validation (JSON schema) file
+json_schema_file = /etc/bds-splitter/schema/doge.json
+#whether doge data validation is enabled
+json_schema_validation_enable = false
+
+#doge cron job configuration
+[cron.doge]
+update_meta_expr = @every 1m
+
+#doge kafka configuration
+[kafka.doge]
+enable = true
+topic = doge
+# kafka client identifier
+client_id = doge-client-1
+# kafka consumer group identifier
+group_id = doge-group
+# address of the kafka service
+broker_list = [kafka service ip/domain]:[kafka service port]
+buffer_size = 1000
+return_errors = true
+
+#doge database configuration
+[database.doge]
+#database type: mssql for SQL Server, postgres for PostgreSQL
+type = postgres
+#database host address
+host = [database service ip/domain]
+#database port
+port = [database service port]
+#database name; must be initialized in advance, used for creating tables and importing data
+database = [database name]
+#database username
+user = [database username]
+#database password
+password = [database password]
+timezone = Asia/Shanghai
+max_open_conns = 500
+max_idle_conns = 100
+sql_log_file = /var/log/bds-splitter/doge-sql.log
+debug = false
+
 # =============================== log ==================================
 #general log configuration
 [logging_normal]
--
GitLab
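
For reference, a minimal sketch of what the new [doge] block might look like once the bracketed placeholders are filled in. All hosts, ports, and credentials below are made-up examples, not values taken from this patch (Dogecoin Core's RPC port is commonly 22555, but check your own node's settings):

[doge]
enable = true
database_enable = true
database_worker_buffer = 8192
database_worker_number = 1
max_batch_block = 30
# hypothetical node endpoint and RPC credentials; replace with your own
endpoint = http://127.0.0.1:22555
user = doge-rpc-user
password = doge-rpc-password
json_schema_file = /etc/bds-splitter/schema/doge.json
json_schema_validation_enable = false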