Unverified commit a0a5b4d5, authored by Paul Guo, committed by GitHub

Bring back load balancing of single segment index for gang type GANGTYPE_SINGLETON_READER. (#9995)

It was removed during the slice refactor work; I noticed this when running the
test isolation2/terminate_in_gang_creation. This feature should be quite useful
under parallel OLTP load.

With this code change, test isolation2/terminate_in_gang_creation needs to be
modified to be deterministic. Also added tests in regression/dispatch for this
code change. Changes in other tests are unrelated (just cleanup).
Reviewed-by: Zhenghua Lyu <zlv@pivotal.io>
Parent d71b3afd
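For context before the diff: the restored load balancing is just a modulo of the session id over the number of segments, so each session pins its singleton reader gang to one segment while different sessions land on different segments. A minimal standalone sketch of the idea (the names `gp_session_id` and `numsegments` follow the Greenplum sources; the harness around them is hypothetical):

```c
#include <stdio.h>

/*
 * Pick the segment that will host a GANGTYPE_SINGLETON_READER slice.
 * Hashing the session id over the segment count spreads concurrent
 * sessions across segments instead of pinning them all to segment 0.
 */
static int
singleton_reader_segindex(int gp_session_id, int numsegments)
{
	return gp_session_id % numsegments;
}

int
main(void)
{
	const int numsegments = 3;

	/* Sessions 100..104 map to segments 1, 2, 0, 1, 2. */
	for (int session = 100; session < 105; session++)
		printf("session %d -> seg%d\n",
		       session, singleton_reader_segindex(session, numsegments));
	return 0;
}
```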
@@ -1262,8 +1262,11 @@ ExecPrefetchJoinQual(JoinState *node)
 static void
-FillSliceGangInfo(ExecSlice *slice, int numsegments, DirectDispatchInfo *dd)
+FillSliceGangInfo(ExecSlice *slice, PlanSlice *ps)
 {
+	int numsegments = ps->numsegments;
+	DirectDispatchInfo *dd = &ps->directDispatch;
+
 	switch (slice->gangType)
 	{
 		case GANGTYPE_UNALLOCATED:
@@ -1294,7 +1297,7 @@ FillSliceGangInfo(ExecSlice *slice, int numsegments, DirectDispatchInfo *dd)
 			break;
 		case GANGTYPE_SINGLETON_READER:
 			slice->planNumSegments = 1;
-			slice->segments = list_make1_int(gp_session_id % numsegments);
+			slice->segments = list_make1_int(ps->segindex);
 			break;
 		default:
 			elog(ERROR, "unexpected gang type");
@@ -1388,7 +1391,7 @@ InitSliceTable(EState *estate, PlannedStmt *plannedstmt)
 		currExecSlice->rootIndex = rootIndex;
 		currExecSlice->gangType = currPlanSlice->gangType;
-		FillSliceGangInfo(currExecSlice, currPlanSlice->numsegments, &currPlanSlice->directDispatch);
+		FillSliceGangInfo(currExecSlice, currPlanSlice);
 	}
 	table->numSlices = numSlices;
......
@@ -2593,25 +2593,20 @@ create_motion_plan(PlannerInfo *root, CdbMotionPath *path)
 		case CdbLocusType_SingleQE:
 			sendSlice->gangType = GANGTYPE_SINGLETON_READER;
 			sendSlice->numsegments = 1;
-			/*
-			 * XXX: for now, always execute the slice in segment 0. Ideally, we
-			 * would assign different SingleQEs to different segments to distribute
-			 * the load more evenly, but keep it simple for now.
-			 */
-			sendSlice->segindex = 0;
+			sendSlice->segindex = gp_session_id % subpath->locus.numsegments;
 			break;
 		case CdbLocusType_General:
-			/* */
-			sendSlice->gangType = GANGTYPE_PRIMARY_READER;
+			sendSlice->gangType = GANGTYPE_SINGLETON_READER;
 			sendSlice->numsegments = 1;
-			sendSlice->segindex = 0;
+			sendSlice->segindex = gp_session_id % getgpsegmentCount();
 			break;
 		case CdbLocusType_SegmentGeneral:
 			sendSlice->gangType = GANGTYPE_SINGLETON_READER;
 			sendSlice->numsegments = subpath->locus.numsegments;
-			sendSlice->segindex = 0;
+			sendSlice->segindex = gp_session_id % subpath->locus.numsegments;
 			break;
 		case CdbLocusType_Replicated:
......
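Read together, the three cases above follow one pattern: a single-QE send slice balances over the segments that can serve it. A condensed sketch of that planner-side choice (hypothetical helper with simplified types; the real code works on CdbMotionPath/PlanSlice and getgpsegmentCount()):

```c
/*
 * Condensed view of the segindex choice above. LOCUS_* and the
 * parameters are simplified stand-ins, not the actual gpdb structs.
 */
typedef enum { LOCUS_SINGLE_QE, LOCUS_GENERAL, LOCUS_SEGMENT_GENERAL } LocusType;

static int
send_slice_segindex(LocusType type, int gp_session_id,
                    int locus_numsegments, int cluster_numsegments)
{
	switch (type)
	{
		case LOCUS_SINGLE_QE:
		case LOCUS_SEGMENT_GENERAL:
			/* Balance across the segments that actually hold the data. */
			return gp_session_id % locus_numsegments;
		case LOCUS_GENERAL:
			/* A General locus can be evaluated anywhere: balance cluster-wide. */
			return gp_session_id % cluster_numsegments;
	}
	return 0;
}
```

Note that the General case also switches from GANGTYPE_PRIMARY_READER to GANGTYPE_SINGLETON_READER, since a general-locus subplan only needs a single QE.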
@@ -871,7 +871,9 @@ LockAcquireExtended(const LOCKTAG *locktag,
 			ereport(FATAL,
 					(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
 					 errmsg(WRITER_IS_MISSING_MSG),
-					 errdetail("lock [%u,%u] %s %d", locktag->locktag_field1,
+					 errdetail("lock [%u,%u] %s %d. "
+							   "Probably because writer gang is gone somehow. "
+							   "Maybe try rerunning.", locktag->locktag_field1,
 							   locktag->locktag_field2, lock_mode_names[lockmode],
 							   (int)locktag->locktag_type)));
 		}
......
 include: helpers/server_helpers.sql;
 CREATE
 -- start_matchsubs
--- m/seg0 [0-9.]+:\d+/
--- s/seg0 [0-9.]+:\d+/seg0 IP:PORT/
+-- m/seg[0-9] [0-9.]+:\d+/
+-- s/seg[0-9] [0-9.]+:\d+/segN IP:PORT/
+-- m/lock \[\d+,\d+\]/
+-- s/lock \[\d+,\d+\]//
 -- end_matchsubs
@@ -76,13 +76,15 @@ DROP TABLE foo;
 DROP
 -- Test a bug that if a cached idle primary QE is gone (e.g. after kill-9, pg_ctl
--- restart, etc), a new query that needs a newly created reader gang could fail with
+-- restart, etc), a new query that needs a newly created reader gang might fail with
 -- an error like this:
 --
--- DETAIL: FATAL: reader could not find writer proc entry, lock [0,1260] AccessShareLock 0 (lock.c:874)
--- (seg0 192.168.235.128:7002)
+-- ERROR: failed to acquire resources on one or more segments
+-- DETAIL: FATAL: reader could not find writer proc entry
+-- DETAIL: lock [0,1260] AccessShareLock 0. Probably because writer gang is gone somehow. Maybe try rerunning.
+-- (seg2 127.0.0.1:7004)
 --
--- This is expected since the writer gang is gone, but previously the QD code did
+-- This is ok since the writer gang is gone, but previously the QD code did
 -- not reset all gangs (it just retried creating the new reader gang), so re-running
 -- this query would always fail with the same error since the reader gang would
 -- always fail to be created. The test below is used to test the fix.
@@ -104,21 +106,28 @@ SELECT gp_wait_until_triggered_fault('fts_probe', 1, dbid) FROM gp_segment_configuration WHERE role='p' AND content=-1;
 Success:
 (1 row)
+-- Prevent the pg_ctl restart below from timing out, although the timeout should be enough.
+CHECKPOINT;
+CHECKPOINT
 11: CREATE TABLE foo (c1 int, c2 int) DISTRIBUTED BY (c1);
 CREATE
+-- ORCA optimizes the values scan so there is no additional reader gang in the INSERT below.
+11: SET optimizer = off;
+SET
-SELECT pg_ctl(datadir, 'restart', 'immediate') FROM gp_segment_configuration WHERE role='p' AND content=0;
+-- The values scan (reader gang) might be on any segment, so restart all segments.
+SELECT pg_ctl(datadir, 'restart', 'immediate') FROM gp_segment_configuration WHERE role='p' AND content != -1;
 pg_ctl
 --------
 OK
-(1 row)
+OK
+OK
+(3 rows)
 11: INSERT INTO foo values(2),(1);
 ERROR: failed to acquire resources on one or more segments
 DETAIL: FATAL: reader could not find writer proc entry
-DETAIL: lock [0,1260] AccessShareLock 0
-(seg0 192.168.235.128:7002)
+DETAIL: lock [0,1260] AccessShareLock 0. Probably because writer gang is gone somehow. Maybe try rerunning.
+(seg1 127.0.0.1:7004)
 11: INSERT INTO foo values(2),(1);
 INSERT 2
 11: DROP TABLE foo;
......
 include: helpers/server_helpers.sql;
 -- start_matchsubs
--- m/seg0 [0-9.]+:\d+/
--- s/seg0 [0-9.]+:\d+/seg0 IP:PORT/
+-- m/seg[0-9] [0-9.]+:\d+/
+-- s/seg[0-9] [0-9.]+:\d+/segN IP:PORT/
+-- m/lock \[\d+,\d+\]/
+-- s/lock \[\d+,\d+\]//
 -- end_matchsubs
@@ -48,13 +48,15 @@ SELECT gp_inject_fault('create_gang_in_progress', 'resume', 1);
 DROP TABLE foo;
 -- Test a bug that if a cached idle primary QE is gone (e.g. after kill-9, pg_ctl
--- restart, etc), a new query that needs a newly created reader gang could fail with
+-- restart, etc), a new query that needs a newly created reader gang might fail with
 -- an error like this:
 --
--- DETAIL: FATAL: reader could not find writer proc entry, lock [0,1260] AccessShareLock 0 (lock.c:874)
--- (seg0 192.168.235.128:7002)
+-- ERROR: failed to acquire resources on one or more segments
+-- DETAIL: FATAL: reader could not find writer proc entry
+-- DETAIL: lock [0,1260] AccessShareLock 0. Probably because writer gang is gone somehow. Maybe try rerunning.
+-- (seg2 127.0.0.1:7004)
 --
--- This is expected since the writer gang is gone, but previously the QD code did
+-- This is ok since the writer gang is gone, but previously the QD code did
 -- not reset all gangs (it just retried creating the new reader gang), so re-running
 -- this query would always fail with the same error since the reader gang would
 -- always fail to be created. The test below is used to test the fix.
@@ -66,11 +68,15 @@ SELECT gp_request_fts_probe_scan();
 SELECT gp_wait_until_triggered_fault('fts_probe', 1, dbid)
 FROM gp_segment_configuration WHERE role='p' AND content=-1;
+-- Prevent the pg_ctl restart below from timing out, although the timeout should be enough.
+CHECKPOINT;
 11: CREATE TABLE foo (c1 int, c2 int) DISTRIBUTED BY (c1);
+-- ORCA optimizes the values scan so there is no additional reader gang in the INSERT below.
+11: SET optimizer = off;
+-- The values scan (reader gang) might be on any segment, so restart all segments.
 SELECT pg_ctl(datadir, 'restart', 'immediate')
-FROM gp_segment_configuration WHERE role='p' AND content=0;
+FROM gp_segment_configuration WHERE role='p' AND content != -1;
 11: INSERT INTO foo values(2),(1);
 11: INSERT INTO foo values(2),(1);
 11: DROP TABLE foo;
......
set allow_system_table_mods = true;
create temp table random_2_0 (a int, b int, c int, d int) distributed randomly;
update gp_distribution_policy set numsegments = 2 where localoid = 'random_2_0'::regclass;
insert into random_2_0 select i,i,i,i from generate_series(1, 10)i;
create temp table replicate_2_1 (a int, b int, c int, d int) distributed replicated;
update gp_distribution_policy set numsegments = 2 where localoid = 'replicate_2_1'::regclass;
insert into replicate_2_1 select i,i,i,i from generate_series(1, 10)i;
create temp table hash_3_3_2 (a int, b int, c int, d int) distributed by (a,b,c);
update gp_distribution_policy set numsegments = 3 where localoid = 'hash_3_3_2'::regclass;
insert into hash_3_3_2 select i,i,i,i from generate_series(1, 10)i;
create temp table replicate_3_3 (a int, b int, c int, d int) distributed replicated;
update gp_distribution_policy set numsegments = 3 where localoid = 'replicate_3_3'::regclass;
insert into replicate_3_3 select i,i,i,i from generate_series(1, 10)i;
create temp table hash_2_3_4 (a int, b int, c int, d int) distributed by (a,b,c);
update gp_distribution_policy set numsegments = 2 where localoid = 'hash_2_3_4'::regclass;
insert into hash_2_3_4 select i,i,i,i from generate_series(1, 10)i;
create temp table replicate_2_5 (a int, b int, c int, d int) distributed replicated;
update gp_distribution_policy set numsegments = 2 where localoid = 'replicate_2_5'::regclass;
insert into replicate_2_5 select i,i,i,i from generate_series(1, 10)i;
set Test_print_direct_dispatch_info = true;
\o /dev/null
-- This test focuses on each slice's gang size at runtime, so we enable the GUC
-- Test_print_direct_dispatch_info to display dispatch info.
-- The following SQL statements are generated by a script. These queries are all
-- multi-table joins with aggregation, so they have many slices. We are not
-- interested in their results, so we redirect the output to /dev/null.
-- Some of the queries are only possible with Nested Loop Joins. Enable them to
-- avoid the really high cost estimates on such plans, which would skew the
-- planner's decisions.
set enable_nestloop=on;
select replicate_2_1.c, hash_2_3_4.c, avg(hash_3_3_2.d), max(replicate_3_3.c) from ((random_2_0 right join replicate_2_1 on random_2_0.b = replicate_2_1.c) left join (hash_3_3_2 inner join replicate_3_3 on hash_3_3_2.c >= replicate_3_3.b) on replicate_2_1.d >= hash_3_3_2.a) inner join (hash_2_3_4 inner join replicate_2_5 on hash_2_3_4.c = replicate_2_5.d) on hash_3_3_2.a <> hash_2_3_4.a group by replicate_2_1.c, hash_2_3_4.c order by 1,2;
INFO: (slice 3) Dispatch command to ALL contents: 0 1 2
INFO: (slice 4) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 5) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 1) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 2) Dispatch command to SINGLE content
select hash_3_3_2.b, replicate_2_5.d, sum(replicate_2_1.d), sum(replicate_3_3.a) from (((random_2_0 left join replicate_2_1 on random_2_0.b <> replicate_2_1.d) left join hash_3_3_2 on random_2_0.c = hash_3_3_2.b) inner join (replicate_3_3 inner join hash_2_3_4 on replicate_3_3.d = hash_2_3_4.d) on hash_3_3_2.a <> hash_2_3_4.a) right join replicate_2_5 on random_2_0.d <> replicate_2_5.c group by hash_3_3_2.b, replicate_2_5.d order by 1,2;
INFO: (slice 3) Dispatch command to ALL contents: 0 1 2
INFO: (slice 4) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 2) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 1) Dispatch command to SINGLE content
select replicate_3_3.a, hash_2_3_4.d, sum(replicate_2_1.a), max(hash_3_3_2.d) from (((random_2_0 right join replicate_2_1 on random_2_0.d >= replicate_2_1.c) left join (hash_3_3_2 full join replicate_3_3 on hash_3_3_2.b = replicate_3_3.a) on replicate_2_1.a < hash_3_3_2.c) full join hash_2_3_4 on random_2_0.a >= hash_2_3_4.b) inner join replicate_2_5 on hash_3_3_2.c < replicate_2_5.d group by replicate_3_3.a, hash_2_3_4.d order by 1,2;
INFO: (slice 4) Dispatch command to ALL contents: 0 1 2
INFO: (slice 2) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 3) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 1) Dispatch command to SINGLE content
INFO: (slice 5) Dispatch command to SINGLE content
select replicate_3_3.d, replicate_2_1.d, avg(random_2_0.c), max(hash_3_3_2.a) from ((((random_2_0 left join replicate_2_1 on random_2_0.c <= replicate_2_1.d) full join hash_3_3_2 on random_2_0.a > hash_3_3_2.d) full join replicate_3_3 on hash_3_3_2.a >= replicate_3_3.b) right join hash_2_3_4 on random_2_0.a = hash_2_3_4.b) left join replicate_2_5 on random_2_0.a > replicate_2_5.b group by replicate_3_3.d, replicate_2_1.d order by 1,2;
INFO: (slice 4) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to ALL contents: 0 1 2
INFO: (slice 5) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 3) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 2) Dispatch command to PARTIAL contents: 0 1
select hash_3_3_2.b, replicate_2_1.c, max(replicate_3_3.d), avg(random_2_0.a) from (random_2_0 right join (replicate_2_1 left join hash_3_3_2 on replicate_2_1.b <> hash_3_3_2.b) on random_2_0.a <> hash_3_3_2.a) right join ((replicate_3_3 inner join hash_2_3_4 on replicate_3_3.b < hash_2_3_4.c) left join replicate_2_5 on hash_2_3_4.b < replicate_2_5.d) on hash_3_3_2.a > hash_2_3_4.b group by hash_3_3_2.b, replicate_2_1.c order by 1,2;
INFO: (slice 3) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 4) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 2) Dispatch command to SINGLE content
select replicate_2_1.c, random_2_0.c, sum(replicate_3_3.a), max(replicate_2_5.c) from (random_2_0 left join replicate_2_1 on random_2_0.d <= replicate_2_1.b) right join (hash_3_3_2 inner join (replicate_3_3 inner join (hash_2_3_4 right join replicate_2_5 on hash_2_3_4.b <> replicate_2_5.a) on replicate_3_3.b = hash_2_3_4.b) on hash_3_3_2.a <> replicate_2_5.a) on replicate_2_1.c <> replicate_3_3.c group by replicate_2_1.c, random_2_0.c order by 1,2;
INFO: (slice 1) Dispatch command to ALL contents: 0 1 2
INFO: (slice 3) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 5) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 2) Dispatch command to SINGLE content
INFO: (slice 4) Dispatch command to SINGLE content
select hash_2_3_4.d, replicate_2_1.b, sum(random_2_0.a), count(replicate_2_5.d) from ((random_2_0 left join replicate_2_1 on random_2_0.b = replicate_2_1.b) right join (hash_3_3_2 full join replicate_3_3 on hash_3_3_2.b <= replicate_3_3.d) on replicate_2_1.c > hash_3_3_2.b) inner join (hash_2_3_4 right join replicate_2_5 on hash_2_3_4.a < replicate_2_5.d) on replicate_2_1.b < replicate_2_5.d group by hash_2_3_4.d, replicate_2_1.b order by 1,2;
INFO: (slice 5) Dispatch command to ALL contents: 0 1 2
INFO: (slice 2) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 3) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 4) Dispatch command to SINGLE content
INFO: (slice 1) Dispatch command to SINGLE content
INFO: (slice 6) Dispatch command to SINGLE content
select replicate_3_3.d, random_2_0.c, count(hash_3_3_2.d), count(hash_2_3_4.b) from ((random_2_0 full join (replicate_2_1 right join hash_3_3_2 on replicate_2_1.b > hash_3_3_2.c) on random_2_0.b >= hash_3_3_2.a) inner join replicate_3_3 on hash_3_3_2.d < replicate_3_3.a) left join (hash_2_3_4 full join replicate_2_5 on hash_2_3_4.a <> replicate_2_5.a) on random_2_0.d = hash_2_3_4.c group by replicate_3_3.d, random_2_0.c order by 1,2;
INFO: (slice 4) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to ALL contents: 0 1 2
INFO: (slice 5) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 6) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 2) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 3) Dispatch command to SINGLE content
select random_2_0.d, replicate_2_1.a, max(hash_3_3_2.b), count(replicate_2_5.b) from (random_2_0 left join replicate_2_1 on random_2_0.c <> replicate_2_1.b) inner join (hash_3_3_2 full join ((replicate_3_3 inner join hash_2_3_4 on replicate_3_3.d <> hash_2_3_4.d) left join replicate_2_5 on hash_2_3_4.d < replicate_2_5.b) on hash_3_3_2.c > hash_2_3_4.b) on random_2_0.b < replicate_2_5.d group by random_2_0.d, replicate_2_1.a order by 1,2;
INFO: (slice 4) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to ALL contents: 0 1 2
INFO: (slice 3) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 2) Dispatch command to PARTIAL contents: 0 1
select replicate_2_5.c, hash_3_3_2.c, sum(replicate_3_3.c), max(hash_2_3_4.c) from ((random_2_0 inner join replicate_2_1 on random_2_0.a = replicate_2_1.d) right join hash_3_3_2 on replicate_2_1.c <> hash_3_3_2.c) left join ((replicate_3_3 inner join hash_2_3_4 on replicate_3_3.d <= hash_2_3_4.c) right join replicate_2_5 on replicate_3_3.d > replicate_2_5.b) on hash_3_3_2.b = hash_2_3_4.b group by replicate_2_5.c, hash_3_3_2.c order by 1,2;
INFO: (slice 4) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to ALL contents: 0 1 2
INFO: (slice 3) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 5) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 2) Dispatch command to PARTIAL contents: 0 1
select hash_3_3_2.a, replicate_3_3.b, max(random_2_0.c), sum(replicate_2_1.a) from random_2_0 full join (replicate_2_1 inner join ((hash_3_3_2 inner join (replicate_3_3 inner join hash_2_3_4 on replicate_3_3.d >= hash_2_3_4.d) on hash_3_3_2.c >= replicate_3_3.d) inner join replicate_2_5 on replicate_3_3.b <= replicate_2_5.d) on replicate_2_1.c > replicate_3_3.a) on random_2_0.d = hash_3_3_2.d group by hash_3_3_2.a, replicate_3_3.b order by 1,2;
INFO: (slice 4) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to ALL contents: 0 1 2
INFO: (slice 5) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 6) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 2) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 3) Dispatch command to SINGLE content
select replicate_2_1.d, random_2_0.c, sum(hash_3_3_2.d), max(replicate_3_3.d) from (random_2_0 right join (replicate_2_1 inner join hash_3_3_2 on replicate_2_1.d >= hash_3_3_2.c) on random_2_0.c <= hash_3_3_2.c) inner join ((replicate_3_3 full join hash_2_3_4 on replicate_3_3.d <= hash_2_3_4.b) left join replicate_2_5 on replicate_3_3.d <= replicate_2_5.b) on random_2_0.b > replicate_2_5.b group by replicate_2_1.d, random_2_0.c order by 1,2;
INFO: (slice 2) Dispatch command to ALL contents: 0 1 2
INFO: (slice 3) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 5) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 4) Dispatch command to SINGLE content
INFO: (slice 6) Dispatch command to SINGLE content
INFO: (slice 1) Dispatch command to SINGLE content
select replicate_3_3.d, hash_3_3_2.a, avg(replicate_2_5.d), max(replicate_2_1.d) from random_2_0 right join ((replicate_2_1 right join hash_3_3_2 on replicate_2_1.b <= hash_3_3_2.a) inner join (replicate_3_3 right join (hash_2_3_4 right join replicate_2_5 on hash_2_3_4.c = replicate_2_5.d) on replicate_3_3.b >= hash_2_3_4.c) on hash_3_3_2.b <> hash_2_3_4.c) on random_2_0.c >= hash_2_3_4.b group by replicate_3_3.d, hash_3_3_2.a order by 1,2;
INFO: (slice 5) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to ALL contents: 0 1 2
INFO: (slice 3) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 2) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 4) Dispatch command to SINGLE content
select hash_2_3_4.c, replicate_2_1.b, sum(replicate_3_3.b), avg(replicate_2_5.a) from random_2_0 left join (replicate_2_1 right join ((hash_3_3_2 left join replicate_3_3 on hash_3_3_2.d >= replicate_3_3.b) right join (hash_2_3_4 right join replicate_2_5 on hash_2_3_4.d <> replicate_2_5.d) on replicate_3_3.d <= replicate_2_5.a) on replicate_2_1.b <= replicate_2_5.d) on random_2_0.a < replicate_3_3.a group by hash_2_3_4.c, replicate_2_1.b order by 1,2;
INFO: (slice 3) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 4) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 2) Dispatch command to SINGLE content
-- Test for BEGIN;
begin;
commit;
begin;
abort;
-- Test for UPDATE/DELETE/INSERT;
-- Insert
insert into random_2_0 select i,i,i,i from generate_series(21, 30)i;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 1) Dispatch command to SINGLE content
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL contents: 0 1 2
begin;
insert into random_2_0 select i,i,i,i from generate_series(21, 30)i;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 1) Dispatch command to SINGLE content
end;
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL contents: 0 1 2
insert into replicate_2_1 select i,i,i,i from generate_series(21, 30)i;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to PARTIAL contents: 0 1
begin;
insert into replicate_2_1 select i,i,i,i from generate_series(21, 30)i;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
end;
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to PARTIAL contents: 0 1
insert into hash_3_3_2 select i,i,i,i from generate_series(21, 30)i;
INFO: (slice 0) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to SINGLE content
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL contents: 0 1 2
begin;
insert into hash_3_3_2 select i,i,i,i from generate_series(21, 30)i;
INFO: (slice 0) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to SINGLE content
end;
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL contents: 0 1 2
insert into replicate_3_3 select i,i,i,i from generate_series(21, 30)i;
INFO: (slice 0) Dispatch command to ALL contents: 0 1 2
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL contents: 0 1 2
begin;
insert into replicate_3_3 select i,i,i,i from generate_series(21, 30)i;
INFO: (slice 0) Dispatch command to ALL contents: 0 1 2
end;
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL contents: 0 1 2
insert into hash_2_3_4 select i,i,i,i from generate_series(21, 30)i;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 1) Dispatch command to SINGLE content
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL contents: 0 1 2
begin;
insert into hash_2_3_4 select i,i,i,i from generate_series(21, 30)i;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: (slice 1) Dispatch command to SINGLE content
end;
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL contents: 0 1 2
-- Update
update random_2_0 set a = a + 1;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to PARTIAL contents: 0 1
begin;
update random_2_0 set a = a + 1;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
end;
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to PARTIAL contents: 0 1
update random_2_0 set a = 1 from hash_3_3_2 where hash_3_3_2.b = random_2_0.c;
INFO: (slice 1) Dispatch command to ALL contents: 0 1 2
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL contents: 0 1 2
begin;
update random_2_0 set a = 1 from hash_3_3_2 where hash_3_3_2.b = random_2_0.c;
INFO: (slice 1) Dispatch command to ALL contents: 0 1 2
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
end;
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to ALL contents: 0 1 2
update replicate_2_1 set a = a + 1;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to PARTIAL contents: 0 1
begin;
update replicate_2_1 set a = a + 1;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
end;
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to PARTIAL contents: 0 1
-- Delete
delete from hash_2_3_4 where a in (1, 2, 3);
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to PARTIAL contents: 0 1
begin;
delete from hash_2_3_4 where a = 4 or a = 5;
INFO: (slice 0) Dispatch command to PARTIAL contents: 0 1
end;
INFO: Distributed transaction command 'Distributed Commit (one-phase)' to PARTIAL contents: 0 1
-- Add test for table expand
begin;
alter table random_2_0 expand table;
INFO: (slice 0) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to PARTIAL contents: 0 1
abort;
INFO: Distributed transaction command 'Distributed Abort (No Prepared)' to ALL contents: 0 1 2
begin;
alter table replicate_2_1 expand table;
INFO: (slice 0) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to SINGLE content
abort;
INFO: Distributed transaction command 'Distributed Abort (No Prepared)' to ALL contents: 0 1 2
begin;
alter table hash_2_3_4 expand table;
INFO: (slice 0) Dispatch command to ALL contents: 0 1 2
INFO: (slice 1) Dispatch command to PARTIAL contents: 0 1
abort;
INFO: Distributed transaction command 'Distributed Abort (No Prepared)' to ALL contents: 0 1 2
@@ -5,8 +5,6 @@
 \! echo 'EXPANSION_PREPARE_STARTED:<path> to inputfile' > $MASTER_DATA_DIRECTORY/gpexpand.status
 \! gpcheckcat
 ERROR: Usage of gpcheckcat is not supported while the cluster is in a reconfiguration state, exit gpcheckcat
-\! gpconfig -r gp_debug_linger
-20190116:16:55:31:057863 gpconfig:zlv:gpadmin-[ERROR]:-ERROR: Usage of gpconfig is not supported while the cluster is in a reconfiguration state, exit gpconfig
 -- most gppkg actions should be disallowed while gpexpand is in progress
 \! gppkg --query no-such-package
 20190220:10:23:39:016760 gppkg:nyu-vm-desktop:u-[INFO]:-Starting gppkg with args: --query no-such-package
......
@@ -187,9 +187,6 @@ AS '@abs_builddir@/regress@DLSUFFIX@', 'hasBackendsExist' LANGUAGE C;
 CREATE OR REPLACE FUNCTION hasGangsExist() RETURNS BOOL
 AS '@abs_builddir@/regress@DLSUFFIX@', 'hasGangsExist' LANGUAGE C;
--- disable debug linger to get immediate feedback from FATAL errors.
-set gp_debug_linger to 0;
 -- test log debug related code within dispatch
 set gp_log_gang to debug;
 set log_min_messages to DEBUG;
......
@@ -275,8 +275,6 @@ AS '@abs_builddir@/regress@DLSUFFIX@', 'hasBackendsExist' LANGUAGE C;
 -- check if QD has reusable gangs
 CREATE OR REPLACE FUNCTION hasGangsExist() RETURNS BOOL
 AS '@abs_builddir@/regress@DLSUFFIX@', 'hasGangsExist' LANGUAGE C;
--- disable debug linger to get immediate feedback from FATAL errors.
-set gp_debug_linger to 0;
 -- test log debug related code within dispatch
 set gp_log_gang to debug;
 set log_min_messages to DEBUG;
......
@@ -5,7 +5,6 @@
 \! echo 'EXPANSION_PREPARE_STARTED:<path> to inputfile' > $MASTER_DATA_DIRECTORY/gpexpand.status
 \! gpcheckcat
-\! gpconfig -r gp_debug_linger
 -- most gppkg actions should be disallowed while gpexpand is in progress
 \! gppkg --query no-such-package
 -- the only exception is 'build' which has no interaction with the cluster
......