Issues

1490 / 1921 / 1699

http://10.20.16.216:9090/ADB/AntDB/-/issues/1490
http://10.20.16.216:9090/ADB/AntDB/-/issues/1921
http://10.20.16.216:9090/ADB/AntDB/-/issues/1699

Problem

Wherever reduce is used (including in queries), the following error occasionally appears; running the SQL again succeeds:

ERROR:  remote node 16389 closed socket
CONTEXT:  dynamic reduce

Reproduction

Reproduce by repeatedly changing the distribution method: create 100 tables, then with 100 concurrent sessions keep executing the statements below; the error appears within 5 minutes (a hypothetical driver for this workload is sketched below):

alter table xxx distribute by replication;
alter table xxx distribute by random;
  • Could not be reproduced directly
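
Below is a minimal sketch of a driver for the workload described above. It is hypothetical (not part of the original report) and assumes the 100 tables are named t1 .. t100, that libpq is available, and that CONNINFO points at a coordinator. Each of the 100 child processes keeps flipping one table between replication and random distribution.

/*
 * Hypothetical concurrency driver for the repro above -- not from the
 * original report.  Assumes tables t1 .. t100 already exist.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>
#include <libpq-fe.h>

#define CONNINFO "host=127.0.0.1 port=5432 dbname=postgres"   /* adjust */
#define NWORKERS 100

static void run(PGconn *conn, const char *sql)
{
    PGresult *res = PQexec(conn, sql);
    if (PQresultStatus(res) != PGRES_COMMAND_OK)
        fprintf(stderr, "%s: %s", sql, PQerrorMessage(conn));
    PQclear(res);
}

static void worker(int id)
{
    char    sql[128];
    PGconn *conn = PQconnectdb(CONNINFO);

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "worker %d: %s", id, PQerrorMessage(conn));
        exit(1);
    }
    for (;;)
    {
        /* keep flipping the distribution of "my" table */
        snprintf(sql, sizeof(sql), "alter table t%d distribute by replication", id + 1);
        run(conn, sql);
        snprintf(sql, sizeof(sql), "alter table t%d distribute by random", id + 1);
        run(conn, sql);
    }
}

int main(void)
{
    for (int i = 0; i < NWORKERS; i++)
        if (fork() == 0)
            worker(i);           /* child never returns */
    while (wait(NULL) > 0)
        ;
    return 0;
}

Compile against libpq (e.g. gcc repro.c -I$(pg_config --includedir) -lpq) after creating the 100 tables.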

1779

http://10.20.16.216:9090/ADB/AntDB/-/issues/1779

Problem

An incorrect execution plan causes dynamic reduce (dyreduce) to fail at execution time.

Reproduction


create table tttb1( c1 int,c2 varchar(100),c3 varchar(100));
create table tttb2(c1 int,sheet_num varchar(100));
create table tttb3(c1 int,order_no varchar(100));
explain select count(distinct c2) from tttb1 where c2 in (select distinct sheet_num from tttb2,tttb3 where sheet_num=order_no and c3='1');
    


create table tb1(c1 int,c2 varchar(100),c3 varchar(100));
create table tb2(c1 int,sheet_num varchar(100));
create table tb3(c1 int,order_no varchar(100));
explain select count(distinct c2) from tb1 where c2 in (select distinct sheet_num from tb2,tb3 where sheet_num=order_no and c3='1');

insert into tb1 select (random()*10000000)::int, (random()*10000000)::int, (random()*10000000)::int from  generate_series(1,100000);
insert into tb2 select (random()*10000000)::int, (random()*10000000)::int from  generate_series(1,100000);
insert into tb3 select (random()*10000000)::int, (random()*10000000)::character varying from  generate_series(1,100000);


create table tb1int(c1 int, c2 int,c3 int);
create table tb2int(c1 int, sheet_num int);
create table tb3int(c1 int, order_no int);
explain select count(distinct c2) from tb1int where c2 in (select distinct sheet_num from tb2int,tb3int where sheet_num=order_no and c3=1);

insert into tb1int select (random()*10000000)::int, (random()*10000000)::int, (random()*10000000)::int from  generate_series(1,100000);
insert into tb2int select (random()*10000000)::int, (random()*10000000)::int from  generate_series(1,100000);
insert into tb3int select (random()*10000000)::int, (random()*10000000)::int from  generate_series(1,100000);

1875

http://10.20.16.216:9090/ADB/AntDB/-/issues/1875

Problem

Data redistribution error.

Reproduction

create table test (id int ,name varchar(22)) to node(dn2);
alter table test  distribute by replication ;
create table t1 (id int ,name text) distribute by replication to node(dn2);
alter table t1 distribute by hash(id);


alter table test to node(dn1, dn2);

1938

http://10.20.16.216:9090/ADB/AntDB/-/issues/1938

cluster reduce not support from REPLICATED to none REPLICATED

Reproduction

create table test2(c1 int not null primary key,c2 text) distribute by replication;
insert into test2 select id ,'abcd'||id||'efgh' from generate_series(1,100000) id;
create table test2_tmp2 (c1 int not null primary key,c2 text,c3 int) distribute by hash(c1);
create table test2_tmp3 (c1 int,c2 text,c3 int) distribute by hash(c1);
insert into test2_tmp3 select c1, count(*) c, 100 from test2 where c1 between 501 and 1000 group by c1 HAVING c1<520 ;
insert into test2_tmp3 select c1, c2, c1 from test2 limit 10  ;



select c1, count(*) c, rank() OVER my_win as rnk from test2 where c1 between 501 and 1000 group by c1 HAVING c1<520 WINDOW my_win AS (ORDER BY c2 DESC) limit 50 offset 5;

insert into test2_tmp2 select c1, count(*) c, rank() OVER my_win as rnk from test2 where c1 between 501 and 1000 group by c1 HAVING c1<520 WINDOW my_win AS (ORDER BY c2 DESC) limit 50 offset 5;

 Insert on public.test2_tmp2  (cost=9.50..9.99 rows=0 width=0)
   ->  Subquery Scan on "*SELECT*"  (cost=9.50..9.99 rows=14 width=40)
         Output: "*SELECT*".c1, "*SELECT*".c, "*SELECT*".rnk
         ->  Limit  (cost=9.50..9.74 rows=14 width=33)
               Output: test2.c1, (count(*)), (rank() OVER (?)), test2.c2
               ->  WindowAgg  (cost=9.41..9.74 rows=19 width=33)
                     Output: test2.c1, (count(*)), rank() OVER (?), test2.c2
                     ->  Sort  (cost=9.41..9.46 rows=19 width=25)
                           Output: test2.c1, test2.c2, (count(*))
                           Sort Key: test2.c2 DESC
                           ->  GroupAggregate  (cost=0.29..9.00 rows=19 width=25)
                                 Output: test2.c1, test2.c2, count(*)
                                 Group Key: test2.c1
                                 ->  Index Scan using test2_pkey on public.test2  (cost=0.29..8.72 rows=19 width=17)
                                       Output: test2.c1, test2.c2
                                       Index Cond: ((test2.c1 >= 501) AND (test2.c1 <= 1000) AND (test2.c1 < 520))


 Limit  (cost=47.30..47.32 rows=1 width=33)
   Output: c1, (count(*)), (rank() OVER (?)), c2
   ->  WindowAgg  (cost=47.21..47.32 rows=6 width=33)
         Output: c1, (count(*)), rank() OVER (?), c2
         ->  Sort  (cost=47.21..47.23 rows=6 width=25)
               Output: c1, c2, (count(*))
               Sort Key: test2.c2 DESC
               ->  Cluster Gather  (cost=0.17..47.14 rows=6 width=25)
                     Remote node: 16388
                     ->  GroupAggregate  (cost=0.17..45.34 rows=6 width=25)
                           Output: c1, c2, count(*)
                           Group Key: test2.c1
                           ->  Index Scan using test2_pkey on public.test2  (cost=0.17..45.25 rows=6 width=17)
                                 Output: c1, c2
                                 Index Cond: ((test2.c1 >= 501) AND (test2.c1 <= 1000) AND (test2.c1 < 520))
                                 Remote node: 16386,16387,16388

#0  cost_cluster_reduce (path=0x55d3eec5acd8) at /workspaces/antdb/src/backend/optimizer/path/costsize.c:6422
#1  0x000055d3ed7c2289 in create_cluster_reduce_path (root=0x55d3eec4b1a8, sub_path=0x55d3eec59a00, rinfo_list=0x55d3eec5ac88, rel=0x55d3eec5a648, pathkeys=0x0) at /workspaces/antdb/src/backend/optimizer/util/pathnode.c:5051
#2  0x000055d3ed759dc6 in create_cluster_window_path (root=0x55d3eec4b1a8, window_rel=0x55d3eec5a648, path=0x55d3eec59a00, input_target=0x55d3eec592d8, output_target=0x55d3eec58c80, wflists=0x55d3eec56b18, activeWindows=0x55d3eec56c18)
    at /workspaces/antdb/src/backend/optimizer/plan/planner.c:5948
#3  0x000055d3ed758c56 in create_window_paths (root=0x55d3eec4b1a8, input_rel=0x55d3eec58110, input_target=0x55d3eec592d8, output_target=0x55d3eec58c80, output_target_parallel_safe=false, wflists=0x55d3eec56b18, activeWindows=0x55d3eec56c18)
    at /workspaces/antdb/src/backend/optimizer/plan/planner.c:5654
#4  0x000055d3ed74ee81 in grouping_planner (root=0x55d3eec4b1a8, inheritance_update=false, tuple_fraction=55) at /workspaces/antdb/src/backend/optimizer/plan/planner.c:2732
#5  0x000055d3ed74aeca in subquery_planner (glob=0x55d3eeb594c0, parse=0x55d3eec37be0, parent_root=0x55d3eec495a8, hasRecursion=false, tuple_fraction=0) at /workspaces/antdb/src/backend/optimizer/plan/planner.c:1320
#6  0x000055d3ed6deef6 in set_subquery_pathlist (root=0x55d3eec495a8, rel=0x55d3eec49ce8, rti=2, rte=0x55d3eec379d0) at /workspaces/antdb/src/backend/optimizer/path/allpaths.c:3262
#7  0x000055d3ed6d4bf4 in set_rel_size (root=0x55d3eec495a8, rel=0x55d3eec49ce8, rti=2, rte=0x55d3eec379d0) at /workspaces/antdb/src/backend/optimizer/path/allpaths.c:437
#8  0x000055d3ed6d487a in set_base_rel_sizes (root=0x55d3eec495a8) at /workspaces/antdb/src/backend/optimizer/path/allpaths.c:338
#9  0x000055d3ed6d4431 in make_one_rel (root=0x55d3eec495a8, joinlist=0x55d3eec4a198) at /workspaces/antdb/src/backend/optimizer/path/allpaths.c:200
#10 0x000055d3ed7476c2 in query_planner (root=0x55d3eec495a8, qp_callback=0x55d3ed754f96 <standard_qp_callback>, qp_extra=0x7ffe3b15f700) at /workspaces/antdb/src/backend/optimizer/plan/planmain.c:292
#11 0x000055d3ed74df07 in grouping_planner (root=0x55d3eec495a8, inheritance_update=false, tuple_fraction=0) at /workspaces/antdb/src/backend/optimizer/plan/planner.c:2471
#12 0x000055d3ed74aeca in subquery_planner (glob=0x55d3eeb594c0, parse=0x55d3eeb593b0, parent_root=0x0, hasRecursion=false, tuple_fraction=0) at /workspaces/antdb/src/backend/optimizer/plan/planner.c:1320

Problem

The execution plan is incorrect: the top of the SELECT side should be a reduce (so the rows are redistributed for the target table), but in some cases plan generation gets this wrong. The simplest example is insert .. select .. limit offset. This is a broad class of problems: besides limit offset, window functions are affected as well, for example:

explain insert into test2_tmp3 select c1, c2, c1 from test2 limit 10  offset 20;

2085

http://10.20.16.216:9090/ADB/AntDB/-/issues/2085


set enable_cluster_plan to off; -- do not use the reduce logic

drop table tbl;
CREATE TABLE IF NOT EXISTS tbl (sd int)  DISTRIBUTE BY REPLICATION to node(dn1, dn2);
insert into tbl select generate_series(1,10);


drop table tbl;
CREATE TABLE IF NOT EXISTS tbl (sd int)  DISTRIBUTE BY REPLICATION;
insert into tbl select generate_series(1,10);
postgres=# \d+ tbl
                                    Table "public.tbl"
 Column |  Type   | Collation | Nullable | Default | Storage | Stats target | Description
--------+---------+-----------+----------+---------+---------+--------------+-------------
 sd     | integer |           |          |         | plain   |              |
Tablespace: "hottbs"
DISTRIBUTE BY REPLICATION TO NODE(dn1_1, dn2_1, dn3_1)
Access method: heap

postgres=# select * from tbl order by 1;
 sd
----
  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
(10 rows)

execute direct on (dn1) 'select * from tbl ';
execute direct on (dn2) 'select * from tbl ';
execute direct on (dn3) 'select * from tbl ';

execute direct on (dn1) 'select * from tbl where ((hash_combin_mod(3, hashint4(sd))) = 0)';
execute direct on (dn2) 'select * from tbl where ((hash_combin_mod(3, hashint4(sd))) = 1)';
execute direct on (dn3) 'select * from tbl where ((hash_combin_mod(3, hashint4(sd))) = 2)';

alter table tbl distribute by hash(sd);
alter table tbl distribute by modulo(sd);
alter table tbl distribute by random;
alter table tbl distribute by hash(sd) TO NODE(dn1, dn2, dn3);
alter table tbl distribute by REPLICATION;
postgres=# \d+ tbl
                                    Table "public.tbl"
 Column |  Type   | Collation | Nullable | Default | Storage | Stats target | Description
--------+---------+-----------+----------+---------+---------+--------------+-------------
 sd     | integer |           |          |         | plain   |              |
Tablespace: "hottbs"
DISTRIBUTE BY HASH(sd) TO NODE(dn1_1, dn2_1, dn3_1)
Access method: heap

postgres=# select * from tbl order by 1;
 sd
----
  3
  3
  3
  5
  5
  5
(6 rows)

DELETE FROM public.tbl where ((hash_combin_mod(3, hashint4(sd))) != 0)

Problem


distrib_delete_hash builds the DELETE statements that remove rows during redistribution, but the node index used in the predicate is computed incorrectly (it is never advanced), so every target node receives the same condition.

--- a/src/backend/pgxc/locator/redistrib.c
+++ b/src/backend/pgxc/locator/redistrib.c
@@ -805,6 +805,7 @@ distrib_delete_hash(RedistribState *distribState, ExecNodes *exec_nodes)
 
                /* Then launch this single query */
                distrib_execute_query(buf2.data, RelationUsesLocalBuffers(rel), local_exec_nodes);
+               nodepos++;
 
                FreeExecNodes(&local_exec_nodes);
                pfree(buf2.data);
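
To make the effect of the missing increment concrete, here is a small self-contained illustration (not the real distrib_delete_hash, just its loop shape): one DELETE is built per target node, and the predicate has to use that node's position in the node list; without nodepos++ every node receives the "!= 0" predicate shown above and deletes the wrong rows.

/*
 * Toy illustration of the bug fixed above -- not AntDB source.
 * Commenting out nodepos++ reproduces the broken behaviour:
 * every node gets the predicate built for position 0.
 */
#include <stdio.h>

int main(void)
{
    const char *nodes[] = {"dn1_1", "dn2_1", "dn3_1"};
    const int   numnodes = 3;
    int         nodepos = 0;

    for (int i = 0; i < numnodes; i++)
    {
        /* one DELETE per target node, keyed on that node's position */
        printf("send to %s: DELETE FROM public.tbl "
               "WHERE ((hash_combin_mod(%d, hashint4(sd))) != %d)\n",
               nodes[i], numnodes, nodepos);
        nodepos++;              /* the increment the original loop was missing */
    }
    return 0;
}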

2103

http://10.20.16.216:9090/ADB/AntDB/-/issues/2103

Version: ADB 6.3.14 d22d4cf
After running the pgbench stress test for a while, execute the following on the gc:
with a as (select sum(abalance) from pgbench_accounts),b as (select sum(delta) from pgbench_history),c as (select sum(bbalance) from pgbench_branches) select * from a,b,c;

It fails with:
postgres=# with a as (select sum(abalance) from pgbench_accounts),b as (select sum(delta) from pgbench_history),c as (select sum(bbalance) from pgbench_branches) select * from a,b,c;
ERROR:  can not send message to dynamic reduce for plan: MQ detached

Executing the same query on the cn succeeds without error.
  • Could not be reproduced

1927

http://10.20.16.216:9090/ADB/AntDB/-/issues/1927

drop table ff1;
drop table ff2;
create table ff1 (a bigint,b integer);
create table ff2 (a bigint,b integer);
insert into ff1 values(456789, 123456),(11, 123456),(123456, 11), (4567890123456789, 344545);
insert into ff2 values(4567890123456789, 344545),(11, 344545),(123456, 11);

select * from ff1, ff2 where ff1.a = ff2.a;

drop table ff1;
drop table ff2;
create table ff1 (a integer);
create table ff2 (a bigint);
insert into ff1 values(456789),(11);
insert into ff2 values(4567890123456789),(11);

select * from ff1, ff2 where ff1.a = ff2.a;

                                                       QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------
 Cluster Gather  (cost=1047.12..2272.56 rows=3200 width=12)
   Remote node: 16386,16387,16388
   ->  Hash Join  (cost=47.12..312.56 rows=1067 width=12)
         Output: ff1.a, ff2.a
         Hash Cond: (ff2.a = ff1.a)
         ->  Cluster Reduce  (cost=1.00..152.55 rows=753 width=8)
               Reduce: ('[0:2]={16386,16387,16388}'::oid[])[COALESCE(hash_combin_mod(3, hashint4((ff2.a)::integer)), 0)]
               ->  Seq Scan on public.ff2  (cost=0.00..32.60 rows=753 width=8)
                     Output: ff2.a
                     Remote node: 16386,16387,16388
         ->  Hash  (cost=35.50..35.50 rows=850 width=4)
               Output: ff1.a
               ->  Seq Scan on public.ff1  (cost=0.00..35.50 rows=850 width=4)
                     Output: ff1.a
                     Remote node: 16386,16387,16388

The Reduce expression for ff2.a (bigint) should instead be:

('[0:2]={16386,16387,16388}'::oid[])[COALESCE(hash_combin_mod(3, hashint8((ff2.a)::bigint)), 0)]

select * from pg_attribute where attrelid = 286766 and attname = 'a';

int4-->23--->int4_ops--->10020
int8-->20--->int8_ops--->10021


For the bigint column, opclass 10020 (int4_ops) is incorrect; it should be 10021 (int8_ops).
select * from pg_opclass where oid = 10020;
Relevant functions: SetReduceKeyDefaultInfo, CreateExprUsingReduceInfo.
Keyinfo observed for ff2.a:
{
  key = 0x5558cb24ea28,
  opclass = 10020,
  opfamily = 1977
}


When reduce_outer_and_inner_for_join reduces the inner or the outer side, it takes the rinfo of the path on the opposite side and copies it with MakeReduceInfoAs. The copy replaces the key but carries over all the other parameters verbatim; however, opclass and opfamily ought to be bound to the key. So when the two sides have different types:
left  
{
  key = int8-->20--->int8_ops--->10021
  opclass = 10021,
  opfamily = 1977
}
right
{
  key = int4-->23--->int4_ops--->10020
  opclass = 10020,
  opfamily = 1977
}

the copy produces incorrect keyinfo:

create_inner_reduce_info_for_join before: 0x55a8fe4e4af8: {key: {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}, opclass: 10020, opfamily: 1977}
CopyReduceInfoExtend: (nil): {key: <>, opclass: 10020, opfamily: 1977}
create_inner_reduce_info_for_join after: 0x55a8fe4e5a28: {key: {VAR :varno 2 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location 37}, opclass: 10020, opfamily: 1977}
CopyReduceInfoExtend: 0x55a8fe4e6c68: {key: {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}, opclass: 10020, opfamily: 1977}
CopyReduceInfoExtend: 0x55a8fe4e6dc8: {key: {VAR :varno 2 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location 37}, opclass: 10020, opfamily: 1977}
CopyReduceInfoExtend: 0x55a8fe4e6f28: {key: {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location -1}, opclass: 10020, opfamily: 1977}
CopyReduceInfoExtend: 0x55a8fe4e7088: {key: {VAR :varno 2 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location 37}, opclass: 10020, opfamily: 1977}
create_outer_reduce_info_for_join before: 0x55a8fe4e5078: {key: {VAR :varno 2 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location -1}, opclass: 10021, opfamily: 1977}
CopyReduceInfoExtend: (nil): {key: <>, opclass: 10021, opfamily: 1977}
create_outer_reduce_info_for_join after: 0x55a8fe4e5a78: {key: {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 29}, opclass: 10021, opfamily: 1977}
CopyReduceInfoExtend: 0x55a8fe4e7508: {key: {VAR :varno 2 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location -1}, opclass: 10021, opfamily: 1977}
CopyReduceInfoExtend: 0x55a8fe4e76f8: {key: {VAR :varno 2 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location -1}, opclass: 10021, opfamily: 1977}
CopyReduceInfoExtend: 0x55a8fe4e7968: {key: {VAR :varno 2 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 2 :varattnosyn 1 :location -1}, opclass: 10021, opfamily: 1977}
CopyReduceInfoExtend: 0x55a8fe4e7ac8: {key: {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 29}, opclass: 10021, opfamily: 1977}


As a result, when CreateExprUsingReduceInfo finally builds the expression, it uses the wrong rinfo.
fix1: fix it at the root: when copying the rinfo, regenerate opclass and opfamily from the key. However, the code that later uses the rinfo has already built a consistent scheme on top of the mistake, so this change would be fairly large.
fix2: fix the corresponding type when the plan is finally created.
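
A toy model of the fix1 idea (hypothetical, not AntDB source): if the copy re-derived opclass and opfamily from the new key's type instead of carrying them over, the keyinfo would stay consistent. The fix2 patch below performs the equivalent re-resolution at expression-creation time via ResolveOpClass and get_opclass_family.

/*
 * Toy model of fix1 -- not AntDB source.  The cached opclass/opfamily belong
 * to the key; when the key is swapped during the ReduceInfo copy they must be
 * re-derived from the new key's type (int4 -> int4_ops/10020,
 * int8 -> int8_ops/10021) rather than copied from the other side.
 */
#include <stdio.h>

typedef struct KeyInfoModel
{
    int keytype;    /* 23 = int4, 20 = int8 */
    int opclass;    /* 10020 = int4_ops, 10021 = int8_ops */
    int opfamily;   /* 1977 for both, per the trace above */
} KeyInfoModel;

/* stand-in for ResolveOpClass(NIL, exprType(key), "hash", HASH_AM_OID) */
static int resolve_hash_opclass(int keytype)
{
    return keytype == 20 ? 10021 : 10020;
}

/* what the copy should do when it rebinds the key to the other side's Var */
static KeyInfoModel copy_with_new_key(KeyInfoModel src, int newkeytype)
{
    KeyInfoModel out = src;                           /* copy everything ...  */
    out.keytype  = newkeytype;                        /* ... replace the key  */
    out.opclass  = resolve_hash_opclass(newkeytype);  /* ... re-derive opclass */
    out.opfamily = 1977;                              /* family of that opclass */
    return out;
}

int main(void)
{
    KeyInfoModel before = {23, 10020, 1977};              /* ff1.a (int4) side */
    KeyInfoModel after  = copy_with_new_key(before, 20);  /* rebound to ff2.a  */

    /* prints keytype=20 opclass=10021, not the stale 10020 from the bug */
    printf("keytype=%d opclass=%d opfamily=%d\n",
           after.keytype, after.opclass, after.opfamily);
    return 0;
}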

Resolved:

@@ -2317,6 +2362,26 @@ Expr *CreateExprUsingReduceInfo(ReduceInfo *reduce)
                        for(i=0,nkey=reduce->nkey;i<nkey;++i)
                        {
                                ReduceKeyInfo *key = &reduce->keys[i];
+                               {
+                                       printf("CreateExprUsingReduceInfo: %p: {key: ", key->key);
+
+                                       char       *s;
+
+                                       s = nodeToString((void*)key->key);
+                                       printf("%s, ", s);
+                                       pfree(s);
+
+                                       printf("opclass: %d, opfamily: %d}\n", key->opclass, key->opfamily);
+                                       fflush(stdout);
+
+                                       // fix opfamily and opclass
+                                       key->opclass = ResolveOpClass(NIL,
+                                                                 exprType((Node*)key->key),
+                                                                       "hash",
+                                                                 HASH_AM_OID
+                                                                 );
+                                       key->opfamily = get_opclass_family(key->opclass);
+                               }
                                args = lappend(args, makeHashExprFamily(key->key, key->opfamily, get_opclass_input_type(key->opclass)));
                        }
                        result = (Expr*)makeSimpleFuncExpr(F_HASH_COMBIN_MOD, INT4OID, args);

1736

http://10.20.16.216:9090/ADB/AntDB/-/issues/1736

Summary of issues (issue / difficulty / status / root cause):

1490   -      cannot reproduce        socket close
1921   -      cannot reproduce        same as above
1699   -      cannot reproduce        same as above
1736   -      cannot reproduce        parallel reduce occasionally dumps core
2103   -      cannot reproduce        "can not send message to dynamic reduce for plan: MQ detached" during pgbench stress test on the gc
1779   hard   fixed, stable repro     wrong execution plan: the subquery has upper-level references and the joinrel case was not handled
1875   -      located, stable repro   data redistribution hangs
1938   hard   located, stable repro   insert..select.. unsupported when a reduce is needed: plan generation error; every op after grouping_planner may be affected, so the fix could be very invasive
2085   mid    fixed, stable repro     data incorrect after redistribution: when redistributing via remote query, the node index used to build the statements is computed wrongly; fixing the index is enough
1927   easy   fixed, stable repro     reduce overflows int8: when the join condition's left/right types differ, the built expression wrongly down-casts and must be corrected

10.21.15.28 AntDB@123!@#

../ADB6_2_FOR_POC/configure --prefix=/home/maoxp/wen --disable-cluster --with-segsize=4 --with-wal-blocksize=64 --enable-grammar-oracle --with-perl --with-python --with-pam --with-ldap --with-libxml --with-libxslt --enable-thread-safety --disable-debug --disable-cassert --enable-depend CFLAGS="-O2 -ggdb3 -fsigned-char -march=armv8-a+crc+lse" --with-libxml

../ADB6_2_FOR_POC/configure --prefix=/home/maoxp/app/antdb --with-segsize=4 --with-wal-blocksize=64 --enable-grammar-oracle --with-perl --with-python --with-pam --with-ldap --with-libxml --with-libxslt --enable-thread-safety --disable-debug --disable-cassert --enable-depend CFLAGS="-O2 -ggdb3 -fsigned-char -march=armv8-a+crc+lse" --with-libxml

mxp1234@#

sysbench --db-driver=pgsql --pgsql-host=172.10.13.28 --pgsql-port=65032 --pgsql-user=maoxp --pgsql-password=mxp1234@# --pgsql-db=sysbench_db --time=60 --report-interval=10 --events=0 --percentile=95 --threads=1 --table-size=5000000 --tables=5 /usr/share/sysbench/oltp_insert.lua run

/home/maoxp/wen/bin/initdb -D /home/maoxp/datewen/data
/home/maoxp/wen/bin/createdb maoxp

/home/maoxp/wen/bin/pg_ctl -D /home/maoxp/datewen/data start
/home/maoxp/wen/bin/pg_ctl -D /home/maoxp/datewen/data stop

sysbench --table_size=50000 --pgsql-host=172.10.13.28 --pgsql-port=5432 --pgsql-user=maoxp --pgsql-password=mxp1234@# --pgsql-db=postgres --db-driver=pgsql --threads=1 --tables=5 --report-interval=5 --time=120 oltp_insert run

sysbench --db-driver=pgsql --pgsql-host=172.10.13.28 --pgsql-port=65032 --pgsql-user=maoxp --pgsql-password=mxp1234@# --pgsql-db=sysbench_db --time=2000 --report-interval=5 --events=0 --percentile=95 --threads=1 --table-size=5000000 --tables=5 /usr/share/sysbench/oltp_insert.lua run

DELETE FROM public.sbtest5 sbtest5 WHERE (id = $1);

create table tb2(c1 int,sheet_num varchar(100));
create table tb3(c1 int,order_no varchar(100));
create table tb4( c1 int,c2 varchar(100),c3 int);

explain select count(distinct c2) from tb4 where c2 in (select distinct sheet_num from tb2,tb3 where sheet_num=order_no and c3=1);

create table ttb1( c1 int,c2 varchar(100),c3 varchar(100));
create table ttb2(c1 int,sheet_num varchar(100));
create table ttb3(c1 int,order_no varchar(100));
explain select count(distinct c2) from ttb1 where c2 in (select distinct sheet_num from ttb2,ttb3 where sheet_num=order_no and c3='1');