Building from source
Download the source and unpack it. Here I compile with -g so that the source can be debugged later.
The build runs into a flex problem: configure reports that the flex version is incorrect, and even after pointing it at the right flex with an export of FLEX, compilation still fails with a redefinition error around yylex. The fix is a patch adapted from the PostgreSQL mainline. There is not much useful material about this online, so I suggest trying a plain build first and only applying the fix below if you hit the same error.
gcc -DPGXC -Wall -Wmissing-prototypes -Wpointer-arith -Wdeclaration-after-statement -Werror=vla -Wendif-labels -Wmissing-format-attribute -Wimplicit-fallthrough=3 -Wcast-function-type -Wformat-security -fno-strict-aliasing -fwrapv -fexcess-precision=standard -Wno-format-truncation -Wno-stringop-truncation -g -I. -I. -I../../../src/include -D_GNU_SOURCE -c -o gram.o gram.c
gram.y: In function ‘base_yyparse’:
gram.y:9025:11: warning: implicit declaration of function ‘superuser’ [-Wimplicit-function-declaration]
9025 | if (!superuser())
| ^~~~~~~~~
scan.c: At top level:
gram.c:68:25: error: conflicting types for ‘base_yylex’
68 | #define yylex base_yylex
| ^~~~~~~~~~
scan.c:9108:12: note: in expansion of macro ‘yylex’
9108 | extern int yylex \
| ^~~~~
In file included from gram.y:60:
../../../src/include/parser/gramparse.h:66:12: note: previous declaration of ‘base_yylex’ was here
66 | extern int base_yylex(YYSTYPE *lvalp, YYLTYPE *llocp,
| ^~~~~~~~~~
gram.c:68:25: error: conflicting types for ‘base_yylex’
68 | #define yylex base_yylex
| ^~~~~~~~~~
scan.c:9111:21: note: in expansion of macro ‘yylex’
9111 | #define YY_DECL int yylex \
| ^~~~~
scan.c:9132:1: note: in expansion of macro ‘YY_DECL’
9132 | YY_DECL
| ^~~~~~~
In file included from gram.y:60:
../../../src/include/parser/gramparse.h:66:12: note: previous declaration of ‘base_yylex’ was here
66 | extern int base_yylex(YYSTYPE *lvalp, YYLTYPE *llocp,
| ^~~~~~~~~~
The patch below fixes the problem. Note that in configure I also changed the default optimization flags; if you only want to set up a cluster, you can change that part back.
diff --git a/configure b/configure
index 64fccd7..4199c1d 100755
--- a/configure
+++ b/configure
@@ -3702,13 +3702,13 @@ if test "$ac_test_CFLAGS" = set; then
CFLAGS=$ac_save_CFLAGS
elif test $ac_cv_prog_cc_g = yes; then
if test "$GCC" = yes; then
- CFLAGS="-g -O2"
+ CFLAGS="-g"
else
CFLAGS="-g"
fi
else
if test "$GCC" = yes; then
- CFLAGS="-O2"
+ CFLAGS="-g"
else
CFLAGS=
fi
@@ -3945,8 +3945,8 @@ unset CFLAGS
# If the user specifies something in the environment, that is used.
# else: If the template file set something, that is used.
# else: If coverage was enabled, don't set anything.
-# else: If the compiler is GCC, then we use -O2.
-# else: If the compiler is something else, then we use -O, unless debugging.
+# else: If the compiler is GCC, then we use -g.
+# else: If the compiler is something else, then we use -g, unless debugging.
if test "$ac_env_CFLAGS_set" = set; then
CFLAGS=$ac_env_CFLAGS_value
@@ -3955,11 +3955,11 @@ elif test "${CFLAGS+set}" = set; then
elif test "$enable_coverage" = yes; then
: # no optimization by default
elif test "$GCC" = yes; then
- CFLAGS="-O2"
+ CFLAGS="-g"
else
- # if the user selected debug mode, don't use -O
+ # if the user selected debug mode, don't use -g
if test "$enable_debug" != yes; then
- CFLAGS="-O"
+ CFLAGS="-g"
fi
fi
@@ -7335,7 +7335,7 @@ else
echo '%%' > conftest.l
if $pgac_candidate -t conftest.l 2>/dev/null | grep FLEX_SCANNER >/dev/null 2>&1; then
pgac_flex_version=`$pgac_candidate --version 2>/dev/null`
- if echo "$pgac_flex_version" | sed 's/[.a-z]/ /g' | $AWK '{ if ($1 = 2 && $2 = 5 && $3 >= 31) exit 0; else exit 1;}'
+ if echo "$pgac_flex_version" | sed 's/[.a-z]/ /g' | $AWK '{ if ($1 == 2 && ($2 > 5 || ($2 == 5 && $3 >= 31))) exit 0; else exit 1;}'
then
pgac_cv_path_flex=$pgac_candidate
break 2
@@ -7433,7 +7433,7 @@ if test "$PERL"; then
{ $as_echo "$as_me:$LINENO: using perl $pgac_perl_version" >&5
$as_echo "$as_me: using perl $pgac_perl_version" >&6;}
if echo "$pgac_perl_version" | sed 's/[.a-z_]/ /g' | \
- $AWK '{ if ($1 = 5 && $2 >= 8) exit 1; else exit 0;}'
+ $AWK '{ if ($1 == 5 && $2 >= 8) exit 1; else exit 0;}'
then
{ $as_echo "$as_me:$LINENO: WARNING:
*** The installed version of Perl, $PERL, is too old to use with PostgreSQL.
diff --git a/contrib/pgxc_ctl/variables.c b/contrib/pgxc_ctl/variables.c
index 1bfaf79..74ceecf 100644
--- a/contrib/pgxc_ctl/variables.c
+++ b/contrib/pgxc_ctl/variables.c
@@ -17,6 +17,7 @@
pgxc_ctl_var *var_head = NULL;
pgxc_ctl_var *var_tail = NULL;
+pgxc_var_hash var_hash[NUM_HASH_BUCKET];
static void clear_var(pgxc_ctl_var *var);
/*
@@ -35,7 +35,7 @@ typedef struct pgxc_var_hash {
} pgxc_var_hash;
-pgxc_var_hash var_hash[NUM_HASH_BUCKET];
+
void init_var_hash(void);
void add_var_hash(pgxc_ctl_var *var);
diff --git a/src/backend/parser/Makefile b/src/backend/parser/Makefile
index 0395bd5..9cc8946 100644
--- a/src/backend/parser/Makefile
+++ b/src/backend/parser/Makefile
@@ -12,7 +12,7 @@ include $(top_builddir)/src/Makefile.global
override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS)
-OBJS= analyze.o gram.o keywords.o kwlookup.o parser.o \
+OBJS= analyze.o gram.o scan.o keywords.o kwlookup.o parser.o \
parse_agg.o parse_clause.o parse_coerce.o parse_collate.o parse_cte.o \
parse_expr.o parse_func.o parse_node.o parse_oper.o parse_param.o \
parse_relation.o parse_target.o parse_type.o parse_utilcmd.o scansup.o
@@ -20,12 +20,9 @@ OBJS= analyze.o gram.o keywords.o kwlookup.o parser.o \
include $(top_srcdir)/src/backend/common.mk
-# scan is compiled as part of gram
-gram.o: scan.c
-
# Latest flex causes warnings in this file.
ifeq ($(GCC),yes)
-gram.o: CFLAGS += -Wno-error
+scan.o: CFLAGS += -Wno-error
endif
@@ -47,7 +44,7 @@ scan.c: FLEX_NO_BACKUP=yes
# Force these dependencies to be known even without dependency info built:
-gram.o keywords.o parser.o: gram.h
+gram.o scan.o keywords.o parser.o: gram.h
# gram.c, gram.h, and scan.c are in the distribution tarball, so they
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 173c078..cce9386 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -14254,13 +14254,3 @@ parser_init(base_yy_extra_type *yyext)
{
yyext->parsetree = NIL; /* in case grammar forgets to set it */
}
-
-/*
- * Must undefine this stuff before including scan.c, since it has different
- * definitions for these macros.
- */
-#undef yyerror
-#undef yylval
-#undef yylloc
-
-#include "scan.c"
diff --git a/src/backend/parser/scan.l b/src/backend/parser/scan.l
index 92f38a2..911a0ed 100644
--- a/src/backend/parser/scan.l
+++ b/src/backend/parser/scan.l
@@ -1,4 +1,4 @@
-%{
+%top{
/*-------------------------------------------------------------------------
*
* scan.l
@@ -6,7 +6,7 @@
*
* NOTE NOTE NOTE:
*
- * The rules in this file must be kept in sync with psql's lexer!!!
+ * The rules in this file must be kept in sync with psql's psqlscan.l!
*
* The rules are designed so that the scanner never has to backtrack,
* in the sense that there is always a rule that can match the input
@@ -34,12 +34,13 @@
#include <ctype.h>
#include <unistd.h>
+#include "parser/gramparse.h"
#include "parser/parser.h" /* only needed for GUC variables */
-#include "parser/scanner.h"
#include "parser/scansup.h"
#include "mb/pg_wchar.h"
+}
-
+%{
/* Avoid exit() on fatal scanner errors (a bit ugly -- see yy_fatal_error) */
#undef fprintf
#define fprintf(file, fmt, msg) fprintf_to_ereport(fmt, msg)
diff --git a/src/gtm/proxy/proxy_main.c b/src/gtm/proxy/proxy_main.c
index 3224525..52d186f 100644
--- a/src/gtm/proxy/proxy_main.c
+++ b/src/gtm/proxy/proxy_main.c
@@ -69,7 +69,7 @@ int GTMProxyPortNumber;
int GTMProxyWorkerThreads;
char *GTMProxyDataDir;
char *GTMProxyConfigFileName;
-char *GTMConfigFileName;
+extern char *GTMConfigFileName;
char *GTMServerHost;
int GTMServerPortNumber;
diff --git a/src/interfaces/ecpg/preproc/Makefile b/src/interfaces/ecpg/preproc/Makefile
index 715be28..8651f30 100644
--- a/src/interfaces/ecpg/preproc/Makefile
+++ b/src/interfaces/ecpg/preproc/Makefile
@@ -26,7 +26,7 @@ override CPPFLAGS := -I../include -I$(top_srcdir)/src/interfaces/ecpg/include \
override CFLAGS += $(PTHREAD_CFLAGS) -DECPG_COMPILE
-OBJS= preproc.o type.o ecpg.o output.o parser.o \
+OBJS= preproc.o pgc.o type.o ecpg.o output.o parser.o \
keywords.o c_keywords.o ecpg_keywords.o kwlookup.o ../ecpglib/typename.o descriptor.o variable.o \
$(WIN32RES)
@@ -44,9 +44,6 @@ ecpg: $(OBJS) | submake-libpgport
../ecpglib/typename.o: ../ecpglib/typename.c
$(MAKE) -C $(dir $@) $(notdir $@)
-# pgc is compiled as part of preproc
-preproc.o: pgc.c
-
preproc.h: preproc.c ;
preproc.c: BISONFLAGS += -d
@@ -54,7 +51,7 @@ preproc.y: ../../../backend/parser/gram.y parse.pl ecpg.addons ecpg.header ecpg.
$(PERL) $(srcdir)/parse.pl $(srcdir) < $< > $@
$(PERL) $(srcdir)/check_rules.pl $(srcdir) $<
-ecpg_keywords.o c_keywords.o keywords.o preproc.o parser.o: preproc.h
+ecpg_keywords.o c_keywords.o keywords.o preproc.o pgc.o parser.o: preproc.h
kwlookup.c: % : $(top_srcdir)/src/backend/parser/%
rm -f $@ && $(LN_S) $< .
diff --git a/src/interfaces/ecpg/preproc/ecpg.trailer b/src/interfaces/ecpg/preproc/ecpg.trailer
index 70dc690..fb8e03a 100644
--- a/src/interfaces/ecpg/preproc/ecpg.trailer
+++ b/src/interfaces/ecpg/preproc/ecpg.trailer
@@ -1912,11 +1912,3 @@ void parser_init(void)
{
/* This function is empty. It only exists for compatibility with the backend parser right now. */
}
-
-/*
- * Must undefine base_yylex before including pgc.c, since we want it
- * to create the function base_yylex not filtered_base_yylex.
- */
-#undef base_yylex
-
-#include "pgc.c"
diff --git a/src/interfaces/ecpg/preproc/pgc.l b/src/interfaces/ecpg/preproc/pgc.l
index 5abb74f..2e363c2 100644
--- a/src/interfaces/ecpg/preproc/pgc.l
+++ b/src/interfaces/ecpg/preproc/pgc.l
@@ -1,4 +1,4 @@
-%{
+%top{
/*-------------------------------------------------------------------------
*
* pgc.l
@@ -23,7 +23,19 @@
#include <limits.h>
#include "extern.h"
+#include "preproc.h"
+
+/*
+ * Change symbol names as expected by preproc.l. It'd be better to do this
+ * with %option prefix="base_yy", but that affects some other names that
+ * various files expect *not* to be prefixed with "base_". Cleaning it up
+ * is not worth the trouble right now.
+ */
+#define yylex base_yylex
+#define yylval base_yylval
+}
+%{
extern YYSTYPE yylval;
static int xcdepth = 0; /* depth of nesting in slash-star comments */
- Build the source in the usual PostgreSQL way; see my other posts for the details. A sketch of applying the patch and building follows below.
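A minimal sketch of applying the patch above and doing a debug build; the patch file name, source directory name, and install prefix here are placeholders of mine, not from the original notes:
# assuming the diff above is saved as flex-yylex-fix.patch in the top-level source directory
cd postgres-xc
patch -p1 < flex-yylex-fix.patch
# debug-friendly build: keep symbols, disable optimization
./configure --prefix=$HOME/pgxc/build --enable-debug CFLAGS='-g -O0'
make -j"$(nproc)"
make install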
Configuring the cluster
- GTM * 1
- COOR * 2
- DNODE * 3
I tried to set up the cluster on a single machine. In theory giving each instance its own port should be enough, but I probably misconfigured some step, so it failed. The material here is mostly lifted from the official docs, so I still can't work out why it failed 😂
The main steps and configuration are as follows. These notes were tidied up afterwards; they should still be useful, at least the commands are.
# set up .bashrc
export PGHOME=/home/postgres/postgres-home/build/postgres-xc/build
export PGPORT=4431
export PGDATA=$PGHOME/../data
export LANG=en_US.utf8
export LD_LIBRARY_PATH=$PGHOME/lib:$LD_LIBRARY_PATH
export PATH=$PGHOME/bin:$PATH:.
export MANPATH=$PGHOME/share/man:$MANPATH
./initgtm -Z gtm -D $PGHOME/../data1/gtm/data
./initdb -D $PGHOME/../data1/coor_1/data --nodename coor_1
./initdb -D $PGHOME/../data1/coor_2/data --nodename coor_2
./initdb -D $PGHOME/../data1/db_1/data --nodename db_1
./initdb -D $PGHOME/../data1/db_2/data --nodename db_2
./initdb -D $PGHOME/../data1/db_3/data --nodename db_3
sed -i "s/#port = 5432/port = 24076/g" coor_1/data/postgresql.conf
sed -i "s/#pooler_port = 6667/pooler_port = 6667/g" coor_1/data/postgresql.conf
sed -i "s/#port = 5432/port = 24077/g" coor_2/data/postgresql.conf
sed -i "s/#pooler_port = 6667/pooler_port = 6668/g" coor_2/data/postgresql.conf
sed -i "s/#port = 5432/port = 24071/g" db_1/data/postgresql.conf
sed -i "s/#port = 5432/port = 24072/g" db_2/data/postgresql.conf
sed -i "s/#port = 5432/port = 24073/g" db_3/data/postgresql.conf
Edit coor_1/data/postgresql.conf:
gtm_port = 6666           # the default value
pgxc_node_name = coor_1   # pgxc_node_name must be unique per node
Edit coor_2/data/postgresql.conf:
gtm_port = 6666
pgxc_node_name = coor_2
Edit db_1/data/postgresql.conf:
gtm_port = 6666
pgxc_node_name = db_1
Edit db_2/data/postgresql.conf:
gtm_port = 6666
pgxc_node_name = db_2
Edit db_3/data/postgresql.conf:
gtm_port = 6666
pgxc_node_name = db_3
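The same edits can be scripted with sed in the style of the port changes above; a sketch, assuming the stock commented-out gtm_port line (pgxc_node_name is already written by initdb --nodename, so only gtm_port needs touching):
for node in coor_1 coor_2 db_1 db_2 db_3; do
    sed -i "s/#gtm_port = 6666/gtm_port = 6666/g" $node/data/postgresql.conf
done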
mkdir gtm/data/log
mkdir coor_1/data/log
mkdir coor_2/data/log
mkdir db_1/data/log
mkdir db_2/data/log
mkdir db_3/data/log
#./bin/gtm_ctl start -Z gtm -D data/gtm -l data/gtm/gtm.log  # start the GTM (this variant fails: with relative paths the log directory is not found, so creating the log file fails)
./gtm_ctl start -Z gtm -D $PGHOME/../data1/gtm/data -l /home/wen/postgres-home/build/postgres-xc/data1/gtm/data/log/gtm.log
./pg_ctl start -Z datanode -D $PGHOME/../data1/db_1/data -l /home/wen/postgres-home/build/postgres-xc/data1/db_1/data/log/postgresql.log -o "-p 24071"
./pg_ctl start -Z datanode -D $PGHOME/../data1/db_2/data -l /home/wen/postgres-home/build/postgres-xc/data1/db_2/data/log/postgresql.log -o "-p 24072"
./pg_ctl start -Z datanode -D $PGHOME/../data1/db_3/data -l /home/wen/postgres-home/build/postgres-xc/data1/db_3/data/log/postgresql.log -o "-p 24073"
./pg_ctl start -Z coordinator -D $PGHOME/../data1/coor_2/data -l /home/wen/postgres-home/build/postgres-xc/data1/coor_2/data/log/postgresql.log -o "-p 24077"
./pg_ctl start -Z coordinator -D $PGHOME/../data1/coor_1/data -l /home/wen/postgres-home/build/postgres-xc/data1/coor_1/data/log/postgresql.log -o "-p 24076"
./psql -p 24076
CREATE NODE db_1 WITH (HOST = 'localhost', type = 'datanode', PORT = 24071);
CREATE NODE db_2 WITH (HOST = 'localhost', type = 'datanode', PORT = 24072);
CREATE NODE db_3 WITH (HOST = 'localhost', type = 'datanode', PORT = 24073);
CREATE NODE coor_1 WITH (HOST = 'localhost', type = 'coordinator', PORT = 24076);
CREATE NODE coor_2 WITH (HOST = 'localhost', type = 'coordinator', PORT = 24077);
SELECT pgxc_pool_reload();
select * from pgxc_node;
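At this point a quick smoke test helps confirm that the cluster really distributes data; a sketch, where the table t1 is a placeholder of mine:
./psql -p 24076 -d postgres <<'SQL'
CREATE TABLE t1 (id int, val text) DISTRIBUTE BY HASH (id);
INSERT INTO t1 SELECT i, 'row ' || i FROM generate_series(1, 100) i;
-- executed on the datanodes, aggregated on the coordinator
SELECT count(*) FROM t1;
-- show which datanodes hold the table
SELECT r.relname, c.nodeoids FROM pgxc_class c JOIN pg_class r ON c.pcrelid = r.oid WHERE r.relname = 't1';
SQL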
./pg_ctl stop -D $PGHOME/../data1/coor_1/data -m immediate
./pg_ctl stop -D $PGHOME/../data1/coor_2/data -m immediate
./pg_ctl stop -D $PGHOME/../data1/db_1/data -m immediate
./pg_ctl stop -D $PGHOME/../data1/db_2/data -m immediate
./pg_ctl stop -D $PGHOME/../data1/db_3/data -m immediate
./gtm_ctl stop -Z gtm -D $PGHOME/../data1/gtm/data
For reference, the same start/stop sequence using relative paths and the co1/dn1 naming from the official docs:
$ rm -f data/gtm/register.node
$ ./bin/gtm_ctl start -Z gtm -D data/gtm -p ./bin -l data/gtm/gtm.log
$ ./bin/pg_ctl start -l data/dn1/postgresql.log -Z datanode -D data/dn1 -o "-p 24071"
$ ./bin/pg_ctl start -l data/dn2/postgresql.log -Z datanode -D data/dn2 -o "-p 24072"
$ ./bin/pg_ctl start -l data/dn3/postgresql.log -Z datanode -D data/dn3 -o "-p 24073"
$ ./bin/pg_ctl start -l data/co1/postgresql.log -Z coordinator -D data/co1 -o "-p 24076"
$ ./bin/pg_ctl start -l data/co2/postgresql.log -Z coordinator -D data/co2 -o "-p 24077"
$ ./bin/pg_ctl stop -D data/co1 -m immediate
$ ./bin/pg_ctl stop -D data/co2 -m immediate
$ ./bin/pg_ctl stop -D data/dn1 -m immediate
$ ./bin/pg_ctl stop -D data/dn2 -m immediate
$ ./bin/pg_ctl stop -D data/dn3 -m immediate
$ ./bin/gtm_ctl stop -Z gtm -D data/gtm
$ rm -f data/gtm/register.node
$ rm -rf data
Related environment variables and the build alias I use:
export XCHOME=/home/wen/postgres/build/postgres-xc/build
export PATH=$XCHOME/bin:$PATH
export LD_LIBRARY_PATH=$XCHOME/lib:$LD_LIBRARY_PATH
export XCDATABASE=postgres
export XCDATA=/home/wen/postgres/build/postgres-xc/data
alias cdd="cd $XCDATA"
# alias pcn1="psql -d postgres -p 65022"
# alias pcn2="psql -d postgres -p 65023"
# alias pdn1="psql -d postgres -p 65013"
# alias pdn2="psql -d postgres -p 65014"
# alias pgtm="psql -d postgres -p 65011"
alias mk="cd /home/wen/postgres/build/postgres-xc/make && \
/home/wen/postgres/postgres-xc/configure --prefix=$XCHOME \
--with-segsize=4 \
--with-wal-blocksize=64 \
--enable-grammar-oracle \
--with-perl \
--with-python \
--with-pam \
--with-ldap \
--with-libxml \
--with-libxslt \
--enable-thread-safety \
--disable-cassert \
--enable-depend CFLAGS='-g' && \
make && make install
"
Redistributing data and node operations
Postgres-XC can change a table's distribution attributes, i.e. redistribute the table's data across nodes. There are several methods:
Default redistribution method: first, all the data is pulled up to the coordinator with COPY TO, then the data on each datanode is cleared with TRUNCATE, and finally COPY FROM redistributes the data back down to the datanodes. Depending on the situation, a REINDEX may be run at the end.
Replicated table to replicated table: typically used when adding or removing underlying datanodes for a replicated table.
- To remove a replica, simply run TRUNCATE on that datanode.
- To add a replica, COPY TO saves the data from any existing node onto the coordinator, and COPY FROM then copies it to the new node. Depending on the situation, a REINDEX may be run at the end.
Replicated table to distributed table: if the target node list differs from the original one, or new nodes are added, the default method above is used; if the target nodes are the same as, or a subset of, the original ones, no cross-node redistribution is needed and only the rows that should no longer stay on each datanode are deleted. Depending on the situation, a REINDEX may be run at the end.
Distributed table to replicated table: the default redistribution method above is used.
Postgres-XC extends the ALTER TABLE command to support redistributing a table's data, mainly by adding the following clauses:
DISTRIBUTE BY { REPLICATION | ROUNDROBIN | { HASH | MODULO } ( column_name ) }
TO { GROUP groupname | NODE ( nodename [, ... ] ) }
ADD NODE ( nodename [, ... ] )
DELETE NODE ( nodename [, ... ] )
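For example, redistributing the smoke-test table t1 across the datanodes of the single-machine cluster above might look like this (a sketch; t1 and the node names come from my setup, not from an official example):
./psql -p 24076 -d postgres <<'SQL'
-- restrict t1 to db_1 and db_2 only
ALTER TABLE t1 TO NODE (db_1, db_2);
-- later, bring db_3 back in; the rows are redistributed onto it
ALTER TABLE t1 ADD NODE (db_3);
-- or turn it into a replicated table (this goes through the default redistribution path)
ALTER TABLE t1 DISTRIBUTE BY REPLICATION;
SQL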
Adding a coordinator
- Initialize a new coordinator node
- Configure its postgresql.conf (a sketch follows after this list)
- Connect to any existing coordinator and lock the cluster in preparation for the backup; run the lock command but do not exit the session, otherwise the dumped data will be inconsistent
postgres=# select pgxc_lock_for_backup();
INFO: please do not close this session until you are done adding the new node
Once this is done, do not run any DDL on the cluster; the database metadata then stays unchanged and the backup remains consistent.
- Connect again to any existing coordinator and take the metadata dump
pg_dumpall -p 5432 -s --include-nodes --dump-nodes --file=meta.sql
- Start the new coordinator in restoremode
pg_ctl start -Z restoremode -D /opt/pgxc/coordinator/
- Copy the dump file to the new node and load it there
psql -d postgres -p 5432 -f /tmp/meta.sql
- Then stop the new coordinator
pg_ctl stop -D /opt/pgxc/coordinator
- Start the new coordinator normally
pg_ctl start -Z coordinator -D /opt/pgxc/coordinator
- On every coordinator, run CREATE NODE to register the new node, then call pgxc_pool_reload() to refresh the node information cached in the connection pool
create node cdtor5 with (host= 'vlnx107001.firstshare.cn',type = 'coordinator',port=5432);
select pgxc_pool_reload();
- Exit the session from step 3 (the one holding pgxc_lock_for_backup) to release the cluster lock.
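As referenced in the second step, the new coordinator's postgresql.conf needs at least its own port, a pooler port, the GTM connection details, and a unique pgxc_node_name. A sketch with example values: port 5432 and the name cdtor5 match the CREATE NODE above, everything else is a placeholder:
# /opt/pgxc/coordinator/postgresql.conf
port = 5432
pooler_port = 6667              # any port not already used by another coordinator on this host
gtm_host = 'gtm.example.com'    # placeholder: the host where the GTM runs
gtm_port = 6666
pgxc_node_name = cdtor5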
Removing a coordinator
- Stop the node to be removed
pg_ctl stop -Z coordinator -D /opt/pgxc/coordinators -m faster
- Connect to any coordinator and run DROP NODE
drop node cdtor3;
- Refresh the pool cache
select pgxc_pool_reload();
Adding a datanode
- Initialize the new datanode and adjust its postgresql.conf
- Connect to any existing coordinator and lock the cluster in preparation for the backup; run the lock command but do not exit the session, otherwise the dumped data will be inconsistent
postgres=# select pgxc_lock_for_backup();
INFO: please do not close this session until you are done adding the new node
Once this is done, do not run any DDL on the cluster; the metadata then stays unchanged and the backup remains consistent.
- Connect to any datanode and dump its metadata
pg_dumpall -p 5432 -s --file=datanode_meta.sql
- Start the new datanode in restoremode, similarly to the coordinator
pg_ctl start -Z restoremode -D /var/lib/pgsql/9.6/dnode -p 5439
psql -d postgres -f datanode_meta.sql -p 5439
pg_ctl stop -D /var/lib/pgsql/9.6/dnode -p 5439 -m fast
- Start the new datanode normally
pg_ctl start -Z datanode -D /var/lib/pgsql/9.6/dnode
- On every coordinator, run CREATE NODE to register the new node, then call pgxc_pool_reload()
create node dnode4 with(host='hostname',type ='datanode',port= 5439);
select pgxc_pool_reload();
- Exit the locking session from step 2 to release the cluster lock
- Optionally run ALTER TABLE to redistribute existing tables onto the new node (see the sketch below); if you skip this, the data of existing tables stays only on the original nodes.
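A sketch of that last step, assuming the dnode4 node registered above and a placeholder table tbl_name1; run it on a coordinator:
psql -d postgres -p 5432 <<'SQL'
-- add the new datanode to the table's node list; existing rows are redistributed onto it
ALTER TABLE tbl_name1 ADD NODE (dnode4);
SQL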
Removing a datanode
- To remove a datanode, first redistribute the data on it to the other nodes
alter table tbl_name1 delete node (dnode4);
alter table tbl_name2 delete node (dnode4);
alter table tbl_name3 delete node (dnode4);
.....
Check whether any table still keeps data on this node:
select * from pgxc_class c,pgxc_node n where n.node_name='dnode4' and n.oid=any(c.nodeoids);
- Stop the node
pg_ctl stop -Z datanode -D /var/lib/pgsql/9.6/dnode -m fast;
- Connect to every coordinator and drop the node
drop node dnode4;
- Refresh the pool cache
select pgxc_pool_reload();