Sat Sep 8 07:21:42 PDT 2007
Update of /home/cvsd/slony1/slony1-engine/src/ducttape
In directory main.slony.info:/tmp/cvs-serv20140/src/ducttape
Modified Files:
Tag: REL_1_2_STABLE
test_5_ddlscript test_8_logship.in
Log Message:
Add the slony_logshipper.
This is a standalone utility that can be used with the slon -x option
to postprocess slony archive log files. Since it does not require any
changes in the existing slony or slonik functionality, I decided to add
it to the current stable branch, to be released with 1.2.12.
Jan
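
For reference, the hookup is just the archiving slon's -a/-x pair: -a names
the directory slon writes archive files into, and -x names a command slon
runs to postprocess each finished archive. The test below wires it up along
these lines (a sketch only; the paths, cluster name and config file name are
simply the test's own values):

    slon -a $LOGSHIPDIR \
         -x 'slony_logshipper ./test_8_logshipper.conf' \
         $CLUSTERNAME "dbname=$DB2"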
Index: test_5_ddlscript
===================================================================
RCS file: /home/cvsd/slony1/slony1-engine/src/ducttape/test_5_ddlscript,v
retrieving revision 1.2.6.1
retrieving revision 1.2.6.2
diff -C2 -d -r1.2.6.1 -r1.2.6.2
*** test_5_ddlscript 4 Jun 2007 22:51:19 -0000 1.2.6.1
--- test_5_ddlscript 8 Sep 2007 14:21:40 -0000 1.2.6.2
***************
*** 19,22 ****
--- 19,23 ----
cat >test_5_tmp.sql <<_EOF_
+ set search_path = public;
alter table accounts add column lastuser name;
Index: test_8_logship.in
===================================================================
RCS file: /home/cvsd/slony1/slony1-engine/src/ducttape/test_8_logship.in,v
retrieving revision 1.1.2.1
retrieving revision 1.1.2.2
diff -C2 -d -r1.1.2.1 -r1.1.2.2
*** test_8_logship.in 31 May 2007 13:29:48 -0000 1.1.2.1
--- test_8_logship.in 8 Sep 2007 14:21:40 -0000 1.1.2.2
***************
*** 51,54 ****
--- 51,55 ----
kill -15 $slon2_pid
fi
+ slony_logshipper -T ./test_8_logshipper.conf
exit 1
' 2 15
***************
*** 82,87 ****
PREAMBLE_FILE=/tmp/preamble.$$
cat <<EOF > $PREAMBLE_FILE
! define origin 11;
! define sub1 22;
cluster name = $CLUSTERNAME;
node @origin admin conninfo='dbname=$DB1';
--- 83,88 ----
PREAMBLE_FILE=/tmp/preamble.$$
cat <<EOF > $PREAMBLE_FILE
! define origin 1;
! define sub1 2;
cluster name = $CLUSTERNAME;
node @origin admin conninfo='dbname=$DB1';
***************
*** 104,108 ****
# Create the "Primary Node"
#####
! echo "**** creating database for Node 11"
createdb $DB1 || exit 1
--- 105,109 ----
# Create the "Primary Node"
#####
! echo "**** creating database for Node 1"
createdb $DB1 || exit 1
***************
*** 135,139 ****
include <$PREAMBLE_FILE>;
init cluster (id = @origin, comment = 'Node @origin');
! echo 'Database $DB1 initialized as Node 11';
_EOF_
if [ $? -ne 0 ] ; then
--- 136,140 ----
include <$PREAMBLE_FILE>;
init cluster (id = @origin, comment = 'Node @origin');
! echo 'Database $DB1 initialized as Node 1';
_EOF_
if [ $? -ne 0 ] ; then
***************
*** 143,147 ****
echo "**** starting the Slony-I node daemon for $DB1"
! $TERMPROG -title "Slon node 11" -e sh -c "slon -d$DEBUG_LEVEL -s500 -g10 $CLUSTERNAME dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
--- 144,148 ----
echo "**** starting the Slony-I node daemon for $DB1"
! $TERMPROG -title "Slon node 1" -e sh -c "slon -d$DEBUG_LEVEL -s500 -g10 $CLUSTERNAME dbname=$DB1; echo -n 'Enter>'; read line" &
slon1_pid=$!
echo "slon[$slon1_pid] on dbname=$DB1"
***************
*** 152,156 ****
try {
! table add key (node id = 11, fully qualified name = 'public.history');
}
on error {
--- 153,157 ----
try {
! table add key (node id = 1, fully qualified name = 'public.history');
}
on error {
***************
*** 207,211 ****
# set of the running pgbench
######################################################################
! echo "**** creating database for node 22"
if ! createdb $DB2 ; then
kill $pgbench_pid 2>/dev/null
--- 208,212 ----
# set of the running pgbench
######################################################################
! echo "**** creating database for node 2"
if ! createdb $DB2 ; then
kill $pgbench_pid 2>/dev/null
***************
*** 214,221 ****
fi
! echo "**** initializing $DB2 as node 22 of Slony-I cluster $CLUSTERNAME"
slonik <<_EOF_
include <$PREAMBLE_FILE>;
! echo 'Creating node 22';
try {
store node (id = @sub1, comment = 'node @sub1', event node = @origin);
--- 215,222 ----
fi
! echo "**** initializing $DB2 as node 2 of Slony-I cluster $CLUSTERNAME"
slonik <<_EOF_
include <$PREAMBLE_FILE>;
! echo 'Creating node 2';
try {
store node (id = @sub1, comment = 'node @sub1', event node = @origin);
***************
*** 239,251 ****
exit 1
fi
echo "**** starting the Slony-I node daemon for $DB2"
! $TERMPROG -title "Slon node 22" -e sh -c "slon -d$DEBUG_LEVEL -s10000 -o10000 -g10 -a $LOGSHIPDIR $CLUSTERNAME dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
- echo "**** taking offline_dump.sql from $DB2 for logshipping"
- sh ../../tools/slony1_dump.sh $DB2 T1 >offline_dump.sql
-
#####
# Check that pgbench is still running
--- 240,274 ----
exit 1
fi
+ echo "**** creating pgbench tables in node 2"
+ (
+ cat pgbench_schema.sql
+ ) | psql -q $DB2
+ echo "**** taking offline_dump.sql from $DB2 for logshipping"
+ sh ../../tools/slony1_dump.sh $DB2 T1 >offline_dump.sql
+
+ ######################################################################
+ # Prepare DB3 for receiving offline logs from the slony_logshipper
+ ######################################################################
+ echo "**** creating database for offline node"
+ if ! createdb $DB3 ; then
+ exit 1
+ fi
+ echo "**** loading pgbench schema into $DB3"
+ psql -q $DB3 <./pgbench_schema.sql
+ echo "**** loading Slony-I offline replica schema into $DB3"
+ psql -q $DB3 <./offline_dump.sql
+ psql -q $DB3 <<_EOF_
+ alter table history add column "_Slony-I_T1_rowID" bigint;
+ _EOF_
+
+ ######################################################################
+ # Start the slon for node 2, which also fires the logshipper
+ ######################################################################
echo "**** starting the Slony-I node daemon for $DB2"
! $TERMPROG -title "Slon node 2" -e sh -c "slon -d$DEBUG_LEVEL -s10000 -o10000 -g10 -a $LOGSHIPDIR -x 'slony_logshipper ./test_8_logshipper.conf' $CLUSTERNAME dbname=$DB2; echo -n 'Enter>'; read line" &
slon2_pid=$!
echo "slon[$slon2_pid] on dbname=$DB2"
#####
# Check that pgbench is still running
***************
*** 261,268 ****
# and the attendants in the first row climb on their chairs ...
######################################################################
! echo "**** creating pgbench tables and subscribing node 22 to set 1"
! (
! cat pgbench_schema.sql
! ) | psql -q $DB2
slonik <<_EOF_
include <$PREAMBLE_FILE>;
--- 284,288 ----
# and the attendants in the first row climb on their chairs ...
######################################################################
! echo "**** Subscribing node 2 to set 1"
slonik <<_EOF_
include <$PREAMBLE_FILE>;
***************
*** 289,312 ****
kill $slon1_pid 2>/dev/null
kill $slon2_pid 2>/dev/null
sh ./compare_pgbench_dumps $DB1 $DB2
-
- echo "**** creating database for offline node"
- if ! createdb $DB3 ; then
- exit 1
- fi
-
- echo "---- loading pgbench schema into $DB3"
- psql -q $DB3 <./pgbench_schema.sql
- echo "---- loading Slony-I offline replica schema into $DB3"
- psql -q $DB3 <./offline_dump.sql
- psql -q $DB3 <<_EOF_
- alter table history add column "_Slony-I_T1_rowID" bigint;
- _EOF_
- echo "---- loading all offline log archives"
- for afile in $LOGSHIPDIR/*.sql ; do
- psql -q $DB3 <$afile
- done
-
sh ./compare_pgbench_dumps $DB1 $DB3
--- 309,315 ----
kill $slon1_pid 2>/dev/null
kill $slon2_pid 2>/dev/null
+ slony_logshipper -t ./test_8_logshipper.conf
sh ./compare_pgbench_dumps $DB1 $DB2
sh ./compare_pgbench_dumps $DB1 $DB3
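
The test refers to ./test_8_logshipper.conf, but the config file itself is
not part of this diff. Purely as a sketch of the moving parts (the directive
names follow the logshipper's documented config style, but nothing here is
copied from the real file), such a config could be generated by the test
like this:

    # Hypothetical only: the actual test_8_logshipper.conf is not shown in
    # this commit; directive names and values are assumptions.
    cat > ./test_8_logshipper.conf <<EOF
    cluster name = '$CLUSTERNAME';
    destination database = 'dbname=$DB3';
    archive dir = '$LOGSHIPDIR';
    logfile = '$LOGSHIPDIR/logshipper.log';
    EOF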