CVS User Account cvsuser
Wed Jul 27 19:37:36 PDT 2005
Log Message:
-----------
Add "stop slon after event" functionality.

This allows you to tell a slon to terminate once it receives a certain
event from one of the providers.

For instance, you could query the time stamps on SYNC events to
determine which SYNCs correspond to which times, and use that to hold
a subscriber (say) 4 hours behind its provider, as sketched below.
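
A rough sketch of that scenario (illustrative only, not part of this
commit): it assumes cluster name T1, provider node 11, subscriber
database slony_test2, and the usual sl_event columns (ev_origin,
ev_seqno, ev_timestamp, ev_type) in the _T1 namespace.

    # find the last SYNC from node 11 that is at least 4 hours old
    STOPAT=`psql -qAt -d slony_test1 -c "
        select max(ev_seqno) from _T1.sl_event
         where ev_origin = 11
           and ev_type = 'SYNC'
           and ev_timestamp < now() - interval '4 hours';"`

    # quit the subscriber's slon once its worker for provider 11
    # has processed that SYNC
    slon -q 11 -r $STOPAT T1 dbname=slony_test2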

Modified Files:
--------------
    slony1-engine/doc/adminguide:
        slon.sgml (r1.16 -> r1.17)
        slonconf.sgml (r1.8 -> r1.9)
    slony1-engine/src/ducttape:
        Makefile (r1.10 -> r1.11)
    slony1-engine/src/slon:
        confoptions.h (r1.23 -> r1.24)
        remote_worker.c (r1.86 -> r1.87)
        slon.c (r1.53 -> r1.54)

Added Files:
-----------
    slony1-engine/src/ducttape:
        test_G_stopat.in (r1.1)

-------------- next part --------------
Index: slonconf.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/slonconf.sgml,v
retrieving revision 1.8
retrieving revision 1.9
diff -Ldoc/adminguide/slonconf.sgml -Ldoc/adminguide/slonconf.sgml -u -w -r1.8 -r1.9
--- doc/adminguide/slonconf.sgml
+++ doc/adminguide/slonconf.sgml
@@ -286,6 +286,38 @@
       </listitem>
     </varlistentry>
 
+    <varlistentry id="slon-config-quit-sync-provider" xreflabel="quit_sync_provider">
+      <term><varname>quit_sync_provider</varname>  (<type>integer</type>)</term>
+      <indexterm>
+        <primary><varname>quit_sync_provider</varname> configuration parameter</primary>
+      </indexterm>
+      <listitem>
+        <para> This must be used in conjunction with <xref
+        linkend="slon-config-quit-sync-finalsync">, and indicates
+        which provider node's worker thread should be watched to see
+        if the slon should terminate due to reaching some desired
+        <quote>final</quote> event number.</para>
+
+	<para>If the value is set to 0, this logic will be ignored.</para>
+      </listitem>
+    </varlistentry>
+    <varlistentry id="slon-config-quit-sync-finalsync" xreflabel="quit_sync_finalsync">
+      <term><varname>quit_sync_finalsync</varname>  (<type>integer</type>)</term>
+      <indexterm>
+        <primary><varname>quit_sync_finalsync</varname> configuration parameter</primary>
+      </indexterm>
+      <listitem>
+        <para>Final event number to process.  This must be used in
+        conjunction with <xref linkend="slon-config-quit-sync-provider">, and
+        allows the <application>slon</application> to terminate itself
+        once it reaches a certain event for the specified
+        provider. </para>
+
+	<para>If the value is set to 0, this logic will be ignored.
+        </para>
+      </listitem>
+    </varlistentry>
+
   </variablelist>
 </sect1>
 </article>
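
[Aside, not part of the patch: a sketch of how the two parameters
above might look in a slon runtime configuration file (loaded with
"slon -f stopat.conf ..."); node and event numbers are purely
illustrative:

    # stopat.conf fragment: stop this slon once its worker for
    # provider node 11 has processed SYNC event 900
    quit_sync_provider = 11
    quit_sync_finalsync = 900
]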
Index: slon.sgml
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/doc/adminguide/slon.sgml,v
retrieving revision 1.16
retrieving revision 1.17
diff -Ldoc/adminguide/slon.sgml -Ldoc/adminguide/slon.sgml -u -w -r1.16 -r1.17
--- doc/adminguide/slon.sgml
+++ doc/adminguide/slon.sgml
@@ -302,6 +302,35 @@
      </para>
     </listitem>
    </varlistentry>
+
+
+   <varlistentry>
+    <term><option>-q</option><replaceable class="parameter"> quit based on SYNC provider </replaceable></term>
+    <listitem>
+     <para>
+      <envar>quit_sync_provider</envar> indicates which provider's
+      worker thread should be watched in order to terminate after a
+      certain event.  This must be used in conjunction with the
+      <option>-r</option> option below.
+     </para>
+
+     <para> This allows you to have a <application>slon</application>
+     stop replicating after a certain point. </para>
+    </listitem>
+   </varlistentry>
+
+   <varlistentry>
+    <term><option>-r</option><replaceable class="parameter"> quit at event number </replaceable></term>
+    <listitem>
+     <para>
+      <envar>quit_sync_finalsync</envar> indicates the event number
+      at which the remote worker thread for the provider above
+      should terminate.  This must be used in conjunction with the
+      <option>-q</option> option above.
+     </para>
+    </listitem>
+   </varlistentry>
+
   </variablelist>
  </refsect1>
  <refsect1>
--- /dev/null
+++ src/ducttape/test_G_stopat.in
@@ -0,0 +1,280 @@
+#!/bin/sh
+# $Id: test_G_stopat.in,v 1.1 2005/07/27 18:36:58 cbbrowne Exp $
+# **********
+# test_G_stopat
+#
+# 	This test script creates a standalone pgbench database 
+#	as slony_test1 and then:
+#
+#	- initializes a primary node and starts the node daemon
+#	- creates a set containing all 4 pgbench tables
+#	- creates a second database as slony_test2
+#	- adds database slony_test2 to the system
+#	- starts the second replication daemon
+#	- creates the pgbench tables (schema only)
+#	- subscribes the replication set from the primary node
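+#	- stops both slons (via the new -q/-r options) once the
+#	  primary node reaches event $TERMAFTER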
+# **********
+
+TERMPROG=xterm
+WHICHPROG=which
+
+export PATH
+TMPOUT=/tmp/output.$$
+DB1=slony_test1
+DB2=slony_test2
+DEBUG_LEVEL=2
+
+PGBENCH_SCALE=1
+PGBENCH_CLIENTS=5
+PGBENCH_TRANS=`expr 30000 / $PGBENCH_CLIENTS`
+
+trap '
+	echo ""
+	echo "**** user abort"
+	if [ ! -z "$pgbench_pid" ] ; then
+		echo "**** killing pgbench"
+		kill -15 $pgbench_pid
+	fi
+	if [ ! -z "$slon1_pid" ] ; then
+		echo "**** killing node daemon 1"
+		kill -15 $slon1_pid
+	fi
+	if [ ! -z "$slon2_pid" ] ; then
+		echo "**** killing node daemon 2"
+		kill -15 $slon2_pid
+	fi
+	exit 1
+' 2 15
+
+######################################################################
+# Preparations ... create a standalone pgbench database and
+# have the "application" (pgbench) running.
+######################################################################
+
+#####
+# Make sure the install is up to date
+#####
+WGM=`$WHICHPROG gmake | egrep "^/"`
+if [ -z "$WGM" ] ; then
+    MAKE=make
+    CGNU=`make -v | grep GNU`
+    if [ -z "$CGNU" ] ; then
+	echo "GNU Make not found - please install GNU Make"
+	exit 1
+    fi
+else
+    MAKE=gmake
+fi
+echo -n "**** running 'make install' in src directory ... "
+if ! ${MAKE} -C .. install >$TMPOUT 2>&1 ; then
+    echo "failed"; cat $TMPOUT; rm $TMPOUT; exit 1
+fi
+echo "done"
+rm $TMPOUT
+
+#####
+# Remove old databases, if they exist
+#####
+echo "**** remove old test databases"
+dropdb $DB1 || echo "**** ignored"
+sleep 1
+dropdb $DB2 || echo "**** ignored"
+sleep 1
+
+#####
+# Create the "Primary Node"
+#####
+PNODE=11       # Primary node number
+SNODE=22       # Secondary node number
+TERMAFTER=900  # Terminate after this number of events on $PNODE
+
+echo "**** creating database for Node $PNODE"
+
+createdb $DB1 || exit 1
+pgbench -i -s $PGBENCH_SCALE $DB1
+pg_dump -s $DB1 >pgbench_schema.sql
+
+#####
+# Start pgbench in the background and give it rampup time
+#####
+pgbench -n -s $PGBENCH_SCALE -c $PGBENCH_CLIENTS -t $PGBENCH_TRANS $DB1 &
+pgbench_pid=$!
+echo "**** pgbench is running in background with pid $pgbench_pid"
+echo -n "**** sleeping 10 seconds to give pgbench time for rampup ... "
+sleep 10
+echo "done"
+
+echo ""
+echo "**********************************************************************"
+echo "**** $DB1 is now a standalone database with a running pgbench"
+echo "**********************************************************************"
+echo ""
+
+######################################################################
+# Setup DB1 as the primary cluster T1 node, start the node daemon,
+# and create a replication set containing the pgbench tables.
+######################################################################
+
+PREAMBLE="	cluster name = T1;
+	node $PNODE admin conninfo = 'dbname=$DB1';
+	node $SNODE admin conninfo = 'dbname=$DB2';
+"
+
+echo "**** initializing $DB1 as Primary Node for Slony-I cluster T1"
+slonik <<_EOF_
+$PREAMBLE
+	init cluster (id = $PNODE, comment = 'Node $PNODE');
+	echo 'Database $DB1 initialized as Node $PNODE';
+_EOF_
+if [ $? -ne 0 ] ; then
+	kill $pgbench_pid;
+	exit 1
+fi
+
+echo "**** starting the Slony-I node daemon for $DB1"
+$TERMPROG -title "Slon node $PNODE" -e sh -c "slon -d$DEBUG_LEVEL -s500 -g10 -q $PNODE -r $TERMAFTER T1 dbname=$DB1; echo -n 'Enter>'; read line" &
+slon1_pid=$!
+echo "slon[$slon1_pid] on dbname=$DB1"
+
+echo "**** creating a replication set containing the 4 pgbench tables ... "
+slonik <<_EOF_
+$PREAMBLE
+
+	try {
+		table add key (node id = $PNODE, fully qualified name = 'public.history');
+	}
+	on error {
+		exit 1;
+	}
+
+	try {
+		create set (id = 1, origin = $PNODE, comment = 'Set 1 - pgbench tables');
+		set add table (set id = 1, origin = $PNODE,
+			id = 1, fully qualified name = 'public.accounts',
+			comment = 'Table accounts');
+		set add table (set id = 1, origin = $PNODE,
+			id = 2, fully qualified name = 'public.branches',
+			comment = 'Table branches');
+		set add table (set id = 1, origin = $PNODE,
+			id = 3, fully qualified name = 'public.tellers',
+			comment = 'Table tellers');
+		set add table (set id = 1, origin = $PNODE,
+			id = 4, fully qualified name = 'public.history',
+			key = serial, comment = 'Table history');
+	}
+	on error {
+		exit 1;
+	}
+_EOF_
+
+if [ $? -ne 0 ] ; then
+	echo "failed"
+	kill $pgbench_pid 2>/dev/null
+	kill $slon1_pid 2>/dev/null
+	cat $TMPOUT
+	rm $TMPOUT
+	exit 1
+fi
+echo "**** set created"
+
+#####
+# Check that pgbench is still running
+#####
+if ! kill -0 $pgbench_pid 2>/dev/null ; then
+	echo "**** pgbench terminated ???"
+	kill $slon1_pid 2>/dev/null
+	exit 1
+fi
+
+echo ""
+echo "**********************************************************************"
+echo "**** $DB1 is now the Slony-I origin for set 1"
+echo "**********************************************************************"
+echo ""
+
+######################################################################
+# Setup DB2 as a subscriber node and let it subscribe the replication
+# set of the running pgbench
+######################################################################
+echo "**** creating database for node $SNODE"
+if ! createdb $DB2 ; then
+	kill $pgbench_pid 2>/dev/null
+	kill $slon1_pid 2>/dev/null
+	exit 1
+fi
+
+echo "**** initializing $DB2 as node $SNODE of Slony-I cluster T1"
+slonik <<_EOF_
+$PREAMBLE
+	echo 'Creating node $SNODE';
+	try {
+		store node (id = $SNODE, comment = 'node $SNODE', event node = $PNODE);
+        } on error {
+	    echo 'could not establish node $SNODE';
+	    exit -1;
+	}
+	try {
+		store path (server = $PNODE, client = $SNODE, conninfo = 'dbname=$DB1');
+		store path (server = $SNODE, client = $PNODE, conninfo = 'dbname=$DB2');
+	}
+	on error { 
+	    echo 'could not establish paths between $PNODE and $SNODE';
+	    exit -1; 
+	}
+	echo 'Database $DB2 added as node $SNODE';
+_EOF_
+if [ $? -ne 0 ] ; then
+	kill $pgbench_pid 2>/dev/null
+	kill $slon1_pid 2>/dev/null
+	exit 1
+fi
+
+echo "**** starting the Slony-I node daemon for $DB2"
+$TERMPROG -title "Slon node $SNODE" -e sh -c "slon -d$DEBUG_LEVEL -s10000 -g10 -q $PNODE -r $TERMAFTER T1 dbname=$DB2; echo -n 'Enter>'; read line" &
+slon2_pid=$!
+echo "slon[$slon2_pid] on dbname=$DB2"
+
+#####
+# Check that pgbench is still running
+#####
+if ! kill -0 $pgbench_pid 2>/dev/null ; then
+	echo "**** pgbench terminated ???"
+	kill $slon1_pid 2>/dev/null
+	exit 1
+fi
+
+######################################################################
+# And now comes the moment where the big elephant starts to pee
+# and the attendants in the first row climb on their chairs ...
+######################################################################
+echo "**** creating pgbench tables and subscribing node $SNODE to set 1"
+(
+	cat pgbench_schema.sql
+) | psql -q $DB2
+slonik <<_EOF_
+$PREAMBLE
+
+	subscribe set ( id = 1, provider = $PNODE, receiver = $SNODE, forward = yes );
+_EOF_
+
+echo ""
+echo "**********************************************************************"
+echo "**** $DB2 should now be copying data and attempting to catch up."
+echo "**********************************************************************"
+echo ""
+
+echo -n "**** waiting for pgbench to finish "
+while kill -0 $pgbench_pid 2>/dev/null ; do
+	echo -n "."
+	sleep 10
+done
+echo "**** pgbench finished"
+echo "**** please terminate the replication engines when caught up."
+wait $slon1_pid
+wait $slon2_pid
+
+kill $pgbench_pid 2>/dev/null
+kill $slon1_pid 2>/dev/null
+kill $slon2_pid 2>/dev/null
+
+./compare_pgbench_dumps $DB1 $DB2
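
[Aside, not part of the committed script: both slons above are started
with "-q $PNODE -r $TERMAFTER", so each should abort once its worker
for node 11 reaches event 900.  One hedged way to eyeball where the
subscriber actually stopped, assuming the usual sl_event catalog:

    psql -qAt -d slony_test2 -c "
        select max(ev_seqno) from _T1.sl_event
         where ev_origin = 11 and ev_type = 'SYNC';"
]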
Index: Makefile
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/ducttape/Makefile,v
retrieving revision 1.10
retrieving revision 1.11
diff -Lsrc/ducttape/Makefile -Lsrc/ducttape/Makefile -u -w -r1.10 -r1.11
--- src/ducttape/Makefile
+++ src/ducttape/Makefile
@@ -11,8 +11,8 @@
 slony_top_builddir = ../..
 include $(slony_top_builddir)/Makefile.global
 
-TESTSSRC = `ls test_[0-9A-F]_*.in`
-TESTS = `ls test_[0-9A-F]_*.in | sed -e 's/\.in//'`
+TESTSSRC = `ls test_[0-9A-Z]_*.in`
+TESTS = `ls test_[0-9A-Z]_*.in | sed -e 's/\.in//'`
 DISTFILES = Makefile README $(TESTSSRC)
 ALL =
 
@@ -26,7 +26,6 @@
 		fi; \
 	done
 	
-
 clean distclean maintainer-clean:
 	@for script in $(TESTS); do \
 		if [ -f $$script ]; then \
Index: confoptions.h
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/confoptions.h,v
retrieving revision 1.23
retrieving revision 1.24
diff -Lsrc/slon/confoptions.h -Lsrc/slon/confoptions.h -u -w -r1.23 -r1.24
--- src/slon/confoptions.h
+++ src/slon/confoptions.h
@@ -25,6 +25,9 @@
 extern int	sync_group_maxsize;
 extern int	desired_sync_time;
 
+extern int      quit_sync_provider;
+extern int      quit_sync_finalsync;
+
 char	   	*Syslog_ident;
 char	   	*Syslog_facility;
 int		Use_syslog;
@@ -188,6 +191,32 @@
 		2
 	},
 #endif
+ 	{
+ 		{
+ 			(const char *)"quit_sync_provider",
+ 			gettext_noop("Node to watch for a final SYNC"),
+ 			gettext_noop("We want to terminate slon when the worker thread reaches a certain SYNC number "
+ 				     "against a certain provider.  This is the provider... "),
+ 			SLON_C_INT
+ 		},
+ 		&quit_sync_provider,
+ 		0,
+ 		0,
+ 		2147483647
+ 	},
+ 	{
+ 		{
+ 			(const char *)"quit_sync_finalsync",
+ 			gettext_noop("SYNC number at which slon should abort"),
+ 			gettext_noop("We want to terminate slon when the worker thread reaches a certain SYNC number "
+ 				     "against a certain provider.  This is the SYNC number... "),
+ 			SLON_C_INT
+ 		},
+ 		&quit_sync_finalsync,
+ 		0,
+ 		0,
+ 		2147483647
+ 	},
     {0}
 };
 
Index: remote_worker.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/remote_worker.c,v
retrieving revision 1.86
retrieving revision 1.87
diff -Lsrc/slon/remote_worker.c -Lsrc/slon/remote_worker.c -u -w -r1.86 -r1.87
--- src/slon/remote_worker.c
+++ src/slon/remote_worker.c
@@ -222,6 +222,8 @@
 int last_sync_length;
 int max_sync;
 int min_sync;
+int quit_sync_provider;
+int quit_sync_finalsync;
 /*
  * ---------- Local functions ----------
  */
@@ -491,6 +493,20 @@
 						 last_sync_group_size, last_sync_length, ideal_sync, next_sync_group_size);
 				}
 
+
+				/* Quit upon receiving event # quit_sync_finalsync from node # quit_sync_provider */
+				if (quit_sync_provider != 0) {
+					if (quit_sync_provider == node->no_id) {
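+						/* shrink the pending sync group so we stop exactly at the final SYNC */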
+						if ((next_sync_group_size + (event->ev_seqno)) > quit_sync_finalsync) {
+							next_sync_group_size = quit_sync_finalsync - event->ev_seqno;
+						}
+						if (event->ev_seqno >= quit_sync_finalsync) {
+							slon_log(SLON_FATAL, "ABORT at sync %d per command line request\n", quit_sync_finalsync);
+							slon_abort();
+						}
+					}
+				}
+
 				gettimeofday(&sync_start, NULL);
 
 				pthread_mutex_lock(&(node->message_lock));
Index: slon.c
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/src/slon/slon.c,v
retrieving revision 1.53
retrieving revision 1.54
diff -Lsrc/slon/slon.c -Lsrc/slon/slon.c -u -w -r1.53 -r1.54
--- src/slon/slon.c
+++ src/slon/slon.c
@@ -102,10 +102,18 @@
 #endif
 	InitializeConfOptions();
 
-	while ((c = getopt(argc, argv, "f:a:d:s:t:g:c:p:o:hv")) != EOF)
+	while ((c = getopt(argc, argv, "f:a:d:s:t:g:c:p:o:q:r:hv")) != EOF)
 	{
 		switch (c)
 		{
+		        case 'q':
+			        set_config_option("quit_sync_provider", optarg);
+				break;
+
+		        case 'r':
+			        set_config_option("quit_sync_finalsync", optarg);
+				break;
+
 			case 'f':
 				ProcessConfigFile(optarg);
 				break;
@@ -225,6 +233,8 @@
 		fprintf(stderr, "    -p <filename>         slon pid file\n");
 		fprintf(stderr, "    -f <filename>         slon configuration file\n");
 		fprintf(stderr, "    -a <directory>        directory to store SYNC archive files\n");
+		fprintf(stderr, "    -q <node#>            provider node to watch for the final SYNC\n");
+		fprintf(stderr, "    -r <sync#>            SYNC event number at which to terminate (used with -q)\n");
 		return 1;
 	}
 

