[Bast-commits] r4601 - in DBIx-Class/0.08/trunk: . lib/DBIx/Class lib/DBIx/Class/Storage lib/DBIx/Class/Storage/DBI lib/DBIx/Class/Storage/DBI/Replicated lib/DBIx/Class/Storage/DBI/Replicated/Balancer lib/SQL/Translator/Parser/DBIx t t/lib t/lib/DBICTest/Schema

jnapiorkowski at dev.catalyst.perl.org
Mon Jul 21 22:09:21 BST 2008


Author: jnapiorkowski
Date: 2008-07-21 22:09:21 +0100 (Mon, 21 Jul 2008)
New Revision: 4601

Added:
   DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/
   DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm
   DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/
   DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm
   DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm
   DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
   DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
Modified:
   DBIx-Class/0.08/trunk/
   DBIx-Class/0.08/trunk/Changes
   DBIx-Class/0.08/trunk/Makefile.PL
   DBIx-Class/0.08/trunk/lib/DBIx/Class/PK.pm
   DBIx-Class/0.08/trunk/lib/DBIx/Class/Row.pm
   DBIx-Class/0.08/trunk/lib/DBIx/Class/Schema.pm
   DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI.pm
   DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated.pm
   DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/mysql.pm
   DBIx-Class/0.08/trunk/lib/SQL/Translator/Parser/DBIx/Class.pm
   DBIx-Class/0.08/trunk/t/03podcoverage.t
   DBIx-Class/0.08/trunk/t/71mysql.t
   DBIx-Class/0.08/trunk/t/77prefetch.t
   DBIx-Class/0.08/trunk/t/86sqlt.t
   DBIx-Class/0.08/trunk/t/93storage_replication.t
   DBIx-Class/0.08/trunk/t/lib/DBICTest.pm
   DBIx-Class/0.08/trunk/t/lib/DBICTest/Schema/ForceForeign.pm
   DBIx-Class/0.08/trunk/t/lib/DBICTest/Schema/TreeLike.pm
   DBIx-Class/0.08/trunk/t/lib/DBICTest/Schema/TwoKeys.pm
   DBIx-Class/0.08/trunk/t/lib/sqlite.sql
Log:
 r8426 at dev (orig r4305):  jnapiorkowski | 2008-04-28 13:38:43 -0500
 branch for replication rewrite
 r8427 at dev (orig r4307):  jnapiorkowski | 2008-04-30 10:51:48 -0500
 -new config option to DBICTest to let you set an alternative storage type, start on creating a DBIC based load balancer
 r8428 at dev (orig r4309):  jnapiorkowski | 2008-04-30 15:26:26 -0500
 got first pass on the replication and balancer, passing all of the old test suite (which is not much, but it is a milestone of some sort)
 r8429 at dev (orig r4311):  jnapiorkowski | 2008-04-30 17:16:55 -0500
 added some advice to debugging replicants so that we can see a replicant dsn, got this balancing between one master and slave, seems to run well
 r8430 at dev (orig r4312):  jnapiorkowski | 2008-04-30 17:30:47 -0500
 added some advice to debugging replicants so that we can see a replicant dsn, got this balancing between one master and slave, seems to run well
 r8431 at dev (orig r4313):  jnapiorkowski | 2008-04-30 17:50:09 -0500
 cleanup of some docs, got the default shuffling balancer to work properly.  Don't fall in love with this behavior, since I'm probably going to change the default balancer to just return the first replicant in the list, since this is optimised for the common case of a single replicant
 r8432 at dev (orig r4314):  jnapiorkowski | 2008-04-30 18:15:28 -0500
 added test to check when nothing is found
 r8433 at dev (orig r4315):  jnapiorkowski | 2008-05-01 10:56:10 -0500
 changed replication test to support custom database connect info, added a little code to DBICTest to support this
 r8441 at dev (orig r4335):  jnapiorkowski | 2008-05-05 15:42:52 -0500
 updated mysql test to reflect the fetch without execute error
 r8444 at dev (orig r4338):  jnapiorkowski | 2008-05-05 16:41:48 -0500
 removed code that tossed an error in select_single when more than a single row is returned and updated the tests to TODO the bad count issue
 r8493 at dev (orig r4351):  jnapiorkowski | 2008-05-06 19:23:09 -0500
 refactored the duties of the different balancer classes, added tests and docs
 r8494 at dev (orig r4352):  jnapiorkowski | 2008-05-06 19:43:52 -0500
 documented methods for detecting replicant reliability, created stub methods
 r8567 at dev (orig r4359):  jnapiorkowski | 2008-05-07 17:40:30 -0500
 changed the way args are passed to a storage, should make it easier to use existing code using this, added the master as a fallback to the replicants, lots of small documentation updates and test improvements.  all tests passing
 r8573 at dev (orig r4365):  jnapiorkowski | 2008-05-08 15:26:01 -0500
 lots of updates to make the test suite work with databases other than sqlite
 r8574 at dev (orig r4366):  jnapiorkowski | 2008-05-08 16:43:16 -0500
 more cleanup of the test suite so that we can run it against other databases.  fixed the problem with tests using self-referential constraints for dbs that have trouble handling that
 r8575 at dev (orig r4367):  jnapiorkowski | 2008-05-08 18:34:55 -0500
 converted replicant to a role so that we can apply it after ensure_connected properly reblesses the storage into the correct driver specific storage
 r8576 at dev (orig r4368):  jnapiorkowski | 2008-05-08 19:06:42 -0500
 fixed up the replicant status tests to exclude them if the database is not a real replicating setup, removed some debug code, got the lag_behind_master and is_replicating methods working properly.
 r8577 at dev (orig r4369):  jnapiorkowski | 2008-05-08 19:31:58 -0500
 fixed up the replicant status tests to exclude them if the database is not a real replicating setup, removed some debug code, got the lag_behind_master and is_replicating methods working properly.
 r8578 at dev (orig r4370):  jnapiorkowski | 2008-05-08 20:40:03 -0500
 good start on the validation of replicants and a system to automatically validate them (although that might be a better role than inside a class, for someday!)
 r8581 at dev (orig r4373):  jnapiorkowski | 2008-05-09 10:53:45 -0500
 changed the balancer to a role, created a new class to define the default balancer behavior, finished the autovalidate code and added tests for all the above
 r8582 at dev (orig r4374):  jnapiorkowski | 2008-05-09 12:00:46 -0500
 changed the way args are passed to a storage type that needs args so they can be in the form of a hash or array ref.  This plays nicer with Config::General for loading
 r8583 at dev (orig r4375):  jnapiorkowski | 2008-05-09 13:32:58 -0500
 doh, replaced ability to configure pool args via the storage args
 r8584 at dev (orig r4376):  jnapiorkowski | 2008-05-09 14:34:17 -0500
 removed bad tabbing
 r12902 at dev (orig r4385):  jnapiorkowski | 2008-05-14 11:05:22 -0500
 changed SQLT::Parser::DBIC so that in FK constraints, both the field order for the source and target tables are ensured to follow the same ordering rule
 r12903 at dev (orig r4386):  jnapiorkowski | 2008-05-14 12:57:57 -0500
 documentation updates
 r13046 at dev (orig r4389):  jnapiorkowski | 2008-05-16 12:31:16 -0500
 make sure that the Pool validates the replicants on the first query
 r13401 at dev (orig r4424):  jnapiorkowski | 2008-05-28 14:15:34 -0500
 created storage method to execute a coderef using master storage only, changed txn_do to only use the master, wrote tests for both the above, wrote docs for both the above
 r13450 at dev (orig r4425):  jnapiorkowski | 2008-05-29 13:30:39 -0500
 discard_changes is now forced to use master for replication.  changed discard_changes guts to point to new method called reload_row in storage.  fixed problem with replicated transactions not returning the right thing.  added tests for all the above
 r13469 at dev (orig r4426):  jnapiorkowski | 2008-05-29 18:03:15 -0500
 changed Storage->reload_row to do less, reverted some behavior to PK->discard_changes.  Did this to solve some compatibility issues with partitioning.  updated docs to reflect this.
 r13470 at dev (orig r4427):  jnapiorkowski | 2008-05-29 19:18:39 -0500
 fixed failing test in podcoverage, fixed regression in replication test that caused the default fake sqlite replication to fail.  not so important since sqlite doesn't replicate, but we do not like to see failing tests.
 r13476 at dev (orig r4433):  jnapiorkowski | 2008-05-30 09:32:24 -0500
 fixed failing test for sqlt
 r13498 at dev (orig r4440):  jnapiorkowski | 2008-05-30 11:37:56 -0500
 reverted unneeded change to field name in Treelike.pm to parent from parent_fk, updated all underlying tests and related bits as requested by Castaway.
 r13563 at dev (orig r4461):  jnapiorkowski | 2008-06-03 10:59:17 -0500
 fix to make sure execute_reliably method properly finds its attributes
 r13587 at dev (orig r4467):  jnapiorkowski | 2008-06-04 12:49:15 -0500
 fixed boneheaded failure to properly propagate txn_do
 r13639 at dev (orig r4473):  jnapiorkowski | 2008-06-05 11:28:23 -0500
 all your tabs belong to spaces
 r13684 at dev (orig r4474):  jnapiorkowski | 2008-06-07 10:59:28 -0500
 clarified documentation about setting up slave dsn
 r13803 at dev (orig r4491):  jnapiorkowski | 2008-06-11 10:01:00 -0500
 1) changed all 4 space indentation to 2 space style indents for replication code, 2) fixed broken index test that was broken after pulling from trunk, 3) updated some docs and better internal docs for replication test, 4) added a couple of new tests to make sure replication does not explode if you are careless about transactions inside of transactions inside of execute_reliably, etc.
 r13884 at dev (orig r4493):  jnapiorkowski | 2008-06-12 12:30:32 -0500
 more cleanly separated DBIC::Storage::Replicated from any storage functions (trying to make sure everything goes to the master or slave correctly), added some tests around this issue and updated the docs a bit.
 r14115 at dev (orig r4506):  jnapiorkowski | 2008-06-19 09:45:42 -0500
 removed ->reload_row from storage, changed this to a method based on the actual row object.  discard_changes is still semantically ambiguous but this solution is better
 r14116 at dev (orig r4507):  jnapiorkowski | 2008-06-19 10:48:39 -0500
 renamed get_current_storage to get_from_storage since the first method name is very poorly named
 r14749 at dev (orig r4557):  jnapiorkowski | 2008-07-07 13:58:37 -0500
 removed some debugging comments, removed transaction from Row->get_from_storage, enabled support for new resultset attribute "execute_reliably" which signals the Balancer to send read requests to the master.  Also refactored connect_replicants to break down functionality into two methods and added new Balancer method to roll the replicant to the next in the queue.  added tests for all the above.
 r14750 at dev (orig r4558):  jnapiorkowski | 2008-07-07 14:16:32 -0500
 added some notes in the tests and fixed get_from_storage to actually use the new resultset attribute
 r14751 at dev (orig r4559):  jnapiorkowski | 2008-07-07 16:38:49 -0500
 updated documentation, adding some hints and details, changed the way we can use the resultset attribute to force a particular storage backend.
 r14835 at dev (orig r4562):  jnapiorkowski | 2008-07-09 12:35:06 -0500
 use BUILDARGS instead of wrapping new, added make_immutable, removed unneeded test, added some docs
 r14836 at dev (orig r4563):  jnapiorkowski | 2008-07-09 12:40:37 -0500
 use BUILDARGS instead of wrapping new, added make_immutable, removed unneeded test, added some docs
 r14837 at dev (orig r4564):  jnapiorkowski | 2008-07-09 12:51:26 -0500
 removed the mistaken debug code
 r14838 at dev (orig r4565):  jnapiorkowski | 2008-07-09 13:07:17 -0500
 make sure various Storage mutators correctly return a useful value
 r14888 at dev (orig r4566):  jnapiorkowski | 2008-07-10 14:58:28 -0500
 fixed regression in the random balancer that I created when I removed the shuffle dependency, changed the syntax for returning the next storage in the pool to make debugging easier
 r14897 at dev (orig r4567):  jnapiorkowski | 2008-07-10 16:14:04 -0500
 make sure debugobj calls always go to the master
 r14941 at dev (orig r4569):  jnapiorkowski | 2008-07-11 15:18:10 -0500
 added tests for required modules, minor documentation update
 r14942 at dev (orig r4572):  jnapiorkowski | 2008-07-11 16:46:37 -0500
 just a tiny grammar fix to POD
 r15162 at dev (orig r4594):  jnapiorkowski | 2008-07-18 09:14:28 -0500
 updated CHANGES, removed debug code left by accident, added a bit of POD regarding the word "replicant"



Property changes on: DBIx-Class/0.08/trunk
___________________________________________________________________
Name: svk:merge
   - 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
   + 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510

Modified: DBIx-Class/0.08/trunk/Changes
===================================================================
--- DBIx-Class/0.08/trunk/Changes	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/Changes	2008-07-21 21:09:21 UTC (rev 4601)
@@ -21,8 +21,8 @@
           names should now be consistent and collision-free.
         - Improve handling of explicit key attr in ResultSet::find
         - Add warnings for non-unique ResultSet::find queries
-        - Changed Storage::DBI::Replication to Storage::DBI::Replicated, fixed
-          some problems using this with versioned databases, added some docs
+        - Changed Storage::DBI::Replication to Storage::DBI::Replicated and
+          refactored support.
         - By default now deploy/diff et al. will ignore constraint and index 
           names
         - Add ResultSet::_is_deterministic_value, make new_result filter the

Modified: DBIx-Class/0.08/trunk/Makefile.PL
===================================================================
--- DBIx-Class/0.08/trunk/Makefile.PL	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/Makefile.PL	2008-07-21 21:09:21 UTC (rev 4601)
@@ -23,6 +23,7 @@
 requires 'JSON::Any'                 => 1.00; 
 requires 'Scope::Guard'              => 0.03;
 requires 'Path::Class'               => 0;
+requires 'List::Util'                => 1.19;
 
 # Perl 5.8.0 doesn't have utf8::is_utf8()
 requires 'Encode'                    => 0 if ($] <= 5.008000);  

Modified: DBIx-Class/0.08/trunk/lib/DBIx/Class/PK.pm
===================================================================
--- DBIx-Class/0.08/trunk/lib/DBIx/Class/PK.pm	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/lib/DBIx/Class/PK.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -25,7 +25,7 @@
   return (map { $self->{_column_data}{$_} } $self->primary_columns);
 }
 
-=head2 discard_changes
+=head2 discard_changes ($attrs)
 
 Re-selects the row from the database, losing any changes that had
 been made.
@@ -33,28 +33,30 @@
 This method can also be used to refresh from storage, retrieving any
 changes made since the row was last read from storage.
 
+$attrs is expected to be a hashref of attributes suitable for passing as the
+second argument to $resultset->search($cond, $attrs);
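+
+For example, a minimal sketch forcing the refresh to come from the master in a
+replicated setup (C<$row> here is a hypothetical Row object):
+
+  $row->discard_changes({force_pool => 'master'});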
+
 =cut
 
 sub discard_changes {
-  my ($self) = @_;
+  my ($self, $attrs) = @_;
   delete $self->{_dirty_columns};
   return unless $self->in_storage; # Don't reload if we aren't real!
-
-  my $reload = $self->result_source->resultset->find(
-    map { $self->$_ } $self->primary_columns
-  );
-  unless ($reload) { # If we got deleted in the mean-time
+
+  if( my $current_storage = $self->get_from_storage($attrs)) {
+
+    # Set $self to the current.
+    %$self = %$current_storage;
+
+    # Avoid a possible infinite loop with
+    # sub DESTROY { $_[0]->discard_changes }
+    bless $current_storage, 'Do::Not::Exist';
+
+    return $self;
+  } else {
     $self->in_storage(0);
-    return $self;
+    return $self;
   }
-
-  %$self = %$reload;
-  
-  # Avoid a possible infinite loop with
-  # sub DESTROY { $_[0]->discard_changes }
-  bless $reload, 'Do::Not::Exist';
-
-  return $self;
 }
 
 =head2 id

Modified: DBIx-Class/0.08/trunk/lib/DBIx/Class/Row.pm
===================================================================
--- DBIx-Class/0.08/trunk/lib/DBIx/Class/Row.pm	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/lib/DBIx/Class/Row.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -799,7 +799,30 @@
   $class->mk_group_accessors('column' => $acc);
 }
 
+=head2 get_from_storage ($attrs)
 
+Returns a new Row object based on whatever the Storage currently has for this
+Row object.  You can use this to see if the storage has become inconsistent
+with your in-memory Row object.
+
+$attrs is expected to be a hashref of attributes suitable for passing as the
+second argument to $resultset->search($cond, $attrs);
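+
+For example, a minimal sketch (C<$row> is a hypothetical Row object; the
+C<force_pool> attribute is described in L<DBIx::Class::Storage::DBI::Replicated>):
+
+  my $fresh_row = $row->get_from_storage({force_pool => 'master'});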
+
+=cut
+
+sub get_from_storage {
+    my $self = shift @_;
+    my $attrs = shift @_;
+    my @primary_columns = map { $self->$_ } $self->primary_columns;
+    my $resultset = $self->result_source->resultset;
+
+    if(defined $attrs) {
+        $resultset = $resultset->search(undef, $attrs);
+    }
+
+    return $resultset->find(@primary_columns);
+}
+
 =head2 throw_exception
 
 See Schema's throw_exception.

Modified: DBIx-Class/0.08/trunk/lib/DBIx/Class/Schema.pm
===================================================================
--- DBIx-Class/0.08/trunk/lib/DBIx/Class/Schema.pm	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/lib/DBIx/Class/Schema.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -624,9 +624,9 @@
 
 =over 4
 
-=item Arguments: $storage_type
+=item Arguments: $storage_type|{$storage_type, \%args}
 
-=item Return Value: $storage_type
+=item Return Value: $storage_type|{$storage_type, \%args}
 
 =back
 
@@ -640,6 +640,13 @@
 dealing with MSSQL via L<DBD::Sybase>, in which case you'd set it to
 C<::DBI::Sybase::MSSQL>.
 
+If your storage type requires instantiation arguments, those are defined as a
+second argument in the form of a hashref, and the entire value needs to be
+wrapped in an arrayref or a hashref.  We support both types of refs here in
+order to play nice with your Config::* class of choice.
+
+See L<DBIx::Class::Storage::DBI::Replicated> for an example of this.
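+
+For example, a minimal sketch of the arrayref form:
+
+  $schema->storage_type(
+    ['::DBI::Replicated', { balancer_type => '::Random' }]
+  );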
+
 =head2 connection
 
 =over 4
@@ -662,19 +669,33 @@
 sub connection {
   my ($self, @info) = @_;
   return $self if !@info && $self->storage;
-  my $storage_class = $self->storage_type;
+  
+  my ($storage_class, $args) = ref $self->storage_type ? 
+    ($self->_normalize_storage_type($self->storage_type),{}) : ($self->storage_type, {});
+    
   $storage_class = 'DBIx::Class::Storage'.$storage_class
     if $storage_class =~ m/^::/;
   eval "require ${storage_class};";
   $self->throw_exception(
     "No arguments to load_classes and couldn't load ${storage_class} ($@)"
   ) if $@;
-  my $storage = $storage_class->new($self);
+  my $storage = $storage_class->new($self=>$args);
   $storage->connect_info(\@info);
   $self->storage($storage);
   return $self;
 }
 
+sub _normalize_storage_type {
+  my ($self, $storage_type) = @_;
+  if(ref $storage_type eq 'ARRAY') {
+    return @$storage_type;
+  } elsif(ref $storage_type eq 'HASH') {
+    return %$storage_type;
+  } else {
+    $self->throw_exception('Unsupported REFTYPE given: '. ref $storage_type);
+  }
+}
+
 =head2 connect
 
 =over 4

Added: DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm
===================================================================
--- DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm	                        (rev 0)
+++ DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -0,0 +1,53 @@
+package DBIx::Class::Storage::DBI::Replicated::Balancer::First;
+
+use Moose;
+with 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Balancer::First; Just get the first replicant
+
+=head1 SYNOPSIS
+
+This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.  You
+shouldn't need to create instances of this class.
+    
+=head1 DESCRIPTION
+
+Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
+databases (L<DBIx::Class::Storage::DBI::Replicated::Replicant>), defines a
+method by which query load can be spread out across each replicant in the pool.
+
+This Balancer just gets whatever is the first replicant in the pool.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 next_storage
+
+Just get the first storage.  Probably only good when you have one replicant.
+
+=cut
+
+sub next_storage {
+  return  (shift->pool->active_replicants)[0];
+}
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski at takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+__PACKAGE__->meta->make_immutable;
+
+1;
\ No newline at end of file

Added: DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm
===================================================================
--- DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm	                        (rev 0)
+++ DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -0,0 +1,62 @@
+package DBIx::Class::Storage::DBI::Replicated::Balancer::Random;
+
+use Moose;
+with 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Balancer::Random; A 'random' Balancer
+
+=head1 SYNOPSIS
+
+This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.  You
+shouldn't need to create instances of this class.
+    
+=head1 DESCRIPTION
+
+Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
+databases (L<DBIx::Class::Storage::DBI::Replicated::Replicant>), defines a
+method by which query load can be spread out across each replicant in the pool.
+
+This Balancer uses the Perl builtin C<rand> to randomly pick an active
+replicant from the associated pool.  This may or may not be random enough for
+you, patches welcome.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 next_storage
+
+Returns an active replicant at random.  Please note that due to the nature of
+the word 'random' this means it's possible for a particular active replicant to
+be requested several times in a row.
+
+=cut
+
+sub next_storage {
+  my $self = shift @_;
+  my @active_replicants = $self->pool->active_replicants;
+  my $count_active_replicants = $#active_replicants +1;
+  my $random_replicant = int(rand($count_active_replicants));
+  
+  return $active_replicants[$random_replicant];
+}
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski at takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+__PACKAGE__->meta->make_immutable;
+
+1;
\ No newline at end of file

Added: DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm
===================================================================
--- DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm	                        (rev 0)
+++ DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -0,0 +1,240 @@
+package DBIx::Class::Storage::DBI::Replicated::Balancer;
+
+use Moose::Role;
+requires 'next_storage';
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Balancer; A Software Load Balancer 
+
+=head1 SYNOPSIS
+
+This role is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
+    
+=head1 DESCRIPTION
+
+Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
+databases (L<DBIx::Class::Storage::DBI::Replicated::Replicant>), defines a
+method by which query load can be spread out across each replicant in the pool.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head2 auto_validate_every ($seconds)
+
+If this attribute is set, run the pool's validate_replicants method every
+$seconds.  Be careful with this, because if you set it to 0 you will end up
+validating on every query.
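+
+For example (a sketch; C<auto_validate_every> is delegated up to the top level
+replicated storage):
+
+  $schema->storage->auto_validate_every(30);  ## revalidate at most every 30s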
+
+=cut
+
+has 'auto_validate_every' => (
+  is=>'rw',
+  isa=>'Int',
+  predicate=>'has_auto_validate_every',
+);
+
+=head2 master
+
+The L<DBIx::Class::Storage::DBI> object that is the master database all the
+replicants are trying to follow.  The balancer needs to know it since it's the
+ultimate fallback.
+
+=cut
+
+has 'master' => (
+  is=>'ro',
+  isa=>'DBIx::Class::Storage::DBI',
+  required=>1,
+);
+
+=head2 pool
+
+The L<DBIx::Class::Storage::DBI::Replicated::Pool> object that we are trying to
+balance.
+
+=cut
+
+has 'pool' => (
+  is=>'ro',
+  isa=>'DBIx::Class::Storage::DBI::Replicated::Pool',
+  required=>1,
+);
+
+=head2 current_replicant
+
+Replicant storages (slaves) handle all read-only traffic.  The assumption is
+that your database will become read-bound well before it becomes write-bound
+and that being able to spread your read-only traffic around to multiple
+databases is going to help you to scale traffic.
+
+This attribute returns the next slave to handle a read request.  Your L</pool>
+attribute has methods to help you shuffle through all the available replicants
+via its balancer object.
+
+=cut
+
+has 'current_replicant' => (
+  is=> 'rw',
+  isa=>'DBIx::Class::Storage::DBI',
+  lazy_build=>1,
+  handles=>[qw/
+    select
+    select_single
+    columns_info_for
+  /],
+);
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 _build_current_replicant
+
+Lazy builder for the L</current_replicant> attribute.
+
+=cut
+
+sub _build_current_replicant {
+  my $self = shift @_;
+  $self->next_storage;
+}
+
+=head2 next_storage
+
+This method should be defined in the class which consumes this role.
+
+Given a pool object, return the next replicant that will serve queries.  The
+default behavior is to grab the first replicant it finds but you can write
+your own subclasses of L<DBIx::Class::Storage::DBI::Replicated::Balancer> to 
+support other balance systems.
+
+This returns from the pool of active replicants.  If there are no active
+replicants, then you should have it return the master as an ultimate fallback.
+
+=head2 around: next_storage
+
+Advice on next_storage to add the autovalidation.  We have this broken out so
+that it's easier to break out the auto validation into a role.
+
+This also returns the master in the case that none of the replicants are active
+or you just forgot to create them :)
+
+=cut
+
+around 'next_storage' => sub {
+  my ($next_storage, $self, @args) = @_;
+  my $now = time;
+    
+  ## Do we need to validate the replicants?
+  if(
+     $self->has_auto_validate_every && 
+     ($self->auto_validate_every + $self->pool->last_validated) <= $now
+  ) {
+      $self->pool->validate_replicants;
+  }
+    
+  ## Get a replicant, or the master if none
+  if(my $next = $self->$next_storage(@args)) {
+    return $next;
+  } else {
+    return $self->master;
+  }
+};
+
+=head2 increment_storage
+
+Rolls the Storage to whatever is next in the queue, as defined by the Balancer.
+
+=cut
+
+sub increment_storage {
+  my $self = shift @_;
+  my $next_replicant = $self->next_storage;
+  $self->current_replicant($next_replicant);
+}
+
+=head2 around: select
+
+Advice on the select method.  Each time we use a replicant
+we need to change it via the storage pool algorithm.  That way we are spreading
+the load evenly (hopefully) across existing capacity.
+
+=cut
+
+around 'select' => sub {
+  my ($select, $self, @args) = @_;
+  
+  if (my $forced_pool = $args[-1]->{force_pool}) {
+    delete $args[-1]->{force_pool};
+    return $self->_get_forced_pool($forced_pool)->select(@args); 
+  } else {
+    $self->increment_storage;
+    return $self->$select(@args);
+  }
+};
+
+=head2 around: select_single
+
+Advice on the select_single method.  Each time we use a replicant
+we need to change it via the storage pool algorithm.  That way we are spreading
+the load evenly (hopefully) across existing capacity.
+
+=cut
+
+around 'select_single' => sub {
+  my ($select_single, $self, @args) = @_;
+  
+  if (my $forced_pool = $args[-1]->{force_pool}) {
+    delete $args[-1]->{force_pool};
+    return $self->_get_forced_pool($forced_pool)->select_single(@args); 
+  } else {
+    $self->increment_storage;
+    return $self->$select_single(@args);
+  }
+};
+
+=head2 before: columns_info_for
+
+Advice on the columns_info_for method.  Each time we use a replicant
+we need to change it via the storage pool algorithm.  That way we are spreading
+the load evenly (hopefully) across existing capacity.
+
+=cut
+
+before 'columns_info_for' => sub {
+  my $self = shift @_;
+  $self->increment_storage;
+};
+
+=head2 _get_forced_pool ($name)
+
+Given an identifier, find the most correct storage object to handle the query.
+
+=cut
+
+sub _get_forced_pool {
+  my ($self, $forced_pool) = @_;
+  if(blessed $forced_pool) {
+    return $forced_pool;
+  } elsif($forced_pool eq 'master') {
+    return $self->master;
+  } elsif(my $replicant = $self->pool->replicants($forced_pool)) {
+    return $replicant;
+  } else {
+    $self->master->throw_exception("$forced_pool is not a named replicant.");
+  }   
+}
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski at takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;

Added: DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
===================================================================
--- DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm	                        (rev 0)
+++ DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -0,0 +1,276 @@
+package DBIx::Class::Storage::DBI::Replicated::Pool;
+
+use Moose;
+use MooseX::AttributeHelpers;
+use DBIx::Class::Storage::DBI::Replicated::Replicant;
+use List::Util qw(sum);
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Pool; Manage a pool of replicants
+
+=head1 SYNOPSIS
+
+This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.  You
+shouldn't need to create instances of this class.
+  
+=head1 DESCRIPTION
+
+In a replicated storage type, there is at least one replicant to handle the
+read only traffic.  The Pool class manages this replicant, or list of 
+replicants, and gives some methods for querying information about their status.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head2 maximum_lag ($num)
+
+This is a number which defines the maximum allowed lag returned by the
+L<DBIx::Class::Storage::DBI/lag_behind_master> method.  The default is 0.  In
+general, this should return a larger number when the replicant is lagging
+behind its master; however, the implementation of this is database specific, so
+don't count on this number having a fixed meaning.  For example, MySQL will
+return a number of seconds that the replicating database is lagging.
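+
+For example (a sketch; C<pool_args> is passed through to the Pool by the top
+level replicated storage):
+
+  $schema->storage_type(['::DBI::Replicated', {
+    pool_args => { maximum_lag => 5 },
+  }]);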
+
+=cut
+
+has 'maximum_lag' => (
+  is=>'rw',
+  isa=>'Num',
+  required=>1,
+  lazy=>1,
+  default=>0,
+);
+
+=head2 last_validated
+
+This is an integer representing a time since the last time the replicants were
+validated. It's nothing fancy, just an integer provided via the perl time 
+builtin.
+
+=cut
+
+has 'last_validated' => (
+  is=>'rw',
+  isa=>'Int',
+  reader=>'last_validated',
+  writer=>'_last_validated',
+  lazy=>1,
+  default=>0,
+);
+
+=head2 replicant_type ($classname)
+
+Base class used to instantiate replicants that are in the pool.  Unless you
+need to subclass L<DBIx::Class::Storage::DBI::Replicated::Replicant> you should
+just leave this alone.
+
+=cut
+
+has 'replicant_type' => (
+  is=>'ro',
+  isa=>'ClassName',
+  required=>1,
+  default=>'DBIx::Class::Storage::DBI',
+  handles=>{
+    'create_replicant' => 'new',
+  },  
+);
+
+=head2 replicants
+
+A hashref of replicants, with the key being the dsn and the value being the
+actual replicant storage.  For example, if the $dsn element is something like:
+
+  "dbi:SQLite:dbname=dbfile"
+  
+You could access the specific replicant via:
+
+  $schema->storage->replicants->{'dbname=dbfile'}
+  
+This attribute also supports the following helper methods:
+
+=over 4
+
+=item set_replicant($key=>$storage)
+
+Pushes a replicant onto the HashRef under $key
+
+=item get_replicant($key)
+
+Retrieves the named replicant
+
+=item has_replicants
+
+Returns true if the Pool defines replicants.
+
+=item num_replicants
+
+The number of replicants in the pool
+
+=item delete_replicant ($key)
+
+Removes the replicant under $key from the pool.
+
+=back
+
+=cut
+
+has 'replicants' => (
+  is=>'rw',
+  metaclass => 'Collection::Hash',
+  isa=>'HashRef[DBIx::Class::Storage::DBI]',
+  default=>sub {{}},
+  provides  => {
+    'set' => 'set_replicant',
+    'get' => 'get_replicant',            
+    'empty' => 'has_replicants',
+    'count' => 'num_replicants',
+    'delete' => 'delete_replicant',
+  },
+);
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 connect_replicants ($schema, Array[$connect_info])
+
+Given an array of $connect_info arrayrefs suitable for connecting to a database,
+create an L<DBIx::Class::Storage::DBI::Replicated::Replicant> object and store
+it in the L</replicants> attribute.
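+
+For example (a sketch; normally you call this via the top level storage, which
+adds the $schema argument for you):
+
+  $pool->connect_replicants(
+    $schema,
+    [$dsn1, $user, $password, \%opts],
+    [$dsn2, $user, $password, \%opts],
+  );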
+
+=cut
+
+sub connect_replicants {
+  my $self = shift @_;
+  my $schema = shift @_;
+  
+  my @newly_created = ();
+  foreach my $connect_info (@_) {
+    my $replicant = $self->connect_replicant($schema, $connect_info);
+    my ($key) = ($connect_info->[0]=~m/^dbi\:.+\:(.+)$/);
+    $self->set_replicant( $key => $replicant);  
+    push @newly_created, $replicant;
+  }
+  
+  return @newly_created;
+}
+
+=head2 connect_replicant ($schema, $connect_info)
+
+Given a schema object and an arrayref of $connect_info, connect the replicant
+and return it.
+
+=cut
+
+sub connect_replicant {
+  my ($self, $schema, $connect_info) = @_;
+  my $replicant = $self->create_replicant($schema);
+    
+  $replicant->connect_info($connect_info);    
+  $replicant->ensure_connected;
+  DBIx::Class::Storage::DBI::Replicated::Replicant->meta->apply($replicant);
+    
+  return $replicant;
+}
+
+=head2 connected_replicants
+
+Returns true if there are connected replicants.  Actually this is overloaded to
+return the number of replicants.  So you can do stuff like:
+
+  if( my $num_connected = $storage->connected_replicants ) {
+    print "I have $num_connected connected replicants";
+  } else {
+    print "Sorry, no replicants.";
+  }
+
+This method will actually test that each replicant in the L</replicants> hashref
+is actually connected; try not to hit this 10 times a second.
+
+=cut
+
+sub connected_replicants {
+  my $self = shift @_;
+  return sum( map {
+    $_->connected ? 1:0
+  } $self->all_replicants );
+}
+
+=head2 active_replicants
+
+This is an array of replicants that are considered to be active in the pool.
+This does not check to see if they are connected, but if they are not, DBIC
+should automatically reconnect them for us when we hit them with a query.
+
+=cut
+
+sub active_replicants {
+  my $self = shift @_;
+  return ( grep {$_} map {
+    $_->active ? $_:0
+  } $self->all_replicants );
+}
+
+=head2 all_replicants
+
+Just a simple array of all the replicant storages.  No particular order to the
+array is given, nor should any meaning be derived.
+
+=cut
+
+sub all_replicants {
+  my $self = shift @_;
+  return values %{$self->replicants};
+}
+
+=head2 validate_replicants
+
+This does a check to see if 1) each replicant is connected (or reconnectable),
+2) that it is ->is_replicating, and 3) that it is not exceeding the lag amount
+defined by L</maximum_lag>.  Replicants that fail any of these tests are set to
+inactive, and thus removed from the replication pool.
+
+This tests L</all_replicants>, since a replicant that has been previously marked
+as inactive can be reactivated should it start to pass the validation tests again.
+
+See L<DBIx::Class::Storage::DBI> for more about checking if a replicating
+connection is not following a master or is lagging.
+
+Calling this method will generate queries on the replicant databases, so it is
+not recommended that you run it very often.
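+
+For example, from a scheduled job (a sketch; assumes a replicated storage is
+already set up):
+
+  $schema->storage->pool->validate_replicants;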
+
+=cut
+
+sub validate_replicants {
+  my $self = shift @_;
+  foreach my $replicant($self->all_replicants) {
+    if(
+      $replicant->is_replicating &&
+      $replicant->lag_behind_master <= $self->maximum_lag &&
+      $replicant->ensure_connected
+    ) {
+      $replicant->active(1)
+    } else {
+      $replicant->active(0);
+    }
+  }
+  ## Mark that we completed this validation.  
+  $self->_last_validated(time);
+}
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski at takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+__PACKAGE__->meta->make_immutable;
+
+1;

Added: DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
===================================================================
--- DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm	                        (rev 0)
+++ DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -0,0 +1,91 @@
+package DBIx::Class::Storage::DBI::Replicated::Replicant;
+
+use Moose::Role;
+requires qw/_query_start/;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Replicant; A replicated DBI Storage Role
+
+=head1 SYNOPSIS
+
+This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
+    
+=head1 DESCRIPTION
+
+Replicants are DBI Storages that follow a master DBI Storage.  Typically this
+is accomplished via an external replication system.  Please see the documents
+for L<DBIx::Class::Storage::DBI::Replicated> for more details.
+
+This class exists to define methods of a DBI Storage that only make sense when
+it's a classic 'slave' in a pool of slave databases which replicate from a
+given master database.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head2 active
+
+This is a boolean which allows you to programmatically activate or deactivate a
+replicant from the pool.  This way you can do things like disallow a replicant
+when it gets too far behind the master, if it stops replicating, etc.
+
+This attribute DOES NOT reflect a replicant's internal status, i.e. if it is
+properly replicating from a master and has not fallen too many seconds behind a
+reliability threshold.  For that, use L</is_replicating>  and L</lag_behind_master>.
+Since the implementation of those functions is database specific (and not all
+DBIC supported DBs support replication) you should refer to your database
+specific storage driver for more information.
+
+=cut
+
+has 'active' => (
+  is=>'rw',
+  isa=>'Bool',
+  lazy=>1,
+  required=>1,
+  default=>1,
+);
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 around: _query_start
+
+Advice on the _query_start method to add more debugging info (it prepends the
+replicant's DSN to the debug output).
+
+=cut
+
+around '_query_start' => sub {
+  my ($method, $self, $sql, @bind) = @_;
+  my $dsn = $self->connect_info->[0];
+  $self->$method("DSN: $dsn SQL: $sql", @bind);
+};
+
+=head2 debugobj
+
+Override the debugobj method to redirect this method call back to the master.
+
+=cut
+
+sub debugobj {
+    return shift->schema->storage->debugobj;
+}
+
+=head1 SEE ALSO
+
+L<http://en.wikipedia.org/wiki/Replicant>
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski at takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;
\ No newline at end of file

Modified: DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated.pm
===================================================================
--- DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated.pm	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/Replicated.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -1,274 +1,743 @@
 package DBIx::Class::Storage::DBI::Replicated;
 
-use strict;
-use warnings;
+BEGIN {
+  use Carp::Clan qw/^DBIx::Class/;
+
+  ## Modules required for Replication support not required for general DBIC
+  ## use, so we explicitly test for these.
+
+  my %replication_required = (
+    Moose => '0.54',
+    MooseX::AttributeHelpers => '0.12',
+    Moose::Util::TypeConstraints => '0.54',
+    Class::MOP => '0.63',
+  );
+
+  my @didnt_load;
+
+  for my $module (keys %replication_required) {
+    eval "use $module $replication_required{$module}";
+    push @didnt_load, "$module $replication_required{$module}"
+      if $@;
+  }
+
+  croak("@{[ join ', ', @didnt_load ]} are missing and are required for Replication")
+    if @didnt_load;
+}
 
 use DBIx::Class::Storage::DBI;
-use DBD::Multi;
+use DBIx::Class::Storage::DBI::Replicated::Pool;
+use DBIx::Class::Storage::DBI::Replicated::Balancer;
 
-use base qw/Class::Accessor::Fast/;
-
-__PACKAGE__->mk_accessors( qw/read_source write_source/ );
-
 =head1 NAME
 
-DBIx::Class::Storage::DBI::Replicated - ALPHA Replicated database support
+DBIx::Class::Storage::DBI::Replicated - BETA Replicated database support
 
 =head1 SYNOPSIS
 
 The following example shows how to change an existing $schema to a replicated
-storage type and update it's connection information to contain a master DSN and
-an array of slaves.
+storage type, add some replicated (readonly) databases, and perform reporting
+tasks.
 
-    ## Change storage_type in your schema class
-    $schema->storage_type( '::DBI::Replicated' );
-    
-    ## Set your connection.
-    $schema->connect(
-        $dsn, $user, $password, {
-        	AutoCommit => 1,
-        	## Other standard DBI connection or DBD custom attributes added as
-        	## usual.  Additionally, we have two custom attributes for defining
-        	## slave information and controlling how the underlying DBD::Multi
-        	slaves_connect_info => [
-        	   ## Define each slave like a 'normal' DBI connection, but you add
-        	   ## in a DBD::Multi custom attribute to define how the slave is
-        	   ## prioritized.  Please see DBD::Multi for more.
-        	   [$slave1dsn, $user, $password, {%slave1opts, priority=>10}],
-               [$slave2dsn, $user, $password, {%slave2opts, priority=>10}],
-               [$slave3dsn, $user, $password, {%slave3opts, priority=>20}],
-               ## add in a preexisting database handle
-               [$dbh, '','', {priority=>30}], 
-               ## DBD::Multi will call this coderef for connects 
-               [sub {  DBI->connect(< DSN info >) }, '', '', {priority=>40}],  
-               ## If the last item is hashref, we use that for DBD::Multi's 
-               ## configuration information.  Again, see DBD::Multi for more.
-               {timeout=>25, failed_max=>2},      	   
-        	],
-        },
-    );
-    
-    ## Now, just use the schema as normal
-    $schema->resultset('Table')->find(< unique >); ## Reads will use slaves
-    $schema->resultset('Table')->create(\%info); ## Writes will use master
-
+  ## Change storage_type in your schema class
+  $schema->storage_type( ['::DBI::Replicated', {balancer_type=>'::Random'}] );
+  
+  ## Add some slaves.  Basically this is an array of arrayrefs, where each
+  ## arrayref is database connect information
+  
+  $schema->storage->connect_replicants(
+    [$dsn1, $user, $pass, \%opts],
+    [$dsn2, $user, $pass, \%opts],
+    [$dsn3, $user, $pass, \%opts],
+  );
+  
+  ## Now, just use the $schema as normal
+  $schema->resultset('Source')->search({name=>'etc'});
+  
+  ## You can force a given query to use a particular storage using the search
+  ## attribute 'force_pool'.  For example:
+  
+  my $RS = $schema->resultset('Source')->search(undef, {force_pool=>'master'});
+  
+  ## Now $RS will force everything (both reads and writes) to use whatever was
+  ## setup as the master storage.  'master' is hardcoded to always point to the
+  ## Master, but you can also use any Replicant name.  Please see:
+  ## L<DBIx::Class::Storage::DBI::Replicated::Pool> and the replicants attribute
+  ## for more.  Also see transactions and L</execute_reliably> for alternative
+  ## ways to force read traffic to the master.
+  
 =head1 DESCRIPTION
 
-Warning: This class is marked ALPHA.  We are using this in development and have
-some basic test coverage but the code hasn't yet been stressed by a variety
-of databases.  Individual DB's may have quirks we are not aware of.  Please
-use this in development and pass along your experiences/bug fixes.
+Warning: This class is marked BETA.  It has been run on a production website
+using MySQL native replication as its backend and we have some decent test
+coverage, but the code hasn't yet been stressed by a variety of databases.
+Individual DBs may have quirks we are not aware of.  Please use this in
+development first and pass along your experiences/bug fixes.
 
 This class implements replicated data store for DBI. Currently you can define
 one master and numerous slave database connections. All write-type queries
 (INSERT, UPDATE, DELETE and even LAST_INSERT_ID) are routed to master
 database, all read-type queries (SELECTs) go to the slave database.
 
-For every slave database you can define a priority value, which controls data
-source usage pattern. It uses L<DBD::Multi>, so first the lower priority data
-sources used (if they have the same priority, the are used randomized), than
-if all low priority data sources fail, higher ones tried in order.
+Basically, any method request that L<DBIx::Class::Storage::DBI> would normally
+handle gets delegated to one of the two attributes: L</read_handler> or
+L</write_handler>.  Additionally, some methods need to be distributed
+to all existing storages.  This way our storage class is a drop-in replacement
+for L<DBIx::Class::Storage::DBI>.
 
-=head1 CONFIGURATION
+Read traffic is spread across the replicants (slaves) according to a user
+selected algorithm.  The default balancer just returns the first replicant in
+the pool.
 
-Please see L<DBD::Multi> for most configuration information.
+=head1 NOTES
 
+The consistency between master and replicants is database specific.  The Pool
+gives you a method to validate its replicants, removing and replacing them
+when they fail/pass predefined criteria.  Please make careful use of the ways
+to force a query to run against Master when needed.
+
+=head1 REQUIREMENTS
+
+Replicated Storage has additional requirements not currently part of L<DBIx::Class>
+
+  Moose => 0.54
+  MooseX::AttributeHelpers => 0.12 
+  Moose::Util::TypeConstraints => 0.54
+  Class::MOP => 0.63
+  
+You will need to install these modules manually via CPAN or make them part of the
+Makefile for your distribution.
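+
+For example, in your application's Makefile.PL (a sketch mirroring the list
+above):
+
+  requires 'Moose'                        => '0.54';
+  requires 'MooseX::AttributeHelpers'     => '0.12';
+  requires 'Moose::Util::TypeConstraints' => '0.54';
+  requires 'Class::MOP'                   => '0.63';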
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head2 schema
+
+The underlying L<DBIx::Class::Schema> object this storage is attached to.
+
 =cut
 
-sub new {
-    my $proto = shift;
-    my $class = ref( $proto ) || $proto;
-    my $self = {};
+has 'schema' => (
+  is=>'rw',
+  isa=>'DBIx::Class::Schema',
+  weak_ref=>1,
+  required=>1,
+);
 
-    bless( $self, $class );
+=head2 pool_type
 
-    $self->write_source( DBIx::Class::Storage::DBI->new );
-    $self->read_source( DBIx::Class::Storage::DBI->new );
+Contains the classname which will instantiate the L</pool> object.  Defaults 
+to: L<DBIx::Class::Storage::DBI::Replicated::Pool>.
 
-    return $self;
-}
+=cut
 
-sub all_sources {
-    my $self = shift;
+has 'pool_type' => (
+  is=>'ro',
+  isa=>'ClassName',
+  required=>1,
+  default=>'DBIx::Class::Storage::DBI::Replicated::Pool',
+  handles=>{
+    'create_pool' => 'new',
+  },
+);
 
-    my @sources = ($self->read_source, $self->write_source);
+=head2 pool_args
 
-    return wantarray ? @sources : \@sources;
-}
+Contains a hashref of initialization information to pass to the Pool object.
+See L<DBIx::Class::Storage::DBI::Replicated::Pool> for available arguments.
 
-sub _connect_info {
-	my $self = shift;
-    my $master = $self->write_source->_connect_info;
-    $master->[-1]->{slave_connect_info} = $self->read_source->_connect_info;
-    return $master;
-}
+=cut
 
-sub connect_info {
-	my ($self, $source_info) = @_;
+has 'pool_args' => (
+  is=>'ro',
+  isa=>'HashRef',
+  lazy=>1,
+  required=>1,
+  default=>sub { {} },
+);
 
-    ## if there is no $source_info, treat this sub like an accessor
-    return $self->_connect_info
-     if !$source_info;
+
+=head2 balancer_type
+
+The replication pool requires a balancer class to provide the methods for
+choosing how to spread the query load across each replicant in the pool.
+
+=cut
+
+subtype 'DBIx::Class::Storage::DBI::Replicated::BalancerClassNamePart',
+  as 'ClassName';
     
-    ## Alright, let's conect the master 
-    $self->write_source->connect_info($source_info);
-  
-    ## Now, build and then connect the Slaves
-    my @slaves_connect_info = @{$source_info->[-1]->{slaves_connect_info}};   
-    my $dbd_multi_config = ref $slaves_connect_info[-1] eq 'HASH' 
-        ? pop @slaves_connect_info : {};
+coerce 'DBIx::Class::Storage::DBI::Replicated::BalancerClassNamePart',
+  from 'Str',
+  via {
+    my $type = $_;
+    if($type=~m/^::/) {
+      $type = 'DBIx::Class::Storage::DBI::Replicated::Balancer'.$type;
+    }
+    Class::MOP::load_class($type);
+    $type;
+  };
 
-    ## We need to do this since SQL::Abstract::Limit can't guess what DBD::Multi is
-    $dbd_multi_config->{limit_dialect} = $self->write_source->sql_maker->limit_dialect
-        unless defined $dbd_multi_config->{limit_dialect};
+has 'balancer_type' => (
+  is=>'ro',
+  isa=>'DBIx::Class::Storage::DBI::Replicated::BalancerClassNamePart',
+  coerce=>1,
+  required=>1,
+  default=> 'DBIx::Class::Storage::DBI::Replicated::Balancer::First',
+  handles=>{
+    'create_balancer' => 'new',
+  },
+);
 
-    @slaves_connect_info = map {
-        ## if the first element in the arrayhash is a ref, make that the value
-        my $db = ref $_->[0] ? $_->[0] : $_;
-    	my $priority = $_->[-1]->{priority} || 10; ## default priority is 10
-    	$priority => $db;
-    } @slaves_connect_info;
+=head2 balancer_args
+
+Contains a hashref of initialization information to pass to the Balancer object.
+See L<DBIx::Class::Storage::DBI::Replicated::Balancer> for available arguments.
+
+=cut
+
+has 'balancer_args' => (
+  is=>'ro',
+  isa=>'HashRef',
+  lazy=>1,
+  required=>1,
+  default=>sub { {} },
+);
+
+=head2 pool
+
+Is a L<DBIx::Class::Storage::DBI::Replicated::Pool> or derived class.  This is a
+container class for one or more replicated databases.
+
+=cut
+
+has 'pool' => (
+  is=>'ro',
+  isa=>'DBIx::Class::Storage::DBI::Replicated::Pool',
+  lazy_build=>1,
+  handles=>[qw/
+    connect_replicants    
+    replicants
+    has_replicants
+  /],
+);
+
+=head2 balancer
+
+Is a L<DBIx::Class::Storage::DBI::Replicated::Balancer> or derived class.  This
+is a class that takes a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>)
+and decides which replicant should serve the next read query.
+
+=cut
+
+has 'balancer' => (
+  is=>'ro',
+  isa=>'DBIx::Class::Storage::DBI::Replicated::Balancer',
+  lazy_build=>1,
+  handles=>[qw/auto_validate_every/],
+);
+
+=head2 master
+
+The master defines the canonical state for a pool of connected databases.  All
+the replicants are expected to match this database's state.  Thus, in a classic
+Master / Slaves distributed system, all the slaves are expected to replicate
+the Master's state as quickly as possible.  This is the only database in the
+pool of databases that is allowed to handle write traffic.
+
+=cut
+
+has 'master' => (
+  is=> 'ro',
+  isa=>'DBIx::Class::Storage::DBI',
+  lazy_build=>1,
+);
+
+=head1 ATTRIBUTES IMPLEMENTING THE DBIx::Class::Storage::DBI INTERFACE
+
+The following attributes are delegated all the methods required for the
+L<DBIx::Class::Storage::DBI> interface.
+
+=head2 read_handler
+
+Defines an object that implements the read side of L<DBIx::Class::Storage::DBI>.
+
+=cut
+
+has 'read_handler' => (
+  is=>'rw',
+  isa=>'Object',
+  lazy_build=>1,
+  handles=>[qw/
+    select
+    select_single
+    columns_info_for
+  /],    
+);
+
+=head2 write_handler
+
+Defines an object that implements the write side of L<DBIx::Class::Storage::DBI>.
+
+=cut
+
+has 'write_handler' => (
+  is=>'ro',
+  isa=>'Object',
+  lazy_build=>1,
+  handles=>[qw/   
+    on_connect_do
+    on_disconnect_do       
+    connect_info
+    throw_exception
+    sql_maker
+    sqlt_type
+    create_ddl_dir
+    deployment_statements
+    datetime_parser
+    datetime_parser_type        
+    last_insert_id
+    insert
+    insert_bulk
+    update
+    delete
+    dbh
+    txn_begin
+    txn_do
+    txn_commit
+    txn_rollback
+    txn_scope_guard
+    sth
+    deploy
+
+    reload_row
+    _prep_for_execute
+    configure_sqlt
     
-    $self->read_source->connect_info([ 
-        'dbi:Multi:', undef, undef, { 
-        	dsns => [@slaves_connect_info],
-        	%$dbd_multi_config,
-        },
-    ]);
-    
-    ## Return the formated connection information
-    return $self->_connect_info;
+  /],
+);
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 BUILDARGS
+
+L<DBIx::Class::Schema>, when instantiating its storage, passes itself as the
+first argument.  So we need to massage the arguments a bit so that all the
+bits get put into the correct places.
+
+=cut
+
+sub BUILDARGS {
+  my ($class, $schema, $storage_type_args, @args) = @_;
+
+  return {
+    schema=>$schema,
+    %$storage_type_args,
+    @args
+  }
 }
 
-sub select {
-    shift->read_source->select( @_ );
+=head2 _build_master
+
+Lazy builder for the L</master> attribute.
+
+=cut
+
+sub _build_master {
+  my $self = shift @_;
+  DBIx::Class::Storage::DBI->new($self->schema);
 }
-sub select_single {
-    shift->read_source->select_single( @_ );
+
+=head2 _build_pool
+
+Lazy builder for the L</pool> attribute.
+
+=cut
+
+sub _build_pool {
+  my $self = shift @_;
+  $self->create_pool(%{$self->pool_args});
 }
-sub throw_exception {
-    shift->read_source->throw_exception( @_ );
+
+=head2 _build_balancer
+
+Lazy builder for the L</balancer> attribute.  This takes a Pool object so that
+the balancer knows which pool it's balancing.
+
+=cut
+
+sub _build_balancer {
+  my $self = shift @_;
+  $self->create_balancer(
+    pool=>$self->pool, 
+    master=>$self->master,
+    %{$self->balancer_args},
+  );
 }
-sub sql_maker {
-    shift->read_source->sql_maker( @_ );
+
+=head2 _build_write_handler
+
+Lazy builder for the L</write_handler> attribute.  The default is to set this to
+the L</master>.
+
+=cut
+
+sub _build_write_handler {
+  return shift->master;
 }
-sub columns_info_for {
-    shift->read_source->columns_info_for( @_ );
+
+=head2 _build_read_handler
+
+Lazy builder for the L</read_handler> attribute.  The default is to set this to
+the L</balancer>.
+
+=cut
+
+sub _build_read_handler {
+  return shift->balancer;
 }
-sub sqlt_type {
-    shift->read_source->sqlt_type( @_ );
+
+=head2 around: connect_replicants
+
+All calls to connect_replicants need to have an existing $schema tacked onto
+the top of the args, since L<DBIx::Class::Storage::DBI> needs it.
+
+=cut
+
+around 'connect_replicants' => sub {
+  my ($method, $self, @args) = @_;
+  $self->$method($self->schema, @args);
+};
+
+=head2 all_storages
+
+Returns an array of all the connected storage backends.  The first element
+in the returned array is the master, and the remaining elements are each of
+the replicants.
+
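+For example:
+
+  my ($master, @replicants) = $schema->storage->all_storages;
+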
+=cut
+
+sub all_storages {
+  my $self = shift @_;
+  return grep {defined $_ && blessed $_} (
+     $self->master,
+     $self->replicants,
+  );
 }
-sub create_ddl_dir {
-    shift->read_source->create_ddl_dir( @_ );
+
+=head2 execute_reliably ($coderef, ?@args)
+
+Given a coderef, saves the current state of the L</read_handler>, forces it to
+use reliable storage (ie sets it to the master), executes a coderef and then
+restores the original state.
+
+Example:
+
+  my $reliably = sub {
+    my $name = shift @_;
+    $schema->resultset('User')->create({name=>$name});
+    my $user_rs = $schema->resultset('User')->find({name=>$name}); 
+    return $user_rs;
+  };
+
+  my $user_rs = $schema->storage->execute_reliably($reliably, 'John');
+
+Use this when you must be certain of your database state, such as when you just
+inserted something and need to get a resultset including it, etc.
+
+=cut
+
+sub execute_reliably {
+  my ($self, $coderef, @args) = @_;
+  
+  unless( ref $coderef eq 'CODE') {
+    $self->throw_exception('First argument must be a coderef');
+  }
+  
+  ##Get copy of master storage
+  my $master = $self->master;
+  
+  ##Get whatever the current read handler is
+  my $current = $self->read_handler;
+  
+  ##Set the read handler to master
+  $self->read_handler($master);
+  
+  ## do whatever the caller needs
+  my @result;
+  my $want_array = wantarray;
+  
+  eval {
+    if($want_array) {
+      @result = $coderef->(@args);
+    } elsif(defined $want_array) {
+      ($result[0]) = ($coderef->(@args));
+    } else {
+      $coderef->(@args);
+    }       
+  };
+  
+  ##Reset to the original state
+  $self->read_handler($current); 
+  
+  ##Exception testing has to come last, otherwise you might leave the 
+  ##read_handler set to master.
+  
+  if($@) {
+    $self->throw_exception("coderef returned an error: $@");
+  } else {
+    return $want_array ? @result : $result[0];
+  }
 }
-sub deployment_statements {
-    shift->read_source->deployment_statements( @_ );
+
+=head2 set_reliable_storage
+
+Sets the current $schema to be 'reliable', that is, all queries, both read and
+write, are sent to the master.
+
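+For example:
+
+  $schema->storage->set_reliable_storage;
+  ## all reads now hit the master until set_balanced_storage is called
+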
+=cut
+
+sub set_reliable_storage {
+  my $self = shift @_;
+  my $schema = $self->schema;
+  my $write_handler = $self->schema->storage->write_handler;
+  
+  $schema->storage->read_handler($write_handler);
 }
-sub datetime_parser {
-    shift->read_source->datetime_parser( @_ );
-}
-sub datetime_parser_type {
-    shift->read_source->datetime_parser_type( @_ );
-}
-sub build_datetime_parser {
-    shift->read_source->build_datetime_parser( @_ );
-}
 
-sub limit_dialect { $_->limit_dialect( @_ ) for( shift->all_sources ) }
-sub quote_char { $_->quote_char( @_ ) for( shift->all_sources ) }
-sub name_sep { $_->quote_char( @_ ) for( shift->all_sources ) }
-sub disconnect { $_->disconnect( @_ ) for( shift->all_sources ) }
-sub set_schema { $_->set_schema( @_ ) for( shift->all_sources ) }
+=head2 set_balanced_storage
 
-sub DESTROY {
-    my $self = shift;
+Sets the current $schema to use the L</balancer> for all reads, while all
+writes are sent to the master only.
+
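+For example:
+
+  $schema->storage->set_balanced_storage;
+  ## reads are once again spread across the replicants
+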
+=cut
 
-    undef $self->{write_source};
-    undef $self->{read_sources};
+sub set_balanced_storage {
+  my $self = shift @_;
+  my $schema = $self->schema;
+  my $balanced_handler = $self->schema->storage->balancer;
+
+  $schema->storage->read_handler($balanced_handler);
 }
 
-sub last_insert_id {
-    shift->write_source->last_insert_id( @_ );
-}
-sub insert {
-    shift->write_source->insert( @_ );
-}
-sub update {
-    shift->write_source->update( @_ );
-}
-sub update_all {
-    shift->write_source->update_all( @_ );
-}
-sub delete {
-    shift->write_source->delete( @_ );
-}
-sub delete_all {
-    shift->write_source->delete_all( @_ );
-}
-sub create {
-    shift->write_source->create( @_ );
-}
-sub find_or_create {
-    shift->write_source->find_or_create( @_ );
-}
-sub update_or_create {
-    shift->write_source->update_or_create( @_ );
-}
+=head2 around: txn_do ($coderef)
+
+Overload of the txn_do method, which is delegated to whatever the
+L</write_handler> is set to.  We overload this in order to wrap it inside a
+L</execute_reliably> call.
+
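+For example, reads issued inside the transaction below are forced to the
+master (the C<User> resultset is illustrative only):
+
+  $schema->txn_do(sub {
+    $schema->resultset('User')->create({name=>'John'});
+    return $schema->resultset('User')->find({name=>'John'});
+  });
+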
+=cut
+
+around 'txn_do' => sub {
+  my($txn_do, $self, $coderef, @args) = @_;
+  $self->execute_reliably(sub {$self->$txn_do($coderef, @args)}); 
+};
+
+=head2 connected
+
+Check that the master and at least one of the replicants is connected.
+
+=cut
+
 sub connected {
-    shift->write_source->connected( @_ );
+  my $self = shift @_;
+  return
+    $self->master->connected &&
+    $self->pool->connected_replicants;
 }
+
+=head2 ensure_connected
+
+Make sure all the storages are connected.
+
+=cut
+
 sub ensure_connected {
-    shift->write_source->ensure_connected( @_ );
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->ensure_connected(@_);
+  }
 }
-sub dbh {
-    shift->write_source->dbh( @_ );
+
+=head2 limit_dialect
+
+Set the limit_dialect for all existing storages
+
+=cut
+
+sub limit_dialect {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->limit_dialect(@_);
+  }
+  return $self->master->limit_dialect;
 }
-sub txn_do {
-    shift->write_source->txn_do( @_ );
+
+=head2 quote_char
+
+Set the quote_char for all existing storages
+
+=cut
+
+sub quote_char {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->quote_char(@_);
+  }
+  return $self->master->quote_char;
 }
-sub txn_commit {
-    shift->write_source->txn_commit( @_ );
+
+=head2 name_sep
+
+Set the name_sep for all existing storages
+
+=cut
+
+sub name_sep {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->name_sep(@_);
+  }
+  return $self->master->name_sep;
 }
-sub txn_rollback {
-    shift->write_source->txn_rollback( @_ );
+
+=head2 set_schema
+
+Set the schema object for all existing storages
+
+=cut
+
+sub set_schema {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->set_schema(@_);
+  }
 }
-sub sth {
-    shift->write_source->sth( @_ );
+
+=head2 debug
+
+Set the debug flag across all storages.
+
+=cut
+
+sub debug {
+  my $self = shift @_;
+  if(@_) {
+    foreach my $source ($self->all_storages) {
+      $source->debug(@_);
+    }   
+  }
+  return $self->master->debug;
 }
-sub deploy {
-    shift->write_source->deploy( @_ );
-}
-sub _prep_for_execute {
-	shift->write_source->_prep_for_execute(@_);
-}
 
+=head2 debugobj
+
+Set the debug object across all storages.
+
+=cut
+
 sub debugobj {
-	shift->write_source->debugobj(@_);
+  my $self = shift @_;
+  if(@_) {
+    foreach my $source ($self->all_storages) {
+      $source->debugobj(@_);
+    } 	
+  }
+  return $self->master->debugobj;
 }
-sub debug {
-    shift->write_source->debug(@_);
+
+=head2 debugfh
+
+Set the debugfh object across all storages.
+
+=cut
+
+sub debugfh {
+  my $self = shift @_;
+  if(@_) {
+    foreach my $source ($self->all_storages) {
+      $source->debugfh(@_);
+    }   
+  }
+  return $self->master->debugfh;
 }
 
-sub debugfh { shift->_not_supported( 'debugfh' ) };
-sub debugcb { shift->_not_supported( 'debugcb' ) };
+=head2 debugcb
 
-sub _not_supported {
-    my( $self, $method ) = @_;
+Set the debug callback across all storages.
 
-    die "This Storage does not support $method method.";
+=cut
+
+sub debugcb {
+  my $self = shift @_;
+  if(@_) {
+    foreach my $source ($self->all_storages) {
+      $source->debugcb(@_);
+    }   
+  }
+  return $self->master->debugcb;
 }
 
-=head1 SEE ALSO
+=head2 disconnect
 
-L<DBI::Class::Storage::DBI>, L<DBD::Multi>, L<DBI>
+Disconnect all the storages.
 
+=cut
+
+sub disconnect {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->disconnect(@_);
+  }
+}
+
+=head1 GOTCHAS
+
+Because replicants can lag behind the master, you must take care to force read
+queries to the master when you need realtime data integrity.  For example, if
+you insert a row and then immediately re-read it from the database (say, by
+doing $row->discard_changes), or if you insert a row and then immediately build
+a query that expects that row to be available, you should force the master to
+handle the reads.  Otherwise, due to the lag, there is no certainty your data
+will be in the expected state.
+
+For data integrity, all transactions automatically use the master storage for
+all read and write queries.  Using a transaction is the preferred and recommended
+method to force the master to handle all read queries.
+
+Otherwise, you can force a single query to use the master with the 'force_pool'
+attribute:
+
+  my $row = $resultset->search(undef, {force_pool=>'master'})->find($pk);
+
+This attribute will safely be ignored by non-replicated storages, so you can
+use the same code for both types of systems.
+
+Lastly, you can use the L</execute_reliably> method, which works very much like
+a transaction.
+
+For debugging, you can turn replication on and off with the methods
+L</set_reliable_storage> and L</set_balanced_storage>; however, this operates
+at a global level and is not suitable if you have a shared Schema object being
+used by multiple processes, such as on a web application server.  You can get
+around this limitation by using the Schema clone method.
+
+  my $new_schema = $schema->clone;
+  $new_schema->set_reliable_storage;
+  
+  ## $new_schema will use only the Master storage for all reads/writes while
+  ## the $schema object will use replicated storage.
+
 =head1 AUTHOR
 
-Norbert Csongrádi <bert at cpan.org>
+  John Napiorkowski <john.napiorkowski at takkle.com>
 
-Peter Siklósi <einon at einon.hu>
+Based on code originated by:
 
-John Napiorkowski <john.napiorkowski at takkle.com>
+  Norbert Csongrádi <bert at cpan.org>
+  Peter Siklósi <einon at einon.hu>
 
 =head1 LICENSE
 
@@ -276,4 +745,6 @@
 
 =cut
 
+__PACKAGE__->meta->make_immutable;
+
 1;

Modified: DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/mysql.pm
===================================================================
--- DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/mysql.pm	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI/mysql.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -33,7 +33,16 @@
 
     $self->dbh->do("ROLLBACK TO SAVEPOINT $name")
 }
+ 
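+## 'show slave status' reports the state of MySQL replication; we count this
+## node as replicating only when both the IO and SQL threads report 'Yes'.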
+sub is_replicating {
+    my $status = shift->dbh->selectrow_hashref('show slave status');
+    return ($status->{Slave_IO_Running} eq 'Yes') && ($status->{Slave_SQL_Running} eq 'Yes');
+}
 
+sub lag_behind_master {
+    return shift->dbh->selectrow_hashref('show slave status')->{Seconds_Behind_Master};
+}
+
 1;
 
 =head1 NAME

Modified: DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI.pm
===================================================================
--- DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI.pm	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/lib/DBIx/Class/Storage/DBI.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -1063,6 +1063,7 @@
 
     if ( $self->debug ) {
         @bind = $self->_fix_bind_params(@bind);
+        
         $self->debugobj->query_start( $sql, @bind );
     }
 }
@@ -1707,6 +1708,31 @@
     }
 }
 
+=head2 is_replicating
+
+A boolean that reports if a particular L<DBIx::Class::Storage::DBI> is set to
+replicate from a master database.  Default is undef, which is the result
+returned by databases that don't support replication.
+
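+For example, a minimal sketch given a connected storage in $storage
+(illustrative; see also L</lag_behind_master> below):
+
+  if( $storage->is_replicating ) {
+    warn "lag behind master: ", $storage->lag_behind_master;
+  }
+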
+=cut
+
+sub is_replicating {
+    return;
+}
+
+=head2 lag_behind_master
+
+Returns a number that represents a certain amount of lag behind a master db
+when a given storage is replicating.  The number is database dependent, but
+starts at zero and increases with the amount of lag.  Default is undef.
+
+=cut
+
+sub lag_behind_master {
+    return;
+}
+
 sub DESTROY {
   my $self = shift;
   return if !$self->_dbh;

Modified: DBIx-Class/0.08/trunk/lib/SQL/Translator/Parser/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/trunk/lib/SQL/Translator/Parser/DBIx/Class.pm	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/lib/SQL/Translator/Parser/DBIx/Class.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -129,8 +129,12 @@
             my $othertable = $source->related_source($rel);
             my $rel_table = $othertable->name;
 
+            # Force the order of @cond to match the order of ->add_columns
+            my $idx;
+            my %other_columns_idx = map {'foreign.'.$_ => ++$idx } $othertable->columns;            
+            my @cond = sort { $other_columns_idx{$a} <=> $other_columns_idx{$b} } keys(%{$rel_info->{cond}});
+      
             # Get the key information, mapping off the foreign/self markers
-            my @cond = keys(%{$rel_info->{cond}});
             my @refkeys = map {/^\w+\.(\w+)$/} @cond;
             my @keys = map {$rel_info->{cond}->{$_} =~ /^\w+\.(\w+)$/} @cond;
 

Modified: DBIx-Class/0.08/trunk/t/03podcoverage.t
===================================================================
--- DBIx-Class/0.08/trunk/t/03podcoverage.t	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/t/03podcoverage.t	2008-07-21 21:09:21 UTC (rev 4601)
@@ -104,9 +104,8 @@
 
     'DBIx::Class::Schema::Versioned' => { ignore => [ qw(connection) ] },
 
-# must kill authors.
-
-    'DBIx::Class::Storage::DBI::Replicated' => { skip => 1 },
+# don't bother since it's heavily deprecated
+    'DBIx::Class::ResultSetManager' => { skip => 1 },
 };
 
 foreach my $module (@modules) {

Modified: DBIx-Class/0.08/trunk/t/71mysql.t
===================================================================
--- DBIx-Class/0.08/trunk/t/71mysql.t	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/t/71mysql.t	2008-07-21 21:09:21 UTC (rev 4601)
@@ -116,5 +116,5 @@
 
 # clean up our mess
 END {
-    $dbh->do("DROP TABLE artist") if $dbh;
-}
+    #$dbh->do("DROP TABLE artist") if $dbh;
+}
\ No newline at end of file

Modified: DBIx-Class/0.08/trunk/t/77prefetch.t
===================================================================
--- DBIx-Class/0.08/trunk/t/77prefetch.t	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/t/77prefetch.t	2008-07-21 21:09:21 UTC (rev 4601)
@@ -227,7 +227,7 @@
 $schema->storage->debug(1);
 
 my $tree_like =
-     $schema->resultset('TreeLike')->find(4,
+     $schema->resultset('TreeLike')->find(5,
        { join     => { parent => { parent => 'parent' } },
          prefetch => { parent => { parent => 'parent' } } });
 
@@ -244,21 +244,21 @@
 
 cmp_ok($queries, '==', 1, 'Only one query run');
 
-$tree_like = $schema->resultset('TreeLike')->search({'me.id' => 1});
+$tree_like = $schema->resultset('TreeLike')->search({'me.id' => 2});
 $tree_like = $tree_like->search_related('children')->search_related('children')->search_related('children')->first;
 is($tree_like->name, 'quux', 'Tree search_related ok');
 
 $tree_like = $schema->resultset('TreeLike')->search_related('children',
-    { 'children.id' => 2, 'children_2.id' => 3 },
+    { 'children.id' => 3, 'children_2.id' => 4 },
     { prefetch => { children => 'children' } }
   )->first;
 is(eval { $tree_like->children->first->children->first->name }, 'quux',
    'Tree search_related with prefetch ok');
 
 $tree_like = eval { $schema->resultset('TreeLike')->search(
-    { 'children.id' => 2, 'children_2.id' => 5 }, 
+    { 'children.id' => 3, 'children_2.id' => 6 }, 
     { join => [qw/children children/] }
-  )->search_related('children', { 'children_4.id' => 6 }, { prefetch => 'children' }
+  )->search_related('children', { 'children_4.id' => 7 }, { prefetch => 'children' }
   )->first->children->first; };
 is(eval { $tree_like->name }, 'fong', 'Tree with multiple has_many joins ok');
 

Modified: DBIx-Class/0.08/trunk/t/86sqlt.t
===================================================================
--- DBIx-Class/0.08/trunk/t/86sqlt.t	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/t/86sqlt.t	2008-07-21 21:09:21 UTC (rev 4601)
@@ -59,14 +59,14 @@
   fourkeys_to_twokeys => [
     {
       'display' => 'fourkeys_to_twokeys->twokeys',
-      'name' => 'fourkeys_to_twokeys_fk_t_cd_t_artist', 'index_name' => 'fourkeys_to_twokeys_idx_t_cd_t_artist',
+      'name' => 'fourkeys_to_twokeys_fk_t_artist_t_cd', 'index_name' => 'fourkeys_to_twokeys_idx_t_artist_t_cd',
       'selftable' => 'fourkeys_to_twokeys', 'foreigntable' => 'twokeys', 
       'selfcols'  => ['t_artist', 't_cd'], 'foreigncols' => ['artist', 'cd'], 
       on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 1,
     },
     {
-      'display' => 'fourkeys_to_twokeys->fourkeys', 'index_name' => 'fourkeys_to_twokeys_idx_f_foo_f_goodbye_f_hello_f_bar',
-      'name' => 'fourkeys_to_twokeys_fk_f_foo_f_goodbye_f_hello_f_bar',
+      'display' => 'fourkeys_to_twokeys->fourkeys', 'index_name' => 'fourkeys_to_twokeys_idx_f_foo_f_bar_f_hello_f_goodbye',
+      'name' => 'fourkeys_to_twokeys_fk_f_foo_f_bar_f_hello_f_goodbye',
       'selftable' => 'fourkeys_to_twokeys', 'foreigntable' => 'fourkeys', 
       'selfcols'  => [qw(f_foo f_bar f_hello f_goodbye)],
       'foreigncols' => [qw(foo bar hello goodbye)], 
@@ -199,7 +199,7 @@
       'display' => 'forceforeign->artist',
       'name' => 'forceforeign_fk_artist', 'index_name' => 'forceforeign_idx_artist',
       'selftable' => 'forceforeign', 'foreigntable' => 'artist', 
-      'selfcols'  => ['artist'], 'foreigncols' => ['artist_id'], 
+      'selfcols'  => ['artist'], 'foreigncols' => ['artistid'], 
       on_delete => '', on_update => '', deferrable => 1,
     },
   ],

Modified: DBIx-Class/0.08/trunk/t/93storage_replication.t
===================================================================
--- DBIx-Class/0.08/trunk/t/93storage_replication.t	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/t/93storage_replication.t	2008-07-21 21:09:21 UTC (rev 4601)
@@ -2,265 +2,575 @@
 use warnings;
 use lib qw(t/lib);
 use Test::More;
+use Test::Exception;
+use DBICTest;
 
 BEGIN {
-    eval "use DBD::Multi";
+    eval "use Moose; use Test::Moose";
     plan $@
-        ? ( skip_all => 'needs DBD::Multi for testing' )
-        : ( tests => 20 );
-}	
+        ? ( skip_all => 'needs Moose for testing' )
+        : ( tests => 79 );
+}
 
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+use_ok 'DBIx::Class::Storage::DBI::Replicated';
+
+=head1 HOW TO USE
+
+    This is a test of the replicated storage system.  This will work in one of
+    two ways: either it will try to fake replication with a couple of SQLite DBs
+    and creative use of copy, or if you define a couple of %ENV vars correctly
+    it will try to test against those.  If you do that, it will assume the setup
+    is properly replicating.  Your results may vary, but I have demonstrated this
+    to work with mysql native replication.
+    
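+    The custom case reads these environment variables:
+
+        DBICTEST_DSN            ## also switches the tests into custom mode
+        DBICTEST_SLAVE0_DSN, DBICTEST_SLAVE0_DBUSER, DBICTEST_SLAVE0_DBPASS
+        DBICTEST_SLAVE1_DSN, DBICTEST_SLAVE1_DBUSER, DBICTEST_SLAVE1_DBPASS
+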
+=cut
+
+
 ## ----------------------------------------------------------------------------
 ## Build a class to hold all our required testing data and methods.
 ## ----------------------------------------------------------------------------
 
-TESTSCHEMACLASS: {
-	
-	package DBIx::Class::DBI::Replicated::TestReplication;
+TESTSCHEMACLASSES: {
 
-	use DBI;	
-	use DBICTest;
-	use File::Copy;
-	
-	## Create a constructor
-	
+    ## --------------------------------------------------------------------- ##
+    ## Create an object to contain your replicated stuff.
+    ## --------------------------------------------------------------------- ##
+    
+    package DBIx::Class::DBI::Replicated::TestReplication;
+   
+    use DBICTest;
+    use base qw/Class::Accessor::Fast/;
+    
+    __PACKAGE__->mk_accessors( qw/schema/ );
+
+    ## Initialize the object
+    
 	sub new {
-		my $class = shift @_;
-		my %params = @_;
-		
-		my $self = bless {
-			db_paths => $params{db_paths},
-			dsns => $class->init_dsns(%params),
-			schema=>$class->init_schema,
-		}, $class;
-		
-		$self->connect;
-		return $self;
-	}
+	    my $class = shift @_;
+	    my $self = $class->SUPER::new(@_);
 	
-	## get the DSNs.  We build this up from the list of file paths
-	
-	sub init_dsns {
-		my $class = shift @_;
-		my %params = @_;
-		my $db_paths = $params{db_paths};
-
-		my @dsn = map {
-			"dbi:SQLite:${_}";
-		} @$db_paths;
-		
-		return \@dsn;
+	    $self->schema( $self->init_schema );
+	    return $self;
 	}
+    
+    ## Get the Schema and set the replication storage type
+    
+    sub init_schema {
+        my $class = shift @_;
+        
+        my $schema = DBICTest->init_schema(
+            storage_type=>{
+            	'::DBI::Replicated' => {
+            		balancer_type=>'::Random',
+                    balancer_args=>{
+                    	auto_validate_every=>100,
+                    },
+            	}
+            },
+            deploy_args=>{
+                   add_drop_table => 1,
+            },
+        );
 
-	## get the Schema and set the replication storage type
+        return $schema;
+    }
+    
+    sub generate_replicant_connect_info {}
+    sub replicate {}
+    sub cleanup {}
+
+  
+    ## --------------------------------------------------------------------- ##
+    ## Subclass for when you are using SQLite for testing, this provides a fake
+    ## replication support.
+    ## --------------------------------------------------------------------- ##
+        
+    package DBIx::Class::DBI::Replicated::TestReplication::SQLite;
+
+    use DBICTest;
+    use File::Copy;    
+    use base 'DBIx::Class::DBI::Replicated::TestReplication';
+    
+    __PACKAGE__->mk_accessors( qw/master_path slave_paths/ );
+    
+    ## Set the master path from DBICTest
+    
+	sub new {
+	    my $class = shift @_;
+	    my $self = $class->SUPER::new(@_);
 	
-	sub init_schema {
-		my $class = shift @_;
-		my $schema = DBICTest->init_schema();
-		$schema->storage_type( '::DBI::Replicated' );
-		
-		return $schema;
-	}
+	    $self->master_path( DBICTest->_sqlite_dbfilename );
+	    $self->slave_paths([
+            "t/var/DBIxClass_slave1.db",
+            "t/var/DBIxClass_slave2.db",    
+        ]);
+        
+	    return $self;
+	}    
 	
-	## connect the Schema
-	
-	sub connect {
-		my $self = shift @_;
-		my ($master, @slaves) = @{$self->{dsns}};
-		my $master_connect_info = [$master, '','', {AutoCommit=>1, PrintError=>0}];
-		
-		my @slavesob;
-		foreach my $slave (@slaves)
-		{
-			my $dbh = shift @{$self->{slaves}}
-			 || DBI->connect($slave,"","",{PrintError=>0, PrintWarn=>0});
-			
-			push @{$master_connect_info->[-1]->{slaves_connect_info}},
-			 [$dbh, '','',{priority=>10}];
-			 
-			push @slavesob,
-			 $dbh;
-		}
-		
-		## Keep track of the created slave databases
-		$self->{slaves} = \@slavesob;
-		
-		$self
-			->{schema}
-			->connect(@$master_connect_info);
-	}
-	
-	## replication
-	
-	sub replicate {
-		my $self = shift @_;
-		my ($master, @slaves) = @{$self->{db_paths}};
-		
-		foreach my $slave (@slaves) {
-			copy($master, $slave);
-		}
-	}
-	
-	## Cleanup afer ourselves.
-	
-	sub cleanup {
-		my $self = shift @_;
-		my ($master, @slaves) = @{$self->{db_paths}};
-		
-		foreach my $slave (@slaves) {
-			unlink $slave;
-		}		
-	}
-	
-	## Force a reconnection
-	
-	sub reconnect {
-		my $self = shift @_;
-		my $schema = $self->connect;
-		$self->{schema} = $schema;
-		return $schema;
-	}
+    ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
+    ## $storage->connect_info to be used for connecting replicants.
+    
+    sub generate_replicant_connect_info {
+        my $self = shift @_;
+        my @dsn = map {
+            "dbi:SQLite:${_}";
+        } @{$self->slave_paths};
+        
+        return map { [$_,'','',{AutoCommit=>1}] } @dsn;
+    }
+    
+    ## Do a 'good enough' replication by copying the master dbfile over each of
+    ## the slave dbfiles.  If the master is SQLite we do this, otherwise we
+    ## just do a one second pause to let the slaves catch up.
+    
+    sub replicate {
+        my $self = shift @_;
+        foreach my $slave (@{$self->slave_paths}) {
+            copy($self->master_path, $slave);
+        }
+    }
+    
+    ## Cleanup after ourselves.  Unlink all the slave paths.
+    
+    sub cleanup {
+        my $self = shift @_;
+        foreach my $slave (@{$self->slave_paths}) {
+            unlink $slave;
+        }     
+    }
+    
+    ## --------------------------------------------------------------------- ##
+    ## Subclass for when you are setting the databases via custom export vars
+    ## This is for when you have a replicating database setup that you are
+    ## going to test against.  You'll need to define the correct $ENV and have
+    ## two slave databases to test against, as well as a replication system
+    ## that will replicate in less than 1 second.
+    ## --------------------------------------------------------------------- ##
+        
+    package DBIx::Class::DBI::Replicated::TestReplication::Custom; 
+    use base 'DBIx::Class::DBI::Replicated::TestReplication';
+    
+    ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
+    ## $storage->connect_info to be used for connecting replicants.
+    
+    sub generate_replicant_connect_info { 
+        return (
+            [$ENV{"DBICTEST_SLAVE0_DSN"}, $ENV{"DBICTEST_SLAVE0_DBUSER"}, $ENV{"DBICTEST_SLAVE0_DBPASS"}, {AutoCommit => 1}],
+            [$ENV{"DBICTEST_SLAVE1_DSN"}, $ENV{"DBICTEST_SLAVE1_DBUSER"}, $ENV{"DBICTEST_SLAVE1_DBPASS"}, {AutoCommit => 1}],           
+        );
+    }
+    
+    ## pause a bit to let the replication catch up 
+    
+    sub replicate {
+    	sleep 1;
+    } 
 }
 
 ## ----------------------------------------------------------------------------
 ## Create an object and run some tests
 ## ----------------------------------------------------------------------------
 
-my %params = (
-	db_paths => [
-		"t/var/DBIxClass.db",
-		"t/var/DBIxClass_slave1.db",
-		"t/var/DBIxClass_slave2.db",
-	],
-);
+## The first bunch of tests is basic; just make sure all the bits are behaving
 
-ok my $replicate = DBIx::Class::DBI::Replicated::TestReplication->new(%params)
-	=> 'Created a replication object';
-	
-isa_ok $replicate->{schema}
-	=> 'DBIx::Class::Schema';
+my $replicated_class = DBICTest->has_custom_dsn ?
+    'DBIx::Class::DBI::Replicated::TestReplication::Custom' :
+    'DBIx::Class::DBI::Replicated::TestReplication::SQLite';
 
+ok my $replicated = $replicated_class->new
+    => 'Created a replication object';
+    
+isa_ok $replicated->schema
+    => 'DBIx::Class::Schema';
+    
+isa_ok $replicated->schema->storage
+    => 'DBIx::Class::Storage::DBI::Replicated';
+
+ok $replicated->schema->storage->meta
+    => 'has a meta object';
+    
+isa_ok $replicated->schema->storage->master
+    => 'DBIx::Class::Storage::DBI';
+    
+isa_ok $replicated->schema->storage->pool
+    => 'DBIx::Class::Storage::DBI::Replicated::Pool';
+    
+does_ok $replicated->schema->storage->balancer
+    => 'DBIx::Class::Storage::DBI::Replicated::Balancer'; 
+
+ok my @replicant_connects = $replicated->generate_replicant_connect_info
+    => 'got replication connect information';
+
+ok my @replicated_storages = $replicated->schema->storage->connect_replicants(@replicant_connects)
+    => 'Created some storages suitable for replicants';
+    
+isa_ok $replicated->schema->storage->balancer->current_replicant
+    => 'DBIx::Class::Storage::DBI';
+    
+ok $replicated->schema->storage->pool->has_replicants
+    => 'does have replicants';     
+
+is $replicated->schema->storage->pool->num_replicants => 2
+    => 'has two replicants';
+       
+does_ok $replicated_storages[0]
+    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated_storages[1]
+    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+    
+my @replicant_names = keys %{$replicated->schema->storage->replicants};
+
+does_ok $replicated->schema->storage->replicants->{$replicant_names[0]}
+    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated->schema->storage->replicants->{$replicant_names[1]}
+    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';  
+
 ## Add some info to the database
 
-$replicate
-	->{schema}
-	->populate('Artist', [
-		[ qw/artistid name/ ],
-		[ 4, "Ozric Tentacles"],
-	]);
-			    
+$replicated
+    ->schema
+    ->populate('Artist', [
+        [ qw/artistid name/ ],
+        [ 4, "Ozric Tentacles"],
+    ]);
+                
 ## Make sure all the slaves have the table definitions
 
-$replicate->replicate;
+$replicated->replicate;
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
 
 ## Make sure we can read the data.
 
-ok my $artist1 = $replicate->{schema}->resultset('Artist')->find(4)
-	=> 'Created Result';
+ok my $artist1 = $replicated->schema->resultset('Artist')->find(4)
+    => 'Created Result';
 
 isa_ok $artist1
-	=> 'DBICTest::Artist';
-	
+    => 'DBICTest::Artist';
+    
 is $artist1->name, 'Ozric Tentacles'
-	=> 'Found expected name for first result';
+    => 'Found expected name for first result';
 
 ## Add some new rows that only the master will have.  This is because
 ## we overload any type of write operation so that it must hit the master
 ## database.
 
-$replicate
-	->{schema}
-	->populate('Artist', [
-		[ qw/artistid name/ ],
-		[ 5, "Doom's Children"],
-		[ 6, "Dead On Arrival"],
-		[ 7, "Watergate"],
-	]);
-	
-## Reconnect the database
-$replicate->reconnect;
+$replicated
+    ->schema
+    ->populate('Artist', [
+        [ qw/artistid name/ ],
+        [ 5, "Doom's Children"],
+        [ 6, "Dead On Arrival"],
+        [ 7, "Watergate"],
+    ]);
 
-## Alright, the database 'cluster' is not in a consistent state.  When we do
-## a read now we expect bad news
-
-is $replicate->{schema}->resultset('Artist')->find(5), undef
-	=> 'read after disconnect fails because it uses slave 1 which we have neglected to "replicate" yet';
-
 ## Make sure all the slaves have the table definitions
-$replicate->replicate;
+$replicated->replicate;
 
 ## Should find some data now
 
-ok my $artist2 = $replicate->{schema}->resultset('Artist')->find(5)
-	=> 'Sync succeed';
-	
+ok my $artist2 = $replicated->schema->resultset('Artist')->find(5)
+    => 'Sync succeed';
+    
 isa_ok $artist2
-	=> 'DBICTest::Artist';
-	
+    => 'DBICTest::Artist';
+    
 is $artist2->name, "Doom's Children"
-	=> 'Found expected name for first result';
+    => 'Found expected name for first result';
+
+## What happens when we disconnect all the replicants?
+
+is $replicated->schema->storage->pool->connected_replicants => 2
+    => "both replicants are connected";
+    
+$replicated->schema->storage->replicants->{$replicant_names[0]}->disconnect;
+$replicated->schema->storage->replicants->{$replicant_names[1]}->disconnect;
+
+is $replicated->schema->storage->pool->connected_replicants => 0
+    => "both replicants are now disconnected";
+
+## All these should pass, since the database should automatically reconnect
+
+ok my $artist3 = $replicated->schema->resultset('Artist')->find(6)
+    => 'Still finding stuff.';
+    
+isa_ok $artist3
+    => 'DBICTest::Artist';
+    
+is $artist3->name, "Dead On Arrival"
+    => 'Found expected name for first result';
+
+is $replicated->schema->storage->pool->connected_replicants => 1
+    => "One replicant reconnected to handle the job";
+    
+## What happens when we try to select something that doesn't exist?
+
+ok ! $replicated->schema->resultset('Artist')->find(666)
+    => 'Correctly failed to find something.';
+    
+## test the reliable option
+
+TESTRELIABLE: {
 	
-## What happens when we delete one of the slaves?
+	$replicated->schema->storage->set_reliable_storage;
+	
+	ok $replicated->schema->resultset('Artist')->find(2)
+	    => 'Read from master 1';
+	
+	ok $replicated->schema->resultset('Artist')->find(5)
+	    => 'Read from master 2';
+	    
+    $replicated->schema->storage->set_balanced_storage;	    
+	    
+	ok $replicated->schema->resultset('Artist')->find(3)
+        => 'Read from replicant';
+}
 
-ok my $slave1 = @{$replicate->{slaves}}[0]
-	=> 'Got Slave1';
+## Make sure when reliable goes out of scope, we are using replicants again
 
-ok $slave1->disconnect
-	=> 'disconnected slave1';
+ok $replicated->schema->resultset('Artist')->find(1)
+    => 'back to replicant 1.';
+    
+ok $replicated->schema->resultset('Artist')->find(2)
+    => 'back to replicant 2.';
 
-$replicate->reconnect;
+## set all the replicants to inactive, and make sure the balancer falls back to
+## the master.
 
-ok my $artist3 = $replicate->{schema}->resultset('Artist')->find(6)
-	=> 'Still finding stuff.';
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
+    
+ok $replicated->schema->resultset('Artist')->find(2)
+    => 'Fallback to master';
+
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
+
+ok $replicated->schema->resultset('Artist')->find(2)
+    => 'Returned to replicates';
+    
+## Getting slave status tests
+
+SKIP: {
+    ## We skip these tests unless you have custom replicants, since the default
+    ## sqlite based replication tests don't support these functions.
+    
+    skip 'Cannot Test Replicant Status on Non Replicating Database', 9
+     unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
+
+    $replicated->replicate; ## Give the slaves a chance to catch up.
+
+	ok $replicated->schema->storage->replicants->{$replicant_names[0]}->is_replicating
+	    => 'Replicants are replicating';
+	    
+	is $replicated->schema->storage->replicants->{$replicant_names[0]}->lag_behind_master, 0
+	    => 'Replicant is zero seconds behind master';
+	    
+	## Test the validate replicants
 	
-isa_ok $artist3
-	=> 'DBICTest::Artist';
+	$replicated->schema->storage->pool->validate_replicants;
 	
-is $artist3->name, "Dead On Arrival"
-	=> 'Found expected name for first result';
+	is $replicated->schema->storage->pool->active_replicants, 2
+	    => 'Still have 2 replicants after validation';
+	    
+	## Force the replicants to fail the validate test by requiring their lag to
+	## be negative (ie ahead of the master!)
 	
-## Let's delete all the slaves
+    $replicated->schema->storage->pool->maximum_lag(-10);
+    $replicated->schema->storage->pool->validate_replicants;
+    
+    is $replicated->schema->storage->pool->active_replicants, 0
+        => 'No way a replicant can be ahead of the master';
+        
+    ## Let's be fair to the replicants again.  Let them lag up to 5
+	
+    $replicated->schema->storage->pool->maximum_lag(5);
+    $replicated->schema->storage->pool->validate_replicants;
+    
+    is $replicated->schema->storage->pool->active_replicants, 2
+        => 'Both replicants in good standing again';	
+        
+	## Check auto validate
+	
+	is $replicated->schema->storage->balancer->auto_validate_every, 100
+	    => "Got the expected value for auto validate";
+	    
+	## This will make sure we auto validate every time
+	$replicated->schema->storage->balancer->auto_validate_every(0);
+
+	## set all the replicants to inactive, and make sure the balancer falls back to
+	## the master.
+
+	$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
+	$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
+
+	## Ok, now when we go to run a query, autovalidate SHOULD reconnect
+
+	is $replicated->schema->storage->pool->active_replicants => 0
+	    => "both replicants turned off";
+	    	
+	ok $replicated->schema->resultset('Artist')->find(5)
+	    => 'replicant reactivated';
+	    
+	is $replicated->schema->storage->pool->active_replicants => 2
+	    => "both replicants reactivated";        
+}
 
-ok my $slave2 = @{$replicate->{slaves}}[1]
-	=> 'Got Slave2';
+## Test the reliably callback
 
-ok $slave2->disconnect
-	=> 'Disconnected slave2';
+ok my $reliably = sub {
+	
+    ok $replicated->schema->resultset('Artist')->find(5)
+        => 'replicant reactivated';	
+	
+} => 'created coderef properly';
 
-$replicate->reconnect;
+$replicated->schema->storage->execute_reliably($reliably);
 
-## We expect an error now, since all the slaves are dead
+## Try something with an error
 
-eval {
-	$replicate->{schema}->resultset('Artist')->find(4)->name;
-};
+ok my $unreliably = sub {
+    
+    ok $replicated->schema->resultset('ArtistXX')->find(5)
+        => 'replicant reactivated'; 
+    
+} => 'created coderef properly';
 
-ok $@ => 'Got error when trying to find artistid 4';
+throws_ok {$replicated->schema->storage->execute_reliably($unreliably)} 
+    qr/Can't find source for ArtistXX/
+    => 'Bad coderef throws proper error';
+    
+## Make sure replication came back
 
-## This should also be an error
+ok $replicated->schema->resultset('Artist')->find(3)
+    => 'replicant reactivated';
+    
+## make sure transactions are set to execute_reliably
 
-eval {
-	my $artist4 = $replicate->{schema}->resultset('Artist')->find(7);	
-};
+ok my $transaction = sub {
+	
+	my $id = shift @_;
+	
+	$replicated
+	    ->schema
+	    ->populate('Artist', [
+	        [ qw/artistid name/ ],
+	        [ $id, "Children of the Grave"],
+	    ]);
+	    
+    ok my $result = $replicated->schema->resultset('Artist')->find($id)
+        => 'Found expected artist';
+        
+    ok my $more = $replicated->schema->resultset('Artist')->find(1)
+        => 'Found expected artist again';
+        
+   return ($result, $more);
+   
+} => 'Created a coderef properly';
 
-ok $@ => 'Got read errors after everything failed';
+## Test the transaction with multi return
+{
+	ok my @return = $replicated->schema->txn_do($transaction, 666)
+	    => 'did transaction';
+	    
+	    is $return[0]->id, 666
+	        => 'first returned value is correct';
+	        
+	    is $return[1]->id, 1
+	        => 'second returned value is correct';
+}
 
-## make sure ->connect_info returns something sane
+## Test that asking for single return works
+{
+	ok my $return = $replicated->schema->txn_do($transaction, 777)
+	    => 'did transaction';
+	    
+	    is $return->id, 777
+	        => 'first returned value is correct';
+}
 
-ok $replicate->{schema}->storage->connect_info
-    => 'got something out of ->connect_info';
+## Test transaction returning a single value
 
-## Force a connection to the write source for testing.
+{
+	ok my $result = $replicated->schema->txn_do(sub {
+		ok my $more = $replicated->schema->resultset('Artist')->find(1)
+		=> 'found inside a transaction';
+		return $more;
+	}) => 'successfully processed transaction';
+	
+	is $result->id, 1
+	   => 'Got expected single result from transaction';
+}
 
-$replicate->{schema}->storage($replicate->{schema}->storage->write_source);
+## Make sure replication came back
 
-## What happens when we do a find for something that doesn't exist?
+ok $replicated->schema->resultset('Artist')->find(1)
+    => 'replicant reactivated';
+    
+## Test Discard changes
 
-ok ! $replicate->{schema}->resultset('Artist')->find(666)
-    => 'Correctly did not find a bad artist id';
+{
+	ok my $artist = $replicated->schema->resultset('Artist')->find(2)
+	    => 'got an artist to test discard changes';
+	    
+	ok $artist->discard_changes
+	   => 'properly discard changes';
+}
 
+## Test some edge cases, like trying to do a transaction inside a transaction, etc
+
+{
+    ok my $result = $replicated->schema->txn_do(sub {
+    	return $replicated->schema->txn_do(sub {
+	        ok my $more = $replicated->schema->resultset('Artist')->find(1)
+	        => 'found inside a transaction inside a transaction';
+	        return $more;    		
+    	});
+    }) => 'successfully processed transaction';
+    
+    is $result->id, 1
+       => 'Got expected single result from transaction';	  
+}
+
+{
+    ok my $result = $replicated->schema->txn_do(sub {
+    	return $replicated->schema->storage->execute_reliably(sub {
+	    	return $replicated->schema->txn_do(sub {
+	    		return $replicated->schema->storage->execute_reliably(sub {
+			        ok my $more = $replicated->schema->resultset('Artist')->find(1)
+			        => 'found inside crazy deep transactions and execute_reliably';
+			        return $more; 	    			
+	    		});
+	    	});    	
+    	});
+    }) => 'successfully processed transaction';
+    
+    is $result->id, 1
+       => 'Got expected single result from transaction';	  
+}     
+
+## Test the force_pool resultset attribute.
+
+{
+	ok my $artist_rs = $replicated->schema->resultset('Artist')
+        => 'got artist resultset';
+	   
+	## Turn on Forced Pool Storage
+	ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>'master'})
+        => 'Created a resultset using force_pool storage';
+	   
+    ok my $artist = $reliable_artist_rs->find(2) 
+        => 'got an artist result via force_pool storage';
+}
+
 ## Delete the old database files
-$replicate->cleanup;
+$replicated->cleanup;
 
 
 

Modified: DBIx-Class/0.08/trunk/t/lib/DBICTest/Schema/ForceForeign.pm
===================================================================
--- DBIx-Class/0.08/trunk/t/lib/DBICTest/Schema/ForceForeign.pm	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/t/lib/DBICTest/Schema/ForceForeign.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -14,7 +14,7 @@
 # since it uses the PK
 __PACKAGE__->might_have(
 			'artist_1', 'DBICTest::Schema::Artist', {
-			    'foreign.artist_id' => 'self.artist',
+			    'foreign.artistid' => 'self.artist',
 			}, {
 			    is_foreign_key_constraint => 1,
 			},

Modified: DBIx-Class/0.08/trunk/t/lib/DBICTest/Schema/TreeLike.pm
===================================================================
--- DBIx-Class/0.08/trunk/t/lib/DBICTest/Schema/TreeLike.pm	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/t/lib/DBICTest/Schema/TreeLike.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -6,7 +6,7 @@
 __PACKAGE__->table('treelike');
 __PACKAGE__->add_columns(
   'id' => { data_type => 'integer', is_auto_increment => 1 },
-  'parent' => { data_type => 'integer' },
+  'parent' => { data_type => 'integer' , is_nullable=>1},
   'name' => { data_type => 'varchar',
     size      => 100,
  },
@@ -16,4 +16,13 @@
                           { 'foreign.id' => 'self.parent' });
 __PACKAGE__->has_many('children', 'TreeLike', { 'foreign.parent' => 'self.id' });
 
+## Since this is a self-referential table we need to do a post-deploy hook and
+## get some data in while constraints are off
+
+sub sqlt_deploy_hook {
+  my ($self, $sqlt_table) = @_;
+
+  ## We don't seem to need this anymore, but keeping it for the moment
+  ## $sqlt_table->add_index(name => 'idx_name', fields => ['name']);
+}
 1;

Modified: DBIx-Class/0.08/trunk/t/lib/DBICTest/Schema/TwoKeys.pm
===================================================================
--- DBIx-Class/0.08/trunk/t/lib/DBICTest/Schema/TwoKeys.pm	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/t/lib/DBICTest/Schema/TwoKeys.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -10,7 +10,11 @@
 );
 __PACKAGE__->set_primary_key(qw/artist cd/);
 
-__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist' );
+__PACKAGE__->belongs_to(
+    artist => 'DBICTest::Schema::Artist',
+    {'foreign.artistid'=>'self.artist'},
+);
+
 __PACKAGE__->belongs_to( cd => 'DBICTest::Schema::CD', undef, { is_deferrable => 0, add_fk_index => 0 } );
 
 __PACKAGE__->has_many(

Modified: DBIx-Class/0.08/trunk/t/lib/DBICTest.pm
===================================================================
--- DBIx-Class/0.08/trunk/t/lib/DBICTest.pm	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/t/lib/DBICTest.pm	2008-07-21 21:09:21 UTC (rev 4601)
@@ -29,6 +29,10 @@
   my $schema = DBICTest->init_schema(
     no_deploy=>1,
     no_populate=>1,
+    storage_type=>'::DBI::Replicated',
+    storage_type_args=>{
+    	balancer_type=>'DBIx::Class::Storage::DBI::Replicated::Balancer::Random'
+    },
   );
 
 This method removes the test SQLite database in t/var/DBIxClass.db 
@@ -42,9 +46,17 @@
 
 =cut
 
+sub has_custom_dsn {
+    return $ENV{"DBICTEST_DSN"} ? 1 : 0;
+}
+
+sub _sqlite_dbfilename {
+    return "t/var/DBIxClass.db";
+}
+
 sub _database {
     my $self = shift;
-    my $db_file = "t/var/DBIxClass.db";
+    my $db_file = $self->_sqlite_dbfilename;
 
     unlink($db_file) if -e $db_file;
     unlink($db_file . "-journal") if -e $db_file . "-journal";
@@ -72,13 +84,18 @@
     } else {
       $schema = DBICTest::Schema->compose_namespace('DBICTest');
     }
+    if( $args{storage_type}) {
+        $schema->storage_type($args{storage_type});
+    }
     if ( !$args{no_connect} ) {
       $schema = $schema->connect($self->_database);
-      $schema->storage->on_connect_do(['PRAGMA synchronous = OFF']);
+      $schema->storage->on_connect_do(['PRAGMA synchronous = OFF'])
+       unless $self->has_custom_dsn;
     }
     if ( !$args{no_deploy} ) {
-        __PACKAGE__->deploy_schema( $schema );
-        __PACKAGE__->populate_schema( $schema ) if( !$args{no_populate} );
+        __PACKAGE__->deploy_schema( $schema, $args{deploy_args} );
+        __PACKAGE__->populate_schema( $schema )
+         if( !$args{no_populate} );
     }
     return $schema;
 }
@@ -98,9 +115,10 @@
 sub deploy_schema {
     my $self = shift;
     my $schema = shift;
+    my $args = shift || {};
 
-    if ($ENV{"DBICTEST_SQLT_DEPLOY"}) {
-        return $schema->deploy();
+    if ($ENV{"DBICTEST_SQLT_DEPLOY"}) { 
+        $schema->deploy($args);    
     } else {
         open IN, "t/lib/sqlite.sql";
         my $sql;
@@ -108,6 +126,7 @@
         close IN;
         ($schema->storage->dbh->do($_) || print "Error on SQL: $_\n") for split(/;\n/, $sql);
     }
+    return;
 }
 
 =head2 populate_schema
@@ -208,15 +227,16 @@
         [ 1, 2 ],
         [ 1, 3 ],
     ]);
-
+    
     $schema->populate('TreeLike', [
         [ qw/id parent name/ ],
-        [ 1, 0, 'foo'  ],
-        [ 2, 1, 'bar'  ],
-        [ 5, 1, 'blop' ],
-        [ 3, 2, 'baz'  ],
-        [ 4, 3, 'quux' ],
-        [ 6, 2, 'fong'  ],
+        [ 1, undef, 'root' ],        
+        [ 2, 1, 'foo'  ],
+        [ 3, 2, 'bar'  ],
+        [ 6, 2, 'blop' ],
+        [ 4, 3, 'baz'  ],
+        [ 5, 4, 'quux' ],
+        [ 7, 3, 'fong'  ],
     ]);
 
     $schema->populate('Track', [
@@ -258,7 +278,15 @@
         [ 1, "Tools" ],
         [ 2, "Body Parts" ],
     ]);
-
+    
+    $schema->populate('TypedObject', [
+        [ qw/objectid type value/ ],
+        [ 1, "pointy", "Awl" ],
+        [ 2, "round", "Bearing" ],
+        [ 3, "pointy", "Knife" ],
+        [ 4, "pointy", "Tooth" ],
+        [ 5, "round", "Head" ],
+    ]);
     $schema->populate('CollectionObject', [
         [ qw/collection object/ ],
         [ 1, 1 ],
@@ -268,15 +296,6 @@
         [ 2, 5 ],
     ]);
 
-    $schema->populate('TypedObject', [
-        [ qw/objectid type value/ ],
-        [ 1, "pointy", "Awl" ],
-        [ 2, "round", "Bearing" ],
-        [ 3, "pointy", "Knife" ],
-        [ 4, "pointy", "Tooth" ],
-        [ 5, "round", "Head" ],
-    ]);
-
     $schema->populate('Owners', [
         [ qw/ownerid name/ ],
         [ 1, "Newton" ],

Modified: DBIx-Class/0.08/trunk/t/lib/sqlite.sql
===================================================================
--- DBIx-Class/0.08/trunk/t/lib/sqlite.sql	2008-07-21 21:02:05 UTC (rev 4600)
+++ DBIx-Class/0.08/trunk/t/lib/sqlite.sql	2008-07-21 21:09:21 UTC (rev 4601)
@@ -151,7 +151,7 @@
 --
 CREATE TABLE treelike (
   id INTEGER PRIMARY KEY NOT NULL,
-  parent integer NOT NULL,
+  parent integer NULL,
   name varchar(100) NOT NULL
 );
 




More information about the Bast-commits mailing list