[Bast-commits] r4608 - in DBIx-Class/0.09/trunk: . lib/DBIx lib/DBIx/Class lib/DBIx/Class/InflateColumn lib/DBIx/Class/Manual lib/DBIx/Class/Relationship lib/DBIx/Class/ResultClass lib/DBIx/Class/Schema lib/DBIx/Class/Storage lib/DBIx/Class/Storage/DBI lib/DBIx/Class/Storage/DBI/Replicated lib/DBIx/Class/Storage/DBI/Replicated/Balancer lib/SQL/Translator/Parser/DBIx t t/cdbi-t t/lib t/lib/DBICTest t/lib/DBICTest/Schema

matthewt at dev.catalyst.perl.org
Wed Jul 23 22:59:29 BST 2008


Author: matthewt
Date: 2008-07-23 22:59:28 +0100 (Wed, 23 Jul 2008)
New Revision: 4608

Added:
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/Reading.pod
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
   DBIx-Class/0.09/trunk/t/51threadtxn.t
   DBIx-Class/0.09/trunk/t/96_is_deteministic_value.t
   DBIx-Class/0.09/trunk/t/99dbic_sqlt_parser.t
   DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/Dummy.pm
Removed:
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/AtQueryInterval.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/Job.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/QueryInterval.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/Role/
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replication.pm
   DBIx-Class/0.09/trunk/t/99schema_roles.t
   DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/LongColumns.pm
Modified:
   DBIx-Class/0.09/trunk/
   DBIx-Class/0.09/trunk/Changes
   DBIx-Class/0.09/trunk/Makefile.PL
   DBIx-Class/0.09/trunk/lib/DBIx/Class.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/InflateColumn/DateTime.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/Cookbook.pod
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/FAQ.pod
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/Troubleshooting.pod
   DBIx-Class/0.09/trunk/lib/DBIx/Class/PK.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Relationship.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Relationship/Base.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultClass/HashRefInflator.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultSet.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultSetManager.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultSource.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Row.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/Versioned.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/StartupCheck.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/mysql.pm
   DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/TxnScopeGuard.pm
   DBIx-Class/0.09/trunk/lib/SQL/Translator/Parser/DBIx/Class.pm
   DBIx-Class/0.09/trunk/t/03podcoverage.t
   DBIx-Class/0.09/trunk/t/40resultsetmanager.t
   DBIx-Class/0.09/trunk/t/51threads.t
   DBIx-Class/0.09/trunk/t/60core.t
   DBIx-Class/0.09/trunk/t/61findnot.t
   DBIx-Class/0.09/trunk/t/68inflate_has_a.t
   DBIx-Class/0.09/trunk/t/71mysql.t
   DBIx-Class/0.09/trunk/t/77prefetch.t
   DBIx-Class/0.09/trunk/t/81transactions.t
   DBIx-Class/0.09/trunk/t/86sqlt.t
   DBIx-Class/0.09/trunk/t/89inflate_datetime.t
   DBIx-Class/0.09/trunk/t/93storage_replication.t
   DBIx-Class/0.09/trunk/t/94versioning.t
   DBIx-Class/0.09/trunk/t/96multi_create.t
   DBIx-Class/0.09/trunk/t/cdbi-t/set_to_undef.t
   DBIx-Class/0.09/trunk/t/lib/DBICTest.pm
   DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema.pm
   DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/Event.pm
   DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/ForceForeign.pm
   DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/TreeLike.pm
   DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/TwoKeys.pm
   DBIx-Class/0.09/trunk/t/lib/sqlite.sql
Log:
 r18426 at agaton (orig r4297):  captainL | 2008-04-25 18:35:24 +0100
 stopped explicitly creating indexes that are auto-generated anyway
 r18427 at agaton (orig r4298):  jnapiorkowski | 2008-04-25 21:49:07 +0100
 renamed replication storage to replicated (since it's not really doing any replication itself)
 r18428 at agaton (orig r4299):  jnapiorkowski | 2008-04-25 23:33:29 +0100
 fixed up the replication test, added some tests for the dbd::multi problem of null results, fixed replicated.pm docs and a few typos
 r18433 at agaton (orig r4304):  jnapiorkowski | 2008-04-28 19:36:58 +0100
 minor formatting updates and typo fixes to the schema role tests
 r18587 at agaton (orig r4321):  matthewt | 2008-05-05 18:55:23 +0100
 reverse r4290 since we -do- -not- currently want these namespaces indexed
 r18592 at agaton (orig r4326):  jnapiorkowski | 2008-05-05 19:33:57 +0100
 removed query count stuff from trunk so we can play with this on a branch instead
 r18596 at agaton (orig r4330):  matthewt | 2008-05-05 20:59:31 +0100
 this was meant to be deleted with the rest of the Moose stuff
 r18597 at agaton (orig r4331):  matthewt | 2008-05-05 21:00:18 +0100
 make override follow Replication -> Replicated rename
 r18598 at agaton (orig r4332):  matthewt | 2008-05-05 21:00:49 +0100
 restore debolaz's cleanup patch since it's not what I thought it was and probably the right thing to do; mea culpa
 r18599 at agaton (orig r4333):  matthewt | 2008-05-05 21:22:17 +0100
 missed another package line
 r18600 at agaton (orig r4334):  matthewt | 2008-05-05 21:32:58 +0100
 improvements to the META.yml magic
 r18602 at agaton (orig r4336):  matthewt | 2008-05-05 21:46:21 +0100
 no author notes in Changes if you're already in CONTRIBUTORS please, once you have that and a commit bit you're just another poor bastard stuck maintaining an ORM, just like I am
 r18747 at agaton (orig r4339):  dwc | 2008-05-05 23:21:36 +0100
 Note that single expects a single row in the ResultSet docs
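 For reference, a minimal sketch of the behaviour being documented
 (resultset and condition are illustrative):

   # single() runs the query immediately and expects the condition to
   # match at most one row; for multi-row matches use search()/next()
   my $cd = $schema->resultset('CD')->search({ cdid => 1 })->single;
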
 r18755 at agaton (orig r4347):  jnapiorkowski | 2008-05-06 19:47:00 +0100
 changed the warning on DBIC::Storage::DBI->select_single so that it won't call fetch_* on an empty sth, updated tests for the above and added a todo test for the wrong count problem
 r18757 at agaton (orig r4349):  jnapiorkowski | 2008-05-06 19:55:17 +0100
 updated Changes to document Replicated Storage type changes
 r18762 at agaton (orig r4354):  ribasushi | 2008-05-07 14:33:50 +0100
 Fix a forgotten CDBICompat component relocation
 r18764 at agaton (orig r4356):  ribasushi | 2008-05-07 16:25:41 +0100
 Correct order of BEGIN/use so that skip_all does not produce failures
 r18765 at agaton (orig r4357):  ribasushi | 2008-05-07 17:37:56 +0100
 Various fixes to make testing with DBICTEST_SQLT_DEPLOY=1 possible
 r18768 at agaton (orig r4360):  nigel | 2008-05-08 14:21:33 +0100
 cascade_copy docs - takes from http://osdir.com/ml/lang.perl.modules.dbix-class/2007-03/msg00106.html
 r18769 at agaton (orig r4361):  nigel | 2008-05-08 14:22:44 +0100
 cascade_copy docs (format fixed)
 r18771 at agaton (orig r4363):  bricas | 2008-05-08 16:44:24 +0100
 add Test::Exception to deps (RT #34256). move testing modules to test_requires().
 r18772 at agaton (orig r4364):  bricas | 2008-05-08 21:08:18 +0100
 add RT ticket number to changes
 r18779 at agaton (orig r4371):  bricas | 2008-05-09 03:55:30 +0100
 pod fix (RT #32988)
 r18786 at agaton (orig r4378):  captainL | 2008-05-11 19:43:58 +0100
 added test for find or create related functionality in nested inserts
 r18787 at agaton (orig r4379):  captainL | 2008-05-11 20:03:27 +0100
 sanified new multi_create test
 r18789 at agaton (orig r4381):  ribasushi | 2008-05-12 12:09:20 +0100
 Add failing tests for missing safeguards for multilevel prefetch (don't know the internals well enough to fix the issue itself)
 r19222 at agaton (orig r4392):  castaway | 2008-05-19 22:57:25 +0100
 Added doc for "for => update" attribute, thanks StuartL.
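 A minimal sketch of the attribute as documented (resultset and
 condition are illustrative):

   # for => 'update' appends FOR UPDATE to the generated SELECT,
   # holding row locks until the surrounding transaction ends
   $schema->txn_do(sub {
     my @rows = $schema->resultset('Account')->search(
       { status => 'pending' },
       { for => 'update' },
     )->all;
     # rows stay locked until txn_do commits or rolls back
   });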
 
 r19223 at agaton (orig r4393):  castaway | 2008-05-19 22:59:16 +0100
 Oops, =cut after the pod not in the middle
 
 r19249 at agaton (orig r4419):  ash | 2008-05-27 11:41:44 +0100
 Update FAQ to mention behaviour of scalar refs w.r.t. update
 r19250 at agaton (orig r4420):  ash | 2008-05-27 13:49:09 +0100
 Add set_cache example to cookbook
 r19251 at agaton (orig r4421):  ash | 2008-05-27 13:50:10 +0100
 Fix var name typo in cookbook
 r19252 at agaton (orig r4422):  matthewt | 2008-05-27 14:31:58 +0100
 version bump, deprecated ResultSetManager
 r19333 at agaton (orig r4428):  matthewt | 2008-05-30 13:36:19 +0100
 fix SQL::Translator parser to add ${table}_idx_ on the front of index names to prevent clashes
 r19334 at agaton (orig r4429):  castaway | 2008-05-30 14:20:26 +0100
 Started doc standards doc in Manual::Reading.
 Fixed up belongs_to example to put more emphasis on the "column" part of "foreign_key_column" and corrected first paragraph.
 
 r19335 at agaton (orig r4430):  ash | 2008-05-30 14:29:32 +0100
 Make ignore_{constraint,index}_names default
 r19336 at agaton (orig r4431):  ash | 2008-05-30 14:34:39 +0100
 Update docs (and code changes that were missed from last commit, oops):
 Set ignore_constraint_names to default on
 r19339 at agaton (orig r4434):  matthewt | 2008-05-30 15:36:16 +0100
 throw exception for multi-has_many prefetch
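 A sketch of the case that now throws ('cds' and 'shows' stand in for
 any two has_many relationships at the same level):

   # prefetching two has_many rels at once multiplies the rows fetched
   # and produced wrong results, so r4434 raises an exception instead
   $schema->resultset('Artist')->search({}, {
     prefetch => [qw/cds shows/],    # both has_many => exception
   });
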
 r19340 at agaton (orig r4435):  ash | 2008-05-30 15:40:31 +0100
 Make irc/mailing list much more prominent in the docs
 r19341 at agaton (orig r4436):  ash | 2008-05-30 16:13:36 +0100
 Tidy up TxnScopeGuard and its tests a bit
 r19342 at agaton (orig r4437):  ribasushi | 2008-05-30 16:19:58 +0100
 Adjust prefetch tests to not blow up after r4434
 r19343 at agaton (orig r4438):  castaway | 2008-05-30 16:58:16 +0100
 Updating to new doc standard.
 Adding more examples of alternate calls.
 
 
 r19346 at agaton (orig r4441):  castaway | 2008-05-30 23:41:03 +0100
 Update docs for best practices and correctness.
 
 r19353 at agaton (orig r4448):  castaway | 2008-05-31 21:14:05 +0100
 Fix broken pod test (missing newline!)
 
 r19356 at agaton (orig r4451):  ash | 2008-06-01 11:28:14 +0100
 Prepare for new dev release
 r19365 at agaton (orig r4456):  castaway | 2008-06-02 19:46:12 +0100
 I suck, should read code while doccing it, not assume someone else got it right!
 
 r19373 at agaton (orig r4462):  ash | 2008-06-03 17:11:25 +0100
 Remove the length limit on identifiers - it doesn't belong in DBIx::Class
 A few doc fixes
 r19374 at agaton (orig r4463):  ash | 2008-06-03 19:15:27 +0100
 Remove Digest::SHA1 dep too
 r19608 at agaton (orig r4470):  blblack | 2008-06-05 07:57:09 +0100
 _verify_pid mods _dbh, so we must call it again here
 r19609 at agaton (orig r4471):  blblack | 2008-06-05 08:21:42 +0100
 remove a line from the threads test that had the potential to mask bugs, and copy it to a new "threadtxn" test too, which uses a txn_do in each thread to do the same stuff
 r19626 at agaton (orig r4486):  lukes | 2008-06-10 21:08:17 +0100
  r8139 at luke-mbp (orig r4482):  lukes | 2008-06-10 17:25:34 +0100
  new branch for making the FK index optional
  r8140 at luke-mbp (orig r4483):  lukes | 2008-06-10 20:09:24 +0100
  added some perldoc
  r8141 at luke-mbp (orig r4484):  lukes | 2008-06-10 20:40:42 +0100
  allow add_fk_index param to be specified in rel def
  r8142 at luke-mbp (orig r4485):  lukes | 2008-06-10 21:07:56 +0100
  fixed failing test
 
 r19628 at agaton (orig r4488):  castaway | 2008-06-11 13:25:15 +0100
 Point at "prefetch" in the get/set cache docs
 
 r19629 at agaton (orig r4489):  castaway | 2008-06-11 13:54:58 +0100
 Add docs to update mentioning scalar refs and discard_changes
 
 r19648 at agaton (orig r4492):  castaway | 2008-06-11 17:27:21 +0100
 Blasted relation docs..
 
 r19690 at agaton (orig r4505):  gphat | 2008-06-19 14:06:57 +0100
 Add make_column_dirty to Row (per request from #dbix-class questions)
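 A minimal usage sketch (the column name is illustrative):

   # mark the column changed even though the stored value is identical,
   # so the next update() includes it in the SET clause
   $row->make_column_dirty('last_seen');
   $row->update;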
 
 r20287 at agaton (orig r4514):  wdh | 2008-06-25 11:52:30 +0100
 clarify that ->resultset_class must be called after ->load_components and ->table when using custom resultsets
 r20291 at agaton (orig r4518):  wdh | 2008-06-26 13:29:45 +0100
 add troubleshooting examples for quoting issues
 r20292 at agaton (orig r4519):  castaway | 2008-06-26 20:51:35 +0100
 Remove setup_connection_class from POD, skip in podcoverage
 
 r20293 at agaton (orig r4520):  lukes | 2008-06-27 11:18:08 +0100
 changed default behaviour of do_upgrade in versioned to just run everything
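 Roughly what the new default amounts to, shown as the override you
 would write in a Schema subclass (the qr// is illustrative):

   sub do_upgrade {
     my ($self) = @_;
     # run every statement found in the upgrade .sql file; pass a
     # narrower pattern to run_upgrade() to filter as before
     $self->run_upgrade(qr/.*/);
   }
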
 r20446 at agaton (orig r4540):  bricas | 2008-06-30 14:32:03 +0100
 change my nick
 r20447 at agaton (orig r4541):  nigel | 2008-06-30 15:30:11 +0100
 Corrected spelling of TRANSACTION in code reading sql upgrade script.
 Pointed out by renormalist on IRC.
 
 r20448 at agaton (orig r4542):  bricas | 2008-06-30 15:36:37 +0100
 update marcus in the authors
 r20449 at agaton (orig r4543):  lukes | 2008-06-30 19:38:08 +0100
 added ignore_version connect attr and updated docs accordingly
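 A minimal sketch of the new attribute (connection details are
 placeholders):

   my $schema = My::Schema->connect(
     $dsn, $user, $pass,
     { ignore_version => 1 },   # skip the schema/db version check warning
   );
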
 r20450 at agaton (orig r4544):  lukes | 2008-06-30 21:07:13 +0100
 implemented versioning tests for version warns
 r20457 at agaton (orig r4551):  ash | 2008-07-02 15:53:32 +0100
 Add caveat about prefetch
 r20458 at agaton (orig r4552):  wreis | 2008-07-02 23:19:39 +0100
 updating changelog
 r20459 at agaton (orig r4553):  ribasushi | 2008-07-04 00:52:31 +0100
 Minor cookbook fix (two adjacent examples were mixed up)
 r20460 at agaton (orig r4554):  lukes | 2008-07-04 13:03:51 +0100
 made versioning overwrite ddl and diff files where appropriate and made arg order of ddl_filename consistent with create_ddl_filename
 r20461 at agaton (orig r4555):  lukes | 2008-07-07 13:11:32 +0100
 moved schema_version from Versioning to core
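 With the move, any schema can report its code version (a minimal
 sketch):

   # previously only available when Schema::Versioned was loaded
   my $version = $schema->schema_version;
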
 r20651 at agaton (orig r4560):  lukes | 2008-07-09 01:11:38 +0100
 fixed versioned stuff to strip BEGIN from sql files
 r20772 at agaton (orig r4575):  plu | 2008-07-15 09:36:20 +0100
 Skip custom query sources
 
 r20776 at agaton (orig r4579):  lukes | 2008-07-15 23:13:08 +0100
  r9099 at luke-mbp (orig r4573):  lukes | 2008-07-14 13:11:13 +0100
  new branch
  r9100 at luke-mbp (orig r4574):  lukes | 2008-07-14 15:01:50 +0100
  reordered methods of Versioned.pm and factored the initialisation stuff from upgrade to install
  r9128 at luke-mbp (orig r4576):  lukes | 2008-07-15 23:07:38 +0100
  major versioning doc refactor
  r9129 at luke-mbp (orig r4577):  lukes | 2008-07-15 23:11:10 +0100
  removed EXPERIMENTAL notices
 
 r20778 at agaton (orig r4581):  ash | 2008-07-16 17:41:52 +0100
 Update docs re txn_scope_guard
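 The documented pattern, roughly (the create() body is illustrative):

   my $guard = $schema->txn_scope_guard;
   $schema->resultset('User')->create({ name => 'fred' });
   $guard->commit;   # skip this and the guard rolls the transaction
                     # back when it goes out of scope (e.g. on die)
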
 r20825 at agaton (orig r4590):  groditi | 2008-07-17 21:22:40 +0100
  r20694 at martha (orig r4588):  groditi | 2008-07-16 16:17:07 -0400
  _is_deteministic_value
 
 r20826 at agaton (orig r4591):  groditi | 2008-07-17 21:22:50 +0100
  r20836 at martha (orig r4589):  groditi | 2008-07-17 16:21:07 -0400
  Changes and AUTHORS
 
 r20854 at agaton (orig r4596):  jshirley | 2008-07-18 17:48:10 +0100
 Adding datetime_undef_if_invalid to squelch errors on DateTime inflation of bogus values.
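 With the flag set in the column info (see the InflateColumn::DateTime
 hunk below), inflating a bogus value yields undef rather than dying
 (a sketch; the column name is illustrative):

   my $dt = $row->broken_date;   # undef instead of a DateTime exception
   print "unparseable date\n" unless defined $dt;
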
 r20881 at agaton (orig r4601):  jnapiorkowski | 2008-07-21 22:09:21 +0100
  r8426 at dev (orig r4305):  jnapiorkowski | 2008-04-28 13:38:43 -0500
  branch for replication rewrite
  r8427 at dev (orig r4307):  jnapiorkowski | 2008-04-30 10:51:48 -0500
  new config option to DBICTest to let you set an alternative storage type, start on creating a DBIC-based load balancer
  r8428 at dev (orig r4309):  jnapiorkowski | 2008-04-30 15:26:26 -0500
  got first pass on the replication and balancer, passing all of the old test suite (which is not much, but it is a milestone of some sort)
  r8429 at dev (orig r4311):  jnapiorkowski | 2008-04-30 17:16:55 -0500
  added some advice to debugging replicants so that we can see a replicant dsn, got this balancing between one master and slave, seems to run well
  r8430 at dev (orig r4312):  jnapiorkowski | 2008-04-30 17:30:47 -0500
  added some advice to debugging replicants so that we can see a replicant dsn, got this balancing between one master and slave, seems to run well
  r8431 at dev (orig r4313):  jnapiorkowski | 2008-04-30 17:50:09 -0500
  cleanup of some docs, got the default shuffling balancer to work properly.  Don't fall in love with this behavior, since I'm probably going to change the default balancer to just return the first replicant in the list, since this is optimised for the common case of a single replicant
  r8432 at dev (orig r4314):  jnapiorkowski | 2008-04-30 18:15:28 -0500
  added test to check when nothing is found
  r8433 at dev (orig r4315):  jnapiorkowski | 2008-05-01 10:56:10 -0500
  changed replication test to support custom database connect info, added a little code to DBICTest to support this
  r8441 at dev (orig r4335):  jnapiorkowski | 2008-05-05 15:42:52 -0500
  updated mysql test to reflect the fetch without execute error
  r8444 at dev (orig r4338):  jnapiorkowski | 2008-05-05 16:41:48 -0500
  removed code that tossed an error in select_single when more than a single row is returned and updated the tests to TODO the bad count issue
  r8493 at dev (orig r4351):  jnapiorkowski | 2008-05-06 19:23:09 -0500
  refactored the duties of the different balancer classes, added tests and docs
  r8494 at dev (orig r4352):  jnapiorkowski | 2008-05-06 19:43:52 -0500
  documented methods for detecting replicant reliability, created stub methods
  r8567 at dev (orig r4359):  jnapiorkowski | 2008-05-07 17:40:30 -0500
  changed the way args are passed to a storage, should make it easier to use existing code using this, added the master as a fallback to the replicants, lots of small documentation updates and test improvements.  all tests passing
  r8573 at dev (orig r4365):  jnapiorkowski | 2008-05-08 15:26:01 -0500
  lots of updates to make the test suite work with databases other than sqlite
  r8574 at dev (orig r4366):  jnapiorkowski | 2008-05-08 16:43:16 -0500
  more cleanup of the test suite so that we can run it against other databases.  fixed the problem with tests using self-referential constraints for dbs that have trouble handling that
  r8575 at dev (orig r4367):  jnapiorkowski | 2008-05-08 18:34:55 -0500
  converted replicant to a role so that we can apply it after ensure_connected properly reblesses the storage into the correct driver specific storage
  r8576 at dev (orig r4368):  jnapiorkowski | 2008-05-08 19:06:42 -0500
  fixed up the replicant status tests to exclude them if the database is not a real replicating setup, removed some debug code, got the lag_behind_master and is_replicating methods working properly.
  r8577 at dev (orig r4369):  jnapiorkowski | 2008-05-08 19:31:58 -0500
  fixed up the replicant status tests to exclude them if the database is not a real replicating setup, removed some debug code, got the lag_behind_master and is_replicating methods working properly.
  r8578 at dev (orig r4370):  jnapiorkowski | 2008-05-08 20:40:03 -0500
  good start on the validation of replicants and a system to automatically validate them (although that might be a better role than inside a class, for someday!)
  r8581 at dev (orig r4373):  jnapiorkowski | 2008-05-09 10:53:45 -0500
  changed the balancer to a role, created a new class to define the default balancer behavior, finished the autovalidate code and added tests for all the above
  r8582 at dev (orig r4374):  jnapiorkowski | 2008-05-09 12:00:46 -0500
  changed the way args are passed to a storage type that needs args so they can be in the form of a hash or array ref.  This plays nicer with Config::General for loading
  r8583 at dev (orig r4375):  jnapiorkowski | 2008-05-09 13:32:58 -0500
  doh, replaced ability to configure pool args via the storage args
  r8584 at dev (orig r4376):  jnapiorkowski | 2008-05-09 14:34:17 -0500
  removed bad tabbing
  r12902 at dev (orig r4385):  jnapiorkowski | 2008-05-14 11:05:22 -0500
  changed SQLT::Parser::DBIC so that in FK constraints, both the field order for the source and target tables are ensured to follow the same ordering rule
  r12903 at dev (orig r4386):  jnapiorkowski | 2008-05-14 12:57:57 -0500
  documentation updates
  r13046 at dev (orig r4389):  jnapiorkowski | 2008-05-16 12:31:16 -0500
  make sure that the Pool validates the replicants on the first query
  r13401 at dev (orig r4424):  jnapiorkowski | 2008-05-28 14:15:34 -0500
  created storage method to execute a coderef using master storage only, changed txn_do to only use the master, wrote tests for both the above, wrote docs for both the above
  r13450 at dev (orig r4425):  jnapiorkowski | 2008-05-29 13:30:39 -0500
  discard_changes is now forced to use the master for replication.  changed discard_changes guts to point to a new method called reload_row in storage.  fixed problem with replicated transactions not returning the right thing.  added tests for all the above
  r13469 at dev (orig r4426):  jnapiorkowski | 2008-05-29 18:03:15 -0500
  changed Storage->reload_row to do less, reverted some behavior to PK->discard_changes.  Did this to solve some compatibility issues with partitioning.  updated docs to reflect this.
  r13470 at dev (orig r4427):  jnapiorkowski | 2008-05-29 19:18:39 -0500
  fixed failing test in podcoverage, fixed regression in replication test that caused the default fake sqlite replication to fail.  not so important since sqlite doesn't replicate but we do not like to see failing tests.
  r13476 at dev (orig r4433):  jnapiorkowski | 2008-05-30 09:32:24 -0500
  fixed failing test for sqlt
  r13498 at dev (orig r4440):  jnapiorkowski | 2008-05-30 11:37:56 -0500
  reverted unneeded field name change in TreeLike.pm from parent_fk back to parent, updated all underlying tests and related bits as requested by Castaway.
  r13563 at dev (orig r4461):  jnapiorkowski | 2008-06-03 10:59:17 -0500
  fix to make sure execute_reliably method properly finds its attributes
  r13587 at dev (orig r4467):  jnapiorkowski | 2008-06-04 12:49:15 -0500
  fixed boneheaded failure to properly propagate txn_do
  r13639 at dev (orig r4473):  jnapiorkowski | 2008-06-05 11:28:23 -0500
  all your tabs belong to spaces
  r13684 at dev (orig r4474):  jnapiorkowski | 2008-06-07 10:59:28 -0500
  clarified documentation about setting up slave dsn
  r13803 at dev (orig r4491):  jnapiorkowski | 2008-06-11 10:01:00 -0500
  1) changed all 4 space indentation to 2 space style indents for replication code, 2) fixed broken index test that was broken after pulling from trunk, 3) updated some docs and better internal docs for replication test, 4) added a couple of new tests to make sure replication does not explode if you are careless about transactions inside of transactions inside of execute_reliably, etc.
  r13884 at dev (orig r4493):  jnapiorkowski | 2008-06-12 12:30:32 -0500
  more cleanly separated DBIC::Storage::Replicated from any storage functions (trying to make sure everything goes to the master or slave correctly), added some tests around this issue and updated the docs a bit.
  r14115 at dev (orig r4506):  jnapiorkowski | 2008-06-19 09:45:42 -0500
  removed ->reload_row from storage, changed this to a method based on the actual row object.  discard_changes is still semantically ambiguous but this solution is better
  r14116 at dev (orig r4507):  jnapiorkowski | 2008-06-19 10:48:39 -0500
  renamed get_current_storage to get_from_storage since the first method name is very poorly named
  r14749 at dev (orig r4557):  jnapiorkowski | 2008-07-07 13:58:37 -0500
  removed some debugging comments, removed transaction from Row->get_from_storage, enabled support for new resultset attribute "execute_reliably" which signals the Balancer to send read requests to the master.  Also refactored connect_replicants to break down functionality into two methods and added new Balancer method to roll the replicant to the next in the queue.  added tests for all the above.
  r14750 at dev (orig r4558):  jnapiorkowski | 2008-07-07 14:16:32 -0500
  added some notes in the tests and fixed get_from_storage to actually use the new resultset attribute
  r14751 at dev (orig r4559):  jnapiorkowski | 2008-07-07 16:38:49 -0500
  updated documentation, adding some hints and details, changed the way we can use the resultset attribute to force a particular storage backend.
  r14835 at dev (orig r4562):  jnapiorkowski | 2008-07-09 12:35:06 -0500
  use BUILDARGS instead of wrapping new, added make_immutable, removed unneeded test, added some docs
  r14836 at dev (orig r4563):  jnapiorkowski | 2008-07-09 12:40:37 -0500
  use BUILDARGS instead of wrapping new, added make_immutable, removed unneeded test, added some docs
  r14837 at dev (orig r4564):  jnapiorkowski | 2008-07-09 12:51:26 -0500
  removed the mistaken debug code
  r14838 at dev (orig r4565):  jnapiorkowski | 2008-07-09 13:07:17 -0500
  make sure various Storage mutators correctly return a useful value
  r14888 at dev (orig r4566):  jnapiorkowski | 2008-07-10 14:58:28 -0500
  fixed regression in the random balancer that I created when I removed the shuffle dependency, changed the syntax for returning the next storage in the pool to make debugging easier
  r14897 at dev (orig r4567):  jnapiorkowski | 2008-07-10 16:14:04 -0500
  make sure debugobj calls always go to the master
  r14941 at dev (orig r4569):  jnapiorkowski | 2008-07-11 15:18:10 -0500
  added tests for required modules, minor documentation update
  r14942 at dev (orig r4572):  jnapiorkowski | 2008-07-11 16:46:37 -0500
  just a tiny grammar fix to POD
  r15162 at dev (orig r4594):  jnapiorkowski | 2008-07-18 09:14:28 -0500
  updated CHANGES, removed debug code left by accident, added a bit of POD regarding the word "replicant"
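 
  Taken together, the storage merged here is used roughly as follows,
  per the log entries above (schema name, dsns and balancer choice are
  illustrative):
 
    my $schema = My::Schema->clone;
    $schema->storage_type([
      '::DBI::Replicated',
      { balancer_type => '::Random' },    # or '::First'
    ]);
    $schema->connection($master_dsn, $user, $pass);
    $schema->storage->connect_replicants(
      [ $slave1_dsn, $user, $pass ],
      [ $slave2_dsn, $user, $pass ],
    );
 
    # reads are balanced across the replicants; writes, txn_do and
    # discard_changes go to the master, and execute_reliably forces
    # reads there as well
    $schema->storage->execute_reliably(sub {
      # everything in here hits the master
    });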
 
 r20882 at agaton (orig r4602):  matthewt | 2008-07-23 20:36:53 +0100
 fix skip
 r20885 at agaton (orig r4605):  jnapiorkowski | 2008-07-23 21:09:22 +0100
 replication tests need Test::Moose
 r20886 at agaton (orig r4606):  matthewt | 2008-07-23 21:09:26 +0100
 fix cycle, reformat to 80 cols
 r20887 at agaton (orig r4607):  matthewt | 2008-07-23 21:11:13 +0100
 more author requirements



Property changes on: DBIx-Class/0.09/trunk
___________________________________________________________________
Name: svk:merge
   - 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:4290
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
   + 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:4607
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510

Modified: DBIx-Class/0.09/trunk/Changes
===================================================================
--- DBIx-Class/0.09/trunk/Changes	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/Changes	2008-07-23 21:59:28 UTC (rev 4608)
@@ -1,7 +1,17 @@
 Revision history for DBIx::Class
 
+        - Added datetime_undef_if_invalid for InflateColumn::DateTime to
+          return undef on invalid date/time values
+        - Added search_related_rs method to ResultSet
+        - add a make_column_dirty method to Row to force updates
+        - throw a clear exception when user tries multi-has_many prefetch
+        - SQLT parser prefixes index names with ${table}_idx_ to avoid clashes
+        - mark ResultSetManager as deprecated and undocument it
+        - pod fix (RT #32988)
+        - add Test::Exception to test requirements (RT #34256)
+        - make ash's build_requires/META.yml fixes work better
         - is_deferable support on relations used by the SQL::Translator
-          parser (Anders Nor Berle)
+          parser
         - Refactored DBIx::Class::Schema::Versioned
         - Syntax errors from resultset components are now reported correctly
         - sqltargs respected correctly in deploy et al.
@@ -9,8 +19,14 @@
           nested transactions if auto_savepoint is set in connect_info.
         - Changed naming scheme for constraints and keys in the sqlt parser;
           names should now be consistent and collision-free.
-        - Improve handling of explicit key attr in ResultSet::find (zby)
-        - Add warnings for non-unique ResultSet::find queries (zby)
+        - Improve handling of explicit key attr in ResultSet::find
+        - Add warnings for non-unique ResultSet::find queries
+        - Changed Storage::DBI::Replication to Storage::DBI::Replicated and
+          refactored support.
+        - By default now deploy/diff et al. will ignore constraint and index 
+          names
+        - Add ResultSet::_is_deterministic_value, make new_result filter the
+          values passed to new to drop values that would generate invalid SQL.
 
 0.08010 2008-03-01 10:30
         - Fix t/94versioning.t so it passes with latest SQL::Translator
@@ -22,6 +38,7 @@
         - Added freeze, thaw and dclone methods to Schema so that thawed
           objects will get re-attached to the schema.
         - Moved dbicadmin to JSON::Any wrapped JSON.pm for a sane API
+          (also fixes RT #32393)
         - introduced DBIx::Class::set_inflated_columns
         - DBIx::Class::Row::copy uses set_inflated_columns
 

Modified: DBIx-Class/0.09/trunk/Makefile.PL
===================================================================
--- DBIx-Class/0.09/trunk/Makefile.PL	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/Makefile.PL	2008-07-23 21:59:28 UTC (rev 4608)
@@ -1,5 +1,9 @@
 use inc::Module::Install 0.67;
+use strict;
+use warnings;
 
+use 5.006001; # delete this line if you want to send patches for earlier.
+
 name     'DBIx-Class';
 perl_version '5.006001';
 all_from 'lib/DBIx/Class.pm';
@@ -18,27 +22,39 @@
 requires 'Class::Accessor::Grouped'  => 0.05002;
 requires 'JSON::Any'                 => 1.00; 
 requires 'Scope::Guard'              => 0.03;
-requires 'Digest::SHA1'              => 2.00;
 requires 'Path::Class'               => 0;
+requires 'List::Util'                => 1.19;
 
 # Perl 5.8.0 doesn't have utf8::is_utf8()
 requires 'Encode'                    => 0 if ($] <= 5.008000);  
 
-build_requires 'DBD::SQLite'         => 1.13;
-build_requires 'Test::Builder'       => 0.33;
-build_requires 'Test::Warn'          => 0.08;
-build_requires 'Test::NoWarnings'    => 0.08;
+test_requires 'DBD::SQLite'         => 1.13;
+test_requires 'Test::Builder'       => 0.33;
+test_requires 'Test::Warn'          => 0.08;
+test_requires 'Test::NoWarnings'    => 0.08;
+test_requires 'Test::Exception'     => 0;
 
 install_script 'script/dbicadmin';
 
 tests "t/*.t t/*/*.t";
 
 # re-build README and require CDBI modules for testing if we're in a checkout
-if( -e 'inc/.author' ) {
-  build_requires 'DBIx::ContextualFetch';
-  build_requires 'Class::Trigger';
-  build_requires 'Time::Piece';
 
+my @force_build_requires_if_author = qw(
+  DBIx::ContextualFetch
+  Class::Trigger
+  Time::Piece
+  Clone
+  Test::Pod::Coverage
+  Test::Memory::Cycle
+);
+
+if ($Module::Install::AUTHOR) {
+
+  foreach my $module (@force_build_requires_if_author) {
+    build_requires $module;
+  }
+
   system('pod2text lib/DBIx/Class.pm > README');
 }
 
@@ -52,16 +68,23 @@
 if ($Module::Install::AUTHOR) {
   # Need to do this _after_ WriteAll else it loses track of them
   Meta->{values}{build_requires} = [ grep {
-    $_->[0] !~ /
-      DBIx::ContextualFetch |
-      Class::Trigger |
-      Time::Piece
-    /x;
+    my $ok = 1;
+    foreach my $module (@force_build_requires_if_author) {
+      if ($_->[0] =~ /$module/) {
+        $ok = 0;
+        last;
+      }
+    }
+    $ok;
   } @{Meta->{values}{build_requires}} ];
 
   my @scalar_keys = Module::Install::Metadata::Meta_TupleKeys();
-  sub Module::Install::Metadata::Meta_TupleKeys {
-    return @scalar_keys, 'resources';
+  my $cr = Module::Install::Metadata->can("Meta_TupleKeys");
+  {
+    no warnings 'redefine';
+    *Module::Install::Metadata::Meta_TupleKeys = sub {
+      return $cr->(@_), 'resources';
+    };
   }
   Meta->{values}{resources} = [ 
     [ 'MailingList', 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class' ],

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/InflateColumn/DateTime.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/InflateColumn/DateTime.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/InflateColumn/DateTime.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -53,6 +53,18 @@
 up datetime columns appropriately.  This would not normally be
 directly called by end users.
 
+In the case of an invalid date, L<DateTime> will throw an exception.  To
+bypass these exceptions and just have the inflation return undef, use
+the C<datetime_undef_if_invalid> option in the column info:
+  
+    "broken_date",
+    {
+        data_type => "datetime",
+        default_value => '0000-00-00',
+        is_nullable => 1,
+        datetime_undef_if_invalid => 1
+    }
+
 =cut
 
 sub register_column {
@@ -66,6 +78,8 @@
     $timezone = $info->{extra}{timezone};
   }
 
+  my $undef_if_invalid = $info->{datetime_undef_if_invalid};
+
   if ($type eq 'datetime' || $type eq 'date') {
     my ($parse, $format) = ("parse_${type}", "format_${type}");
     $self->inflate_column(
@@ -73,7 +87,9 @@
         {
           inflate => sub {
             my ($value, $obj) = @_;
-            my $dt = $obj->_datetime_parser->$parse($value);
+            my $dt = eval { $obj->_datetime_parser->$parse($value); };
+            die "Error while inflating ${value} for ${column} on ${self}: $@"
+              if $@ and not $undef_if_invalid;
             $dt->set_time_zone($timezone) if $timezone;
             return $dt;
           },

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/Cookbook.pod
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/Cookbook.pod	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/Cookbook.pod	2008-07-23 21:59:28 UTC (rev 4608)
@@ -194,8 +194,6 @@
     }
   );
 
-  my $count = $rs->next->get_column('count');
-
 =head2 SELECT COUNT(DISTINCT colname)
 
   my $rs = $schema->resultset('Foo')->search(
@@ -208,6 +206,8 @@
     }
   );
 
+  my $count = $rs->next->get_column('count');
+
 =head2 Grouping results
 
 L<DBIx::Class> supports C<GROUP BY> as follows:
@@ -255,8 +255,15 @@
 To use your resultset, first tell DBIx::Class to create an instance of it
 for you, in your My::DBIC::Schema::CD class:
 
+  # class definition as normal
+  __PACKAGE__->load_components(qw/ Core /);
+  __PACKAGE__->table('cd');
+
+  # tell DBIC to use the custom ResultSet class
   __PACKAGE__->resultset_class('My::DBIC::ResultSet::CD');
 
+Note that C<resultset_class> must be called after C<load_components> and C<table>, or you will get errors about missing methods.
+
 Then call your new method in your code:
 
    my $ordered_cds = $schema->resultset('CD')->search_cds_ordered();
@@ -763,6 +770,23 @@
 Which will of course only work if your database supports this function.
 See L<DBIx::Class::ResultSetColumn> for more documentation.
 
+=head2 Creating a result set from a set of rows
+
+Sometimes you have a (set of) row objects that you want to put into a 
+resultset without the need to hit the DB again. You can do that by using the
+L<set_cache|DBIx::Class::ResultSet/set_cache> method:
+
+ my @uploadable_groups;
+ while (my $group = $groups->next) {
+   if ($group->can_upload($self)) {
+     push @uploadable_groups, $group;
+   }
+ }
+ my $new_rs = $self->result_source->resultset;
+ $new_rs->set_cache(\@uploadable_groups);
+ return $new_rs;
+
+
 =head1 USING RELATIONSHIPS
 
 =head2 Create a new row in a related table

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/FAQ.pod
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/FAQ.pod	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/FAQ.pod	2008-07-23 21:59:28 UTC (rev 4608)
@@ -357,6 +357,19 @@
 
  ->update({ somecolumn => \'othercolumn' })
 
+But note that when using a scalar reference, the column in the database
+will be updated, but when you read the value from the object with e.g.
+ 
+ ->somecolumn()
+ 
+you still get back the scalar reference to the string, B<not> the new
+value in the database. To get that you must refresh the row from storage
+using C<discard_changes()>. Or chain your function calls like this:
+
+  ->update->discard_changes
+ 
+to update the database and refresh the object in one step.
+ 
 =item .. store JSON/YAML in a column and have it deflate/inflate automatically?
 
 You can use L<DBIx::Class::InflateColumn> to accomplish YAML/JSON storage transparently.

Added: DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/Reading.pod
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/Reading.pod	                        (rev 0)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/Reading.pod	2008-07-23 21:59:28 UTC (rev 4608)
@@ -0,0 +1,133 @@
+
+=head1 NAME
+
+DBIx::Class::Manual::Reading - How to read and write DBIx::Class POD.
+
+=head1 DESCRIPTION
+
+This doc should help users to understand how the examples and
+documentation found in the L<DBIx::Class> distribution can be
+interpreted.
+
+Writers of DBIx::Class POD should also check here to make sure their
+additions are consistent with the rest of the documentation.
+
+=head1 METHODS
+
+Methods should be documented in the files which also contain the code
+for the method, or that file should be hidden from PAUSE completely,
+in which case the methods are documented in the file which loads
+it. Methods may also be documented and referred to in files
+representing the major objects or components on which they can be
+called.
+
+For example, L<DBIx::Class::Relationship> documents the methods
+actually coded in the helper relationship classes like
+DBIx::Class::Relationship::BelongsTo. The BelongsTo file itself is
+hidden from PAUSE as it has no documentation. The accessors created by
+relationships should be mentioned in L<DBIx::Class::Row>, the major
+object that they will be called on.
+
+=head2 Method documentation
+
+=over
+
+=item *
+
+Each method starts with a "head2" statement of its name.
+
+=item *
+
+The header is followed by a one-item list.
+
+The single item provides a list of all possible values for the
+arguments of the method in order, separated by C<, >, preceded by the
+text "Arguments: "
+
+Example (for the belongs_to relationship):
+
+  =item Arguments: $accessor_name, $related_class, $fk_column|\%cond|\@cond?, \%attr?
+
+The following possible argument sigils can be shown:
+
+=over
+
+=item *
+
+$var - A scalar (string or numeric) variable.
+
+=item *
+
+\%var - A variable containing a reference to a hash.
+
+=item *
+
+\@var - A variable containing a reference to an array.
+
+=item *
+
+\$var - A variable containing a reference to a scalar variable.
+
+=item *
+
+? - Optional, should be placed after the argument type and name.
+
+=item *
+
+| - Alternate argument types.
+
+=back
+
+NOTES:
+
+If several arguments are optional, it is always possible to pass
+C<undef> as one optional argument in order to skip it and provide a
+value for the following ones. This does not need to be indicated in
+the Arguments line; it is assumed.
+
+The C<?> for optional arguments always applies to the entire argument
+value, not a particular type or argument.
+
+=item *
+
+The argument list is followed by a single paragraph describing what
+the method does.
+
+=item *
+
+The description paragraph is followed by another list. Each item in
+the list explains one of the possible argument/type combinations.
+
+=item *
+
+The argument list is followed by some examples of how to use the
+method, using its various types of arguments.
+
+The examples can also include ways to use the results if
+applicable. For instance if the documentation is for a relationship
+type, the examples can include how to call the resulting relation
+accessor, how to use the relation name in a search and so on.
+
+If some of the examples assume default values, these should be shown
+with and without the actual arguments, with hints about the equivalent
+calls.
+
+The example should be followed by one or more paragraphs explaining
+what it does.
+
+Examples and explaining paragraphs can be repeated as necessary.
+
+=back
+
+=head1 AUTHORS
+
+see L<DBIx::Class>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+
+

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/Troubleshooting.pod
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/Troubleshooting.pod	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Manual/Troubleshooting.pod	2008-07-23 21:59:28 UTC (rev 4608)
@@ -55,5 +55,73 @@
 specify a fully qualified namespace: C< package MySchema::MyTable; >
 for example.
 
+=head2 syntax error at or near "<something>" ...
+
+This can happen if you have a relation whose name is a word reserved by your
+database, e.g. "user":
+
+  package My::Schema::User;
+  ...
+  __PACKAGE__->table('users');
+  __PACKAGE__->add_columns(qw/ id name /);
+  __PACKAGE__->set_primary_key('id');
+  ...
+  1;
+
+  package My::Schema::ACL;
+  ...
+  __PACKAGE__->table('acl');
+  __PACKAGE__->add_columns(qw/ user_id /);
+  __PACKAGE__->belongs_to( 'user' => 'My::Schema::User', 'user_id' );
+  ...
+  1;
+
+  $schema->resultset('ACL')->search(
+    {},
+    {
+      join => [qw/ user /],
+      '+select' => [ 'user.name' ]
+    }
+  );
+
+The SQL generated would resemble something like:
+
+  SELECT me.user_id, user.name FROM acl me
+  JOIN users user ON me.user_id = user.id
+
+If, as is likely, your database treats "user" as a reserved word, you'd end
+up with the following errors:
+
+1) syntax error at or near "." - due to "user.name" in the SELECT clause
+
+2) syntax error at or near "user" - due to "user" in the JOIN clause
+
+The solution is to enable quoting - see
+L<DBIx::Class::Manual::Cookbook/Setting_quoting_for_the_generated_SQL> for
+details.
+
+Note that quoting may lead to problems with C<order_by> clauses, see
+L<... column "foo DESC" does not exist ...> for info on avoiding those.
+
+=head2 column "foo DESC" does not exist ...
+
+This can happen if you've turned on quoting and then done something like
+this:
+
+  $rs->search( {}, { order_by => [ 'name DESC' ] } );
+
+This results in SQL like this:
+
+  ... ORDER BY "name DESC"
+
+The solution is to pass your order_by items as scalar references to avoid
+quoting:
+
+  $rs->search( {}, { order_by => [ \'name DESC' ] } );
+
+Now you'll get SQL like this:
+
+  ... ORDER BY name DESC
+
 =cut
 

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/PK.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/PK.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/PK.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -25,7 +25,7 @@
   return (map { $self->{_column_data}{$_} } $self->primary_columns);
 }
 
-=head2 discard_changes
+=head2 discard_changes ($attrs)
 
 Re-selects the row from the database, losing any changes that had
 been made.
@@ -33,28 +33,30 @@
 This method can also be used to refresh from storage, retrieving any
 changes made since the row was last read from storage.
 
+$attrs is expected to be a hashref of attributes suitable for passing as the
+second argument to $resultset->search($cond, $attrs);
+
 =cut
 
 sub discard_changes {
-  my ($self) = @_;
+  my ($self, $attrs) = @_;
   delete $self->{_dirty_columns};
   return unless $self->in_storage; # Don't reload if we aren't real!
-
-  my $reload = $self->result_source->resultset->find(
-    map { $self->$_ } $self->primary_columns
-  );
-  unless ($reload) { # If we got deleted in the mean-time
+  
+  if( my $current_storage = $self->get_from_storage($attrs)) {
+  	
+    # Set $self to the current.
+  	%$self = %$current_storage;
+  	
+    # Avoid a possible infinite loop with
+    # sub DESTROY { $_[0]->discard_changes }
+    bless $current_storage, 'Do::Not::Exist';
+    
+    return $self;  	
+  } else {
     $self->in_storage(0);
-    return $self;
+    return $self;  	
   }
-
-  %$self = %$reload;
-  
-  # Avoid a possible infinite loop with
-  # sub DESTROY { $_[0]->discard_changes }
-  bless $reload, 'Do::Not::Exist';
-
-  return $self;
 }
 
 =head2 id

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/Relationship/Base.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Relationship/Base.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Relationship/Base.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -116,6 +116,12 @@
 until the end of the transaction. Currently, only the PostgreSQL producer
 actually supports this.
 
+=item add_fk_index
+
+Tells L<SQL::Translator> to add an index for this constraint. Can also be
+specified globally in the args to L<DBIx::Class::Schema/deploy> or
+L<DBIx::Class::Schema/create_ddl_dir>. Default is on, set to 0 to disable.
+
 =back
 
 =head2 register_relationship
@@ -435,7 +441,7 @@
 
   my $actor = $schema->resultset('Actor')->find(1);
   my @roles = $schema->resultset('Role')->search({ role => 
-     { '-in' -> ['Fred', 'Barney'] } } );
+     { '-in' => ['Fred', 'Barney'] } } );
 
   $actor->set_roles(\@roles);
      # Replaces all of $actor's previous roles with the two named

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/Relationship.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Relationship.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Relationship.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -19,6 +19,7 @@
 
 =head1 SYNOPSIS
 
+  ## Creating relationships
   MyDB::Schema::Actor->has_many('actorroles' => 'MyDB::Schema::ActorRole',
                                 'actor');
   MyDB::Schema::Role->has_many('actorroles' => 'MyDB::Schema::ActorRole',
@@ -29,6 +30,7 @@
   MyDB::Schema::Role->many_to_many('actors' => 'actorroles', 'actor');
   MyDB::Schema::Actor->many_to_many('roles' => 'actorroles', 'role');
 
+  ## Using relationships
   $schema->resultset('Actor')->roles();
   $schema->resultset('Role')->search_related('actors', { Name => 'Fred' });
   $schema->resultset('ActorRole')->add_to_roles({ Name => 'Sherlock Holmes'});
@@ -98,29 +100,31 @@
 
 =head1 METHODS
 
-All helper methods take the following arguments:
+All helper methods are called similarly to the following template:
 
-  __PACKAGE__>$method_name('relname', 'Foreign::Class', $cond, $attrs);
+  __PACKAGE__->$method_name('relname', 'Foreign::Class', $cond, $attrs);
   
 Both C<$cond> and C<$attrs> are optional. Pass C<undef> for C<$cond> if
 you want to use the default value for it, but still want to set C<$attrs>.
 
-See L<DBIx::Class::Relationship::Base> for a list of valid attributes and valid
-relationship attributes.
+See L<DBIx::Class::Relationship::Base> for documentation on the
+attributes that are allowed in the C<$attrs> argument.
 
+
 =head2 belongs_to
 
 =over 4
 
-=item Arguments: $accessor_name, $related_class, $foreign_key_column|$cond?, $attr?
+=item Arguments: $accessor_name, $related_class, $fk_column|\%cond|\@cond?, \%attr?
 
 =back
 
-Creates a relationship where the calling class stores the foreign class's
-primary key in one (or more) of its columns. This relationship defaults to
-using C<$accessor_name> as the foreign key in C<$related_class> to resolve the
-join, unless C<$foreign_key_column> specifies the foreign key column in
-C<$related_class> or C<$cond> specifies a reference to a join condition hash.
+Creates a relationship where the calling class stores the foreign
+class's primary key in one (or more) of its columns. This relationship
+defaults to using C<$accessor_name> as the column in this class
+to resolve the join against the primary key from C<$related_class>,
+unless C<$fk_column> specifies the foreign key column in this class or
+C<cond> specifies a reference to a join condition hash.
 
 =over
 
@@ -128,9 +132,10 @@
 
 This argument is the name of the method you can call on a
 L<DBIx::Class::Row> object to retrieve the instance of the foreign
-class matching this relationship.
+class matching this relationship. This is often called the
+C<relation(ship) name>.
 
-Use this accessor_name (relation name) in L<DBIx::Class::ResultSet/join>
+Use this accessor_name in L<DBIx::Class::ResultSet/join>
 or L<DBIx::Class::ResultSet/prefetch> to join to the foreign table
 indicated by this relationship.
 
@@ -139,7 +144,7 @@
 This is the class name of the table referenced by the foreign key in
 this class.
 
-=item foreign_key_column
+=item fk_column
 
 The column name on this class that contains the foreign key.
 
@@ -155,17 +160,34 @@
 
 
   # in a Book class (where Author has many Books)
-  My::DBIC::Schema::Book->belongs_to( author => 'My::DBIC::Schema::Author' );
+  My::DBIC::Schema::Book->belongs_to( 
+    author => 
+    'My::DBIC::Schema::Author', 
+    'author_id'
+  );
 
-  my $author_obj = $obj->author; # get author object
-  $obj->author( $new_author_obj ); # set author object
+  # OR (same result)
+  My::DBIC::Schema::Book->belongs_to(
+    author =>
+    'My::DBIC::Schema::Author',
+    { 'foreign.author_id' => 'self.author_id' } 
+  );
 
-The above belongs_to relationship could also have been specified as,
+  # OR (similar result but uglier accessor name)
+  My::DBIC::Schema::Book->belongs_to( 
+    author_id =>
+    'My::DBIC::Schema::Author'
+  );
 
-  My::DBIC::Schema::Book->belongs_to( author,
-                                      'My::DBIC::Schema::Author',
-                                      { 'foreign.author' => 'self.author' } );
+  # Usage
+  my $author_obj = $book->author; # get author object
+  $book->author( $new_author_obj ); # set author object
+  $book->author_id(); # get the plain id
 
+  # To retrieve the plain id if you used the ugly version:
+  $book->get_column('author_id');
+
+
 If the relationship is optional -- i.e. the column containing the foreign key
 can be NULL -- then the belongs_to relationship does the right thing. Thus, in
 the example above C<$obj-E<gt>author> would return C<undef>.  However in this
@@ -174,8 +196,12 @@
 operations work correctly.  The modified declaration is shown below:
 
   # in a Book class (where Author has_many Books)
-  __PACKAGE__->belongs_to(author => 'My::DBIC::Schema::Author',
-                          'author', {join_type => 'left'});
+  __PACKAGE__->belongs_to(
+    author => 
+    'My::DBIC::Schema::Author',
+    'author', 
+    { join_type => 'left' }
+  );
 
 
 Cascading deletes are off by default on a C<belongs_to>
@@ -192,16 +218,17 @@
 
 =over 4
 
-=item Arguments: $accessor_name, $related_class, $foreign_key_column|$cond?, $attr?
+=item Arguments: $accessor_name, $related_class, $foreign_key_column|\%cond|\@cond?, \%attr?
 
 =back
 
-Creates a one-to-many relationship, where the corresponding elements of the
-foreign class store the calling class's primary key in one (or more) of its
-columns. This relationship defaults to using C<$accessor_name> as the foreign
-key in C<$related_class> to resolve the join, unless C<$foreign_key_column>
-specifies the foreign key column in C<$related_class> or C<$cond> specifies a
-reference to a join condition hash.
+Creates a one-to-many relationship, where the corresponding elements
+of the foreign class store the calling class's primary key in one (or
+more) of its columns. This relationship defaults to using the end of
+this class's namespace as the foreign key in C<$related_class> to
+resolve the join, unless C<$foreign_key_column> specifies the foreign
+key column in C<$related_class> or C<cond> specifies a reference to a
+join condition hash.
 
 =over
 
@@ -210,9 +237,10 @@
 This argument is the name of the method you can call on a
 L<DBIx::Class::Row> object to retrieve a resultset of the related
 class restricted to the ones related to the row object. In list
-context it returns the row objects.
+context it returns the row objects. This is often called the
+C<relation(ship) name>.
 
-Use this accessor_name (relation name) in L<DBIx::Class::ResultSet/join>
+Use this accessor_name in L<DBIx::Class::ResultSet/join>
 or L<DBIx::Class::ResultSet/prefetch> to join to the foreign table
 indicated by this relationship.
 
@@ -229,48 +257,88 @@
 
 =item cond
 
-A hashref where the keys are C<foreign.$column_on_related_table> and
-the values are C<self.$foreign_key_column>. This is useful for
+A hashref where the keys are C<foreign.$foreign_key_column> and
+the values are C<self.$matching_column>. This is useful for
 relations that are across multiple columns.
 
+OR
+
+An arrayref of hashrefs as above, combined as an SQL::Abstract-style
+OR condition. This is useful, for example, for a link table where two
+columns link back to the same table:
+
+  My::Schema::Item->has_many('rels', 'My::Schema::Relationships',
+                             [ { 'foreign.LItemID' => 'self.ID' },
+                               { 'foreign.RItemID' => 'self.ID'} ]);
+
 =back
 
   # in an Author class (where Author has_many Books)
-  My::DBIC::Schema::Author->has_many(books => 'My::DBIC::Schema::Book', 'author');
+  # assuming related class is storing our PK in "author_id"
+  My::DBIC::Schema::Author->has_many(
+    books => 
+    'My::DBIC::Schema::Book', 
+    'author_id'
+  );
 
-  my $booklist = $obj->books;
-  my $booklist = $obj->books({
+  # OR (same result)
+  My::DBIC::Schema::Author->has_many(
+    books => 
+    'My::DBIC::Schema::Book', 
+    { 'foreign.author_id' => 'self.id' },
+  );
+  
+  # OR (similar result, assuming the related class stores our PK in "author")
+  # (the "author" is guessed at from "Author" in the class namespace)
+  My::DBIC::Schema::Author->has_many(
+    books => 
+    'My::DBIC::Schema::Book', 
+  );
+
+
+  # Usage
+  # resultset of Books belonging to author 
+  my $booklist = $author->books;
+
+  # resultset of Books belonging to author, restricted by book name
+  my $booklist = $author->books(
+    { name => { LIKE => '%macaroni%' } },
+    { prefetch => [qw/book/] },
+  );
-  my @book_objs = $obj->books;
-  my $books_rs = $obj->books;
+
+  # array of Book objects belonging to author
+  my @book_objs = $author->books;
+
+  # get a resultset even in list context
+  my $books_rs = $author->books;
+  ( $books_rs ) = $author->books_rs;
 
-  $obj->add_to_books(\%col_data);
+  # create a new book for this author, the relation fields are auto-filled
+  $author->create_related('books', \%col_data);
+  # alternative method for the above
+  $author->add_to_books(\%col_data);
 
-The above C<has_many> relationship could also have been specified with an
-explicit join condition:
 
-  My::DBIC::Schema::Author->has_many( books => 'My::DBIC::Schema::Book', {
-    'foreign.author' => 'self.author',
-  });
-
 Three methods are created when you create a has_many relationship.  The first
 method is the expected accessor method, C<$accessor_name()>.  The second is
 almost exactly the same as the accessor method but "_rs" is added to the end of
 the method name.  This method works just like the normal accessor, except that
-it returns a resultset no matter what, even in list context. The third method,
+it always returns a resultset, even in list context. The third method,
 named C<< add_to_$relname >>, will also be added to your Row items; this
 allows you to insert new related items, using the same mechanism as in
 L<DBIx::Class::Relationship::Base/"create_related">.
 
 If you delete an object in a class with a C<has_many> relationship, all
 the related objects will be deleted as well.  To turn this behaviour off,
-pass C<< cascade_delete => 0 >> in the C<$attr> hashref. However, any
+pass C<< cascade_delete => 0 >> in the C<attr> hashref. However, any
 database-level cascade or restrict will take precedence over a
 DBIx-Class-based cascading delete.
 
+If you copy an object in a class with a C<has_many> relationship, all
+the related objects will be copied as well. To turn this behaviour off,
+pass C<< cascade_copy => 0 >> in the C<attr> hashref. The behaviour
+defaults to C<< cascade_copy => 1 >>.
+
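+A sketch of where the attribute goes (the relationship details are the
+ones from the earlier has_many examples):
+
+  My::DBIC::Schema::Author->has_many(
+    books =>
+    'My::DBIC::Schema::Book',
+    'author_id',
+    { cascade_copy => 0 },  # copies of an Author won't clone their Books
+  );
+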
 See L<DBIx::Class::Relationship::Base> for documentation on relationship
 methods and valid relationship attributes.
 
@@ -278,14 +346,14 @@
 
 =over 4
 
-=item Arguments: $accessor_name, $related_class, $foreign_key_column|$cond?, $attr?
+=item Arguments: $accessor_name, $related_class, $foreign_key_column|\%cond|\@cond?, \%attr?
 
 =back
 
 Creates an optional one-to-one relationship with a class. This relationship
 defaults to using C<$accessor_name> as the foreign key in C<$related_class> to
 resolve the join, unless C<$foreign_key_column> specifies the foreign key
-column in C<$related_class> or C<$cond> specifies a reference to a join
+column in C<$related_class> or C<cond> specifies a reference to a join
 condition hash.
 
 =over
@@ -294,9 +362,10 @@
 
 This argument is the name of the method you can call on a
 L<DBIx::Class::Row> object to retrieve the instance of the foreign
-class matching this relationship.
+class matching this relationship. This is often called the
+C<relation(ship) name>.
 
-Use this accessor_name (relation name) in L<DBIx::Class::ResultSet/join>
+Use this accessor_name in L<DBIx::Class::ResultSet/join>
 or L<DBIx::Class::ResultSet/prefetch> to join to the foreign table
 indicated by this relationship.
 
@@ -314,28 +383,34 @@
 =item cond
 
 A hashref where the keys are C<foreign.$column_on_related_table> and
-the values are C<self.$foreign_key_column>. This is useful for
+the values are C<self.$matching_column>. This is useful for
 relations that are across multiple columns.
 
 =back
 
-  My::DBIC::Schema::Author->might_have( pseudonym =>
-                                        'My::DBIC::Schema::Pseudonym' );
+  # Author may have an entry in the pseudonym table
+  My::DBIC::Schema::Author->might_have(
+    pseudonym =>
+    'My::DBIC::Schema::Pseudonym',
+    'author_id',
+  );
 
-  my $pname = $obj->pseudonym; # to get the Pseudonym object
+  # OR (same result, assuming the related_class stores our PK)
+  My::DBIC::Schema::Author->might_have(
+    pseudonym =>
+    'My::DBIC::Schema::Pseudonym',
+  );
 
-The above might_have relationship could have been specified as:
+  # OR (same result)
+  My::DBIC::Schema::Author->might_have(
+    pseudonym =>
+    'My::DBIC::Schema::Pseudonym',
+    { 'foreign.author_id' => 'self.id' },
+  );
 
-  My::DBIC::Schema::Author->might_have( pseudonym =>
-                                        'My::DBIC::Schema::Pseudonym',
-                                        'author' );
+  # Usage
+  my $pname = $author->pseudonym; # to get the Pseudonym object
 
-Or even:
-
-  My::DBIC::Schema::Author->might_have( pseudonym =>
-                                        'My::DBIC::Schema::Pseudonym',
-                                        { 'foreign.author' => 'self.author' } );
-
 If you update or delete an object in a class with a C<might_have>
 relationship, the related object will be updated or deleted as well. To
 turn off this behavior, add C<< cascade_delete => 0 >> to the C<$attr>
@@ -349,20 +424,77 @@
 
 =over 4
 
-=item Arguments: $accessor_name, $related_class_name, $join_condition?, $attr?
+=item Arguments: $accessor_name, $related_class, $foreign_key_column|\%cond|\@cond?, \%attr?
 
 =back
 
-  My::DBIC::Schema::Book->has_one(isbn => 'My::DBIC::Schema::ISBN');
+Creates a one-to-one relationship with a class. This relationship
+defaults to using C<$accessor_name> as the foreign key in C<$related_class> to
+resolve the join, unless C<$foreign_key_column> specifies the foreign key
+column in C<$related_class> or C<cond> specifies a reference to a join
+condition hash.
 
-  my $isbn_obj = $obj->isbn; # to get the ISBN object
+=over
 
-Creates a one-to-one relationship with another class. This is just like
-C<might_have>, except the implication is that the other object is always
-present. The only difference between C<has_one> and C<might_have> is that
-C<has_one> uses an (ordinary) inner join, whereas C<might_have> uses a
-left join.
+=item accessor_name
 
+This argument is the name of the method you can call on a
+L<DBIx::Class::Row> object to retrieve the instance of the foreign
+class matching this relationship. This is often called the
+C<relation(ship) name>.
+
+Use this accessor_name in L<DBIx::Class::ResultSet/join>
+or L<DBIx::Class::ResultSet/prefetch> to join to the foreign table
+indicated by this relationship.
+
+=item related_class
+
+This is the class name of the table which contains a foreign key
+column containing PK values of this class.
+
+=item foreign_key_column
+
+The column name on the related class that contains the foreign key.
+
+OR
+
+=item cond
+
+A hashref where the keys are C<foreign.$column_on_related_table> and
+the values are C<self.$matching_column>. This is useful for
+relations that are across multiple columns.
+
+=back
+
+  # Every book has exactly one ISBN
+  My::DBIC::Schema::Book->has_one(
+    isbn => 
+    'My::DBIC::Schema::ISBN',
+    'book_id',
+  );
+
+  # OR (same result, assuming related_class stores our PK)
+  My::DBIC::Schema::Book->has_one(
+    isbn => 
+    'My::DBIC::Schema::ISBN',
+  );
+
+  # OR (same result)
+  My::DBIC::Schema::Book->has_one(
+    isbn => 
+    'My::DBIC::Schema::ISBN',
+    { 'foreign.book_id' => 'self.id' },
+  );
+
+  # Usage
+  my $isbn_obj = $book->isbn; # to get the ISBN object
+
+This is just like C<might_have>, except the implication is that the
+other object is always present. The only difference between C<has_one>
+and C<might_have> is that C<has_one> uses an (ordinary) inner join,
+whereas C<might_have> defaults to a left join.
+
 The has_one relationship should be used when a row in the table has exactly one
 related row in another table. If the related row might not exist in the foreign
 table, use the L<DBIx::Class::Relationship/might_have> relationship.
@@ -377,7 +509,7 @@
 
 =over 4
 
-=item Arguments: $accessor_name, $link_rel_name, $foreign_rel_name, $attr?
+=item Arguments: $accessor_name, $link_rel_name, $foreign_rel_name, \%attr?
 
 =back
 
@@ -449,7 +581,7 @@
 relation names are then used in the many_to_many call.
 
 In the above example, the Actor class will have 3 many_to_many accessor methods
-set: C<$roles>, C<$add_to_roles>, C<$set_roles>, and similarly named accessors
+set: C<roles>, C<add_to_roles>, C<set_roles>, and similarly named accessors
 will be created for the Role class for the C<actors> many_to_many
 relationship.
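+
+A sketch of those accessors in use, assuming C<$actor> is an Actor row
+object (the C<role> column is illustrative):
+
+  my @roles = $actor->roles;                  # related Role objects
+  $actor->add_to_roles({ role => 'Hamlet' }); # create and link a new Role
+  $actor->set_roles(@role_objs);              # replace the current links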
 
@@ -462,7 +594,7 @@
 
 =head1 AUTHORS
 
-Matt S. Trout <mst at shadowcatsystems.co.uk>
+See L<DBIx::Class>
 
 =head1 LICENSE
 

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultClass/HashRefInflator.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultClass/HashRefInflator.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultClass/HashRefInflator.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -87,4 +87,21 @@
     };
 }
 
+=head1 CAVEAT
+
+This will not work for relationships that have been prefetched. Consider the
+following:
+
+ my $artist = $artists_rs->search({}, { prefetch => 'cds' })->first;
+
+ my $cds = $artist->cds;
+ $cds->result_class('DBIx::Class::ResultClass::HashRefInflator');
+ my $first = $cds->first; 
+
+C<$first> will B<not> be a hashref; it will be a normal CD row, since
+HashRefInflator only affects resultsets at inflation time, and prefetch causes
+relations to be inflated when the master C<$artist> row is inflated.
+
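+A sketch of a workaround: set the result class on the resultset you
+actually fetch from, so the prefetched relations are inflated to
+hashrefs along with the master row:
+
+ $artists_rs->result_class('DBIx::Class::ResultClass::HashRefInflator');
+ my $artist = $artists_rs->search({}, { prefetch => 'cds' })->first;
+ my $first_cd = $artist->{cds}[0]; # a plain hashref
+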
+=cut
+
 1;

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultSet.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultSet.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultSet.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -12,6 +12,7 @@
 use DBIx::Class::ResultSetColumn;
 use DBIx::Class::ResultSourceHandle;
 use List::Util ();
+use Scalar::Util ();
 use base qw/DBIx::Class/;
 
 __PACKAGE__->mk_group_accessors('simple' => qw/result_class _source_handle/);
@@ -22,8 +23,8 @@
 
 =head1 SYNOPSIS
 
-  my $rs   = $schema->resultset('User')->search(registered => 1);
-  my @rows = $schema->resultset('CD')->search(year => 2005);
+  my $rs   = $schema->resultset('User')->search({ registered => 1 });
+  my @rows = $schema->resultset('CD')->search({ year => 2005 })->all();
 
 =head1 DESCRIPTION
 
@@ -53,7 +54,10 @@
 
 =head1 OVERLOADING
 
-If a resultset is used as a number it returns the C<count()>.  However, if it is used as a boolean it is always true.  So if you want to check if a result set has any results use C<if $rs != 0>.  C<if $rs> will always be true.
+If a resultset is used in a numeric context it returns the L</count>.
+However, if it is used in a boolean context it is always true.  So if
+you want to check if a resultset has any results use C<if $rs != 0>.
+C<if $rs> will always be true.
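+
+For example:
+
+  my $rs = $schema->resultset('CD')->search({ year => 2005 });
+  print "found ".$rs->count." CDs\n" if $rs != 0; # numeric comparison uses count()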
 
 =head1 METHODS
 
@@ -571,10 +575,18 @@
 Inflates the first result without creating a cursor if the resultset has
 any records in it; if not returns nothing. Used by L</find> as an optimisation.
 
-Can optionally take an additional condition *only* - this is a fast-code-path
-method; if you need to add extra joins or similar call ->search and then
-->single without a condition on the $rs returned from that.
+Can optionally take an additional condition B<only> - this is a fast-code-path
+method; if you need to add extra joins or similar call L</search> and then
+L</single> without a condition on the L<DBIx::Class::ResultSet> returned from
+that.
 
+B<Note>: As of 0.08100, this method assumes that the query returns only one
+row. If more than one row is returned, you will receive a warning:
+
+  Query returned more than one row
+
+In this case, you should be using L</first> or L</find> instead.
+
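+For example, a fast single-row fetch with a condition only (the
+resultset and column are illustrative):
+
+  my $cd = $schema->resultset('CD')->single({ title => 'Fork' });
+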
 =cut
 
 sub single {
@@ -1516,8 +1528,18 @@
 
   # precedence must be given to passed values over values inherited from the cond,
   # so the order here is important.
-  my %new = (
-    %{ $self->_remove_alias($collapsed_cond, $alias) },
+  my %new;
+  my %implied =  %{$self->_remove_alias($collapsed_cond, $alias)};
+  while( my($col,$value) = each %implied ){
+    if(ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '='){
+      $new{$col} = $value->{'='};
+      next;
+    }
+    $new{$col} = $value if $self->_is_deterministic_value($value);
+  }
+
+  %new = (
+    %new,
     %{ $self->_remove_alias($values, $alias) },
     -source_handle => $self->_source_handle,
     -result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED
@@ -1526,6 +1548,20 @@
   return $self->result_class->new(\%new);
 }
 
+# _is_deterministic_value
+#
+# Make an effort to strip non-deterministic values from the condition,
+# to make sure new_result chokes less
+
+sub _is_deterministic_value {
+  my $self = shift;
+  my $value = shift;
+  my $ref_type = ref $value;
+  return 1 if $ref_type eq '' || $ref_type eq 'SCALAR';
+  return 1 if Scalar::Util::blessed($value);
+  return 0;
+}
+
 # _collapse_cond
 #
 # Recursively collapse the condition.
@@ -1794,6 +1830,9 @@
 
 Gets the contents of the cache for the resultset, if the cache is set.
 
+The cache is populated either by using the L</prefetch> attribute to
+L</search> or by calling L</set_cache>.
+
 =cut
 
 sub get_cache {
@@ -1815,6 +1854,9 @@
 if the cache is set the resultset will return the cached objects rather
 than re-querying the database even if the cache attr is not set.
 
+The contents of the cache can also be populated by using the
+L</prefetch> attribute to L</search>.
+
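+For example (a sketch):
+
+  $rs->set_cache([ $rs->all ]); # cache the current rows
+  my $cached = $rs->get_cache;  # arrayref of row objects, no new query
+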
 =cut
 
 sub set_cache {
@@ -2427,14 +2469,28 @@
 case.
 
 Simple prefetches will be joined automatically, so there is no need
-for a C<join> attribute in the above search. If you're prefetching to
-depth (e.g. { cd => { artist => 'label' } or similar), you'll need to
-specify the join as well.
+for a C<join> attribute in the above search. 
 
 C<prefetch> can be used with the following relationship types: C<belongs_to>,
 C<has_one> (or if you're using C<add_relationship>, any relationship declared
-with an accessor type of 'single' or 'filter').
+with an accessor type of 'single' or 'filter'). A more complex example that
+prefetches an artist's cds, the tracks on those cds, and the tags associated
+with that artist is given below (assuming many-to-many from artists to tags):
 
+ my $rs = $schema->resultset('Artist')->search(
+   undef,
+   {
+     prefetch => [
+       { cds => 'tracks' },
+       { artist_tags => 'tags' }
+     ]
+   }
+ );
+ 
+
+B<NOTE:> If you specify a C<prefetch> attribute, the C<join> and C<select>
+attributes will be ignored.
+
 =head2 page
 
 =over 4
@@ -2646,6 +2702,17 @@
     # SELECT child.* FROM person child
     # INNER JOIN person father ON child.father_id = father.id
 
+=head2 for
+
+=over 4
+
+=item Value: ( 'update' | 'shared' )
+
+=back
+
+Set to 'update' for a SELECT ... FOR UPDATE or 'shared' for a SELECT
+... FOR SHARED.
+
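+A sketch of row-level locking inside a transaction (the resultset and
+condition are illustrative, and the database must support it):
+
+  $schema->txn_do(sub {
+    my @rows = $schema->resultset('Account')->search(
+      { id => 1 },
+      { for => 'update' }, # lock the selected rows until commit
+    )->all;
+    # ... modify and update the rows here ...
+  });
+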
 =cut
 
 1;

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultSetManager.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultSetManager.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultSetManager.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -4,38 +4,25 @@
 use base 'DBIx::Class';
 use Class::Inspector;
 
+warn "DBIx::Class::ResultSetManager never left experimental status and
+has now been DEPRECATED. This module will be deleted in 09000 so please
+migrate any and all code using it to explicit resultset classes using either
+__PACKAGE__->resultset_class(...) calls or by switching from using
+DBIx::Class::Schema->load_classes() to load_namespaces() and creating
+appropriate My::Schema::ResultSet::* classes for it to pick up.";
+
 =head1 NAME
 
-DBIx::Class::ResultSetManager - helpful methods for managing resultset
-classes (EXPERIMENTAL)
+DBIx::Class::ResultSetManager - scheduled for deletion in 09000
 
-=head1 SYNOPSIS
-
-  # in a table class
-  __PACKAGE__->load_components(qw/ResultSetManager Core/); # note order!
-
-  # will be removed from the table class and inserted into a
-  # table-specific resultset class
-  sub search_by_year_desc : ResultSet {
-    my $self = shift;
-    my $cond = shift;
-    my $attrs = shift || {};
-    $attrs->{order_by} = 'year DESC';
-    $self->search($cond, $attrs);
-  }
-
-  $rs = $schema->resultset('CD')->search_by_year_desc({ artist => 'Tool' });
-
 =head1 DESCRIPTION
 
-This package implements two useful features for customizing resultset
-classes.  C<load_resultset_components> loads components in addition to
-C<DBIx::Class::ResultSet> (or whatever you set as
-C<base_resultset_class>). Any methods tagged with the C<ResultSet>
-attribute will be moved into a table-specific resultset class (by
-default called C<Class::_resultset>, but configurable via
-C<table_resultset_class_suffix>).  Most of the magic is done when you
-call C<< __PACKAGE__->table >>.
+DBIx::Class::ResultSetManager never left experimental status and
+has now been DEPRECATED. This module will be deleted in 09000 so please
+migrate any and all code using it to explicit resultset classes using either
+__PACKAGE__->resultset_class(...) calls or by switching from using
+DBIx::Class::Schema->load_classes() to load_namespaces() and creating
+appropriate My::Schema::ResultSet::* classes for it to pick up.
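+
+For example, a minimal explicit resultset class picked up by
+load_namespaces() might look like this (a sketch, mirroring the old
+C<:ResultSet> attribute style):
+
+  package My::Schema::ResultSet::CD;
+  use base 'DBIx::Class::ResultSet';
+
+  sub search_by_year_desc {
+    my ($self, $cond, $attrs) = @_;
+    $attrs ||= {};
+    $attrs->{order_by} = 'year DESC';
+    return $self->search($cond, $attrs);
+  }
+
+  1;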
 
 =cut
 
@@ -44,17 +31,6 @@
 __PACKAGE__->base_resultset_class('DBIx::Class::ResultSet');
 __PACKAGE__->table_resultset_class_suffix('::_resultset');
 
-=head2 table
-
-Stacks on top of the normal L<DBIx::Class> C<table> method.  Any
-methods tagged with the C<ResultSet> attribute will be moved into a
-table-specific resultset class (by default called
-C<Class::_resultset>, but configurable via
-C<table_resultset_class_suffix>).  The magic for this is done within
-this C<< __PACKAGE__->table >> call.
-
-=cut
-
 sub table {
     my ($self, at rest) = @_;
     my $ret = $self->next::method(@rest);
@@ -65,14 +41,6 @@
     return $ret;
 }
 
-=head2 load_resultset_components
-
-C<load_resultset_components> loads components in addition to
-C<DBIx::Class::ResultSet> (or whatever you set as
-C<base_resultset_class>).
-
-=cut
-
 sub load_resultset_components {
     my ($self, at comp) = @_;
     my $resultset_class = $self->_setup_resultset_class;
@@ -119,13 +87,3 @@
 }
 
 1;
-
-=head1 AUTHORS
-
-David Kamholz <dkamholz at cpan.org>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultSource.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultSource.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/ResultSource.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -905,6 +905,16 @@
       $self->throw_exception(
         "Can't prefetch has_many ${pre} (join cond too complex)")
         unless ref($rel_info->{cond}) eq 'HASH';
+      my $dots = @{[$as_prefix =~ m/\./g]} + 1; # +1 to match the ".${as_prefix}"
+      if (my ($fail) = grep { @{[$_ =~ m/\./g]} == $dots }
+                         keys %{$collapse}) {
+        my ($last) = ($fail =~ /([^\.]+)$/);
+        $self->throw_exception(
+          "Can't prefetch multiple has_many rels ${last} and ${pre}"
+          .(length($as_prefix) ? "at the same level (${as_prefix})"
+                               : "at top level"
+        ));
+      }
       #my @col = map { (/^self\.(.+)$/ ? ("${as_prefix}.$1") : ()); }
       #              values %{$rel_info->{cond}};
       $collapse->{".${as_prefix}${pre}"} = [ $rel_source->primary_columns ];

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/Row.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Row.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Row.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -295,6 +295,21 @@
 the same after a call to C<update>.  If you need to preserve the hashref, it is
 sufficient to pass a shallow copy to C<update>, e.g. ( { %{ $href } } )
 
+If the values passed or any of the column values set on the object
+contain scalar references, e.g.:
+
+  $obj->last_modified(\'NOW()');
+  # OR
+  $obj->update({ last_modified => \'NOW()' });
+
+then the update will pass the values verbatim into SQL (see the
+L<SQL::Abstract> docs).  The values in your Row object will NOT change
+as a result of the update call; if you want the object to be updated
+with the actual values from the database, call L</discard_changes>
+after the update.
+
+  $obj->update()->discard_changes();
+
 =cut
 
 sub update {
@@ -436,6 +451,20 @@
            keys %{$self->{_dirty_columns}};
 }
 
+=head2 make_column_dirty
+
+Marks a column as dirty regardless of whether it has really changed.  Throws an
+exception if the column does not exist.
+
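+For example:
+
+  $row->make_column_dirty('last_modified'); # force update() to include it
+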
+=cut
+
+sub make_column_dirty {
+  my ($self, $column) = @_;
+
+  $self->throw_exception( "No such column '${column}'" )
+    unless exists $self->{_column_data}{$column} || $self->has_column($column);
+  $self->{_dirty_columns}{$column} = 1;
+}
+
 =head2 get_inflated_columns
 
   my %inflated_data = $obj->get_inflated_columns;
@@ -547,7 +576,9 @@
 
   my $copy = $orig->copy({ change => $to, ... });
 
-Inserts a new row with the specified changes.
+Inserts a new row with the specified changes. If the row has related
+objects in a C<has_many> then those objects may be copied too depending
+on the C<cascade_copy> relationship attribute.
 
 =cut
 
@@ -768,7 +799,30 @@
   $class->mk_group_accessors('column' => $acc);
 }
 
+=head2 get_from_storage ($attrs)
 
+Returns a new Row built from what the Storage currently holds for this
+Row's primary key.  You can use this to see if the storage has become
+inconsistent with your in-memory Row object.
+
+$attrs is expected to be a hashref of attributes suitable for passing as the
+second argument to $resultset->search($cond, $attrs);
+
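+For example (a sketch):
+
+  my $fresh = $row->get_from_storage;
+  # compare $fresh->get_columns with $row->get_columns to spot drift
+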
+=cut
+
+sub get_from_storage {
+    my $self = shift @_;
+    my $attrs = shift @_;
+    my @primary_columns = map { $self->$_ } $self->primary_columns;
+    my $resultset = $self->result_source->resultset;
+
+    if(defined $attrs) {
+        $resultset = $resultset->search(undef, $attrs);
+    }
+
+    return $resultset->find(@primary_columns);
+}
+
 =head2 throw_exception
 
 See Schema's throw_exception.

Deleted: DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/AtQueryInterval.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/AtQueryInterval.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/AtQueryInterval.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -1,91 +0,0 @@
-package DBIx::Class::Schema::AtQueryInterval;
-
-use Moose;
-
-=head1 NAME
-
-DBIx::Class::Schema::Role::AtQueryInterval; Defines a job control interval.
-
-=head1 SYNOPSIS
-
-The following example shows how to define a job control interval and assign it
-to a particular L<DBIx::Class::Schema::Job> for a L<DBIx::Class::Schema>
-
-    my $job = DBIx::Class::Schema->new(runs => sub { print 'did job'});
-    my $interval = DBIx::Class::Schema::Interval->new(every => 10);
-    
-    if($interval->matches($query_count)) {
-    	print "I indentified the query count as matching";
-    }
-    
-    ## $schema->isa(DBIx::Class::Schema);
-    $schema->create_and_add_at_query_intervals($interval => $job);
-    
-=head1 DESCRIPTION
-
-An AtQueryInterval is a plan object that will execute a certain
-
-=head1 ATTRIBUTES
-
-This package defines the following attributes.
-
-=head2 job (DBIx::Class::Schema::Job)
-
-This is the job which will run at the specified query interval
-
-=cut
-
-has 'job' => (
-  is=>'ro',
-  isa=>'DBIx::Class::Schema::Job',
-  required=>1,
-  handles=>['execute'],
-);
-
-
-=head2 interval (Int)
-
-This is the interval we are watching for
-
-=cut
-
-has 'interval' => (
-  is=>'ro',
-  isa=>'DBIx::Class::Schema::QueryInterval',
-  required=>1,
-  handles=>['matches'],
-);
-
-
-=head1 METHODS
-
-This module defines the following methods.
-
-=head2 execute_if_matches ($query_count, @args)
-
-Does the $query_count match the defined interval?  Returns a Boolean.
-
-=cut
-
-sub execute_if_matches {
-  my ($self, $query_count, @args) = @_;
-  if($self->matches($query_count)) {
-  	return $self->execute(@args);
-  } else {
-  	return;
-  }
-}
-
-
-=head1 AUTHORS
-
-See L<DBIx::Class> for more information regarding authors.
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut
-
-
-1;
\ No newline at end of file

Deleted: DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/Job.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/Job.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/Job.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -1,92 +0,0 @@
-package DBIx::Class::Schema::Job;
-
-use Moose;
-use Moose::Util::TypeConstraints;
-
-=head1 NAME
-
-DBIx::Class::Schema::Job; A job associated with a Schema
-
-=head1 SYNOPSIS
-
-The following example creates a new job and then executes it.
-
-    my $job = DBIx::Class::Schema->new(runs => sub { print 'did job'});
-    $job->execute; # 'did job' -> STDOUT
-
-=head1 DESCRIPTION
-
-This is a base class intended to hold code that get's executed by the schema
-according to rules known to the schema.  Subclassers may wish to override how
-the L</runs> attribute is defined in order to create custom behavior.
-
-=head1 SUBTYPES
-
-This package defines the following subtypes
-
-=head2 Handler
-
-A coderef based type that the job runs when L</execute> is called.
-
-=cut
-
-subtype 'DBIx::Class::Schema::Job::Handler'
-    => as 'CodeRef';
-    
-coerce 'DBIx::Class::Schema::Job::Handler'
-    => from 'Str'
-    => via {
-    	my $handler_method = $_; 
-        sub {
-        	my $job = shift @_;
-        	my $target = shift @_;
-        	$target->$handler_method($job, @_);
-        };                 
-    };
-
-=head1 ATTRIBUTES
-
-This package defines the following attributes.
-
-=head2 runs
-
-This is a coderef which is de-reffed by L</execute> and is passed the job object
-(ie $self), and any additional arguments passed to L</execute>
-
-=cut
-
-has 'runs' => (
-  is=>'ro',
-  isa=>'DBIx::Class::Schema::Job::Handler',
-  coerce=>1,
-  required=>1,
-);
-
-
-=head1 METHODS
-
-This module defines the following methods.
-
-=head2 execute ($schema, $query_interval)
-
-Method called by the L<DBIx::Class::Schema> when it wants a given job to run.
-
-=cut
-
-sub execute {
-	return $_[0]->runs->(@_);
-}
-
-
-=head1 AUTHORS
-
-See L<DBIx::Class> for more information regarding authors.
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut
-
-
-1;
\ No newline at end of file

Deleted: DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/QueryInterval.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/QueryInterval.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/QueryInterval.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -1,93 +0,0 @@
-package DBIx::Class::Schema::QueryInterval;
-
-use Moose;
-
-=head1 NAME
-
-DBIx::Class::Schema::Role::QueryInterval; Defines a job control interval.
-
-=head1 SYNOPSIS
-
-The following example shows how to define a job control interval and assign it
-to a particular L<DBIx::Class::Schema::Job> for a L<DBIx::Class::Schema>
-
-    my $job = DBIx::Class::Schema->new(runs => sub { print 'did job'});
-    my $interval = DBIx::Class::Schema::Interval->new(every => 10);
-    
-    if($interval->matches($query_count)) {
-    	print "I indentified the query count as matching";
-    }
-    
-    ## $schema->isa(DBIx::Class::Schema);
-    $schema->create_and_add_at_query_intervals($interval => $job);
-    
-=head1 DESCRIPTION
-
-A Query Interval defines a reoccuring period based on the query count from a
-given offset.  For example, you can define a query interval of 10 queries
-with an offset of 1 query.  This interval identifies query number 11, 21, 31,
-and so on.
-
-=head1 ATTRIBUTES
-
-This package defines the following attributes.
-
-=head2 every (Int)
-
-This is the 'size' of the gap identifying a query as matching a particular
-interval.  Think, "I match every X queries".
-
-=cut
-
-has 'every' => (
-  is=>'ro',
-  isa=>'Int',
-  required=>1,
-);
-
-
-=head2 offset (Int)
-
-This is a number of queries from the start of all queries to offset the match
-counting mechanism.  This is basically added to the L</every> attribute to 
-identify a query as matching the interval we wish to define.
-
-=cut
-
-has 'offset' => (
-  is=>'ro',
-  isa=>'Int',
-  required=>1,
-  default=>0,
-);
-
-
-=head1 METHODS
-
-This module defines the following methods.
-
-=head2 matches ($query_count)
-
-Does the $query_count match the defined interval?  Returns a Boolean.
-
-=cut
-
-sub matches {
-  my ($self, $query_count) = @_;
-  my $offset_count = $query_count - $self->offset;
-  return $offset_count % $self->every ? 0:1;
-}
-
-
-=head1 AUTHORS
-
-See L<DBIx::Class> for more information regarding authors.
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut
-
-
-1;
\ No newline at end of file

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/Versioned.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/Versioned.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema/Versioned.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -71,48 +71,109 @@
 =head1 SYNOPSIS
 
   package Library::Schema;
-  use base qw/DBIx::Class::Schema/;   
+  use base qw/DBIx::Class::Schema/;
+
+  our $VERSION = 0.001;
+
   # load Library::Schema::CD, Library::Schema::Book, Library::Schema::DVD
   __PACKAGE__->load_classes(qw/CD Book DVD/);
 
-  __PACKAGE__->load_components(qw/+DBIx::Class::Schema::Versioned/);
+  __PACKAGE__->load_components(qw/Schema::Versioned/);
   __PACKAGE__->upgrade_directory('/path/to/upgrades/');
-  __PACKAGE__->backup_directory('/path/to/backups/');
 
 
 =head1 DESCRIPTION
 
-This module is a component designed to extend L<DBIx::Class::Schema>
-classes, to enable them to upgrade to newer schema layouts. To use this
-module, you need to have called C<create_ddl_dir> on your Schema to
-create your upgrade files to include with your delivery.
+This module provides methods to apply DDL changes to your database using SQL
+diff files. Normally these diff files would be created using
+L<DBIx::Class::Schema/create_ddl_dir>.
 
 A table called I<dbix_class_schema_versions> is created and maintained by the
-module. This contains two fields, 'Version' and 'Installed', which
-contain each VERSION of your Schema, and the date+time it was installed.
+module. This is used to determine which version your database is currently at.
+Similarly the $VERSION in your DBIC schema class is used to determine the
+current DBIC schema version.
 
-The actual upgrade is called manually by calling C<upgrade> on your
-schema object. Code is run at connect time to determine whether an
-upgrade is needed, if so, a warning "Versions out of sync" is
-produced.
+The upgrade is initiated manually by calling C<upgrade> on your schema object;
+this will attempt to upgrade the database from its current version to the current
+schema version using a diff from your I<upgrade_directory>. If a suitable diff is
+not found then no upgrade is possible.
 
-So you'll probably want to write a script which generates your DDLs and diffs
-and another which executes the upgrade.
-
 NB: At the moment, only SQLite and MySQL are supported. This is due to
 spotty behaviour in the SQL::Translator producers; please help us by
-them.
+enhancing them. Ask on the mailing list or IRC channel for details (community details
+in L<DBIx::Class>).
 
-=head1 METHODS
+=head1 GETTING STARTED
 
-=head2 upgrade_directory
+Firstly you need to set up your schema class as per the L</SYNOPSIS>; make sure
+you have specified an upgrade_directory and an initial $VERSION.
 
-Use this to set the directory your upgrade files are stored in.
+Then you'll need two scripts, one to create DDL files and diffs and another to perform
+upgrades. Your creation script might look a bit like this:
 
-=head2 backup_directory
+  use strict;
+  use Pod::Usage;
+  use Getopt::Long;
+  use MyApp::Schema;
 
-Use this to set the directory you want your backups stored in.
+  my ( $preversion, $help ); 
+  GetOptions(
+    'p|preversion:s'  => \$preversion,
+  ) or die pod2usage;
+
+  # under "use strict" these need declaring; e.g. take them from @ARGV
+  my ($dsn, $user, $password) = @ARGV;
 
+  my $schema = MyApp::Schema->connect(
+    $dsn,
+    $user,
+    $password,
+  );
+  my $sql_dir = './sql';
+  my $version = $schema->schema_version();
+  $schema->create_ddl_dir( 'MySQL', $version, $sql_dir, $preversion );
+
+Then your upgrade script might look like so:
+
+  use strict;
+  use MyApp::Schema;
+
+  # as above, connection details might come from the command line
+  my ($dsn, $user, $password) = @ARGV;
+
+  my $schema = MyApp::Schema->connect(
+    $dsn,
+    $user,
+    $password,
+  );
+
+  if (!$schema->get_db_version()) {
+    # schema is unversioned
+    $schema->deploy();
+  } else {
+    $schema->upgrade();
+  }
+
+The script above assumes that if the database is unversioned then it is empty
+and we can safely deploy the DDL to it. However things are not always so simple.
+
+If you want to initialise a pre-existing database where the DDL is not the same
+as the DDL for your current schema version, then you will need a diff which
+converts the database's DDL to the current DDL. The best way to do this is
+to get a dump of the database schema (without data) and save that in your
+SQL directory as version 0.000 (the filename must follow the format
+produced by L<DBIx::Class::Schema/ddl_filename>), then create a diff
+from version 0.000 to the current version using the creation script
+given above. Then hand-check, and if necessary edit, the resulting
+diff to ensure that it will apply. Once you have done all that you
+can do this:
+
+  if (!$schema->get_db_version()) {
+    # schema is unversioned
+    $schema->install("0.000");
+  }
+
+  # this will now apply the 0.000 to current version diff
+  $schema->upgrade();
+
+In the case of an unversioned database the above code will create the
+dbix_class_schema_versions table and write version 0.000 to it. C<upgrade>
+will then apply the diff described in the previous paragraph, and you're
+good to go.
+
 =cut
 
 package DBIx::Class::Schema::Versioned;
@@ -129,147 +190,78 @@
 __PACKAGE__->mk_classdata('do_backup');
 __PACKAGE__->mk_classdata('do_diff_on_init');
 
-=head2 schema_version
 
-Returns the current schema class' $VERSION; does -not- use $schema->VERSION
-since that varies in results depending on if version.pm is installed, and if
-so the perl or XS versions. If you want this to change, bug the version.pm
-author to make vpp and vxs behave the same.
+=head1 METHODS
 
-=cut
+=head2 upgrade_directory
 
-sub schema_version {
-  my ($self) = @_;
-  my $class = ref($self)||$self;
-  my $version;
-  {
-    no strict 'refs';
-    $version = ${"${class}::VERSION"};
-  }
-  return $version;
-}
+Use this to set the directory your upgrade files are stored in.
 
-=head2 get_db_version
+=head2 backup_directory
 
-Returns the version that your database is currently at. This is determined by the values in the
-dbix_class_schema_versions table that $self->upgrade writes to.
+Use this to set the directory you want your backups stored in (note that backups
+are disabled by default).
 
 =cut
 
-sub get_db_version
-{
-    my ($self, $rs) = @_;
+=head2 install
 
-    my $vtable = $self->{vschema}->resultset('Table');
-    my $version = 0;
-    eval {
-      my $stamp = $vtable->get_column('installed')->max;
-      $version = $vtable->search({ installed => $stamp })->first->version;
-    };
-    return $version;
-}
+=over 4
 
-sub _source_exists
-{
-    my ($self, $rs) = @_;
+=item Arguments: $db_version
 
-    my $c = eval {
-        $rs->search({ 1, 0 })->count;
-    };
-    return 0 if $@ || !defined $c;
+=back
 
-    return 1;
-}
+Call this to initialise a previously unversioned database. The table
+'dbix_class_schema_versions' will be created and used to store the
+database version.
 
-=head2 backup
+Takes one argument which should be the version that the database is
+currently at. Defaults to the return value of L</schema_version>.
 
-This is an overwritable method which is called just before the upgrade, to
-allow you to make a backup of the database. Per default this method attempts
-to call C<< $self->storage->backup >>, to run the standard backup on each
-database type. 
+See L</"GETTING STARTED"> for more details.
 
-This method should return the name of the backup file, if appropriate..
-
 =cut
 
-sub backup
+sub install
 {
-    my ($self) = @_;
-    ## Make each ::DBI::Foo do this
-    $self->storage->backup($self->backup_directory());
-}
+  my ($self, $new_version) = @_;
 
-# is this just a waste of time? if not then merge with DBI.pm
-sub _create_db_to_schema_diff {
-  my $self = shift;
+  # must be called on a fresh database
+  if ($self->get_db_version()) {
+    warn 'Install not possible as versions table already exists in database';
+  }
 
-  my %driver_to_db_map = (
-                          'mysql' => 'MySQL'
-                         );
+  # default to current version if none passed
+  $new_version ||= $self->schema_version();
 
-  my $db = $driver_to_db_map{$self->storage->dbh->{Driver}->{Name}};
-  unless ($db) {
-    print "Sorry, this is an unsupported DB\n";
-    return;
+  if ($new_version) {
+    # create versions table and version row
+    $self->{vschema}->deploy;
+    $self->_set_db_version;
   }
+}
 
-  eval 'require SQL::Translator "0.09"';
-  if ($@) {
-    $self->throw_exception("SQL::Translator 0.09 required");
-  }
+=head2 deploy
 
-  my $db_tr = SQL::Translator->new({ 
-                                    add_drop_table => 1, 
-                                    parser => 'DBI',
-                                    parser_args => { dbh => $self->storage->dbh }
-                                   });
+Same as L<DBIx::Class::Schema/deploy> but also calls C<install>.
 
-  $db_tr->producer($db);
-  my $dbic_tr = SQL::Translator->new;
-  $dbic_tr->parser('SQL::Translator::Parser::DBIx::Class');
-  $dbic_tr = $self->storage->configure_sqlt($dbic_tr, $db);
-  $dbic_tr->data($self);
-  $dbic_tr->producer($db);
+=cut
 
-  $db_tr->schema->name('db_schema');
-  $dbic_tr->schema->name('dbic_schema');
-
-  # is this really necessary?
-  foreach my $tr ($db_tr, $dbic_tr) {
-    my $data = $tr->data;
-    $tr->parser->($tr, $$data);
-  }
-
-  my $diff = SQL::Translator::Diff::schema_diff($db_tr->schema, $db, 
-                                                $dbic_tr->schema, $db,
-                                                { ignore_constraint_names => 1, ignore_index_names => 1, caseopt => 1 });
-
-  my $filename = $self->ddl_filename(
-                                         $db,
-                                         $self->upgrade_directory,
-                                         $self->schema_version,
-                                         'PRE',
-                                    );
-  my $file;
-  if(!open($file, ">$filename"))
-    {
-      $self->throw_exception("Can't open $filename for writing ($!)");
-      next;
-    }
-  print $file $diff;
-  close($file);
-
-  print "WARNING: There may be differences between your DB and your DBIC schema. Please review and if necessary run the SQL in $filename to sync your DB.\n";
+sub deploy {
+  my $self = shift;
+  $self->next::method(@_);
+  $self->install();
 }
 
 =head2 upgrade
 
 Call this to attempt to upgrade your database from the version it is at to the version
-this DBIC schema is at. 
+this DBIC schema is at. If they are the same it does nothing.
 
-It requires an SQL diff file to exist in $schema->upgrade_directory, normally you will
-have created this using $schema->create_ddl_dir.
+It requires an SQL diff file to exist in your I<upgrade_directory>; normally you will
+have created this using L<DBIx::Class::Schema/create_ddl_dir>.
 
+If successful, the dbix_class_schema_versions table is updated with the current
+DBIC schema version.
+
 =cut
 
 sub upgrade
@@ -279,12 +271,7 @@
 
   # db unversioned
   unless ($db_version) {
-    # set version in dbix_class_schema_versions table, can't actually upgrade as we don 't know what version the DB is at
-    $self->_create_db_to_schema_diff() if ($self->do_diff_on_init);
-
-    # create versions table and version row
-    $self->{vschema}->deploy;
-    $self->_set_db_version;
+    warn 'Upgrade not possible as database is unversioned. Please call install first.';
     return;
   }
 
@@ -302,8 +289,8 @@
   
   my $upgrade_file = $self->ddl_filename(
                                          $self->storage->sqlt_type,
+                                         $self->schema_version,
                                          $self->upgrade_directory,
-                                         $self->schema_version,
                                          $db_version,
                                         );
 
@@ -321,31 +308,6 @@
   $self->_set_db_version;
 }
 
-sub _set_db_version {
-  my $self = shift;
-
-  my $vtable = $self->{vschema}->resultset('Table');
-  $vtable->create({ version => $self->schema_version,
-                      installed => strftime("%Y-%m-%d %H:%M:%S", gmtime())
-                      });
-
-}
-
-sub _read_sql_file {
-  my $self = shift;
-  my $file = shift || return;
-
-  my $fh;
-  open $fh, "<$file" or warn("Can't open upgrade file, $file ($!)");
-  my @data = split(/\n/, join('', <$fh>));
-  @data = grep(!/^--/, @data);
-  @data = split(/;/, join('', @data));
-  close($fh);
-  @data = grep { $_ && $_ !~ /^-- / } @data;
-  @data = grep { $_ !~ /^(BEGIN TRANACTION|COMMIT)/m } @data;
-  return \@data;
-}
-
 =head2 do_upgrade
 
 This is an overwritable method used to run your upgrade. The freeform method
@@ -353,22 +315,16 @@
 any number of times to run the actual SQL commands, and in between you can
 sandwich your data upgrading. For example, first run all the B<CREATE>
 commands, then migrate your data from old to new tables/formats, then 
-issue the DROP commands when you are finished.
+issue the DROP commands when you are finished. By default the whole
+file is run as-is.
 
-Will run the whole file as it is by default.
-
 =cut
 
 sub do_upgrade
 {
-    my ($self) = @_;
+  my ($self) = @_;
 
-    ## overridable sub, per default just run all the commands.
-    $self->run_upgrade(qr/create/i);
-    $self->run_upgrade(qr/alter table .*? add/i);
-    $self->run_upgrade(qr/alter table .*? (?!drop)/i);
-    $self->run_upgrade(qr/alter table .*? drop/i);
-    $self->run_upgrade(qr/drop/i);
+  # just run all the commands (including inserts) in order
+  $self->run_upgrade(qr/.*?/);
 }
 
 =head2 run_upgrade
@@ -377,7 +333,7 @@
 
 Runs a set of SQL statements matching a passed in regular expression. The
 idea is that this method can be called any number of times from your
-C<upgrade> method, running whichever commands you specify via the
+C<do_upgrade> method, running whichever commands you specify via the
 regex in the parameter. Probably won't work unless called from the overridable
 do_upgrade method.
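+
+For example, an overridden C<do_upgrade> might run the DDL commands
+before any data fix-ups (a sketch; the regexes are only illustrative):
+
+  sub do_upgrade {
+    my ($self) = @_;
+    $self->run_upgrade(qr/create/i); # CREATE statements first
+    $self->run_upgrade(qr/insert/i); # then any seed data
+  }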
 
@@ -401,6 +357,52 @@
     return 1;
 }
 
+=head2 get_db_version
+
+Returns the version that your database is currently at. This is determined by the values in the
+dbix_class_schema_versions table that C<upgrade> and C<install> write to.
+
+=cut
+
+sub get_db_version
+{
+    my ($self, $rs) = @_;
+
+    my $vtable = $self->{vschema}->resultset('Table');
+    my $version = 0;
+    eval {
+      my $stamp = $vtable->get_column('installed')->max;
+      $version = $vtable->search({ installed => $stamp })->first->version;
+    };
+    return $version;
+}
+
+=head2 schema_version
+
+Returns the current schema class' $VERSION
+
+=cut
+
+=head2 backup
+
+This is an overwritable method which is called just before the upgrade, to
+allow you to make a backup of the database. Per default this method attempts
+to call C<< $self->storage->backup >>, to run the standard backup on each
+database type. 
+
+This method should return the name of the backup file, if appropriate.
+
+This method is disabled by default. Set $schema->do_backup(1) to enable it.
+
+=cut
+
+sub backup
+{
+    my ($self) = @_;
+    ## Make each ::DBI::Foo do this
+    $self->storage->backup($self->backup_directory());
+}
+
 =head2 connection
 
 Overloaded method. This checks the DBIC schema version against the DB version and
@@ -408,21 +410,29 @@
 compatibility between the old versions table (SchemaVersions) and the new one
 (dbix_class_schema_versions).
 
-To avoid the checks on connect, set the env var DBIC_NO_VERSION_CHECK. This can be
-useful for scripts.
+To avoid the checks on connect, set the env var DBIC_NO_VERSION_CHECK,
+or alternatively set the C<ignore_version> attr in the fourth argument,
+like so:
 
+  my $schema = MyApp::Schema->connect(
+    $dsn,
+    $user,
+    $password,
+    { ignore_version => 1 },
+  );
+
 =cut
 
 sub connection {
   my $self = shift;
   $self->next::method(@_);
-  $self->_on_connect;
+  $self->_on_connect($_[3]);
   return $self;
 }
 
 sub _on_connect
 {
-  my ($self) = @_;
+  my ($self, $args) = @_;
+
+  $args = {} unless $args;
   $self->{vschema} = DBIx::Class::Version->connect(@{$self->storage->connect_info()});
   my $vtable = $self->{vschema}->resultset('Table');
 
@@ -436,10 +446,9 @@
       $self->storage->dbh->do("DROP TABLE " . $vtable_compat->result_source->from);
     }
   }
-  
+
   # useful when connecting from scripts etc
-  return if ($ENV{DBIC_NO_VERSION_CHECK});
-  
+  return if ($args->{ignore_version} || ($ENV{DBIC_NO_VERSION_CHECK} && !exists $args->{ignore_version}));
   my $pversion = $self->get_db_version();
 
   if($pversion eq $self->schema_version)
@@ -458,6 +467,107 @@
     ", your database contains version $pversion, please call upgrade on your Schema.\n";
 }
 
+# is this just a waste of time? if not then merge with DBI.pm
+sub _create_db_to_schema_diff {
+  my $self = shift;
+
+  my %driver_to_db_map = (
+                          'mysql' => 'MySQL'
+                         );
+
+  my $db = $driver_to_db_map{$self->storage->dbh->{Driver}->{Name}};
+  unless ($db) {
+    print "Sorry, this is an unsupported DB\n";
+    return;
+  }
+
+  eval 'require SQL::Translator "0.09"';
+  if ($@) {
+    $self->throw_exception("SQL::Translator 0.09 required");
+  }
+
+  my $db_tr = SQL::Translator->new({ 
+                                    add_drop_table => 1, 
+                                    parser => 'DBI',
+                                    parser_args => { dbh => $self->storage->dbh }
+                                   });
+
+  $db_tr->producer($db);
+  my $dbic_tr = SQL::Translator->new;
+  $dbic_tr->parser('SQL::Translator::Parser::DBIx::Class');
+  $dbic_tr = $self->storage->configure_sqlt($dbic_tr, $db);
+  $dbic_tr->data($self);
+  $dbic_tr->producer($db);
+
+  $db_tr->schema->name('db_schema');
+  $dbic_tr->schema->name('dbic_schema');
+
+  # is this really necessary?
+  foreach my $tr ($db_tr, $dbic_tr) {
+    my $data = $tr->data;
+    $tr->parser->($tr, $$data);
+  }
+
+  my $diff = SQL::Translator::Diff::schema_diff($db_tr->schema, $db, 
+                                                $dbic_tr->schema, $db,
+                                                { ignore_constraint_names => 1, ignore_index_names => 1, caseopt => 1 });
+
+  my $filename = $self->ddl_filename(
+                                         $db,
+                                         $self->schema_version,
+                                         $self->upgrade_directory,
+                                         'PRE',
+                                    );
+  my $file;
+  if(!open($file, ">$filename"))
+    {
+      $self->throw_exception("Can't open $filename for writing ($!)");
+      next;
+    }
+  print $file $diff;
+  close($file);
+
+  print "WARNING: There may be differences between your DB and your DBIC schema. Please review and if necessary run the SQL in $filename to sync your DB.\n";
+}
+
+
+sub _set_db_version {
+  my $self = shift;
+
+  my $vtable = $self->{vschema}->resultset('Table');
+  $vtable->create({ version => $self->schema_version,
+                      installed => strftime("%Y-%m-%d %H:%M:%S", gmtime())
+                      });
+
+}
+
+sub _read_sql_file {
+  my $self = shift;
+  my $file = shift || return;
+
+  my $fh;
+  open $fh, "<$file" or warn("Can't open upgrade file, $file ($!)");
+  my @data = split(/\n/, join('', <$fh>));
+  @data = grep(!/^--/, @data);
+  @data = split(/;/, join('', @data));
+  close($fh);
+  @data = grep { $_ && $_ !~ /^-- / } @data;
+  @data = grep { $_ !~ /^(BEGIN|BEGIN TRANSACTION|COMMIT)/m } @data;
+  return \@data;
+}
+
+sub _source_exists
+{
+    my ($self, $rs) = @_;
+
+    my $c = eval {
+        $rs->search({ 1, 0 })->count;
+    };
+    return 0 if $@ || !defined $c;
+
+    return 1;
+}
+
 1;
 
 

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Schema.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -613,19 +613,6 @@
   return $schema;
 }
 
-=head2 setup_connection_class
-
-=over 4
-
-=item Arguments: $target, @info
-
-=back
-
-Sets up a database connection class to inject between the schema and the
-subclasses that the schema creates.
-
-=cut
-
 sub setup_connection_class {
   my ($class, $target, @info) = @_;
   $class->inject_base($target => 'DBIx::Class::DB');
@@ -637,9 +624,9 @@
 
 =over 4
 
-=item Arguments: $storage_type
+=item Arguments: $storage_type|{$storage_type, \%args}
 
-=item Return Value: $storage_type
+=item Return Value: $storage_type|{$storage_type, \%args}
 
 =back
 
@@ -653,6 +640,13 @@
 dealing with MSSQL via L<DBD::Sybase>, in which case you'd set it to
 C<::DBI::Sybase::MSSQL>.
 
+If your storage type requires instantiation arguments, those are defined as a
+second argument in the form of a hashref, and the entire value needs to be
+wrapped into an arrayref or a hashref.  We support both types of refs here in
+order to play nice with the Config::[class] of your choice.
+
+See L<DBIx::Class::Storage::DBI::Replicated> for an example of this.
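+
+A sketch of the arrayref form (the storage class name is real; the
+argument values are illustrative assumptions):
+
+  __PACKAGE__->storage_type(
+    [ '::DBI::Replicated' => { balancer_type => '::Random' } ]
+  );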
+
 =head2 connection
 
 =over 4
@@ -675,19 +669,33 @@
 sub connection {
   my ($self, @info) = @_;
   return $self if !@info && $self->storage;
-  my $storage_class = $self->storage_type;
+
+  my ($storage_class, $args) = ref $self->storage_type
+    ? ($self->_normalize_storage_type($self->storage_type), {})
+    : ($self->storage_type, {});
+
   $storage_class = 'DBIx::Class::Storage'.$storage_class
     if $storage_class =~ m/^::/;
   eval "require ${storage_class};";
   $self->throw_exception(
     "No arguments to load_classes and couldn't load ${storage_class} ($@)"
   ) if $@;
-  my $storage = $storage_class->new($self);
+  my $storage = $storage_class->new($self=>$args);
   $storage->connect_info(\@info);
   $self->storage($storage);
   return $self;
 }
 
+sub _normalize_storage_type {
+  my ($self, $storage_type) = @_;
+  if(ref $storage_type eq 'ARRAY') {
+    return @$storage_type;
+  } elsif(ref $storage_type eq 'HASH') {
+    return %$storage_type;
+  } else {
+    $self->throw_exception('Unsupported REFTYPE given: '. ref $storage_type);
+  }
+}
+
 =head2 connect
 
 =over 4
@@ -734,9 +742,10 @@
   $self->storage->txn_do(@_);
 }
 
-=head2 txn_scope_guard
+=head2 txn_scope_guard (EXPERIMENTAL)
 
-Runs C<txn_scope_guard> on the schema's storage.
+Runs C<txn_scope_guard> on the schema's storage. See 
+L<DBIx::Class::Storage/txn_scope_guard>.
 
 =cut
 
@@ -1028,7 +1037,9 @@
 
 Additionally, the DBIx::Class parser accepts a C<sources> parameter as a hash 
 ref or an array ref, containing a list of source to deploy. If present, then 
-only the sources listed will get deployed.
+only the sources listed will get deployed. Furthermore, you can use the
+C<add_fk_index> parser parameter to prevent the parser from creating an index for each
+FK.
 
 =cut
 
@@ -1082,6 +1093,8 @@
 name format. For the ALTER file, the same format is used, replacing
 $version in the name with "$preversion-$version".
 
+See L<DBIx::Class::Schema/deploy> for details of $sqlt_args.
+
 If no arguments are passed, then the following default values are used:
 
 =over 4
@@ -1110,15 +1123,15 @@
   $self->storage->create_ddl_dir($self, @_);
 }
 
-=head2 ddl_filename (EXPERIMENTAL)
+=head2 ddl_filename
 
 =over 4
 
-=item Arguments: $directory, $database-type, $version, $preversion
+=item Arguments: $database-type, $version, $directory, $preversion
 
 =back
 
-  my $filename = $table->ddl_filename($type, $dir, $version, $preversion)
+  my $filename = $table->ddl_filename($type, $version, $dir, $preversion)
 
 This method is called by C<create_ddl_dir> to compose a file name out of
 the supplied directory, database type and version number. The default file
@@ -1130,14 +1143,14 @@
 =cut
 
 sub ddl_filename {
-    my ($self, $type, $dir, $version, $pversion) = @_;
+  my ($self, $type, $version, $dir, $preversion) = @_;
 
-    my $filename = ref($self);
-    $filename =~ s/::/-/g;
-    $filename = File::Spec->catfile($dir, "$filename-$version-$type.sql");
-    $filename =~ s/$version/$pversion-$version/ if($pversion);
-
-    return $filename;
+  my $filename = ref($self);
+  $filename =~ s/::/-/g;
+  $filename = File::Spec->catfile($dir, "$filename-$version-$type.sql");
+  $filename =~ s/$version/$preversion-$version/ if($preversion);
+  
+  return $filename;
 }
 
 =head2 sqlt_deploy_hook($sqlt_schema)
@@ -1187,6 +1200,29 @@
   return Storable::dclone($obj);
 }
 
+=head2 schema_version
+
+Returns the current schema class' $VERSION
+
+=cut
+
+sub schema_version {
+  my ($self) = @_;
+  my $class = ref($self)||$self;
+
+  # does -not- use $schema->VERSION
+  # since that varies in results depending on if version.pm is installed, and if
+  # so the perl or XS versions. If you want this to change, bug the version.pm
+  # author to make vpp and vxs behave the same.
+
+  my $version;
+  {
+    no strict 'refs';
+    $version = ${"${class}::VERSION"};
+  }
+  return $version;
+}
+
 1;
 
 =head1 AUTHORS

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/StartupCheck.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/StartupCheck.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/StartupCheck.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -2,7 +2,8 @@
 
 BEGIN {
 
-    { package TestRHBug; use overload bool => sub { 0 } }
+    { package # don't want this in PAUSE
+        TestRHBug; use overload bool => sub { 0 } }
 
     sub _has_bug_34925 {
 	my %thing;

Added: DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm	                        (rev 0)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -0,0 +1,53 @@
+package DBIx::Class::Storage::DBI::Replicated::Balancer::First;
+
+use Moose;
+with 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Balancer::First - Just get the First Balancer
+
+=head1 SYNOPSIS
+
+This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.  You
+shouldn't need to create instances of this class.
+    
+=head1 DESCRIPTION
+
+Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
+databases (L<DBIx::Class::Storage::DBI::Replicated::Replicant>), defines a
+method by which query load can be spread out across each replicant in the pool.
+
+This Balancer just gets whatever is the first replicant in the pool.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 next_storage
+
+Gets the first active storage from the pool.  Probably only useful when you
+have one replicant.
+
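+For example (a sketch; selecting this balancer explicitly when configuring a
+replicated schema):
+
+  $schema->storage_type( ['::DBI::Replicated', { balancer => '::First' }] );
+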
+=cut
+
+sub next_storage {
+  return  (shift->pool->active_replicants)[0];
+}
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski at takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+__PACKAGE__->meta->make_immutable;
+
+1;
\ No newline at end of file

Added: DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm	                        (rev 0)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -0,0 +1,62 @@
+package DBIx::Class::Storage::DBI::Replicated::Balancer::Random;
+
+use Moose;
+with 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Balancer::Random - A 'random' Balancer
+
+=head1 SYNOPSIS
+
+This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.  You
+shouldn't need to create instances of this class.
+    
+=head1 DESCRIPTION
+
+Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
+databases (L<DBIx::Class::Storage::DBI::Replicated::Replicant>), defines a
+method by which query load can be spread out across each replicant in the pool.
+
+This Balancer uses Perl's C<rand> builtin to randomly pick an active
+replicant from the associated pool.  This may or may not be random enough for
+you, patches welcome.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 next_storage
+
+Returns an active replicant at random.  Please note that due to the nature of
+randomness, it's possible for a particular active replicant to be requested
+several times in a row.
+
+=cut
+
+sub next_storage {
+  my $self = shift @_;
+  my @active_replicants = $self->pool->active_replicants;
+  my $count_active_replicants = scalar @active_replicants;
+  my $random_replicant = int(rand($count_active_replicants));
+  
+  return $active_replicants[$random_replicant];
+}
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski at takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+__PACKAGE__->meta->make_immutable;
+
+1;
\ No newline at end of file

Added: DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm	                        (rev 0)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -0,0 +1,240 @@
+package DBIx::Class::Storage::DBI::Replicated::Balancer;
+
+use Moose::Role;
+requires 'next_storage';
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Balancer - A Software Load Balancer
+
+=head1 SYNOPSIS
+
+This role is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
+    
+=head1 DESCRIPTION
+
+Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
+databases (L<DBIx::Class::Storage::DBI::Replicated::Replicant>), defines a
+method by which query load can be spread out across each replicant in the pool.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head2 auto_validate_every ($seconds)
+
+If this attribute is set, run the pool's C<validate_replicants> method every
+$seconds.  Be careful with this, because if you set it to 0 you will end up
+validating on every query.
+
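+For example (a sketch; the interval and balancer choice are illustrative),
+this can be set via the balancer_args of
+L<DBIx::Class::Storage::DBI::Replicated>:
+
+  $schema->storage_type( ['::DBI::Replicated', {
+    balancer      => '::Random',
+    balancer_args => { auto_validate_every => 500 },
+  }] );
+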
+=cut
+
+has 'auto_validate_every' => (
+  is=>'rw',
+  isa=>'Int',
+  predicate=>'has_auto_validate_every',
+);
+
+=head2 master
+
+The L<DBIx::Class::Storage::DBI> object that is the master database all the
+replicants are trying to follow.  The balancer needs to know it since it's the
+ultimate fallback.
+
+=cut
+
+has 'master' => (
+  is=>'ro',
+  isa=>'DBIx::Class::Storage::DBI',
+  required=>1,
+);
+
+=head2 pool
+
+The L<DBIx::Class::Storage::DBI::Replicated::Pool> object that we are trying to
+balance.
+
+=cut
+
+has 'pool' => (
+  is=>'ro',
+  isa=>'DBIx::Class::Storage::DBI::Replicated::Pool',
+  required=>1,
+);
+
+=head2 current_replicant
+
+Replicant storages (slaves) handle all read-only traffic.  The assumption is
+that your database will become read-bound well before it becomes write-bound,
+and that being able to spread your read-only traffic around to multiple
+databases is going to help you to scale traffic.
+
+This attribute returns the next slave to handle a read request.  Your L</pool>
+attribute has methods to help you shuffle through all the available replicants
+via its balancer object.
+
+=cut
+
+has 'current_replicant' => (
+  is=> 'rw',
+  isa=>'DBIx::Class::Storage::DBI',
+  lazy_build=>1,
+  handles=>[qw/
+    select
+    select_single
+    columns_info_for
+  /],
+);
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 _build_current_replicant
+
+Lazy builder for the L</current_replicant> attribute.
+
+=cut
+
+sub _build_current_replicant {
+  my $self = shift @_;
+  $self->next_storage;
+}
+
+=head2 next_storage
+
+This method should be defined in the class which consumes this role.
+
+Given a pool object, return the next replicant that will serve queries.  The
+default behavior is to grab the first replicant it finds but you can write
+your own subclasses of L<DBIx::Class::Storage::DBI::Replicated::Balancer> to
+support other balancing schemes.
+
+This returns from the pool of active replicants.  If there are no active
+replicants, then you should have it return the master as an ultimate fallback.
+
+=head2 around: next_storage
+
+Advice on next_storage to add the autovalidation.  We have this broken out so
+that it's easier to move the auto validation into a role of its own.
+
+This also returns the master in the case that none of the replicants are
+active, or you just forgot to create them :)
+
+=cut
+
+around 'next_storage' => sub {
+  my ($next_storage, $self, @args) = @_;
+  my $now = time;
+    
+  ## Do we need to validate the replicants?
+  if(
+     $self->has_auto_validate_every && 
+     ($self->auto_validate_every + $self->pool->last_validated) <= $now
+  ) {
+      $self->pool->validate_replicants;
+  }
+    
+  ## Get a replicant, or the master if none
+  if(my $next = $self->$next_storage(@args)) {
+    return $next;
+  } else {
+    return $self->master;
+  }
+};
+
+=head2 increment_storage
+
+Rolls the Storage to whatever is next in the queue, as defined by the Balancer.
+
+=cut
+
+sub increment_storage {
+  my $self = shift @_;
+  my $next_replicant = $self->next_storage;
+  $self->current_replicant($next_replicant);
+}
+
+=head2 around: select
+
+Advice on the select method.  Each time we use a replicant we need to change
+it via the storage pool algorithm.  That way we are spreading the load evenly
+(hopefully) across existing capacity.
+
+=cut
+
+around 'select' => sub {
+  my ($select, $self, @args) = @_;
+  
+  if (my $forced_pool = $args[-1]->{force_pool}) {
+    delete $args[-1]->{force_pool};
+    return $self->_get_forced_pool($forced_pool)->select(@args); 
+  } else {
+    $self->increment_storage;
+    return $self->$select(@args);
+  }
+};
+
+=head2 around: select_single
+
+Advice on the select_single method.  Each time we use a replicant we need to
+change it via the storage pool algorithm.  That way we are spreading the load
+evenly (hopefully) across existing capacity.
+
+=cut
+
+around 'select_single' => sub {
+  my ($select_single, $self, @args) = @_;
+  
+  if (my $forced_pool = $args[-1]->{force_pool}) {
+    delete $args[-1]->{force_pool};
+    return $self->_get_forced_pool($forced_pool)->select_single(@args); 
+  } else {
+    $self->increment_storage;
+    return $self->$select_single(@args);
+  }
+};
+
+=head2 before: columns_info_for
+
+Advice on the columns_info_for method.  Each time we use a replicant we need
+to change it via the storage pool algorithm.  That way we are spreading the
+load evenly (hopefully) across existing capacity.
+
+=cut
+
+before 'columns_info_for' => sub {
+  my $self = shift @_;
+  $self->increment_storage;
+};
+
+=head2 _get_forced_pool ($name)
+
+Given an identifier, find the most correct storage object to handle the query.
+
+=cut
+
+sub _get_forced_pool {
+  my ($self, $forced_pool) = @_;
+  if(blessed $forced_pool) {
+    return $forced_pool;
+  } elsif($forced_pool eq 'master') {
+    return $self->master;
+  } elsif(my $replicant = $self->pool->replicants($forced_pool)) {
+    return $replicant;
+  } else {
+    $self->master->throw_exception("$forced_pool is not a named replicant.");
+  }   
+}
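+
+## A usage sketch (the replicant key is illustrative; keys are derived from
+## the DSN as described in DBIx::Class::Storage::DBI::Replicated::Pool):
+##   $rs->search(undef, { force_pool => 'master' });
+##   $rs->search(undef, { force_pool => 'dbname=replicant1' });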
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski at takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;

Added: DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm	                        (rev 0)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -0,0 +1,276 @@
+package DBIx::Class::Storage::DBI::Replicated::Pool;
+
+use Moose;
+use MooseX::AttributeHelpers;
+use DBIx::Class::Storage::DBI::Replicated::Replicant;
+use List::Util qw(sum);
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Pool - Manage a pool of replicants
+
+=head1 SYNOPSIS
+
+This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.  You
+shouldn't need to create instances of this class.
+  
+=head1 DESCRIPTION
+
+In a replicated storage type, there is at least one replicant to handle the
+read only traffic.  The Pool class manages this replicant, or list of 
+replicants, and gives some methods for querying information about their status.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head2 maximum_lag ($num)
+
+This is a number which defines the maximum allowed lag returned by the
+L<DBIx::Class::Storage::DBI/lag_behind_master> method.  The default is 0.  In
+general, this should return a larger number when the replicant is lagging
+behind its master, however the implementation of this is database specific, so
+don't count on this number having a fixed meaning.  For example, MySQL will
+return a number of seconds that the replicating database is lagging.
+
+=cut
+
+has 'maximum_lag' => (
+  is=>'rw',
+  isa=>'Num',
+  required=>1,
+  lazy=>1,
+  default=>0,
+);
+
+=head2 last_validated
+
+This is an integer representing the time since the replicants were last
+validated.  It's nothing fancy, just an integer provided via Perl's C<time>
+builtin.
+
+=cut
+
+has 'last_validated' => (
+  is=>'rw',
+  isa=>'Int',
+  reader=>'last_validated',
+  writer=>'_last_validated',
+  lazy=>1,
+  default=>0,
+);
+
+=head2 replicant_type ($classname)
+
+Base class used to instantiate replicants that are in the pool.  Unless you
+need to subclass L<DBIx::Class::Storage::DBI::Replicated::Replicant> you should
+just leave this alone.
+
+=cut
+
+has 'replicant_type' => (
+  is=>'ro',
+  isa=>'ClassName',
+  required=>1,
+  default=>'DBIx::Class::Storage::DBI',
+  handles=>{
+    'create_replicant' => 'new',
+  },  
+);
+
+=head2 replicants
+
+A hashref of replicants, with the key derived from the dsn and the value being
+the actual replicant storage.  For example if the $dsn element is something
+like:
+
+  "dbi:SQLite:dbname=dbfile"
+  
+You could access the specific replicant via:
+
+  $schema->storage->replicants->{'dbname=dbfile'}
+  
+This attribute also supports the following helper methods (a usage sketch
+follows the list):
+
+=over 4
+
+=item set_replicant($key=>$storage)
+
+Stores a replicant in the hashref under $key
+
+=item get_replicant($key)
+
+Retrieves the named replicant
+
+=item has_replicants
+
+Returns true if the Pool defines replicants.
+
+=item num_replicants
+
+The number of replicants in the pool
+
+=item delete_replicant ($key)
+
+Removes the replicant under $key from the pool
+
+=back
+
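+A short usage sketch (the DSN-derived key is illustrative):
+
+  my $pool = $schema->storage->pool;
+  if( $pool->has_replicants ) {
+    my $replicant = $pool->get_replicant('dbname=dbfile');
+  }
+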
+=cut
+
+has 'replicants' => (
+  is=>'rw',
+  metaclass => 'Collection::Hash',
+  isa=>'HashRef[DBIx::Class::Storage::DBI]',
+  default=>sub {{}},
+  provides  => {
+    'set' => 'set_replicant',
+    'get' => 'get_replicant',            
+    'empty' => 'has_replicants',
+    'count' => 'num_replicants',
+    'delete' => 'delete_replicant',
+  },
+);
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 connect_replicants ($schema, Array[$connect_info])
+
+Given an array of $connect_info suitable for connecting to a database, create
+an L<DBIx::Class::Storage::DBI::Replicated::Replicant> object for each and
+store it in the L</replicants> attribute.
+
+=cut
+
+sub connect_replicants {
+  my $self = shift @_;
+  my $schema = shift @_;
+  
+  my @newly_created = ();
+  foreach my $connect_info (@_) {
+    my $replicant = $self->connect_replicant($schema, $connect_info);
+    my ($key) = ($connect_info->[0]=~m/^dbi\:.+\:(.+)$/);
+    $self->set_replicant( $key => $replicant);  
+    push @newly_created, $replicant;
+  }
+  
+  return @newly_created;
+}
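+
+## Note: the key for each replicant is derived from the DSN tail, so for
+## "dbi:SQLite:dbname=dbfile" the key is "dbname=dbfile" (see the replicants
+## attribute above).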
+
+=head2 connect_replicant ($schema, $connect_info)
+
+Given a schema object and a hashref of $connect_info, connect the replicant
+and return it.
+
+=cut
+
+sub connect_replicant {
+  my ($self, $schema, $connect_info) = @_;
+  my $replicant = $self->create_replicant($schema);
+    
+  $replicant->connect_info($connect_info);    
+  $replicant->ensure_connected;
+  DBIx::Class::Storage::DBI::Replicated::Replicant->meta->apply($replicant);
+    
+  return $replicant;
+}
+
+=head2 connected_replicants
+
+Returns true if there are connected replicants.  Actually is overloaded to
+return the number of replicants.  So you can do stuff like:
+
+  if( my $num_connected = $pool->connected_replicants ) {
+    print "I have $num_connected connected replicants";
+  } else {
+    print "Sorry, no replicants.";
+  }
+
+This method will actually test that each replicant in the L</replicants>
+hashref is connected, so try not to hit this 10 times a second.
+
+=cut
+
+sub connected_replicants {
+  my $self = shift @_;
+  return sum( map {
+    $_->connected ? 1:0
+  } $self->all_replicants );
+}
+
+=head2 active_replicants
+
+This is an array of replicants that are considered to be active in the pool.
+This does not check to see if they are connected, but if they are not, DBIC
+should automatically reconnect them for us when we hit them with a query.
+
+=cut
+
+sub active_replicants {
+  my $self = shift @_;
+  return grep { $_->active } $self->all_replicants;
+}
+
+=head2 all_replicants
+
+Just a simple array of all the replicant storages.  No particular order to the
+array is given, nor should any meaning be derived.
+
+=cut
+
+sub all_replicants {
+  my $self = shift @_;
+  return values %{$self->replicants};
+}
+
+=head2 validate_replicants
+
+This does a check to see if 1) each replicant is connected (or reconnectable),
+2) that it is replicating (C<is_replicating>), and 3) that it is not exceeding
+the lag amount defined by L</maximum_lag>.  Replicants that fail any of these
+tests are set to inactive, and thus removed from the replication pool.
+
+This tests L</all_replicants>, since a replicant that has been previously
+marked as inactive can be reactivated should it start to pass the validation
+tests again.
+
+See L<DBIx::Class::Storage::DBI> for more about checking if a replicating
+connection is not following a master or is lagging.
+
+Calling this method will generate queries on the replicant databases so it is
+not recommended that you run them very often.
+
+=cut
+
+sub validate_replicants {
+  my $self = shift @_;
+  foreach my $replicant($self->all_replicants) {
+    if(
+      $replicant->is_replicating &&
+      $replicant->lag_behind_master <= $self->maximum_lag &&
+      $replicant->ensure_connected
+    ) {
+      $replicant->active(1)
+    } else {
+      $replicant->active(0);
+    }
+  }
+  ## Mark that we completed this validation.  
+  $self->_last_validated(time);
+}
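+
+## A usage sketch: validate on demand, or let the balancer schedule this via
+## its auto_validate_every attribute:
+##   $schema->storage->pool->validate_replicants;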
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski at takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+__PACKAGE__->meta->make_immutable;
+
+1;

Added: DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm	                        (rev 0)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -0,0 +1,91 @@
+package DBIx::Class::Storage::DBI::Replicated::Replicant;
+
+use Moose::Role;
+requires qw/_query_start/;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Replicant - A replicated DBI Storage Role
+
+=head1 SYNOPSIS
+
+This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
+    
+=head1 DESCRIPTION
+
+Replicants are DBI Storages that follow a master DBI Storage.  Typically this
+is accomplished via an external replication system.  Please see the documents
+for L<DBIx::Class::Storage::DBI::Replicated> for more details.
+
+This class exists to define methods of a DBI Storage that only make sense when
+it's a classic 'slave' in a pool of slave databases which replicate from a
+given master database.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head2 active
+
+This is a boolean which allows you to programmatically activate or deactivate
+a replicant from the pool.  This way you can do things like disallow a
+replicant when it gets too far behind the master, if it stops replicating,
+etc.
+
+This attribute DOES NOT reflect a replicant's internal status, i.e. if it is
+properly replicating from a master and has not fallen too many seconds behind
+a reliability threshold.  For that, use L</is_replicating> and
+L</lag_behind_master>.  Since the implementation of those functions is
+database specific (and not all DBIC supported DBs support replication) you
+should refer to your database-specific storage driver for more information.
+
+=cut
+
+has 'active' => (
+  is=>'rw',
+  isa=>'Bool',
+  lazy=>1,
+  required=>1,
+  default=>1,
+);
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 around: _query_start
+
+Advice on the _query_start method to add the replicant's DSN to the debug
+output.
+
+=cut
+
+around '_query_start' => sub {
+  my ($method, $self, $sql, @bind) = @_;
+  my $dsn = $self->connect_info->[0];
+  $self->$method("DSN: $dsn SQL: $sql", @bind);
+};
+
+=head2 debugobj
+
+Override the debugobj method to redirect this method call back to the master.
+
+=cut
+
+sub debugobj {
+    return shift->schema->storage->debugobj;
+}
+
+=head1 SEE ALSO
+
+L<http://en.wikipedia.org/wiki/Replicant>
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski at takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;
\ No newline at end of file

Copied: DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated.pm (from rev 4291, DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replication.pm)
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated.pm	                        (rev 0)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replicated.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -0,0 +1,750 @@
+package DBIx::Class::Storage::DBI::Replicated;
+
+BEGIN {
+  use Carp::Clan qw/^DBIx::Class/;
+	
+  ## Modules required for Replication support not required for general DBIC
+  ## use, so we explicitly test for these.
+	
+  my %replication_required = (
+    Moose => '0.54',
+    MooseX::AttributeHelpers => '0.12',
+    Moose::Util::TypeConstraints => '0.54',
+    Class::MOP => '0.63',
+  );
+	
+  my @didnt_load;
+  
+  for my $module (keys %replication_required) {
+    eval "use $module $replication_required{$module}";
+    push @didnt_load, "$module $replication_required{$module}"
+      if $@;
+  }
+	
+  croak("@{[ join ', ', @didnt_load ]} are missing and are required for Replication")
+    if @didnt_load;  	
+}
+
+use DBIx::Class::Storage::DBI;
+use DBIx::Class::Storage::DBI::Replicated::Pool;
+use DBIx::Class::Storage::DBI::Replicated::Balancer;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated - BETA Replicated database support
+
+=head1 SYNOPSIS
+
+The following example shows how to change an existing $schema to a replicated
+storage type, add some replicated (readonly) databases, and perform reporting
+tasks.
+
+  ## Change storage_type in your schema class
+  $schema->storage_type( ['::DBI::Replicated', {balancer=>'::Random'}] );
+  
+  ## Add some slaves.  Basically this is an array of arrayrefs, where each
+  ## arrayref is database connect information
+  
+  $schema->storage->connect_replicants(
+    [$dsn1, $user, $pass, \%opts],
+    [$dsn2, $user, $pass, \%opts],
+    [$dsn3, $user, $pass, \%opts],
+  );
+  
+  ## Now, just use the $schema as normal
+  $schema->resultset('Source')->search({name=>'etc'});
+  
+  ## You can force a given query to use a particular storage using the search
+  ## attribute 'force_pool'.  For example:
+  
+  my $RS = $schema->resultset('Source')->search(undef, {force_pool=>'master'});
+  
+  ## Now $RS will force everything (both reads and writes) to use whatever was
+  ## setup as the master storage.  'master' is hardcoded to always point to the
+  ## Master, but you can also use any Replicant name.  Please see:
+  ## L<DBIx::Class::Storage::DBI::Replicated::Pool> and the replicants
+  ## attribute for more.  Also see transactions and L</execute_reliably> for
+  ## alternative ways to force read traffic to the master.
+  
+=head1 DESCRIPTION
+
+Warning: This class is marked BETA.  It has been running a production
+website using MySQL native replication as its backend and we have some decent
+test coverage, but the code hasn't yet been stressed by a variety of
+databases.  Individual DBs may have quirks we are not aware of.  Please use
+this in development first and pass along your experiences/bug fixes.
+
+This class implements replicated data store for DBI. Currently you can define
+one master and numerous slave database connections. All write-type queries
+(INSERT, UPDATE, DELETE and even LAST_INSERT_ID) are routed to master
+database, all read-type queries (SELECTs) go to the slave database.
+
+Basically, any method request that L<DBIx::Class::Storage::DBI> would normally
+handle gets delegated to one of the two attributes: L</read_handler> or to
+L</write_handler>.  Additionally, some methods need to be distributed
+to all existing storages.  This way our storage class is a drop in replacement
+for L<DBIx::Class::Storage::DBI>.
+
+Read traffic is spread across the replicants (slaves) according to a user
+selected algorithm.  The default balancer simply uses the first active
+replicant (see L<DBIx::Class::Storage::DBI::Replicated::Balancer::First>).
+
+=head1 NOTES
+
+The consistency between master and replicants is database specific.  The Pool
+gives you a method to validate its replicants, removing and replacing them
+when they fail/pass predefined criteria.  Please make careful use of the ways
+to force a query to run against Master when needed.
+
+=head1 REQUIREMENTS
+
+Replicated Storage has additional requirements not currently part of L<DBIx::Class>
+
+  Moose => 0.54
+  MooseX::AttributeHelpers => 0.12 
+  Moose::Util::TypeConstraints => 0.54
+  Class::MOP => 0.63
+  
+You will need to install these modules manually via CPAN or make them part of the
+Makefile for your distribution.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head2 schema
+
+The underlying L<DBIx::Class::Schema> object this storage is attached to.
+
+=cut
+
+has 'schema' => (
+    is=>'rw',
+    isa=>'DBIx::Class::Schema',
+    weak_ref=>1,
+    required=>1,
+);
+
+=head2 pool_type
+
+Contains the classname which will instantiate the L</pool> object.  Defaults 
+to: L<DBIx::Class::Storage::DBI::Replicated::Pool>.
+
+=cut
+
+has 'pool_type' => (
+  is=>'ro',
+  isa=>'ClassName',
+  required=>1,
+  default=>'DBIx::Class::Storage::DBI::Replicated::Pool',
+  handles=>{
+    'create_pool' => 'new',
+  },
+);
+
+=head2 pool_args
+
+Contains a hashref of initialization information to pass to the Pool object.
+See L<DBIx::Class::Storage::DBI::Replicated::Pool> for available arguments.
+
+=cut
+
+has 'pool_args' => (
+  is=>'ro',
+  isa=>'HashRef',
+  lazy=>1,
+  required=>1,
+  default=>sub { {} },
+);
+
+
+=head2 balancer_type
+
+The replication pool requires a balancer class to provide the methods for
+choosing how to spread the query load across each replicant in the pool.
+
+=cut
+
+subtype 'DBIx::Class::Storage::DBI::Replicated::BalancerClassNamePart',
+  as 'ClassName';
+    
+coerce 'DBIx::Class::Storage::DBI::Replicated::BalancerClassNamePart',
+  from 'Str',
+  via {
+    my $type = $_;
+    if($type=~m/^::/) {
+      $type = 'DBIx::Class::Storage::DBI::Replicated::Balancer'.$type;
+    }
+    Class::MOP::load_class($type);
+    $type;
+  };
+
+has 'balancer_type' => (
+  is=>'ro',
+  isa=>'DBIx::Class::Storage::DBI::Replicated::BalancerClassNamePart',
+  coerce=>1,
+  required=>1,
+  default=> 'DBIx::Class::Storage::DBI::Replicated::Balancer::First',
+  handles=>{
+    'create_balancer' => 'new',
+  },
+);
+
+=head2 balancer_args
+
+Contains a hashref of initialization information to pass to the Balancer
+object.  See L<DBIx::Class::Storage::DBI::Replicated::Balancer> for available
+arguments.
+
+=cut
+
+has 'balancer_args' => (
+  is=>'ro',
+  isa=>'HashRef',
+  lazy=>1,
+  required=>1,
+  default=>sub { {} },
+);
+
+=head2 pool
+
+Is a L<DBIx::Class::Storage::DBI::Replicated::Pool> or derived class.  This is a
+container class for one or more replicated databases.
+
+=cut
+
+has 'pool' => (
+  is=>'ro',
+  isa=>'DBIx::Class::Storage::DBI::Replicated::Pool',
+  lazy_build=>1,
+  handles=>[qw/
+    connect_replicants    
+    replicants
+    has_replicants
+  /],
+);
+
+=head2 balancer
+
+Is a L<DBIx::Class::Storage::DBI::Replicated::Balancer> or derived class.  This
+is a class that takes a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>).
+
+=cut
+
+has 'balancer' => (
+  is=>'ro',
+  isa=>'DBIx::Class::Storage::DBI::Replicated::Balancer',
+  lazy_build=>1,
+  handles=>[qw/auto_validate_every/],
+);
+
+=head2 master
+
+The master defines the canonical state for a pool of connected databases.  All
+the replicants are expected to match this database's state.  Thus, in a classic
+Master / Slaves distributed system, all the slaves are expected to replicate
+the Master's state as quickly as possible.  This is the only database in the
+pool of databases that is allowed to handle write traffic.
+
+=cut
+
+has 'master' => (
+  is=> 'ro',
+  isa=>'DBIx::Class::Storage::DBI',
+  lazy_build=>1,
+);
+
+=head1 ATTRIBUTES IMPLEMENTING THE DBIx::Class::Storage::DBI INTERFACE
+
+The following attributes are used to delegate all the methods required for the
+L<DBIx::Class::Storage::DBI> interface.
+
+=head2 read_handler
+
+Defines an object that implements the read side of L<DBIx::Class::Storage::DBI>.
+
+=cut
+
+has 'read_handler' => (
+  is=>'rw',
+  isa=>'Object',
+  lazy_build=>1,
+  handles=>[qw/
+    select
+    select_single
+    columns_info_for
+  /],    
+);
+
+=head2 write_handler
+
+Defines an object that implements the write side of L<DBIx::Class::Storage::DBI>.
+
+=cut
+
+has 'write_handler' => (
+  is=>'ro',
+  isa=>'Object',
+  lazy_build=>1,
+  handles=>[qw/   
+    on_connect_do
+    on_disconnect_do       
+    connect_info
+    throw_exception
+    sql_maker
+    sqlt_type
+    create_ddl_dir
+    deployment_statements
+    datetime_parser
+    datetime_parser_type        
+    last_insert_id
+    insert
+    insert_bulk
+    update
+    delete
+    dbh
+    txn_begin
+    txn_do
+    txn_commit
+    txn_rollback
+    txn_scope_guard
+    sth
+    deploy
+
+    reload_row
+    _prep_for_execute
+    configure_sqlt
+    
+  /],
+);
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 BUILDARGS
+
+L<DBIx::Class::Schema>, when instantiating its storage, passes itself as the
+first argument.  So we need to massage the arguments a bit so that all the
+bits get put into the correct places.
+
+=cut
+
+sub BUILDARGS {
+  my ($class, $schema, $storage_type_args, @args) = @_;
+  
+  return {
+    schema=>$schema,
+    %$storage_type_args,
+    @args
+  }
+}
+
+=head2 _build_master
+
+Lazy builder for the L</master> attribute.
+
+=cut
+
+sub _build_master {
+  my $self = shift @_;
+  DBIx::Class::Storage::DBI->new($self->schema);
+}
+
+=head2 _build_pool
+
+Lazy builder for the L</pool> attribute.
+
+=cut
+
+sub _build_pool {
+  my $self = shift @_;
+  $self->create_pool(%{$self->pool_args});
+}
+
+=head2 _build_balancer
+
+Lazy builder for the L</balancer> attribute.  This takes a Pool object so that
+the balancer knows which pool it's balancing.
+
+=cut
+
+sub _build_balancer {
+  my $self = shift @_;
+  $self->create_balancer(
+    pool=>$self->pool, 
+    master=>$self->master,
+    %{$self->balancer_args},
+  );
+}
+
+=head2 _build_write_handler
+
+Lazy builder for the L</write_handler> attribute.  The default is to set this to
+the L</master>.
+
+=cut
+
+sub _build_write_handler {
+  return shift->master;
+}
+
+=head2 _build_read_handler
+
+Lazy builder for the L</read_handler> attribute.  The default is to set this to
+the L</balancer>.
+
+=cut
+
+sub _build_read_handler {
+  return shift->balancer;
+}
+
+=head2 around: connect_replicants
+
+All calls to connect_replicants need to have an existing $schema tacked onto
+top of the args, since L<DBIx::Class::Storage::DBI> needs it.
+
+=cut
+
+around 'connect_replicants' => sub {
+  my ($method, $self, @args) = @_;
+  $self->$method($self->schema, @args);
+};
+
+=head2 all_storages
+
+Returns an array of all the connected storage backends.  The first element
+in the returned array is the master, and the rest are each of the
+replicants.
+
+=cut
+
+sub all_storages {
+  my $self = shift @_;
+  return grep {defined $_ && blessed $_} (
+     $self->master,
+     $self->replicants,
+  );
+}
+
+=head2 execute_reliably ($coderef, ?@args)
+
+Given a coderef, saves the current state of the L</read_handler>, forces it to
+use reliable storage (ie sets it to the master), executes a coderef and then
+restores the original state.
+
+Example:
+
+  my $reliably = sub {
+    my $name = shift @_;
+    $schema->resultset('User')->create({name=>$name});
+    my $user_rs = $schema->resultset('User')->find({name=>$name}); 
+    return $user_rs;
+  };
+
+  my $user_rs = $schema->storage->execute_reliably($reliably, 'John');
+
+Use this when you must be certain of your database state, such as when you just
+inserted something and need to get a resultset including it, etc.
+
+=cut
+
+sub execute_reliably {
+  my ($self, $coderef, @args) = @_;
+  
+  unless( ref $coderef eq 'CODE') {
+    $self->throw_exception('Second argument must be a coderef');
+  }
+  
+  ##Get copy of master storage
+  my $master = $self->master;
+  
+  ##Get whatever the current read handler is
+  my $current = $self->read_handler;
+  
+  ##Set the read handler to master
+  $self->read_handler($master);
+  
+  ## do whatever the caller needs
+  my @result;
+  my $want_array = wantarray;
+  
+  eval {
+    if($want_array) {
+      @result = $coderef->(@args);
+    } elsif(defined $want_array) {
+      ($result[0]) = ($coderef->(@args));
+    } else {
+      $coderef->(@args);
+    }       
+  };
+  
+  ##Reset to the original state
+  $self->read_handler($current); 
+  
+  ##Exception testing has to come last, otherwise you might leave the 
+  ##read_handler set to master.
+  
+  if($@) {
+    $self->throw_exception("coderef returned an error: $@");
+  } else {
+    return $want_array ? @result : $result[0];
+  }
+}
+
+=head2 set_reliable_storage
+
+Sets the current $schema to be 'reliable', that is, all queries, both read and
+write, are sent to the master.
+  
+=cut
+
+sub set_reliable_storage {
+  my $self = shift @_;
+  my $schema = $self->schema;
+  my $write_handler = $self->schema->storage->write_handler;
+  
+  $schema->storage->read_handler($write_handler);
+}
+
+=head2 set_balanced_storage
+
+Sets the current $schema to use the L</balancer> for all reads, while all
+writes are sent to the master only.
+  
+=cut
+
+sub set_balanced_storage {
+  my $self = shift @_;
+  my $schema = $self->schema;
+  my $balancer = $self->schema->storage->balancer;
+  
+  $schema->storage->read_handler($balancer);
+}
+
+=head2 around: txn_do ($coderef)
+
+An overload of the txn_do method, which is delegated to whatever the
+L</write_handler> is set to.  We overload this in order to wrap it inside a
+L</execute_reliably> call.
+
+=cut
+
+around 'txn_do' => sub {
+  my($txn_do, $self, $coderef, @args) = @_;
+  $self->execute_reliably(sub {$self->$txn_do($coderef, @args)}); 
+};
+
+=head2 connected
+
+Check that the master and at least one of the replicants is connected.
+
+=cut
+
+sub connected {
+  my $self = shift @_;
+  return
+    $self->master->connected &&
+    $self->pool->connected_replicants;
+}
+
+=head2 ensure_connected
+
+Make sure all the storages are connected.
+
+=cut
+
+sub ensure_connected {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->ensure_connected(@_);
+  }
+}
+
+=head2 limit_dialect
+
+Set the limit_dialect for all existing storages
+
+=cut
+
+sub limit_dialect {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->limit_dialect(@_);
+  }
+  return $self->master->limit_dialect;
+}
+
+=head2 quote_char
+
+Set the quote_char for all existing storages
+
+=cut
+
+sub quote_char {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->quote_char(@_);
+  }
+  return $self->master->quote_char;
+}
+
+=head2 name_sep
+
+Set the name_sep for all existing storages
+
+=cut
+
+sub name_sep {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->name_sep(@_);
+  }
+  return $self->master->name_sep;
+}
+
+=head2 set_schema
+
+Set the schema object for all existing storages
+
+=cut
+
+sub set_schema {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->set_schema(@_);
+  }
+}
+
+=head2 debug
+
+Set a debug flag across all storages
+
+=cut
+
+sub debug {
+  my $self = shift @_;
+  if(@_) {
+    foreach my $source ($self->all_storages) {
+      $source->debug(@_);
+    }   
+  }
+  return $self->master->debug;
+}
+
+=head2 debugobj
+
+Set a debug object across all storages
+
+=cut
+
+sub debugobj {
+  my $self = shift @_;
+  if(@_) {
+    foreach my $source ($self->all_storages) {
+      $source->debugobj(@_);
+    } 	
+  }
+  return $self->master->debugobj;
+}
+
+=head2 debugfh
+
+Set a debugfh object across all storages
+
+=cut
+
+sub debugfh {
+  my $self = shift @_;
+  if(@_) {
+    foreach my $source ($self->all_storages) {
+      $source->debugfh(@_);
+    }   
+  }
+  return $self->master->debugfh;
+}
+
+=head2 debugcb
+
+Set a debug callback across all storages
+
+=cut
+
+sub debugcb {
+  my $self = shift @_;
+  if(@_) {
+    foreach my $source ($self->all_storages) {
+      $source->debugcb(@_);
+    }   
+  }
+  return $self->master->debugcb;
+}
+
+=head2 disconnect
+
+Disconnect everything
+
+=cut
+
+sub disconnect {
+  my $self = shift @_;
+  foreach my $source ($self->all_storages) {
+    $source->disconnect(@_);
+  }
+}
+
+=head1 GOTCHAS
+
+Due to the fact that replicants can lag behind a master, you must take care to
+make sure you use one of the methods to force read queries to a master should
+you need realtime data integrity.  For example, if you insert a row, and then
+immediately re-read it from the database (say, by doing $row->discard_changes)
+or you insert a row and then immediately build a query that expects that row
+to be an item, you should force the master to handle reads.  Otherwise, due to
+the lag, there is no certainty your data will be in the expected state.
+
+For data integrity, all transactions automatically use the master storage for
+all read and write queries.  Using a transaction is the preferred and recommended
+method to force the master to handle all read queries.
+
+Otherwise, you can force a single query to use the master with the 'force_pool'
+attribute:
+
+  my $row = $resultset->search(undef, {force_pool=>'master'})->find($pk);
+
+This attribute will safely be ignored by non-replicated storages, so you can
+use the same code for both types of systems.
+
+Lastly, you can use the L</execute_reliably> method, which works very much like
+a transaction.
+
+For debugging, you can turn replication on/off with the methods L</set_reliable_storage>
+and L</set_balanced_storage>, however this operates at a global level and is not
+suitable if you have a shared Schema object being used by multiple processes,
+such as on a web application server.  You can get around this limitation by
+using the Schema clone method.
+
+  my $new_schema = $schema->clone;
+  $new_schema->set_reliable_storage;
+  
+  ## $new_schema will use only the Master storage for all reads/writes while
+  ## the $schema object will use replicated storage.
+
+=head1 AUTHOR
+
+  John Napiorkowski <john.napiorkowski at takkle.com>
+
+Based on code originated by:
+
+  Norbert Csongrádi <bert at cpan.org>
+  Peter Siklósi <einon at einon.hu>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+__PACKAGE__->meta->make_immutable;
+
+1;

Deleted: DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replication.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replication.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/Replication.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -1,236 +0,0 @@
-package DBIx::Class::Storage::DBI::Replication;
-
-use strict;
-use warnings;
-
-use DBIx::Class::Storage::DBI;
-use DBD::Multi;
-use base qw/Class::Accessor::Fast/;
-
-__PACKAGE__->mk_accessors( qw/read_source write_source/ );
-
-=head1 NAME
-
-DBIx::Class::Storage::DBI::Replication - EXPERIMENTAL Replicated database support
-
-=head1 SYNOPSIS
-
-  # change storage_type in your schema class
-    $schema->storage_type( '::DBI::Replication' );
-    $schema->connect_info( [
-		     [ "dbi:mysql:database=test;hostname=master", "username", "password", { AutoCommit => 1 } ], # master
-		     [ "dbi:mysql:database=test;hostname=slave1", "username", "password", { priority => 10 } ],  # slave1
-		     [ "dbi:mysql:database=test;hostname=slave2", "username", "password", { priority => 10 } ],  # slave2
-		     [ $dbh, '','', {priority=>10}], # add in a preexisting database handle
-		     [ sub {  DBI->connect }, '', '', {priority=>10}], # DBD::Multi will call this coderef for connects
-		     <...>,
-		     { limit_dialect => 'LimitXY' } # If needed, see below
-		    ] );
-
-=head1 DESCRIPTION
-
-Warning: This class is marked EXPERIMENTAL. It works for the authors but does
-not currently have automated tests so your mileage may vary.
-
-This class implements replicated data store for DBI. Currently you can define
-one master and numerous slave database connections. All write-type queries
-(INSERT, UPDATE, DELETE and even LAST_INSERT_ID) are routed to master
-database, all read-type queries (SELECTs) go to the slave database.
-
-For every slave database you can define a priority value, which controls data
-source usage pattern. It uses L<DBD::Multi>, so first the lower priority data
-sources used (if they have the same priority, the are used randomized), than
-if all low priority data sources fail, higher ones tried in order.
-
-=head1 CONFIGURATION
-
-=head2 Limit dialect
-
-If you use LIMIT in your queries (effectively, if you use
-SQL::Abstract::Limit), do not forget to set up limit_dialect (perldoc
-SQL::Abstract::Limit) by passing it as an option in the (optional) hash
-reference to connect_info.  DBIC can not set it up automatically, since it can
-not guess DBD::Multi connection types.
-
-=cut
-
-sub new {
-    my $proto = shift;
-    my $class = ref( $proto ) || $proto;
-    my $self = {};
-
-    bless( $self, $class );
-
-    $self->write_source( DBIx::Class::Storage::DBI->new );
-    $self->read_source( DBIx::Class::Storage::DBI->new );
-
-    return $self;
-}
-
-sub all_sources {
-    my $self = shift;
-
-    my @sources = ($self->read_source, $self->write_source);
-
-    return wantarray ? @sources : \@sources;
-}
-
-sub connect_info {
-    my( $self, $source_info ) = @_;
-
-    my( $info, $global_options, $options, @dsns );
-
-    $info = [ @$source_info ];
-
-    $global_options = ref $info->[-1] eq 'HASH' ? pop( @$info ) : {};
-    if( ref( $options = $info->[0]->[-1] ) eq 'HASH' ) {
-	# Local options present in dsn, merge them with global options
-        map { $global_options->{$_} = $options->{$_} } keys %$options;
-        pop @{$info->[0]};
-    }
-
-    # We need to copy-pass $global_options, since connect_info clears it while
-    # processing options
-    $self->write_source->connect_info( @{$info->[0]}, { %$global_options } );
-
-	## allow either a DSN string or an already connect $dbh.  Just remember if
-	## you use the $dbh option then DBD::Multi has no idea how to reconnect in
-	## the event of a failure.
-	
-    @dsns = map {
-        ## if the first element in the arrayhash is a ref, make that the value
-        my $db = ref $_->[0] ? $_->[0] : $_;
-        ($_->[3]->{priority} || 10) => $db;
-    } @{$info->[0]}[1..@{$info->[0]}-1];
-    
-    $global_options->{dsns} = \@dsns;
-
-    $self->read_source->connect_info( [ 'dbi:Multi:', undef, undef, { %$global_options } ] );
-}
-
-sub select {
-    shift->read_source->select( @_ );
-}
-sub select_single {
-    shift->read_source->select_single( @_ );
-}
-sub throw_exception {
-    shift->read_source->throw_exception( @_ );
-}
-sub sql_maker {
-    shift->read_source->sql_maker( @_ );
-}
-sub columns_info_for {
-    shift->read_source->columns_info_for( @_ );
-}
-sub sqlt_type {
-    shift->read_source->sqlt_type( @_ );
-}
-sub create_ddl_dir {
-    shift->read_source->create_ddl_dir( @_ );
-}
-sub deployment_statements {
-    shift->read_source->deployment_statements( @_ );
-}
-sub datetime_parser {
-    shift->read_source->datetime_parser( @_ );
-}
-sub datetime_parser_type {
-    shift->read_source->datetime_parser_type( @_ );
-}
-sub build_datetime_parser {
-    shift->read_source->build_datetime_parser( @_ );
-}
-
-sub limit_dialect { $_->limit_dialect( @_ ) for( shift->all_sources ) }
-sub quote_char { $_->quote_char( @_ ) for( shift->all_sources ) }
-sub name_sep { $_->quote_char( @_ ) for( shift->all_sources ) }
-sub disconnect { $_->disconnect( @_ ) for( shift->all_sources ) }
-sub set_schema { $_->set_schema( @_ ) for( shift->all_sources ) }
-
-sub DESTROY {
-    my $self = shift;
-
-    undef $self->{write_source};
-    undef $self->{read_sources};
-}
-
-sub last_insert_id {
-    shift->write_source->last_insert_id( @_ );
-}
-sub insert {
-    shift->write_source->insert( @_ );
-}
-sub update {
-    shift->write_source->update( @_ );
-}
-sub update_all {
-    shift->write_source->update_all( @_ );
-}
-sub delete {
-    shift->write_source->delete( @_ );
-}
-sub delete_all {
-    shift->write_source->delete_all( @_ );
-}
-sub create {
-    shift->write_source->create( @_ );
-}
-sub find_or_create {
-    shift->write_source->find_or_create( @_ );
-}
-sub update_or_create {
-    shift->write_source->update_or_create( @_ );
-}
-sub connected {
-    shift->write_source->connected( @_ );
-}
-sub ensure_connected {
-    shift->write_source->ensure_connected( @_ );
-}
-sub dbh {
-    shift->write_source->dbh( @_ );
-}
-sub txn_begin {
-    shift->write_source->txn_begin( @_ );
-}
-sub txn_commit {
-    shift->write_source->txn_commit( @_ );
-}
-sub txn_rollback {
-    shift->write_source->txn_rollback( @_ );
-}
-sub sth {
-    shift->write_source->sth( @_ );
-}
-sub deploy {
-    shift->write_source->deploy( @_ );
-}
-
-
-sub debugfh { shift->_not_supported( 'debugfh' ) };
-sub debugcb { shift->_not_supported( 'debugcb' ) };
-
-sub _not_supported {
-    my( $self, $method ) = @_;
-
-    die "This Storage does not support $method method.";
-}
-
-=head1 SEE ALSO
-
-L<DBI::Class::Storage::DBI>, L<DBD::Multi>, L<DBI>
-
-=head1 AUTHOR
-
-Norbert Csongrádi <bert at cpan.org>
-
-Peter Siklósi <einon at einon.hu>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut
-
-1;

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/mysql.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/mysql.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI/mysql.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -33,7 +33,16 @@
 
     $self->dbh->do("ROLLBACK TO SAVEPOINT $name")
 }
+ 
+sub is_replicating {
+    my $status = shift->dbh->selectrow_hashref('show slave status');
+    return ($status->{Slave_IO_Running} eq 'Yes') && ($status->{Slave_SQL_Running} eq 'Yes');
+}
 
+sub lag_behind_master {
+    return shift->dbh->selectrow_hashref('show slave status')->{Seconds_Behind_Master};
+}
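+
+## These methods let the replication Pool (::DBI::Replicated::Pool) validate
+## a MySQL replicant: is_replicating checks the slave IO/SQL threads, while
+## lag_behind_master reports Seconds_Behind_Master from SHOW SLAVE STATUS.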
+
 1;
 
 =head1 NAME

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/DBI.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -600,7 +600,7 @@
 
   eval {
     $self->_verify_pid if $dbh;
-    if( !$dbh ) {
+    if(!$self->_dbh) {
         $self->_populate_dbh;
         $dbh = $self->_dbh;
     }
@@ -1063,6 +1063,7 @@
 
     if ( $self->debug ) {
         @bind = $self->_fix_bind_params(@bind);
+        
         $self->debugobj->query_start( $sql, @bind );
     }
 }
@@ -1287,7 +1288,9 @@
   my $self = shift;
   my ($rv, $sth, @bind) = $self->_select(@_);
   my @row = $sth->fetchrow_array;
-  carp "Query returned more than one row" if $sth->fetchrow_array;
+  if(@row && $sth->fetchrow_array) {
+    carp "Query returned more than one row.  SQL that returns multiple rows is DEPRECATED for ->find and ->single";
+  }
   # Need to call finish() to work round broken DBDs
   $sth->finish();
   return @row;
@@ -1430,28 +1433,40 @@
 
 =over 4
 
-=item Arguments: $schema \@databases, $version, $directory, $preversion, $sqlt_args
+=item Arguments: $schema \@databases, $version, $directory, $preversion, \%sqlt_args
 
 =back
 
 Creates a SQL file based on the Schema, for each of the specified
 database types, in the given directory.
 
+By default, C<\%sqlt_args> will have
+
+ { add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1 }
+
+merged with the hash passed in.  To disable any of those features, pass in a
+hashref like the following:
+
+ { ignore_constraint_names => 0 } # ... plus any other options
+
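+For example (a sketch; the paths and version values are illustrative):
+
+ $schema->create_ddl_dir(['MySQL', 'SQLite'], '1.0', './sql/', undef,
+   { ignore_constraint_names => 0 });
+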
 =cut
 
-sub create_ddl_dir
-{
+sub create_ddl_dir {
   my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
 
-  if(!$dir || !-d $dir)
-  {
+  if(!$dir || !-d $dir) {
     warn "No directory given, using ./\n";
     $dir = "./";
   }
   $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
   $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
   $version ||= $schema->VERSION || '1.x';
-  $sqltargs = { ( add_drop_table => 1 ), %{$sqltargs || {}} };
+  $sqltargs = {
+    add_drop_table => 1, 
+    ignore_constraint_names => 1,
+    ignore_index_names => 1,
+    %{$sqltargs || {}}
+  };
 
   $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09: '}
       . $self->_check_sqlt_message . q{'})
@@ -1462,98 +1477,89 @@
   $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
   my $sqlt_schema = $sqlt->translate({ data => $schema }) or die $sqlt->error;
 
-  foreach my $db (@$databases)
-  {
+  foreach my $db (@$databases) {
     $sqlt->reset();
     $sqlt = $self->configure_sqlt($sqlt, $db);
     $sqlt->{schema} = $sqlt_schema;
     $sqlt->producer($db);
 
     my $file;
-    my $filename = $schema->ddl_filename($db, $dir, $version);
-    if(-e $filename)
-    {
-      warn("$filename already exists, skipping $db");
-      next unless ($preversion);
-    } else {
-      my $output = $sqlt->translate;
-      if(!$output)
-      {
-        warn("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
-        next;
-      }
-      if(!open($file, ">$filename"))
-      {
-          $self->throw_exception("Can't open $filename for writing ($!)");
-          next;
-      }
-      print $file $output;
-      close($file);
-    } 
-    if($preversion)
-    {
-      require SQL::Translator::Diff;
+    my $filename = $schema->ddl_filename($db, $version, $dir);
+    if (-e $filename && (!$version || ($version == $schema->schema_version()))) {
+      # if we are dumping the current version, overwrite the DDL
+      warn "Overwriting existing DDL file - $filename";
+      unlink($filename);
+    }
 
-      my $prefilename = $schema->ddl_filename($db, $dir, $preversion);
-#      print "Previous version $prefilename\n";
-      if(!-e $prefilename)
-      {
-        warn("No previous schema file found ($prefilename)");
-        next;
-      }
+    my $output = $sqlt->translate;
+    if(!$output) {
+      warn("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
+      next;
+    }
+    if(!open($file, ">$filename")) {
+      $self->throw_exception("Can't open $filename for writing ($!)");
+      next;
+    }
+    print $file $output;
+    close($file);
+  
+    next unless ($preversion);
 
-      my $difffile = $schema->ddl_filename($db, $dir, $version, $preversion);
-      print STDERR "Diff: $difffile: $db, $dir, $version, $preversion \n";
-      if(-e $difffile)
-      {
-        warn("$difffile already exists, skipping");
-        next;
-      }
+    require SQL::Translator::Diff;
 
-      my $source_schema;
-      {
-        my $t = SQL::Translator->new($sqltargs);
-        $t->debug( 0 );
-        $t->trace( 0 );
-        $t->parser( $db )                       or die $t->error;
-        $t = $self->configure_sqlt($t, $db);
-        my $out = $t->translate( $prefilename ) or die $t->error;
-        $source_schema = $t->schema;
-        unless ( $source_schema->name ) {
-          $source_schema->name( $prefilename );
-        }
-      }
+    my $prefilename = $schema->ddl_filename($db, $preversion, $dir);
+    if(!-e $prefilename) {
+      warn("No previous schema file found ($prefilename)");
+      next;
+    }
 
-      # The "new" style of producers have sane normalization and can support 
-      # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't
-      # And we have to diff parsed SQL against parsed SQL.
-      my $dest_schema = $sqlt_schema;
-
-      unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
-        my $t = SQL::Translator->new($sqltargs);
-        $t->debug( 0 );
-        $t->trace( 0 );
-        $t->parser( $db )                    or die $t->error;
-        $t = $self->configure_sqlt($t, $db);
-        my $out = $t->translate( $filename ) or die $t->error;
-        $dest_schema = $t->schema;
-        $dest_schema->name( $filename )
-          unless $dest_schema->name;
+    my $difffile = $schema->ddl_filename($db, $version, $dir, $preversion);
+    if(-e $difffile) {
+      warn("Overwriting existing diff file - $difffile");
+      unlink($difffile);
+    }
+    
+    my $source_schema;
+    {
+      my $t = SQL::Translator->new($sqltargs);
+      $t->debug( 0 );
+      $t->trace( 0 );
+      $t->parser( $db )                       or die $t->error;
+      $t = $self->configure_sqlt($t, $db);
+      my $out = $t->translate( $prefilename ) or die $t->error;
+      $source_schema = $t->schema;
+      unless ( $source_schema->name ) {
+        $source_schema->name( $prefilename );
       }
+    }
 
-      $DB::single = 1;
-      my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
-                                                    $dest_schema,   $db,
-                                                    $sqltargs
-                                                   );
-      if(!open $file, ">$difffile")
-      { 
-        $self->throw_exception("Can't write to $difffile ($!)");
-        next;
-      }
-      print $file $diff;
-      close($file);
+    # The "new" style of producers have sane normalization and can support 
+    # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't
+    # And we have to diff parsed SQL against parsed SQL.
+    my $dest_schema = $sqlt_schema;
+    
+    unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
+      my $t = SQL::Translator->new($sqltargs);
+      $t->debug( 0 );
+      $t->trace( 0 );
+      $t->parser( $db )                    or die $t->error;
+      $t = $self->configure_sqlt($t, $db);
+      my $out = $t->translate( $filename ) or die $t->error;
+      $dest_schema = $t->schema;
+      $dest_schema->name( $filename )
+        unless $dest_schema->name;
     }
+    
+    my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
+                                                  $dest_schema,   $db,
+                                                  $sqltargs
+                                                 );
+    if(!open $file, ">$difffile") { 
+      $self->throw_exception("Can't write to $difffile ($!)");
+      next;
+    }
+    print $file $diff;
+    close($file);
   }
 }
 
@@ -1625,9 +1631,6 @@
   my $tr = SQL::Translator->new(%$sqltargs);
   SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
   return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
-
-  return;
-
 }
 
 sub deploy {
@@ -1705,6 +1708,31 @@
     }
 }
 
+=head2 is_replicating
+
+A boolean that reports if a particular L<DBIx::Class::Storage::DBI> is set to
+replicate from a master database.  Default is undef, which is the result
+returned by databases that don't support replication.
+
+=cut
+
+sub is_replicating {
+    return;
+}
+
+=head2 lag_behind_master
+
+Returns a number that represents a certain amount of lag behind a master db
+when a given storage is replicating.  The number is database dependent, but
+starts at zero and increases with the amount of lag.  Default is undef.
+
+=cut
+
+sub lag_behind_master {
+    return;
+}
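+
+## Storage drivers for databases that do support replication are expected to
+## override these two methods; see ::Storage::DBI::mysql in this commit for a
+## concrete implementation based on SHOW SLAVE STATUS.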
+
 sub DESTROY {
   my $self = shift;
   return if !$self->_dbh;

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/TxnScopeGuard.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/TxnScopeGuard.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage/TxnScopeGuard.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -24,19 +24,20 @@
   return if $dismiss;
 
   my $exception = $@;
+  Carp::cluck("A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or an error - bad")
+    unless $exception; 
+  {
+    local $@;
+    eval { $storage->txn_rollback };
+    my $rollback_exception = $@;
+    if($rollback_exception) {
+      my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
 
-  $DB::single = 1;
-
-  local $@;
-  eval { $storage->txn_rollback };
-  my $rollback_exception = $@;
-  if($rollback_exception) {
-    my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
-
-    $storage->throw_exception(
-      "Transaction aborted: ${exception}. "
-      . "Rollback failed: ${rollback_exception}"
-    ) unless $rollback_exception =~ /$exception_class/;
+      $storage->throw_exception(
+        "Transaction aborted: ${exception}. "
+        . "Rollback failed: ${rollback_exception}"
+      ) unless $rollback_exception =~ /$exception_class/;
+    }
   }
 }
 
@@ -46,7 +47,7 @@
 
 =head1 NAME
 
-DBIx::Class::Storage::TxnScopeGuard
+DBIx::Class::Storage::TxnScopeGuard - Experimental
 
 =head1 SYNOPSIS
 

Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class/Storage.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -299,10 +299,22 @@
 
 =for comment
 
-=head2 txn_scope_guard
+=head2 txn_scope_guard (EXPERIMENTAL)
 
-Return an object that does stuff.
+An alternative to C<txn_do> for managing transactions:
 
+ my $txn = $storage->txn_scope_guard;
+
+ $row->col1("val1");
+ $row->update;
+
+ $txn->commit;
+
+If an exception occurs, the transaction will be rolled back. This is still
+very experimental, and we are not 100% sure it works correctly when nested.
+The onus is on you as the user to make sure you don't forget to call
+C<< $txn->commit >>.
+
 =cut
 
 sub txn_scope_guard {

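A minimal sketch of the guard pattern described above (assumes a connected
$schema; the resultset and column are illustrative). The guard must be
committed before it goes out of scope, otherwise its destructor rolls the
transaction back and, per the TxnScopeGuard change above, warns:

  {
      my $txn = $schema->storage->txn_scope_guard;

      $schema->resultset('Artist')->create({ name => 'New Artist' });

      # Without this, the guard's DESTROY at the end of the block
      # issues a rollback (and clucks a warning).
      $txn->commit;
  }
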
Modified: DBIx-Class/0.09/trunk/lib/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/DBIx/Class.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/DBIx/Class.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -24,8 +24,10 @@
 # i.e. first release of 0.XX *must* be 0.XX000. This avoids fBSD ports
 # brain damage and presumably various other packaging systems too
 
-$VERSION = '0.08010';
+$VERSION = '0.08099_02';
 
+$VERSION = eval $VERSION; # numify for warning-free dev releases
+
 sub MODIFY_CODE_ATTRIBUTES {
   my ($class,$code, at attrs) = @_;
   $class->mk_classdata('__attr_cache' => {})
@@ -47,48 +49,62 @@
 
 DBIx::Class - Extensible and flexible object <-> relational mapper.
 
+=head1 GETTING HELP/SUPPORT
+
+The community can be found via:
+
+  Mailing list: http://lists.scsys.co.uk/mailman/listinfo/dbix-class/
+
+  SVN: http://dev.catalyst.perl.org/repos/bast/DBIx-Class/
+
+  SVNWeb: http://dev.catalyst.perl.org/svnweb/bast/browse/DBIx-Class/
+
+  IRC: irc.perl.org#dbix-class
+
 =head1 SYNOPSIS
 
-Create a schema class called DB/Main.pm:
+Create a schema class called MyDB/Schema.pm:
 
-  package DB::Main;
+  package MyDB::Schema;
   use base qw/DBIx::Class::Schema/;
 
   __PACKAGE__->load_classes();
 
   1;
 
-Create a table class to represent artists, who have many CDs, in DB/Main/Artist.pm:
+Create a table class to represent artists, who have many CDs, in
+MyDB/Schema/Artist.pm:
 
-  package DB::Main::Artist;
+  package MyDB::Schema::Artist;
   use base qw/DBIx::Class/;
 
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  __PACKAGE__->load_components(qw/Core/);
   __PACKAGE__->table('artist');
   __PACKAGE__->add_columns(qw/ artistid name /);
   __PACKAGE__->set_primary_key('artistid');
-  __PACKAGE__->has_many(cds => 'DB::Main::CD');
+  __PACKAGE__->has_many(cds => 'MyDB::Schema::CD');
 
   1;
 
-A table class to represent a CD, which belongs to an artist, in DB/Main/CD.pm:
+A table class to represent a CD, which belongs to an artist, in
+MyDB/Schema/CD.pm:
 
-  package DB::Main::CD;
+  package MyDB::Schema::CD;
   use base qw/DBIx::Class/;
 
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  __PACKAGE__->load_components(qw/Core/);
   __PACKAGE__->table('cd');
-  __PACKAGE__->add_columns(qw/ cdid artist title year /);
+  __PACKAGE__->add_columns(qw/ cdid artistid title year /);
   __PACKAGE__->set_primary_key('cdid');
-  __PACKAGE__->belongs_to(artist => 'DB::Main::Artist');
+  __PACKAGE__->belongs_to(artist => 'MyDB::Schema::Artist', 'artistid');
 
   1;
 
 Then you can use these classes in your application's code:
 
   # Connect to your database.
-  use DB::Main;
-  my $schema = DB::Main->connect($dbi_dsn, $user, $pass, \%dbi_params);
+  use MyDB::Schema;
+  my $schema = MyDB::Schema->connect($dbi_dsn, $user, $pass, \%dbi_params);
 
   # Query for all artists and put them in an array,
   # or retrieve them as a result set object.
@@ -114,7 +130,7 @@
     { order_by => 'title' }
   );
 
-  # Create a result set that will fetch the artist relationship
+  # Create a result set that will fetch the artist data
   # at the same time as it fetches CDs, using only one query.
   my $millennium_cds_rs = $schema->resultset('CD')->search(
     { year => 2000 },
@@ -122,7 +138,7 @@
   );
 
   my $cd = $millennium_cds_rs->next; # SELECT ... FROM cds JOIN artists ...
-  my $cd_artist_name = $cd->artist->name; # Already has the data so no query
+  my $cd_artist_name = $cd->artist->name; # Already has the data so no 2nd query
 
   # new() makes a DBIx::Class::Row object but doesn't insert it into the DB.
   # create() is the same as new() then insert().
@@ -133,17 +149,18 @@
 
   $schema->txn_do(sub { $new_cd->update }); # Runs the update in a transaction
 
-  $millennium_cds_rs->update({ year => 2002 }); # Single-query bulk update
+  # change the year of all the millennium CDs at once
+  $millennium_cds_rs->update({ year => 2002 });
 
 =head1 DESCRIPTION
 
 This is an SQL to OO mapper with an object API inspired by L<Class::DBI>
-(and a compatibility layer as a springboard for porting) and a resultset API
+(with a compatibility layer as a springboard for porting) and a resultset API
 that allows abstract encapsulation of database operations. It aims to make
 representing queries in your code as perl-ish as possible while still
 providing access to as many of the capabilities of the database as possible,
 including retrieving related records from multiple tables in a single query,
-JOIN, LEFT JOIN, COUNT, DISTINCT, GROUP BY and HAVING support.
+JOIN, LEFT JOIN, COUNT, DISTINCT, GROUP BY, ORDER BY and HAVING support.
 
 DBIx::Class can handle multi-column primary and foreign keys, complex
 queries and database-level paging, and does its best to only query the
@@ -168,16 +185,6 @@
 are generally made to CPAN before the branch for the next release is
 merged back to trunk for a major release.
 
-The community can be found via:
-
-  Mailing list: http://lists.scsys.co.uk/mailman/listinfo/dbix-class/
-
-  SVN: http://dev.catalyst.perl.org/repos/bast/DBIx-Class/
-
-  SVNWeb: http://dev.catalyst.perl.org/svnweb/bast/browse/DBIx-Class/
-
-  IRC: irc.perl.org#dbix-class
-
 =head1 WHERE TO GO NEXT
 
 L<DBIx::Class::Manual::DocMap> lists each task you might want help on, and
@@ -208,6 +215,8 @@
 
 bluefeet: Aran Deltac <bluefeet at cpan.org>
 
+bricas: Brian Cassidy <bricas at cpan.org>
+
 captainL: Luke Saunders <luke.saunders at gmail.com>
 
 castaway: Jess Robinson
@@ -224,14 +233,14 @@
 
 dnm: Justin Wheeler <jwheeler at datademons.com>
 
-draven: Marcus Ramberg <mramberg at cpan.org>
-
 dwc: Daniel Westermann-Clark <danieltwc at cpan.org>
 
 dyfrgi: Michael Leuchtenburg <michael at slashhome.org>
 
 gphat: Cory G Watson <gphat at cpan.org>
 
+groditi: Guillermo Roditi <groditi at cpan.org>
+
 jesper: Jesper Krogh
 
 jguenther: Justin Guenther <jguenther at cpan.org>
@@ -244,7 +253,7 @@
 
 konobi: Scott McWhirter
 
-LTJake: Brian Cassidy <bricas at cpan.org>
+marcus: Marcus Ramberg <mramberg at cpan.org>
 
 mattlaw: Matt Lawrence
 

Modified: DBIx-Class/0.09/trunk/lib/SQL/Translator/Parser/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.09/trunk/lib/SQL/Translator/Parser/DBIx/Class.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/lib/SQL/Translator/Parser/DBIx/Class.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -14,7 +14,6 @@
 
 use Exporter;
 use Data::Dumper;
-use Digest::SHA1 qw( sha1_hex );
 use SQL::Translator::Utils qw(debug normalize_name);
 
 use base qw(Exporter);
@@ -24,6 +23,9 @@
 # -------------------------------------------------------------------
 # parse($tr, $data)
 #
+# setting parser_args => { add_fk_index => 0 } will prevent
+# the auto-generation of an index for each FK.
+#
 # Note that $data, in the case of this parser, is not useful.
 # We're working with DBIx::Class Schemas, not data streams.
 # -------------------------------------------------------------------
@@ -67,6 +69,9 @@
     foreach my $moniker (sort @monikers)
     {
         my $source = $dbicschema->source($moniker);
+        
+        # Skip custom query sources
+        next if ref($source->name);
 
         # It's possible to have multiple DBIC sources using the same table
         next if $seen_tables{$source->name}++;
@@ -96,34 +101,23 @@
         $table->primary_key($source->primary_columns);
 
         my @primary = $source->primary_columns;
-        foreach my $field (@primary) {
-          my $index = $table->add_index(
-                                        name   => $field,
-                                        fields => [$field],
-                                        type   => 'NORMAL',
-                                       );
-        }
         my %unique_constraints = $source->unique_constraints;
         foreach my $uniq (sort keys %unique_constraints) {
             if (!$source->compare_relationship_keys($unique_constraints{$uniq}, \@primary)) {
                 $table->add_constraint(
                             type             => 'unique',
-                            name             => _create_unique_symbol($uniq),
+                            name             => $uniq,
                             fields           => $unique_constraints{$uniq}
                 );
-
-               my $index = $table->add_index(
-                            name   => _create_unique_symbol(join('_', @{$unique_constraints{$uniq}})),
-                            fields => $unique_constraints{$uniq},
-                            type   => 'NORMAL',
-               );
-
             }
         }
 
         my @rels = $source->relationships();
 
         my %created_FK_rels;
+        
+        # global add_fk_index set in parser_args
+        my $add_fk_index = (exists $args->{add_fk_index} && ($args->{add_fk_index} == 0)) ? 0 : 1;
 
         foreach my $rel (sort @rels)
         {
@@ -135,8 +129,12 @@
             my $othertable = $source->related_source($rel);
             my $rel_table = $othertable->name;
 
+            # Force the order of @cond to match the order of ->add_columns
+            my $idx;
+            my %other_columns_idx = map {'foreign.'.$_ => ++$idx } $othertable->columns;            
+            my @cond = sort { $other_columns_idx{$a} <=> $other_columns_idx{$b} } keys(%{$rel_info->{cond}});
+      
             # Get the key information, mapping off the foreign/self markers
-            my @cond = keys(%{$rel_info->{cond}});
             my @refkeys = map {/^\w+\.(\w+)$/} @cond;
             my @keys = map {$rel_info->{cond}->{$_} =~ /^\w+\.(\w+)$/} @cond;
 
@@ -154,6 +152,9 @@
                 }
 
                 my $is_deferrable = $rel_info->{attrs}{is_deferrable};
+                
+                # global parser_args add_fk_index param can be overridden on the rel def
+                my $add_fk_index_rel = (exists $rel_info->{attrs}{add_fk_index}) ? $rel_info->{attrs}{add_fk_index} : $add_fk_index;
 
                 # Make sure we don't create the same foreign key constraint twice
                 my $key_test = join("\x00", @keys);
@@ -179,9 +180,7 @@
                 if (scalar(@keys)) {
                   $table->add_constraint(
                                     type             => 'foreign_key',
-                                    name             => _create_unique_symbol($table->name
-                                                                            . '_fk_'
-                                                                            . join('_', @keys)),
+                                    name             => join('_', $table->name, 'fk', @keys),
                                     fields           => \@keys,
                                     reference_fields => \@refkeys,
                                     reference_table  => $rel_table,
@@ -190,15 +189,17 @@
                                     (defined $is_deferrable ? ( deferrable => $is_deferrable ) : ()),
                   );
                     
-                  my $index = $table->add_index(
-                                    name   => _create_unique_symbol(join('_', @keys)),
-                                    fields => \@keys,
-                                    type   => 'NORMAL',
-                  );
-                }
+                  if ($add_fk_index_rel) {
+                      my $index = $table->add_index(
+                                                    name   => join('_', $table->name, 'idx', @keys),
+                                                    fields => \@keys,
+                                                    type   => 'NORMAL',
+                                                    );
+                  }
+              }
             }
         }
-
+		
         if ($source->result_class->can('sqlt_deploy_hook')) {
           $source->result_class->sqlt_deploy_hook($table);
         }
@@ -211,31 +212,4 @@
     return 1;
 }
 
-# TODO - is there a reasonable way to pass configuration?
-# Default of 64 comes from mysql's limit.
-our $MAX_SYMBOL_LENGTH    ||= 64;
-our $COLLISION_TAG_LENGTH ||= 8;
-
-# -------------------------------------------------------------------
-# $resolved_name = _create_unique_symbol($desired_name)
-#
-# If desired_name is really long, it will be truncated in a way that
-# has a high probability of leaving it unique.
-# -------------------------------------------------------------------
-sub _create_unique_symbol {
-    my $desired_name = shift;
-    return $desired_name if length $desired_name <= $MAX_SYMBOL_LENGTH;
-
-    my $truncated_name = substr $desired_name, 0, $MAX_SYMBOL_LENGTH - $COLLISION_TAG_LENGTH - 1;
-
-    # Hex isn't the most space-efficient, but it skirts around allowed
-    # charset issues
-    my $digest = sha1_hex($desired_name);
-    my $collision_tag = substr $digest, 0, $COLLISION_TAG_LENGTH;
-
-    return $truncated_name
-         . '_'
-         . $collision_tag;
-}
-
 1;

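To illustrate the new parser knob, a sketch of turning the automatic per-FK
indexes off globally, and back on for a single relationship (this mirrors the
parse/produce calls used elsewhere in this commit; the producer and class
names are illustrative):

  my $tr = SQL::Translator->new( parser_args => { add_fk_index => 0 } );
  SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
  print "SQL::Translator::Producer::SQLite"->can('produce')->($tr);

  # ...and the per-relationship override, in a result class:
  __PACKAGE__->belongs_to(
      artist => 'MyDB::Schema::Artist', 'artistid',
      { add_fk_index => 1 },   # index this FK even with the global off
  );
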
Modified: DBIx-Class/0.09/trunk/t/03podcoverage.t
===================================================================
--- DBIx-Class/0.09/trunk/t/03podcoverage.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/03podcoverage.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -31,6 +31,11 @@
             qw(cursor)
         ]
     },
+    'DBIx::Class::Schema' => {
+        ignore => [
+            qw(setup_connection_class)
+        ]
+    },
     'DBIx::Class::CDBICompat::AccessorMapping'          => { skip => 1 },
     'DBIx::Class::CDBICompat::AbstractSearch' => {
         ignore => [qw(search_where)]
@@ -76,6 +81,7 @@
     'DBIx::Class::Relationship::ManyToMany'             => { skip => 1 },
     'DBIx::Class::Relationship::ProxyMethods'           => { skip => 1 },
     'DBIx::Class::ResultSetProxy'                       => { skip => 1 },
+    'DBIx::Class::ResultSetManager'                     => { skip => 1 },
     'DBIx::Class::ResultSourceProxy'                    => { skip => 1 },
     'DBIx::Class::Storage::DBI'                         => { skip => 1 },
     'DBIx::Class::Storage::DBI::DB2'                    => { skip => 1 },
@@ -98,9 +104,8 @@
 
     'DBIx::Class::Schema::Versioned' => { ignore => [ qw(connection) ] },
 
-# must kill authors.
-
-    'DBIx::Class::Storage::DBI::Replication' => { skip => 1 },
+# don't bother since it's heavily deprecated
+    'DBIx::Class::ResultSetManager' => { skip => 1 },
 };
 
 foreach my $module (@modules) {

Modified: DBIx-Class/0.09/trunk/t/40resultsetmanager.t
===================================================================
--- DBIx-Class/0.09/trunk/t/40resultsetmanager.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/40resultsetmanager.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -15,6 +15,11 @@
   }
 }
 
+BEGIN {
+  local $SIG{__WARN__} = sub {};
+  require DBIx::Class::ResultSetManager;
+}
+
 use DBICTest::ResultSetManager; # uses Class::Inspector
 
 my $schema = DBICTest::ResultSetManager->compose_namespace('DB');

Modified: DBIx-Class/0.09/trunk/t/51threads.t
===================================================================
--- DBIx-Class/0.09/trunk/t/51threads.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/51threads.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -60,7 +60,6 @@
 
     my $newthread = async {
         my $tid = threads->tid;
-        my $dbh = $schema->storage->dbh;
 
         my $child_rs = $schema->resultset('CD')->search({ year => 1901 });
         my $row = $parent_rs->next;

Added: DBIx-Class/0.09/trunk/t/51threadtxn.t
===================================================================
--- DBIx-Class/0.09/trunk/t/51threadtxn.t	                        (rev 0)
+++ DBIx-Class/0.09/trunk/t/51threadtxn.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -0,0 +1,95 @@
+use strict;
+use warnings;
+use Test::More;
+use Config;
+
+# README: If you set the env var to a number greater than 10,
+#   we will use that many children
+
+BEGIN {
+    plan skip_all => 'Your perl does not support ithreads'
+        if !$Config{useithreads} || $] < 5.008;
+}
+
+use threads;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_PG_${_}" } qw/DSN USER PASS/};
+my $num_children = $ENV{DBICTEST_THREAD_STRESS};
+
+plan skip_all => 'Set $ENV{DBICTEST_THREAD_STRESS} to run this test'
+    unless $num_children;
+
+plan skip_all => 'Set $ENV{DBICTEST_PG_DSN}, _USER and _PASS to run this test'
+      . ' (note: creates and drops a table named artist!)' unless ($dsn && $user);
+
+diag 'It is normal to see a series of "Scalars leaked: ..." messages during this test';
+
+if($num_children !~ /^[0-9]+$/ || $num_children < 10) {
+   $num_children = 10;
+}
+
+plan tests => $num_children + 5;
+
+use lib qw(t/lib);
+
+use_ok('DBICTest::Schema');
+
+my $schema = DBICTest::Schema->connection($dsn, $user, $pass, { AutoCommit => 1, RaiseError => 1, PrintError => 0 });
+
+my $parent_rs;
+
+eval {
+    my $dbh = $schema->storage->dbh;
+
+    {
+        local $SIG{__WARN__} = sub {};
+        eval { $dbh->do("DROP TABLE cd") };
+        $dbh->do("CREATE TABLE cd (cdid serial PRIMARY KEY, artist INTEGER NOT NULL UNIQUE, title VARCHAR(255) NOT NULL UNIQUE, year VARCHAR(255));");
+    }
+
+    $schema->resultset('CD')->create({ title => 'vacation in antarctica', artist => 123, year => 1901 });
+    $schema->resultset('CD')->create({ title => 'vacation in antarctica part 2', artist => 456, year => 1901 });
+
+    $parent_rs = $schema->resultset('CD')->search({ year => 1901 });
+    $parent_rs->next;
+};
+ok(!$@) or diag "Creation eval failed: $@";
+
+my @children;
+while(@children < $num_children) {
+
+    my $newthread = async {
+        my $tid = threads->tid;
+        # my $dbh = $schema->storage->dbh;
+
+        $schema->txn_do(sub {
+            my $child_rs = $schema->resultset('CD')->search({ year => 1901 });
+            my $row = $parent_rs->next;
+            if($row && $row->get_column('artist') =~ /^(?:123|456)$/) {
+                $schema->resultset('CD')->create({ title => "test success $tid", artist => $tid, year => scalar(@children) });
+            }
+        });
+        sleep(3);
+    };
+    die "Thread creation failed: $! $@" if !defined $newthread;
+    push(@children, $newthread);
+}
+
+ok(1, "past spawning");
+
+{
+    $_->join for(@children);
+}
+
+ok(1, "past joining");
+
+while(@children) {
+    my $child = pop(@children);
+    my $tid = $child->tid;
+    my $rs = $schema->resultset('CD')->search({ title => "test success $tid", artist => $tid, year => scalar(@children) });
+    is($rs->next->get_column('artist'), $tid, "Child $tid successful");
+}
+
+ok(1, "Made it to the end");
+
+$schema->storage->dbh->do("DROP TABLE cd");

Modified: DBIx-Class/0.09/trunk/t/60core.t
===================================================================
--- DBIx-Class/0.09/trunk/t/60core.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/60core.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -7,7 +7,7 @@
 
 my $schema = DBICTest->init_schema();
 
-plan tests => 78;
+plan tests => 84;
 
 eval { require DateTime::Format::MySQL };
 my $NO_DTFM = $@ ? 1 : 0;
@@ -37,10 +37,26 @@
 
 is($art->name, 'We Are In Rehab', "Accessor update ok");
 
+my %dirty = $art->get_dirty_columns();
+cmp_ok(scalar(keys(%dirty)), '==', 1, '1 dirty column');
+ok(grep($_ eq 'name', keys(%dirty)), 'name is dirty');
+
 is($art->get_column("name"), 'We Are In Rehab', 'And via get_column');
 
 ok($art->update, 'Update run');
 
+my %not_dirty = $art->get_dirty_columns();
+cmp_ok(scalar(keys(%not_dirty)), '==', 0, 'Nothing is dirty');
+
+eval {
+  my $ret = $art->make_column_dirty('name2');
+};
+ok($@, 'Failed to make non-existent column dirty');
+$art->make_column_dirty('name');
+my %fake_dirty = $art->get_dirty_columns();
+cmp_ok(scalar(keys(%fake_dirty)), '==', 1, '1 fake dirty column');
+ok(grep($_ eq 'name', keys(%fake_dirty)), 'name is fake dirty');
+
 my $record_jp = $schema->resultset("Artist")->search(undef, { join => 'cds' })->search(undef, { prefetch => 'cds' })->next;
 
 ok($record_jp, "prefetch on same rel okay");

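A short sketch of the dirty-column API the new tests exercise (assumes $art
is a Row object, as in the test above):

  $art->name('We Are In Rehab');         # accessor marks 'name' dirty

  my %dirty = $art->get_dirty_columns;   # ( name => 'We Are In Rehab' )

  # Force an unchanged column into the next UPDATE; this throws if
  # the column does not exist on the source.
  $art->make_column_dirty('name');

  $art->update;                          # writes and clears the dirty set
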
Modified: DBIx-Class/0.09/trunk/t/61findnot.t
===================================================================
--- DBIx-Class/0.09/trunk/t/61findnot.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/61findnot.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -54,7 +54,8 @@
 $artist_rs = $schema->resultset("Artist");
 warning_is {
   $artist_rs->find({}, { key => 'primary' })
-} "DBIx::Class::ResultSet::find(): Query returned more than one row", "Non-unique find generated a cursor inexhaustion warning";
+} "DBIx::Class::ResultSet::find(): Query returned more than one row.  SQL that returns multiple rows is DEPRECATED for ->find and ->single"
+    =>  "Non-unique find generated a cursor inexhaustion warning";
 
 $artist_rs = $schema->resultset("Artist")->search({}, { prefetch => 'cds' });
 warning_is {

Modified: DBIx-Class/0.09/trunk/t/68inflate_has_a.t
===================================================================
--- DBIx-Class/0.09/trunk/t/68inflate_has_a.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/68inflate_has_a.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -12,7 +12,7 @@
 
 plan tests => 6;
 
-DBICTest::Schema::CD->load_components(qw/CDBICompat::HasA/);
+DBICTest::Schema::CD->load_components(qw/CDBICompat::Relationships/);
 
 DBICTest::Schema::CD->has_a( 'year', 'DateTime',
       inflate => sub { DateTime->new( year => shift ) },

Modified: DBIx-Class/0.09/trunk/t/71mysql.t
===================================================================
--- DBIx-Class/0.09/trunk/t/71mysql.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/71mysql.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -13,7 +13,7 @@
 plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
   unless ($dsn && $user);
 
-plan tests => 5;
+plan tests => 10;
 
 my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
 
@@ -85,7 +85,36 @@
     is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
 }
 
+## Can we properly deal with the null search problem?
+##
+## The only way is to do a SET SQL_AUTO_IS_NULL = 0 on connect,
+## but I'm not sure if we should do this or not (Ash, 2008/06/03)
+
+NULLINSEARCH: {
+    
+    ok my $artist1_rs = $schema->resultset('Artist')->search({artistid=>6666})
+    => 'Created an artist resultset of 6666';
+    
+    is $artist1_rs->count, 0
+    => 'Got no returned rows';
+    
+    ok my $artist2_rs = $schema->resultset('Artist')->search({artistid=>undef})
+    => 'Created an artist resultset of undef';
+    
+    TODO: {
+    	$TODO = "need to fix the row count =1 when select * from table where pk IS NULL problem";
+	    is $artist2_rs->count, 0
+	    => 'got no rows';    	
+    }
+
+    my $artist = $artist2_rs->single;
+    
+    is $artist => undef
+    => 'Nothing Found!';
+}
+    
+
 # clean up our mess
 END {
-    $dbh->do("DROP TABLE artist") if $dbh;
-}
+    #$dbh->do("DROP TABLE artist") if $dbh;
+}
\ No newline at end of file

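As the comment above notes, the only known workaround for MySQL's "WHERE pk
IS NULL matches the last inserted row" behaviour is to disable
SQL_AUTO_IS_NULL at connect time; a sketch of doing that by hand, since DBIC
does not (yet) do it automatically:

  my $schema = DBICTest::Schema->connect(
      $dsn, $user, $pass,
      { on_connect_do => [ 'SET SQL_AUTO_IS_NULL = 0' ] },
  );
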
Modified: DBIx-Class/0.09/trunk/t/77prefetch.t
===================================================================
--- DBIx-Class/0.09/trunk/t/77prefetch.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/77prefetch.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -16,7 +16,7 @@
     eval "use DBD::SQLite";
     plan $@
         ? ( skip_all => 'needs DBD::SQLite for testing' )
-        : ( tests => 56 );
+        : ( tests => 58 );
 }
 
 # figure out if we've got a version of sqlite that is older than 3.2.6, in
@@ -227,7 +227,7 @@
 $schema->storage->debug(1);
 
 my $tree_like =
-     $schema->resultset('TreeLike')->find(4,
+     $schema->resultset('TreeLike')->find(5,
        { join     => { parent => { parent => 'parent' } },
          prefetch => { parent => { parent => 'parent' } } });
 
@@ -244,21 +244,21 @@
 
 cmp_ok($queries, '==', 1, 'Only one query run');
 
-$tree_like = $schema->resultset('TreeLike')->search({'me.id' => 1});
+$tree_like = $schema->resultset('TreeLike')->search({'me.id' => 2});
 $tree_like = $tree_like->search_related('children')->search_related('children')->search_related('children')->first;
 is($tree_like->name, 'quux', 'Tree search_related ok');
 
 $tree_like = $schema->resultset('TreeLike')->search_related('children',
-    { 'children.id' => 2, 'children_2.id' => 3 },
+    { 'children.id' => 3, 'children_2.id' => 4 },
     { prefetch => { children => 'children' } }
   )->first;
 is(eval { $tree_like->children->first->children->first->name }, 'quux',
    'Tree search_related with prefetch ok');
 
 $tree_like = eval { $schema->resultset('TreeLike')->search(
-    { 'children.id' => 2, 'children_2.id' => 5 }, 
+    { 'children.id' => 3, 'children_2.id' => 6 }, 
     { join => [qw/children children/] }
-  )->search_related('children', { 'children_4.id' => 6 }, { prefetch => 'children' }
+  )->search_related('children', { 'children_4.id' => 7 }, { prefetch => 'children' }
   )->first->children->first; };
 is(eval { $tree_like->name }, 'fong', 'Tree with multiple has_many joins ok');
 
@@ -344,7 +344,8 @@
 
 is($queries, 0, 'chained search_related after has_many->has_many prefetch ran no queries');
 
-
+# once the following TODO is complete, remove the 2 stop-gap tests immediately after the TODO block
+# (the TODO block itself contains tests ensuring that the stop-gaps are removed)
 TODO: {
     local $TODO = 'Prefetch of multiple has_many rels at the same level (currently must die to protect the clueless git)';
     use DBIx::Class::ResultClass::HashRefInflator;
@@ -367,17 +368,23 @@
         $pr_tracks_rs = $pr_cd_rs->first->tracks;
         $pr_tracks_count = $pr_tracks_rs->count;
     };
-    ok(! $@, 'exception on attempt to prefetch several same level has_many\'s (1 -> M + M)');
-    is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
 
-    is($pr_tracks_count, $tracks_count, 'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)');
+    my $o_mm_exc = $@;
+    ok(! $o_mm_exc, 'exception on attempt to prefetch several same level has_many\'s (1 -> M + M)');
 
-    for ($pr_tracks_rs, $tracks_rs) {
-        $_->result_class ('DBIx::Class::ResultClass::HashRefInflator');
-    }
+    SKIP: {
+        skip "1 -> M + M prefetch died", 3 if $o_mm_exc;
+    
+        is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
+        is($pr_tracks_count, $tracks_count, 'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)');
 
-    is_deeply ([$pr_tracks_rs->all], [$tracks_rs->all], 'same structure returned with and without prefetch over several same level has_many\'s (1 -> M + M)');
+        for ($pr_tracks_rs, $tracks_rs) {
+            $_->result_class ('DBIx::Class::ResultClass::HashRefInflator');
+        }
 
+        is_deeply ([$pr_tracks_rs->all], [$tracks_rs->all], 'same structure returned with and without prefetch over several same level has_many\'s (1 -> M + M)');
+    };
+
     #( M -> 1 -> M + M )
     my $note_rs = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' });
     my $pr_note_rs = $note_rs->search ({}, {
@@ -398,14 +405,26 @@
         $pr_tags_rs = $pr_note_rs->first->cd->tags;
         $pr_tags_count = $pr_tags_rs->count;
     };
-    ok(! $@, 'exception on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)');
-    is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
 
-    is($pr_tags_count, $tags_count, 'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)');
+    my $m_o_mm_exc = $@;
+    ok(! $m_o_mm_exc, 'exception on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)');
 
-    for ($pr_tags_rs, $tags_rs) {
-        $_->result_class ('DBIx::Class::ResultClass::HashRefInflator');
-    }
+    SKIP: {
+        skip "M -> 1 -> M + M prefetch died", 3 if $m_o_mm_exc;
+    
+        is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
 
-    is_deeply ([$pr_tags_rs->all], [$tags_rs->all], 'same structure returned with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)');
+        is($pr_tags_count, $tags_count, 'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)');
+
+        for ($pr_tags_rs, $tags_rs) {
+            $_->result_class ('DBIx::Class::ResultClass::HashRefInflator');
+        }
+
+        is_deeply ([$pr_tags_rs->all], [$tags_rs->all], 'same structure returned with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)');
+    };
 };
+
+eval { my $track = $schema->resultset('CD')->search ({ 'me.title' => 'Forkful of bees' }, { prefetch => [qw/tracks tags/] })->first->tracks->first };
+ok ($@, 'exception on attempt to prefetch several same level has_many\'s (1 -> M + M)');
+eval { my $tag = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' }, { prefetch => { cd => [qw/tags tracks/] } })->first->cd->tags->first };
+ok ($@, 'exception on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)');

Modified: DBIx-Class/0.09/trunk/t/81transactions.t
===================================================================
--- DBIx-Class/0.09/trunk/t/81transactions.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/81transactions.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -8,7 +8,7 @@
 
 my $schema = DBICTest->init_schema();
 
-plan tests => 67;
+plan tests => 63;
 
 my $code = sub {
   my ($artist, @cd_titles) = @_;
@@ -236,32 +236,12 @@
     };
     my $err = $@;
     ok(($err eq ''), 'Pre-connection nested transactions.');
+    $schema2->storage->disconnect;
 }
+$schema->storage->disconnect;
 
-# Test txn_rollback with nested
-{
-  local $TODO = "Work out how this should work";
-  my $local_schema = DBICTest->init_schema();
-
-  my $artist_rs = $local_schema->resultset('Artist');
-  throws_ok {
-   
-    $local_schema->txn_begin;
-    $artist_rs->create({ name => 'Test artist rollback 1'});
-    $local_schema->txn_begin;
-    is($local_schema->storage->transaction_depth, 2, "Correct transaction depth");
-    $artist_rs->create({ name => 'Test artist rollback 2'});
-    $local_schema->txn_rollback;
-  } qr/Not sure what this should be.... something tho/, "Rolled back okay";
-  is($local_schema->storage->transaction_depth, 0, "Correct transaction depth");
-
-  ok(!$artist_rs->find({ name => 'Test artist rollback 1'}), "Test Artist not created")
-    || $artist_rs->find({ name => 'Test artist rollback 1'})->delete;
-}
-
 # Test txn_scope_guard
 {
-  local $TODO = "Work out how this should work";
   my $schema = DBICTest->init_schema();
 
   is($schema->storage->transaction_depth, 0, "Correct transaction depth");
@@ -276,7 +256,7 @@
     });
     
    $guard->commit;
-  } qr/No such column made_up_column.*?line 16/, "Error propogated okay";
+  } qr/No such column made_up_column .*? at .*?81transactions.t line \d+/, "Error propagated okay";
 
   ok(!$artist_rs->find({name => 'Death Cab for Cutie'}), "Artist not created");
 
@@ -294,6 +274,7 @@
     # forcing a txn_rollback to happen
     outer($schema, 0);
   };
+  local $TODO = "Work out how this should work";
   is($@, "Not sure what we want here, but something", "Rollback okay");
 
   ok(!$artist_rs->find({name => 'Death Cab for Cutie'}), "Artist not created");

Modified: DBIx-Class/0.09/trunk/t/86sqlt.t
===================================================================
--- DBIx-Class/0.09/trunk/t/86sqlt.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/86sqlt.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -10,7 +10,7 @@
 
 my $schema = DBICTest->init_schema;
 
-plan tests => 160;
+plan tests => 130;
 
 my $translator = SQL::Translator->new( 
   parser_args => {
@@ -40,14 +40,15 @@
   twokeys => [
     {
       'display' => 'twokeys->cd',
-      'name' => 'twokeys_fk_cd', 'index_name' => 'cd',
+      'name' => 'twokeys_fk_cd', 'index_name' => 'twokeys_idx_cd',
       'selftable' => 'twokeys', 'foreigntable' => 'cd', 
       'selfcols'  => ['cd'], 'foreigncols' => ['cdid'], 
+      'noindex'  => 1,
       on_delete => '', on_update => '', deferrable => 0,
     },
     {
       'display' => 'twokeys->artist',
-      'name' => 'twokeys_fk_artist', 'index_name' => 'artist',
+      'name' => 'twokeys_fk_artist', 'index_name' => 'twokeys_idx_artist',
       'selftable' => 'twokeys', 'foreigntable' => 'artist', 
       'selfcols'  => ['artist'], 'foreigncols' => ['artistid'],
       on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 1,
@@ -58,14 +59,14 @@
   fourkeys_to_twokeys => [
     {
       'display' => 'fourkeys_to_twokeys->twokeys',
-      'name' => 'fourkeys_to_twokeys_fk_t_cd_t_artist', 'index_name' => 't_cd_t_artist',
+      'name' => 'fourkeys_to_twokeys_fk_t_artist_t_cd', 'index_name' => 'fourkeys_to_twokeys_idx_t_artist_t_cd',
       'selftable' => 'fourkeys_to_twokeys', 'foreigntable' => 'twokeys', 
       'selfcols'  => ['t_artist', 't_cd'], 'foreigncols' => ['artist', 'cd'], 
       on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 1,
     },
     {
-      'display' => 'fourkeys_to_twokeys->fourkeys', 'index_name' => 'f_foo_f_goodbye_f_hello_f_bar',
-      'name' => 'fourkeys_to_twokeys_fk_f_foo_f_goodbye_f_hello_f_bar',
+      'display' => 'fourkeys_to_twokeys->fourkeys', 'index_name' => 'fourkeys_to_twokeys_idx_f_foo_f_bar_f_hello_f_goodbye',
+      'name' => 'fourkeys_to_twokeys_fk_f_foo_f_bar_f_hello_f_goodbye',
       'selftable' => 'fourkeys_to_twokeys', 'foreigntable' => 'fourkeys', 
       'selfcols'  => [qw(f_foo f_bar f_hello f_goodbye)],
       'foreigncols' => [qw(foo bar hello goodbye)], 
@@ -77,14 +78,14 @@
   cd_to_producer => [
     {
       'display' => 'cd_to_producer->cd',
-      'name' => 'cd_to_producer_fk_cd', 'index_name' => 'cd',
+      'name' => 'cd_to_producer_fk_cd', 'index_name' => 'cd_to_producer_idx_cd',
       'selftable' => 'cd_to_producer', 'foreigntable' => 'cd', 
       'selfcols'  => ['cd'], 'foreigncols' => ['cdid'],
       on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 1,
     },
     {
       'display' => 'cd_to_producer->producer',
-      'name' => 'cd_to_producer_fk_producer', 'index_name' => 'producer',
+      'name' => 'cd_to_producer_fk_producer', 'index_name' => 'cd_to_producer_idx_producer',
       'selftable' => 'cd_to_producer', 'foreigntable' => 'producer', 
       'selfcols'  => ['producer'], 'foreigncols' => ['producerid'],
       on_delete => '', on_update => '', deferrable => 1,
@@ -95,14 +96,14 @@
   self_ref_alias => [
     {
       'display' => 'self_ref_alias->self_ref for self_ref',
-      'name' => 'self_ref_alias_fk_self_ref', 'index_name' => 'self_ref',
+      'name' => 'self_ref_alias_fk_self_ref', 'index_name' => 'self_ref_alias_idx_self_ref',
       'selftable' => 'self_ref_alias', 'foreigntable' => 'self_ref', 
       'selfcols'  => ['self_ref'], 'foreigncols' => ['id'],
       on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 1,
     },
     {
       'display' => 'self_ref_alias->self_ref for alias',
-      'name' => 'self_ref_alias_fk_alias', 'index_name' => 'alias',
+      'name' => 'self_ref_alias_fk_alias', 'index_name' => 'self_ref_alias_idx_alias',
       'selftable' => 'self_ref_alias', 'foreigntable' => 'self_ref', 
       'selfcols'  => ['alias'], 'foreigncols' => ['id'],
       on_delete => '', on_update => '', deferrable => 1,
@@ -113,7 +114,7 @@
   cd => [
     {
       'display' => 'cd->artist',
-      'name' => 'cd_fk_artist', 'index_name' => 'artist',
+      'name' => 'cd_fk_artist', 'index_name' => 'cd_idx_artist',
       'selftable' => 'cd', 'foreigntable' => 'artist', 
       'selfcols'  => ['artist'], 'foreigncols' => ['artistid'],
       on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 1,
@@ -124,14 +125,14 @@
   artist_undirected_map => [
     {
       'display' => 'artist_undirected_map->artist for id1',
-      'name' => 'artist_undirected_map_fk_id1', 'index_name' => 'id1',
+      'name' => 'artist_undirected_map_fk_id1', 'index_name' => 'artist_undirected_map_idx_id1',
       'selftable' => 'artist_undirected_map', 'foreigntable' => 'artist', 
       'selfcols'  => ['id1'], 'foreigncols' => ['artistid'],
       on_delete => 'CASCADE', on_update => '', deferrable => 1,
     },
     {
       'display' => 'artist_undirected_map->artist for id2',
-      'name' => 'artist_undirected_map_fk_id2', 'index_name' => 'id2',
+      'name' => 'artist_undirected_map_fk_id2', 'index_name' => 'artist_undirected_map_idx_id2',
       'selftable' => 'artist_undirected_map', 'foreigntable' => 'artist', 
       'selfcols'  => ['id2'], 'foreigncols' => ['artistid'],
       on_delete => 'CASCADE', on_update => '', deferrable => 1,
@@ -142,7 +143,7 @@
   track => [
     {
       'display' => 'track->cd',
-      'name' => 'track_fk_cd', 'index_name' => 'cd',
+      'name' => 'track_fk_cd', 'index_name' => 'track_idx_cd',
       'selftable' => 'track', 'foreigntable' => 'cd', 
       'selfcols'  => ['cd'], 'foreigncols' => ['cdid'],
       on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 1,
@@ -153,7 +154,7 @@
   treelike => [
     {
       'display' => 'treelike->treelike for parent',
-      'name' => 'treelike_fk_parent', 'index_name' => 'parent',
+      'name' => 'treelike_fk_parent', 'index_name' => 'treelike_idx_parent',
       'selftable' => 'treelike', 'foreigntable' => 'treelike', 
       'selfcols'  => ['parent'], 'foreigncols' => ['id'],
       on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 1,
@@ -164,7 +165,7 @@
   twokeytreelike => [
     {
       'display' => 'twokeytreelike->twokeytreelike for parent1,parent2',
-      'name' => 'twokeytreelike_fk_parent1_parent2', 'index_name' => 'parent1_parent2',
+      'name' => 'twokeytreelike_fk_parent1_parent2', 'index_name' => 'twokeytreelike_idx_parent1_parent2',
       'selftable' => 'twokeytreelike', 'foreigntable' => 'twokeytreelike', 
       'selfcols'  => ['parent1', 'parent2'], 'foreigncols' => ['id1','id2'],
       on_delete => '', on_update => '', deferrable => 1,
@@ -175,7 +176,7 @@
   tags => [
     {
       'display' => 'tags->cd',
-      'name' => 'tags_fk_cd', 'index_name' => 'cd',
+      'name' => 'tags_fk_cd', 'index_name' => 'tags_idx_cd',
       'selftable' => 'tags', 'foreigntable' => 'cd', 
       'selfcols'  => ['cd'], 'foreigncols' => ['cdid'],
       on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 1,
@@ -186,7 +187,7 @@
   bookmark => [
     {
       'display' => 'bookmark->link',
-      'name' => 'bookmark_fk_link', 'index_name' => 'link',
+      'name' => 'bookmark_fk_link', 'index_name' => 'bookmark_idx_link',
       'selftable' => 'bookmark', 'foreigntable' => 'link', 
       'selfcols'  => ['link'], 'foreigncols' => ['id'],
       on_delete => '', on_update => '', deferrable => 1,
@@ -196,42 +197,13 @@
   forceforeign => [
     {
       'display' => 'forceforeign->artist',
-      'name' => 'forceforeign_fk_artist', 'index_name' => 'artist',
+      'name' => 'forceforeign_fk_artist', 'index_name' => 'forceforeign_idx_artist',
       'selftable' => 'forceforeign', 'foreigntable' => 'artist', 
-      'selfcols'  => ['artist'], 'foreigncols' => ['artist_id'], 
+      'selfcols'  => ['artist'], 'foreigncols' => ['artistid'], 
       on_delete => '', on_update => '', deferrable => 1,
     },
   ],
 
-  # LongColumns
-  long_columns => [
-    {
-      'display' => 'long_columns->owner',
-      'name' => 'long_columns_fk_64_character_column_aaaaaaaaaaaaaaaaaaa_1ca973e2',
-      'index_name' => '64_character_column_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
-      'selftable' => 'long_columns', 'foreigntable' => 'long_columns',
-      'selfcols' => ['64_character_column_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'],
-      'foreigncols' => ['lcid'],
-      on_delete => '', on_update => '', deferrable => 1,
-    },
-    {
-      'display' => 'long_columns->owner2',
-      'name' => 'long_columns_fk_32_character_column_aaaaaaaaaaaa_32_cha_6060a8f3',
-      'index_name' => '32_character_column_aaaaaaaaaaaa_32_character_column_bb_30f7a7fe',
-      'selftable' => 'long_columns', 'foreigntable' => 'long_columns',
-      'selfcols' => ['32_character_column_bbbbbbbbbbbb', '32_character_column_aaaaaaaaaaaa'],
-      'foreigncols' => ['32_character_column_aaaaaaaaaaaa', '32_character_column_bbbbbbbbbbbb'],
-      on_delete => '', on_update => '', deferrable => 1,
-    },
-    {
-      'display' => 'long_columns->owner3',
-      'name' => 'long_columns_fk_16_character_col',
-      'index_name' => '16_character_col',
-      'selftable' => 'long_columns', 'foreigntable' => 'long_columns',
-      'selfcols' => ['16_character_col'], 'foreigncols' => ['8_char_c'],
-      on_delete => '', on_update => '', deferrable => 1,
-    },
-  ],
 );
 
 my %unique_constraints = (
@@ -253,29 +225,6 @@
     },
   ],
 
-  long_columns => [
-    {
-      'display' => 'long but not quite truncated unique',
-      'name' => 'long_columns_16_character_col_32_character_column_aaaaaaaaaaaa',
-      'table' => 'long_columns', 'cols' => [qw( 32_character_column_aaaaaaaaaaaa 16_character_col )],
-    },
-    {
-      'display' => 'multi column truncated unique',
-      'name' => 'long_columns_8_char_c_16_character_col_32_character_col_ee4a438c',
-      'table' => 'long_columns', 'cols' => [qw( 32_character_column_aaaaaaaaaaaa 16_character_col 8_char_c )],
-    },
-    {
-      'display' => 'different multi column truncated unique with same base',
-      'name' => 'long_columns_8_char_c_16_character_col_32_character_col_c5dbc7a7',
-      'table' => 'long_columns', 'cols' => [qw( 32_character_column_bbbbbbbbbbbb 16_character_col 8_char_c )],
-    },
-    {
-      'display' => 'single column truncated unique',
-      'name' => 'long_columns_64_character_column_aaaaaaaaaaaaaaaaaaaaaa_095dc664',
-      'table' => 'long_columns', 'cols' => ['64_character_column_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'],
-    },
-  ],
-
   # TwoKeyTreeLike
   twokeytreelike => [
     {
@@ -306,8 +255,8 @@
 
 my $tschema = $translator->schema();
 # Test that the $schema->sqlt_deploy_hook was called okay and that it removed
-# the 'link' table
-ok( !defined($tschema->get_table('link')), "Link table was removed by hook");
+# the 'dummy' table
+ok( !defined($tschema->get_table('dummy')), "Dummy table was removed by hook");
 
 # Test that nonexistent constraints are not found
 my $constraint = get_constraint('FOREIGN KEY', 'cd', ['title'], 'cd', ['year']);
@@ -364,6 +313,7 @@
   my %fields = map { $_ => 1 } @$cols;
   my %f_fields = map { $_ => 1 } @$f_cols;
 
+  die "No $table_name" unless $table;
  CONSTRAINT:
   for my $constraint ( $table->get_constraints ) {
     next unless $constraint->type eq $type;
@@ -439,8 +389,13 @@
       "is_deferrable parameter correct for `$desc'" );
 
   my $index = get_index( $got->table, { fields => $expected->{selfcols} } );
-  ok( defined $index, "index exists for `$desc'" );
-  is( $index->name, $expected->{index_name}, "index has correct name for `$desc'" );
+
+  if ($expected->{noindex}) {
+      ok( !defined $index, "index doesn't exist for `$desc'" );
+  } else {
+      ok( defined $index, "index exists for `$desc'" );
+      is( $index->name, $expected->{index_name}, "index has correct name for `$desc'" );
+  }
 }
 
 sub test_unique {

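For reference, the deterministic names these updated expectations encode come
straight from the parser change above (a sketch; $table and @keys as in the
parser):

  my $fk_name  = join '_', $table->name, 'fk',  @keys;   # e.g. cd_fk_artist
  my $idx_name = join '_', $table->name, 'idx', @keys;   # e.g. cd_idx_artist
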
Modified: DBIx-Class/0.09/trunk/t/89inflate_datetime.t
===================================================================
--- DBIx-Class/0.09/trunk/t/89inflate_datetime.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/89inflate_datetime.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -10,7 +10,7 @@
 eval { require DateTime::Format::MySQL };
 plan skip_all => "Need DateTime::Format::MySQL for inflation tests" if $@;
 
-plan tests => 17;
+plan tests => 21;
 
 # inflation test
 my $event = $schema->resultset("Event")->find(1);
@@ -70,3 +70,25 @@
 is("$created_on", '2006-01-31T12:34:56', 'Loaded correct timestamp using timezone');
 is($created_on->time_zone->name, 'America/Chicago', 'Correct timezone');
 
+# This should fail to set
+my $prev_str = "$created_on";
+$loaded_event->update({ created_on => '0000-00-00' });
+is("$created_on", $prev_str, "Don't update invalid dates");
+
+my $invalid = $schema->resultset('Event')->create({
+    starts_at  => '0000-00-00',
+    created_on => $created_on
+});
+
+is( $invalid->get_column('starts_at'), '0000-00-00', "Invalid date stored" );
+is( $invalid->starts_at, undef, "Inflate to undef" );
+
+$invalid->created_on('0000-00-00');
+$invalid->update;
+
+{
+    local $@;
+    eval { $invalid->created_on };
+    like( $@, qr/invalid date format/i, "Invalid date format exception");
+}
+

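To make the tested behaviour concrete, a sketch of reading invalid dates back
(column names as in the test schema; $valid_dt stands in for a DateTime
object):

  my $event = $schema->resultset('Event')->create({
      starts_at  => '0000-00-00',      # stored verbatim
      created_on => $valid_dt,
  });

  $event->get_column('starts_at');     # '0000-00-00' - raw value kept
  $event->starts_at;                   # undef - starts_at inflates to undef

  # created_on is stricter: once an invalid value is stored, reading
  # it back throws rather than returning undef.
  $event->created_on('0000-00-00');
  $event->update;
  eval { $event->created_on };         # dies: qr/invalid date format/i
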
Modified: DBIx-Class/0.09/trunk/t/93storage_replication.t
===================================================================
--- DBIx-Class/0.09/trunk/t/93storage_replication.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/93storage_replication.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -2,261 +2,575 @@
 use warnings;
 use lib qw(t/lib);
 use Test::More;
+use Test::Exception;
+use DBICTest;
 
 BEGIN {
-    eval "use DBD::Multi";
+    eval "use DBIx::Class::Storage::DBI::Replicated; use Test::Moose";
     plan $@
-        ? ( skip_all => 'needs DBD::Multi for testing' )
-        : ( tests => 18 );
-}	
+        ? ( skip_all => "Deps not installed: $@" )
+        : ( tests => 79 );
+}
 
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+use_ok 'DBIx::Class::Storage::DBI::Replicated';
+
+=head1 HOW TO USE
+
+    This is a test of the replicated storage system.  This will work in one of
+    two ways: either it will try to fake replication with a couple of SQLite DBs
+    and creative use of copy, or, if you define a couple of %ENV vars correctly,
+    it will try to test against those.  If you do that, it will assume the setup
+    is properly replicating.  Your results may vary, but I have demonstrated this
+    to work with mysql native replication.
+    
+=cut
+
+
 ## ----------------------------------------------------------------------------
 ## Build a class to hold all our required testing data and methods.
 ## ----------------------------------------------------------------------------
 
-TESTSCHEMACLASS: {
-	
-	package DBIx::Class::DBI::Replication::TestReplication;
+TESTSCHEMACLASSES: {
 
-	use DBI;	
-	use DBICTest;
-	use File::Copy;
-	
-	## Create a constructor
-	
+    ## --------------------------------------------------------------------- ##
+    ## Create an object to contain your replicated stuff.
+    ## --------------------------------------------------------------------- ##
+    
+    package DBIx::Class::DBI::Replicated::TestReplication;
+   
+    use DBICTest;
+    use base qw/Class::Accessor::Fast/;
+    
+    __PACKAGE__->mk_accessors( qw/schema/ );
+
+    ## Initialize the object
+    
 	sub new {
-		my $class = shift @_;
-		my %params = @_;
-		
-		my $self = bless {
-			db_paths => $params{db_paths},
-			dsns => $class->init_dsns(%params),
-			schema=>$class->init_schema,
-		}, $class;
-		
-		$self->connect;
-		return $self;
-	}
+	    my $class = shift @_;
+	    my $self = $class->SUPER::new(@_);
 	
-	## get the DSNs.  We build this up from the list of file paths
-	
-	sub init_dsns {
-		my $class = shift @_;
-		my %params = @_;
-		my $db_paths = $params{db_paths};
-
-		my @dsn = map {
-			"dbi:SQLite:${_}";
-		} @$db_paths;
-		
-		return \@dsn;
+	    $self->schema( $self->init_schema );
+	    return $self;
 	}
+    
+    ## Get the Schema and set the replication storage type
+    
+    sub init_schema {
+        my $class = shift @_;
+        
+        my $schema = DBICTest->init_schema(
+            storage_type=>{
+            	'::DBI::Replicated' => {
+            		balancer_type=>'::Random',
+                    balancer_args=>{
+                    	auto_validate_every=>100,
+                    },
+            	}
+            },
+            deploy_args=>{
+                   add_drop_table => 1,
+            },
+        );
 
-	## get the Schema and set the replication storage type
+        return $schema;
+    }
+    
+    sub generate_replicant_connect_info {}
+    sub replicate {}
+    sub cleanup {}
+
+  
+    ## --------------------------------------------------------------------- ##
+    ## Subclass for when you are using SQLite for testing, this provides a fake
+    ## replication support.
+    ## --------------------------------------------------------------------- ##
+        
+    package DBIx::Class::DBI::Replicated::TestReplication::SQLite;
+
+    use DBICTest;
+    use File::Copy;    
+    use base 'DBIx::Class::DBI::Replicated::TestReplication';
+    
+    __PACKAGE__->mk_accessors( qw/master_path slave_paths/ );
+    
+    ## Set the master path from DBICTest
+    
+	sub new {
+	    my $class = shift @_;
+	    my $self = $class->SUPER::new(@_);
 	
-	sub init_schema {
-		my $class = shift @_;
-		my $schema = DBICTest->init_schema();
-		$schema->storage_type( '::DBI::Replication' );
-		
-		return $schema;
-	}
+	    $self->master_path( DBICTest->_sqlite_dbfilename );
+	    $self->slave_paths([
+            "t/var/DBIxClass_slave1.db",
+            "t/var/DBIxClass_slave2.db",    
+        ]);
+        
+	    return $self;
+	}    
 	
-	## connect the Schema
-	
-	sub connect {
-		my $self = shift @_;
-		my ($master, @slaves) = @{$self->{dsns}};
-		my @connections = ([$master, '','', {AutoCommit=>1, PrintError=>0}]);
-		my @slavesob;
-		
-		foreach my $slave (@slaves)
-		{
-			my $dbh = shift @{$self->{slaves}}
-			 || DBI->connect($slave,"","",{PrintError=>0, PrintWarn=>0});
-			
-			push @connections,
-			 [$dbh, '','',{priority=>10}];
-			 
-			push @slavesob,
-			 $dbh;
-		}
-		
-		## Keep track of the created slave databases
-		$self->{slaves} = \@slavesob;
-		
-		$self
-			->{schema}
-			->connect([
-				@connections,
-				{limit_dialect => 'LimitXY'}
-			]);
-	}
-	
-	## replication
-	
-	sub replicate {
-		my $self = shift @_;
-		my ($master, @slaves) = @{$self->{db_paths}};
-		
-		foreach my $slave (@slaves) {
-			copy($master, $slave);
-		}
-	}
-	
-	## Cleanup afer ourselves.
-	
-	sub cleanup {
-		my $self = shift @_;
-		my ($master, @slaves) = @{$self->{db_paths}};
-		
-		foreach my $slave (@slaves) {
-			unlink $slave;
-		}		
-	}
-	
-	## Force a reconnection
-	
-	sub reconnect {
-		my $self = shift @_;
-		my $schema = $self->connect;
-		$self->{schema} = $schema;
-		return $schema;
-	}
+    ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
+    ## $storage->connect_info to be used for connecting replicants.
+    
+    sub generate_replicant_connect_info {
+        my $self = shift @_;
+        my @dsn = map {
+            "dbi:SQLite:${_}";
+        } @{$self->slave_paths};
+        
+        return map { [$_,'','',{AutoCommit=>1}] } @dsn;
+    }
+    
+    ## Do a 'good enough' replication by copying the master dbfile over each of
+    ## the slave dbfiles.  If the master is SQLite we do this, otherwise we
+    ## just do a one second pause to let the slaves catch up.
+    
+    sub replicate {
+        my $self = shift @_;
+        foreach my $slave (@{$self->slave_paths}) {
+            copy($self->master_path, $slave);
+        }
+    }
+    
+    ## Cleanup after ourselves.  Unlink all the slave paths.
+    
+    sub cleanup {
+        my $self = shift @_;
+        foreach my $slave (@{$self->slave_paths}) {
+            unlink $slave;
+        }     
+    }
+    
+    ## --------------------------------------------------------------------- ##
+    ## Subclass for when you are setting the databases via custom export vars
+    ## This is for when you have a replicating database setup that you are
+    ## going to test against.  You'll need to define the correct $ENV and have
+    ## two slave databases to test against, as well as a replication system
+    ## that will replicate in less than 1 second.
+    ## --------------------------------------------------------------------- ##
+        
+    package DBIx::Class::DBI::Replicated::TestReplication::Custom; 
+    use base 'DBIx::Class::DBI::Replicated::TestReplication';
+    
+    ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
+    ## $storage->connect_info to be used for connecting replicants.
+    
+    sub generate_replicant_connect_info { 
+        return (
+            [$ENV{"DBICTEST_SLAVE0_DSN"}, $ENV{"DBICTEST_SLAVE0_DBUSER"}, $ENV{"DBICTEST_SLAVE0_DBPASS"}, {AutoCommit => 1}],
+            [$ENV{"DBICTEST_SLAVE1_DSN"}, $ENV{"DBICTEST_SLAVE1_DBUSER"}, $ENV{"DBICTEST_SLAVE1_DBPASS"}, {AutoCommit => 1}],           
+        );
+    }
+    
+    ## pause a bit to let the replication catch up 
+    
+    sub replicate {
+    	sleep 1;
+    } 
 }
 
 ## ----------------------------------------------------------------------------
 ## Create an object and run some tests
 ## ----------------------------------------------------------------------------
 
-my %params = (
-	db_paths => [
-		"t/var/DBIxClass.db",
-		"t/var/DBIxClass_slave1.db",
-		"t/var/DBIxClass_slave2.db",
-	],
-);
+## This first bunch of tests is basic; it just makes sure all the bits are behaving
 
-ok my $replicate = DBIx::Class::DBI::Replication::TestReplication->new(%params)
-	=> 'Created a replication object';
-	
-isa_ok $replicate->{schema}
-	=> 'DBIx::Class::Schema';
+my $replicated_class = DBICTest->has_custom_dsn ?
+    'DBIx::Class::DBI::Replicated::TestReplication::Custom' :
+    'DBIx::Class::DBI::Replicated::TestReplication::SQLite';
 
+ok my $replicated = $replicated_class->new
+    => 'Created a replication object';
+    
+isa_ok $replicated->schema
+    => 'DBIx::Class::Schema';
+    
+isa_ok $replicated->schema->storage
+    => 'DBIx::Class::Storage::DBI::Replicated';
+
+ok $replicated->schema->storage->meta
+    => 'has a meta object';
+    
+isa_ok $replicated->schema->storage->master
+    => 'DBIx::Class::Storage::DBI';
+    
+isa_ok $replicated->schema->storage->pool
+    => 'DBIx::Class::Storage::DBI::Replicated::Pool';
+    
+does_ok $replicated->schema->storage->balancer
+    => 'DBIx::Class::Storage::DBI::Replicated::Balancer'; 
+
+ok my @replicant_connects = $replicated->generate_replicant_connect_info
+    => 'got replication connect information';
+
+ok my @replicated_storages = $replicated->schema->storage->connect_replicants(@replicant_connects)
+    => 'Created some storages suitable for replicants';
+    
+isa_ok $replicated->schema->storage->balancer->current_replicant
+    => 'DBIx::Class::Storage::DBI';
+    
+ok $replicated->schema->storage->pool->has_replicants
+    => 'does have replicants';     
+
+is $replicated->schema->storage->pool->num_replicants => 2
+    => 'has two replicants';
+       
+does_ok $replicated_storages[0]
+    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated_storages[1]
+    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+    
+my @replicant_names = keys %{$replicated->schema->storage->replicants};
+
+does_ok $replicated->schema->storage->replicants->{$replicant_names[0]}
+    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated->schema->storage->replicants->{$replicant_names[1]}
+    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';  
+
 ## Add some info to the database
 
-$replicate
-	->{schema}
-	->populate('Artist', [
-		[ qw/artistid name/ ],
-		[ 4, "Ozric Tentacles"],
-	]);
-			    
+$replicated
+    ->schema
+    ->populate('Artist', [
+        [ qw/artistid name/ ],
+        [ 4, "Ozric Tentacles"],
+    ]);
+                
 ## Make sure all the slaves have the table definitions
 
-$replicate->replicate;
+$replicated->replicate;
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
 
 ## Make sure we can read the data.
 
-ok my $artist1 = $replicate->{schema}->resultset('Artist')->find(4)
-	=> 'Created Result';
+ok my $artist1 = $replicated->schema->resultset('Artist')->find(4)
+    => 'Created Result';
 
 isa_ok $artist1
-	=> 'DBICTest::Artist';
-	
+    => 'DBICTest::Artist';
+    
 is $artist1->name, 'Ozric Tentacles'
-	=> 'Found expected name for first result';
+    => 'Found expected name for first result';
 
 ## Add some new rows that only the master will have.  This is because
 ## we overload any type of write operation so that it must hit the master
 ## database.
 
-use Fcntl qw (:flock);
+$replicated
+    ->schema
+    ->populate('Artist', [
+        [ qw/artistid name/ ],
+        [ 5, "Doom's Children"],
+        [ 6, "Dead On Arrival"],
+        [ 7, "Watergate"],
+    ]);
 
-my $master_path = $replicate->{db_paths}->[0];
-open LOCKFILE, ">>$master_path"
- or die "Cannot open $master_path";
-flock(LOCKFILE, LOCK_EX);
+## Make sure all the slaves have the table definitions
+$replicated->replicate;
 
-$replicate
-	->{schema}
-	->populate('Artist', [
-		[ qw/artistid name/ ],
-		[ 5, "Doom's Children"],
-		[ 6, "Dead On Arrival"],
-		[ 7, "Watergate"],
-	]);
+## Should find some data now
+
+ok my $artist2 = $replicated->schema->resultset('Artist')->find(5)
+    => 'Sync succeeded';
+    
+isa_ok $artist2
+    => 'DBICTest::Artist';
+    
+is $artist2->name, "Doom's Children"
+    => 'Found expected name for second result';
+
+## What happens when we disconnect all the replicants?
+
+is $replicated->schema->storage->pool->connected_replicants => 2
+    => "both replicants are connected";
+    
+$replicated->schema->storage->replicants->{$replicant_names[0]}->disconnect;
+$replicated->schema->storage->replicants->{$replicant_names[1]}->disconnect;
+
+is $replicated->schema->storage->pool->connected_replicants => 0
+    => "both replicants are now disconnected";
+
+## All these should pass, since the database should automatically reconnect
+
+ok my $artist3 = $replicated->schema->resultset('Artist')->find(6)
+    => 'Still finding stuff.';
+    
+isa_ok $artist3
+    => 'DBICTest::Artist';
+    
+is $artist3->name, "Dead On Arrival"
+    => 'Found expected name for third result';
+
+is $replicated->schema->storage->pool->connected_replicants => 1
+    => "One replicant reconnected to handle the job";
+    
+## What happens when we try to select something that doesn't exist?
+
+ok ! $replicated->schema->resultset('Artist')->find(666)
+    => 'Correctly failed to find something.';
+    
+## test the reliable option
+
+TESTRELIABLE: {
 	
-## Reconnect the database
-$replicate->reconnect;
+	$replicated->schema->storage->set_reliable_storage;
+	
+	ok $replicated->schema->resultset('Artist')->find(2)
+	    => 'Read from master 1';
+	
+	ok $replicated->schema->resultset('Artist')->find(5)
+	    => 'Read from master 2';
+	    
+    $replicated->schema->storage->set_balanced_storage;	    
+	    
+	ok $replicated->schema->resultset('Artist')->find(3)
+        => 'Read from replicant';
+}
 
-## Alright, the database 'cluster' is not in a consistent state.  When we do
-## a read now we expect bad news
+## Make sure when reliable goes out of scope, we are using replicants again
 
-is $replicate->{schema}->resultset('Artist')->find(5), undef
-	=> 'read after disconnect fails because it uses slave 1 which we have neglected to "replicate" yet';
+ok $replicated->schema->resultset('Artist')->find(1)
+    => 'back to replicant 1.';
+    
+ok $replicated->schema->resultset('Artist')->find(2)
+    => 'back to replicant 2.';
 
-## Make sure all the slaves have the table definitions
-$replicate->replicate;
+## set all the replicants to inactive, and make sure the balancer falls back to
+## the master.
 
-## Should find some data now
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
+    
+ok $replicated->schema->resultset('Artist')->find(2)
+    => 'Fallback to master';
 
-ok my $artist2 = $replicate->{schema}->resultset('Artist')->find(5)
-	=> 'Sync succeed';
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
+
+ok $replicated->schema->resultset('Artist')->find(2)
+    => 'Returned to replicants';
+    
+## Getting slave status tests
+
+SKIP: {
+    ## We skip these tests unless you have custom replicants, since the default
+    ## SQLite-based replication tests don't support these functions.
+    
+    skip 'Cannot Test Replicant Status on Non Replicating Database', 9
+     unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
+
+    $replicated->replicate; ## Give the slaves a chance to catch up.
+
+	ok $replicated->schema->storage->replicants->{$replicant_names[0]}->is_replicating
+	    => 'Replicants are replicating';
+	    
+	is $replicated->schema->storage->replicants->{$replicant_names[0]}->lag_behind_master, 0
+	    => 'Replicant is zero seconds behind master';
+	    
+	## Test the validate replicants
 	
-isa_ok $artist2
-	=> 'DBICTest::Artist';
+	$replicated->schema->storage->pool->validate_replicants;
 	
-is $artist2->name, "Doom's Children"
-	=> 'Found expected name for first result';
+	is $replicated->schema->storage->pool->active_replicants, 2
+	    => 'Still have 2 replicants after validation';
+	    
+	## Force the replicants to fail the validate test by requiring their lag to
+	## be negative (i.e. ahead of the master!)
 	
-## What happens when we delete one of the slaves?
+    $replicated->schema->storage->pool->maximum_lag(-10);
+    $replicated->schema->storage->pool->validate_replicants;
+    
+    is $replicated->schema->storage->pool->active_replicants, 0
+        => 'No way a replicant can be ahead of the master';
+        
+    ## Let's be fair to the replicants again.  Let them lag up to 5
+	
+    $replicated->schema->storage->pool->maximum_lag(5);
+    $replicated->schema->storage->pool->validate_replicants;
+    
+    is $replicated->schema->storage->pool->active_replicants, 2
+        => 'Both replicants in good standing again';	
+        
+	## Check auto validate
+	
+	is $replicated->schema->storage->balancer->auto_validate_every, 100
+	    => "Got the expected value for auto validate";
+	    
+		## This will make sure we auto validate every time
+		$replicated->schema->storage->balancer->auto_validate_every(0);
+		
+		## set all the replicants to inactive, and make sure the balancer falls back to
+		## the master.
+		
+		$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
+		$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
+		
+		## Ok, now when we go to run a query, autovalidate SHOULD reconnect
+	
+	is $replicated->schema->storage->pool->active_replicants => 0
+	    => "both replicants turned off";
+	    	
+	ok $replicated->schema->resultset('Artist')->find(5)
+	    => 'replicant reactivated';
+	    
+	is $replicated->schema->storage->pool->active_replicants => 2
+	    => "both replicants reactivated";        
+}
 
-ok my $slave1 = @{$replicate->{slaves}}[0]
-	=> 'Got Slave1';
+## Test the reliably callback
 
-ok $slave1->disconnect
-	=> 'disconnected slave1';
+ok my $reliably = sub {
+	
+    ok $replicated->schema->resultset('Artist')->find(5)
+        => 'replicant reactivated';	
+	
+} => 'created coderef properly';
 
-$replicate->reconnect;
+$replicated->schema->storage->execute_reliably($reliably);
 
-ok my $artist3 = $replicate->{schema}->resultset('Artist')->find(6)
-	=> 'Still finding stuff.';
+## Try something with an error
+
+ok my $unreliably = sub {
+    
+    ok $replicated->schema->resultset('ArtistXX')->find(5)
+        => 'replicant reactivated'; 
+    
+} => 'created coderef properly';
+
+throws_ok {$replicated->schema->storage->execute_reliably($unreliably)} 
+    qr/Can't find source for ArtistXX/
+    => 'Bad coderef throws proper error';
+    
+## Make sure replication came back
+
+ok $replicated->schema->resultset('Artist')->find(3)
+    => 'replicant reactivated';
+    
+## make sure transactions are set to execute_reliably
+
+ok my $transaction = sub {
 	
-isa_ok $artist3
-	=> 'DBICTest::Artist';
+	my $id = shift @_;
 	
-is $artist3->name, "Dead On Arrival"
-	=> 'Found expected name for first result';
+	$replicated
+	    ->schema
+	    ->populate('Artist', [
+	        [ qw/artistid name/ ],
+	        [ $id, "Children of the Grave"],
+	    ]);
+	    
+    ok my $result = $replicated->schema->resultset('Artist')->find($id)
+        => 'Found expected artist';
+        
+    ok my $more = $replicated->schema->resultset('Artist')->find(1)
+        => 'Found expected artist again';
+        
+   return ($result, $more);
+   
+} => 'Created a coderef properly';
+
+## Test the transaction with multi return
+{
+	ok my @return = $replicated->schema->txn_do($transaction, 666)
+	    => 'did transaction';
+	    
+	    is $return[0]->id, 666
+	        => 'first returned value is correct';
+	        
+	    is $return[1]->id, 1
+	        => 'second returned value is correct';
+}
+
+## Test that asking for single return works
+{
+	ok my $return = $replicated->schema->txn_do($transaction, 777)
+	    => 'did transaction';
+	    
+	    is $return->id, 777
+	        => 'first returned value is correct';
+}
+
+## Test transaction returning a single value
+
+{
+	ok my $result = $replicated->schema->txn_do(sub {
+		ok my $more = $replicated->schema->resultset('Artist')->find(1)
+		=> 'found inside a transaction';
+		return $more;
+	}) => 'successfully processed transaction';
 	
-## Let's delete all the slaves
+	is $result->id, 1
+	   => 'Got expected single result from transaction';
+}
 
-ok my $slave2 = @{$replicate->{slaves}}[1]
-	=> 'Got Slave2';
+## Make sure replication came back
 
-ok $slave2->disconnect
-	=> 'Disconnected slave2';
+ok $replicated->schema->resultset('Artist')->find(1)
+    => 'replicant reactivated';
+    
+## Test Discard changes
 
-$replicate->reconnect;
+{
+	ok my $artist = $replicated->schema->resultset('Artist')->find(2)
+	    => 'got an artist to test discard changes';
+	    
+	ok $artist->discard_changes
+	   => 'properly discard changes';
+}
 
-## We expect an error now, since all the slaves are dead
+## Test some edge cases, like trying to do a transaction inside a transaction, etc
 
-eval {
-	$replicate->{schema}->resultset('Artist')->find(4)->name;
-};
+{
+    ok my $result = $replicated->schema->txn_do(sub {
+    	return $replicated->schema->txn_do(sub {
+	        ok my $more = $replicated->schema->resultset('Artist')->find(1)
+	        => 'found inside a transaction inside a transaction';
+	        return $more;    		
+    	});
+    }) => 'successfully processed transaction';
+    
+    is $result->id, 1
+       => 'Got expected single result from transaction';	  
+}
 
-ok $@ => 'Got error when trying to find artistid 4';
+{
+    ok my $result = $replicated->schema->txn_do(sub {
+    	return $replicated->schema->storage->execute_reliably(sub {
+	    	return $replicated->schema->txn_do(sub {
+	    		return $replicated->schema->storage->execute_reliably(sub {
+			        ok my $more = $replicated->schema->resultset('Artist')->find(1)
+			        => 'found inside crazy deep transactions and execute_reliably';
+			        return $more; 	    			
+	    		});
+	    	});    	
+    	});
+    }) => 'successfully processed transaction';
+    
+    is $result->id, 1
+       => 'Got expected single result from transaction';	  
+}     
 
-## This should also be an error
+## Test the force_pool resultset attribute.
 
-eval {
-	my $artist4 = $replicate->{schema}->resultset('Artist')->find(7);	
-};
+{
+	ok my $artist_rs = $replicated->schema->resultset('Artist')
+        => 'got artist resultset';
+	   
+	## Turn on Forced Pool Storage
+	ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>'master'})
+        => 'Created a resultset using force_pool storage';
+	   
+    ok my $artist = $reliable_artist_rs->find(2) 
+        => 'got an artist result via force_pool storage';
+}
 
-ok $@ => 'Got read errors after everything failed';
-
 ## Delete the old database files
-$replicate->cleanup;
+$replicated->cleanup;
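
The test above exercises the new ::DBI::Replicated storage end to end.  For
orientation, here is a minimal sketch of the application-side wiring, built
only from calls exercised in this test; My::Schema and the DSN/user/pass
variables are placeholders, not part of this commit.

    use strict;
    use warnings;
    use My::Schema;    ## hypothetical application schema

    ## Select the replicated storage backend before connecting.
    My::Schema->storage_type('::DBI::Replicated');
    my $schema = My::Schema->connect($master_dsn, $user, $pass, { AutoCommit => 1 });

    ## Attach read-only replicants; each arrayref is ordinary connect_info.
    $schema->storage->connect_replicants(
        [$slave0_dsn, $user, $pass, { AutoCommit => 1 }],
        [$slave1_dsn, $user, $pass, { AutoCommit => 1 }],
    );

    ## Reads are balanced across active replicants; writes always hit the master.
    my $artist = $schema->resultset('Artist')->find(4);

    ## Force a single read to the master when replication lag matters ...
    my $fresh = $schema->resultset('Artist')
        ->search(undef, { force_pool => 'master' })->find(4);

    ## ... or run a whole block (reads included) reliably on the master.
    $schema->storage->execute_reliably(sub {
        $schema->resultset('Artist')->create({ name => 'New Band' });
    });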
 
 
 

Modified: DBIx-Class/0.09/trunk/t/94versioning.t
===================================================================
--- DBIx-Class/0.09/trunk/t/94versioning.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/94versioning.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -15,10 +15,10 @@
     unless ($dsn);
 
 
-    eval "use DBD::mysql; use SQL::Translator 0.08;";
+    eval "use DBD::mysql; use SQL::Translator 0.09;";
     plan $@
-        ? ( skip_all => 'needs DBD::mysql and SQL::Translator 0.08 for testing' )
-        : ( tests => 13 );
+        ? ( skip_all => 'needs DBD::mysql and SQL::Translator 0.09 for testing' )
+        : ( tests => 17 );
 }
 
 my $version_table_name = 'dbix_class_schema_versions';
@@ -27,17 +27,16 @@
 use lib qw(t/lib);
 use_ok('DBICVersionOrig');
 
-my $schema_orig = DBICVersion::Schema->connect($dsn, $user, $pass);
+my $schema_orig = DBICVersion::Schema->connect($dsn, $user, $pass, { ignore_version => 1 });
 eval { $schema_orig->storage->dbh->do('drop table ' . $version_table_name) };
 eval { $schema_orig->storage->dbh->do('drop table ' . $old_table_name) };
 
-is($schema_orig->ddl_filename('MySQL', 't/var', '1.0'), File::Spec->catfile('t', 'var', 'DBICVersion-Schema-1.0-MySQL.sql'), 'Filename creation working');
+is($schema_orig->ddl_filename('MySQL', '1.0', 't/var'), File::Spec->catfile('t', 'var', 'DBICVersion-Schema-1.0-MySQL.sql'), 'Filename creation working');
 unlink('t/var/DBICVersion-Schema-1.0-MySQL.sql') if (-e 't/var/DBICVersion-Schema-1.0-MySQL.sql');
 $schema_orig->create_ddl_dir('MySQL', undef, 't/var');
 
 ok(-f 't/var/DBICVersion-Schema-1.0-MySQL.sql', 'Created DDL file');
 $schema_orig->deploy({ add_drop_table => 1 });
-$schema_orig->upgrade();
 
 my $tvrs = $schema_orig->{vschema}->resultset('Table');
 is($schema_orig->_source_exists($tvrs), 1, 'Created schema from DDL file');
@@ -47,7 +46,7 @@
   unlink('t/var/DBICVersion-Schema-2.0-MySQL.sql');
   unlink('t/var/DBICVersion-Schema-1.0-2.0-MySQL.sql');
 
-  my $schema_upgrade = DBICVersion::Schema->connect($dsn, $user, $pass);
+  my $schema_upgrade = DBICVersion::Schema->connect($dsn, $user, $pass, { ignore_version => 1 });
   is($schema_upgrade->get_db_version(), '1.0', 'get_db_version ok');
   is($schema_upgrade->schema_version, '2.0', 'schema version ok');
   $schema_upgrade->create_ddl_dir('MySQL', '2.0', 't/var', '1.0');
@@ -59,6 +58,9 @@
     $schema_upgrade->storage->dbh->do('select NewVersionName from TestVersion');
   };
   is($@, '', 'new column created');
+
+  # should overwrite files
+  $schema_upgrade->create_ddl_dir('MySQL', '2.0', 't/var', '1.0');
 }
 
 {
@@ -83,3 +85,35 @@
   ok($@, 'old version table gone');
 
 }
+
+# check behaviour of DBIC_NO_VERSION_CHECK env var and ignore_version connect attr
+{
+  my $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass);
+  eval {
+    $schema_version->storage->dbh->do("DELETE from $version_table_name");
+  };
+
+
+  my $warn = '';
+  $SIG{__WARN__} = sub { $warn = shift };
+  $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass);
+  like($warn, qr/Your DB is currently unversioned/, 'warning detected without env var or attr');
+
+
+  # should warn
+  $warn = '';
+  $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass, { ignore_version => 1 });
+  is($warn, '', 'warning not detected with attr set');
+  # should not warn
+
+  $ENV{DBIC_NO_VERSION_CHECK} = 1;
+  $warn = '';
+  $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass);
+  is($warn, '', 'warning not detected with env var set');
+  # should not warn
+
+  $warn = '';
+  $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass, { ignore_version => 0 });
+  like($warn, qr/Your DB is currently unversioned/, 'warning detected without env var or attr');
+  # should warn
+}
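
To summarize the behaviour pinned down by the new block above: the "Your DB
is currently unversioned" warning fires on connect unless either the
ignore_version connect attribute or the DBIC_NO_VERSION_CHECK environment
variable is set, and an explicit ignore_version => 0 re-enables the check
even when the environment variable is set, as the final assertion shows.
A minimal sketch, reusing the test's placeholder connection info:

    ## warns when the versioned schema finds no version recorded in the DB
    my $schema = DBICVersion::Schema->connect($dsn, $user, $pass);

    ## either of these suppresses the check
    $schema = DBICVersion::Schema->connect($dsn, $user, $pass,
        { ignore_version => 1 });

    $ENV{DBIC_NO_VERSION_CHECK} = 1;
    $schema = DBICVersion::Schema->connect($dsn, $user, $pass);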

Added: DBIx-Class/0.09/trunk/t/96_is_deteministic_value.t
===================================================================
--- DBIx-Class/0.09/trunk/t/96_is_deteministic_value.t	                        (rev 0)
+++ DBIx-Class/0.09/trunk/t/96_is_deteministic_value.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -0,0 +1,65 @@
+use strict;
+use warnings;
+
+# 8 tests
+
+use Test::More qw(no_plan);
+use lib qw(t/lib);
+use DBICTest;
+use DateTime;
+use DateTime::Format::Strptime;
+use Test::Exception;
+
+my $schema = DBICTest->init_schema();
+my $artist_rs = $schema->resultset('Artist');
+my $cd_rs = $schema->resultset('CD');
+
+ {
+   my $cd;
+   lives_ok {
+     $cd = $cd_rs->search({ year => {'=' => 1999}})->create
+       ({
+         artist => {name => 'Guillermo1'},
+         title => 'Guillermo 1',
+        });
+   };
+   is($cd->year, 1999);
+ }
+
+ {
+   my $formatter = DateTime::Format::Strptime->new(pattern => '%Y');
+   my $dt = DateTime->new(year => 2006, month => 06, day => 06,
+                          formatter => $formatter );
+   my $cd;
+   lives_ok {
+     $cd = $cd_rs->search({ year => $dt})->create
+       ({
+         artist => {name => 'Guillermo2'},
+         title => 'Guillermo 2',
+        });
+   };
+   is($cd->year, 2006);
+ }
+
+
+{
+  my $artist;
+  lives_ok {
+    $artist = $artist_rs->search({ name => {'!=' => 'Killer'}})
+      ->create({artistid => undef});
+  };
+  is($artist->name, undef);
+}
+
+
+{
+  my $artist;
+  lives_ok {
+    $artist = $artist_rs->search({ name => [ q/ some stupid names here/]})
+      ->create({artistid => undef});
+  };
+  is($artist->name, undef);
+}
+
+
+1;
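
The behaviour under test here: when a resultset condition pins a column to a
single deterministic value (a plain or '=' equality, including an object
that stringifies via its formatter, as with the DateTime above), create() on
that resultset inherits the value; conditions like '!=' or an arrayref do
not determine a value and are ignored.  A condensed sketch against the same
test schema:

    ## '=' is deterministic, so the new CD gets year => 1999
    my $cd = $schema->resultset('CD')
        ->search({ year => { '=' => 1999 } })
        ->create({ artist => { name => 'A' }, title => 'T' });

    ## '!=' determines nothing, so name stays undef on the new artist
    my $artist = $schema->resultset('Artist')
        ->search({ name => { '!=' => 'Killer' } })
        ->create({ artistid => undef });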

Modified: DBIx-Class/0.09/trunk/t/96multi_create.t
===================================================================
--- DBIx-Class/0.09/trunk/t/96multi_create.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/96multi_create.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -196,6 +196,19 @@
 is($new_cd->artist->id, 17, 'new id retained okay');
 
 
+# Test find or create related functionality
+my $new_artist = $schema->resultset("Artist")->create({ artistid => 18, name => 'larry' });
+
+eval {
+	$schema->resultset("CD")->create({ 
+              cdid => 28, 
+               title => 'Boogie Wiggle', 
+              year => '2007', 
+              artist => { artistid => 18, name => 'larry' }
+             });
+};
+is($@, '', 'new cd created without clash on related artist');
+
 # Make sure exceptions from errors in created rels propogate
 eval {
     my $t = $schema->resultset("Track")->new({});
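
The new hunk above covers find-or-create semantics for a related row in a
multi-create: the nested artist hash matches the row created just before it,
so the CD insert reuses that artist rather than clashing on the duplicate
key.  A sketch of the same pattern (cdid 29 is a hypothetical fresh id):

    ## artistid 18 already exists; the nested hash resolves to that row
    my $cd = $schema->resultset('CD')->create({
        cdid   => 29,
        title  => 'Second Album',
        year   => '2008',
        artist => { artistid => 18, name => 'larry' },
    });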

Added: DBIx-Class/0.09/trunk/t/99dbic_sqlt_parser.t
===================================================================
--- DBIx-Class/0.09/trunk/t/99dbic_sqlt_parser.t	                        (rev 0)
+++ DBIx-Class/0.09/trunk/t/99dbic_sqlt_parser.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -0,0 +1,74 @@
+#!/usr/bin/perl
+use strict;
+use warnings;
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+BEGIN {
+    eval "use DBD::mysql; use SQL::Translator 0.09;";
+    plan $@
+        ? ( skip_all => 'needs SQL::Translator 0.09 for testing' )
+        : ( tests => 99 );
+}
+
+my $schema = DBICTest->init_schema();
+
+{ 
+	my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { } } });
+
+	foreach my $source ($schema->sources) {
+		my $table = $sqlt_schema->get_table($schema->source($source)->from);
+
+		my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
+		my @indices = $table->get_indices;
+		my $index_count = scalar(@indices);
+    $index_count++ if ($source eq 'TwoKeys'); # TwoKeys has the index turned off on the rel def
+		is($index_count, $fk_count, "correct number of indices for $source with no args");
+	}
+}
+
+{ 
+	my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 1 } } });
+
+	foreach my $source ($schema->sources) {
+		my $table = $sqlt_schema->get_table($schema->source($source)->from);
+
+		my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
+		my @indices = $table->get_indices;
+		my $index_count = scalar(@indices);
+    $index_count++ if ($source eq 'TwoKeys'); # TwoKeys has the index turned off on the rel def
+		is($index_count, $fk_count, "correct number of indices for $source with add_fk_index => 1");
+	}
+}
+
+{ 
+	my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 0 } } });
+
+	foreach my $source ($schema->sources) {
+		my $table = $sqlt_schema->get_table($schema->source($source)->from);
+
+		my @indices = $table->get_indices;
+		my $index_count = scalar(@indices);
+		is($index_count, 0, "correct number of indices for $source with add_fk_index => 0");
+	}
+}
+
+sub create_schema {
+	my $args = shift;
+
+	my $schema = $args->{schema};
+	my $additional_sqltargs = $args->{args} || {};
+
+	my $sqltargs = {
+		add_drop_table => 1, 
+		ignore_constraint_names => 1,
+		ignore_index_names => 1,
+		%{$additional_sqltargs}
+		};
+
+	my $sqlt = SQL::Translator->new( $sqltargs );
+
+	$sqlt->parser('SQL::Translator::Parser::DBIx::Class');
+	return $sqlt->translate({ data => $schema }) or die $sqlt->error;
+}
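
This test drives SQL::Translator::Parser::DBIx::Class directly, but the same
parser_args can travel through the deployment helpers too; a hedged sketch,
assuming your deploy call forwards its sqltargs to the parser (as the
DBICTest changes further down arrange via deploy_args), alongside the
per-relationship knob this commit adds (see the TwoKeys change below):

    ## globally: skip the automatic index on every foreign key column
    $schema->deploy({ parser_args => { add_fk_index => 0 } });

    ## per relationship: opt a single rel out of FK index generation
    __PACKAGE__->belongs_to(
        cd => 'DBICTest::Schema::CD',
        undef,
        { is_deferrable => 0, add_fk_index => 0 },
    );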

Deleted: DBIx-Class/0.09/trunk/t/99schema_roles.t
===================================================================
--- DBIx-Class/0.09/trunk/t/99schema_roles.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/99schema_roles.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -1,189 +0,0 @@
-use strict;
-use warnings;
-use lib qw(t/lib);
-use Test::More;
-
-BEGIN {
-    eval "use Moose";
-    plan $@
-        ? ( skip_all => 'needs Moose for testing' )
-        : ( tests => 35 );
-}
-
-=head1 NAME
-
-DBICNGTest::Schema::ResultSet:Person; Example Resultset
-
-=head1 DESCRIPTION
-
-Tests for the various Schema roles you can either use or apply
-
-=head1 TESTS
-
-=head2 initialize database
-
-create a schema and setup
-
-=cut
-
-use_ok 'DBICNGTest::Schema';
-
-ok my $db_file = Path::Class::File->new(qw/t var DBIxClassNG.db/)
-    => 'created a path for the test database';
-
-unlink $db_file;
-
-ok my $schema = DBICNGTest::Schema->connect_and_setup($db_file)
-    => 'Created a good Schema';
-
-is ref $schema->source('Person'), 'DBIx::Class::ResultSource::Table'
-    => 'Found Expected Person Source';
-    
-is $schema->resultset('Person')->count, 5
-    => 'Got the correct number of people';
-
-is $schema->resultset('Gender')->count, 3
-    => 'Got the correct number of genders';
-
-
-=head2 check query counter
-
-Test the query counter role
-
-=cut
-
-use_ok 'DBIx::Class::Storage::DBI::Role::QueryCounter';
-DBIx::Class::Storage::DBI::Role::QueryCounter->meta->apply($schema->storage);
-
-is $schema->storage->query_count, 0
-    => 'Query Count is zero';
-    
-is $schema->resultset('Person')->find(1)->name, 'john'
-    => 'Found John!';
-
-is $schema->resultset('Person')->find(2)->name, 'dan'
-    => 'Found Dan!';
-
-is $schema->storage->query_count, 2
-    => 'Query Count is two';
-
-
-=head2 check at query interval 
-    
-Test the role for associating events with a given query interval
-
-=cut
-
-use_ok 'DBIx::Class::Schema::Role::AtQueryInterval';
-DBIx::Class::Schema::Role::AtQueryInterval->meta->apply($schema);
-
-ok my $job1 = $schema->create_job(runs=>sub { 'hello'})
-    => 'Created a job';
-
-is $job1->execute, 'hello',
-    => 'Got expected information from the job';
-
-ok my $job2 = $schema->create_job(runs=>'job_handler_echo')
-    => 'Created a job';
-
-is $job2->execute($schema, 'hello1'), 'hello1',
-    => 'Got expected information from the job';
-
-ok my $interval1 = $schema->create_query_interval(every=>10)
-    => 'Created a interval';
-
-ok $interval1->matches(10)
-    => 'correctly matched 10';
-
-ok $interval1->matches(20)
-    => 'correctly matched 20';
-
-ok !$interval1->matches(22)
-    => 'correctly didnt matched 22';
-
-ok my $interval2 = $schema->create_query_interval(every=>10, offset=>2)
-    => 'Created a interval';
-
-ok $interval2->matches(12)
-    => 'correctly matched 12';
-
-ok $interval2->matches(22)
-    => 'correctly matched 22';
-
-ok !$interval2->matches(25)
-    => 'correctly didnt matched 25';
-    
-ok my $at = $schema->create_at_query_interval(interval=>$interval2, job=>$job2)
-    => 'created the at query interval object';
-    
-is $at->execute_if_matches(32, $schema, 'hello2'), 'hello2'
-    => 'Got correct return';
-    
-ok $schema->at_query_intervals([$at])
-    => 'added job to run at a given interval';
-
-is_deeply [$schema->execute_jobs_at_query_interval(42, 'hello4')], ['hello4']
-    => 'got expected job return value';
-    
-=head2 create jobs via express method
-
-Using the express method, build a bunch of jobs
-
-=cut
-
-ok my @ats = $schema->create_and_add_at_query_intervals(
-
-    {every => 10} => {
-        runs => sub {10},
-    },
-    {every => 20} => {
-        runs => sub {20},
-    },
-    {every => 30} => {
-        runs => sub {30},
-    },
-    {every => 101} => [
-        {runs => sub {101.1}},
-        {runs => sub {101.2}},       
-    ],
-           
-) => 'created express method at query intervals';
-
-
-is_deeply [$schema->execute_jobs_at_query_interval(10)], [10]
-    => 'Got Expected return for 10';
-
-is_deeply [$schema->execute_jobs_at_query_interval(12, 'hello5')], ['hello5']
-    => 'Got Expected return for 12';
-       
-is_deeply [$schema->execute_jobs_at_query_interval(20)], [10,20]
-    => 'Got Expected return for 20';
-
-is_deeply [$schema->execute_jobs_at_query_interval(30)], [10,30]
-    => 'Got Expected return for 30';
-    
-is_deeply [$schema->execute_jobs_at_query_interval(60)], [10,20,30]
-    => 'Got Expected return for 60';    
-     
-is_deeply [$schema->execute_jobs_at_query_interval(101)], [101.1,101.2]
-    => 'Got Expected return for 101';
-    
-    
-=head2 cleanup
-
-Cleanup after ourselves
-
-=cut
-
-unlink $db_file;
-
-
-=head1 AUTHORS
-
-See L<DBIx::Class> for more information regarding authors.
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut
\ No newline at end of file

Modified: DBIx-Class/0.09/trunk/t/cdbi-t/set_to_undef.t
===================================================================
--- DBIx-Class/0.09/trunk/t/cdbi-t/set_to_undef.t	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/cdbi-t/set_to_undef.t	2008-07-23 21:59:28 UTC (rev 4608)
@@ -2,7 +2,6 @@
 
 use strict;
 use Test::More;
-use Test::NoWarnings;
 
 BEGIN {
   eval "use DBIx::Class::CDBICompat;";
@@ -12,6 +11,8 @@
   plan tests => 1;
 }
 
+use Test::NoWarnings;
+
 {
     package Thing;
 

Added: DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/Dummy.pm
===================================================================
--- DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/Dummy.pm	                        (rev 0)
+++ DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/Dummy.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -0,0 +1,23 @@
+package # hide from PAUSE
+    DBICTest::Schema::Dummy;
+
+use base 'DBIx::Class::Core';
+
+use strict;
+use warnings;
+
+__PACKAGE__->table('dummy');
+__PACKAGE__->add_columns(
+    'id' => {
+        data_type => 'integer',
+        is_auto_increment => 1
+    },
+    'gittery' => {
+        data_type => 'varchar',
+        size      => 100,
+        is_nullable => 1,
+    },
+);
+__PACKAGE__->set_primary_key('id');
+
+1;

Modified: DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/Event.pm
===================================================================
--- DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/Event.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/Event.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -10,7 +10,7 @@
 
 __PACKAGE__->add_columns(
   id => { data_type => 'integer', is_auto_increment => 1 },
-  starts_at => { data_type => 'datetime' },
+  starts_at => { data_type => 'datetime', datetime_undef_if_invalid => 1 },
   created_on => { data_type => 'timestamp' }
 );
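
For context, datetime_undef_if_invalid is a column-level flag for the
DateTime inflator: an unparseable stored value inflates to undef instead of
raising an exception.  It is declared per column, as above:

    __PACKAGE__->add_columns(
        starts_at => {
            data_type => 'datetime',
            datetime_undef_if_invalid => 1,   ## bad values inflate to undef
        },
    );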
 

Modified: DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/ForceForeign.pm
===================================================================
--- DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/ForceForeign.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/ForceForeign.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -14,7 +14,7 @@
 # since it uses the PK
 __PACKAGE__->might_have(
 			'artist_1', 'DBICTest::Schema::Artist', {
-			    'foreign.artist_id' => 'self.artist',
+			    'foreign.artistid' => 'self.artist',
 			}, {
 			    is_foreign_key_constraint => 1,
 			},

Deleted: DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/LongColumns.pm
===================================================================
--- DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/LongColumns.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/LongColumns.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -1,64 +0,0 @@
-package # hide from PAUSE
-    DBICTest::Schema::LongColumns;
-
-use base qw/DBIx::Class::Core/;
-
-__PACKAGE__->table('long_columns');
-__PACKAGE__->add_columns(
-    'lcid' => {
-        data_type => 'int',
-        is_auto_increment => 1,
-    },
-    '64_character_column_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' => {
-        data_type => 'int',
-    },
-    '32_character_column_aaaaaaaaaaaa' => {
-        data_type => 'int',
-    },
-    '32_character_column_bbbbbbbbbbbb' => {
-        data_type => 'int',
-    },
-    '16_character_col' => {
-        data_type => 'int',
-    },
-    '8_char_c' => {
-        data_type => 'int',
-    },
-);
-
-__PACKAGE__->set_primary_key('lcid');
-
-__PACKAGE__->add_unique_constraint([qw( 16_character_col 32_character_column_aaaaaaaaaaaa )]);
-
-__PACKAGE__->add_unique_constraint([qw( 8_char_c 16_character_col 32_character_column_aaaaaaaaaaaa )]);
-
-__PACKAGE__->add_unique_constraint([qw( 8_char_c 16_character_col 32_character_column_bbbbbbbbbbbb )]);
-
-__PACKAGE__->add_unique_constraint([qw( 64_character_column_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa )]);
-
-__PACKAGE__->belongs_to(
-    'owner',
-    'DBICTest::Schema::LongColumns',
-    {
-        'foreign.lcid' => 'self.64_character_column_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
-    },
-);
-
-__PACKAGE__->belongs_to(
-    'owner2',
-    'DBICTest::Schema::LongColumns',
-    {
-        'foreign.32_character_column_aaaaaaaaaaaa' => 'self.32_character_column_bbbbbbbbbbbb',
-        'foreign.32_character_column_bbbbbbbbbbbb' => 'self.32_character_column_aaaaaaaaaaaa',
-    },
-);
-
-__PACKAGE__->belongs_to(
-    'owner3',
-    'DBICTest::Schema::LongColumns',
-    {
-        'foreign.8_char_c' => 'self.16_character_col',
-    },
-);
-
-1;

Modified: DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/TreeLike.pm
===================================================================
--- DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/TreeLike.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/TreeLike.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -6,7 +6,7 @@
 __PACKAGE__->table('treelike');
 __PACKAGE__->add_columns(
   'id' => { data_type => 'integer', is_auto_increment => 1 },
-  'parent' => { data_type => 'integer' },
+  'parent' => { data_type => 'integer' , is_nullable=>1},
   'name' => { data_type => 'varchar',
     size      => 100,
  },
@@ -16,4 +16,13 @@
                           { 'foreign.id' => 'self.parent' });
 __PACKAGE__->has_many('children', 'TreeLike', { 'foreign.parent' => 'self.id' });
 
+## since this is a self referential table we need to do a post deploy hook and get
+## some data in while constraints are off
+
+ sub sqlt_deploy_hook {
+   my ($self, $sqlt_table) = @_;
+
+   ## We don't seem to need this anymore, but keeping it for the moment
+   ## $sqlt_table->add_index(name => 'idx_name', fields => ['name']);
+ }
 1;
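
For reference, sqlt_deploy_hook in a result class receives the
SQL::Translator::Schema::Table object while DDL is being generated, so it
can adjust the emitted table; the commented-out line above shows the shape
of a typical call:

    sub sqlt_deploy_hook {
        my ($self, $sqlt_table) = @_;

        ## e.g. add an index on the name column at deploy time
        $sqlt_table->add_index(name => 'idx_name', fields => ['name']);
    }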

Modified: DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/TwoKeys.pm
===================================================================
--- DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/TwoKeys.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema/TwoKeys.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -10,9 +10,13 @@
 );
 __PACKAGE__->set_primary_key(qw/artist cd/);
 
-__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist' );
-__PACKAGE__->belongs_to( cd => 'DBICTest::Schema::CD', undef, { is_deferrable => 0 } );
+__PACKAGE__->belongs_to(
+    artist => 'DBICTest::Schema::Artist',
+    {'foreign.artistid'=>'self.artist'},
+);
 
+__PACKAGE__->belongs_to( cd => 'DBICTest::Schema::CD', undef, { is_deferrable => 0, add_fk_index => 0 } );
+
 __PACKAGE__->has_many(
   'fourkeys_to_twokeys', 'DBICTest::Schema::FourKeys_to_TwoKeys', {
     'foreign.t_artist' => 'self.artist',

Modified: DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema.pm
===================================================================
--- DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/lib/DBICTest/Schema.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -36,16 +36,14 @@
     'CD_to_Producer',
   ),
   qw/SelfRefAlias TreeLike TwoKeyTreeLike Event EventTZ NoPrimaryKey/,
-  qw/Collection CollectionObject TypedObject/,
-  qw/Owners BooksInLibrary/,
+  qw/Collection CollectionObject TypedObject Owners BooksInLibrary/,
   qw/ForceForeign/,
-  qw/LongColumns/,
 );
 
 sub sqlt_deploy_hook {
   my ($self, $sqlt_schema) = @_;
 
-  $sqlt_schema->drop_table('link');
+  $sqlt_schema->drop_table('dummy');
 }
 
 1;

Modified: DBIx-Class/0.09/trunk/t/lib/DBICTest.pm
===================================================================
--- DBIx-Class/0.09/trunk/t/lib/DBICTest.pm	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/lib/DBICTest.pm	2008-07-23 21:59:28 UTC (rev 4608)
@@ -29,6 +29,10 @@
   my $schema = DBICTest->init_schema(
     no_deploy=>1,
     no_populate=>1,
+    storage_type=>'::DBI::Replicated',
+    storage_type_args=>{
+    	balancer_type=>'DBIx::Class::Storage::DBI::Replicated::Balancer::Random'
+    },
   );
 
 This method removes the test SQLite database in t/var/DBIxClass.db 
@@ -42,9 +46,17 @@
 
 =cut
 
+sub has_custom_dsn {
+	return $ENV{"DBICTEST_DSN"} ? 1:0;
+}
+
+sub _sqlite_dbfilename {
+	return "t/var/DBIxClass.db";
+}
+
 sub _database {
     my $self = shift;
-    my $db_file = "t/var/DBIxClass.db";
+    my $db_file = $self->_sqlite_dbfilename;
 
     unlink($db_file) if -e $db_file;
     unlink($db_file . "-journal") if -e $db_file . "-journal";
@@ -72,13 +84,18 @@
     } else {
       $schema = DBICTest::Schema->compose_namespace('DBICTest');
     }
+    if( $args{storage_type}) {
+    	$schema->storage_type($args{storage_type});
+    }    
     if ( !$args{no_connect} ) {
       $schema = $schema->connect($self->_database);
-      $schema->storage->on_connect_do(['PRAGMA synchronous = OFF']);
+      $schema->storage->on_connect_do(['PRAGMA synchronous = OFF'])
+       unless $self->has_custom_dsn;
     }
     if ( !$args{no_deploy} ) {
-        __PACKAGE__->deploy_schema( $schema );
-        __PACKAGE__->populate_schema( $schema ) if( !$args{no_populate} );
+        __PACKAGE__->deploy_schema( $schema, $args{deploy_args} );
+        __PACKAGE__->populate_schema( $schema )
+         if( !$args{no_populate} );
     }
     return $schema;
 }
@@ -98,9 +115,10 @@
 sub deploy_schema {
     my $self = shift;
     my $schema = shift;
+    my $args = shift || {};
 
-    if ($ENV{"DBICTEST_SQLT_DEPLOY"}) {
-        return $schema->deploy();
+    if ($ENV{"DBICTEST_SQLT_DEPLOY"}) { 
+        $schema->deploy($args);    
     } else {
         open IN, "t/lib/sqlite.sql";
         my $sql;
@@ -108,6 +126,7 @@
         close IN;
         ($schema->storage->dbh->do($_) || print "Error on SQL: $_\n") for split(/;\n/, $sql);
     }
+    return;
 }
 
 =head2 populate_schema
@@ -208,15 +227,16 @@
         [ 1, 2 ],
         [ 1, 3 ],
     ]);
-
+    
     $schema->populate('TreeLike', [
         [ qw/id parent name/ ],
-        [ 1, 0, 'foo'  ],
-        [ 2, 1, 'bar'  ],
-        [ 5, 1, 'blop' ],
-        [ 3, 2, 'baz'  ],
-        [ 4, 3, 'quux' ],
-        [ 6, 2, 'fong'  ],
+        [ 1, undef, 'root' ],        
+        [ 2, 1, 'foo'  ],
+        [ 3, 2, 'bar'  ],
+        [ 6, 2, 'blop' ],
+        [ 4, 3, 'baz'  ],
+        [ 5, 4, 'quux' ],
+        [ 7, 3, 'fong'  ],
     ]);
 
     $schema->populate('Track', [
@@ -258,7 +278,15 @@
         [ 1, "Tools" ],
         [ 2, "Body Parts" ],
     ]);
-
+    
+    $schema->populate('TypedObject', [
+        [ qw/objectid type value/ ],
+        [ 1, "pointy", "Awl" ],
+        [ 2, "round", "Bearing" ],
+        [ 3, "pointy", "Knife" ],
+        [ 4, "pointy", "Tooth" ],
+        [ 5, "round", "Head" ],
+    ]);
     $schema->populate('CollectionObject', [
         [ qw/collection object/ ],
         [ 1, 1 ],
@@ -268,15 +296,6 @@
         [ 2, 5 ],
     ]);
 
-    $schema->populate('TypedObject', [
-        [ qw/objectid type value/ ],
-        [ 1, "pointy", "Awl" ],
-        [ 2, "round", "Bearing" ],
-        [ 3, "pointy", "Knife" ],
-        [ 4, "pointy", "Tooth" ],
-        [ 5, "round", "Head" ],
-    ]);
-
     $schema->populate('Owners', [
         [ qw/ownerid name/ ],
         [ 1, "Newton" ],

Modified: DBIx-Class/0.09/trunk/t/lib/sqlite.sql
===================================================================
--- DBIx-Class/0.09/trunk/t/lib/sqlite.sql	2008-07-23 20:11:13 UTC (rev 4607)
+++ DBIx-Class/0.09/trunk/t/lib/sqlite.sql	2008-07-23 21:59:28 UTC (rev 4608)
@@ -151,7 +151,7 @@
 --
 CREATE TABLE treelike (
   id INTEGER PRIMARY KEY NOT NULL,
-  parent integer NOT NULL,
+  parent integer NULL,
   name varchar(100) NOT NULL
 );
 



