[Bast-commits] r8006 - in DBIx-Class/0.08/branches/extended_rels: . lib/DBIx lib/DBIx/Class lib/DBIx/Class/CDBICompat lib/DBIx/Class/Manual lib/DBIx/Class/Schema lib/DBIx/Class/Storage lib/DBIx/Class/Storage/DBI lib/DBIx/Class/Storage/DBI/ADO lib/DBIx/Class/Storage/DBI/ODBC lib/DBIx/Class/Storage/DBI/Oracle lib/DBIx/Class/Storage/DBI/Sybase lib/DBIx/Class/Storage/DBI/Sybase/ASE lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server lib/SQL/Translator/Parser/DBIx t t/count t/inflate t/lib t/lib/DBICTest/Schema t/prefetch t/relationship t/resultset t/storage

ribasushi at dev.catalyst.perl.org
Tue Dec 1 10:59:25 GMT 2009


Author: ribasushi
Date: 2009-12-01 10:59:18 +0000 (Tue, 01 Dec 2009)
New Revision: 8006

Added:
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/ADO.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/ADO/
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/ASE/
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBIHacks.pm
   DBIx-Class/0.08/branches/extended_rels/t/747mssql_ado.t
   DBIx-Class/0.08/branches/extended_rels/t/count/search_related.t
   DBIx-Class/0.08/branches/extended_rels/t/inflate/datetime_sybase.t
   DBIx-Class/0.08/branches/extended_rels/t/prefetch/join_type.t
   DBIx-Class/0.08/branches/extended_rels/t/resultset/is_paged.t
   DBIx-Class/0.08/branches/extended_rels/t/resultset/plus_select.t
Removed:
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm
Modified:
   DBIx-Class/0.08/branches/extended_rels/
   DBIx-Class/0.08/branches/extended_rels/Changes
   DBIx-Class/0.08/branches/extended_rels/Makefile.PL
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/AccessorGroup.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/CDBICompat/Constructor.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Componentised.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Cursor.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Manual/FAQ.pod
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Manual/Joining.pod
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Manual/Troubleshooting.pod
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Ordered.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Relationship.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/ResultSet.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/ResultSetColumn.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/ResultSource.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Row.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/SQLAHacks.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Schema.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Schema/Versioned.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/AutoCast.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Cursor.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/MSSQL.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/NoBindVars.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Oracle.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Pg.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Replicated.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/SQLite.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/Statistics.pm
   DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/TxnScopeGuard.pm
   DBIx-Class/0.08/branches/extended_rels/lib/SQL/Translator/Parser/DBIx/Class.pm
   DBIx-Class/0.08/branches/extended_rels/t/05components.t
   DBIx-Class/0.08/branches/extended_rels/t/100populate.t
   DBIx-Class/0.08/branches/extended_rels/t/101populate_rs.t
   DBIx-Class/0.08/branches/extended_rels/t/104view.t
   DBIx-Class/0.08/branches/extended_rels/t/60core.t
   DBIx-Class/0.08/branches/extended_rels/t/71mysql.t
   DBIx-Class/0.08/branches/extended_rels/t/72pg.t
   DBIx-Class/0.08/branches/extended_rels/t/73oracle.t
   DBIx-Class/0.08/branches/extended_rels/t/746mssql.t
   DBIx-Class/0.08/branches/extended_rels/t/746sybase.t
   DBIx-Class/0.08/branches/extended_rels/t/74mssql.t
   DBIx-Class/0.08/branches/extended_rels/t/79aliasing.t
   DBIx-Class/0.08/branches/extended_rels/t/80unique.t
   DBIx-Class/0.08/branches/extended_rels/t/81transactions.t
   DBIx-Class/0.08/branches/extended_rels/t/86sqlt.t
   DBIx-Class/0.08/branches/extended_rels/t/93single_accessor_object.t
   DBIx-Class/0.08/branches/extended_rels/t/94versioning.t
   DBIx-Class/0.08/branches/extended_rels/t/95sql_maker.t
   DBIx-Class/0.08/branches/extended_rels/t/95sql_maker_quote.t
   DBIx-Class/0.08/branches/extended_rels/t/99dbic_sqlt_parser.t
   DBIx-Class/0.08/branches/extended_rels/t/count/prefetch.t
   DBIx-Class/0.08/branches/extended_rels/t/from_subquery.t
   DBIx-Class/0.08/branches/extended_rels/t/inflate/hri.t
   DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest.pm
   DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Artist.pm
   DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/CD.pm
   DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Track.pm
   DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Year1999CDs.pm
   DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Year2000CDs.pm
   DBIx-Class/0.08/branches/extended_rels/t/lib/sqlite.sql
   DBIx-Class/0.08/branches/extended_rels/t/prefetch/grouped.t
   DBIx-Class/0.08/branches/extended_rels/t/prefetch/via_search_related.t
   DBIx-Class/0.08/branches/extended_rels/t/relationship/core.t
   DBIx-Class/0.08/branches/extended_rels/t/relationship/update_or_create_multi.t
   DBIx-Class/0.08/branches/extended_rels/t/resultset/as_query.t
   DBIx-Class/0.08/branches/extended_rels/t/resultset/update_delete.t
   DBIx-Class/0.08/branches/extended_rels/t/storage/on_connect_call.t
   DBIx-Class/0.08/branches/extended_rels/t/storage/on_connect_do.t
   DBIx-Class/0.08/branches/extended_rels/t/storage/replication.t
   DBIx-Class/0.08/branches/extended_rels/t/zzzzzzz_sqlite_deadlock.t
Log:
 r7644 at Thesaurus (orig r7641):  ribasushi | 2009-09-12 12:46:51 +0200
 Even better localization of $@, and don't use Test::Warn for the time being, as something is freaking out Sub::UpLevel
 r7670 at Thesaurus (orig r7659):  ribasushi | 2009-09-14 18:24:44 +0200
 Someone claimed this is a problem...
 r7673 at Thesaurus (orig r7662):  ribasushi | 2009-09-15 09:43:46 +0200
 Warn when distinct is used with group_by
 r7674 at Thesaurus (orig r7663):  rbuels | 2009-09-15 22:45:32 +0200
 doc patch, clarified warning about using find_or_create() and friends on tables with auto-increment or similar columns
 r7675 at Thesaurus (orig r7664):  rbuels | 2009-09-15 22:55:15 +0200
 another doc clarification regarding auto-inc columns with find_or_create() and such functions
 r7683 at Thesaurus (orig r7672):  ribasushi | 2009-09-17 13:54:44 +0200
 Fix left-join chaining
 r7694 at Thesaurus (orig r7683):  ribasushi | 2009-09-18 12:36:42 +0200
  r6389 at Thesaurus (orig r6388):  caelum | 2009-05-23 22:48:06 +0200
  recreating Sybase branch
  r6395 at Thesaurus (orig r6394):  caelum | 2009-05-24 01:47:32 +0200
  try not to break mssql with the sybase changes
  r6488 at Thesaurus (orig r6487):  caelum | 2009-06-03 17:31:24 +0200
  resolve conflict
  r6490 at Thesaurus (orig r6489):  caelum | 2009-06-03 18:25:36 +0200
  add missing files to sybase branch
  r6492 at Thesaurus (orig r6491):  caelum | 2009-06-04 01:51:39 +0200
  fix Sybase DT stuff and storage bases
  r6493 at Thesaurus (orig r6492):  caelum | 2009-06-04 02:10:45 +0200
  fix base for mssql (can't be a sybase anymore)
  r6494 at Thesaurus (orig r6493):  caelum | 2009-06-04 02:20:37 +0200
  test sybase SMALLDATETIME inflation
  r6495 at Thesaurus (orig r6494):  caelum | 2009-06-04 04:52:31 +0200
  update Sybase docs
  r6501 at Thesaurus (orig r6500):  caelum | 2009-06-04 14:50:49 +0200
  sybase limit count without offset now works
  r6504 at Thesaurus (orig r6503):  caelum | 2009-06-04 18:03:01 +0200
  use TOP for sybase limit count thanks to refactored count
  r6505 at Thesaurus (orig r6504):  caelum | 2009-06-04 18:41:54 +0200
  back to counting rows for Sybase LIMIT counts
  r6506 at Thesaurus (orig r6505):  caelum | 2009-06-04 19:07:48 +0200
  minor sybase count fix
  r6512 at Thesaurus (orig r6511):  caelum | 2009-06-05 01:02:48 +0200
  test sybase group_by count, works
  r6513 at Thesaurus (orig r6512):  caelum | 2009-06-05 01:28:18 +0200
  set date format on _rebless correctly
  r6516 at Thesaurus (orig r6515):  caelum | 2009-06-05 02:24:46 +0200
  manually merged in sybase_noquote branch
  r6518 at Thesaurus (orig r6517):  caelum | 2009-06-05 06:34:25 +0200
  doesn't work yet
  r6520 at Thesaurus (orig r6519):  caelum | 2009-06-05 16:55:41 +0200
  update sybase types which shouldn't be quoted
  r6525 at Thesaurus (orig r6524):  caelum | 2009-06-06 04:40:51 +0200
  tweaks to sybase types
  r6527 at Thesaurus (orig r6526):  caelum | 2009-06-06 05:36:03 +0200
  temporary sybase noquote hack
  r6595 at Thesaurus (orig r6594):  caelum | 2009-06-10 13:46:37 +0200
  Sybase::NoBindVars now correctly quotes
  r6596 at Thesaurus (orig r6595):  caelum | 2009-06-10 14:04:19 +0200
  cache rsrc in NoBindVars, use name_sep
  r6597 at Thesaurus (orig r6596):  caelum | 2009-06-10 14:35:52 +0200
  Sybase count by first pk, if available
  r6599 at Thesaurus (orig r6598):  caelum | 2009-06-10 15:00:42 +0200
  cache rsrc in NoBindVars correctly
  r6600 at Thesaurus (orig r6599):  caelum | 2009-06-10 15:27:41 +0200
  handle unknown rsrc in NoBindVars and Sybase::NoBindVars
  r6605 at Thesaurus (orig r6604):  caelum | 2009-06-10 18:17:31 +0200
  cache rsrc properly in NoBindVars, return undef if no rsrc
  r6658 at Thesaurus (orig r6657):  caelum | 2009-06-13 05:57:40 +0200
  switch to DateTime::Format::Sybase
  r6700 at Thesaurus (orig r6699):  caelum | 2009-06-17 16:25:28 +0200
  rename and document dt setup method, will be an on_connect_call at later merge point
  r6701 at Thesaurus (orig r6700):  caelum | 2009-06-17 16:30:08 +0200
  more dt docs reorg
  r6715 at Thesaurus (orig r6714):  caelum | 2009-06-19 01:28:17 +0200
  todo tests for text/image columns in sybase
  r6716 at Thesaurus (orig r6715):  caelum | 2009-06-19 01:46:56 +0200
  added connect_call_blob_setup for Sybase
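  Illustrative usage of the new option (the on_connect_call syntax is
  the standard DBIC one; the log_on_update value is shown only as an
  example):

      my $schema = MyApp::Schema->connect($dsn, $user, $pass, {
          on_connect_call => [ [ blob_setup => log_on_update => 0 ] ],
      });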
  r6724 at Thesaurus (orig r6723):  caelum | 2009-06-19 17:12:20 +0200
  cleanups
  r6771 at Thesaurus (orig r6770):  caelum | 2009-06-23 16:42:32 +0200
  minor changes
  r6788 at Thesaurus (orig r6787):  caelum | 2009-06-25 05:31:06 +0200
  fixup POD, comment out count
  r6811 at Thesaurus (orig r6810):  caelum | 2009-06-28 02:14:56 +0200
  prototype blob implementation
  r6857 at Thesaurus (orig r6856):  caelum | 2009-06-29 23:45:19 +0200
  branch pushed, removing
  r6868 at Thesaurus (orig r6867):  caelum | 2009-06-30 03:39:51 +0200
  merge on_connect_call updates
  r6877 at Thesaurus (orig r6876):  caelum | 2009-06-30 12:46:43 +0200
  code cleanups
  r6957 at Thesaurus (orig r6956):  caelum | 2009-07-03 02:32:48 +0200
  minor changes
  r6959 at Thesaurus (orig r6958):  caelum | 2009-07-03 05:04:12 +0200
  fix sybase mro
  r7001 at Thesaurus (orig r7000):  caelum | 2009-07-07 13:34:23 +0200
  fix sybase rebless to NoBindVars
  r7021 at Thesaurus (orig r7020):  caelum | 2009-07-10 12:52:13 +0200
  fix NoBindVars
  r7053 at Thesaurus (orig r7052):  caelum | 2009-07-15 01:39:02 +0200
  set maxConnect in DSN and add docs
  r7065 at Thesaurus (orig r7064):  caelum | 2009-07-17 09:39:54 +0200
  make insertion of blobs into tables with identity columns work, other minor fixes
  r7070 at Thesaurus (orig r7069):  caelum | 2009-07-17 23:30:13 +0200
  some compatibility updates for older DBD::Sybase versions, some initial work on _select_args for blobs
  r7072 at Thesaurus (orig r7071):  caelum | 2009-07-19 23:57:11 +0200
  mangling _select_args turned out to be unnecessary
  r7073 at Thesaurus (orig r7072):  caelum | 2009-07-20 01:02:19 +0200
  minor cleanups
  r7074 at Thesaurus (orig r7073):  caelum | 2009-07-20 15:47:48 +0200
  blob update now works
  r7076 at Thesaurus (orig r7075):  caelum | 2009-07-20 19:06:46 +0200
  change the (incorrect) version check to a check for FreeTDS
  r7077 at Thesaurus (orig r7076):  caelum | 2009-07-20 19:13:25 +0200
  better check for FreeTDS thanks to arcanez
  r7089 at Thesaurus (orig r7086):  caelum | 2009-07-22 07:09:21 +0200
  minor cleanups
  r7091 at Thesaurus (orig r7088):  caelum | 2009-07-22 17:05:37 +0200
  remove unnecessary test Result class
  r7092 at Thesaurus (orig r7089):  caelum | 2009-07-23 00:47:14 +0200
  fix doc for how to check for FreeTDS
  r7095 at Thesaurus (orig r7092):  caelum | 2009-07-23 14:35:53 +0200
  doc tweak
  r7115 at Thesaurus (orig r7112):  caelum | 2009-07-24 09:58:24 +0200
  add support for IDENTITY_INSERT
  r7117 at Thesaurus (orig r7114):  caelum | 2009-07-24 16:19:08 +0200
  savepoint support
  r7120 at Thesaurus (orig r7117):  caelum | 2009-07-24 20:35:37 +0200
  fix race condition in last_insert_id with placeholders
  r7121 at Thesaurus (orig r7118):  caelum | 2009-07-24 21:22:25 +0200
  code cleanup
  r7124 at Thesaurus (orig r7121):  caelum | 2009-07-25 16:19:58 +0200
  use _resolve_column_info in NoBindVars
  r7125 at Thesaurus (orig r7122):  caelum | 2009-07-25 21:23:49 +0200
  make insert work as a nested transaction too
  r7126 at Thesaurus (orig r7123):  caelum | 2009-07-25 22:52:17 +0200
  add money type support
  r7128 at Thesaurus (orig r7125):  caelum | 2009-07-27 03:48:35 +0200
  better FreeTDS support
  r7130 at Thesaurus (orig r7127):  caelum | 2009-07-28 06:23:54 +0200
  minor refactoring, cleanups, doc updates
  r7131 at Thesaurus (orig r7128):  caelum | 2009-07-28 09:32:45 +0200
  forgot to set mro in dbi::cursor
  r7141 at Thesaurus (orig r7138):  caelum | 2009-07-30 10:21:20 +0200
  better test for "smalldatetime" in Sybase
  r7146 at Thesaurus (orig r7143):  caelum | 2009-07-30 15:37:18 +0200
  update sqlite test schema
  r7207 at Thesaurus (orig r7204):  caelum | 2009-08-04 23:40:16 +0200
  update Changes
  r7222 at Thesaurus (orig r7219):  caelum | 2009-08-05 11:02:26 +0200
  fix a couple minor issues after pull from trunk
  r7260 at Thesaurus (orig r7257):  caelum | 2009-08-07 14:45:18 +0200
  add note about where to get Schema::Loader
  r7273 at Thesaurus (orig r7270):  ribasushi | 2009-08-09 01:19:49 +0200
  Changes and minor code rewrap
  r7285 at Thesaurus (orig r7282):  ribasushi | 2009-08-10 08:08:06 +0200
  pesky whitespace
  r7286 at Thesaurus (orig r7283):  ribasushi | 2009-08-10 08:11:46 +0200
  privatize dormant method - it may be useful for sybase at *some* point
  r7287 at Thesaurus (orig r7284):  ribasushi | 2009-08-10 08:19:55 +0200
  Whoops
  r7289 at Thesaurus (orig r7286):  caelum | 2009-08-10 08:44:51 +0200
  document placeholders_with_type_conversion_supported and add a redispatch to reblessed storage in DBI::update
  r7290 at Thesaurus (orig r7287):  caelum | 2009-08-10 10:07:45 +0200
  fix and test redispatch to reblessed storage insert/update
  r7292 at Thesaurus (orig r7289):  caelum | 2009-08-10 10:32:37 +0200
  rename get_connected_schema to get_schema in sybase test
  r7345 at Thesaurus (orig r7342):  ribasushi | 2009-08-18 22:45:06 +0200
  Fix Changes
  r7367 at Thesaurus (orig r7364):  ribasushi | 2009-08-23 10:00:34 +0200
 Minor speedup
  r7368 at Thesaurus (orig r7365):  ribasushi | 2009-08-23 10:01:10 +0200
  Generalize and hide placeholder support check
  r7369 at Thesaurus (orig r7366):  ribasushi | 2009-08-23 10:04:26 +0200
  Rename the common sybase driver
  r7373 at Thesaurus (orig r7370):  caelum | 2009-08-24 13:21:51 +0200
  make insert only use a txn if needed, add connect_call_unsafe_insert
  r7374 at Thesaurus (orig r7371):  caelum | 2009-08-24 14:42:57 +0200
  add test for IDENTITY_INSERT
  r7378 at Thesaurus (orig r7375):  caelum | 2009-08-24 15:51:48 +0200
  use debugobj->callback instead of local *_query_start in test to capture query
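  A minimal sketch of the capture technique (debugobj and its callback
  accessor are the standard DBIx::Class::Storage::Statistics hooks; the
  capture logic itself is illustrative):

      my @queries;
      $schema->storage->debug(1);
      $schema->storage->debugobj->callback(sub {
          my ($op, $message) = @_;  # e.g. ('INSERT', 'INSERT INTO ...: <binds>')
          push @queries, $message;
      });
      # run the code under test, then inspect @queries - no need to
      # monkeypatch _query_start via a local glob override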
  r7379 at Thesaurus (orig r7376):  caelum | 2009-08-24 17:19:46 +0200
  remove duplicate oracle method and fix an mssql method call
  r7417 at Thesaurus (orig r7414):  caelum | 2009-08-29 07:23:45 +0200
  update link to Schema::Loader branch
  r7427 at Thesaurus (orig r7424):  caelum | 2009-08-29 09:31:41 +0200
  switch to ::DBI::AutoCast
  r7428 at Thesaurus (orig r7425):  ribasushi | 2009-08-29 13:36:22 +0200
  Cleanup:
  Added commented method signatures for easier debugging
  privatize transform_unbound_value as _prep_bind_value
 Replaced \@_ splices with simple shifts
  Exposed TYPE_MAPPING used by native_data_type via our
  Removed use of txn_do - internal code uses the scope guard
  Renamed some variables, whitespace cleanup, the works
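 A before/after sketch of the \@_ handling change (method and argument
 names hypothetical):

     # before: destructively splicing the argument list
     sub _insert_old {
         my $self = shift;
         my ($source, $to_insert) = splice @_, 0, 2;
         # ...
     }

     # after: plain shifts
     sub _insert_new {
         my $self      = shift;
         my $source    = shift;
         my $to_insert = shift;
         # ...
     }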
  r7429 at Thesaurus (orig r7426):  ribasushi | 2009-08-29 13:40:48 +0200
  Varname was absolutely correct
  r7430 at Thesaurus (orig r7427):  caelum | 2009-08-29 14:09:13 +0200
  minor changes for tests to pass again
  r7431 at Thesaurus (orig r7428):  caelum | 2009-08-29 21:08:51 +0200
  fix inserts with active cursors
  r7432 at Thesaurus (orig r7429):  caelum | 2009-08-29 22:53:02 +0200
  remove extra connection
  r7434 at Thesaurus (orig r7431):  caelum | 2009-08-30 00:02:20 +0200
  test correlated subquery
  r7442 at Thesaurus (orig r7439):  ribasushi | 2009-08-30 09:07:00 +0200
 Put the comment back
  r7443 at Thesaurus (orig r7440):  ribasushi | 2009-08-30 09:15:41 +0200
 Change should_quote_value to interpolate_unquoted to make it harder to stop quoting by accident (it's easier to return an undef by accident than a 1)
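 The idea, sketched (method name from the log above; the body is
 illustrative, not the real implementation):

     # A *true* return is now what triggers the dangerous path (raw SQL
     # interpolation); an accidental undef falls through to normal
     # quoting/binding.
     sub interpolate_unquoted {
         my ($self, $type, $value) = @_;
         return 1 if ($type || '') =~ /int/i && $value =~ /^\d+\z/;
         return 0;  # default: quote/bind as usual - the safe side
     }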
  r7446 at Thesaurus (orig r7443):  caelum | 2009-08-30 18:19:46 +0200
  added txn_scope_guards for blob operations
  r7447 at Thesaurus (orig r7444):  ribasushi | 2009-08-30 18:56:43 +0200
  Rename insert_txn to unsafe_insert
  r7512 at Thesaurus (orig r7509):  ribasushi | 2009-09-03 20:24:14 +0200
  Minor cleanups
  r7575 at Thesaurus (orig r7572):  caelum | 2009-09-05 07:23:57 +0200
  pending review by mpeppler
  r7593 at Thesaurus (orig r7590):  ribasushi | 2009-09-07 09:10:05 +0200
  Release 0.08111 tag
  r7594 at Thesaurus (orig r7591):  ribasushi | 2009-09-07 09:14:33 +0200
  Whoops this should not have committed
  r7602 at Thesaurus (orig r7599):  caelum | 2009-09-07 21:31:38 +0200
  fix _insert_dbh code to only connect when needed, doc update
  r7607 at Thesaurus (orig r7604):  caelum | 2009-09-09 02:15:54 +0200
  remove unsafe_insert
  r7608 at Thesaurus (orig r7605):  ribasushi | 2009-09-09 09:14:20 +0200
  Localisation ain't free, we don't do it unless we have to
  r7609 at Thesaurus (orig r7606):  ribasushi | 2009-09-09 09:40:29 +0200
  Much simpler
  r7610 at Thesaurus (orig r7607):  ribasushi | 2009-09-09 10:38:41 +0200
  Reduce amount of perl-golf :)
  r7611 at Thesaurus (orig r7608):  ribasushi | 2009-09-09 10:41:15 +0200
  This should not have worked - I guess we lack tests?
  r7614 at Thesaurus (orig r7611):  caelum | 2009-09-09 12:08:36 +0200
  test multi-row blob update
  r7619 at Thesaurus (orig r7616):  caelum | 2009-09-09 18:01:15 +0200
  remove Sub::Name hack for method dispatch, pass $next instead
  r7620 at Thesaurus (orig r7617):  caelum | 2009-09-10 02:16:03 +0200
  do blob update over _insert_dbh
  r7661 at Thesaurus (orig r7650):  caelum | 2009-09-13 10:27:44 +0200
  change _insert_dbh to _insert_storage
  r7663 at Thesaurus (orig r7652):  caelum | 2009-09-13 11:52:20 +0200
  make sure _init doesn't loop, steal insert_bulk from mssql, add some insert_bulk tests
  r7664 at Thesaurus (orig r7653):  caelum | 2009-09-13 13:27:51 +0200
  allow subclassing of methods proxied to _writer_storage
  r7666 at Thesaurus (orig r7655):  caelum | 2009-09-14 15:09:21 +0200
  sybase bulk API support stuff (no blobs yet, coming soon...)
  r7667 at Thesaurus (orig r7656):  caelum | 2009-09-14 15:33:14 +0200
  add another test for sybase bulk stuff (passes)
  r7668 at Thesaurus (orig r7657):  caelum | 2009-09-14 15:44:06 +0200
  minor change (fix inverted boolean for warning)
  r7669 at Thesaurus (orig r7658):  caelum | 2009-09-14 15:48:52 +0200
  remove @args from DBI::sth, use full arg list
  r7676 at Thesaurus (orig r7665):  caelum | 2009-09-16 15:06:35 +0200
  use execute_array for insert_bulk, test insert_bulk with blobs, clean up blob tests a bit
  r7680 at Thesaurus (orig r7669):  ribasushi | 2009-09-16 19:36:19 +0200
  Remove branched changes
  r7682 at Thesaurus (orig r7671):  caelum | 2009-09-17 03:03:34 +0200
 I'll rewrite this bit tomorrow to be less convoluted
  r7684 at Thesaurus (orig r7673):  caelum | 2009-09-18 04:03:15 +0200
  fix yesterday's stuff, identity_update works, blob updates are better
  r7686 at Thesaurus (orig r7675):  caelum | 2009-09-18 04:22:38 +0200
  column no longer necessary in test
  r7688 at Thesaurus (orig r7677):  caelum | 2009-09-18 08:33:14 +0200
  fix freetds
  r7691 at Thesaurus (orig r7680):  ribasushi | 2009-09-18 12:25:42 +0200
   r7678 at Thesaurus (orig r7667):  ribasushi | 2009-09-16 19:31:14 +0200
   New subbranch
   r7679 at Thesaurus (orig r7668):  ribasushi | 2009-09-16 19:34:29 +0200
   Caelum's work so far
   r7690 at Thesaurus (orig r7679):  caelum | 2009-09-18 11:10:16 +0200
   support for blobs in insert_bulk fallback
  
  r7692 at Thesaurus (orig r7681):  ribasushi | 2009-09-18 12:28:09 +0200
  Rollback all bulk insert code before merge
 
 r7699 at Thesaurus (orig r7688):  ribasushi | 2009-09-18 14:12:05 +0200
 Cleanup exception handling
 r7700 at Thesaurus (orig r7689):  ribasushi | 2009-09-18 14:22:02 +0200
 duh
 r7701 at Thesaurus (orig r7690):  ribasushi | 2009-09-18 14:25:06 +0200
 Minor cleanup of RSC with has_many joins
 r7702 at Thesaurus (orig r7691):  ribasushi | 2009-09-18 14:32:15 +0200
 Changes and dev notes in makefile
 r7705 at Thesaurus (orig r7694):  ribasushi | 2009-09-18 14:52:26 +0200
 Nothing says the grouping column cannot be nullable
 r7706 at Thesaurus (orig r7695):  ribasushi | 2009-09-18 14:53:33 +0200
 Changes
 r7707 at Thesaurus (orig r7696):  ribasushi | 2009-09-18 20:09:04 +0200
 This code belongs in Storage::DBI
 r7708 at Thesaurus (orig r7697):  ribasushi | 2009-09-18 20:38:26 +0200
 Clear up some legacy cruft and straighten inheritance
 r7710 at Thesaurus (orig r7699):  ribasushi | 2009-09-21 00:25:20 +0200
 Backout sybase changes
 r7713 at Thesaurus (orig r7702):  ribasushi | 2009-09-21 00:46:32 +0200
 Missed a part of the revert
 r7720 at Thesaurus (orig r7709):  ribasushi | 2009-09-21 02:49:11 +0200
 Oops
 r7721 at Thesaurus (orig r7710):  ribasushi | 2009-09-21 11:02:14 +0200
 Changes
 r7722 at Thesaurus (orig r7711):  ribasushi | 2009-09-21 12:49:30 +0200
 Undocument the from attribute (the description was mostly outdated anyway)
 r7723 at Thesaurus (orig r7712):  ribasushi | 2009-09-21 12:58:58 +0200
 Release 0.08112
 r7726 at Thesaurus (orig r7715):  ribasushi | 2009-09-21 16:26:07 +0200
 A test for an obscure join syntax - make sure we don't break it
 r7732 at Thesaurus (orig r7721):  ribasushi | 2009-09-22 12:58:09 +0200
 this would break in the future - sanitize sql fed to the tester
 r7735 at Thesaurus (orig r7724):  ribasushi | 2009-09-22 13:07:31 +0200
 The hack is no longer necessary with a recent sqla
 r7740 at Thesaurus (orig r7729):  caelum | 2009-09-24 23:44:01 +0200
 add test for multiple active statements in mssql over dbd::sybase
 r7741 at Thesaurus (orig r7730):  caelum | 2009-09-25 08:46:22 +0200
 test on_connect_do with a coderef connect_info too
 r7742 at Thesaurus (orig r7731):  caelum | 2009-09-25 23:26:52 +0200
 failing test for simple transaction with mssql via dbd::sybase
 r7765 at Thesaurus (orig r7753):  ribasushi | 2009-10-03 15:49:14 +0200
 Test reorg (no changes)
 r7766 at Thesaurus (orig r7754):  ribasushi | 2009-10-03 15:55:25 +0200
 Add failing tests for RT#50003
 r7767 at Thesaurus (orig r7755):  caelum | 2009-10-03 16:09:45 +0200
 fix on_connect_ with coderef connect_info
 r7771 at Thesaurus (orig r7759):  ribasushi | 2009-10-04 13:17:53 +0200
 Fix AutoCast's POD
 r7782 at Thesaurus (orig r7770):  ribasushi | 2009-10-09 06:57:20 +0200
  r7777 at Thesaurus (orig r7765):  frew | 2009-10-07 20:05:05 +0200
  add method to check if an rs is paginated
  r7778 at Thesaurus (orig r7766):  frew | 2009-10-07 20:31:02 +0200
  is_paginated method and test
  r7780 at Thesaurus (orig r7768):  frew | 2009-10-09 06:45:36 +0200
  change name of method
  r7781 at Thesaurus (orig r7769):  frew | 2009-10-09 06:47:31 +0200
  add message to changelog for is_paged
 
 r7785 at Thesaurus (orig r7773):  ribasushi | 2009-10-09 11:00:36 +0200
 Ugh CRLF
 r7786 at Thesaurus (orig r7774):  ribasushi | 2009-10-09 11:04:35 +0200
 Skip versioning test on really old perls lacking Time::HiRes
 r7787 at Thesaurus (orig r7775):  ribasushi | 2009-10-09 11:04:50 +0200
 Changes
 r7788 at Thesaurus (orig r7776):  triode | 2009-10-09 22:32:04 +0200
 added troubleshooting case of excessive memory allocation involving TEXT/BLOB/etc
 columns and large LongReadLen
 
 r7789 at Thesaurus (orig r7777):  triode | 2009-10-09 22:44:21 +0200
 added my name to contributors list
 
 r7790 at Thesaurus (orig r7778):  ribasushi | 2009-10-10 18:49:15 +0200
 Whoops, this isn't right
 r7791 at Thesaurus (orig r7779):  ribasushi | 2009-10-11 15:44:18 +0200
 More ordered fixes
 r7793 at Thesaurus (orig r7781):  norbi | 2009-10-13 11:27:18 +0200
  r7982 at vger:  mendel | 2009-10-13 11:26:11 +0200
  Fixed a typo and a POD error.
 
 r7805 at Thesaurus (orig r7793):  ribasushi | 2009-10-16 14:28:35 +0200
 Fix test to stop failing when DT-support is not present
 r7811 at Thesaurus (orig r7799):  caelum | 2009-10-18 11:13:29 +0200
  r20728 at hlagh (orig r7703):  ribasushi | 2009-09-20 18:51:16 -0400
  Another try at a clean sybase branch
  r20730 at hlagh (orig r7705):  ribasushi | 2009-09-20 18:58:09 -0400
  Part one of the sybase work by Caelum (mostly reviewed)
  r20731 at hlagh (orig r7706):  ribasushi | 2009-09-20 19:18:40 -0400
  main sybase branch ready
  r21051 at hlagh (orig r7797):  caelum | 2009-10-18 04:57:43 -0400
   r20732 at hlagh (orig r7707):  ribasushi | 2009-09-20 19:20:00 -0400
   Branch for bulk insert
   r20733 at hlagh (orig r7708):  ribasushi | 2009-09-20 20:06:21 -0400
   All sybase bulk-insert code by Caelum
   r20750 at hlagh (orig r7725):  caelum | 2009-09-24 02:47:39 -0400
   clean up set_identity stuff
   r20751 at hlagh (orig r7726):  caelum | 2009-09-24 05:21:18 -0400
   minor cleanups, test update of blob to NULL
   r20752 at hlagh (orig r7727):  caelum | 2009-09-24 08:45:04 -0400
   remove some duplicate code
   r20753 at hlagh (orig r7728):  caelum | 2009-09-24 09:57:58 -0400
   fix insert with all defaults
   r20786 at hlagh (orig r7732):  caelum | 2009-09-25 21:17:16 -0400
   some cleanups
   r20804 at hlagh (orig r7736):  caelum | 2009-09-28 05:31:38 -0400
   minor changes
   r20805 at hlagh (orig r7737):  caelum | 2009-09-28 06:25:48 -0400
   fix DT stuff
   r20809 at hlagh (orig r7741):  caelum | 2009-09-28 22:25:55 -0400
   removed some dead code, added fix and test for _execute_array_empty
   r20811 at hlagh (orig r7743):  caelum | 2009-09-29 13:36:20 -0400
   minor changes after review
   r20812 at hlagh (orig r7744):  caelum | 2009-09-29 14:16:03 -0400
   do not clobber $rv from execute_array
   r20813 at hlagh (orig r7745):  caelum | 2009-09-29 14:38:14 -0400
   make insert_bulk atomic
   r20815 at hlagh (orig r7747):  caelum | 2009-09-29 20:35:26 -0400
   remove _exhaaust_statements
   r20816 at hlagh (orig r7748):  caelum | 2009-09-29 21:48:38 -0400
   fix insert_bulk when not using bulk api inside a txn
   r20831 at hlagh (orig r7749):  caelum | 2009-09-30 02:53:42 -0400
   added test for populate being atomic
   r20832 at hlagh (orig r7750):  caelum | 2009-09-30 03:00:59 -0400
   factor out subclass-specific _execute_array callback
   r20833 at hlagh (orig r7751):  caelum | 2009-10-01 11:59:30 -0400
   remove a piece of dead code
   r20840 at hlagh (orig r7758):  caelum | 2009-10-03 15:46:56 -0400
   remove _pretty_print
   r20842 at hlagh (orig r7760):  caelum | 2009-10-04 16:19:56 -0400
   minor optimization for insert_bulk
   r21050 at hlagh (orig r7796):  caelum | 2009-10-18 04:56:54 -0400
   error checking related to literal SQL for insert_bulk
  
 
 r7820 at Thesaurus (orig r7808):  caelum | 2009-10-21 03:10:39 +0200
 add test for populate with literal sql mixed with binds, improve error messages
 r7823 at Thesaurus (orig r7811):  ribasushi | 2009-10-21 16:33:45 +0200
 Show what's wrong with the current populate code
 r7824 at Thesaurus (orig r7812):  caelum | 2009-10-22 11:10:38 +0200
 stringify values passed to populate/insert_bulk
 r7825 at Thesaurus (orig r7813):  ribasushi | 2009-10-22 13:17:41 +0200
 Some smokers run the suite for 30 *minutes* - the timeout seems to be too short for them (boggle)
 r7826 at Thesaurus (orig r7814):  caelum | 2009-10-22 14:41:37 +0200
 a few extra tests can never hurt, right? :)
 r7827 at Thesaurus (orig r7815):  ribasushi | 2009-10-23 10:51:05 +0200
 Prevent sqlt from failing silently
 r7828 at Thesaurus (orig r7816):  ribasushi | 2009-10-23 10:52:49 +0200
 { is_foreign_key_constraint => 0, on_delete => undef } is a valid construct - no need to carp
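 For reference, the construct in question as it might appear in a
 relationship declaration (result class and column names hypothetical):

     __PACKAGE__->belongs_to(
         artist => 'MyApp::Schema::Result::Artist', 'artist_id',
         { is_foreign_key_constraint => 0, on_delete => undef },
     );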
 r7832 at Thesaurus (orig r7820):  robkinyon | 2009-10-26 20:11:22 +0100
 Fixed bad if-check in columns()
 r7840 at Thesaurus (orig r7828):  caelum | 2009-10-31 14:01:56 +0100
 change repository in meta to point to real svn url rather than svnweb
 r7842 at Thesaurus (orig r7830):  caelum | 2009-10-31 21:04:39 +0100
 pass sqlite_version to SQLT
 r7843 at Thesaurus (orig r7831):  caelum | 2009-10-31 21:22:37 +0100
 fix regex to numify sqlite_version
 r7844 at Thesaurus (orig r7832):  caelum | 2009-10-31 23:59:19 +0100
 work-around disconnect bug with DBD::Pg 2.15.1
 r7855 at Thesaurus (orig r7843):  ribasushi | 2009-11-04 10:55:51 +0100
  r7817 at Thesaurus (orig r7805):  rbuels | 2009-10-21 02:37:28 +0200
  making a branch, here we go again with the pg_unqualified_schema
  r7818 at Thesaurus (orig r7806):  rbuels | 2009-10-21 02:38:59 +0200
  more pg unqualified schema tests, which expose a gap in the coverage
  r7819 at Thesaurus (orig r7807):  rbuels | 2009-10-21 03:10:38 +0200
  gutted Pg storage driver's sequence discovery to just rely on DBD::Pg's last_insert_id.  this needs testing with older versions of DBD::Pg
  r7821 at Thesaurus (orig r7809):  rbuels | 2009-10-21 04:00:39 +0200
  more coverage in Pg sequence-discovery tests.  I think this shows why last_insert_id cannot be used.
  r7822 at Thesaurus (orig r7810):  rbuels | 2009-10-21 04:07:05 +0200
  reverted [7807], and just changed the code to use the custom pg_catalog query - the only thing that works in the pathological case where DBIC is told a different primary key from the one actually set on the table in the DB ([7809] added testing for this)
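  The pathological case described above, sketched (table and column
  names hypothetical):

      __PACKAGE__->table('artist');
      __PACKAGE__->add_columns(
          artistid => { data_type => 'integer', is_auto_increment => 1 },
          name     => { data_type => 'varchar', size => 100 },
      );
      # The DB-side primary key (and its sequence default) is on
      # 'artistid', but the class declares a different PK, so DBD::Pg's
      # last_insert_id() is of no use - the sequence is discovered via a
      # pg_catalog query instead.
      __PACKAGE__->set_primary_key('name');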
  r7852 at Thesaurus (orig r7840):  rbuels | 2009-11-03 18:47:05 +0100
  added Changes line mentioning tweak to Pg auto-inc fix
  r7854 at Thesaurus (orig r7842):  ribasushi | 2009-11-04 10:55:35 +0100
  Cleanup exceptions
 
 r7858 at Thesaurus (orig r7846):  caelum | 2009-11-06 16:01:30 +0100
 transactions for MSSQL over DBD::Sybase
 r7861 at Thesaurus (orig r7849):  caelum | 2009-11-10 13:16:18 +0100
 made commit/rollback when disconnected an exception
 r7862 at Thesaurus (orig r7850):  robkinyon | 2009-11-10 17:19:57 +0100
 Added a note about select
 r7863 at Thesaurus (orig r7851):  ribasushi | 2009-11-10 18:23:10 +0100
 Changes
 r7867 at Thesaurus (orig r7855):  frew | 2009-11-11 21:56:37 +0100
 RT50874
 r7868 at Thesaurus (orig r7856):  frew | 2009-11-11 23:50:43 +0100
 RT50828
 r7869 at Thesaurus (orig r7857):  frew | 2009-11-11 23:54:15 +0100
 clearer test message
 r7870 at Thesaurus (orig r7858):  frew | 2009-11-12 00:37:27 +0100
 some cleanup for $rs->populate
 r7872 at Thesaurus (orig r7860):  ribasushi | 2009-11-12 01:35:36 +0100
 Fix find on resultset with custom result_class
 r7873 at Thesaurus (orig r7861):  ribasushi | 2009-11-12 01:40:14 +0100
 Fix return value of in_storage
 r7874 at Thesaurus (orig r7862):  ribasushi | 2009-11-12 01:43:48 +0100
 Extra FAQ entry
 r7875 at Thesaurus (orig r7863):  ribasushi | 2009-11-12 02:11:25 +0100
 Sanify _determine_driver handling in ::Storage::DBI
 r7876 at Thesaurus (orig r7864):  ribasushi | 2009-11-12 02:14:37 +0100
 Add mysql determine_driver test by Pedro Melo
 r7881 at Thesaurus (orig r7869):  ribasushi | 2009-11-12 11:10:04 +0100
 _cond_for_update_delete is hopelessly broken attempting to introspect SQLA1. Replace with a horrific but effective hack
 r7882 at Thesaurus (orig r7870):  ribasushi | 2009-11-12 11:15:12 +0100
 Clarifying comment
 r7884 at Thesaurus (orig r7872):  ribasushi | 2009-11-13 00:13:40 +0100
 The real fix for the non-introspectable condition bug, mst++
 r7885 at Thesaurus (orig r7873):  ribasushi | 2009-11-13 00:24:56 +0100
 Some cleanup
 r7887 at Thesaurus (orig r7875):  frew | 2009-11-13 10:01:37 +0100
 fix subtle bug with Sybase database type determination
 r7892 at Thesaurus (orig r7880):  frew | 2009-11-14 00:53:29 +0100
 release woo!
 r7894 at Thesaurus (orig r7882):  caelum | 2009-11-14 03:57:52 +0100
 fix oracle dep in Makefile.PL
 r7895 at Thesaurus (orig r7883):  caelum | 2009-11-14 04:20:53 +0100
 skip Oracle BLOB tests on DBD::Oracle == 1.23
 r7897 at Thesaurus (orig r7885):  caelum | 2009-11-14 09:40:01 +0100
  r7357 at pentium (orig r7355):  caelum | 2009-08-20 17:58:23 -0400
  branch to support MSSQL over ADO
  r7358 at pentium (orig r7356):  caelum | 2009-08-21 00:32:14 -0400
  something apparently working
  r7359 at pentium (orig r7357):  caelum | 2009-08-21 00:53:53 -0400
  slightly better mars test, still passes
 
 r7899 at Thesaurus (orig r7887):  caelum | 2009-11-14 09:41:54 +0100
  r7888 at pentium (orig r7886):  caelum | 2009-11-14 03:41:25 -0500
  add TODO test for large column list in select
 
 r7901 at Thesaurus (orig r7889):  caelum | 2009-11-14 09:47:16 +0100
 add ADO/MSSQL to Changes
 r7902 at Thesaurus (orig r7890):  caelum | 2009-11-14 10:27:29 +0100
 fix the large column list test for ADO/MSSQL, now passes
 r7904 at Thesaurus (orig r7892):  caelum | 2009-11-14 12:20:58 +0100
 fix Changes (ADO change in wrong release)
 r7905 at Thesaurus (orig r7893):  ribasushi | 2009-11-14 19:23:23 +0100
 Release 0.08114
 r7907 at Thesaurus (orig r7895):  ribasushi | 2009-11-15 12:09:17 +0100
 Failing test to highlight mssql autoconnect regression
 r7908 at Thesaurus (orig r7896):  ribasushi | 2009-11-15 12:20:25 +0100
 Fix plan
 r7913 at Thesaurus (orig r7901):  ribasushi | 2009-11-15 13:11:38 +0100
  r7773 at Thesaurus (orig r7761):  norbi | 2009-10-05 14:49:06 +0200
  Created branch 'prefetch_bug-unqualified_column_in_search_related_cond': A bug that manifests when a prefetched table's column is referenced without the table name in the condition of a search_related() on an M:N relationship.
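  The shape of the bug, sketched (classes and relationship names
  hypothetical):

      # M:N: User -> user_roles -> role, with 'role' prefetched
      my $admins = $user->search_related('user_roles')
                        ->search_related('role', { name => 'admin' });
      # the unqualified 'name' in the condition used to resolve
      # incorrectly against the prefetched table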
  r7878 at Thesaurus (orig r7866):  ribasushi | 2009-11-12 02:36:08 +0100
  Factor some code out
  r7879 at Thesaurus (orig r7867):  ribasushi | 2009-11-12 09:11:03 +0100
  Factor out more stuff
  r7880 at Thesaurus (orig r7868):  ribasushi | 2009-11-12 09:21:04 +0100
  Saner naming/comments
  r7910 at Thesaurus (orig r7898):  ribasushi | 2009-11-15 12:39:29 +0100
  Move more code to DBIHacks, put back the update/delete rs check, just in case
  r7911 at Thesaurus (orig r7899):  ribasushi | 2009-11-15 13:01:34 +0100
  TODOify test until we get an AST
  r7912 at Thesaurus (orig r7900):  ribasushi | 2009-11-15 13:10:15 +0100
  Hide from pause
 
 r7921 at Thesaurus (orig r7909):  ribasushi | 2009-11-15 14:17:48 +0100
  r7871 at Thesaurus (orig r7859):  ribasushi | 2009-11-12 00:46:07 +0100
  Branches to test some ideas
  r7889 at Thesaurus (orig r7877):  abraxxa | 2009-11-13 12:05:50 +0100
  added rels to view result classes in test schema
  
  r7890 at Thesaurus (orig r7878):  abraxxa | 2009-11-13 13:05:45 +0100
  seems I found the bugger
  
  r7917 at Thesaurus (orig r7905):  ribasushi | 2009-11-15 13:29:23 +0100
  FK constraints towards a view don't quite work
  r7918 at Thesaurus (orig r7906):  ribasushi | 2009-11-15 14:10:10 +0100
  Turn into a straight-inheritance view class
  r7919 at Thesaurus (orig r7907):  ribasushi | 2009-11-15 14:11:03 +0100
  Extensive test of virtual and classic view relationships
  r7920 at Thesaurus (orig r7908):  ribasushi | 2009-11-15 14:17:23 +0100
  Fix non-sqlt schema file
 
 r7923 at Thesaurus (orig r7911):  caelum | 2009-11-15 18:31:37 +0100
 fix MSSQL via DBD::Sybase regression
 r7930 at Thesaurus (orig r7918):  ribasushi | 2009-11-16 19:15:45 +0100
  r7864 at Thesaurus (orig r7852):  edenc | 2009-11-10 20:15:15 +0100
  branching for fixes related to prefetch, distinct and group by
  r7865 at Thesaurus (orig r7853):  edenc | 2009-11-10 20:21:38 +0100
  added test case for ensuring a column mentioned in the order by clause is also included in the group by clause
  r7926 at Thesaurus (orig r7914):  ribasushi | 2009-11-16 08:09:30 +0100
  Make _resolve_column_info function without supplying column names
  r7927 at Thesaurus (orig r7915):  ribasushi | 2009-11-16 08:11:17 +0100
  Fix order_by/distinct bug
 
 r7937 at Thesaurus (orig r7925):  ribasushi | 2009-11-19 12:04:21 +0100
 Bail out early in Versioned if no versioning checks are requested
 r7938 at Thesaurus (orig r7926):  ribasushi | 2009-11-19 12:06:13 +0100
 POD fixes
 r7940 at Thesaurus (orig r7928):  caelum | 2009-11-22 11:03:33 +0100
 fix connection setup for Sybase
 r7943 at Thesaurus (orig r7931):  caelum | 2009-11-22 13:27:43 +0100
 override _run_connection_actions for internal connection setup in sybase stuff, much cleaner this way
 r7947 at Thesaurus (orig r7935):  ribasushi | 2009-11-23 01:18:28 +0100
 Whoops
 r7948 at Thesaurus (orig r7936):  ribasushi | 2009-11-23 01:28:50 +0100
 Fix ::Versioned regression introduced in r7925
 r7951 at Thesaurus (orig r7939):  caelum | 2009-11-23 12:32:10 +0100
 add subname to rdbms_specific_methods wrapper
 r7953 at Thesaurus (orig r7941):  caelum | 2009-11-23 13:23:14 +0100
  r21187 at hlagh (orig r7933):  ribasushi | 2009-11-22 18:38:34 -0500
  New sybase refactor branch
  r21188 at hlagh (orig r7934):  ribasushi | 2009-11-22 19:06:48 -0500
  refactor part1
  r21192 at hlagh (orig r7938):  ribasushi | 2009-11-22 19:30:05 -0500
  refactor part 2
  r21194 at hlagh (orig r7940):  caelum | 2009-11-23 07:06:46 -0500
  fix test
 
 r7955 at Thesaurus (orig r7943):  ribasushi | 2009-11-23 16:30:13 +0100
 Add missing Sub::Name invocations and improve the SQLA Carp overrides
 r7957 at Thesaurus (orig r7945):  ribasushi | 2009-11-24 10:12:49 +0100
  r7749 at Thesaurus (orig r7738):  norbi | 2009-09-28 22:01:39 +0200
  Created branch 'void_populate_resultset_cond': Fixing a bug: $rs->populate in void context does not use the conditions from $rs.
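  An example of the fixed behaviour (resultset and columns follow the
  usual t/lib DBICTest schema; illustrative only):

      my $artist_cds = $schema->resultset('CD')->search({ artist => 1 });

      # in void context this used to drop the { artist => 1 } condition;
      # with the fix the inserted rows inherit it, just as create() does
      $artist_cds->populate([
          { title => 'First',  year => 2009 },
          { title => 'Second', year => 2009 },
      ]);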
  r7751 at Thesaurus (orig r7740):  norbi | 2009-09-28 23:26:06 +0200
   r7935 at vger:  mendel | 2009-09-28 23:25:52 +0200
   Undid the previous tweaks to the already existing tests and added new tests instead.
  
  r7928 at Thesaurus (orig r7916):  ribasushi | 2009-11-16 08:48:42 +0100
  Change plan
  r7956 at Thesaurus (orig r7944):  ribasushi | 2009-11-24 10:10:49 +0100
  Better naming and a bit leaner implementation. Main idea remains the same
 
 r7959 at Thesaurus (orig r7947):  ribasushi | 2009-11-24 10:39:52 +0100
 Changes and prevent a spurious todo-pass
 r7962 at Thesaurus (orig r7950):  ribasushi | 2009-11-24 19:43:42 +0100
 Extra sqla quoting test
 r7963 at Thesaurus (orig r7951):  ribasushi | 2009-11-24 19:48:01 +0100
 Extra sqla quoting test(2)
 r7964 at Thesaurus (orig r7952):  ribasushi | 2009-11-25 21:24:10 +0100
 wtf
 r7967 at Thesaurus (orig r7955):  ribasushi | 2009-11-26 11:07:06 +0100
 cleanups
 r7968 at Thesaurus (orig r7956):  ribasushi | 2009-11-26 12:11:21 +0100
 Sanify search_related chaining code (no functional changes)
 r7969 at Thesaurus (orig r7957):  ribasushi | 2009-11-26 12:52:05 +0100
 Another count() quirk down
 r7970 at Thesaurus (orig r7958):  ribasushi | 2009-11-26 14:23:28 +0100
 Add a no-accessor column to generally test handling
 r7972 at Thesaurus (orig r7960):  ribasushi | 2009-11-26 15:32:17 +0100
 Whoops, wrong accessor (things still work though)
 r7977 at Thesaurus (orig r7965):  ribasushi | 2009-11-26 16:43:21 +0100
  r7971 at Thesaurus (orig r7959):  ribasushi | 2009-11-26 14:54:17 +0100
  New branch for get_inflated_column bugfix
  r7974 at Thesaurus (orig r7962):  ribasushi | 2009-11-26 15:56:20 +0100
  Fix for rt46953
  r7975 at Thesaurus (orig r7963):  ribasushi | 2009-11-26 16:05:17 +0100
  Make Test::More happy
  r7976 at Thesaurus (orig r7964):  ribasushi | 2009-11-26 16:43:09 +0100
  Changes
 
 r7980 at Thesaurus (orig r7968):  ribasushi | 2009-11-27 01:38:11 +0100
 Fix search_related wrt grouped resultsets (distinct is currently passed to the new resultset, this is probably wrong)
 r7987 at Thesaurus (orig r7975):  ribasushi | 2009-11-28 16:54:23 +0100
 Cleanup the s.c.o. index
 r7988 at Thesaurus (orig r7976):  ribasushi | 2009-11-28 16:57:04 +0100
 Test based on http://lists.scsys.co.uk/pipermail/dbix-class/2009-November/008599.html
 r8007 at Thesaurus (orig r7995):  castaway | 2009-11-30 16:20:19 +0100
 Remove over-emphasis on +select/+as. Add docs on prefetch and other ways to get related data, with caveats etc. 
 
 r8009 at Thesaurus (orig r7997):  dew | 2009-11-30 19:37:00 +0100
 Alter the docs for has_many relationships to make them a little easier to grok



Property changes on: DBIx-Class/0.08/branches/extended_rels
___________________________________________________________________
Name: svk:merge
   - 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/cookbook_fixes:7657
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7237
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/autocast:7418
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/connect_info_hash:7435
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cookbook_fixes:7479
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_has_many_join:7382
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7566
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:5651
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:7637
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
   + 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/cookbook_fixes:7657
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/prefetch_bug-unqualified_column_in_search_related_cond:7959
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/void_populate_resultset_cond:7935
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7982
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/ado_mssql:7886
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/autocast:7418
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/connect_info_hash:7435
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cookbook_fixes:7479
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/get_inflated_columns_rt46953:7964
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_has_many_join:7382
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/is_resultset_paginated:7769
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7842
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch-group_by:7917
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_bug-unqualified_column_in_search_related_cond:7900
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:7682
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulk_insert:7679
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulkinsert_support:7796
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_refactor:7940
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_support:7797
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/view_rels:7908
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/void_populate_resultset_cond:7944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:7997
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510

Modified: DBIx-Class/0.08/branches/extended_rels/Changes
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/Changes	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/Changes	2009-12-01 10:59:18 UTC (rev 8006)
@@ -1,18 +1,80 @@
 Revision history for DBIx::Class
 
+        - Fix distinct => 1 with non-selecting order_by (the columns
+          in order_by also need to be added to the resulting group_by)
+        - Do not attempt to deploy FK constraints pointing to a View
+        - Refactored Sybase storage driver into a central ::DBI::Sybase
+          dispatcher, and a sybase-specific ::DBI::Sybase::ASE
+        - Make sure populate() inherits the resultset conditions just
+          like create() does
+        - Fix count/objects from search_related on limited resultset
+        - Make get_inflated_columns behave identically to get_columns
+          wrt +select/+as (RT#46953)
+
+0.08114 2009-11-14 17:45:00 (UTC)
+        - Preliminary support for MSSQL via DBD::ADO
+        - Fix botched 0.08113 release (invalid tarball)
+
+0.08113 2009-11-13 23:13:00 (UTC)
+        - Fix populate with has_many bug
+          (RT #50828)
+        - Fix Oracle autoincrement broken for Resultsets with scalar refs
+          (RT #50874)
+        - Complete Sybase RDBMS support including:
+          - Support for TEXT/IMAGE columns
+          - Support for the 'money' datatype
+          - Transaction savepoints support
+          - DateTime inflation support
+          - Support for bind variables when connecting to a newer Sybase with
+             OpenClient libraries
+          - Support for connections via FreeTDS with CASTs for bind variables
+             when needed
+          - Support for interpolated variables with proper quoting when
+             connecting to an older Sybase and/or via FreeTDS
+          - bulk API support for populate()
+        - Transaction support for MSSQL via DBD::Sybase
+        - Add is_paged method to DBIx::Class::ResultSet so that we can
+          check whether the resultset has been paginated
+        - Skip versioning test on really old perls lacking Time::HiRes
+          (RT #50209)
+        - Fixed on_connect_do/call regression when used with a coderef
+          connector (RT #50003)
+        - A couple of fixes to Ordered to remedy subclassing issues
+        - Fixed another lingering problem with PostgreSQL
+          auto-increment support and its interaction with multiple
+          schemas
+        - Remove some IN workarounds, and require a recent version of
+          SQLA instead
+        - Improvements to populate's handling of mixed scalarref values
+        - Fixed regression losing result_class after $rs->find (introduced
+          in 0.08108)
+        - Fix in_storage() to return 1|0 as per existing documentation
+        - Centralize handling of _determine_driver calls prior to certain
+          ::Storage::DBI methods
+        - Fix update/delete arbitrary condition handling (RT#51409)
+        - POD improvements
+
+0.08112 2009-09-21 10:57:00 (UTC)
         - Remove the recommends from Makefile.PL, DBIx::Class is not
           supposed to have optional dependencies. ever.
         - Mangle the DBIx/Class.pm POD to be more clear about
           copyright and license
         - Put back PG's multiple autoinc per table support, accidentally
-          dropped during the serial-autodetection rwrite
+          dropped during the serial-autodetection rewrite
         - Make sure ResultSetColumn does not depend on the (undefined)
           return value of ->cursor->reset()
         - Add single() to ResultSetColumn (same semantics as ResultSet)
         - Make sure to turn off IDENTITY_INSERT after insert() on MSSQL
           tables that needed it
         - More informative exception on failing _resolve_relationship
+        - Allow undef/NULL as the sole grouping value in Ordered
         - Fix unreported rollback exceptions in TxnScopeGuard
+        - Fix overly-eager left-join chain enforcing code
+        - Warn about using distinct with an existing group_by
+        - Warn about attempting to $rs->get_column a non-unique column
+          when has_many joins are added to resultset
+        - Refactor of the exception handling system (now everything is a
+          DBIx::Class::Exception object)
 
 0.08111 2009-09-06 21:58:00 (UTC)
         - The hashref to connection_info now accepts a 'dbh_maker'
@@ -55,7 +117,7 @@
         - Support for MSSQL 'money' type
         - Support for 'smalldatetime' type used in MSSQL and Sybase for
           InflateColumn::DateTime
-        - support for Postgres 'timestamp without timezone' type in
+        - Support for Postgres 'timestamp without timezone' type in
           InflateColumn::DateTime (RT#48389)
         - Added new MySQL specific on_connect_call macro 'set_strict_mode'
           (also known as make_mysql_not_suck_as_much)
@@ -94,7 +156,7 @@
           nonexisting prefetch
         - make_column_dirty() now overwrites the deflated value with an
           inflated one if such exists
-        - Fixed set_$rel with where restriction deleting rows outside 
+        - Fixed set_$rel with where restriction deleting rows outside
           the restriction
         - populate() returns the created objects or an arrayref of the
           created objects depending on scalar vs. list context
@@ -146,7 +208,7 @@
           side of the relation, to avoid duplicates
         - DBIC now properly handles empty inserts (invoking all default
           values from the DB, normally via INSERT INTO tbl DEFAULT VALUES)
-        - Fix find_or_new/create to stop returning random rows when 
+        - Fix find_or_new/create to stop returning random rows when
           default value insert is requested (RT#28875)
         - Make IC::DT extra warning state the column name too
         - It is now possible to transparently search() on columns
@@ -168,9 +230,9 @@
         - Change ->count code to work correctly with DISTINCT (distinct => 1)
           via GROUP BY
         - Removed interpolation of bind vars for as_query - placeholders
-          are preserved and nested query bind variables are properly 
+          are preserved and nested query bind variables are properly
           merged in the correct order
-        - Refactor DBIx::Class::Storage::DBI::Sybase to automatically 
+        - Refactor DBIx::Class::Storage::DBI::Sybase to automatically
           load a subclass, namely Microsoft_SQL_Server.pm
           (similar to DBIx::Class::Storage::DBI::ODBC)
         - Refactor InflateColumn::DateTime to allow components to
@@ -233,7 +295,7 @@
           - not try and insert things tagged on via new_related unless required
         - Possible to set locale in IC::DateTime extra => {} config
         - Calling the accessor of a belongs_to when the foreign_key
-          was NULL and the row was not stored would unexpectedly fail 
+          was NULL and the row was not stored would unexpectedly fail
         - Split sql statements for deploy only if SQLT::Producer returned a scalar
           containing all statements to be executed
         - Add as_query() for ResultSet and ResultSetColumn. This makes subqueries
@@ -261,8 +323,8 @@
         - new order_by => { -desc => 'colname' } syntax supported
         - PG array datatype supported
         - insert should use store_column, not set_column to avoid marking
-          clean just-stored values as dirty. New test for this 
-        - regression test for source_name 
+          clean just-stored values as dirty. New test for this
+        - regression test for source_name
 
 0.08099_05 2008-10-30 21:30:00 (UTC)
         - Rewrite of Storage::DBI::connect_info(), extended with an
@@ -276,7 +338,7 @@
         - Fixed up related resultsets and multi-create
         - Fixed superfluous connection in ODBC::_rebless
         - Fixed undef PK for first insert in ODBC::Microsoft_SQL_Server
-        - Added virtual method to Versioned so a user can create upgrade 
+        - Added virtual method to Versioned so a user can create upgrade
           path across multiple versions (jgoulah)
         - Better (and marginally faster) implementation of the HashRefInflator
           hash construction algorithm
@@ -285,7 +347,7 @@
 
 0.08099_04 2008-07-24 01:00:00
         - Functionality to storage to enable a sub to be run without FK checks
-        - Fixed $schema->clone bug which caused clone and source to share 
+        - Fixed $schema->clone bug which caused clone and source to share
           internal hash refs
         - Added register_extra_source methods for additional sources
         - Added datetime_undef_if_invalid for InflateColumn::DateTime to
@@ -311,11 +373,11 @@
         - Add warnings for non-unique ResultSet::find queries
         - Changed Storage::DBI::Replication to Storage::DBI::Replicated and
           refactored support.
-        - By default now deploy/diff et al. will ignore constraint and index 
+        - By default now deploy/diff et al. will ignore constraint and index
           names
         - Add ResultSet::_is_deterministic_value, make new_result filter the
           values passed to new to drop values that would generate invalid SQL.
-        - Use Sub::Name to name closures before installing them. Fixes 
+        - Use Sub::Name to name closures before installing them. Fixes
           incompatibility with Moose method modifiers on generated methods.
 
 0.08010 2008-03-01 10:30
@@ -324,7 +386,7 @@
 0.08009 2008-01-20 13:30
         - Made search_rs smarter about when to preserve the cache to fix
           mm prefetch usage
-        - Added Storage::DBI subclass for MSSQL over ODBC. 
+        - Added Storage::DBI subclass for MSSQL over ODBC.
         - Added freeze, thaw and dclone methods to Schema so that thawed
           objects will get re-attached to the schema.
         - Moved dbicadmin to JSON::Any wrapped JSON.pm for a sane API
@@ -338,20 +400,20 @@
           foreign and self parts the wrong way round in the condition
         - ResultSetColumn::func() now returns all results if called in list
           context; this makes things like func('DISTINCT') work as expected
-        - Many-to-many relationships now warn if the utility methods would 
+        - Many-to-many relationships now warn if the utility methods would
           clash
         - InflateColumn::DateTime now accepts an extra parameter of timezone
           to set timezone on the DT object (thanks Sergio Salvi)
-        - Added sqlt_deploy_hook to result classes so that indexes can be 
+        - Added sqlt_deploy_hook to result classes so that indexes can be
           added.
-        - Added startup checks to warn loudly if we appear to be running on 
+        - Added startup checks to warn loudly if we appear to be running on
           RedHat systems from perl-5.8.8-10 and up that have the bless/overload
           patch applied (badly) which causes 2x -> 100x performance penalty.
           (Jon Schutz)
-        - ResultSource::reverse_relationship_info can distinguish between 
+        - ResultSource::reverse_relationship_info can distinguish between
           sources using the same table
         - Row::insert will now not fall over if passed duplicate related objects
-        - Row::copy will not fall over if you have two relationships to the 
+        - Row::copy will not fall over if you have two relationships to the
           same source with a unique constraint on it
 
 0.08007 2007-09-04 19:36:00
@@ -363,7 +425,7 @@
         - Move to using Class::C3::Componentised
         - Remove warn statement from DBIx::Class::Row
 
-0.08005 2007-08-06 
+0.08005 2007-08-06
         - add timestamp fix re rt.cpan 26978 - no test yet but change
           clearly should cause no regressions
         - provide alias for related_resultset via local() so it's set
@@ -378,7 +440,7 @@
           (original fix from diz)
 
 0.08004 2007-08-06 19:00:00
-        - fix storage connect code to not trigger bug via auto-viv 
+        - fix storage connect code to not trigger bug via auto-viv
           (test from aherzog)
         - fixup cursor_class to be an 'inherited' attr for per-package defaults
         - add default_resultset_attributes entry to Schema

Modified: DBIx-Class/0.08/branches/extended_rels/Makefile.PL
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/Makefile.PL	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/Makefile.PL	2009-12-01 10:59:18 UTC (rev 8006)
@@ -42,9 +42,10 @@
 requires 'Module::Find'             => '0.06';
 requires 'Path::Class'              => '0.16';
 requires 'Scope::Guard'             => '0.03';
-requires 'SQL::Abstract'            => '1.58';
+requires 'SQL::Abstract'            => '1.60';
 requires 'SQL::Abstract::Limit'     => '0.13';
 requires 'Sub::Name'                => '0.04';
+requires 'Data::Dumper::Concise'    => '1.000';
 
 my %replication_requires = (
   'Moose',                    => '0.87',
@@ -54,14 +55,18 @@
   'Hash::Merge',              => '0.11',
 );
 
-# when changing also adjust $DBIx::Class::minimum_sqlt_version
-my $sqlt_recommends = '0.11002';
-
+#************************************************************************#
+# Make *ABSOLUTELY SURE* that nothing on this list is a real require,    #
+# since every module listed in %force_requires_if_author is deleted      #
+# from the final META.yml (thus will never make it as a CPAN dependency) #
+#************************************************************************#
 my %force_requires_if_author = (
   %replication_requires,
 
+  # when changing also adjust $DBIx::Class::Storage::DBI::minimum_sqlt_version
+  'SQL::Translator'           => '0.11002',
+
 #  'Module::Install::Pod::Inherit' => '0.01',
-  'SQL::Translator'           => $sqlt_recommends,
 
   # when changing also adjust version in t/02pod.t
   'Test::Pod'                 => '1.26',
@@ -105,12 +110,23 @@
     ) : ()
   ,
 
-  $ENV{DBICTEST_ORACLE_DSN}
+  $ENV{DBICTEST_ORA_DSN}
     ? (
       'DateTime::Format::Oracle' => '0',
     ) : ()
   ,
+
+  $ENV{DBICTEST_SYBASE_DSN}
+    ? (
+      'DateTime::Format::Sybase' => 0,
+    ) : ()
+  ,
 );
+#************************************************************************#
+# Make ABSOLUTELY SURE that nothing on the list above is a real require, #
+# since every module listed in %force_requires_if_author is deleted      #
+# from the final META.yml (thus will never make it as a CPAN dependency) #
+#************************************************************************#
 
 
 install_script (qw|
@@ -123,15 +139,20 @@
 
 resources 'IRC'         => 'irc://irc.perl.org/#dbix-class';
 resources 'license'     => 'http://dev.perl.org/licenses/';
-resources 'repository'  => 'http://dev.catalyst.perl.org/svnweb/bast/browse/DBIx-Class/';
+resources 'repository'  => 'http://dev.catalyst.perl.org/repos/bast/DBIx-Class/';
 resources 'MailingList' => 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class';
 
-no_index 'DBIx::Class::Storage::DBI::Sybase::Base';
 no_index 'DBIx::Class::SQLAHacks';
 no_index 'DBIx::Class::SQLAHacks::MSSQL';
+no_index 'DBIx::Class::SQLAHacks::OracleJoins';
 no_index 'DBIx::Class::Storage::DBI::AmbiguousGlob';
-no_index 'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server';
-no_index 'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars';
+no_index 'DBIx::Class::Storage::DBIHacks';
+no_index 'DBIx::Class::PK::Auto::DB2';
+no_index 'DBIx::Class::PK::Auto::MSSQL';
+no_index 'DBIx::Class::PK::Auto::MySQL';
+no_index 'DBIx::Class::PK::Auto::Oracle';
+no_index 'DBIx::Class::PK::Auto::Pg';
+no_index 'DBIx::Class::PK::Auto::SQLite';
 
 # re-build README and require extra modules for testing if we're in a checkout
 

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/AccessorGroup.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/AccessorGroup.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/AccessorGroup.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -17,8 +17,6 @@
 
 This class now exists in its own right on CPAN as Class::Accessor::Grouped
 
-1;
-
 =head1 AUTHORS
 
 Matt S. Trout <mst at shadowcatsystems.co.uk>

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/CDBICompat/Constructor.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/CDBICompat/Constructor.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/CDBICompat/Constructor.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -3,6 +3,8 @@
 
 use base qw(DBIx::Class::CDBICompat::ImaDBI);
 
+use Sub::Name();
+
 use strict;
 use warnings;
 
@@ -22,7 +24,7 @@
     return carp("$method already exists in $class")
             if *$meth{CODE};
 
-    *$meth = sub {
+    *$meth = Sub::Name::subname $meth => sub {
             my $self = shift;
             $self->sth_to_objects($self->sql_Retrieve($fragment), \@_);
     };

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Componentised.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Componentised.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Componentised.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -4,31 +4,10 @@
 use strict;
 use warnings;
 
+###
+# Keep this class for backwards compatibility
+###
+
 use base 'Class::C3::Componentised';
-use Carp::Clan qw/^DBIx::Class/;
 
-sub inject_base {
-  my ($class, $target, @to_inject) = @_;
-  {
-    no strict 'refs';
-    foreach my $to (reverse @to_inject) {
-      my @comps = qw(DigestColumns ResultSetManager Ordered UTF8Columns);
-           # Add components here that need to be loaded before Core
-      foreach my $first_comp (@comps) {
-        if ($to eq 'DBIx::Class::Core' &&
-            $target->isa("DBIx::Class::${first_comp}")) {
-          carp "Possible incorrect order of components in ".
-               "${target}::load_components($first_comp) call: Core loaded ".
-               "before $first_comp. See the documentation for ".
-               "DBIx::Class::$first_comp for more information";
-        }
-      }
-      unshift( @{"${target}::ISA"}, $to )
-        unless ($target eq $to || $target->isa($to));
-    }
-  }
-
-  $class->next::method($target, @to_inject);
-}
-
 1;

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Cursor.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Cursor.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Cursor.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -3,6 +3,8 @@
 use strict;
 use warnings;
 
+use base qw/DBIx::Class/;
+
 =head1 NAME
 
 DBIx::Class::Cursor - Abstract object representing a query cursor on a

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Manual/FAQ.pod
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Manual/FAQ.pod	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Manual/FAQ.pod	2009-12-01 10:59:18 UTC (rev 8006)
@@ -371,6 +371,9 @@
 
 =item .. insert many rows of data efficiently?
 
+The C<populate> method in L<DBIx::Class::ResultSet> provides
+efficient bulk inserts.
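+
+For example, a minimal sketch (the C<Artist> source and its C<name>
+column are illustrative, not part of any particular schema):
+
+  $schema->resultset('Artist')->populate([
+    { name => 'Shiny Pop Singer' },
+    { name => 'Gruff Rocker' },
+  ]);
+
+In void context C<populate> uses the storage's bulk insert path and
+skips row object creation entirely, which is where the efficiency
+comes from.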
+
 =item .. update a collection of rows at the same time?
 
 Create a resultset using a search, to filter the rows of data you

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Manual/Joining.pod
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Manual/Joining.pod	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Manual/Joining.pod	2009-12-01 10:59:18 UTC (rev 8006)
@@ -17,7 +17,7 @@
 But I'll explain anyway. Assuming you have created your database in a
 more or less sensible way, you will end up with several tables that
 contain C<related> information. For example, you may have a table
-containing information about C<CDs>, containing the CD title and it's
+containing information about C<CD>s, with the CD title and its
 year of publication, and another table containing all the C<Track>s
 for the CDs, one track per row.
 
@@ -34,7 +34,8 @@
 So, joins are a way of extending simple select statements to include
 fields from other, related, tables. There are various types of joins,
 depending on which combination of the data you wish to retrieve, see
-MySQL's doc on JOINs: L<http://dev.mysql.com/doc/refman/5.0/en/join.html>.
+MySQL's doc on JOINs:
+L<http://dev.mysql.com/doc/refman/5.0/en/join.html>.
 
 =head1 DEFINING JOINS AND RELATIONSHIPS
 
@@ -42,7 +43,7 @@
 be defined in the L<ResultSource|DBIx::Class::Manual::Glossary/ResultSource> for the
 table. If the relationship needs to be accessed in both directions
 (i.e. Fetch all tracks of a CD, and fetch the CD data for a Track),
-then it needs to be defined in both tables.
+then it needs to be defined for both tables.
 
 For the CDs/Tracks example, that means writing, in C<MySchema::CD>:
 
@@ -68,14 +69,15 @@
 
 When performing either a L<search|DBIx::Class::ResultSet/search> or a
 L<find|DBIx::Class::ResultSet/find> operation, you can specify which
-C<relations> to also fetch data from (or sort by), using the
+C<relations> to additionally refine your results by, using the
 L<join|DBIx::Class::ResultSet/join> attribute, like this:
 
   $schema->resultset('CD')->search(
-    { 'Title' => 'Funky CD' },
+    { 'Title' => 'Funky CD',
+      'tracks.Name' => { like => 'T%' }
+    },
     { join      => 'tracks',
-      '+select' => [ 'tracks.Name', 'tracks.Artist' ],
-      '+as'     => [ 'TrackName', 'ArtistName' ]
+      order_by  => ['tracks.id'],
     }
   );
 
@@ -84,18 +86,120 @@
 L<DBIx::Class::ResultSet/ATTRIBUTES>, but here's a quick break down:
 
 The first argument to search is a hashref of the WHERE attributes, in
-this case a simple restriction on the Title column. The second
-argument is a hashref of attributes to the search, '+select' adds
-extra columns to the select (from the joined table(s) or from
-calculations), and '+as' gives aliases to those fields.
+this case a restriction on the Title column in the CD table, and a
+restriction on the name of the track in the Tracks table, but ONLY for
+tracks actually related to the chosen CD(s). The second argument is a
+hashref of attributes to the search; the results will be returned
+sorted by the C<id> of the related tracks.
 
-'join' specifies which C<relationships> to include in the query. The
-distinction between C<relationships> and C<tables> is important here,
-only the C<relationship> names are valid.
+The special 'join' attribute specifies which C<relationships> to
+include in the query. The distinction between C<relationships> and
+C<tables> is important here; only the C<relationship> names are valid.
 
-This example should magically produce SQL like the second select in
-L</WHAT ARE JOINS> above.
+This slightly contrived example will produce SQL similar to:
 
+  SELECT cd.ID, cd.Title, cd.Year FROM CD cd JOIN Tracks tracks ON cd.ID = tracks.CDID WHERE cd.Title = 'Funky CD' AND tracks.Name LIKE 'T%' ORDER BY tracks.id;
+
+=head1 FETCHING RELATED DATA
+
+Another common use for joining to related tables is to fetch the data
+from both tables in one query, preventing extra round-trips to the
+database. See the example above in L</WHAT ARE JOINS>.
+
+=head2 Whole related objects
+
+To fetch entire related objects, e.g. CDs and all Track data, use the
+'prefetch' attribute:
+
+  $schema->resultset('CD')->search(
+    { 'Title' => 'Funky CD',
+    },
+    { prefetch      => 'tracks',
+      order_by  => ['tracks.id'],
+    }
+  );
+
+This will produce SQL similar to the following:
+
+  SELECT cd.ID, cd.Title, cd.Year, tracks.id, tracks.Name, tracks.Artist FROM CD cd JOIN Tracks tracks ON cd.ID = tracks.CDID WHERE cd.Title = 'Funky CD' ORDER BY tracks.id;
+
+The syntax of 'prefetch' is the same as 'join' and implies the
+joining, so no need to use both together.
+
+=head2 Subset of related fields
+
+To fetch a subset of the related fields, the '+select' and '+as'
+attributes can be used. For example, if the CD data is required and
+just the track name from the Tracks table:
+
+  $schema->resultset('CD')->search(
+    { 'Title' => 'Funky CD',
+    },
+    { join      => 'tracks',
+      '+select' => ['tracks.Name'],
+      '+as'     => ['track_name'],
+      order_by  => ['tracks.id'],
+    }
+  );
+
+Which will produce the query:
+
+  SELECT cd.ID, cd.Title, cd.Year, tracks.Name FROM CD cd JOIN Tracks tracks ON cd.ID = tracks.CDID WHERE cd.Title = 'Funky CD' ORDER BY tracks.id;
+
+Note that the '+as' does not produce an SQL 'AS' keyword in the
+output; see the L<DBIx::Class::Manual::FAQ> for an explanation.
+
+This type of column restriction has a downside: the resulting $row
+object will have no 'track_name' accessor:
+
+  while(my $row = $search_rs->next) {
+     print $row->track_name; ## ERROR
+  }
+
+Instead C<get_column> must be used:
+
+  while(my $row = $search_rs->next) {
+     print $row->get_column('track_name'); ## WORKS
+  }
+
+=head2 Incomplete related objects
+
+In rare circumstances, you may also wish to fetch related data as
+incomplete objects. The usual reason to do this is when the related table
+has a very large field you don't need for the current data
+output. This is better solved by storing that field in a separate
+table which you only join to when needed.
+
+To fetch an incomplete related object, supply the dotted notation to the '+as' attribute: 
+
+  $schema->resultset('CD')->search(
+    { 'Title' => 'Funky CD',
+    },
+    { join      => 'tracks',
+      '+select' => ['tracks.Name'],
+      '+as'     => ['tracks.Name'], 
+      order_by  => ['tracks.id'],
+    }
+  );
+
+Which will produce the same query as above:
+
+  SELECT cd.ID, cd.Title, cd.Year, tracks.Name FROM CD cd JOIN Tracks tracks ON cd.ID = tracks.CDID WHERE cd.Title = 'Funky CD' ORDER BY tracks.id;
+
+Now you can access the result using the relationship accessor:
+
+  while(my $row = $search_rs->next) {
+     print $row->tracks->name; ## WORKS
+  }
+
+However, this will produce broken objects. If the C<tracks.id> column is
+not fetched, the object will not be usable for any operation other
+than reading its data. Use the L</Whole related objects> method as
+much as possible to avoid confusion in your code later.
+
+Broken means: Update will not work. Fetching other related objects
+will not work. Deleting the object will not work.
+
 =head1 COMPLEX JOINS AND STUFF
 
 =head2 Across multiple relations
@@ -114,14 +218,12 @@
   $schema->resultset('CD')->search(
     { 'Title' => 'Funky CD' },
     { join      => { 'tracks' => 'artist' },
-      '+select' => [ 'tracks.Name', 'artist.Artist' ],
-      '+as'     => [ 'TrackName', 'ArtistName' ]
     }
   );
 
 Which is:
 
-  SELECT me.ID, me.Title, me.Year, tracks.Name, artist.Artist FROM CD me JOIN Tracks tracks ON CD.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD';
+  SELECT me.ID, me.Title, me.Year FROM CD me JOIN Tracks tracks ON me.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD';
 
 To perform joins using relations of the tables you are joining to, use
 a hashref to indicate the join depth. This can theoretically go as
@@ -147,12 +249,10 @@
     { 'Title' => 'Funky CD' },
     { join      => { 'tracks' => 'artist' },
       order_by  => [ 'tracks.Name', 'artist.Artist' ],
-      '+select' => [ 'tracks.Name', 'artist.Artist' ],
-      '+as'     => [ 'TrackName', 'ArtistName' ]
     }
   );
 
-  SELECT me.ID, me.Title, me.Year, tracks.Name, artist.Artist FROM CD me JOIN Tracks tracks ON CD.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD' ORDER BY tracks.Name, artist.Artist;
+  SELECT me.ID, me.Title, me.Year FROM CD me JOIN Tracks tracks ON me.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD' ORDER BY tracks.Name, artist.Artist;
 
 This is essential if any of your tables have columns with the same names.
 

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Manual/Troubleshooting.pod
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Manual/Troubleshooting.pod	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Manual/Troubleshooting.pod	2009-12-01 10:59:18 UTC (rev 8006)
@@ -156,5 +156,16 @@
 L<https://bugzilla.redhat.com/show_bug.cgi?id=460308> and
 L<http://rhn.redhat.com/errata/RHBA-2008-0876.html>
 
+=head2 Excessive Memory Allocation with TEXT/BLOB/etc. Columns and Large LongReadLen
+
+It has been observed, using L<DBD::ODBC>, that creating a L<DBIx::Class::Row>
+object which includes a column of data type TEXT/BLOB/etc. will allocate
+LongReadLen bytes.  This allocation does not leak, but if LongReadLen
+is large, and many such row objects are created, e.g. as the
+output of a ResultSet query, the memory footprint of the Perl interpreter
+can grow very large.
+
+The solution is to use the smallest practical value for LongReadLen.
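+
+For example, a minimal sketch (the DSN, credentials, and the 64KiB
+figure are placeholders; size LongReadLen to your actual data):
+
+  my $schema = My::Schema->connect(
+    'dbi:ODBC:mydsn', $user, $pass,
+    { LongReadLen => 64 * 1024 },
+  );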
+
 =cut
 

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Ordered.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Ordered.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Ordered.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -434,10 +434,7 @@
 sub move_to_group {
     my( $self, $to_group, $to_position ) = @_;
 
-    $self->throw_exception ('move_to_group() expects a group specification')
-        unless defined $to_group;
-
-    # if we're given a string, turn it into a hashref
+    # if we're given a single value, turn it into a hashref
     unless (ref $to_group eq 'HASH') {
         my @gcols = $self->_grouping_columns;
 
@@ -504,7 +501,7 @@
     }
     else {
       my $bumped_pos_val = $self->_position_value ($to_position);
-      my @between = ($to_position, $new_group_last_position);
+      my @between = map { $self->_position_value ($_) } ($to_position, $new_group_last_position);
       $self->_shift_siblings (1, @between);   #shift right
       $self->set_column( $position_column => $bumped_pos_val );
     }
@@ -685,27 +682,9 @@
 if you are working with preexisting non-normalised position data,
 or if you need to work with materialized path columns.
 
-=head2 _position
-
-  my $num_pos = $item->_position;
-
-Returns the B<absolute numeric position> of the current object, with the
-first object being at position 1, its sibling at position 2 and so on.
-By default simply returns the value of L</position_column>.
-
-=cut
-sub _position {
-    my $self = shift;
-
-#    #the right way to do this
-#    return $self->previous_siblings->count + 1;
-
-    return $self->get_column ($self->position_column);
-}
-
 =head2 _position_from_value
 
-  my $num_pos = $item->_position_of_value ( $pos_value )
+  my $num_pos = $item->_position_from_value ( $pos_value )
 
 Returns the B<absolute numeric position> of an object with a B<position
 value> set to C<$pos_value>. By default simply returns C<$pos_value>.
@@ -867,6 +846,19 @@
     );
 }
 
+=head2 _position
+
+  my $num_pos = $item->_position;
+
+Returns the B<absolute numeric position> of the current object, with the
+first object being at position 1, its sibling at position 2 and so on.
+
+=cut
+sub _position {
+    my $self = shift;
+    return $self->_position_from_value ($self->get_column ($self->position_column) );
+}
+
 =head2 _grouping_clause
 
 This method returns one or more name=>value pairs for limiting a search

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Relationship.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Relationship.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Relationship.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -232,13 +232,13 @@
 
 =back
 
-Creates a one-to-many relationship, where the corresponding elements
-of the foreign class store the calling class's primary key in one (or
-more) of the foreign class columns. This relationship defaults to using
-the end of this classes namespace as the foreign key in C<$related_class>
-to resolve the join, unless C<$their_fk_column> specifies the foreign
-key column in C<$related_class> or C<cond> specifies a reference to a
-join condition hash.
+Creates a one-to-many relationship where the foreign class refers to
+this class's primary key. This relationship refers to zero or more
+records in the foreign table (i.e., a C<LEFT JOIN>). This relationship
+defaults to using the end of this class's namespace as the foreign key
+in C<$related_class> to resolve the join, unless C<$their_fk_column>
+specifies the foreign key column in C<$related_class> or C<cond>
+specifies a reference to a join condition hash.
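+
+For example, a minimal sketch (the class and column names are
+illustrative only):
+
+  MySchema::CD->has_many(tracks => 'MySchema::Track', 'cd_id');
+
+  # on a row object, the accessor returns zero or more related rows
+  my @tracks = $cd->tracks;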
 
 =over
 

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/ResultSet.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/ResultSet.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/ResultSet.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -7,6 +7,7 @@
         'bool'   => "_bool",
         fallback => 1;
 use Carp::Clan qw/^DBIx::Class/;
+use DBIx::Class::Exception;
 use Data::Page;
 use Storable;
 use DBIx::Class::ResultSetColumn;
@@ -356,9 +357,9 @@
   }
 
   my $rs = (ref $self)->new($self->result_source, $new_attrs);
-  if ($rows) {
-    $rs->set_cache($rows);
-  }
+
+  $rs->set_cache($rows) if ($rows);
+
   return $rs;
 }
 
@@ -518,7 +519,7 @@
     # in ::Relationship::Base::search_related (the row method), and furthermore
     # the relationship is of the 'single' type. This means that the condition
     # provided by the relationship (already attached to $self) is sufficient,
-    # as there can be only one row in the databse that would satisfy the 
+    # as there can be only one row in the database that would satisfy the
     # relationship
   }
   else {
@@ -529,7 +530,7 @@
   }
 
   # Run the query
-  my $rs = $self->search ($query, $attrs);
+  my $rs = $self->search ($query, {result_class => $self->result_class, %$attrs});
   if (keys %{$rs->_resolved_attrs->{collapse}}) {
     my $row = $rs->next;
     carp "Query returned more than one row" if $rs->next;
@@ -1239,7 +1240,7 @@
 
   my $tmp_attrs = { %$attrs };
 
-  # take off any limits, record_filter is cdbi, and no point of ordering a count 
+  # take off any limits, record_filter is cdbi, and no point of ordering a count
   delete $tmp_attrs->{$_} for (qw/select as rows offset order_by record_filter/);
 
   # overwrite the selector (supplied by the storage)
@@ -1247,7 +1248,7 @@
   $tmp_attrs->{as} = 'count';
 
   # read the comment on top of the actual function to see what this does
-  $tmp_attrs->{from} = $self->_switch_to_inner_join_if_needed (
+  $tmp_attrs->{from} = $self->result_source->schema->storage->_straight_join_to_node (
     $tmp_attrs->{from}, $tmp_attrs->{alias}
   );
 
@@ -1279,11 +1280,13 @@
   $sub_attrs->{select} = $rsrc->storage->_subq_count_select ($rsrc, $sub_attrs);
 
   # read the comment on top of the actual function to see what this does
-  $sub_attrs->{from} = $self->_switch_to_inner_join_if_needed (
+  $sub_attrs->{from} = $self->result_source->schema->storage->_straight_join_to_node (
     $sub_attrs->{from}, $sub_attrs->{alias}
   );
 
-  # this is so that ordering can be thrown away in things like Top limit
+  # this is so that the query can be simplified e.g.
+  # * non-limiting joins can be pruned
+  # * ordering can be thrown away in things like Top limit
   $sub_attrs->{-for_count_only} = 1;
 
   my $sub_rs = $rsrc->resultset_class->new ($rsrc, $sub_attrs);
@@ -1300,77 +1303,6 @@
   return $self->_count_rs ($attrs);
 }
 
-
-# The DBIC relationship chaining implementation is pretty simple - every
-# new related_relationship is pushed onto the {from} stack, and the {select}
-# window simply slides further in. This means that when we count somewhere
-# in the middle, we got to make sure that everything in the join chain is an
-# actual inner join, otherwise the count will come back with unpredictable
-# results (a resultset may be generated with _some_ rows regardless of if
-# the relation which the $rs currently selects has rows or not). E.g.
-# $artist_rs->cds->count - normally generates:
-# SELECT COUNT( * ) FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid
-# which actually returns the number of artists * (number of cds || 1)
-#
-# So what we do here is crawl {from}, determine if the current alias is at
-# the top of the stack, and if not - make sure the chain is inner-joined down
-# to the root.
-#
-sub _switch_to_inner_join_if_needed {
-  my ($self, $from, $alias) = @_;
-
-  # subqueries and other oddness is naturally not supported
-  return $from if (
-    ref $from ne 'ARRAY'
-      ||
-    @$from <= 1
-      ||
-    ref $from->[0] ne 'HASH'
-      ||
-    ! $from->[0]{-alias}
-      ||
-    $from->[0]{-alias} eq $alias
-  );
-
-  my $switch_branch;
-  JOINSCAN:
-  for my $j (@{$from}[1 .. $#$from]) {
-    if ($j->[0]{-alias} eq $alias) {
-      $switch_branch = $j->[0]{-join_path};
-      last JOINSCAN;
-    }
-  }
-
-  # something else went wrong
-  return $from unless $switch_branch;
-
-  # So it looks like we will have to switch some stuff around.
-  # local() is useless here as we will be leaving the scope
-  # anyway, and deep cloning is just too fucking expensive
-  # So replace the inner hashref manually
-  my @new_from = ($from->[0]);
-  my $sw_idx = { map { $_ => 1 } @$switch_branch };
-
-  for my $j (@{$from}[1 .. $#$from]) {
-    my $jalias = $j->[0]{-alias};
-
-    if ($sw_idx->{$jalias}) {
-      my %attrs = %{$j->[0]};
-      delete $attrs{-join_type};
-      push @new_from, [
-        \%attrs,
-        @{$j}[ 1 .. $#$j ],
-      ];
-    }
-    else {
-      push @new_from, $j;
-    }
-  }
-
-  return \@new_from;
-}
-
-
 sub _bool {
   return 1;
 }
@@ -1494,8 +1426,12 @@
 
   my $rsrc = $self->result_source;
 
+  # if a condition exists we need to strip all table qualifiers
+  # if this is not possible we'll force a subquery below
+  my $cond = $rsrc->schema->storage->_strip_cond_qualifiers ($self->{cond});
+
   my $needs_group_by_subq = $self->_has_resolved_attr (qw/collapse group_by -join/);
-  my $needs_subq = $self->_has_resolved_attr (qw/row offset/);
+  my $needs_subq = (not defined $cond) || $self->_has_resolved_attr(qw/row offset/);
 
   if ($needs_group_by_subq or $needs_subq) {
 
@@ -1543,70 +1479,11 @@
     return $rsrc->storage->$op(
       $rsrc,
       $op eq 'update' ? $values : (),
-      $self->_cond_for_update_delete,
+      $cond,
     );
   }
 }
 
-
-# _cond_for_update_delete
-#
-# update/delete require the condition to be modified to handle
-# the differing SQL syntax available.  This transforms the $self->{cond}
-# appropriately, returning the new condition.
-
-sub _cond_for_update_delete {
-  my ($self, $full_cond) = @_;
-  my $cond = {};
-
-  $full_cond ||= $self->{cond};
-  # No-op. No condition, we're updating/deleting everything
-  return $cond unless ref $full_cond;
-
-  if (ref $full_cond eq 'ARRAY') {
-    $cond = [
-      map {
-        my %hash;
-        foreach my $key (keys %{$_}) {
-          $key =~ /([^.]+)$/;
-          $hash{$1} = $_->{$key};
-        }
-        \%hash;
-      } @{$full_cond}
-    ];
-  }
-  elsif (ref $full_cond eq 'HASH') {
-    if ((keys %{$full_cond})[0] eq '-and') {
-      $cond->{-and} = [];
-      my @cond = @{$full_cond->{-and}};
-       for (my $i = 0; $i < @cond; $i++) {
-        my $entry = $cond[$i];
-        my $hash;
-        if (ref $entry eq 'HASH') {
-          $hash = $self->_cond_for_update_delete($entry);
-        }
-        else {
-          $entry =~ /([^.]+)$/;
-          $hash->{$1} = $cond[++$i];
-        }
-        push @{$cond->{-and}}, $hash;
-      }
-    }
-    else {
-      foreach my $key (keys %{$full_cond}) {
-        $key =~ /([^.]+)$/;
-        $cond->{$1} = $full_cond->{$key};
-      }
-    }
-  }
-  else {
-    $self->throw_exception("Can't update/delete on resultset with condition unless hash or array");
-  }
-
-  return $cond;
-}
-
-
 =head2 update
 
 =over 4
@@ -1793,10 +1670,19 @@
     }
     return wantarray ? @created : \@created;
   } else {
-    my ($first, @rest) = @$data;
+    my $first = $data->[0];
 
-    my @names = grep {!ref $first->{$_}} keys %$first;
-    my @rels = grep { $self->result_source->has_relationship($_) } keys %$first;
+    # if a column is a registered relationship, and is a non-blessed hash/array, consider
+    # it relationship data
+    my (@rels, @columns);
+    for (keys %$first) {
+      my $ref = ref $first->{$_};
+      $self->result_source->has_relationship($_) && ($ref eq 'ARRAY' or $ref eq 'HASH')
+        ? push @rels, $_
+        : push @columns, $_
+      ;
+    }
+
     my @pks = $self->result_source->primary_columns;
 
     ## do the belongs_to relationships
@@ -1825,17 +1711,21 @@
         delete $data->[$index]->{$rel};
         $data->[$index] = {%{$data->[$index]}, %$related};
 
-        push @names, keys %$related if $index == 0;
+        push @columns, keys %$related if $index == 0;
       }
     }
 
-    ## do bulk insert on current row
-    my @values = map { [ @$_{@names} ] } @$data;
+    ## inherit the data locked in the conditions of the resultset
+    my ($rs_data) = $self->_merge_cond_with_data({});
+    delete @{$rs_data}{@columns};
+    my @inherit_cols = keys %$rs_data;
+    my @inherit_data = values %$rs_data;
 
+    ## do bulk insert on current row
     $self->result_source->storage->insert_bulk(
       $self->result_source,
-      \@names,
-      \@values,
+      [@columns, @inherit_cols],
+      [ map { [ @$_{@columns}, @inherit_data ] } @$data ],
     );
 
     ## do the has_many relationships
@@ -1844,7 +1734,7 @@
       foreach my $rel (@rels) {
         next unless $item->{$rel} && ref $item->{$rel} eq "ARRAY";
 
-        my $parent = $self->find(map {{$_=>$item->{$_}} } @pks)
+        my $parent = $self->find({map { $_ => $item->{$_} } @pks})
      || $self->throw_exception('Cannot find the relating object.');
 
         my $child = $parent->$rel;
@@ -1972,46 +1862,66 @@
   $self->throw_exception( "new_result needs a hash" )
     unless (ref $values eq 'HASH');
 
-  my %new;
+  my ($merged_cond, $cols_from_relations) = $self->_merge_cond_with_data($values);
+
+  my %new = (
+    %$merged_cond,
+    @$cols_from_relations
+      ? (-cols_from_relations => $cols_from_relations)
+      : (),
+    -source_handle => $self->_source_handle,
+    -result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED
+  );
+
+  return $self->result_class->new(\%new);
+}
+
+# _merge_cond_with_data
+#
+# Takes a simple hash of K/V data and returns its copy merged with the
+# condition already present on the resultset. Additionally returns an
+# arrayref of value/condition names, which were inferred from related
+# objects (this is needed for in-memory related objects)
+sub _merge_cond_with_data {
+  my ($self, $data) = @_;
+
+  my (%new_data, @cols_from_relations);
+
   my $alias = $self->{attrs}{alias};
 
-  if (
-    defined $self->{cond}
-    && $self->{cond} eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION
-  ) {
-    %new = %{ $self->{attrs}{related_objects} || {} };  # nothing might have been inserted yet
-    $new{-from_resultset} = [ keys %new ] if keys %new;
-  } else {
+  if (! defined $self->{cond}) {
+    # just massage $data below
+  }
+  elsif ($self->{cond} eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION) {
+    %new_data = %{ $self->{attrs}{related_objects} || {} };  # nothing might have been inserted yet
+    @cols_from_relations = keys %new_data;
+  }
+  elsif (ref $self->{cond} ne 'HASH') {
     $self->throw_exception(
-      "Can't abstract implicit construct, condition not a hash"
-    ) if ($self->{cond} && !(ref $self->{cond} eq 'HASH'));
-
-    my $collapsed_cond = (
-      $self->{cond}
-        ? $self->_collapse_cond($self->{cond})
-        : {}
+      "Can't abstract implicit construct, resultset condition not a hash"
     );
-
+  }
+  else {
     # precedence must be given to passed values over values inherited from
     # the cond, so the order here is important.
-    my %implied =  %{$self->_remove_alias($collapsed_cond, $alias)};
-    while( my($col,$value) = each %implied ){
-      if(ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '='){
-        $new{$col} = $value->{'='};
+    my $collapsed_cond = $self->_collapse_cond($self->{cond});
+    my %implied = %{$self->_remove_alias($collapsed_cond, $alias)};
+
+    while ( my($col, $value) = each %implied ) {
+      if (ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '=') {
+        $new_data{$col} = $value->{'='};
         next;
       }
-      $new{$col} = $value if $self->_is_deterministic_value($value);
+      $new_data{$col} = $value if $self->_is_deterministic_value($value);
     }
   }
 
-  %new = (
-    %new,
-    %{ $self->_remove_alias($values, $alias) },
-    -source_handle => $self->_source_handle,
-    -result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED
+  %new_data = (
+    %new_data,
+    %{ $self->_remove_alias($data, $alias) },
   );
 
-  return $self->result_class->new(\%new);
+  return (\%new_data, \@cols_from_relations);
 }
 
 # _is_deterministic_value
@@ -2196,13 +2106,14 @@
 a unique constraint that is not the primary key, or looking for
 related rows.
 
-If you want objects to be saved immediately, use L</find_or_create> instead.
+If you want objects to be saved immediately, use L</find_or_create>
+instead.
 
-B<Note>: C<find_or_new> is probably not what you want when creating a
-new row in a table that uses primary keys supplied by the
-database. Passing in a primary key column with a value of I<undef>
-will cause L</find> to attempt to search for a row with a value of
-I<NULL>.
+B<Note>: Take care when using C<find_or_new> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<find_or_new>, even when set to C<undef>.
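+
+For example (assuming a hypothetical auto_increment C<cdid> primary
+key):
+
+  # wrong - find() will go looking for a row with a NULL cdid
+  my $cd = $rs->find_or_new({ cdid => undef, title => 'Foo' });
+
+  # right - omit the column and let the database supply the value
+  my $cd = $rs->find_or_new({ title => 'Foo' });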
 
 =cut
 
@@ -2344,11 +2255,11 @@
 the find has completed and before the create has started. To avoid
 this problem, use find_or_create() inside a transaction.
 
-B<Note>: C<find_or_create> is probably not what you want when creating
-a new row in a table that uses primary keys supplied by the
-database. Passing in a primary key column with a value of I<undef>
-will cause L</find> to attempt to search for a row with a value of
-I<NULL>.
+B<Note>: Take care when using C<find_or_create> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<find_or_create>, even when set to C<undef>.
 
 See also L</find> and L</update_or_create>. For information on how to declare
 unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
@@ -2411,11 +2322,11 @@
 See also L</find> and L</find_or_create>. For information on how to declare
 unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
 
-B<Note>: C<update_or_create> is probably not what you want when
-looking for a row in a table that uses primary keys supplied by the
-database, unless you actually have a key value. Passing in a primary
-key column with a value of I<undef> will cause L</find> to attempt to
-search for a row with a value of I<NULL>.
+B<Note>: Take care when using C<update_or_create> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<update_or_create>, even when set to C<undef>.
 
 =cut
 
@@ -2472,8 +2383,14 @@
       $cd->insert;
   }
 
-See also L</find>, L</find_or_create> and L<find_or_new>.
+B<Note>: Take care when using C<update_or_new> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<update_or_new>, even when set to C<undef>.
 
+See also L</find>, L</find_or_create> and L</find_or_new>.
+
 =cut
 
 sub update_or_new {
@@ -2556,6 +2473,23 @@
   shift->set_cache(undef);
 }
 
+=head2 is_paged
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: true if the resultset has been paginated
+
+=back
+
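+A minimal usage sketch (the C<CD> resultset is illustrative):
+
+  my $rs = $schema->resultset('CD')->search({}, { rows => 10, page => 2 });
+  my $pager = $rs->is_paged ? $rs->pager : undef;
+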
+=cut
+
+sub is_paged {
+  my ($self) = @_;
+  return !!$self->{attrs}{page};
+}
+
 =head2 related_resultset
 
 =over 4
@@ -2584,14 +2518,13 @@
         "' has no such relationship $rel")
       unless $rel_info;
 
-    my ($from,$seen) = $self->_chain_relationship($rel);
+    my $attrs = $self->_chain_relationship($rel);
 
-    my $join_count = $seen->{$rel};
+    my $join_count = $attrs->{seen_join}{$rel};
     my $alias = ($join_count > 1 ? join('_', $rel, $join_count) : $rel);
 
     #XXX - temp fix for result_class bug. There likely is a more elegant fix -groditi
-    my %attrs = %{$self->{attrs}||{}};
-    delete @attrs{qw(result_class alias)};
+    delete @{$attrs}{qw(result_class alias)};
 
     my $new_cache;
 
@@ -2612,20 +2545,14 @@
       # to work sanely (e.g. RestrictWithObject wants to be able to add
       # extra query restrictions, and these may need to be $alias.)
 
-      my $attrs = $rel_source->resultset_attributes;
-      local $attrs->{alias} = $alias;
+      my $rel_attrs = $rel_source->resultset_attributes;
+      local $rel_attrs->{alias} = $alias;
 
       $rel_source->resultset
                  ->search_rs(
                      undef, {
-                       %attrs,
-                       join => undef,
-                       prefetch => undef,
-                       select => undef,
-                       as => undef,
-                       where => $self->{cond},
-                       seen_join => $seen,
-                       from => $from,
+                       %$attrs,
+                       where => $attrs->{where},
                    });
     };
     $new->set_cache($new_cache) if $new_cache;
@@ -2683,37 +2610,58 @@
 # with a relation_chain_depth less than the depth of the
 # current prefetch is not considered)
 #
-# The increments happen in 1/2s to make it easier to correlate the
-# join depth with the join path. An integer means a relationship
-# specified via a search_related, whereas a fraction means an added
-# join/prefetch via attributes
+# The increments happen twice per join. An even number means a
+# relationship specified via a search_related, whereas an odd
+# number indicates a join/prefetch added via attributes
+#
+# Also this code will wrap the current resultset (the one we
+# chain to) in a subselect IFF it contains limiting attributes
 sub _chain_relationship {
   my ($self, $rel) = @_;
   my $source = $self->result_source;
-  my $attrs = $self->{attrs};
+  my $attrs = { %{$self->{attrs}||{}} };
 
-  my $from = [ @{
-      $attrs->{from}
-        ||
-      [{
-        -source_handle => $source->handle,
-        -alias => $attrs->{alias},
-        $attrs->{alias} => $source->from,
-      }]
-  }];
+  # we need to take the prefetch attrs into account before we
+  # ->_resolve_join as otherwise they get lost - captainL
+  my $join = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} );
 
-  my $seen = { %{$attrs->{seen_join} || {} } };
-  my $jpath = ($attrs->{seen_join} && keys %{$attrs->{seen_join}}) 
-    ? $from->[-1][0]{-join_path} 
-    : [];
+  delete @{$attrs}{qw/join prefetch collapse select as columns +select +as +columns/};
 
+  my $seen = { %{ (delete $attrs->{seen_join}) || {} } };
 
-  # we need to take the prefetch the attrs into account before we
-  # ->_resolve_join as otherwise they get lost - captainL
-  my $merged = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} );
+  my $from;
+  my @force_subq_attrs = qw/offset rows group_by having/;
 
+  if (
+    ($attrs->{from} && ref $attrs->{from} ne 'ARRAY')
+      ||
+    $self->_has_resolved_attr (@force_subq_attrs)
+  ) {
+    $from = [{
+      -source_handle => $source->handle,
+      -alias => $attrs->{alias},
+      $attrs->{alias} => $self->as_query,
+    }];
+    delete @{$attrs}{@force_subq_attrs, 'where'};
+    $seen->{-relation_chain_depth} = 0;
+  }
+  elsif ($attrs->{from}) {  #shallow copy suffices
+    $from = [ @{$attrs->{from}} ];
+  }
+  else {
+    $from = [{
+      -source_handle => $source->handle,
+      -alias => $attrs->{alias},
+      $attrs->{alias} => $source->from,
+    }];
+  }
+
+  my $jpath = ($seen->{-relation_chain_depth})
+    ? $from->[-1][0]{-join_path}
+    : [];
+
   my @requested_joins = $source->_resolve_join(
-    $merged,
+    $join,
     $attrs->{alias},
     $seen,
     $jpath,
@@ -2721,7 +2669,7 @@
 
   push @$from, @requested_joins;
 
-  $seen->{-relation_chain_depth} += 0.5;
+  $seen->{-relation_chain_depth}++;
 
   # if $self already had a join/prefetch specified on it, the requested
   # $rel might very well be already included. What we do in this case
@@ -2733,7 +2681,7 @@
   # we consider the last one thus reverse
   for my $j (reverse @requested_joins) {
     if ($rel eq $j->[0]{-join_path}[-1]) {
-      $j->[0]{-relation_chain_depth} += 0.5;
+      $j->[0]{-relation_chain_depth}++;
       $already_joined++;
       last;
     }
@@ -2743,7 +2691,7 @@
 #  for my $j (reverse @$from) {
 #    next unless ref $j eq 'ARRAY';
 #    if ($j->[0]{-join_path} && $j->[0]{-join_path}[-1] eq $rel) {
-#      $j->[0]{-relation_chain_depth} += 0.5;
+#      $j->[0]{-relation_chain_depth}++;
 #      $already_joined++;
 #      last;
 #    }
@@ -2758,9 +2706,9 @@
     );
   }
 
-  $seen->{-relation_chain_depth} += 0.5;
+  $seen->{-relation_chain_depth}++;
 
-  return ($from,$seen);
+  return {%$attrs, from => $from, seen_join => $seen};
 }
 
 # too many times we have to do $attrs = { %{$self->_resolved_attrs} }
@@ -2782,24 +2730,35 @@
 
   # build columns (as long as select isn't set) into a set of as/select hashes
   unless ( $attrs->{select} ) {
-      @colbits = map {
-          ( ref($_) eq 'HASH' )
-              ? $_
-              : {
-                  (
-                    /^\Q${alias}.\E(.+)$/
-                      ? "$1"
-                      : "$_"
-                  )
-                =>
-                  (
-                    /\./
-                      ? "$_"
-                      : "${alias}.$_"
-                  )
-            }
-      } ( ref($attrs->{columns}) eq 'ARRAY' ) ? @{ delete $attrs->{columns}} : (delete $attrs->{columns} || $source->columns );
+
+    my @cols = ( ref($attrs->{columns}) eq 'ARRAY' )
+      ? @{ delete $attrs->{columns}}
+      : (
+          ( delete $attrs->{columns} )
+            ||
+          $source->columns
+        )
+    ;
+
+    @colbits = map {
+      ( ref($_) eq 'HASH' )
+      ? $_
+      : {
+          (
+            /^\Q${alias}.\E(.+)$/
+              ? "$1"
+              : "$_"
+          )
+            =>
+          (
+            /\./
+              ? "$_"
+              : "${alias}.$_"
+          )
+        }
+    } @cols;
   }
+
   # add the additional columns on
   foreach ( 'include_columns', '+columns' ) {
       push @colbits, map {
@@ -2896,7 +2855,28 @@
   # generate the distinct induced group_by early, as prefetch will be carried via a
   # subquery (since a group_by is present)
   if (delete $attrs->{distinct}) {
-    $attrs->{group_by} ||= [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ];
+    if ($attrs->{group_by}) {
+      carp ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
+    }
+    else {
+      $attrs->{group_by} = [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ];
+
+      # add any order_by parts that are not already present in the group_by
+      # we need to be careful not to add any named functions/aggregates
+      # i.e. select => [ ... { count => 'foo', -as => 'foocount' } ... ]
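+      #
+      # e.g. (hypothetical) order_by => [ 'year', { -desc => 'title' } ]
+      # would add 'year' and 'title' to the group_by, provided they are
+      # real columns of the resultset and not already grouped on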
+      my %already_grouped = map { $_ => 1 } (@{$attrs->{group_by}});
+
+      my $storage = $self->result_source->schema->storage;
+      my $rs_column_list = $storage->_resolve_column_info ($attrs->{from});
+      my @chunks = $storage->sql_maker->_order_by_chunks ($attrs->{order_by});
+
+      for my $chunk (map { ref $_ ? @$_ : $_ } (@chunks) ) {
+        $chunk =~ s/\s+ (?: ASC|DESC ) \s* $//ix;
+        if ($rs_column_list->{$chunk} && not $already_grouped{$chunk}++) {
+          push @{$attrs->{group_by}}, $chunk;
+        }
+      }
+    }
   }
 
   $attrs->{collapse} ||= {};
@@ -2924,7 +2904,7 @@
   # even though it doesn't make much sense, this is what pre 081xx has
   # been doing
   if (my $page = delete $attrs->{page}) {
-    $attrs->{offset} = 
+    $attrs->{offset} =
       ($attrs->{rows} * ($page - 1))
             +
       ($attrs->{offset} || 0)
@@ -2942,8 +2922,8 @@
 
   my $cur_depth = $seen->{-relation_chain_depth} || 0;
 
-  if (int ($cur_depth) != $cur_depth) {
-    $self->throw_exception ("-relation_chain_depth is not an integer, something went horribly wrong ($cur_depth)");
+  if ($cur_depth % 2) {
+    $self->throw_exception ("-relation_chain_depth is not even, something went horribly wrong ($cur_depth)");
   }
 
   for my $j (@$fromspec) {
@@ -2954,7 +2934,7 @@
     my $jpath = $j->[0]{-join_path};
 
     my $p = $paths;
-    $p = $p->{$_} ||= {} for @{$jpath}[$cur_depth .. $#$jpath];
+    $p = $p->{$_} ||= {} for @{$jpath}[$cur_depth/2 .. $#$jpath]; #only even depths are actual jpath boundaries
     push @{$p->{-join_aliases} }, $j->[0]{-alias};
   }
 
@@ -3091,12 +3071,13 @@
 
 sub throw_exception {
   my $self=shift;
+
   if (ref $self && $self->_source_handle->schema) {
     $self->_source_handle->schema->throw_exception(@_)
-  } else {
-    croak(@_);
   }
-
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 # XXX: FIXME: Attributes docs need clearing up
@@ -3118,7 +3099,7 @@
 
 =back
 
-Which column(s) to order the results by. 
+Which column(s) to order the results by.
 
 [The full list of suitable values is documented in
 L<SQL::Abstract/"ORDER BY CLAUSES">; the following is a summary of
@@ -3213,6 +3194,9 @@
 attribute, the column names returned are storage-dependent. E.g. MySQL would
 return a column named C<count(employeeid)> in the above example.
 
+B<NOTE:> You will almost always need a corresponding 'as' entry when you use
+'select'.
+
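+A minimal sketch (column and alias names are illustrative):
+
+  $rs->search({}, {
+    select => [ 'name', { count => 'employeeid' } ],
+    as     => [ 'name', 'employee_count' ],
+  });
+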
 =head2 +select
 
 =over 4
@@ -3410,12 +3394,12 @@
 
 =over 4
 
-=item * 
+=item *
 
 Prefetch uses the L</cache> to populate the prefetched relationships. This
 may or may not be what you want.
 
-=item * 
+=item *
 
 If you specify a condition on a prefetched relationship, ONLY those
 rows that match the prefetched condition will be fetched into that relationship.
@@ -3515,7 +3499,8 @@
 
 =back
 
-Set to 1 to group by all columns.
+Set to 1 to group by all columns. If the resultset already has a group_by
+attribute, this setting is ignored and an appropriate warning is issued.
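+
+For example (resultset name is illustrative):
+
+  # equivalent to group_by => [ <all selected columns> ]
+  my $rs = $schema->resultset('CD')->search({}, { distinct => 1 });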
 
 =head2 where
 
@@ -3526,8 +3511,8 @@
   # only return rows WHERE deleted IS NULL for all searches
   __PACKAGE__->resultset_attributes({ where => { deleted => undef } });
 
-Can be overridden by passing C<{ where => undef }> as an attribute
-to a resulset.
+Can be overridden by passing C<< { where => undef } >> as an attribute
+to a resultset.
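+
+For example (class name is illustrative):
+
+  # bring back the deleted rows too, for this search only
+  my $rs = $schema->resultset('MyClass')->search({}, { where => undef });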
 
 =back
 
@@ -3549,177 +3534,6 @@
 For more examples of using these attributes, see
 L<DBIx::Class::Manual::Cookbook>.
 
-=head2 from
-
-=over 4
-
-=item Value: \@from_clause
-
-=back
-
-The C<from> attribute gives you manual control over the C<FROM> clause of SQL
-statements generated by L<DBIx::Class>, allowing you to express custom C<JOIN>
-clauses.
-
-NOTE: Use this on your own risk.  This allows you to shoot off your foot!
-
-C<join> will usually do what you need and it is strongly recommended that you
-avoid using C<from> unless you cannot achieve the desired result using C<join>.
-And we really do mean "cannot", not just tried and failed. Attempting to use
-this because you're having problems with C<join> is like trying to use x86
-ASM because you've got a syntax error in your C. Trust us on this.
-
-Now, if you're still really, really sure you need to use this (and if you're
-not 100% sure, ask the mailing list first), here's an explanation of how this
-works.
-
-The syntax is as follows -
-
-  [
-    { <alias1> => <table1> },
-    [
-      { <alias2> => <table2>, -join_type => 'inner|left|right' },
-      [], # nested JOIN (optional)
-      { <table1.column1> => <table2.column2>, ... (more conditions) },
-    ],
-    # More of the above [ ] may follow for additional joins
-  ]
-
-  <table1> <alias1>
-  JOIN
-    <table2> <alias2>
-    [JOIN ...]
-  ON <table1.column1> = <table2.column2>
-  <more joins may follow>
-
-An easy way to follow the examples below is to remember the following:
-
-    Anything inside "[]" is a JOIN
-    Anything inside "{}" is a condition for the enclosing JOIN
-
-The following examples utilize a "person" table in a family tree application.
-In order to express parent->child relationships, this table is self-joined:
-
-    # Person->belongs_to('father' => 'Person');
-    # Person->belongs_to('mother' => 'Person');
-
-C<from> can be used to nest joins. Here we return all children with a father,
-then search against all mothers of those children:
-
-  $rs = $schema->resultset('Person')->search(
-      undef,
-      {
-          alias => 'mother', # alias columns in accordance with "from"
-          from => [
-              { mother => 'person' },
-              [
-                  [
-                      { child => 'person' },
-                      [
-                          { father => 'person' },
-                          { 'father.person_id' => 'child.father_id' }
-                      ]
-                  ],
-                  { 'mother.person_id' => 'child.mother_id' }
-              ],
-          ]
-      },
-  );
-
-  # Equivalent SQL:
-  # SELECT mother.* FROM person mother
-  # JOIN (
-  #   person child
-  #   JOIN person father
-  #   ON ( father.person_id = child.father_id )
-  # )
-  # ON ( mother.person_id = child.mother_id )
-
-The type of any join can be controlled manually. To search against only people
-with a father in the person table, we could explicitly use C<INNER JOIN>:
-
-    $rs = $schema->resultset('Person')->search(
-        undef,
-        {
-            alias => 'child', # alias columns in accordance with "from"
-            from => [
-                { child => 'person' },
-                [
-                    { father => 'person', -join_type => 'inner' },
-                    { 'father.id' => 'child.father_id' }
-                ],
-            ]
-        },
-    );
-
-    # Equivalent SQL:
-    # SELECT child.* FROM person child
-    # INNER JOIN person father ON child.father_id = father.id
-
-You can select from a subquery by passing a resultset to from as follows.
-
-    $schema->resultset('Artist')->search( 
-        undef, 
-        {   alias => 'artist2',
-            from  => [ { artist2 => $artist_rs->as_query } ],
-        } );
-
-    # and you'll get sql like this..
-    # SELECT artist2.artistid, artist2.name, artist2.rank, artist2.charfield FROM 
-    #   ( SELECT me.artistid, me.name, me.rank, me.charfield FROM artists me ) artist2
-
-If you need to express really complex joins, you
-can supply literal SQL to C<from> via a scalar reference. In this case
-the contents of the scalar will replace the table name associated with the
-resultsource.
-
-WARNING: This technique might very well not work as expected on chained
-searches - you have been warned.
-
-    # Assuming the Event resultsource is defined as:
-
-        MySchema::Event->add_columns (
-            sequence => {
-                data_type => 'INT',
-                is_auto_increment => 1,
-            },
-            location => {
-                data_type => 'INT',
-            },
-            type => {
-                data_type => 'INT',
-            },
-        );
-        MySchema::Event->set_primary_key ('sequence');
-
-    # This will get back the latest event for every location. The column
-    # selector is still provided by DBIC, all we do is add a JOIN/WHERE
-    # combo to limit the resultset
-
-    $rs = $schema->resultset('Event');
-    $table = $rs->result_source->name;
-    $latest = $rs->search (
-        undef,
-        { from => \ "
-            (SELECT e1.* FROM $table e1
-                JOIN $table e2
-                    ON e1.location = e2.location
-                    AND e1.sequence < e2.sequence
-                WHERE e2.sequence is NULL
-            ) me",
-        },
-    );
-
-    # Equivalent SQL (with the DBIC chunks added):
-
-    SELECT me.sequence, me.location, me.type FROM
-       (SELECT e1.* FROM events e1
-           JOIN events e2
-               ON e1.location = e2.location
-               AND e1.sequence < e2.sequence
-           WHERE e2.sequence is NULL
-       ) me;
-
 =head2 for
 
 =over 4

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/ResultSetColumn.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/ResultSetColumn.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/ResultSetColumn.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -1,7 +1,12 @@
 package DBIx::Class::ResultSetColumn;
+
 use strict;
 use warnings;
+
 use base 'DBIx::Class';
+
+use Carp::Clan qw/^DBIx::Class/;
+use DBIx::Class::Exception;
 use List::Util;
 
 =head1 NAME
@@ -61,7 +66,7 @@
   my $select = defined $as_index ? $select_list->[$as_index] : $column;
 
   # {collapse} would mean a has_many join was injected, which in turn means
-  # we need to group IF WE CAN (only if the column in question is unique)
+  # we need to group *IF WE CAN* (only if the column in question is unique)
   if (!$new_attrs->{group_by} && keys %{$orig_attrs->{collapse}}) {
 
     # scan for a constraint that would contain our column only - that'd be proof
@@ -76,9 +81,17 @@
 
       if ($col eq $select or $fqcol eq $select) {
         $new_attrs->{group_by} = [ $select ];
+        delete $new_attrs->{distinct}; # it is ignored when group_by is present
         last;
       }
     }
+
+    if (!$new_attrs->{group_by}) {
+      carp (
+          "Attempting to retrieve non-unique column '$column' on a resultset containing "
+        . 'one-to-many joins will return duplicate results.'
+      );
+    }
   }
 
   my $new = bless { _select => $select, _as => $column, _parent_resultset => $new_parent_rs }, $class;
@@ -414,11 +427,13 @@
 
 sub throw_exception {
   my $self=shift;
+
   if (ref $self && $self->{_parent_resultset}) {
-    $self->{_parent_resultset}->throw_exception(@_)
-  } else {
-    croak(@_);
+    $self->{_parent_resultset}->throw_exception(@_);
   }
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 # _resultset

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/ResultSource.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/ResultSource.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/ResultSource.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -5,6 +5,8 @@
 
 use DBIx::Class::ResultSet;
 use DBIx::Class::ResultSourceHandle;
+
+use DBIx::Class::Exception;
 use Carp::Clan qw/^DBIx::Class/;
 
 use base qw/DBIx::Class/;
@@ -387,7 +389,7 @@
   my $self = shift;
   $self->throw_exception(
     "columns() is a read-only accessor, did you mean add_columns()?"
-  ) if (@_ > 1);
+  ) if @_;
   return @{$self->{_ordered_columns}||[]};
 }
 
@@ -1194,7 +1196,7 @@
 
 # Returns the {from} structure used to express JOIN conditions
 sub _resolve_join {
-  my ($self, $join, $alias, $seen, $jpath, $force_left) = @_;
+  my ($self, $join, $alias, $seen, $jpath, $parent_force_left) = @_;
 
   # we need a supplied one, because we do in-place modifications, no returns
   $self->throw_exception ('You must supply a seen hashref as the 3rd argument to _resolve_join')
@@ -1205,47 +1207,56 @@
 
   $jpath = [@$jpath];
 
-  if (ref $join eq 'ARRAY') {
+  if (not defined $join) {
+    return ();
+  }
+  elsif (ref $join eq 'ARRAY') {
     return
       map {
-        $self->_resolve_join($_, $alias, $seen, $jpath, $force_left);
+        $self->_resolve_join($_, $alias, $seen, $jpath, $parent_force_left);
       } @$join;
-  } elsif (ref $join eq 'HASH') {
-    return
-      map {
-        my $as = ($seen->{$_} ? join ('_', $_, $seen->{$_} + 1) : $_);  # the actual seen value will be incremented below
-        local $force_left->{force} = $force_left->{force};
-        (
-          $self->_resolve_join($_, $alias, $seen, [@$jpath], $force_left),
-          $self->related_source($_)->_resolve_join(
-            $join->{$_}, $as, $seen, [@$jpath, $_], $force_left
-          )
-        );
-      } keys %$join;
-  } elsif (ref $join) {
-    $self->throw_exception("No idea how to resolve join reftype ".ref $join);
-  } else {
+  }
+  elsif (ref $join eq 'HASH') {
 
-    return() unless defined $join;
+    my @ret;
+    for my $rel (keys %$join) {
 
+      my $rel_info = $self->relationship_info($rel)
+        or $self->throw_exception("No such relationship ${rel}");
+
+      my $force_left = $parent_force_left;
+      $force_left ||= lc($rel_info->{attrs}{join_type}||'') eq 'left';
+
+      # the actual seen value will be incremented by the recursion
+      my $as = ($seen->{$rel} ? join ('_', $rel, $seen->{$rel} + 1) : $rel);
+
+      push @ret, (
+        $self->_resolve_join($rel, $alias, $seen, [@$jpath], $force_left),
+        $self->related_source($rel)->_resolve_join(
+          $join->{$rel}, $as, $seen, [@$jpath, $rel], $force_left
+        )
+      );
+    }
+    return @ret;
+
+  }
+  elsif (ref $join) {
+    $self->throw_exception("No idea how to resolve join reftype ".ref $join);
+  }
+  else {
     my $count = ++$seen->{$join};
     my $as = ($count > 1 ? "${join}_${count}" : $join);
 
-    my $rel_info = $self->relationship_info($join);
-    $self->throw_exception("No such relationship ${join}") unless $rel_info;
-    my $type;
-    if ($force_left) {
-      $type = 'left';
-    }
-    else {
-      $type = $rel_info->{attrs}{join_type};
-      $force_left = 1 if lc($type||'') eq 'left';
-    }
+    my $rel_info = $self->relationship_info($join)
+      or $self->throw_exception("No such relationship ${join}");
 
     my $rel_src = $self->related_source($join);
     return [ { $as => $rel_src->from,
                -source_handle => $rel_src->handle,
-               -join_type => $type,
+               -join_type => $parent_force_left
+                  ? 'left'
+                  : $rel_info->{attrs}{join_type}
+                ,
                -join_path => [@$jpath, $join],
                -alias => $as,
                -relation_chain_depth => $seen->{-relation_chain_depth} || 0,
@@ -1439,7 +1450,10 @@
   my ($self, $pre, $alias, $alias_map, $order, $collapse, $pref_path) = @_;
   $pref_path ||= [];
 
-  if( ref $pre eq 'ARRAY' ) {
+  if (not defined $pre) {
+    return ();
+  }
+  elsif( ref $pre eq 'ARRAY' ) {
     return
       map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ) }
         @$pre;
@@ -1579,11 +1593,13 @@
 
 sub throw_exception {
   my $self = shift;
+
   if (defined $self->schema) {
     $self->schema->throw_exception(@_);
-  } else {
-    croak(@_);
   }
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 =head2 source_info

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Row.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Row.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Row.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -4,9 +4,9 @@
 use warnings;
 
 use base qw/DBIx::Class/;
-use Carp::Clan qw/^DBIx::Class/;
+
+use DBIx::Class::Exception;
 use Scalar::Util ();
-use Scope::Guard;
 
 ###
 ### Internal method
@@ -155,7 +155,7 @@
     $new->result_source($source);
   }
 
-  if (my $related = delete $attrs->{-from_resultset}) {
+  if (my $related = delete $attrs->{-cols_from_relations}) {
     @{$new->{_ignore_at_insert}={}}{@$related} = ();
   }
 
@@ -168,7 +168,8 @@
     foreach my $key (keys %$attrs) {
       if (ref $attrs->{$key}) {
         ## Can we extract this lot to use with update(_or .. ) ?
-        confess "Can't do multi-create without result source" unless $source;
+        $new->throw_exception("Can't do multi-create without result source")
+          unless $source;
         my $info = $source->relationship_info($key);
         if ($info && $info->{attrs}{accessor}
           && $info->{attrs}{accessor} eq 'single')
@@ -423,7 +424,7 @@
 sub in_storage {
   my ($self, $val) = @_;
   $self->{_in_storage} = $val if @_ > 1;
-  return $self->{_in_storage};
+  return $self->{_in_storage} ? 1 : 0;
 }
 
 =head2 update
@@ -750,10 +751,27 @@
 
 sub get_inflated_columns {
   my $self = shift;
-  return map {
-    my $accessor = $self->column_info($_)->{'accessor'} || $_;
-    ($_ => $self->$accessor);
-  } grep $self->has_column_loaded($_), $self->columns;
+
+  my %loaded_colinfo = (map
+    { $_ => $self->column_info($_) }
+    (grep { $self->has_column_loaded($_) } $self->columns)
+  );
+
+  my %inflated;
+  for my $col (keys %loaded_colinfo) {
+    if (exists $loaded_colinfo{$col}{accessor}) {
+      my $acc = $loaded_colinfo{$col}{accessor};
+      if (defined $acc) {
+        $inflated{$col} = $self->$acc;
+      }
+    }
+    else {
+      $inflated{$col} = $self->$col;
+    }
+  }
+
+  # return all loaded columns with the inflations overlaid on top
+  return ($self->get_columns, %inflated);
 }
 
 =head2 set_column
@@ -1330,11 +1348,13 @@
 
 sub throw_exception {
   my $self=shift;
+
   if (ref $self && ref $self->result_source && $self->result_source->schema) {
-    $self->result_source->schema->throw_exception(@_);
-  } else {
-    croak(@_);
+    $self->result_source->schema->throw_exception(@_)
   }
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 =head2 id

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/SQLAHacks.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/SQLAHacks.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/SQLAHacks.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -9,6 +9,7 @@
 use strict;
 use warnings;
 use Carp::Clan qw/^DBIx::Class|^SQL::Abstract/;
+use Sub::Name();
 
 BEGIN {
   # reinstall the carp()/croak() functions imported into SQL::Abstract
@@ -18,17 +19,15 @@
   for my $f (qw/carp croak/) {
 
     my $orig = \&{"SQL::Abstract::$f"};
-    *{"SQL::Abstract::$f"} = sub {
-
-      local $Carp::CarpLevel = 1;   # even though Carp::Clan ignores this, $orig will not
-
-      if (Carp::longmess() =~ /DBIx::Class::SQLAHacks::[\w]+ .+? called \s at/x) {
-        __PACKAGE__->can($f)->(@_);
-      }
-      else {
-        $orig->(@_);
-      }
-    }
+    *{"SQL::Abstract::$f"} = Sub::Name::subname "SQL::Abstract::$f" =>
+      sub {
+        if (Carp::longmess() =~ /DBIx::Class::SQLAHacks::[\w]+ .+? called \s at/x) {
+          __PACKAGE__->can($f)->(@_);
+        }
+        else {
+          goto $orig;
+        }
+      };
   }
 }
 
@@ -47,53 +46,7 @@
   $self;
 }
 
-# Some databases (sqlite) do not handle multiple parenthesis
-# around in/between arguments. A tentative x IN ( (1, 2 ,3) )
-# is interpreted as x IN 1 or something similar.
-#
-# Since we currently do not have access to the SQLA AST, resort
-# to barbaric mutilation of any SQL supplied in literal form
-sub _strip_outer_paren {
-  my ($self, $arg) = @_;
 
-  return $self->_SWITCH_refkind ($arg, {
-    ARRAYREFREF => sub {
-      $$arg->[0] = __strip_outer_paren ($$arg->[0]);
-      return $arg;
-    },
-    SCALARREF => sub {
-      return \__strip_outer_paren( $$arg );
-    },
-    FALLBACK => sub {
-      return $arg
-    },
-  });
-}
-
-sub __strip_outer_paren {
-  my $sql = shift;
-
-  if ($sql and not ref $sql) {
-    while ($sql =~ /^ \s* \( (.*) \) \s* $/x ) {
-      $sql = $1;
-    }
-  }
-
-  return $sql;
-}
-
-sub _where_field_IN {
-  my ($self, $lhs, $op, $rhs) = @_;
-  $rhs = $self->_strip_outer_paren ($rhs);
-  return $self->SUPER::_where_field_IN ($lhs, $op, $rhs);
-}
-
-sub _where_field_BETWEEN {
-  my ($self, $lhs, $op, $rhs) = @_;
-  $rhs = $self->_strip_outer_paren ($rhs);
-  return $self->SUPER::_where_field_BETWEEN ($lhs, $op, $rhs);
-}
-
 # Slow but ANSI standard Limit/Offset support. DB2 uses this
 sub _RowNumberOver {
   my ($self, $sql, $order, $rows, $offset ) = @_;

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Schema/Versioned.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Schema/Versioned.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Schema/Versioned.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -472,9 +472,13 @@
   my ($self, $args) = @_;
 
   $args = {} unless $args;
+
   $self->{vschema} = DBIx::Class::Version->connect(@{$self->storage->connect_info()});
   my $vtable = $self->{vschema}->resultset('Table');
 
+  # useful when connecting from scripts etc
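+  # (e.g. connect($dsn, $user, $pass, { ignore_version => 1 }), or set
+  # the DBIC_NO_VERSION_CHECK environment variable)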
+  return if ($args->{ignore_version} || ($ENV{DBIC_NO_VERSION_CHECK} && !exists $args->{ignore_version}));
+
   # check for legacy versions table and move to new if exists
   my $vschema_compat = DBIx::Class::VersionCompat->connect(@{$self->storage->connect_info()});
   unless ($self->_source_exists($vtable)) {
@@ -486,8 +490,6 @@
     }
   }
 
-  # useful when connecting from scripts etc
-  return if ($args->{ignore_version} || ($ENV{DBIC_NO_VERSION_CHECK} && !exists $args->{ignore_version}));
   my $pversion = $self->get_db_version();
 
   if($pversion eq $self->schema_version)
@@ -520,11 +522,11 @@
     return;
   }
 
-  $self->throw_exception($self->_sqlt_version_error)
-    if (not $self->_sqlt_version_ok);
+  $self->throw_exception($self->storage->_sqlt_version_error)
+    if (not $self->storage->_sqlt_version_ok);
 
-  my $db_tr = SQL::Translator->new({ 
-                                    add_drop_table => 1, 
+  my $db_tr = SQL::Translator->new({
+                                    add_drop_table => 1,
                                     parser => 'DBI',
                                     parser_args => { dbh => $self->storage->dbh }
                                    });

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Schema.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Schema.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Schema.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -406,12 +406,10 @@
 
 Set the storage class that will be instantiated when L</connect> is called.
 If the classname starts with C<::>, the prefix C<DBIx::Class::Storage> is
-assumed by L</connect>.  
+assumed by L</connect>.
 
 You want to use this to set subclasses of L<DBIx::Class::Storage::DBI>
-in cases where the appropriate subclass is not autodetected, such as
-when dealing with MSSQL via L<DBD::Sybase>, in which case you'd set it
-to C<::DBI::Sybase::MSSQL>.
+in cases where the appropriate subclass is not autodetected.
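+
+For example, to force the replicated storage (a sketch; see
+L<DBIx::Class::Storage::DBI::Replicated> for the arguments it expects):
+
+  MySchema->storage_type('::DBI::Replicated');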
 
 If your storage type requires instantiation arguments, those are
 defined as a second argument in the form of a hashref and the entire
@@ -631,13 +629,13 @@
 This interface is preferred over using the individual methods L</txn_begin>,
 L</txn_commit>, and L</txn_rollback> below.
 
-WARNING: If you are connected with C<AutoCommit => 0> the transaction is
+WARNING: If you are connected with C<< AutoCommit => 0 >> the transaction is
 considered nested, and you will still need to call L</txn_commit> to write your
-changes when appropriate. You will also want to connect with C<auto_savepoint =>
-1> to get partial rollback to work, if the storage driver for your database
+changes when appropriate. You will also want to connect with C<< auto_savepoint =>
+1 >> to get partial rollback to work, if the storage driver for your database
 supports it.
 
-Connecting with C<AutoCommit => 1> is recommended.
+Connecting with C<< AutoCommit => 1 >> is recommended.
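+
+For instance (schema class name is illustrative):
+
+  my $schema = MySchema->connect($dsn, $user, $pass, { AutoCommit => 1 });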
 
 =cut
 
@@ -910,7 +908,7 @@
     no strict 'refs';
     no warnings 'redefine';
     foreach my $meth (qw/class source resultset/) {
-      *{"${target}::${meth}"} =
+      *{"${target}::${meth}"} = Sub::Name::subname "${target}::${meth}" =>
         sub { shift->schema->$meth(@_) };
     }
   }

Added: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -0,0 +1,45 @@
+package DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server;
+
+use strict;
+use warnings;
+
+use base qw/
+  DBIx::Class::Storage::DBI::ADO
+  DBIx::Class::Storage::DBI::MSSQL
+/;
+use mro 'c3';
+
+sub _rebless {
+  my $self = shift;
+  $self->_identity_method('@@identity');
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server - Support for Microsoft
+SQL Server via DBD::ADO
+
+=head1 SYNOPSIS
+
+This subclass supports MSSQL server connections via L<DBD::ADO>.
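+
+A connection sketch (the provider string and credentials are illustrative):
+
+  my $schema = MySchema->connect(
+    'dbi:ADO:Provider=SQLOLEDB;Data Source=myhost;Initial Catalog=mydb',
+    $user, $pass,
+  );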
+
+=head1 DESCRIPTION
+
+The MSSQL specific functionality is provided by
+L<DBIx::Class::Storage::DBI::MSSQL>.
+
+C<_identity_method> is set to C<@@identity>, as C<SCOPE_IDENTITY()> doesn't work
+with L<DBD::ADO>. See L<DBIx::Class::Storage::DBI::MSSQL/IMPLEMENTATION NOTES>
+for caveats regarding this.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Added: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/ADO.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/ADO.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/ADO.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -0,0 +1,42 @@
+package # hide from PAUSE
+    DBIx::Class::Storage::DBI::ADO;
+
+use base 'DBIx::Class::Storage::DBI';
+
+sub _rebless {
+  my $self = shift;
+
+# check for MSSQL
+# XXX This should be using an OpenSchema method of some sort, but I don't know
+# how.
+# Current version is stolen from Sybase.pm
+  my $dbtype = eval {
+    @{$self->_get_dbh
+      ->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})
+    }[2]
+  };
+
+  unless ($@) {
+    $dbtype =~ s/\W/_/gi;
+    my $subclass = "DBIx::Class::Storage::DBI::ADO::${dbtype}";
+    if ($self->load_optional_class($subclass) && !$self->isa($subclass)) {
+      bless $self, $subclass;
+      $self->_rebless;
+    }
+  }
+}
+
+# set cursor type here, if necessary
+#sub _dbh_sth {
+#  my ($self, $dbh, $sql) = @_;
+#
+#  my $sth = $self->disable_sth_caching
+#    ? $dbh->prepare($sql, { CursorType => 'adOpenStatic' })
+#    : $dbh->prepare_cached($sql, { CursorType => 'adOpenStatic' }, 3);
+#
+#  $self->throw_exception($dbh->errstr) if !$sth;
+#
+#  $sth;
+#}
+
+1;

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/AutoCast.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/AutoCast.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/AutoCast.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -10,7 +10,7 @@
 
 =head1 NAME
 
-DBIx::Class::Storage::DBI::AutoCast
+DBIx::Class::Storage::DBI::AutoCast - Storage component for RDBMS requiring explicit placeholder typing
 
 =head1 SYNOPSIS
 
@@ -29,6 +29,10 @@
 
   CAST(? as $mapped_type)
 
+This option can also be enabled in L<DBIx::Class::Storage::DBI/connect_info> as:
+
+  on_connect_call => ['set_auto_cast']
+
 =cut
 
 sub _prep_for_execute {
@@ -60,9 +64,29 @@
   return ($sql, $bind);
 }
 
+=head2 connect_call_set_auto_cast
 
-=head1 AUTHORS
+Executes:
 
+  $schema->storage->auto_cast(1);
+
+on connection.
+
+Used as:
+
+    on_connect_call => ['set_auto_cast']
+
+in L<DBIx::Class::Storage::DBI/connect_info>.
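+
+For example (connection details are illustrative):
+
+  my $schema = MySchema->connect($dsn, $user, $pass, {
+    on_connect_call => ['set_auto_cast'],
+  });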
+
+=cut
+
+sub connect_call_set_auto_cast {
+  my $self = shift;
+  $self->auto_cast(1);
+}
+
+=head1 AUTHOR
+
 See L<DBIx::Class/CONTRIBUTORS>
 
 =head1 LICENSE

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Cursor.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Cursor.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Cursor.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -5,6 +5,10 @@
 
 use base qw/DBIx::Class::Cursor/;
 
+__PACKAGE__->mk_group_accessors('simple' =>
+    qw/sth/
+);
+
 =head1 NAME
 
 DBIx::Class::Storage::DBI::Cursor - Object representing a query cursor on a
@@ -73,24 +77,24 @@
       && $self->{attrs}{rows}
         && $self->{pos} >= $self->{attrs}{rows}
   ) {
-    $self->{sth}->finish if $self->{sth}->{Active};
-    delete $self->{sth};
+    $self->sth->finish if $self->sth->{Active};
+    $self->sth(undef);
     $self->{done} = 1;
   }
   return if $self->{done};
-  unless ($self->{sth}) {
-    $self->{sth} = ($storage->_select(@{$self->{args}}))[1];
+  unless ($self->sth) {
+    $self->sth(($storage->_select(@{$self->{args}}))[1]);
     if ($self->{attrs}{software_limit}) {
       if (my $offset = $self->{attrs}{offset}) {
-        $self->{sth}->fetch for 1 .. $offset;
+        $self->sth->fetch for 1 .. $offset;
       }
     }
   }
-  my @row = $self->{sth}->fetchrow_array;
+  my @row = $self->sth->fetchrow_array;
   if (@row) {
     $self->{pos}++;
   } else {
-    delete $self->{sth};
+    $self->sth(undef);
     $self->{done} = 1;
   }
   return @row;
@@ -120,8 +124,8 @@
   my ($storage, $dbh, $self) = @_;
 
   $self->_check_dbh_gen;
-  $self->{sth}->finish if $self->{sth}->{Active};
-  delete $self->{sth};
+  $self->sth->finish if $self->sth && $self->sth->{Active};
+  $self->sth(undef);
   my ($rv, $sth) = $storage->_select(@{$self->{args}});
   return @{$sth->fetchall_arrayref};
 }
@@ -146,7 +150,7 @@
   my ($self) = @_;
 
   # No need to care about failures here
-  eval { $self->{sth}->finish if $self->{sth} && $self->{sth}->{Active} };
+  eval { $self->sth->finish if $self->sth && $self->sth->{Active} };
   $self->_soft_reset;
   return undef;
 }
@@ -154,7 +158,7 @@
 sub _soft_reset {
   my ($self) = @_;
 
-  delete $self->{sth};
+  $self->sth(undef);
   delete $self->{done};
   $self->{pos} = 0;
 }
@@ -173,7 +177,7 @@
 
   # None of the reasons this would die matter if we're in DESTROY anyways
   local $@;
-  eval { $self->{sth}->finish if $self->{sth} && $self->{sth}->{Active} };
+  eval { $self->sth->finish if $self->sth && $self->sth->{Active} };
 }
 
 1;

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/MSSQL.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/MSSQL.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/MSSQL.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -160,7 +160,7 @@
 
     # this should bring back the result of SELECT SCOPE_IDENTITY() we tacked
     # on in _prep_for_execute above
-    my ($identity) = $sth->fetchrow_array;
+    my ($identity) = eval { $sth->fetchrow_array };
 
     # SCOPE_IDENTITY failed, but we can do something else
     if ( (! $identity) && $self->_identity_method) {

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/NoBindVars.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/NoBindVars.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -40,24 +40,32 @@
 sub _prep_for_execute {
   my $self = shift;
 
-  my ($op, $extra_bind, $ident) = @_;
-
   my ($sql, $bind) = $self->next::method(@_);
 
-  # stringify args, quote via $dbh, and manually insert
+  # stringify bind args, quote via $dbh, and manually insert
+  #my ($op, $extra_bind, $ident, $args) = @_;
+  my $ident = $_[2];
 
   my @sql_part = split /\?/, $sql;
   my $new_sql;
 
+  my $col_info = $self->_resolve_column_info($ident, [ map $_->[0], @$bind ]);
+
   foreach my $bound (@$bind) {
     my $col = shift @$bound;
-    my $datatype = 'FIXME!!!';
+
+    my $datatype = $col_info->{$col}{data_type};
+
     foreach my $data (@$bound) {
-        if(ref $data) {
-            $data = ''.$data;
-        }
-        $data = $self->_dbh->quote($data);
-        $new_sql .= shift(@sql_part) . $data;
+      $data = ''.$data if ref $data;
+
+      $data = $self->_prep_interpolated_value($datatype, $data)
+        if $datatype;
+
+      $data = $self->_dbh->quote($data)
+        unless $self->interpolate_unquoted($datatype, $data);
+
+      $new_sql .= shift(@sql_part) . $data;
     }
   }
   $new_sql .= join '', @sql_part;
@@ -65,12 +73,44 @@
   return ($new_sql, []);
 }
 
+=head2 interpolate_unquoted
+
+This method is called by L</_prep_for_execute> for every column in
+order to determine if its value should be quoted or not. The arguments
+are the current column data type and the actual bind value. The return
+value is interpreted as: true - do not quote, false - do quote. You should
+override this in your Storage::DBI::<database> subclass if your RDBMS
+does not like quotes around certain datatypes (e.g. Sybase and integer
+columns). The default method always returns false (do quote).
+
+ WARNING!!!
+
+ Always validate that the bind-value is valid for the current datatype.
+ Otherwise you may very well open the door to SQL injection attacks.
+
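+A minimal override sketch for a hypothetical subclass (the integer test
+is illustrative):
+
+  sub interpolate_unquoted {
+    my $self = shift;
+    my ($datatype, $value) = @_;
+
+    # interpolate plain integers without quotes, quote everything else
+    return 1 if ($datatype || '') =~ /int/i
+      && $value =~ /\A [+-]? \d+ \z/x;
+
+    return $self->next::method(@_);
+  }
+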
+=cut
+
+sub interpolate_unquoted {
+  #my ($self, $datatype, $value) = @_;
+  return 0;
+}
+
+=head2 _prep_interpolated_value
+
+Given a datatype and the value to be inserted directly into a SQL query, returns
+the necessary string to represent that value (by e.g. adding a '$' sign)
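+
+A sketch modeled on the Sybase C<money> handling later in this commit:
+
+  sub _prep_interpolated_value {
+    my ($self, $datatype, $value) = @_;
+
+    # prefix money values with the '$' sign the RDBMS expects
+    $value =~ s/\A(?!\$)/\$/ if defined $value && ($datatype || '') =~ /money/i;
+
+    return $value;
+  }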
+
+=cut
+
+sub _prep_interpolated_value {
+  #my ($self, $datatype, $value) = @_;
+  return $_[2];
+}
+
 =head1 AUTHORS
 
-Brandon Black <blblack at gmail.com>
+See L<DBIx::Class/CONTRIBUTORS>
 
-Trym Skaar <trym at tryms.no>
-
 =head1 LICENSE
 
 You may distribute this code under the same terms as Perl itself.

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -5,7 +5,6 @@
 use base qw/DBIx::Class::Storage::DBI::MSSQL/;
 use mro 'c3';
 
-use Carp::Clan qw/^DBIx::Class/;
 use List::Util();
 use Scalar::Util ();
 
@@ -62,7 +61,7 @@
   my $self = shift;
 
   if (ref($self->_dbi_connect_info->[0]) eq 'CODE') {
-    croak 'cannot set DBI attributes on a CODE ref connect_info';
+    $self->throw_exception ('Cannot set DBI attributes on a CODE ref connect_info');
   }
 
   my $dbi_attrs = $self->_dbi_connect_info->[-1];
@@ -91,7 +90,7 @@
     $dbh->do('SELECT @@IDENTITY');
   };
   if ($@) {
-    croak <<'EOF';
+    $self->throw_exception (<<'EOF');
 
 Your drivers do not seem to support dynamic cursors (odbc_cursortype => 2).
 If you're using FreeTDS, make sure to set tds_version to 8.0 or greater.
@@ -102,12 +101,18 @@
   $self->_identity_method('@@identity');
 }
 
-sub _rebless {
-  no warnings 'uninitialized';
+sub _init {
   my $self = shift;
 
-  if (ref($self->_dbi_connect_info->[0]) ne 'CODE' &&
-      eval { $self->_dbi_connect_info->[-1]{odbc_cursortype} } == 2) {
+  no warnings qw/uninitialized/;
+
+  if (
+    ref($self->_dbi_connect_info->[0]) ne 'CODE'
+      &&
+    ref ($self->_dbi_connect_info->[-1]) eq 'HASH'
+      &&
+    $self->_dbi_connect_info->[-1]{odbc_cursortype} == 2
+  ) {
     $self->_set_dynamic_cursors;
     return;
   }
@@ -159,7 +164,7 @@
   my $dsn = $self->_dbi_connect_info->[0];
 
   if (ref($dsn) eq 'CODE') {
-    croak 'cannot change the DBI DSN on a CODE ref connect_info';
+    $self->throw_exception('cannot change the DBI DSN on a CODE ref connect_info');
   }
 
   if ($dsn !~ /MARS_Connection=/) {

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -53,8 +53,16 @@
 
   my $sth;
 
+  my $source_name;
+  if ( ref $source->name ne 'SCALAR' ) {
+      $source_name = $source->name;
+  }
+  else {
+      $source_name = ${$source->name};
+  }
+
   # check for fully-qualified name (eg. SCHEMA.TABLENAME)
-  if ( my ( $schema, $table ) = $source->name =~ /(\w+)\.(\w+)/ ) {
+  if ( my ( $schema, $table ) = $source_name =~ /(\w+)\.(\w+)/ ) {
     $sql = q{
       SELECT trigger_body FROM ALL_TRIGGERS t
       WHERE t.owner = ? AND t.table_name = ?
@@ -66,7 +74,7 @@
   }
   else {
     $sth = $dbh->prepare($sql);
-    $sth->execute( uc( $source->name ) );
+    $sth->execute( uc( $source_name ) );
   }
   while (my ($insert_trigger) = $sth->fetchrow_array) {
     return uc($1) if $insert_trigger =~ m!(\w+)\.nextval!i; # col name goes here???
@@ -206,12 +214,6 @@
 "alter session set nls_timestamp_tz_format='$timestamp_tz_format'");
 }
 
-sub _svp_begin {
-    my ($self, $name) = @_;
-
-    $self->_get_dbh->do("SAVEPOINT $name");
-}
-
 =head2 source_bind_attributes
 
 Handle LOB types in Oracle.  Under a certain size (4k?), you can get away
@@ -229,7 +231,7 @@
 
 =cut
 
-sub source_bind_attributes 
+sub source_bind_attributes
 {
 	require DBD::Oracle;
 	my $self = shift;
@@ -256,6 +258,12 @@
 	return \%bind_attributes;
 }
 
+sub _svp_begin {
+    my ($self, $name) = @_;
+
+    $self->_get_dbh->do("SAVEPOINT $name");
+}
+
 # Oracle automatically releases a savepoint when you start another one with the
 # same name.
 sub _svp_release { 1 }

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Oracle.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Oracle.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Oracle.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -19,10 +19,8 @@
           ? 'DBIx::Class::Storage::DBI::Oracle::WhereJoins'
           : 'DBIx::Class::Storage::DBI::Oracle::Generic';
 
-        # Load and rebless
-        eval "require $class";
-
-        bless $self, $class unless $@;
+        $self->ensure_class_loaded ($class);
+        bless $self, $class;
     }
 }
 

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Pg.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Pg.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Pg.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -26,11 +26,11 @@
 
   for my $col (@cols) {
     my $seq = ( $source->column_info($col)->{sequence} ||= $self->dbh_do('_dbh_get_autoinc_seq', $source, $col) )
-      or $self->throw_exception( "could not determine sequence for "
-                                 . $source->name
-                                 . ".$col, please consider adding a "
-                                 . "schema-qualified sequence to its column info"
-                               );
+      or $self->throw_exception( sprintf(
+        'could not determine sequence for column %s.%s, please consider adding a schema-qualified sequence to its column info',
+          $source->name,
+          $col,
+      ));
 
     push @values, $self->_dbh_last_insert_id ($self->_dbh, $seq);
   }
@@ -61,22 +61,22 @@
     ( $schema, $table ) = ( $1, $2 );
   }
 
-  # use DBD::Pg to fetch the column info if it is recent enough to
-  # work. otherwise, use custom SQL
-  my $seq_expr =  $DBD::Pg::VERSION >= 2.015001
-      ? eval{ $dbh->column_info(undef,$schema,$table,$col)->fetchrow_hashref->{COLUMN_DEF} }
-      : $self->_dbh_get_column_default( $dbh, $schema, $table, $col );
+  # get the column default using a Postgres-specific pg_catalog query
+  my $seq_expr = $self->_dbh_get_column_default( $dbh, $schema, $table, $col );
 
   # if no default value is set on the column, or if we can't parse the
   # default value as a sequence, throw.
-  unless ( defined $seq_expr and $seq_expr =~ /^nextval\(+'([^']+)'::(?:text|regclass)\)/i ){
+  unless ( defined $seq_expr and $seq_expr =~ /^nextval\(+'([^']+)'::(?:text|regclass)\)/i ) {
     $seq_expr = '' unless defined $seq_expr;
     $schema = "$schema." if defined $schema && length $schema;
-    $self->throw_exception( "no sequence found for $schema$table.$col, check table definition, "
-                            . "or explicitly set the 'sequence' for this column in the "
-                            . $source->source_name
-                            . " class"
-                          );
+    $self->throw_exception( sprintf (
+      'no sequence found for %s%s.%s, check the RDBMS table definition or explicitly set the '.
+      "'sequence' for this column in %s",
+        $schema || '',
+        $table,
+        $col,
+        $source->source_name,
+    ));
   }
 
   return $1;

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Replicated.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Replicated.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Replicated.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -17,9 +17,9 @@
   my @didnt_load;
 
   for my $module (keys %replication_required) {
-	eval "use $module $replication_required{$module}";
-	push @didnt_load, "$module $replication_required{$module}"
-	 if $@;
+    eval "use $module $replication_required{$module}";
+    push @didnt_load, "$module $replication_required{$module}"
+      if $@;
   }
 
   croak("@{[ join ', ', @didnt_load ]} are missing and are required for Replication")
@@ -33,7 +33,6 @@
 use DBIx::Class::Storage::DBI::Replicated::Types qw/BalancerClassNamePart DBICSchema DBICStorageDBI/;
 use MooseX::Types::Moose qw/ClassName HashRef Object/;
 use Scalar::Util 'reftype';
-use Carp::Clan qw/^DBIx::Class/;
 use Hash::Merge 'merge';
 
 use namespace::clean -except => 'meta';
@@ -222,7 +221,7 @@
   isa=>'DBIx::Class::Storage::DBI::Replicated::Pool',
   lazy_build=>1,
   handles=>[qw/
-    connect_replicants    
+    connect_replicants
     replicants
     has_replicants
   /],
@@ -277,7 +276,7 @@
     select
     select_single
     columns_info_for
-  /],    
+  /],
 );
 
 =head2 write_handler
@@ -290,9 +289,9 @@
   is=>'ro',
   isa=>Object,
   lazy_build=>1,
-  handles=>[qw/   
+  handles=>[qw/
     on_connect_do
-    on_disconnect_do       
+    on_disconnect_do
     connect_info
     throw_exception
     sql_maker
@@ -300,8 +299,8 @@
     create_ddl_dir
     deployment_statements
     datetime_parser
-    datetime_parser_type  
-    build_datetime_parser      
+    datetime_parser_type
+    build_datetime_parser
     last_insert_id
     insert
     insert_bulk
@@ -316,19 +315,19 @@
     sth
     deploy
     with_deferred_fk_checks
-	dbh_do
+    dbh_do
     reload_row
-	with_deferred_fk_checks
+    with_deferred_fk_checks
     _prep_for_execute
 
-	backup
-	is_datatype_numeric
-	_count_select
-	_subq_count_select
-	_subq_update_delete 
-	svp_rollback
-	svp_begin
-	svp_release
+    backup
+    is_datatype_numeric
+    _count_select
+    _subq_count_select
+    _subq_update_delete
+    svp_rollback
+    svp_begin
+    svp_release
   /],
 );
 
@@ -364,7 +363,7 @@
     );
 
     $self->pool($self->_build_pool)
-	if $self->pool;
+      if $self->pool;
   }
 
   if (@opts{qw/balancer_type balancer_args/}) {
@@ -376,7 +375,7 @@
     );
 
     $self->balancer($self->_build_balancer)
-	if $self->balancer;
+      if $self->balancer;
   }
 
   $self->_master_connect_info_opts(\%opts);
@@ -413,9 +412,9 @@
   my ($class, $schema, $storage_type_args, @args) = @_;
 
   return {
-  	schema=>$schema, 
-  	%$storage_type_args,
-  	@args
+    schema=>$schema,
+    %$storage_type_args,
+    @args
   }
 }
 
@@ -452,7 +451,7 @@
 sub _build_balancer {
   my $self = shift @_;
   $self->create_balancer(
-    pool=>$self->pool, 
+    pool=>$self->pool,
     master=>$self->master,
     %{$self->balancer_args},
   );
@@ -494,23 +493,23 @@
   for my $r (@args) {
     $r = [ $r ] unless reftype $r eq 'ARRAY';
 
-    croak "coderef replicant connect_info not supported"
+    $self->throw_exception('coderef replicant connect_info not supported')
       if ref $r->[0] && reftype $r->[0] eq 'CODE';
 
 # any connect_info options?
     my $i = 0;
     $i++ while $i < @$r && (reftype($r->[$i])||'') ne 'HASH';
 
-# make one if none    
+# make one if none
     $r->[$i] = {} unless $r->[$i];
 
 # merge if two hashes
     my @hashes = @$r[$i .. $#{$r}];
 
-    croak "invalid connect_info options"
+    $self->throw_exception('invalid connect_info options')
       if (grep { reftype($_) eq 'HASH' } @hashes) != @hashes;
 
-    croak "too many hashrefs in connect_info"
+    $self->throw_exception('too many hashrefs in connect_info')
       if @hashes > 2;
 
     my %opts = %{ merge(reverse @hashes) };
@@ -600,11 +599,11 @@
       ($result[0]) = ($coderef->(@args));
     } else {
       $coderef->(@args);
-    }       
+    }
   };
 
   ##Reset to the original state
-  $self->read_handler($current); 
+  $self->read_handler($current);
 
   ##Exception testing has to come last, otherwise you might leave the 
   ##read_handler set to master.
@@ -738,7 +737,7 @@
   if(@_) {
     foreach my $source ($self->all_storages) {
       $source->debug(@_);
-    }   
+    }
   }
   return $self->master->debug;
 }
@@ -754,7 +753,7 @@
   if(@_) {
     foreach my $source ($self->all_storages) {
       $source->debugobj(@_);
-    } 	
+    }
   }
   return $self->master->debugobj;
 }
@@ -770,7 +769,7 @@
   if(@_) {
     foreach my $source ($self->all_storages) {
       $source->debugfh(@_);
-    }   
+    }
   }
   return $self->master->debugfh;
 }
@@ -786,7 +785,7 @@
   if(@_) {
     foreach my $source ($self->all_storages) {
       $source->debugcb(@_);
-    }   
+    }
   }
   return $self->master->debugcb;
 }

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/SQLite.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/SQLite.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/SQLite.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -47,6 +47,22 @@
   return $backupfile;
 }
 
+sub deployment_statements {
+  my $self = shift;
+  my ($schema, $type, $version, $dir, $sqltargs, @rest) = @_;
+
+  $sqltargs ||= {};
+
+  my $sqlite_version = $self->_get_dbh->{sqlite_version};
+
+  # numify, SQLT does a numeric comparison
+  $sqlite_version =~ s/^(\d+) \. (\d+) (?: \. (\d+))? .*/${1}.${2}/x;
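+  # e.g. '3.6.22' is reduced to 3.6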
+
+  $sqltargs->{producer_args}{sqlite_version} = $sqlite_version;
+
+  $self->next::method($schema, $type, $version, $dir, $sqltargs, @rest);
+}
+
 sub datetime_parser_type { return "DateTime::Format::SQLite"; } 
 
 1;

Added: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -0,0 +1,102 @@
+package DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars;
+
+use base qw/
+  DBIx::Class::Storage::DBI::NoBindVars
+  DBIx::Class::Storage::DBI::Sybase::ASE
+/;
+use mro 'c3';
+use List::Util ();
+use Scalar::Util ();
+
+sub _init {
+  my $self = shift;
+  $self->disable_sth_caching(1);
+  $self->_identity_method('@@IDENTITY');
+  $self->next::method (@_);
+}
+
+sub _fetch_identity_sql { 'SELECT ' . $_[0]->_identity_method }
+
+my $number = sub { Scalar::Util::looks_like_number($_[0]) };
+
+my $decimal = sub { $_[0] =~ /^ [-+]? \d+ (?:\.\d*)? \z/x };
+
+my %noquote = (
+    int => sub { $_[0] =~ /^ [-+]? \d+ \z/x },
+    bit => sub { $_[0] =~ /^[01]\z/ },
+    money => sub { $_[0] =~ /^\$ \d+ (?:\.\d*)? \z/x },
+    float => $number,
+    real => $number,
+    double => $number,
+    decimal => $decimal,
+    numeric => $decimal,
+);
+
+sub interpolate_unquoted {
+  my $self = shift;
+  my ($type, $value) = @_;
+
+  return $self->next::method(@_) if not defined $value or not defined $type;
+
+  if (my $key = List::Util::first { $type =~ /$_/i } keys %noquote) {
+    return 1 if $noquote{$key}->($value);
+  }
+  elsif ($self->is_datatype_numeric($type) && $number->($value)) {
+    return 1;
+  }
+
+  return $self->next::method(@_);
+}
+
+sub _prep_interpolated_value {
+  my ($self, $type, $value) = @_;
+
+  if ($type =~ /money/i && defined $value) {
+    # change a ^ not followed by \$ to a \$
+    $value =~ s/^ (?! \$) /\$/x;
+  }
+
+  return $value;
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars - Storage::DBI subclass for
+Sybase ASE without placeholder support
+
+=head1 DESCRIPTION
+
+If you're using this driver, then your version of Sybase, or the libraries you
+use to connect to it, do not support placeholders.
+
+You can also enable this driver explicitly using:
+
+  my $schema = SchemaClass->clone;
+  $schema->storage_type('::DBI::Sybase::ASE::NoBindVars');
+  $schema->connect($dsn, $user, $pass, \%opts);
+
+See the discussion in L<< DBD::Sybase/Using ? Placeholders & bind parameters to
+$sth->execute >> for details on the pros and cons of using placeholders.
+
+One advantage of not using placeholders is that C<select @@identity> will work
+for obtaining the last insert id of an C<IDENTITY> column, instead of having to
+do C<select max(col)> in a transaction as the base Sybase driver does.
+
+When using this driver, bind variables will be interpolated (properly quoted of
+course) into the SQL query itself, without using placeholders.
+
+The caching of prepared statements is also explicitly disabled, as the
+interpolation renders it useless.
+
+=head1 AUTHORS
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+# vim:sts=2 sw=2:

Copied: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm (from rev 7609, DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase.pm)
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -0,0 +1,1132 @@
+package DBIx::Class::Storage::DBI::Sybase::ASE;
+
+use strict;
+use warnings;
+
+use base qw/
+    DBIx::Class::Storage::DBI::Sybase
+    DBIx::Class::Storage::DBI::AutoCast
+/;
+use mro 'c3';
+use Carp::Clan qw/^DBIx::Class/;
+use Scalar::Util();
+use List::Util();
+use Sub::Name();
+use Data::Dumper::Concise();
+
+__PACKAGE__->mk_group_accessors('simple' =>
+    qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
+       _bulk_storage _is_bulk_storage _began_bulk_work
+       _bulk_disabled_due_to_coderef_connect_info_warned
+       _identity_method/
+);
+
+my @also_proxy_to_extra_storages = qw/
+  connect_call_set_auto_cast auto_cast connect_call_blob_setup
+  connect_call_datetime_setup
+
+  disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
+  auto_savepoint unsafe cursor_class debug debugobj schema
+/;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase::ASE - Sybase ASE SQL Server support for
+DBIx::Class
+
+=head1 SYNOPSIS
+
+This subclass supports L<DBD::Sybase> for real (non-Microsoft) Sybase databases.
+
+=head1 DESCRIPTION
+
+If your version of Sybase does not support placeholders, then your storage will
+be reblessed to L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
+You can also enable that driver explicitly, see the documentation for more
+details.
+
+With this driver there is unfortunately no way to get the C<last_insert_id>
+without doing a C<SELECT MAX(col)>. This is done safely in a transaction
+(locking the table). See L</INSERTS WITH PLACEHOLDERS>.
+
+A recommended L<DBIx::Class::Storage::DBI/connect_info> setting:
+
+  on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]]
+
+=head1 METHODS
+
+=cut
+
+sub _rebless {
+  my $self = shift;
+
+  my $no_bind_vars = __PACKAGE__ . '::NoBindVars';
+
+  if ($self->using_freetds) {
+    carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
+
+You are using FreeTDS with Sybase.
+
+We will do our best to support this configuration, but please consider this
+support experimental.
+
+TEXT/IMAGE columns will definitely not work.
+
+You are encouraged to recompile DBD::Sybase with the Sybase Open Client libraries
+instead.
+
+See perldoc DBIx::Class::Storage::DBI::Sybase::ASE for more details.
+
+To turn off this warning set the DBIC_SYBASE_FREETDS_NOWARN environment
+variable.
+EOF
+
+    if (not $self->_typeless_placeholders_supported) {
+      if ($self->_placeholders_supported) {
+        $self->auto_cast(1);
+      }
+      else {
+        $self->ensure_class_loaded($no_bind_vars);
+        bless $self, $no_bind_vars;
+        $self->_rebless;
+      }
+    }
+  }
+
+  elsif (not $self->_get_dbh->{syb_dynamic_supported}) {
+    # not necessarily FreeTDS, but no placeholders nevertheless
+    $self->ensure_class_loaded($no_bind_vars);
+    bless $self, $no_bind_vars;
+    $self->_rebless;
+  }
+  # this is highly unlikely, but we check just in case
+  elsif (not $self->_typeless_placeholders_supported) {
+    $self->auto_cast(1);
+  }
+}
+
+sub _init {
+  my $self = shift;
+  $self->_set_max_connect(256);
+
+# create storage for insert/(update blob) transactions,
+# unless this is that storage
+  return if $self->_is_extra_storage;
+
+  my $writer_storage = (ref $self)->new;
+
+  $writer_storage->_is_extra_storage(1);
+  $writer_storage->connect_info($self->connect_info);
+  $writer_storage->auto_cast($self->auto_cast);
+
+  $self->_writer_storage($writer_storage);
+
+# create a bulk storage unless connect_info is a coderef
+  return if ref($self->_dbi_connect_info->[0]) eq 'CODE';
+
+  my $bulk_storage = (ref $self)->new;
+
+  $bulk_storage->_is_extra_storage(1);
+  $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
+  $bulk_storage->connect_info($self->connect_info);
+
+# this is why
+  $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
+
+  $self->_bulk_storage($bulk_storage);
+}
+
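+# Proxy the methods below to the extra (writer/bulk) storages as well, so that
+# any runtime changes to the main storage's settings propagate to them.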
+for my $method (@also_proxy_to_extra_storages) {
+  no strict 'refs';
+  no warnings 'redefine';
+
+  my $replaced = __PACKAGE__->can($method);
+
+  *{$method} = Sub::Name::subname $method => sub {
+    my $self = shift;
+    $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
+    $self->_bulk_storage->$replaced(@_)   if $self->_bulk_storage;
+    return $self->$replaced(@_);
+  };
+}
+
+sub disconnect {
+  my $self = shift;
+
+# Even though we call $sth->finish for uses of the bulk API, there's still an
+# "active statement" warning on disconnect, which we throw away here.
+# This is due to the bug described in insert_bulk.
+# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
+  local $SIG{__WARN__} = sub {
+    warn $_[0] unless $_[0] =~ /active statement/i;
+  } if $self->_is_bulk_storage;
+
+# so that next transaction gets a dbh
+  $self->_began_bulk_work(0) if $self->_is_bulk_storage;
+
+  $self->next::method;
+}
+
+# Set up session settings for Sybase databases for the connection.
+#
+# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
+# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
+# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
+# only want when AutoCommit is off.
+#
+# Also SET TEXTSIZE for FreeTDS because LongReadLen doesn't work.
+sub _run_connection_actions {
+  my $self = shift;
+
+  if ($self->_is_bulk_storage) {
+# this should be cleared on every reconnect
+    $self->_began_bulk_work(0);
+    return;
+  }
+
+  if (not $self->using_freetds) {
+    $self->_dbh->{syb_chained_txn} = 1;
+  } else {
+    # based on LongReadLen in connect_info
+    $self->set_textsize;
+
+    if ($self->_dbh_autocommit) {
+      $self->_dbh->do('SET CHAINED OFF');
+    } else {
+      $self->_dbh->do('SET CHAINED ON');
+    }
+  }
+
+  $self->next::method(@_);
+}
+
+=head2 connect_call_blob_setup
+
+Used as:
+
+  on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ]
+
+Does C<< $dbh->{syb_binary_images} = 1; >> to return C<IMAGE> data as raw binary
+instead of as a hex string.
+
+Recommended.
+
+Also sets the C<log_on_update> value for blob write operations. The default is
+C<1>, but C<0> is better if your database is configured for it.
+
+See
+L<DBD::Sybase/Handling_IMAGE/TEXT_data_with_syb_ct_get_data()/syb_ct_send_data()>.
+
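+A usage sketch, assuming a hypothetical C<cover_image> column declared with
+C<< data_type => 'image' >>:
+
+  $schema->resultset('Book')->create({
+    id          => 1,
+    cover_image => $raw_image_bytes,
+  });
+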
+=cut
+
+sub connect_call_blob_setup {
+  my $self = shift;
+  my %args = @_;
+  my $dbh = $self->_dbh;
+  $dbh->{syb_binary_images} = 1;
+
+  $self->_blob_log_on_update($args{log_on_update})
+    if exists $args{log_on_update};
+}
+
+sub _is_lob_type {
+  my $self = shift;
+  my $type = shift;
+  $type && $type =~ /(?:text|image|lob|bytea|binary|memo)/i;
+}
+
+sub _is_lob_column {
+  my ($self, $source, $column) = @_;
+
+  return $self->_is_lob_type($source->column_info($column)->{data_type});
+}
+
+sub _prep_for_execute {
+  my $self = shift;
+  my ($op, $extra_bind, $ident, $args) = @_;
+
+  my ($sql, $bind) = $self->next::method (@_);
+
+  my $table = Scalar::Util::blessed($ident) ? $ident->from : $ident;
+
+  my $bind_info = $self->_resolve_column_info(
+    $ident, [map $_->[0], @{$bind}]
+  );
+  my $bound_identity_col = List::Util::first
+    { $bind_info->{$_}{is_auto_increment} }
+    (keys %$bind_info)
+  ;
+  my $identity_col = Scalar::Util::blessed($ident) &&
+    List::Util::first
+    { $ident->column_info($_)->{is_auto_increment} }
+    $ident->columns
+  ;
+
+  if (($op eq 'insert' && $bound_identity_col) ||
+      ($op eq 'update' && exists $args->[0]{$identity_col})) {
+    $sql = join ("\n",
+      $self->_set_table_identity_sql($op => $table, 'on'),
+      $sql,
+      $self->_set_table_identity_sql($op => $table, 'off'),
+    );
+  }
+
+  if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
+      (not $self->{insert_bulk})) {
+    $sql =
+      "$sql\n" .
+      $self->_fetch_identity_sql($ident, $identity_col);
+  }
+
+  return ($sql, $bind);
+}
+
+sub _set_table_identity_sql {
+  my ($self, $op, $table, $on_off) = @_;
+
+  return sprintf 'SET IDENTITY_%s %s %s',
+    uc($op), $self->sql_maker->_quote($table), uc($on_off);
+}
+
+# Stolen from SQLT, with some modifications. This is a makeshift
+# solution before a sane type-mapping library is available, thus
+# the 'our' for easy overrides.
+our %TYPE_MAPPING  = (
+    number    => 'numeric',
+    money     => 'money',
+    varchar   => 'varchar',
+    varchar2  => 'varchar',
+    timestamp => 'datetime',
+    text      => 'varchar',
+    real      => 'double precision',
+    comment   => 'text',
+    bit       => 'bit',
+    tinyint   => 'smallint',
+    float     => 'double precision',
+    serial    => 'numeric',
+    bigserial => 'numeric',
+    boolean   => 'varchar',
+    long      => 'varchar',
+);
+
+sub _native_data_type {
+  my ($self, $type) = @_;
+
+  $type = lc $type;
+  $type =~ s/\s* identity//x;
+
+  return uc($TYPE_MAPPING{$type} || $type);
+}
+
+sub _fetch_identity_sql {
+  my ($self, $source, $col) = @_;
+
+  return sprintf ("SELECT MAX(%s) FROM %s",
+    map { $self->sql_maker->_quote ($_) } ($col, $source->from)
+  );
+}
+
+sub _execute {
+  my $self = shift;
+  my ($op) = @_;
+
+  my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);
+
+  if ($op eq 'insert') {
+    $self->_identity($sth->fetchrow_array);
+    $sth->finish;
+  }
+
+  return wantarray ? ($rv, $sth, @bind) : $rv;
+}
+
+sub last_insert_id { shift->_identity }
+
+# handles TEXT/IMAGE and transaction for last_insert_id
+sub insert {
+  my $self = shift;
+  my ($source, $to_insert) = @_;
+
+  my $identity_col = (List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns) || '';
+
+  # check for empty insert
+  # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
+  # try to insert explicit 'DEFAULT's instead (except for identity)
+  if (not %$to_insert) {
+    for my $col ($source->columns) {
+      next if $col eq $identity_col;
+      $to_insert->{$col} = \'DEFAULT';
+    }
+  }
+
+  my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
+
+  # do we need the horrific SELECT MAX(COL) hack?
+  my $dumb_last_insert_id =
+       $identity_col
+    && (not exists $to_insert->{$identity_col})
+    && ($self->_identity_method||'') ne '@@IDENTITY';
+
+  my $next = $self->next::can;
+
+  # we are already in a transaction, or there are no blobs
+  # and we don't need the PK - just (try to) do it
+  if ($self->{transaction_depth}
+        || (!$blob_cols && !$dumb_last_insert_id)
+  ) {
+    return $self->_insert (
+      $next, $source, $to_insert, $blob_cols, $identity_col
+    );
+  }
+
+  # otherwise use the _writer_storage to do the insert+transaction on another
+  # connection
+  my $guard = $self->_writer_storage->txn_scope_guard;
+
+  my $updated_cols = $self->_writer_storage->_insert (
+    $next, $source, $to_insert, $blob_cols, $identity_col
+  );
+
+  $self->_identity($self->_writer_storage->_identity);
+
+  $guard->commit;
+
+  return $updated_cols;
+}
+
+sub _insert {
+  my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;
+
+  my $updated_cols = $self->$next ($source, $to_insert);
+
+  my $final_row = {
+    ($identity_col ?
+      ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
+    %$to_insert,
+    %$updated_cols,
+  };
+
+  $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;
+
+  return $updated_cols;
+}
+
+sub update {
+  my $self = shift;
+  my ($source, $fields, $where, @rest) = @_;
+
+  my $wantarray = wantarray;
+
+  my $blob_cols = $self->_remove_blob_cols($source, $fields);
+
+  my $table = $source->name;
+
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
+
+  my $is_identity_update = $identity_col && defined $fields->{$identity_col};
+
+  return $self->next::method(@_) unless $blob_cols;
+
+# If there are any blobs in $where, Sybase will return a descriptive error
+# message.
+# XXX blobs can still be used with a LIKE query, and this should be handled.
+
+# update+blob update(s) done atomically on separate connection
+  $self = $self->_writer_storage;
+
+  my $guard = $self->txn_scope_guard;
+
+# First update the blob columns to be updated to '' (taken from $fields, where
+# they were originally put by _remove_blob_cols.)
+  my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
+
+# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
+
+  $self->next::method($source, \%blobs_to_empty, $where, @rest);
+
+# Now update the blobs before the other columns in case the update of other
+# columns makes the search condition invalid.
+  $self->_update_blobs($source, $blob_cols, $where);
+
+  my @res;
+  if (%$fields) {
+    if ($wantarray) {
+      @res    = $self->next::method(@_);
+    }
+    elsif (defined $wantarray) {
+      $res[0] = $self->next::method(@_);
+    }
+    else {
+      $self->next::method(@_);
+    }
+  }
+
+  $guard->commit;
+
+  return $wantarray ? @res : $res[0];
+}
+
+sub insert_bulk {
+  my $self = shift;
+  my ($source, $cols, $data) = @_;
+
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
+
+  my $is_identity_insert = (List::Util::first
+    { $_ eq $identity_col }
+    @{$cols}
+  ) ? 1 : 0;
+
+  my @source_columns = $source->columns;
+
+  my $use_bulk_api =
+    $self->_bulk_storage &&
+    $self->_get_dbh->{syb_has_blk};
+
+  if ((not $use_bulk_api)
+        &&
+      (ref($self->_dbi_connect_info->[0]) eq 'CODE')
+        &&
+      (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
+    carp <<'EOF';
+Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
+regular array inserts.
+EOF
+    $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
+  }
+
+  if (not $use_bulk_api) {
+    my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
+
+# _execute_array uses a txn anyway, but it ends too early in case we need to
+# select max(col) to get the identity for inserting blobs.
+    ($self, my $guard) = $self->{transaction_depth} == 0 ?
+      ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
+      :
+      ($self, undef);
+
+    local $self->{insert_bulk} = 1;
+
+    $self->next::method(@_);
+
+    if ($blob_cols) {
+      if ($is_identity_insert) {
+        $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
+      }
+      else {
+        my @cols_with_identities = (@$cols, $identity_col);
+
+        ## calculate identities
+        # XXX This assumes identities always increase by 1, which may or may not
+        # be true.
+        my ($last_identity) =
+          $self->_dbh->selectrow_array (
+            $self->_fetch_identity_sql($source, $identity_col)
+          );
+        my @identities = (($last_identity - @$data + 1) .. $last_identity);
+
+        my @data_with_identities = map [@$_, shift @identities], @$data;
+
+        $self->_insert_blobs_array (
+          $source, $blob_cols, \@cols_with_identities, \@data_with_identities
+        );
+      }
+    }
+
+    $guard->commit if $guard;
+
+    return;
+  }
+
+# otherwise, use the bulk API
+
+# rearrange @$data so that columns are in database order
+  my %orig_idx;
+  @orig_idx{@$cols} = 0..$#$cols;
+
+  my %new_idx;
+  @new_idx{@source_columns} = 0..$#source_columns;
+
+  my @new_data;
+  for my $datum (@$data) {
+    my $new_datum = [];
+    for my $col (@source_columns) {
+# identity data will be 'undef' if not $is_identity_insert
+# columns with defaults will also be 'undef'
+      $new_datum->[ $new_idx{$col} ] =
+        exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
+    }
+    push @new_data, $new_datum;
+  }
+
+# bcp identity index is 1-based
+  my $identity_idx = exists $new_idx{$identity_col} ?
+    $new_idx{$identity_col} + 1 : 0;
+
+## Set a client-side conversion error handler, straight from DBD::Sybase docs.
+# This ignores any data conversion errors detected by the client side libs, as
+# they are usually harmless.
+  my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
+    Sub::Name::subname insert_bulk => sub {
+      my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
+
+      return 1 if $errno == 36;
+
+      carp
+        "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
+        ($errmsg ? "\n$errmsg" : '') .
+        ($osmsg  ? "\n$osmsg"  : '')  .
+        ($blkmsg ? "\n$blkmsg" : '');
+
+      return 0;
+  });
+
+  eval {
+    my $bulk = $self->_bulk_storage;
+
+    my $guard = $bulk->txn_scope_guard;
+
+## XXX get this to work instead of our own $sth
+## will require SQLA or *Hacks changes for ordered columns
+#    $bulk->next::method($source, \@source_columns, \@new_data, {
+#      syb_bcp_attribs => {
+#        identity_flag   => $is_identity_insert,
+#        identity_column => $identity_idx,
+#      }
+#    });
+    my $sql = 'INSERT INTO ' .
+      $bulk->sql_maker->_quote($source->name) . ' (' .
+# colname list is ignored for BCP, but does no harm
+      (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
+      ' VALUES ('.  (join ', ', ('?') x @source_columns) . ')';
+
+## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
+## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
+## better yet the version above. Should be fixed in DBD::Sybase.
+    my $sth = $bulk->_get_dbh->prepare($sql,
+#      'insert', # op
+      {
+        syb_bcp_attribs => {
+          identity_flag   => $is_identity_insert,
+          identity_column => $identity_idx,
+        }
+      }
+    );
+
+    my @bind = do {
+      my $idx = 0;
+      map [ $_, $idx++ ], @source_columns;
+    };
+
+    $self->_execute_array(
+      $source, $sth, \@bind, \@source_columns, \@new_data, sub {
+        $guard->commit
+      }
+    );
+
+    $bulk->_query_end($sql);
+  };
+
+  my $exception = $@;
+  DBD::Sybase::set_cslib_cb($orig_cslib_cb);
+
+  if ($exception =~ /-Y option/) {
+    carp <<"EOF";
+
+Sybase bulk API operation failed due to character set incompatibility, reverting
+to regular array inserts:
+
+*** Try unsetting the LANG environment variable.
+
+$exception
+EOF
+    $self->_bulk_storage(undef);
+    unshift @_, $self;
+    goto \&insert_bulk;
+  }
+  elsif ($exception) {
+# rollback makes the bulkLogin connection unusable
+    $self->_bulk_storage->disconnect;
+    $self->throw_exception($exception);
+  }
+}
+
+sub _dbh_execute_array {
+  my ($self, $sth, $tuple_status, $cb) = @_;
+
+  my $rv = $self->next::method($sth, $tuple_status);
+  $cb->() if $cb;
+
+  return $rv;
+}
+
+# Make sure blobs are not bound as placeholders, and return any non-empty ones
+# as a hash.
+sub _remove_blob_cols {
+  my ($self, $source, $fields) = @_;
+
+  my %blob_cols;
+
+  for my $col (keys %$fields) {
+    if ($self->_is_lob_column($source, $col)) {
+      my $blob_val = delete $fields->{$col};
+      if (not defined $blob_val) {
+        $fields->{$col} = \'NULL';
+      }
+      else {
+        $fields->{$col} = \"''";
+        $blob_cols{$col} = $blob_val unless $blob_val eq '';
+      }
+    }
+  }
+
+  return %blob_cols ? \%blob_cols : undef;
+}
+
+# same for insert_bulk
+sub _remove_blob_cols_array {
+  my ($self, $source, $cols, $data) = @_;
+
+  my @blob_cols;
+
+  for my $i (0..$#$cols) {
+    my $col = $cols->[$i];
+
+    if ($self->_is_lob_column($source, $col)) {
+      for my $j (0..$#$data) {
+        my $blob_val = delete $data->[$j][$i];
+        if (not defined $blob_val) {
+          $data->[$j][$i] = \'NULL';
+        }
+        else {
+          $data->[$j][$i] = \"''";
+          $blob_cols[$j][$i] = $blob_val
+            unless $blob_val eq '';
+        }
+      }
+    }
+  }
+
+  return @blob_cols ? \@blob_cols : undef;
+}
+
+sub _update_blobs {
+  my ($self, $source, $blob_cols, $where) = @_;
+
+  my (@primary_cols) = $source->primary_columns;
+
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
+    unless @primary_cols;
+
+# check if we're updating a single row by PK
+  my $pk_cols_in_where = 0;
+  for my $col (@primary_cols) {
+    $pk_cols_in_where++ if defined $where->{$col};
+  }
+  my @rows;
+
+  if ($pk_cols_in_where == @primary_cols) {
+    my %row_to_update;
+    @row_to_update{@primary_cols} = @{$where}{@primary_cols};
+    @rows = \%row_to_update;
+  } else {
+    my $cursor = $self->select ($source, \@primary_cols, $where, {});
+    @rows = map {
+      my %row; @row{@primary_cols} = @$_; \%row
+    } $cursor->all;
+  }
+
+  for my $row (@rows) {
+    $self->_insert_blobs($source, $blob_cols, $row);
+  }
+}
+
+sub _insert_blobs {
+  my ($self, $source, $blob_cols, $row) = @_;
+  my $dbh = $self->_get_dbh;
+
+  my $table = $source->name;
+
+  my %row = %$row;
+  my (@primary_cols) = $source->primary_columns;
+
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
+    unless @primary_cols;
+
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
+    if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
+
+  for my $col (keys %$blob_cols) {
+    my $blob = $blob_cols->{$col};
+
+    my %where = map { ($_, $row{$_}) } @primary_cols;
+
+    my $cursor = $self->select ($source, [$col], \%where, {});
+    $cursor->next;
+    my $sth = $cursor->sth;
+
+    if (not $sth) {
+
+      $self->throw_exception(
+          "Could not find row in table '$table' for blob update:\n"
+        . Data::Dumper::Concise::Dumper (\%where)
+      );
+    }
+
+    eval {
+      do {
+        $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
+      } while $sth->fetch;
+
+      $sth->func('ct_prepare_send') or die $sth->errstr;
+
+      my $log_on_update = $self->_blob_log_on_update;
+      $log_on_update    = 1 if not defined $log_on_update;
+
+      $sth->func('CS_SET', 1, {
+        total_txtlen => length($blob),
+        log_on_update => $log_on_update
+      }, 'ct_data_info') or die $sth->errstr;
+
+      $sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr;
+
+      $sth->func('ct_finish_send') or die $sth->errstr;
+    };
+    my $exception = $@;
+    $sth->finish if $sth;
+    if ($exception) {
+      if ($self->using_freetds) {
+        $self->throw_exception (
+          'TEXT/IMAGE operation failed, probably because you are using FreeTDS: '
+          . $exception
+        );
+      } else {
+        $self->throw_exception($exception);
+      }
+    }
+  }
+}
+
+sub _insert_blobs_array {
+  my ($self, $source, $blob_cols, $cols, $data) = @_;
+
+  for my $i (0..$#$data) {
+    my $datum = $data->[$i];
+
+    my %row;
+    @row{ @$cols } = @$datum;
+
+    my %blob_vals;
+    for my $j (0..$#$cols) {
+      if (exists $blob_cols->[$i][$j]) {
+        $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
+      }
+    }
+
+    $self->_insert_blobs ($source, \%blob_vals, \%row);
+  }
+}
+
+=head2 connect_call_datetime_setup
+
+Used as:
+
+  on_connect_call => 'datetime_setup'
+
+In L<DBIx::Class::Storage::DBI/connect_info> to set:
+
+  $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
+  $dbh->do('set dateformat mdy');   # input fmt:  08/13/1979 18:08:55.080
+
+on connection, for use with L<DBIx::Class::InflateColumn::DateTime> via
+L<DateTime::Format::Sybase>, which you will need to install.
+
+This works for both C<DATETIME> and C<SMALLDATETIME> columns, although
+C<SMALLDATETIME> columns only have minute precision.
+
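+A hypothetical Result class sketch using the inflation this enables:
+
+  __PACKAGE__->load_components(qw/InflateColumn::DateTime Core/);
+  __PACKAGE__->add_columns(
+    created_on => { data_type => 'datetime' },
+  );
+
+Then C<< $row->created_on >> returns a L<DateTime> object.
+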
+=cut
+
+{
+  my $old_dbd_warned = 0;
+
+  sub connect_call_datetime_setup {
+    my $self = shift;
+    my $dbh = $self->_get_dbh;
+
+    if ($dbh->can('syb_date_fmt')) {
+      # amazingly, this works with FreeTDS
+      $dbh->syb_date_fmt('ISO_strict');
+    } elsif (not $old_dbd_warned) {
+      carp "Your DBD::Sybase is too old to support ".
+      "DBIx::Class::InflateColumn::DateTime, please upgrade!";
+      $old_dbd_warned = 1;
+    }
+
+    $dbh->do('SET DATEFORMAT mdy');
+
+    1;
+  }
+}
+
+sub datetime_parser_type { "DateTime::Format::Sybase" }
+
+# ->begin_work and such have no effect with FreeTDS but we run them anyway to
+# let the DBD keep any state it needs to.
+#
+# If they ever do start working, the extra statements will do no harm (because
+# Sybase supports nested transactions.)
+
+sub _dbh_begin_work {
+  my $self = shift;
+
+# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
+# TRAN once. However, we need to make sure there's a $dbh.
+  return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
+
+  $self->next::method(@_);
+
+  if ($self->using_freetds) {
+    $self->_get_dbh->do('BEGIN TRAN');
+  }
+
+  $self->_began_bulk_work(1) if $self->_is_bulk_storage;
+}
+
+sub _dbh_commit {
+  my $self = shift;
+  if ($self->using_freetds) {
+    $self->_dbh->do('COMMIT');
+  }
+  return $self->next::method(@_);
+}
+
+sub _dbh_rollback {
+  my $self = shift;
+  if ($self->using_freetds) {
+    $self->_dbh->do('ROLLBACK');
+  }
+  return $self->next::method(@_);
+}
+
+# savepoint support using ASE syntax
+
+sub _svp_begin {
+  my ($self, $name) = @_;
+
+  $self->_get_dbh->do("SAVE TRANSACTION $name");
+}
+
+# A new SAVE TRANSACTION with the same name releases the previous one.
+sub _svp_release { 1 }
+
+sub _svp_rollback {
+  my ($self, $name) = @_;
+
+  $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
+}
+
+1;
+
+=head1 Schema::Loader Support
+
+There is an experimental branch of L<DBIx::Class::Schema::Loader> that will
+allow you to dump a schema from most (if not all) versions of Sybase.
+
+It is available via subversion from:
+
+  http://dev.catalyst.perl.org/repos/bast/branches/DBIx-Class-Schema-Loader/current/
+
+=head1 FreeTDS
+
+This driver supports L<DBD::Sybase> compiled against FreeTDS
+(L<http://www.freetds.org/>) to the best of our ability; however, it is
+recommended that you recompile L<DBD::Sybase> against the Sybase Open Client
+libraries. They are a part of the Sybase ASE distribution.
+
+The Open Client FAQ is here:
+L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.
+
+Sybase ASE for Linux (which comes with the Open Client libraries) may be
+downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
+
+To see if you're using FreeTDS, check C<< $schema->storage->using_freetds >>, or run:
+
+  perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
+
+Some versions of the libraries involved will not support placeholders, in which
+case the storage will be reblessed to
+L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
+
+In some configurations, placeholders will work but will throw implicit type
+conversion errors for anything that's not expecting a string. In such a case,
+the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
+automatically set; you may also enable it on connection with
+L<DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>. The type info
+for the C<CAST>s is taken from the L<DBIx::Class::ResultSource/data_type>
+definitions in your Result classes, and is mapped to a Sybase type (if it isn't
+one already) using a mapping based on L<SQL::Translator>.
+
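+For example (C<set_auto_cast> is the connect call provided by
+L<DBIx::Class::Storage::DBI::AutoCast>):
+
+  on_connect_call => 'set_auto_cast'
+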
+In other configurations, placeholders will work just as they do with the Sybase
+Open Client libraries.
+
+Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
+
+=head1 INSERTS WITH PLACEHOLDERS
+
+With placeholders enabled, inserts are done in a transaction so that there are
+no concurrency issues with getting the inserted identity value using
+C<SELECT MAX(col)>, which is the only way to get the C<IDENTITY> value in this
+mode.
+
+In addition, they are done on a separate connection so that it's possible to
+have active cursors when doing an insert.
+
+When using C<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars> transactions
+are disabled, as there are no concurrency issues with C<SELECT @@IDENTITY> as
+it's a session variable.
+
+=head1 TRANSACTIONS
+
+Due to limitations of the TDS protocol, L<DBD::Sybase>, or both, you cannot
+begin a transaction while there are active cursors; nor can you use multiple
+active cursors within a transaction. An active cursor is, for example, a
+L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
+C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
+
+For example, this will not work:
+
+  $schema->txn_do(sub {
+    my $rs = $schema->resultset('Book');
+    while (my $row = $rs->next) {
+      $schema->resultset('MetaData')->create({
+        book_id => $row->id,
+        ...
+      });
+    }
+  });
+
+This won't either:
+
+  my $first_row = $large_rs->first;
+  $schema->txn_do(sub { ... });
+
+Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
+are not affected, as they are done on an extra database handle.
+
+Some workarounds:
+
+=over 4
+
+=item * use L<DBIx::Class::Storage::DBI::Replicated>
+
+=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>
+
+=item * load the data from your cursor with L<DBIx::Class::ResultSet/all> (see the sketch after this list)
+
+=back
+
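+For instance, a minimal sketch of that last workaround:
+
+  my @books = $schema->resultset('Book')->all; # cursor exhausted up front
+
+  $schema->txn_do(sub {
+    $schema->resultset('MetaData')->create({ book_id => $_->id })
+      for @books;
+  });
+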
+=head1 MAXIMUM CONNECTIONS
+
+The TDS protocol makes separate connections to the server for active statements
+in the background. By default the number of such connections is limited to 25,
+on both the client side and the server side.
+
+This is a bit too low for a complex L<DBIx::Class> application, so on connection
+the client side setting is set to C<256> (see L<DBD::Sybase/maxConnect>.) You
+can override it to whatever setting you like in the DSN.
+
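+For example, a hypothetical DSN raising the client-side limit further:
+
+  dbi:Sybase:server=SYBASE;maxConnect=512
+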
+See
+L<http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.ase_15.0.sag1/html/sag1/sag1272.htm>
+for information on changing the setting on the server side.
+
+=head1 DATES
+
+See L</connect_call_datetime_setup> to set up date formats
+for L<DBIx::Class::InflateColumn::DateTime>.
+
+=head1 TEXT/IMAGE COLUMNS
+
+L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
+C<TEXT/IMAGE> columns.
+
+Setting C<< $dbh->{LongReadLen} >> will also not work with FreeTDS; use either:
+
+  $schema->storage->dbh->do("SET TEXTSIZE $bytes");
+
+or
+
+  $schema->storage->set_textsize($bytes);
+
+instead.
+
+However, the C<LongReadLen> you pass in
+L<DBIx::Class::Storage::DBI/connect_info> is used to execute the equivalent
+C<SET TEXTSIZE> command on connection.
+
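+For example, a connection sketch setting it to 1MiB (the DSN and credentials
+are placeholders):
+
+  my $schema = My::Schema->connect(
+    $dsn, $user, $pass, { LongReadLen => 1024 * 1024 },
+  );
+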
+See L</connect_call_blob_setup> for a L<DBIx::Class::Storage::DBI/connect_info>
+setting you need to work with C<IMAGE> columns.
+
+=head1 BULK API
+
+The experimental L<DBD::Sybase> Bulk API support is used for
+L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
+on a separate connection.
+
+To use this feature effectively, use a large number of rows for each
+L<populate|DBIx::Class::ResultSet/populate> call, e.g.:
+
+  while (my $rows = $data_source->get_100_rows()) {
+    $rs->populate($rows);
+  }
+
+B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
+calls in your C<Result> classes B<must> list columns in database order for this
+to work. Also, you may have to unset the C<LANG> environment variable before
+loading your app, if it doesn't match the character set of your database.
+
+When inserting IMAGE columns using this method, you'll need to use
+L</connect_call_blob_setup> as well.
+
+=head1 TODO
+
+=over
+
+=item *
+
+Transition to AutoCommit=0 mode (i.e. starting a transaction) by exhausting
+any active cursors, using eager cursors.
+
+=item *
+
+Real limits and limited counts using stored procedures deployed on startup.
+
+=item *
+
+Adaptive Server Anywhere (ASA) support, with possible SQLA::Limit support.
+
+=item *
+
+Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
+
+=item *
+
+bulk_insert using prepare_cached (see comments.)
+
+=back
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+# vim:sts=2 sw=2:

Deleted: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -1,54 +0,0 @@
-package # hide from PAUSE
-    DBIx::Class::Storage::DBI::Sybase::Base;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::Storage::DBI/;
-use mro 'c3';
-
-=head1 NAME
-
-DBIx::Class::Storage::DBI::Sybase::Base - Common functionality for drivers using
-DBD::Sybase
-
-=cut
-
-sub _ping {
-  my $self = shift;
-
-  my $dbh = $self->_dbh or return 0;
-
-  local $dbh->{RaiseError} = 1;
-  eval {
-    $dbh->do('select 1');
-  };
-
-  return $@ ? 0 : 1;
-}
-
-sub _placeholders_supported {
-  my $self = shift;
-  my $dbh  = $self->_get_dbh;
-
-  return eval {
-# There's also $dbh->{syb_dynamic_supported} but it can be inaccurate for this
-# purpose.
-    local $dbh->{PrintError} = 0;
-    local $dbh->{RaiseError} = 1;
-# this specifically tests a bind that is NOT a string
-    $dbh->selectrow_array('select 1 where 1 = ?', {}, 1);
-  };
-}
-
-1;
-
-=head1 AUTHORS
-
-See L<DBIx::Class/CONTRIBUTORS>.
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -9,9 +9,8 @@
 /;
 use mro 'c3';
 
-sub _rebless {
+sub _init {
   my $self = shift;
-
   $self->disable_sth_caching(1);
 }
 

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -4,7 +4,7 @@
 use warnings;
 
 use base qw/
-  DBIx::Class::Storage::DBI::Sybase::Base
+  DBIx::Class::Storage::DBI::Sybase
   DBIx::Class::Storage::DBI::MSSQL
 /;
 use mro 'c3';
@@ -13,21 +13,44 @@
   my $self = shift;
   my $dbh  = $self->_get_dbh;
 
-  if (not $self->_placeholders_supported) {
+  if (not $self->_typeless_placeholders_supported) {
     bless $self,
       'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars';
     $self->_rebless;
   }
+}
 
-# LongReadLen doesn't work with MSSQL through DBD::Sybase, and the default is
-# huge on some versions of SQL server and can cause memory problems, so we
-# fix it up here.
-  my $text_size = eval { $self->_dbi_connect_info->[-1]->{LongReadLen} } ||
-    32768; # the DBD::Sybase default
+sub _run_connection_actions {
+  my $self = shift;
 
-  $dbh->do("set textsize $text_size");
+  # LongReadLen doesn't work with MSSQL through DBD::Sybase, and the default is
+  # huge on some versions of SQL server and can cause memory problems, so we
+  # fix it up here (see ::DBI::Sybase.pm)
+  $self->set_textsize;
+
+  $self->next::method(@_);
 }
 
+sub _dbh_begin_work {
+  my $self = shift;
+
+  $self->_get_dbh->do('BEGIN TRAN');
+}
+
+sub _dbh_commit {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot COMMIT on a disconnected handle');
+  $dbh->do('COMMIT');
+}
+
+sub _dbh_rollback {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
+  $dbh->do('ROLLBACK');
+}
+
 1;
 
 =head1 NAME

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -3,62 +3,128 @@
 use strict;
 use warnings;
 
-use base qw/
-    DBIx::Class::Storage::DBI::Sybase::Base
-    DBIx::Class::Storage::DBI::NoBindVars
-/;
-use mro 'c3';
+use base qw/DBIx::Class::Storage::DBI/;
 
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase - Base class for drivers using
+L<DBD::Sybase>
+
+=head1 DESCRIPTION
+
+This is the base class/dispatcher for storages designed to work with
+L<DBD::Sybase>.
+
+=head1 METHODS
+
+=cut
+
 sub _rebless {
-    my $self = shift;
+  my $self = shift;
 
-    my $dbtype = eval {
-      @{$self->_get_dbh
-        ->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})
-      }[2]
-    };
-    unless ( $@ ) {
-        $dbtype =~ s/\W/_/gi;
-        my $subclass = "DBIx::Class::Storage::DBI::Sybase::${dbtype}";
-        if ($self->load_optional_class($subclass) && !$self->isa($subclass)) {
-            bless $self, $subclass;
-            $self->_rebless;
-        }
+  my $dbtype = eval {
+    @{$self->_get_dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2]
+  };
+
+  $self->throw_exception("Unable to estable connection to determine database type: $@")
+    if $@;
+
+  if ($dbtype) {
+    $dbtype =~ s/\W/_/gi;
+
+    # saner class name
+    $dbtype = 'ASE' if $dbtype eq 'SQL_Server';
+
+    my $subclass = __PACKAGE__ . "::$dbtype";
+    if ($self->load_optional_class($subclass)) {
+      bless $self, $subclass;
+      $self->_rebless;
     }
+  }
 }
 
-sub _dbh_last_insert_id {
-    my ($self, $dbh, $source, $col) = @_;
-    return ($dbh->selectrow_array('select @@identity'))[0];
+sub _ping {
+  my $self = shift;
+
+  my $dbh = $self->_dbh or return 0;
+
+  local $dbh->{RaiseError} = 1;
+  local $dbh->{PrintError} = 0;
+
+  if ($dbh->{syb_no_child_con}) {
+# if extra connections are not allowed, then ->ping is reliable
+    my $ping = eval { $dbh->ping };
+    return $@ ? 0 : $ping;
+  }
+
+  eval {
+# XXX if the main connection goes stale, does opening another for this statement
+# really determine anything?
+    $dbh->do('select 1');
+  };
+
+  return $@ ? 0 : 1;
 }
 
-1;
+sub _set_max_connect {
+  my $self = shift;
+  my $val  = shift || 256;
 
-=head1 NAME
+  my $dsn = $self->_dbi_connect_info->[0];
 
-DBIx::Class::Storage::DBI::Sybase - Storage::DBI subclass for Sybase
+  return if ref($dsn) eq 'CODE';
 
-=head1 SYNOPSIS
+  if ($dsn !~ /maxConnect=/) {
+    $self->_dbi_connect_info->[0] = "$dsn;maxConnect=$val";
+    my $connected = defined $self->_dbh;
+    $self->disconnect;
+    $self->ensure_connected if $connected;
+  }
+}
 
-This subclass supports L<DBD::Sybase> for real Sybase databases.  If
-you are using an MSSQL database via L<DBD::Sybase>, see
-L<DBIx::Class::Storage::DBI::Sybase::MSSQL>.
+=head2 using_freetds
 
-=head1 CAVEATS
+Whether or not L<DBD::Sybase> was compiled against FreeTDS. If false, it means
+the Sybase Open Client libraries were used.
 
-This storage driver uses L<DBIx::Class::Storage::DBI::NoBindVars> as a base.
-This means that bind variables will be interpolated (properly quoted of course)
-into the SQL query itself, without using bind placeholders.
+=cut
 
-More importantly this means that caching of prepared statements is explicitly
-disabled, as the interpolation renders it useless.
+sub using_freetds {
+  my $self = shift;
 
+  return $self->_get_dbh->{syb_oc_version} =~ /freetds/i;
+}
+
+=head2 set_textsize
+
+When using FreeTDS and/or MSSQL, C<< $dbh->{LongReadLen} >> is not available;
+use this function instead. It does:
+
+  $dbh->do("SET TEXTSIZE $bytes");
+
+Takes the number of bytes; if omitted, uses the C<LongReadLen> value from your
+L<DBIx::Class/connect_info>, and finally falls back to C<32768>, the
+L<DBD::Sybase> default.
+
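+For example:
+
+  $schema->storage->set_textsize(256 * 1024); # 256KiB
+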
+=cut
+
+sub set_textsize {
+  my $self = shift;
+  my $text_size = shift ||
+    eval { $self->_dbi_connect_info->[-1]->{LongReadLen} } ||
+    32768; # the DBD::Sybase default
+
+  return unless defined $text_size;
+
+  $self->_dbh->do("SET TEXTSIZE $text_size");
+}
+
+1;
+
 =head1 AUTHORS
 
-Brandon L Black <blblack at gmail.com>
+See L<DBIx::Class/CONTRIBUTORS>.
 
-Justin Hunter <justin.d.hunter at gmail.com>
-
 =head1 LICENSE
 
 You may distribute this code under the same terms as Perl itself.


Property changes on: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI/Sybase.pm
___________________________________________________________________
Name: svn:eol-style
   - native

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBI.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -4,7 +4,7 @@
 use strict;
 use warnings;
 
-use base 'DBIx::Class::Storage';
+use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/;
 use mro 'c3';
 
 use Carp::Clan qw/^DBIx::Class/;
@@ -13,7 +13,14 @@
 use DBIx::Class::Storage::Statistics;
 use Scalar::Util();
 use List::Util();
+use Data::Dumper::Concise();
+use Sub::Name ();
 
+# what version of sqlt do we require if deploy() without a ddl_dir is invoked
+# when changing also adjust the corresponding author_require in Makefile.PL
+my $minimum_sqlt_version = '0.11002';
+
+
 __PACKAGE__->mk_group_accessors('simple' =>
   qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid
      _conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/
@@ -35,6 +42,38 @@
 __PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks');
 
 
+# Each of these methods need _determine_driver called before itself
+# in order to function reliably. This is a purely DRY optimization
+my @rdbms_specific_methods = qw/
+  sqlt_type
+  build_datetime_parser
+  datetime_parser_type
+
+  insert
+  insert_bulk
+  update
+  delete
+  select
+  select_single
+/;
+
+for my $meth (@rdbms_specific_methods) {
+
+  my $orig = __PACKAGE__->can ($meth)
+    or next;
+
+  no strict qw/refs/;
+  no warnings qw/redefine/;
+  *{__PACKAGE__ ."::$meth"} = Sub::Name::subname $meth => sub {
+    if (not $_[0]->_driver_determined) {
+      $_[0]->_determine_driver;
+      goto $_[0]->can($meth);
+    }
+    $orig->(@_);
+  };
+}
+
+
 =head1 NAME
 
 DBIx::Class::Storage::DBI - DBI storage handler
@@ -681,7 +720,8 @@
 
     $self->_do_connection_actions(disconnect_call_ => $_) for @actions;
 
-    $self->_dbh->rollback unless $self->_dbh_autocommit;
+    $self->_dbh_rollback unless $self->_dbh_autocommit;
+
     $self->_dbh->disconnect;
     $self->_dbh(undef);
     $self->{_dbh_gen}++;
@@ -706,7 +746,6 @@
 # Storage subclasses should override this
 sub with_deferred_fk_checks {
   my ($self, $sub) = @_;
-
   $sub->();
 }
 
@@ -835,7 +874,9 @@
   return $self->_sql_maker;
 }
 
+# nothing to do by default
 sub _rebless {}
+sub _init {}
 
 sub _populate_dbh {
   my ($self) = @_;
@@ -870,13 +911,14 @@
   my ($self) = @_;
 
   if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
-    my $started_unconnected = 0;
+    my $started_connected = 0;
     local $self->{_in_determine_driver} = 1;
 
     if (ref($self) eq __PACKAGE__) {
       my $driver;
       if ($self->_dbh) { # we are connected
         $driver = $self->_dbh->{Driver}{Name};
+        $started_connected = 1;
       } else {
         # if connect_info is a CODEREF, we have no choice but to connect
         if (ref $self->_dbi_connect_info->[0] &&
@@ -888,7 +930,6 @@
           # try to use dsn to not require being connected, the driver may still
           # force a connection in _rebless to determine version
           ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
-          $started_unconnected = 1;
         }
       }
 
@@ -902,8 +943,10 @@
 
     $self->_driver_determined(1);
 
+    $self->_init; # run driver-specific initializations
+
     $self->_run_connection_actions
-        if $started_unconnected && defined $self->_dbh;
+        if !$started_connected && defined $self->_dbh;
   }
 }
 
@@ -997,6 +1040,8 @@
             $weak_self->throw_exception("DBI Exception: $_[0]");
           }
           else {
+            # the handler may be invoked by something totally out of
+            # the scope of DBIC
             croak ("DBI Exception: $_[0]");
           }
       };
@@ -1106,27 +1151,35 @@
   if($self->{transaction_depth} == 0) {
     $self->debugobj->txn_begin()
       if $self->debug;
-
-    # being here implies we have AutoCommit => 1
-    # if the user is utilizing txn_do - good for
-    # him, otherwise we need to ensure that the
-    # $dbh is healthy on BEGIN
-    my $dbh_method = $self->{_in_dbh_do} ? '_dbh' : 'dbh';
-    $self->$dbh_method->begin_work;
-
-  } elsif ($self->auto_savepoint) {
+    $self->_dbh_begin_work;
+  }
+  elsif ($self->auto_savepoint) {
     $self->svp_begin;
   }
   $self->{transaction_depth}++;
 }
 
+sub _dbh_begin_work {
+  my $self = shift;
+
+  # if the user is utilizing txn_do - good for him, otherwise we need to
+  # ensure that the $dbh is healthy on BEGIN.
+  # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
+  # will be replaced by a failure of begin_work itself (which will be
+  # then retried on reconnect)
+  if ($self->{_in_dbh_do}) {
+    $self->_dbh->begin_work;
+  } else {
+    $self->dbh_do(sub { $_[1]->begin_work });
+  }
+}
+
 sub txn_commit {
   my $self = shift;
   if ($self->{transaction_depth} == 1) {
-    my $dbh = $self->_dbh;
     $self->debugobj->txn_commit()
       if ($self->debug);
-    $dbh->commit;
+    $self->_dbh_commit;
     $self->{transaction_depth} = 0
       if $self->_dbh_autocommit;
   }
@@ -1137,6 +1190,13 @@
   }
 }
 
+sub _dbh_commit {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot COMMIT on a disconnected handle');
+  $dbh->commit;
+}
+
 sub txn_rollback {
   my $self = shift;
   my $dbh = $self->_dbh;
@@ -1146,7 +1206,7 @@
         if ($self->debug);
       $self->{transaction_depth} = 0
         if $self->_dbh_autocommit;
-      $dbh->rollback;
+      $self->_dbh_rollback;
     }
     elsif($self->{transaction_depth} > 1) {
       $self->{transaction_depth}--;
@@ -1169,6 +1229,13 @@
   }
 }
 
+sub _dbh_rollback {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
+  $dbh->rollback;
+}
+
 # This used to be the top-half of _execute.  It was split out to make it
 #  easier to override in NoBindVars without duping the rest.  It takes up
 #  all of _execute's args, and emits $sql, @bind.
@@ -1269,12 +1336,6 @@
 sub insert {
   my ($self, $source, $to_insert) = @_;
 
-# redispatch to insert method of storage we reblessed into, if necessary
-  if (not $self->_driver_determined) {
-    $self->_determine_driver;
-    goto $self->can('insert');
-  }
-
   my $ident = $source->from;
   my $bind_attributes = $self->source_bind_attributes($source);
 
@@ -1306,22 +1367,103 @@
 sub insert_bulk {
   my ($self, $source, $cols, $data) = @_;
 
-# redispatch to insert_bulk method of storage we reblessed into, if necessary
-  if (not $self->_driver_determined) {
-    $self->_determine_driver;
-    goto $self->can('insert_bulk');
-  }
-
   my %colvalues;
-  my $table = $source->from;
   @colvalues{@$cols} = (0..$#$cols);
-  my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues);
 
-  $self->_query_start( $sql, @bind );
+  for my $i (0..$#$cols) {
+    my $first_val = $data->[0][$i];
+    next unless ref $first_val eq 'SCALAR';
+
+    $colvalues{ $cols->[$i] } = $first_val;
+  }
+
+  # check for bad data and stringify stringifiable objects
+  my $bad_slice = sub {
+    my ($msg, $col_idx, $slice_idx) = @_;
+    $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
+      $msg,
+      $cols->[$col_idx],
+      do {
+        local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any
+        Data::Dumper::Concise::Dumper({
+          map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
+        }),
+      }
+    );
+  };
+
+  for my $datum_idx (0..$#$data) {
+    my $datum = $data->[$datum_idx];
+
+    for my $col_idx (0..$#$cols) {
+      my $val            = $datum->[$col_idx];
+      my $sqla_bind      = $colvalues{ $cols->[$col_idx] };
+      my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
+
+      if ($is_literal_sql) {
+        if (not ref $val) {
+          $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx);
+        }
+        elsif ((my $reftype = ref $val) ne 'SCALAR') {
+          $bad_slice->("$reftype reference found where literal SQL expected",
+            $col_idx, $datum_idx);
+        }
+        elsif ($$val ne $$sqla_bind){
+          $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
+            $col_idx, $datum_idx);
+        }
+      }
+      elsif (my $reftype = ref $val) {
+        require overload;
+        if (overload::Method($val, '""')) {
+          $datum->[$col_idx] = "".$val;
+        }
+        else {
+          $bad_slice->("$reftype reference found where bind expected",
+            $col_idx, $datum_idx);
+        }
+      }
+    }
+  }
+
+  my ($sql, $bind) = $self->_prep_for_execute (
+    'insert', undef, $source, [\%colvalues]
+  );
+  my @bind = @$bind;
+
+  my $empty_bind = ( (not @bind) &&
+    (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols
+  ) ? 1 : 0;
+
+  if ((not @bind) && (not $empty_bind)) {
+    $self->throw_exception(
+      'Cannot insert_bulk without support for placeholders'
+    );
+  }
+
+  $self->_query_start( $sql, ['__BULK__'] );
   my $sth = $self->sth($sql);
 
-#  @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+  my $rv = do {
+    if ($empty_bind) {
+      # bind_param_array doesn't work if there are no binds
+      $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
+    }
+    else {
+#      @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+      $self->_execute_array( $source, $sth, \@bind, $cols, $data );
+    }
+  };
 
+  $self->_query_end( $sql, ['__BULK__'] );
+
+  return (wantarray ? ($rv, $sth, @bind) : $rv);
+}
+
+sub _execute_array {
+  my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
+
+  my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0;
+
   ## This must be an arrayref, else nothing works!
   my $tuple_status = [];
 
@@ -1331,7 +1473,7 @@
   ## Bind the values and execute
   my $placeholder_index = 1;
 
-  foreach my $bound (@bind) {
+  foreach my $bound (@$bind) {
 
     my $attributes = {};
     my ($column_name, $data_index) = @$bound;
@@ -1346,58 +1488,89 @@
     $sth->bind_param_array( $placeholder_index, [@data], $attributes );
     $placeholder_index++;
   }
-  my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) };
-  if (my $err = $@) {
+
+  my $rv = eval {
+    $self->_dbh_execute_array($sth, $tuple_status, @extra);
+  };
+  my $err = $@ || $sth->errstr;
+
+# Statement must finish even if there was an exception.
+  eval { $sth->finish };
+  $err = $@ unless $err;
+
+  if ($err) {
     my $i = 0;
     ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
 
-    $self->throw_exception($sth->errstr || "Unexpected populate error: $err")
+    $self->throw_exception("Unexpected populate error: $err")
       if ($i > $#$tuple_status);
 
-    require Data::Dumper;
-    local $Data::Dumper::Terse = 1;
-    local $Data::Dumper::Indent = 1;
-    local $Data::Dumper::Useqq = 1;
-    local $Data::Dumper::Quotekeys = 0;
-    local $Data::Dumper::Sortkeys = 1;
-
     $self->throw_exception(sprintf "%s for populate slice:\n%s",
-      $tuple_status->[$i][1],
-      Data::Dumper::Dumper(
-        { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) }
-      ),
+      ($tuple_status->[$i][1] || $err),
+      Data::Dumper::Concise::Dumper({
+        map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols)
+      }),
     );
   }
-  $self->throw_exception($sth->errstr) if !$rv;
 
-  $self->_query_end( $sql, @bind );
-  return (wantarray ? ($rv, $sth, @bind) : $rv);
+  $guard->commit if $guard;
+
+  return $rv;
 }
 
+sub _dbh_execute_array {
+    my ($self, $sth, $tuple_status, @extra) = @_;
+
+    return $sth->execute_array({ArrayTupleStatus => $tuple_status});
+}
+
+sub _dbh_execute_inserts_with_no_binds {
+  my ($self, $sth, $count) = @_;
+
+  my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0;
+
+  eval {
+    my $dbh = $self->_get_dbh;
+    local $dbh->{RaiseError} = 1;
+    local $dbh->{PrintError} = 0;
+
+    $sth->execute foreach 1..$count;
+  };
+  my $exception = $@;
+
+# Make sure statement is finished even if there was an exception.
+  eval { $sth->finish };
+  $exception = $@ unless $exception;
+
+  $self->throw_exception($exception) if $exception;
+
+  $guard->commit if $guard;
+
+  return $count;
+}
+
 sub update {
-  my $self = shift @_;
-  my $source = shift @_;
-  $self->_determine_driver;
-  my $bind_attributes = $self->source_bind_attributes($source);
+  my ($self, $source, @args) = @_;
 
-  return $self->_execute('update' => [], $source, $bind_attributes, @_);
+  my $bind_attrs = $self->source_bind_attributes($source);
+
+  return $self->_execute('update' => [], $source, $bind_attrs, @args);
 }
 
 
 sub delete {
-  my $self = shift @_;
-  my $source = shift @_;
-  $self->_determine_driver;
+  my ($self, $source, @args) = @_;
+
   my $bind_attrs = $self->source_bind_attributes($source);
 
-  return $self->_execute('delete' => [], $source, $bind_attrs, @_);
+  return $self->_execute('delete' => [], $source, $bind_attrs, @args);
 }
 
 # We were sent here because the $rs contains a complex search
 # which will require a subquery to select the correct rows
-# (i.e. joined or limited resultsets)
+# (i.e. joined or limited resultsets, or non-introspectable conditions)
 #
-# Genarating a single PK column subquery is trivial and supported
+# Generating a single PK column subquery is trivial and supported
 # by all RDBMS. However if we have a multicolumn PK, things get ugly.
 # Look at _multipk_update_delete()
 sub _subq_update_delete {
@@ -1406,14 +1579,19 @@
 
   my $rsrc = $rs->result_source;
 
-  # we already check this, but double check naively just in case. Should be removed soon
+  # quick check if we got a sane rs on our hands
+  my @pcols = $rsrc->primary_columns;
+
   my $sel = $rs->_resolved_attrs->{select};
   $sel = [ $sel ] unless ref $sel eq 'ARRAY';
-  my @pcols = $rsrc->primary_columns;
-  if (@$sel != @pcols) {
+
+  if (
+      join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
+        ne
+      join ("\x00", sort @$sel )
+  ) {
     $self->throw_exception (
-      'Subquery update/delete can not be called on resultsets selecting a'
-     .' number of columns different than the number of primary keys'
+      '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
     );
   }
 
@@ -1615,324 +1793,6 @@
   return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit);
 }
 
-#
-# This is the code producing joined subqueries like:
-# SELECT me.*, other.* FROM ( SELECT me.* FROM ... ) JOIN other ON ... 
-#
-sub _adjust_select_args_for_complex_prefetch {
-  my ($self, $from, $select, $where, $attrs) = @_;
-
-  $self->throw_exception ('Nothing to prefetch... how did we get here?!')
-    if not @{$attrs->{_prefetch_select}};
-
-  $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
-    if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY');
-
-
-  # generate inner/outer attribute lists, remove stuff that doesn't apply
-  my $outer_attrs = { %$attrs };
-  delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
-
-  my $inner_attrs = { %$attrs };
-  delete $inner_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
-
-
-  # bring over all non-collapse-induced order_by into the inner query (if any)
-  # the outer one will have to keep them all
-  delete $inner_attrs->{order_by};
-  if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
-    $inner_attrs->{order_by} = [
-      @{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
-    ];
-  }
-
-
-  # generate the inner/outer select lists
-  # for inside we consider only stuff *not* brought in by the prefetch
-  # on the outside we substitute any function for its alias
-  my $outer_select = [ @$select ];
-  my $inner_select = [];
-  for my $i (0 .. ( @$outer_select - @{$outer_attrs->{_prefetch_select}} - 1) ) {
-    my $sel = $outer_select->[$i];
-
-    if (ref $sel eq 'HASH' ) {
-      $sel->{-as} ||= $attrs->{as}[$i];
-      $outer_select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "inner_column_$i") );
-    }
-
-    push @$inner_select, $sel;
-  }
-
-  # normalize a copy of $from, so it will be easier to work with further
-  # down (i.e. promote the initial hashref to an AoH)
-  $from = [ @$from ];
-  $from->[0] = [ $from->[0] ];
-  my %original_join_info = map { $_->[0]{-alias} => $_->[0] } (@$from);
-
-
-  # decide which parts of the join will remain in either part of
-  # the outer/inner query
-
-  # First we compose a list of which aliases are used in restrictions
-  # (i.e. conditions/order/grouping/etc). Since we do not have
-  # introspectable SQLA, we fall back to ugly scanning of raw SQL for
-  # WHERE, and for pieces of ORDER BY in order to determine which aliases
-  # need to appear in the resulting sql.
-  # It may not be very efficient, but it's a reasonable stop-gap
-  # Also unqualified column names will not be considered, but more often
-  # than not this is actually ok
-  #
-  # In the same loop we enumerate part of the selection aliases, as
-  # it requires the same sqla hack for the time being
-  my ($restrict_aliases, $select_aliases, $prefetch_aliases);
-  {
-    # produce stuff unquoted, so it can be scanned
-    my $sql_maker = $self->sql_maker;
-    local $sql_maker->{quote_char};
-    my $sep = $self->_sql_maker_opts->{name_sep} || '.';
-    $sep = "\Q$sep\E";
-
-    my $non_prefetch_select_sql = $sql_maker->_recurse_fields ($inner_select);
-    my $prefetch_select_sql = $sql_maker->_recurse_fields ($outer_attrs->{_prefetch_select});
-    my $where_sql = $sql_maker->where ($where);
-    my $group_by_sql = $sql_maker->_order_by({
-      map { $_ => $inner_attrs->{$_} } qw/group_by having/
-    });
-    my @non_prefetch_order_by_chunks = (map
-      { ref $_ ? $_->[0] : $_ }
-      $sql_maker->_order_by_chunks ($inner_attrs->{order_by})
-    );
-
-
-    for my $alias (keys %original_join_info) {
-      my $seen_re = qr/\b $alias $sep/x;
-
-      for my $piece ($where_sql, $group_by_sql, @non_prefetch_order_by_chunks ) {
-        if ($piece =~ $seen_re) {
-          $restrict_aliases->{$alias} = 1;
-        }
-      }
-
-      if ($non_prefetch_select_sql =~ $seen_re) {
-          $select_aliases->{$alias} = 1;
-      }
-
-      if ($prefetch_select_sql =~ $seen_re) {
-          $prefetch_aliases->{$alias} = 1;
-      }
-
-    }
-  }
-
-  # Add any non-left joins to the restriction list (such joins are indeed restrictions)
-  for my $j (values %original_join_info) {
-    my $alias = $j->{-alias} or next;
-    $restrict_aliases->{$alias} = 1 if (
-      (not $j->{-join_type})
-        or
-      ($j->{-join_type} !~ /^left (?: \s+ outer)? $/xi)
-    );
-  }
-
-  # mark all join parents as mentioned
-  # (e.g.  join => { cds => 'tracks' } - tracks will need to bring cds too )
-  for my $collection ($restrict_aliases, $select_aliases) {
-    for my $alias (keys %$collection) {
-      $collection->{$_} = 1
-        for (@{ $original_join_info{$alias}{-join_path} || [] });
-    }
-  }
-
-  # construct the inner $from for the subquery
-  my %inner_joins = (map { %{$_ || {}} } ($restrict_aliases, $select_aliases) );
-  my @inner_from;
-  for my $j (@$from) {
-    push @inner_from, $j if $inner_joins{$j->[0]{-alias}};
-  }
-
-  # if a multi-type join was needed in the subquery ("multi" is indicated by
-  # presence in {collapse}) - add a group_by to simulate the collapse in the subq
-  unless ($inner_attrs->{group_by}) {
-    for my $alias (keys %inner_joins) {
-
-      # the dot comes from some weirdness in collapse
-      # remove after the rewrite
-      if ($attrs->{collapse}{".$alias"}) {
-        $inner_attrs->{group_by} ||= $inner_select;
-        last;
-      }
-    }
-  }
-
-  # demote the inner_from head
-  $inner_from[0] = $inner_from[0][0];
-
-  # generate the subquery
-  my $subq = $self->_select_args_to_query (
-    \@inner_from,
-    $inner_select,
-    $where,
-    $inner_attrs,
-  );
-
-  my $subq_joinspec = {
-    -alias => $attrs->{alias},
-    -source_handle => $inner_from[0]{-source_handle},
-    $attrs->{alias} => $subq,
-  };
-
-  # Generate the outer from - this is relatively easy (really just replace
-# the join slot with the subquery), with a major caveat - we cannot
-  # join anything that is non-selecting (not part of the prefetch), but at
-  # the same time is a multi-type relationship, as it will explode the result.
-  #
-  # There are two possibilities here
-  # - either the join is non-restricting, in which case we simply throw it away
-  # - it is part of the restrictions, in which case we need to collapse the outer
-#   result by tacking yet another group_by onto the outside of the query
-
-  # so first generate the outer_from, up to the substitution point
-  my @outer_from;
-  while (my $j = shift @$from) {
-    if ($j->[0]{-alias} eq $attrs->{alias}) { # time to swap
-      push @outer_from, [
-        $subq_joinspec,
-        @{$j}[1 .. $#$j],
-      ];
-      last; # we'll take care of what's left in $from below
-    }
-    else {
-      push @outer_from, $j;
-    }
-  }
-
-  # see what's left - throw away if not selecting/restricting
-  # also throw in a group_by if restricting to guard against
-  # cross-join explosions
-  #
-  while (my $j = shift @$from) {
-    my $alias = $j->[0]{-alias};
-
-    if ($select_aliases->{$alias} || $prefetch_aliases->{$alias}) {
-      push @outer_from, $j;
-    }
-    elsif ($restrict_aliases->{$alias}) {
-      push @outer_from, $j;
-
-      # FIXME - this should be obviated by SQLA2, as I'll be able to 
-      # have restrict_inner and restrict_outer... or something to that
-      # effect... I think...
-
-      # FIXME2 - I can't find a clean way to determine if a particular join
-      # is a multi - instead I am just treating everything as a potential
-      # explosive join (ribasushi)
-      #
-      # if (my $handle = $j->[0]{-source_handle}) {
-      #   my $rsrc = $handle->resolve;
-      #   ... need to bail out of the following if this is not a multi,
-      #       as it will be much easier on the db ...
-
-          $outer_attrs->{group_by} ||= $outer_select;
-      # }
-    }
-  }
-
-  # demote the outer_from head
-  $outer_from[0] = $outer_from[0][0];
-
-  # This is totally horrific - the $where ends up in both the inner and outer query
-  # Unfortunately not much can be done until SQLA2 introspection arrives, and even
-  # then if where conditions apply to the *right* side of the prefetch, you may have
-  # to both filter the inner select (e.g. to apply a limit) and then have to re-filter
-# the outer select to exclude joins you didn't want in the first place
-  #
-  # OTOH it can be seen as a plus: <ash> (notes that this query would make a DBA cry ;)
-  return (\@outer_from, $outer_select, $where, $outer_attrs);
-}
-
-sub _resolve_ident_sources {
-  my ($self, $ident) = @_;
-
-  my $alias2source = {};
-  my $rs_alias;
-
-  # the reason this is so contrived is that $ident may be a {from}
-  # structure, specifying multiple tables to join
-  if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
-    # this is compat mode for insert/update/delete which do not deal with aliases
-    $alias2source->{me} = $ident;
-    $rs_alias = 'me';
-  }
-  elsif (ref $ident eq 'ARRAY') {
-
-    for (@$ident) {
-      my $tabinfo;
-      if (ref $_ eq 'HASH') {
-        $tabinfo = $_;
-        $rs_alias = $tabinfo->{-alias};
-      }
-      if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') {
-        $tabinfo = $_->[0];
-      }
-
-      $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-source_handle}->resolve
-        if ($tabinfo->{-source_handle});
-    }
-  }
-
-  return ($alias2source, $rs_alias);
-}
-
-# Takes $ident, \@column_names
-#
-# returns { $column_name => \%column_info, ... }
-# also note: this adds -result_source => $rsrc to the column info
-#
-# usage:
-#   my $col_sources = $self->_resolve_column_info($ident, @column_names);
-sub _resolve_column_info {
-  my ($self, $ident, $colnames) = @_;
-  my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident);
-
-  my $sep = $self->_sql_maker_opts->{name_sep} || '.';
-  $sep = "\Q$sep\E";
-
-  my (%return, %seen_cols);
-
-  # compile a global list of column names, to be able to properly
-  # disambiguate unqualified column names (if at all possible)
-  for my $alias (keys %$alias2src) {
-    my $rsrc = $alias2src->{$alias};
-    for my $colname ($rsrc->columns) {
-      push @{$seen_cols{$colname}}, $alias;
-    }
-  }
-
-  COLUMN:
-  foreach my $col (@$colnames) {
-    my ($alias, $colname) = $col =~ m/^ (?: ([^$sep]+) $sep)? (.+) $/x;
-
-    unless ($alias) {
-      # see if the column was seen exactly once (so we know which rsrc it came from)
-      if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1) {
-        $alias = $seen_cols{$colname}[0];
-      }
-      else {
-        next COLUMN;
-      }
-    }
-
-    my $rsrc = $alias2src->{$alias};
-    $return{$col} = $rsrc && {
-      %{$rsrc->column_info($colname)},
-      -result_source => $rsrc,
-      -source_alias => $alias,
-    };
-  }
-
-  return \%return;
-}
-
 # Returns a counting SELECT for a simple count
 # query. Abstracted so that a storage could override
 # this to { count => 'firstcol' } or whatever makes
@@ -1957,7 +1817,6 @@
   return @pcols ? \@pcols : [ 1 ];
 }
 
-
 sub source_bind_attributes {
   my ($self, $source) = @_;
 
@@ -2133,7 +1992,7 @@
 
 This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
 currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
-L<::Sybase|DBIx::Class::Storage::DBI::Sybase>.
+L<::Sybase::ASE|DBIx::Class::Storage::DBI::Sybase::ASE>.
 
 The default implementation returns C<undef>, implement in your Storage driver if
 you need this functionality.
@@ -2154,6 +2013,36 @@
   return undef
 }
 
+# Check if placeholders are supported at all
+sub _placeholders_supported {
+  my $self = shift;
+  my $dbh  = $self->_get_dbh;
+
+  # some drivers provide a $dbh attribute (e.g. Sybase's $dbh->{syb_dynamic_supported})
+  # but it is inaccurate more often than not
+  eval {
+    local $dbh->{PrintError} = 0;
+    local $dbh->{RaiseError} = 1;
+    $dbh->do('select ?', {}, 1);
+  };
+  return $@ ? 0 : 1;
+}
+
+# Check if placeholders bound to non-string types throw exceptions
+#
+sub _typeless_placeholders_supported {
+  my $self = shift;
+  my $dbh  = $self->_get_dbh;
+
+  eval {
+    local $dbh->{PrintError} = 0;
+    local $dbh->{RaiseError} = 1;
+    # this specifically tests a bind that is NOT a string
+    $dbh->do('select 1 where 1 = ?', {}, 1);
+  };
+  return $@ ? 0 : 1;
+}
+
 =head2 sqlt_type
 
 Returns the database driver name.
@@ -2161,14 +2050,7 @@
 =cut
 
 sub sqlt_type {
-  my ($self) = @_;
-
-  if (not $self->_driver_determined) {
-    $self->_determine_driver;
-    goto $self->can ('sqlt_type');
-  }
-
-  $self->_get_dbh->{Driver}->{Name};
+  shift->_get_dbh->{Driver}->{Name};
 }
 
 =head2 bind_attribute_by_data_type
@@ -2442,7 +2324,11 @@
     parser => 'SQL::Translator::Parser::DBIx::Class',
     data => $schema,
   );
-  return $tr->translate;
+
+  my $ret = $tr->translate
+    or $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error);
+
+  return $ret;
 }
 
 sub deploy {
@@ -2508,11 +2394,6 @@
 =cut
 
 sub build_datetime_parser {
-  if (not $_[0]->_driver_determined) {
-    $_[0]->_determine_driver;
-    goto $_[0]->can('build_datetime_parser');
-  }
-
   my $self = shift;
   my $type = $self->datetime_parser_type(@_);
   $self->ensure_class_loaded ($type);
@@ -2545,6 +2426,33 @@
     return;
 }
 
+# SQLT version handling
+{
+  my $_sqlt_version_ok;     # private
+  my $_sqlt_version_error;  # private
+
+  sub _sqlt_version_ok {
+    if (!defined $_sqlt_version_ok) {
+      eval "use SQL::Translator $minimum_sqlt_version";
+      if ($@) {
+        $_sqlt_version_ok = 0;
+        $_sqlt_version_error = $@;
+      }
+      else {
+        $_sqlt_version_ok = 1;
+      }
+    }
+    return $_sqlt_version_ok;
+  }
+
+  sub _sqlt_version_error {
+    shift->_sqlt_version_ok unless defined $_sqlt_version_ok;
+    return $_sqlt_version_error;
+  }
+
+  sub _sqlt_minimum_version { $minimum_sqlt_version };
+}
+
 sub DESTROY {
   my $self = shift;
 

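The two probe methods added above let a storage class ask the live connection about placeholder support instead of trusting driver metadata. A minimal sketch of consulting them, assuming a connected DBICTest schema as used throughout t/ (the branching below is illustrative, not part of this commit):

    use strict;
    use warnings;
    use lib 't/lib';
    use DBICTest;

    my $storage = DBICTest->init_schema->storage;

    if (!$storage->_placeholders_supported) {
      # the situation the new ::Sybase::ASE::NoBindVars driver handles:
      # no binds at all, values must be interpolated as quoted literals
      print "no placeholder support\n";
    }
    elsif (!$storage->_typeless_placeholders_supported) {
      # binds work, but only with explicit types
      # (see bind_attribute_by_data_type below)
      print "typed placeholders only\n";
    }
    else {
      print "full placeholder support\n";
    }
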
Added: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBIHacks.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBIHacks.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/DBIHacks.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -0,0 +1,469 @@
+package   #hide from PAUSE
+  DBIx::Class::Storage::DBIHacks;
+
+#
+# This module contains code that should never have seen the light of day,
+# does not belong in the Storage, or is otherwise unfit for public
+# display. The arrival of SQLA2 should immediately obsolete 90% of this
+#
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class::Storage';
+use mro 'c3';
+
+use Carp::Clan qw/^DBIx::Class/;
+
+#
+# This is the code producing joined subqueries like:
+# SELECT me.*, other.* FROM ( SELECT me.* FROM ... ) JOIN other ON ... 
+#
+sub _adjust_select_args_for_complex_prefetch {
+  my ($self, $from, $select, $where, $attrs) = @_;
+
+  $self->throw_exception ('Nothing to prefetch... how did we get here?!')
+    if not @{$attrs->{_prefetch_select}};
+
+  $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
+    if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY');
+
+
+  # generate inner/outer attribute lists, remove stuff that doesn't apply
+  my $outer_attrs = { %$attrs };
+  delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
+
+  my $inner_attrs = { %$attrs };
+  delete $inner_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
+
+
+  # bring over all non-collapse-induced order_by into the inner query (if any)
+  # the outer one will have to keep them all
+  delete $inner_attrs->{order_by};
+  if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
+    $inner_attrs->{order_by} = [
+      @{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
+    ];
+  }
+
+
+  # generate the inner/outer select lists
+  # for inside we consider only stuff *not* brought in by the prefetch
+  # on the outside we substitute any function for its alias
+  my $outer_select = [ @$select ];
+  my $inner_select = [];
+  for my $i (0 .. ( @$outer_select - @{$outer_attrs->{_prefetch_select}} - 1) ) {
+    my $sel = $outer_select->[$i];
+
+    if (ref $sel eq 'HASH' ) {
+      $sel->{-as} ||= $attrs->{as}[$i];
+      $outer_select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "inner_column_$i") );
+    }
+
+    push @$inner_select, $sel;
+  }
+
+  # normalize a copy of $from, so it will be easier to work with further
+  # down (i.e. promote the initial hashref to an AoH)
+  $from = [ @$from ];
+  $from->[0] = [ $from->[0] ];
+  my %original_join_info = map { $_->[0]{-alias} => $_->[0] } (@$from);
+
+
+  # decide which parts of the join will remain in either part of
+  # the outer/inner query
+
+  # First we compose a list of which aliases are used in restrictions
+  # (i.e. conditions/order/grouping/etc). Since we do not have
+  # introspectable SQLA, we fall back to ugly scanning of raw SQL for
+  # WHERE, and for pieces of ORDER BY in order to determine which aliases
+  # need to appear in the resulting sql.
+  # It may not be very efficient, but it's a reasonable stop-gap
+  # Also unqualified column names will not be considered, but more often
+  # than not this is actually ok
+  #
+  # In the same loop we enumerate part of the selection aliases, as
+  # it requires the same sqla hack for the time being
+  my ($restrict_aliases, $select_aliases, $prefetch_aliases);
+  {
+    # produce stuff unquoted, so it can be scanned
+    my $sql_maker = $self->sql_maker;
+    local $sql_maker->{quote_char};
+    my $sep = $self->_sql_maker_opts->{name_sep} || '.';
+    $sep = "\Q$sep\E";
+
+    my $non_prefetch_select_sql = $sql_maker->_recurse_fields ($inner_select);
+    my $prefetch_select_sql = $sql_maker->_recurse_fields ($outer_attrs->{_prefetch_select});
+    my $where_sql = $sql_maker->where ($where);
+    my $group_by_sql = $sql_maker->_order_by({
+      map { $_ => $inner_attrs->{$_} } qw/group_by having/
+    });
+    my @non_prefetch_order_by_chunks = (map
+      { ref $_ ? $_->[0] : $_ }
+      $sql_maker->_order_by_chunks ($inner_attrs->{order_by})
+    );
+
+
+    for my $alias (keys %original_join_info) {
+      my $seen_re = qr/\b $alias $sep/x;
+
+      for my $piece ($where_sql, $group_by_sql, @non_prefetch_order_by_chunks ) {
+        if ($piece =~ $seen_re) {
+          $restrict_aliases->{$alias} = 1;
+        }
+      }
+
+      if ($non_prefetch_select_sql =~ $seen_re) {
+          $select_aliases->{$alias} = 1;
+      }
+
+      if ($prefetch_select_sql =~ $seen_re) {
+          $prefetch_aliases->{$alias} = 1;
+      }
+
+    }
+  }
+
+  # Add any non-left joins to the restriction list (such joins are indeed restrictions)
+  for my $j (values %original_join_info) {
+    my $alias = $j->{-alias} or next;
+    $restrict_aliases->{$alias} = 1 if (
+      (not $j->{-join_type})
+        or
+      ($j->{-join_type} !~ /^left (?: \s+ outer)? $/xi)
+    );
+  }
+
+  # mark all join parents as mentioned
+  # (e.g.  join => { cds => 'tracks' } - tracks will need to bring cds too )
+  for my $collection ($restrict_aliases, $select_aliases) {
+    for my $alias (keys %$collection) {
+      $collection->{$_} = 1
+        for (@{ $original_join_info{$alias}{-join_path} || [] });
+    }
+  }
+
+  # construct the inner $from for the subquery
+  my %inner_joins = (map { %{$_ || {}} } ($restrict_aliases, $select_aliases) );
+  my @inner_from;
+  for my $j (@$from) {
+    push @inner_from, $j if $inner_joins{$j->[0]{-alias}};
+  }
+
+  # if a multi-type join was needed in the subquery ("multi" is indicated by
+  # presence in {collapse}) - add a group_by to simulate the collapse in the subq
+  unless ($inner_attrs->{group_by}) {
+    for my $alias (keys %inner_joins) {
+
+      # the dot comes from some weirdness in collapse
+      # remove after the rewrite
+      if ($attrs->{collapse}{".$alias"}) {
+        $inner_attrs->{group_by} ||= $inner_select;
+        last;
+      }
+    }
+  }
+
+  # demote the inner_from head
+  $inner_from[0] = $inner_from[0][0];
+
+  # generate the subquery
+  my $subq = $self->_select_args_to_query (
+    \@inner_from,
+    $inner_select,
+    $where,
+    $inner_attrs,
+  );
+
+  my $subq_joinspec = {
+    -alias => $attrs->{alias},
+    -source_handle => $inner_from[0]{-source_handle},
+    $attrs->{alias} => $subq,
+  };
+
+  # Generate the outer from - this is relatively easy (really just replace
+# the join slot with the subquery), with a major caveat - we cannot
+  # join anything that is non-selecting (not part of the prefetch), but at
+  # the same time is a multi-type relationship, as it will explode the result.
+  #
+  # There are two possibilities here
+  # - either the join is non-restricting, in which case we simply throw it away
+  # - it is part of the restrictions, in which case we need to collapse the outer
+#   result by tacking yet another group_by onto the outside of the query
+
+  # so first generate the outer_from, up to the substitution point
+  my @outer_from;
+  while (my $j = shift @$from) {
+    if ($j->[0]{-alias} eq $attrs->{alias}) { # time to swap
+      push @outer_from, [
+        $subq_joinspec,
+        @{$j}[1 .. $#$j],
+      ];
+      last; # we'll take care of what's left in $from below
+    }
+    else {
+      push @outer_from, $j;
+    }
+  }
+
+  # see what's left - throw away if not selecting/restricting
+  # also throw in a group_by if restricting to guard against
+  # cross-join explosions
+  #
+  while (my $j = shift @$from) {
+    my $alias = $j->[0]{-alias};
+
+    if ($select_aliases->{$alias} || $prefetch_aliases->{$alias}) {
+      push @outer_from, $j;
+    }
+    elsif ($restrict_aliases->{$alias}) {
+      push @outer_from, $j;
+
+      # FIXME - this should be obviated by SQLA2, as I'll be able to 
+      # have restrict_inner and restrict_outer... or something to that
+      # effect... I think...
+
+      # FIXME2 - I can't find a clean way to determine if a particular join
+      # is a multi - instead I am just treating everything as a potential
+      # explosive join (ribasushi)
+      #
+      # if (my $handle = $j->[0]{-source_handle}) {
+      #   my $rsrc = $handle->resolve;
+      #   ... need to bail out of the following if this is not a multi,
+      #       as it will be much easier on the db ...
+
+          $outer_attrs->{group_by} ||= $outer_select;
+      # }
+    }
+  }
+
+  # demote the outer_from head
+  $outer_from[0] = $outer_from[0][0];
+
+  # This is totally horrific - the $where ends up in both the inner and outer query
+  # Unfortunately not much can be done until SQLA2 introspection arrives, and even
+  # then if where conditions apply to the *right* side of the prefetch, you may have
+  # to both filter the inner select (e.g. to apply a limit) and then have to re-filter
+# the outer select to exclude joins you didn't want in the first place
+  #
+  # OTOH it can be seen as a plus: <ash> (notes that this query would make a DBA cry ;)
+  return (\@outer_from, $outer_select, $where, $outer_attrs);
+}
+
+sub _resolve_ident_sources {
+  my ($self, $ident) = @_;
+
+  my $alias2source = {};
+  my $rs_alias;
+
+  # the reason this is so contrived is that $ident may be a {from}
+  # structure, specifying multiple tables to join
+  if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
+    # this is compat mode for insert/update/delete which do not deal with aliases
+    $alias2source->{me} = $ident;
+    $rs_alias = 'me';
+  }
+  elsif (ref $ident eq 'ARRAY') {
+
+    for (@$ident) {
+      my $tabinfo;
+      if (ref $_ eq 'HASH') {
+        $tabinfo = $_;
+        $rs_alias = $tabinfo->{-alias};
+      }
+      if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') {
+        $tabinfo = $_->[0];
+      }
+
+      $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-source_handle}->resolve
+        if ($tabinfo->{-source_handle});
+    }
+  }
+
+  return ($alias2source, $rs_alias);
+}
+
+# Takes $ident, \@column_names
+#
+# returns { $column_name => \%column_info, ... }
+# also note: this adds -result_source => $rsrc to the column info
+#
+# If no column_names are supplied returns info about *all* columns
+# for all sources
+sub _resolve_column_info {
+  my ($self, $ident, $colnames) = @_;
+  my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident);
+
+  my $sep = $self->_sql_maker_opts->{name_sep} || '.';
+  my $qsep = quotemeta $sep;
+
+  my (%return, %seen_cols, @auto_colnames);
+
+  # compile a global list of column names, to be able to properly
+  # disambiguate unqualified column names (if at all possible)
+  for my $alias (keys %$alias2src) {
+    my $rsrc = $alias2src->{$alias};
+    for my $colname ($rsrc->columns) {
+      push @{$seen_cols{$colname}}, $alias;
+      push @auto_colnames, "$alias$sep$colname" unless $colnames;
+    }
+  }
+
+  $colnames ||= [
+    @auto_colnames,
+    grep { @{$seen_cols{$_}} == 1 } (keys %seen_cols),
+  ];
+
+  COLUMN:
+  foreach my $col (@$colnames) {
+    my ($alias, $colname) = $col =~ m/^ (?: ([^$qsep]+) $qsep)? (.+) $/x;
+
+    unless ($alias) {
+      # see if the column was seen exactly once (so we know which rsrc it came from)
+      if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1) {
+        $alias = $seen_cols{$colname}[0];
+      }
+      else {
+        next COLUMN;
+      }
+    }
+
+    my $rsrc = $alias2src->{$alias};
+    $return{$col} = $rsrc && {
+      %{$rsrc->column_info($colname)},
+      -result_source => $rsrc,
+      -source_alias => $alias,
+    };
+  }
+
+  return \%return;
+}
+
+# The DBIC relationship chaining implementation is pretty simple - every
+# new related_relationship is pushed onto the {from} stack, and the {select}
+# window simply slides further in. This means that when we count somewhere
+# in the middle, we have to make sure that everything in the join chain is an
+# actual inner join, otherwise the count will come back with unpredictable
+# results (a resultset may be generated with _some_ rows regardless of
+# whether the relation which the $rs currently selects has rows or not). E.g.
+# $artist_rs->cds->count - normally generates:
+# SELECT COUNT( * ) FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid
+# which actually returns the number of artists * (number of cds || 1)
+#
+# So what we do here is crawl {from}, determine if the current alias is at
+# the top of the stack, and if not - make sure the chain is inner-joined down
+# to the root.
+#
+sub _straight_join_to_node {
+  my ($self, $from, $alias) = @_;
+
+  # subqueries and other oddness are naturally not supported
+  return $from if (
+    ref $from ne 'ARRAY'
+      ||
+    @$from <= 1
+      ||
+    ref $from->[0] ne 'HASH'
+      ||
+    ! $from->[0]{-alias}
+      ||
+    $from->[0]{-alias} eq $alias  # this last bit means $alias is the head of $from - nothing to do
+  );
+
+  # find the current $alias in the $from structure
+  my $switch_branch;
+  JOINSCAN:
+  for my $j (@{$from}[1 .. $#$from]) {
+    if ($j->[0]{-alias} eq $alias) {
+      $switch_branch = $j->[0]{-join_path};
+      last JOINSCAN;
+    }
+  }
+
+  # something else went quite wrong
+  return $from unless $switch_branch;
+
+  # So it looks like we will have to switch some stuff around.
+  # local() is useless here as we will be leaving the scope
+  # anyway, and deep cloning is just too fucking expensive
+  # So replace the first hashref in the node arrayref manually 
+  my @new_from = ($from->[0]);
+  my $sw_idx = { map { $_ => 1 } @$switch_branch };
+
+  for my $j (@{$from}[1 .. $#$from]) {
+    my $jalias = $j->[0]{-alias};
+
+    if ($sw_idx->{$jalias}) {
+      my %attrs = %{$j->[0]};
+      delete $attrs{-join_type};
+      push @new_from, [
+        \%attrs,
+        @{$j}[ 1 .. $#$j ],
+      ];
+    }
+    else {
+      push @new_from, $j;
+    }
+  }
+
+  return \@new_from;
+}
+
+# Most databases do not allow aliasing of tables in UPDATE/DELETE. Thus
+# a condition containing 'me' or other table prefixes will not work
+# at all. What this code tries to do (badly) is introspect the condition
+# and remove all column qualifiers. If it bails out early (returns undef)
+# the calling code should try another approach (e.g. a subquery)
+sub _strip_cond_qualifiers {
+  my ($self, $where) = @_;
+
+  my $cond = {};
+
+  # No-op. No condition, we're updating/deleting everything
+  return $cond unless $where;
+
+  if (ref $where eq 'ARRAY') {
+    $cond = [
+      map {
+        my %hash;
+        foreach my $key (keys %{$_}) {
+          $key =~ /([^.]+)$/;
+          $hash{$1} = $_->{$key};
+        }
+        \%hash;
+      } @$where
+    ];
+  }
+  elsif (ref $where eq 'HASH') {
+    if ( (keys %$where) == 1 && ( (keys %{$where})[0] eq '-and' )) {
+      $cond->{-and} = [];
+      my @cond = @{$where->{-and}};
+       for (my $i = 0; $i < @cond; $i++) {
+        my $entry = $cond[$i];
+        my $hash;
+        if (ref $entry eq 'HASH') {
+          $hash = $self->_strip_cond_qualifiers($entry);
+        }
+        else {
+          $entry =~ /([^.]+)$/;
+          $hash->{$1} = $cond[++$i];
+        }
+        push @{$cond->{-and}}, $hash;
+      }
+    }
+    else {
+      foreach my $key (keys %$where) {
+        $key =~ /([^.]+)$/;
+        $cond->{$1} = $where->{$key};
+      }
+    }
+  }
+  else {
+    return undef;
+  }
+
+  return $cond;
+}
+
+
+1;

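For context on the first and largest hack in this file: _adjust_select_args_for_complex_prefetch fires when a multi-relationship prefetch is combined with attributes like rows/offset. A sketch of a triggering resultset and the rough shape of the SQL it yields, using the stock DBICTest schema (the SQL is illustrative, not verbatim output):

    use lib 't/lib';
    use DBICTest;

    my $schema = DBICTest->init_schema;

    # limiting while prefetching a has_many cannot be done with a flat
    # join - the LIMIT would count joined rows, not artists
    my $rs = $schema->resultset('Artist')->search({}, {
      prefetch => 'cds',
      rows     => 2,
    });

    $rs->all;
    # roughly:
    #   SELECT me.artistid, ..., cds.cdid, ...
    #   FROM (SELECT me.artistid, ... FROM artist me LIMIT 2) me
    #   LEFT JOIN cd cds ON cds.artist = me.artistid
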
Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/Statistics.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/Statistics.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/Statistics.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -2,7 +2,7 @@
 use strict;
 use warnings;
 
-use base qw/Class::Accessor::Grouped/;
+use base qw/DBIx::Class/;
 use IO::File;
 
 __PACKAGE__->mk_group_accessors(simple => qw/callback debugfh silence/);

Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/TxnScopeGuard.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/TxnScopeGuard.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage/TxnScopeGuard.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -25,30 +25,32 @@
 
   my $exception = $@;
 
-  carp 'A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or error. Rolling back.'
-    unless $exception;
-
-  my $rollback_exception;
   {
     local $@;
+
+    carp 'A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or error. Rolling back.'
+      unless $exception;
+
     eval { $storage->txn_rollback };
-    $rollback_exception = $@;
-  }
+    my $rollback_exception = $@;
 
-  if ($rollback_exception && $rollback_exception !~ /DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION/) {
-    if ($exception) {
-      $@ = "Transaction aborted: ${exception} "
+    if ($rollback_exception && $rollback_exception !~ /DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION/) {
+      if ($exception) {
+        $exception = "Transaction aborted: ${exception} "
           ."Rollback failed: ${rollback_exception}";
+      }
+      else {
+        carp (join ' ',
+          "********************* ROLLBACK FAILED!!! ********************",
+          "\nA rollback operation failed after the guard went out of scope.",
+          'This is potentially a disastrous situation, check your data for',
+          "consistency: $rollback_exception"
+        );
+      }
     }
-    else {
-      carp (join ' ',
-        "********************* ROLLBACK FAILED!!! ********************",
-        "\nA rollback operation failed after the guard went out of scope.",
-        'This is potentially a disastrous situation, check your data for',
-        "consistency: $rollback_exception"
-      );
-    }
   }
+
+  $@ = $exception;
 }
 
 1;

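The reshuffled DESTROY above makes the guard safe to reap during exception unwinding: the rollback now runs under local $@, and the original (possibly augmented) exception is reinstated at the end, so it is no longer clobbered by the eval around txn_rollback. A minimal sketch of the protected behaviour (txn_scope_guard is the documented way to obtain a guard):

    use lib 't/lib';
    use DBICTest;

    my $schema = DBICTest->init_schema;

    eval {
      my $guard = $schema->txn_scope_guard;
      $schema->resultset('Artist')->create({ name => 'doomed' });
      die "application error\n";   # $guard is reaped during unwinding
    };

    # the rollback happened inside DESTROY, yet the original error
    # survives thanks to the local $@ / reassignment above
    print $@;   # "application error"
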
Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class/Storage.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -6,8 +6,8 @@
 use base qw/DBIx::Class/;
 use mro 'c3';
 
-use Scalar::Util qw/weaken/;
-use Carp::Clan qw/^DBIx::Class/;
+use DBIx::Class::Exception;
+use Scalar::Util();
 use IO::File;
 use DBIx::Class::Storage::TxnScopeGuard;
 
@@ -83,7 +83,7 @@
 sub set_schema {
   my ($self, $schema) = @_;
   $self->schema($schema);
-  weaken($self->{schema}) if ref $self->{schema};
+  Scalar::Util::weaken($self->{schema}) if ref $self->{schema};
 }
 
 =head2 connected
@@ -120,8 +120,12 @@
 sub throw_exception {
   my $self = shift;
 
-  $self->schema->throw_exception(@_) if $self->schema;
-  croak @_;
+  if ($self->schema) {
+    $self->schema->throw_exception(@_);
+  }
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 =head2 txn_do

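With the change above, throw_exception raises a DBIx::Class::Exception object even when the storage has no schema attached, rather than croaking a bare string, so callers see a single exception interface either way. A sketch (constructing a schema-less storage directly is done purely for illustration):

    use DBIx::Class::Storage;

    my $storage = DBIx::Class::Storage->new;   # no schema attached

    eval { $storage->throw_exception('boom') };

    print ref $@;   # DBIx::Class::Exception - previously a plain string
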
Modified: DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/DBIx/Class.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -6,7 +6,7 @@
 use MRO::Compat;
 
 use vars qw($VERSION);
-use base qw/DBIx::Class::Componentised Class::Accessor::Grouped/;
+use base qw/Class::C3::Componentised Class::Accessor::Grouped/;
 use DBIx::Class::StartupCheck;
 
 sub mk_classdata {
@@ -24,15 +24,10 @@
 # Always remember to do all digits for the version even if they're 0
 # i.e. first release of 0.XX *must* be 0.XX000. This avoids fBSD ports
 # brain damage and presumably various other packaging systems too
+$VERSION = '0.08114';
 
-$VERSION = '0.08111';
-
 $VERSION = eval $VERSION; # numify for warning-free dev releases
 
-# what version of sqlt do we require if deploy() without a ddl_dir is invoked
-# when changing also adjust $sqlt_recommends in Makefile.PL
-my $minimum_sqlt_version = '0.11002';
-
 sub MODIFY_CODE_ATTRIBUTES {
  my ($class,$code,@attrs) = @_;
   $class->mk_classdata('__attr_cache' => {})
@@ -48,34 +43,6 @@
   return $@ ? $cache : { %$cache, %$rest };
 }
 
-# SQLT version handling
-{
-  my $_sqlt_version_ok;     # private
-  my $_sqlt_version_error;  # private
-
-  sub _sqlt_version_ok {
-    if (!defined $_sqlt_version_ok) {
-      eval "use SQL::Translator $minimum_sqlt_version";
-      if ($@) {
-        $_sqlt_version_ok = 0;
-        $_sqlt_version_error = $@;
-      }
-      else {
-        $_sqlt_version_ok = 1;
-      }
-    }
-    return $_sqlt_version_ok;
-  }
-
-  sub _sqlt_version_error {
-    shift->_sqlt_version_ok unless defined $_sqlt_version_ok;
-    return $_sqlt_version_error;
-  }
-
-  sub _sqlt_minimum_version { $minimum_sqlt_version };
-}
-
-
 1;
 
 =head1 NAME
@@ -148,7 +115,7 @@
   my $all_artists_rs = $schema->resultset('Artist');
 
   # Output all artists names
-  # $artist here is a DBIx::Class::Row, which has accessors 
+  # $artist here is a DBIx::Class::Row, which has accessors
   # for all its columns. Rows are also subclasses of your Result class.
   foreach $artist (@artists) {
     print $artist->name, "\n";
@@ -374,6 +341,8 @@
 
 Tom Hukins
 
+triode: Pete Gamache <gamache at cpan.org>
+
 typester: Daisuke Murase <typester at cpan.org>
 
 victori: Victor Igumnov <victori at cpan.org>

Modified: DBIx-Class/0.08/branches/extended_rels/lib/SQL/Translator/Parser/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/lib/SQL/Translator/Parser/DBIx/Class.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/lib/SQL/Translator/Parser/DBIx/Class.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -86,7 +86,7 @@
         # support quoting properly to be signaled about this
         $table_name = $$table_name if ref $table_name eq 'SCALAR';
 
-        # Its possible to have multiple DBIC sources using the same table
+        # It's possible to have multiple DBIC sources using the same table
         next if $tables{$table_name};
 
         $tables{$table_name}{source} = $source;
@@ -141,6 +141,7 @@
             next unless ref $rel_info->{cond} eq 'HASH';
 
             my $othertable = $source->related_source($rel);
+            next if $othertable->isa('DBIx::Class::ResultSource::View');  # can't define constraints referencing a view
             my $rel_table = $othertable->name;
 
             # FIXME - this isn't the right way to do it, but sqlt does not
@@ -184,7 +185,7 @@
                     if ($fk_constraint) {
                         $cascade->{$c} = $rel_info->{attrs}{"on_$c"};
                     }
-                    else {
+                    elsif ( $rel_info->{attrs}{"on_$c"} ) {
                         carp "SQLT attribute 'on_$c' was supplied for relationship '$moniker/$rel', which does not appear to be a foreign constraint. "
                             . "If you are sure that SQLT must generate a constraint for this relationship, add 'is_foreign_key_constraint => 1' to the attributes.\n";
                     }
@@ -200,7 +201,7 @@
                 next unless $fk_constraint;
 
                # Make sure we don't create the same foreign key constraint twice
-                my $key_test = join("\x00", @keys);
+                my $key_test = join("\x00", sort @keys);
                 next if $created_FK_rels{$rel_table}->{$key_test};
 
                 if (scalar(@keys)) {
@@ -214,7 +215,6 @@
                   if (! $is_deferrable and $rel_table ne $table_name) {
                     $tables{$table_name}{foreign_table_deps}{$rel_table}++;
                   }
-
                   $table->add_constraint(
                                     type             => 'foreign_key',
                                     name             => join('_', $table_name, 'fk', @keys),

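The join("\x00", sort @keys) change above canonicalizes the duplicate-constraint check: two relationships covering the same columns in a different declaration order now map to one test key, so the FOREIGN KEY is emitted once. In isolation (the column names are made up for the example):

    my @decl_a = qw(artist cd);
    my @decl_b = qw(cd artist);

    # before: order-sensitive keys let the duplicate through
    my $before = join("\x00", @decl_a) eq join("\x00", @decl_b);

    # after: sorting yields one canonical key per column set
    my $after = join("\x00", sort @decl_a) eq join("\x00", sort @decl_b);

    print $before ? "caught\n" : "emitted twice\n";   # emitted twice
    print $after  ? "caught\n" : "emitted twice\n";   # caught
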
Modified: DBIx-Class/0.08/branches/extended_rels/t/05components.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/05components.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/05components.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -7,8 +7,6 @@
 use lib qw(t/lib);
 use DBICTest::ForeignComponent;
 
-plan tests => 6;
-
 #   Tests if foreign component was loaded by calling foreign's method
 ok( DBICTest::ForeignComponent->foreign_test_method, 'foreign component' );
 
@@ -35,32 +33,7 @@
     'inject_base filters duplicates'
 );
 
-# Test for a warning with incorrect order in load_components
-my @warnings = ();
-{
-  package A::Test;
-  our @ISA = 'DBIx::Class';
-  {
-    local $SIG{__WARN__} = sub { push @warnings, shift};
-    __PACKAGE__->load_components(qw(Core UTF8Columns));
-  }
-}
-like( $warnings[0], qr/Core loaded before UTF8Columns/,
-      'warning issued for incorrect order in load_components()' );
-is( scalar @warnings, 1,
-    'only one warning issued for incorrect load_components call' );
-
-# Test that no warning is issued for the correct order in load_components
-{
-  @warnings = ();
-  package B::Test;
-  our @ISA = 'DBIx::Class';
-  {
-    local $SIG{__WARN__} = sub { push @warnings, shift };
-    __PACKAGE__->load_components(qw(UTF8Columns Core));
-  }
-}
-is( scalar @warnings, 0,
-    'warning not issued for correct order in load_components()' );
-
 use_ok('DBIx::Class::AccessorGroup');
+use_ok('DBIx::Class::Componentised');
+
+done_testing;

Modified: DBIx-Class/0.08/branches/extended_rels/t/100populate.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/100populate.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/100populate.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -5,9 +5,8 @@
 use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
+use Path::Class::File ();
 
-plan tests => 23;
-
 my $schema = DBICTest->init_schema();
 
 # The map below generates stuff like:
@@ -116,3 +115,205 @@
 is($link7->url, undef, 'Link 7 url');
 is($link7->title, 'gtitle', 'Link 7 title');
 
+my $rs = $schema->resultset('Artist');
+$rs->delete;
+
+# test _execute_array_empty (insert_bulk with all literal sql)
+
+$rs->populate([
+    (+{
+        name => \"'DT'",
+        rank => \500,
+        charfield => \"'mtfnpy'",
+    }) x 5
+]);
+
+is((grep {
+  $_->name eq 'DT' &&
+  $_->rank == 500  &&
+  $_->charfield eq 'mtfnpy'
+} $rs->all), 5, 'populate with all literal SQL');
+
+$rs->delete;
+
+# test mixed binds with literal sql
+
+$rs->populate([
+    (+{
+        name => \"'DT'",
+        rank => 500,
+        charfield => \"'mtfnpy'",
+    }) x 5
+]);
+
+is((grep {
+  $_->name eq 'DT' &&
+  $_->rank == 500  &&
+  $_->charfield eq 'mtfnpy'
+} $rs->all), 5, 'populate with all literal SQL');
+
+$rs->delete;
+
+###
+
+throws_ok {
+    $rs->populate([
+        {
+            artistid => 1,
+            name => 'foo1',
+        },
+        {
+            artistid => 'foo', # this dies
+            name => 'foo2',
+        },
+        {
+            artistid => 3,
+            name => 'foo3',
+        },
+    ]);
+} qr/slice/, 'bad slice';
+
+is($rs->count, 0, 'populate is atomic');
+
+# Trying to use a column marked as a bind in the first slice with literal sql in
+# a later slice should throw.
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => 1,
+      name => \"'foo'",
+    },
+    {
+      artistid => \2,
+      name => \"'foo'",
+    }
+  ]);
+} qr/bind expected/, 'literal sql where bind expected throws';
+
+# ... and vice-versa.
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => \1,
+      name => \"'foo'",
+    },
+    {
+      artistid => 2,
+      name => \"'foo'",
+    }
+  ]);
+} qr/literal SQL expected/i, 'bind where literal sql expected throws';
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => 1,
+      name => \"'foo'",
+    },
+    {
+      artistid => 2,
+      name => \"'bar'",
+    }
+  ]);
+} qr/inconsistent/, 'literal sql must be the same in all slices';
+
+# the stringification has nothing to do with the artist name
+# this is solely for testing consistency
+my $fn = Path::Class::File->new ('somedir/somefilename.tmp');
+my $fn2 = Path::Class::File->new ('somedir/someotherfilename.tmp');
+
+lives_ok {
+  $rs->populate([
+    {
+      name => 'supplied before stringifying object',
+    },
+    {
+      name => $fn,
+    }
+  ]);
+} 'stringifying objects pass through';
+
+# ... and vice-versa.
+
+lives_ok {
+  $rs->populate([
+    {
+      name => $fn2,
+    },
+    {
+      name => 'supplied after stringifying object',
+    },
+  ]);
+} 'stringifying objects pass through';
+
+for (
+  $fn,
+  $fn2,
+  'supplied after stringifying object',
+  'supplied before stringifying object'
+) {
+  my $row = $rs->find ({name => $_});
+  ok ($row, "Stringification test row '$_' properly inserted");
+}
+
+$rs->delete;
+
+# test stringification with ->create rather than Storage::insert_bulk as well
+
+lives_ok {
+  my @dummy = $rs->populate([
+    {
+      name => 'supplied before stringifying object',
+    },
+    {
+      name => $fn,
+    }
+  ]);
+} 'stringifying objects pass through';
+
+# ... and vice-versa.
+
+lives_ok {
+  my @dummy = $rs->populate([
+    {
+      name => $fn2,
+    },
+    {
+      name => 'supplied after stringifying object',
+    },
+  ]);
+} 'stringifying objects pass through';
+
+for (
+  $fn,
+  $fn2,
+  'supplied after stringifying object',
+  'supplied before stringifying object'
+) {
+  my $row = $rs->find ({name => $_});
+  ok ($row, "Stringification test row '$_' properly inserted");
+}
+
+lives_ok {
+   $schema->resultset('TwoKeys')->populate([{
+      artist => 1,
+      cd     => 5,
+      fourkeys_to_twokeys => [{
+            f_foo => 1,
+            f_bar => 1,
+            f_hello => 1,
+            f_goodbye => 1,
+            autopilot => 'a',
+      },{
+            f_foo => 2,
+            f_bar => 2,
+            f_hello => 2,
+            f_goodbye => 2,
+            autopilot => 'b',
+      }]
+   }])
+} 'multicol-PK has_many populate works';
+
+done_testing;

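Taken together, the new tests above pin down the populate/insert_bulk contract: a scalar reference is raw literal SQL, a plain value is a bind, and a given column must stay the same kind in every slice - identical SQL, in the case of literals. Condensed, assuming the same $schema as in the test (the qr// patterns are the ones matched above):

    my $rs = $schema->resultset('Artist');

    # fine: name is one fixed literal everywhere, rank a bind everywhere
    $rs->populate([
      { name => \"'DT'", rank => 500 },
      { name => \"'DT'", rank => 501 },
    ]);

    # dies matching qr/bind expected/: rank flips from bind to literal
    $rs->populate([
      { name => \"'DT'", rank => 500 },
      { name => \"'DT'", rank => \501 },
    ]);

    # dies matching qr/inconsistent/: same column, two different literals
    $rs->populate([
      { name => \"'foo'" },
      { name => \"'bar'" },
    ]);
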
Modified: DBIx-Class/0.08/branches/extended_rels/t/101populate_rs.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/101populate_rs.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/101populate_rs.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -15,9 +15,7 @@
 use lib qw(t/lib);
 use DBICTest;
 
-plan tests => 142;
 
-
 ## ----------------------------------------------------------------------------
 ## Get a Schema and some ResultSets we can play with.
 ## ----------------------------------------------------------------------------
@@ -26,6 +24,8 @@
 my $art_rs	= $schema->resultset('Artist');
 my $cd_rs	= $schema->resultset('CD');
 
+my $restricted_art_rs	= $art_rs->search({rank => 42});
+
 ok( $schema, 'Got a Schema object');
 ok( $art_rs, 'Got Good Artist Resultset');
 ok( $cd_rs, 'Got Good CD Resultset');
@@ -333,6 +333,18 @@
 		is($cdB->artist->name, 'Fred BloggsD', 'Set Artist to FredD');
 		ok($cdB->artist->artistid == $aid, "Got Expected Artist ID");
 	}
+
+  WITH_COND_FROM_RS: {
+  
+    my ($more_crap) = $restricted_art_rs->populate([
+      {
+        name => 'More Manufactured Crap',
+      },
+    ]);
+    
+    ## Did it use the condition in the resultset?
+    cmp_ok( $more_crap->rank, '==', 42, "Got Correct rank for result object");
+  } 
 }
 
 
@@ -601,6 +613,21 @@
 		ok( $cd2->title eq "VOID_Yet More Tweeny-Pop crap", "Got Expected CD Title");
 	}
 
+  WITH_COND_FROM_RS: {
+  
+    $restricted_art_rs->populate([
+      {
+        name => 'VOID More Manufactured Crap',
+      },
+    ]);
+
+    my $more_crap = $art_rs->search({
+      name => 'VOID More Manufactured Crap'
+    })->first;
+    
+    ## Did it use the condition in the resultset?
+    cmp_ok( $more_crap->rank, '==', 42, "Got Correct rank for result object");
+  } 
 }
 
 ARRAYREF_OF_ARRAYREF_STYLE: {
@@ -619,7 +646,7 @@
   is $jumped->name, 'A singer that jumped the shark two albums ago', 'Correct Name';
   is $cool->name, 'An actually cool singer.', 'Correct Name';
   
-  my ($cooler, $lamer) = $art_rs->populate([
+  my ($cooler, $lamer) = $restricted_art_rs->populate([
     [qw/artistid name/],
     [1003, 'Cooler'],
     [1004, 'Lamer'],	
@@ -627,4 +654,36 @@
   
   is $cooler->name, 'Cooler', 'Correct Name';
   is $lamer->name, 'Lamer', 'Correct Name';  
-}
\ No newline at end of file
+
+  cmp_ok $cooler->rank, '==', 42, 'Correct Rank';
+
+  ARRAY_CONTEXT_WITH_COND_FROM_RS: {
+  
+    my ($mega_lamer) = $restricted_art_rs->populate([
+      {
+        name => 'Mega Lamer',
+      },
+    ]);
+
+    ## Did it use the condition in the resultset?
+    cmp_ok( $mega_lamer->rank, '==', 42, "Got Correct rank for result object");
+  } 
+
+  VOID_CONTEXT_WITH_COND_FROM_RS: {
+  
+    $restricted_art_rs->populate([
+      {
+        name => 'VOID Mega Lamer',
+      },
+    ]);
+
+    my $mega_lamer = $art_rs->search({
+      name => 'VOID Mega Lamer'
+    })->first;
+    
+    ## Did it use the condition in the resultset?
+    cmp_ok( $mega_lamer->rank, '==', 42, "Got Correct rank for result object");
+  } 
+}
+
+done_testing;

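The WITH_COND_FROM_RS blocks above all assert the same contract: populate, like create, folds the calling resultset's own equality conditions into every inserted row, in list and void context alike. In short (assuming the same schema):

    my $restricted = $schema->resultset('Artist')->search({ rank => 42 });

    my ($row) = $restricted->populate([ { name => 'Someone New' } ]);
    print $row->rank;   # 42, inherited from the resultset condition
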
Modified: DBIx-Class/0.08/branches/extended_rels/t/104view.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/104view.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/104view.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -1,5 +1,5 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
 use Test::Exception;
@@ -8,8 +8,6 @@
 
 my $schema = DBICTest->init_schema();
 
-plan tests => 2;
-
 ## Real view
 my $cds_rs_2000 = $schema->resultset('CD')->search( { year => 2000 });
 my $year2kcds_rs = $schema->resultset('Year2000CDs');
@@ -24,5 +22,50 @@
 is($cds_rs_1999->count, $year1999cds_rs->count, 'View Year1999CDs sees all CDs in year 1999');
 
 
+# Test if relationships work correctly
+is_deeply (
+  [
+    $schema->resultset('Year1999CDs')->search (
+      {},
+      {
+        result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+        prefetch => ['artist', { tracks => [qw/cd year1999cd year2000cd/] } ],
+      },
+    )->all
+  ],
+  [
+    $schema->resultset('CD')->search (
+      { 'me.year' => '1999'},
+      {
+        result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+        prefetch => ['artist', { tracks => [qw/cd year1999cd year2000cd/] } ],
+        columns => [qw/cdid single_track title/],   # to match the columns retrieved by the virtview
+      },
+    )->all
+  ],
+  'Prefetch over virtual view gives expected result',
+);
 
+is_deeply (
+  [
+    $schema->resultset('Year2000CDs')->search (
+      {},
+      {
+        result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+        prefetch => ['artist', { tracks => [qw/cd year1999cd year2000cd/] } ],
+      },
+    )->all
+  ],
+  [
+    $schema->resultset('CD')->search (
+      { 'me.year' => '2000'},
+      {
+        result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+        prefetch => ['artist', { tracks => [qw/cd year1999cd year2000cd/] } ],
+      },
+    )->all
+  ],
+  'Prefetch over regular view gives expected result',
+);
 
+done_testing;

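Both comparisons rely on Year1999CDs being a virtual view source (no table behind it) and Year2000CDs a real database view; a prefetch across either must match the equivalent search on the cd table. For reference, a virtual view source is declared roughly like this - simplified from the test schema, see DBIx::Class::ResultSource::View for the real thing:

    package DBICTest::Schema::Year1999CDs;

    use base qw/DBIx::Class/;
    __PACKAGE__->load_components(qw/Core/);

    __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
    __PACKAGE__->table('year1999cds');
    __PACKAGE__->result_source_instance->is_virtual(1);
    __PACKAGE__->result_source_instance->view_definition(
      "SELECT cdid, single_track, title FROM cd WHERE year = '1999'"
    );
    __PACKAGE__->add_columns(qw/cdid single_track title/);
    __PACKAGE__->set_primary_key('cdid');

    1;
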
Modified: DBIx-Class/0.08/branches/extended_rels/t/60core.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/60core.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/60core.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -1,8 +1,9 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
 use Test::Exception;
+use Test::Warn;
 use lib qw(t/lib);
 use DBICTest;
 use DBIC::SqlMakerTest;
@@ -35,10 +36,10 @@
 my %not_dirty = $art->get_dirty_columns();
 is(scalar(keys(%not_dirty)), 0, 'Nothing is dirty');
 
-eval {
+throws_ok ( sub {
   my $ret = $art->make_column_dirty('name2');
-};
-ok(defined($@), 'Failed to make non-existent column dirty');
+}, qr/No such column 'name2'/, 'Failed to make non-existent column dirty');
+
 $art->make_column_dirty('name');
 my %fake_dirty = $art->get_dirty_columns();
 is(scalar(keys(%fake_dirty)), 1, '1 fake dirty column');
@@ -64,7 +65,7 @@
 
 is(@art, 2, 'And then there were two');
 
-ok(!$art->in_storage, "It knows it's dead");
+is($art->in_storage, 0, "It knows it's dead");
 
 dies_ok ( sub { $art->delete }, "Can't delete twice");
 
@@ -143,7 +144,7 @@
   });
 
   is($new_obj->name, 'find_or_new', 'find_or_new: instantiated a new artist');
-  ok(! $new_obj->in_storage, 'new artist is not in storage');
+  is($new_obj->in_storage, 0, 'new artist is not in storage');
 }
 
 my $cd = $schema->resultset("CD")->find(1);
@@ -221,10 +222,10 @@
     isa_ok($tdata{'last_updated_on'}, 'DateTime', 'inflated accessored column');
 }
 
-eval { $schema->class("Track")->load_components('DoesNotExist'); };
+throws_ok (sub {
+  $schema->class("Track")->load_components('DoesNotExist');
+}, qr!Can't locate DBIx/Class/DoesNotExist.pm!, 'exception on nonexisting component');
 
-ok $@, $@;
-
 is($schema->class("Artist")->field_name_for->{name}, 'artist name', 'mk_classdata usage ok');
 
 my $search = [ { 'tags.tag' => 'Cheesy' }, { 'tags.tag' => 'Blue' } ];
@@ -238,6 +239,13 @@
 is ($collapsed_or_rs->all, 4, 'Collapsed joined search with OR returned correct number of rows');
 is ($collapsed_or_rs->count, 4, 'Collapsed search count with OR ok');
 
+# make sure sure distinct on a grouped rs is warned about
+my $cd_rs = $schema->resultset ('CD')
+              ->search ({}, { distinct => 1, group_by => 'title' });
+warnings_exist (sub {
+  $cd_rs->next;
+}, qr/Useless use of distinct/, 'UUoD warning');
+
 {
   my $tcount = $schema->resultset('Track')->search(
     {},

Modified: DBIx-Class/0.08/branches/extended_rels/t/71mysql.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/71mysql.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/71mysql.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -25,7 +25,7 @@
 
 $dbh->do("DROP TABLE IF EXISTS cd;");
 
-$dbh->do("CREATE TABLE cd (cdid INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, artist INTEGER, title TEXT, year INTEGER, genreid INTEGER, single_track INTEGER);");
+$dbh->do("CREATE TABLE cd (cdid INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, artist INTEGER, title TEXT, year DATE, genreid INTEGER, single_track INTEGER);");
 
 $dbh->do("DROP TABLE IF EXISTS producer;");
 
@@ -160,8 +160,6 @@
 
     my $type_info = $schema->storage->columns_info_for('artist');
     is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
-
-
 }
 
 my $cd = $schema->resultset ('CD')->create ({});
@@ -227,4 +225,61 @@
       => 'Nothing Found!';
 }
 
+ZEROINSEARCH: {
+  my $cds_per_year = {
+    2001 => 2,
+    2002 => 1,
+    2005 => 3,
+  };
+
+  my $rs = $schema->resultset ('CD');
+  $rs->delete;
+  for my $y (keys %$cds_per_year) {
+    for my $c (1 .. $cds_per_year->{$y} ) {
+      $rs->create ({ title => "CD $y-$c", artist => 1, year => "$y-01-01" });
+    }
+  }
+
+  is ($rs->count, 6, 'CDs created successfully');
+
+  $rs = $rs->search ({}, {
+    select => [ {year => 'year'} ], as => ['y'], distinct => 1, order_by => 'year',
+  });
+
+  is_deeply (
+    [ $rs->get_column ('y')->all ],
+    [ sort keys %$cds_per_year ],
+    'Years group successfully',
+  );
+
+  $rs->create ({ artist => 1, year => '0-1-1', title => 'Jesus Rap' });
+
+  is_deeply (
+    [ $rs->get_column ('y')->all ],
+    [ 0, sort keys %$cds_per_year ],
+    'Zero-year groups successfully',
+  );
+
+  # convoluted search taken verbatim from the mailing list
+  my $restrict_rs = $rs->search({ -and => [
+    year => { '!=', 0 },
+    year => { '!=', undef }
+  ]});
+
+  is_deeply (
+    [ $restrict_rs->get_column('y')->all ],
+    [ $rs->get_column ('y')->all ],
+    'Zero year was correctly excluded from resultset',
+  );
+}
+
+## If find() is the first query after connect()
+## DBI::Storage::sql_maker() will be called before
+## _determine_driver() and so the ::SQLHacks class for MySQL
+## will not be used
+
+my $schema2 = DBICTest::Schema->connect($dsn, $user, $pass);
+$schema2->resultset("Artist")->find(4);
+isa_ok($schema2->storage->sql_maker, 'DBIx::Class::SQLAHacks::MySQL');
+
 done_testing;

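The final isa_ok pins down a lazy-connection ordering bug: find() builds its SQL through sql_maker(), and if that happens before _determine_driver() has run, the generic SQLAHacks class gets cached instead of the MySQL-specific one. Step by step, reusing the test's own variables:

    # fresh connection - connect() is lazy, nothing has hit the server,
    # so the storage cannot yet know it is talking to MySQL
    my $schema2 = DBICTest::Schema->connect($dsn, $user, $pass);

    # find() is the very first statement; it must trigger driver
    # redetection *before* instantiating (and caching) the SQL maker
    $schema2->resultset('Artist')->find(4);

    # with the ordering right, the cached maker is the MySQL subclass
    print ref $schema2->storage->sql_maker, "\n";
    # prints: DBIx::Class::SQLAHacks::MySQL
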
Modified: DBIx-Class/0.08/branches/extended_rels/t/72pg.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/72pg.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/72pg.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -472,6 +472,7 @@
 
 my @eapk_schemas;
 BEGIN{ @eapk_schemas = map "dbic_apk_$_", 0..5 }
+my %seqs; #< hash of schema.table.col => currval of its (DBIC) primary key sequence
 
 sub run_extended_apk_tests {
   my $schema = shift;
@@ -489,10 +490,18 @@
         for @eapk_schemas;
 
     $dbh->do("CREATE SEQUENCE $eapk_schemas[5].fooseq");
+    $dbh->do("SELECT setval('$eapk_schemas[5].fooseq',400)");
+    $seqs{"$eapk_schemas[1].apk.id2"} = 400;
+
     $dbh->do("CREATE SEQUENCE $eapk_schemas[4].fooseq");
+    $dbh->do("SELECT setval('$eapk_schemas[4].fooseq',300)");
+    $seqs{"$eapk_schemas[3].apk.id2"} = 300;
+
     $dbh->do("CREATE SEQUENCE $eapk_schemas[3].fooseq");
+    $dbh->do("SELECT setval('$eapk_schemas[3].fooseq',200)");
+    $seqs{"$eapk_schemas[4].apk.id2"} = 200;
 
-    $dbh->do("SET search_path = ".join ',', @eapk_schemas );
+    $dbh->do("SET search_path = ".join ',', reverse @eapk_schemas );
   });
 
   # clear our search_path cache
@@ -519,12 +528,14 @@
                qualify_table => 4,
              );
 
+  eapk_poke( $schema );
   eapk_poke( $schema, 0 );
   eapk_poke( $schema, 2 );
   eapk_poke( $schema, 4 );
   eapk_poke( $schema, 1 );
   eapk_poke( $schema, 0 );
   eapk_poke( $schema, 1 );
+  eapk_poke( $schema );
   eapk_poke( $schema, 4 );
   eapk_poke( $schema, 3 );
   eapk_poke( $schema, 1 );
@@ -538,8 +549,6 @@
 # do a DBIC create on the apk table in the given schema number (which is an
 # index of @eapk_schemas)
 
-my %seqs; #< sanity-check hash of schema.table.col => currval of its sequence
-
 sub eapk_poke {
   my ($s, $schema_num) = @_;
 
@@ -547,7 +556,7 @@
       ? $eapk_schemas[$schema_num]
       : '';
 
-  my $schema_name_actual = $schema_name || eapk_get_search_path($s)->[0];
+  my $schema_name_actual = $schema_name || eapk_find_visible_schema($s);
 
   $s->source('ExtAPK')->name($schema_name ? $schema_name.'.apk' : 'apk');
   #< clear sequence name cache
@@ -558,12 +567,13 @@
   lives_ok {
     my $new;
     for my $inc (1,2,3) {
-      $new = $schema->resultset('ExtAPK')->create({});
+      $new = $schema->resultset('ExtAPK')->create({ id1 => 1});
       my $proper_seqval = ++$seqs{"$schema_name_actual.apk.id2"};
       is( $new->id2, $proper_seqval, "$schema_name_actual.apk.id2 correct inc $inc" )
           or eapk_seq_diag($s,$schema_name);
       $new->discard_changes;
-      for my $id (grep $_ ne 'id2', @eapk_id_columns) {
+      is( $new->id1, 1 );
+      for my $id ('id3','id4') {
         my $proper_seqval = ++$seqs{"$schema_name_actual.apk.$id"};
         is( $new->$id, $proper_seqval, "$schema_name_actual.apk.$id correct inc $inc" )
             or eapk_seq_diag($s,$schema_name);
@@ -577,7 +587,7 @@
 # class
 sub eapk_seq_diag {
     my $s = shift;
-    my $schema = shift || eapk_get_search_path($s)->[0];
+    my $schema = shift || eapk_find_visible_schema($s);
 
     diag "$schema.apk sequences: ",
         join(', ',
@@ -633,13 +643,13 @@
         local $_[1]->{Warn} = 0;
 
         my $id_def = $a{nextval}
-            ? "integer primary key not null default nextval('$a{nextval}'::regclass)"
-            : 'serial primary key';
+            ? "integer not null default nextval('$a{nextval}'::regclass)"
+            : 'serial';
         $dbh->do(<<EOS);
 CREATE TABLE $table_name (
   id1 serial
   , id2 $id_def
-  , id3 serial
+  , id3 serial primary key
   , id4 serial
 )
 EOS
@@ -667,3 +677,19 @@
 
     });
 }
+
+sub eapk_find_visible_schema {
+    my ($s) = @_;
+
+    my ($schema) =
+        $s->storage->dbh_do(sub {
+            $_[1]->selectrow_array(<<EOS);
+SELECT n.nspname
+FROM pg_catalog.pg_namespace n
+JOIN pg_catalog.pg_class c ON c.relnamespace = n.oid
+WHERE c.relname = 'apk'
+  AND pg_catalog.pg_table_is_visible(c.oid)
+EOS
+        });
+    return $schema;
+}

Modified: DBIx-Class/0.08/branches/extended_rels/t/73oracle.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/73oracle.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/73oracle.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -26,7 +26,7 @@
 }
 
 use strict;
-use warnings;  
+use warnings;
 
 use Test::Exception;
 use Test::More;
@@ -40,7 +40,7 @@
   ' as well as following sequences: \'pkid1_seq\', \'pkid2_seq\' and \'nonpkid_seq\''
   unless ($dsn && $user && $pass);
 
-plan tests => 35;
+plan tests => 36;
 
 DBICTest::Schema->load_classes('ArtistFQN');
 my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
@@ -49,6 +49,7 @@
 
 eval {
   $dbh->do("DROP SEQUENCE artist_seq");
+  $dbh->do("DROP SEQUENCE cd_seq");
   $dbh->do("DROP SEQUENCE pkid1_seq");
   $dbh->do("DROP SEQUENCE pkid2_seq");
   $dbh->do("DROP SEQUENCE nonpkid_seq");
@@ -58,6 +59,7 @@
   $dbh->do("DROP TABLE track");
 };
 $dbh->do("CREATE SEQUENCE artist_seq START WITH 1 MAXVALUE 999999 MINVALUE 0");
+$dbh->do("CREATE SEQUENCE cd_seq START WITH 1 MAXVALUE 999999 MINVALUE 0");
 $dbh->do("CREATE SEQUENCE pkid1_seq START WITH 1 MAXVALUE 999999 MINVALUE 0");
 $dbh->do("CREATE SEQUENCE pkid2_seq START WITH 10 MAXVALUE 999999 MINVALUE 0");
 $dbh->do("CREATE SEQUENCE nonpkid_seq START WITH 20 MAXVALUE 999999 MINVALUE 0");
@@ -67,6 +69,7 @@
 $dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12), position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at DATE, small_dt DATE)");
 
 $dbh->do("ALTER TABLE artist ADD (CONSTRAINT artist_pk PRIMARY KEY (artistid))");
+$dbh->do("ALTER TABLE cd ADD (CONSTRAINT cd_pk PRIMARY KEY (cdid))");
 $dbh->do("ALTER TABLE sequence_test ADD (CONSTRAINT sequence_test_constraint PRIMARY KEY (pkid1, pkid2))");
 $dbh->do(qq{
   CREATE OR REPLACE TRIGGER artist_insert_trg
@@ -80,6 +83,18 @@
     END IF;
   END;
 });
+$dbh->do(qq{
+  CREATE OR REPLACE TRIGGER cd_insert_trg
+  BEFORE INSERT ON cd
+  FOR EACH ROW
+  BEGIN
+    IF :new.cdid IS NULL THEN
+      SELECT cd_seq.nextval
+      INTO :new.cdid
+      FROM DUAL;
+    END IF;
+  END;
+});
 
 {
     # Swiped from t/bindtype_columns.t to avoid creating my own Resultset.
@@ -88,7 +103,7 @@
     eval { $dbh->do('DROP TABLE bindtype_test') };
 
     $dbh->do(qq[
-        CREATE TABLE bindtype_test 
+        CREATE TABLE bindtype_test
         (
             id              integer      NOT NULL   PRIMARY KEY,
             bytea           integer      NULL,
@@ -108,13 +123,15 @@
 my $new = $schema->resultset('Artist')->create({ name => 'foo' });
 is($new->artistid, 1, "Oracle Auto-PK worked");
 
+my $cd = $schema->resultset('CD')->create({ artist => 1, title => 'EP C', year => '2003' });
+is($cd->cdid, 1, "Oracle Auto-PK worked - using scalar ref as table name");
+
 # test again with fully-qualified table name
 $new = $schema->resultset('ArtistFQN')->create( { name => 'bar' } );
 is( $new->artistid, 2, "Oracle Auto-PK worked with fully-qualified tablename" );
 
 # test join with row count ambiguity
 
-my $cd = $schema->resultset('CD')->create({ cdid => 1, artist => 1, title => 'EP C', year => '2003' });
 my $track = $schema->resultset('Track')->create({ trackid => 1, cd => 1,
     position => 1, title => 'Track1' });
 my $tjoin = $schema->resultset('Track')->search({ 'me.title' => 'Track1'},
@@ -149,7 +166,7 @@
 
 $tcount = $schema->resultset('Track')->search(
   {},
-  { 
+  {
      group_by => [ qw/position title/ ]
   }
 );
@@ -186,7 +203,10 @@
 my $st = $schema->resultset('SequenceTest')->create({ name => 'foo', pkid1 => 55 });
 is($st->pkid1, 55, "Oracle Auto-PK without trigger: First primary key set manually");
 
-{
+SKIP: {
+        skip 'buggy BLOB support in DBD::Oracle 1.23', 8
+          if $DBD::Oracle::VERSION == 1.23;
+
 	my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
 	$binstr{'large'} = $binstr{'small'} x 1024;
 
@@ -212,6 +232,7 @@
 END {
     if($schema && ($dbh = $schema->storage->dbh)) {
         $dbh->do("DROP SEQUENCE artist_seq");
+        $dbh->do("DROP SEQUENCE cd_seq");
         $dbh->do("DROP SEQUENCE pkid1_seq");
         $dbh->do("DROP SEQUENCE pkid2_seq");
         $dbh->do("DROP SEQUENCE nonpkid_seq");

Modified: DBIx-Class/0.08/branches/extended_rels/t/746mssql.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/746mssql.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/746mssql.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -143,14 +143,11 @@
     my ($storage, $dbh) = @_;
     eval { $dbh->do("DROP TABLE money_test") };
     $dbh->do(<<'SQL');
-
 CREATE TABLE money_test (
    id INT IDENTITY PRIMARY KEY,
    amount MONEY NULL
 )
-
 SQL
-
 });
 
 my $rs = $schema->resultset('Money');

Modified: DBIx-Class/0.08/branches/extended_rels/t/746sybase.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/746sybase.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/746sybase.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -1,5 +1,6 @@
 use strict;
 use warnings;  
+no warnings 'uninitialized';
 
 use Test::More;
 use Test::Exception;
@@ -8,84 +9,580 @@
 
 my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};
 
-plan skip_all => 'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test'
-  unless ($dsn && $user);
+my $TESTS = 63 + 2;
 
-plan tests => 13;
+if (not ($dsn && $user)) {
+  plan skip_all =>
+    'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' .
+    "\nWarning: This test drops and creates the tables " .
+    "'artist', 'money_test' and 'bindtype_test'";
+} else {
+  plan tests => $TESTS*2 + 1;
+}
 
-my $schema = DBICTest::Schema->connect($dsn, $user, $pass, {AutoCommit => 1});
+my @storage_types = (
+  'DBI::Sybase::ASE',
+  'DBI::Sybase::ASE::NoBindVars',
+);
+eval "require DBIx::Class::Storage::$_;" for @storage_types;
 
-# start disconnected to test reconnection
-$schema->storage->ensure_connected;
-$schema->storage->_dbh->disconnect;
+my $schema;
+my $storage_idx = -1;
 
-isa_ok( $schema->storage, 'DBIx::Class::Storage::DBI::Sybase' );
+sub get_schema {
+  DBICTest::Schema->connect($dsn, $user, $pass, {
+    on_connect_call => [
+      [ blob_setup => log_on_update => 1 ], # this is a safer option
+    ],
+  });
+}
 
-my $dbh;
-lives_ok (sub {
-  $dbh = $schema->storage->dbh;
-}, 'reconnect works');
+my $ping_count = 0;
+{
+  my $ping = DBIx::Class::Storage::DBI::Sybase::ASE->can('_ping');
+  *DBIx::Class::Storage::DBI::Sybase::ASE::_ping = sub {
+    $ping_count++;
+    goto $ping;
+  };
+}
 
-$schema->storage->dbh_do (sub {
-    my ($storage, $dbh) = @_;
-    eval { $dbh->do("DROP TABLE artist") };
-    $dbh->do(<<'SQL');
+for my $storage_type (@storage_types) {
+  $storage_idx++;
 
+  unless ($storage_type eq 'DBI::Sybase::ASE') { # autodetect
+    DBICTest::Schema->storage_type("::$storage_type");
+  }
+
+  $schema = get_schema();
+
+  $schema->storage->ensure_connected;
+
+  if ($storage_idx == 0 &&
+      $schema->storage->isa('DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars')) {
+# no placeholders in this version of Sybase or DBD::Sybase (or using FreeTDS)
+      my $tb = Test::More->builder;
+      $tb->skip('no placeholders') for 1..$TESTS;
+      next;
+  }
+
+  isa_ok( $schema->storage, "DBIx::Class::Storage::$storage_type" );
+
+  $schema->storage->_dbh->disconnect;
+  lives_ok (sub { $schema->storage->dbh }, 'reconnect works');
+
+  $schema->storage->dbh_do (sub {
+      my ($storage, $dbh) = @_;
+      eval { $dbh->do("DROP TABLE artist") };
+      $dbh->do(<<'SQL');
 CREATE TABLE artist (
-   artistid INT IDENTITY NOT NULL,
+   artistid INT IDENTITY PRIMARY KEY,
    name VARCHAR(100),
    rank INT DEFAULT 13 NOT NULL,
-   charfield CHAR(10) NULL,
-   primary key(artistid)
+   charfield CHAR(10) NULL
 )
-
 SQL
+  });
 
-});
+  my %seen_id;
 
-my %seen_id;
+# so we start unconnected
+  $schema->storage->disconnect;
 
-# fresh $schema so we start unconnected
-$schema = DBICTest::Schema->connect($dsn, $user, $pass, {AutoCommit => 1});
-
 # test primary key handling
-my $new = $schema->resultset('Artist')->create({ name => 'foo' });
-ok($new->artistid > 0, "Auto-PK worked");
+  my $new = $schema->resultset('Artist')->create({ name => 'foo' });
+  ok($new->artistid > 0, "Auto-PK worked");
 
-$seen_id{$new->artistid}++;
+  $seen_id{$new->artistid}++;
 
-# test LIMIT support
-for (1..6) {
+# check redispatch to storage-specific insert when auto-detected storage
+  if ($storage_type eq 'DBI::Sybase::ASE') {
+    DBICTest::Schema->storage_type('::DBI');
+    $schema = get_schema();
+  }
+
+  $new = $schema->resultset('Artist')->create({ name => 'Artist 1' });
+  is ( $seen_id{$new->artistid}, undef, 'id for Artist 1 is unique' );
+  $seen_id{$new->artistid}++;
+
+# inserts happen in a txn, so we make sure it still works inside a txn too
+  $schema->txn_begin;
+
+  for (2..6) {
     $new = $schema->resultset('Artist')->create({ name => 'Artist ' . $_ });
     is ( $seen_id{$new->artistid}, undef, "id for Artist $_ is unique" );
     $seen_id{$new->artistid}++;
-}
+  }
 
-my $it;
+  $schema->txn_commit;
 
-$it = $schema->resultset('Artist')->search( {}, {
+# test simple count
+  is ($schema->resultset('Artist')->count, 7, 'count(*) of whole table ok');
+
+# test LIMIT support
+  my $it = $schema->resultset('Artist')->search({
+    artistid => { '>' => 0 }
+  }, {
     rows => 3,
     order_by => 'artistid',
-});
+  });
 
-TODO: {
-    local $TODO = 'Sybase is very very fucked in the limit department';
+  is( $it->count, 3, "LIMIT count ok" );
 
-    is( $it->count, 3, "LIMIT count ok" );
-}
+  is( $it->next->name, "foo", "iterator->next ok" );
+  $it->next;
+  is( $it->next->name, "Artist 2", "iterator->next ok" );
+  is( $it->next, undef, "next past end of resultset ok" );
 
-# The iterator still works correctly with rows => 3, even though the sql is
-# fucked, very interesting.
+# now try with offset
+  $it = $schema->resultset('Artist')->search({}, {
+    rows => 3,
+    offset => 3,
+    order_by => 'artistid',
+  });
 
-is( $it->next->name, "foo", "iterator->next ok" );
-$it->next;
-is( $it->next->name, "Artist 2", "iterator->next ok" );
-is( $it->next, undef, "next past end of resultset ok" );
+  is( $it->count, 3, "LIMIT with offset count ok" );
 
+  is( $it->next->name, "Artist 3", "iterator->next ok" );
+  $it->next;
+  is( $it->next->name, "Artist 5", "iterator->next ok" );
+  is( $it->next, undef, "next past end of resultset ok" );
 
+# now try a grouped count
+  $schema->resultset('Artist')->create({ name => 'Artist 6' })
+    for (1..6);
+
+  $it = $schema->resultset('Artist')->search({}, {
+    group_by => 'name'
+  });
+
+  is( $it->count, 7, 'COUNT of GROUP_BY ok' );
+
+# do an IDENTITY_INSERT
+  {
+    no warnings 'redefine';
+
+    my @debug_out;
+    local $schema->storage->{debug} = 1;
+    local $schema->storage->debugobj->{callback} = sub {
+      push @debug_out, $_[1];
+    };
+
+    my $txn_used = 0;
+    my $txn_commit = \&DBIx::Class::Storage::DBI::txn_commit;
+    local *DBIx::Class::Storage::DBI::txn_commit = sub {
+      $txn_used = 1;
+      goto &$txn_commit;
+    };
+
+    $schema->resultset('Artist')
+      ->create({ artistid => 999, name => 'mtfnpy' });
+
+    ok((grep /IDENTITY_INSERT/i, @debug_out), 'IDENTITY_INSERT used');
+
+    SKIP: {
+      skip 'not testing lack of txn on IDENTITY_INSERT with NoBindVars', 1
+        if $storage_type =~ /NoBindVars/i;
+
+      is $txn_used, 0, 'no txn on insert with IDENTITY_INSERT';
+    }
+  }
+
+# do an IDENTITY_UPDATE
+  {
+    my @debug_out;
+    local $schema->storage->{debug} = 1;
+    local $schema->storage->debugobj->{callback} = sub {
+      push @debug_out, $_[1];
+    };
+
+    lives_and {
+      $schema->resultset('Artist')
+        ->find(999)->update({ artistid => 555 });
+      ok((grep /IDENTITY_UPDATE/i, @debug_out));
+    } 'IDENTITY_UPDATE used';
+    $ping_count-- if $@;
+  }
+
+  my $bulk_rs = $schema->resultset('Artist')->search({
+    name => { -like => 'bulk artist %' }
+  });
+
+# test insert_bulk using populate.
+  SKIP: {
+    skip 'insert_bulk not supported', 4
+      if $storage_type =~ /NoBindVars/i;
+
+    lives_ok {
+      $schema->resultset('Artist')->populate([
+        {
+          name => 'bulk artist 1',
+          charfield => 'foo',
+        },
+        {
+          name => 'bulk artist 2',
+          charfield => 'foo',
+        },
+        {
+          name => 'bulk artist 3',
+          charfield => 'foo',
+        },
+      ]);
+    } 'insert_bulk via populate';
+
+    is $bulk_rs->count, 3, 'correct number inserted via insert_bulk';
+
+    is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
+      'column set correctly via insert_bulk');
+
+    my %bulk_ids;
+    @bulk_ids{map $_->artistid, $bulk_rs->all} = ();
+
+    is ((scalar keys %bulk_ids), 3,
+      'identities generated correctly in insert_bulk');
+
+    $bulk_rs->delete;
+  }
+
+# make sure insert_bulk works a second time on the same connection
+  SKIP: {
+    skip 'insert_bulk not supported', 3
+      if $storage_type =~ /NoBindVars/i;
+
+    lives_ok {
+      $schema->resultset('Artist')->populate([
+        {
+          name => 'bulk artist 1',
+          charfield => 'bar',
+        },
+        {
+          name => 'bulk artist 2',
+          charfield => 'bar',
+        },
+        {
+          name => 'bulk artist 3',
+          charfield => 'bar',
+        },
+      ]);
+    } 'insert_bulk via populate called a second time';
+
+    is $bulk_rs->count, 3,
+      'correct number inserted via insert_bulk';
+
+    is ((grep $_->charfield eq 'bar', $bulk_rs->all), 3,
+      'column set correctly via insert_bulk');
+
+    $bulk_rs->delete;
+  }
+
+# test invalid insert_bulk (missing required column)
+#
+# There should be a rollback, reconnect and the next valid insert_bulk should
+# succeed.
+  throws_ok {
+    $schema->resultset('Artist')->populate([
+      {
+        charfield => 'foo',
+      }
+    ]);
+  } qr/no value or default|does not allow null|placeholders/i,
+# The second pattern is the error from fallback to regular array insert on
+# incompatible charset.
+# The third is for ::NoBindVars with no syb_has_blk.
+  'insert_bulk with missing required column throws error';
+
+# now test insert_bulk with IDENTITY_INSERT
+  SKIP: {
+    skip 'insert_bulk not supported', 3
+      if $storage_type =~ /NoBindVars/i;
+
+    lives_ok {
+      $schema->resultset('Artist')->populate([
+        {
+          artistid => 2001,
+          name => 'bulk artist 1',
+          charfield => 'foo',
+        },
+        {
+          artistid => 2002,
+          name => 'bulk artist 2',
+          charfield => 'foo',
+        },
+        {
+          artistid => 2003,
+          name => 'bulk artist 3',
+          charfield => 'foo',
+        },
+      ]);
+    } 'insert_bulk with IDENTITY_INSERT via populate';
+
+    is $bulk_rs->count, 3,
+      'correct number inserted via insert_bulk with IDENTITY_INSERT';
+
+    is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
+      'column set correctly via insert_bulk with IDENTITY_INSERT');
+
+    $bulk_rs->delete;
+  }
+
+# test correlated subquery
+  my $subq = $schema->resultset('Artist')->search({ artistid => { '>' => 3 } })
+    ->get_column('artistid')
+    ->as_query;
+  my $subq_rs = $schema->resultset('Artist')->search({
+    artistid => { -in => $subq }
+  });
+  is $subq_rs->count, 11, 'correlated subquery';
+
+# mostly stolen from the blob stuff Nniuq wrote for t/73oracle.t
+  SKIP: {
+    skip 'TEXT/IMAGE support does not work with FreeTDS', 22
+      if $schema->storage->using_freetds;
+
+    my $dbh = $schema->storage->_dbh;
+    {
+      local $SIG{__WARN__} = sub {};
+      eval { $dbh->do('DROP TABLE bindtype_test') };
+
+      $dbh->do(qq[
+        CREATE TABLE bindtype_test
+        (
+          id    INT   IDENTITY PRIMARY KEY,
+          bytea IMAGE NULL,
+          blob  IMAGE NULL,
+          clob  TEXT  NULL
+        )
+      ],{ RaiseError => 1, PrintError => 0 });
+    }
+
+    my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
+    $binstr{'large'} = $binstr{'small'} x 1024;
+
+    my $maxloblen = length $binstr{'large'};
+
+    if (not $schema->storage->using_freetds) {
+      $dbh->{'LongReadLen'} = $maxloblen * 2;
+    } else {
+      $dbh->do("set textsize ".($maxloblen * 2));
+    }
+
+    my $rs = $schema->resultset('BindType');
+    my $last_id;
+
+    foreach my $type (qw(blob clob)) {
+      foreach my $size (qw(small large)) {
+        no warnings 'uninitialized';
+
+        my $created;
+        lives_ok {
+          $created = $rs->create( { $type => $binstr{$size} } )
+        } "inserted $size $type without dying";
+
+        $last_id = $created->id if $created;
+
+        lives_and {
+          ok($rs->find($last_id)->$type eq $binstr{$size})
+        } "verified inserted $size $type";
+      }
+    }
+
+    $rs->delete;
+
+    # blob insert with explicit PK
+    # also a good opportunity to test IDENTITY_INSERT
+    lives_ok {
+      $rs->create( { id => 1, blob => $binstr{large} } )
+    } 'inserted large blob without dying with manual PK';
+
+    lives_and {
+      ok($rs->find(1)->blob eq $binstr{large})
+    } 'verified inserted large blob with manual PK';
+
+    # try a blob update
+    my $new_str = $binstr{large} . 'mtfnpy';
+
+    # check redispatch to storage-specific update when auto-detected storage
+    if ($storage_type eq 'DBI::Sybase::ASE') {
+      DBICTest::Schema->storage_type('::DBI');
+      $schema = get_schema();
+    }
+
+    lives_ok {
+      $rs->search({ id => 1 })->update({ blob => $new_str })
+    } 'updated blob successfully';
+
+    lives_and {
+      ok($rs->find(1)->blob eq $new_str)
+    } 'verified updated blob';
+
+    # try a blob update with IDENTITY_UPDATE
+    lives_and {
+      $new_str = $binstr{large} . 'hlagh';
+      $rs->find(1)->update({ id => 999, blob => $new_str });
+      ok($rs->find(999)->blob eq $new_str);
+    } 'verified updated blob with IDENTITY_UPDATE';
+
+    ## try multi-row blob update
+    # first insert some blobs
+    $new_str = $binstr{large} . 'foo';
+    lives_and {
+      $rs->delete;
+      $rs->create({ blob => $binstr{large} }) for (1..2);
+      $rs->update({ blob => $new_str });
+      is((grep $_->blob eq $new_str, $rs->all), 2);
+    } 'multi-row blob update';
+
+    $rs->delete;
+
+    # now try insert_bulk with blobs and only blobs
+    $new_str = $binstr{large} . 'bar';
+    lives_ok {
+      $rs->populate([
+        {
+          bytea => 1,
+          blob => $binstr{large},
+          clob => $new_str,
+        },
+        {
+          bytea => 1,
+          blob => $binstr{large},
+          clob => $new_str,
+        },
+      ]);
+    } 'insert_bulk with blobs does not die';
+
+    is((grep $_->blob eq $binstr{large}, $rs->all), 2,
+      'IMAGE column set correctly via insert_bulk');
+
+    is((grep $_->clob eq $new_str, $rs->all), 2,
+      'TEXT column set correctly via insert_bulk');
+
+    # now try insert_bulk with blobs and a non-blob which also happens to be an
+    # identity column
+    SKIP: {
+      skip 'no insert_bulk without placeholders', 4
+        if $storage_type =~ /NoBindVars/i;
+
+      $rs->delete;
+      $new_str = $binstr{large} . 'bar';
+      lives_ok {
+        $rs->populate([
+          {
+            id => 1,
+            bytea => 1,
+            blob => $binstr{large},
+            clob => $new_str,
+          },
+          {
+            id => 2,
+            bytea => 1,
+            blob => $binstr{large},
+            clob => $new_str,
+          },
+        ]);
+      } 'insert_bulk with blobs and explicit identity does NOT die';
+
+      is((grep $_->blob eq $binstr{large}, $rs->all), 2,
+        'IMAGE column set correctly via insert_bulk with identity');
+
+      is((grep $_->clob eq $new_str, $rs->all), 2,
+        'TEXT column set correctly via insert_bulk with identity');
+
+      is_deeply [ map $_->id, $rs->all ], [ 1,2 ],
+        'explicit identities set correctly via insert_bulk with blobs';
+    }
+
+    lives_and {
+      $rs->delete;
+      $rs->create({ blob => $binstr{large} }) for (1..2);
+      $rs->update({ blob => undef });
+      is((grep !defined($_->blob), $rs->all), 2);
+    } 'blob update to NULL';
+  }
+
+# test MONEY column support (and some other misc. stuff)
+  $schema->storage->dbh_do (sub {
+      my ($storage, $dbh) = @_;
+      eval { $dbh->do("DROP TABLE money_test") };
+      $dbh->do(<<'SQL');
+CREATE TABLE money_test (
+   id INT IDENTITY PRIMARY KEY,
+   amount MONEY DEFAULT $999.99 NULL
+)
+SQL
+  });
+
+  my $rs = $schema->resultset('Money');
+
+# test insert with defaults
+  lives_and {
+    $rs->create({});
+    is((grep $_->amount == 999.99, $rs->all), 1);
+  } 'insert with all defaults works';
+  $rs->delete;
+
+# test insert transaction when there's an active cursor
+  {
+    my $artist_rs = $schema->resultset('Artist');
+    $artist_rs->first;
+    lives_ok {
+      my $row = $schema->resultset('Money')->create({ amount => 100 });
+      $row->delete;
+    } 'inserted a row with an active cursor';
+    $ping_count-- if $@; # dbh_do calls ->connected
+  }
+
+# test insert in an outer transaction when there's an active cursor
+  TODO: {
+    local $TODO = 'this should work once we have eager cursors';
+
+# clear state, or we get a deadlock on $row->delete
+# XXX figure out why this happens
+    $schema->storage->disconnect;
+
+    lives_ok {
+      $schema->txn_do(sub {
+        my $artist_rs = $schema->resultset('Artist');
+        $artist_rs->first;
+        my $row = $schema->resultset('Money')->create({ amount => 100 });
+        $row->delete;
+      });
+    } 'inserted a row with an active cursor in outer txn';
+    $ping_count-- if $@; # dbh_do calls ->connected
+  }
+
+# Now test money values.
+  my $row;
+  lives_ok {
+    $row = $rs->create({ amount => 100 });
+  } 'inserted a money value';
+
+  is eval { $rs->find($row->id)->amount }, 100, 'money value round-trip';
+
+  lives_ok {
+    $row->update({ amount => 200 });
+  } 'updated a money value';
+
+  is eval { $rs->find($row->id)->amount },
+    200, 'updated money value round-trip';
+
+  lives_ok {
+    $row->update({ amount => undef });
+  } 'updated a money value to NULL';
+
+  my $null_amount = eval { $rs->find($row->id)->amount };
+  ok(
+    ((!defined $null_amount) && (not $@)),
+    'updated money value to NULL round-trip'
+  );
+  diag $@ if $@;
+}
+
+is $ping_count, 0, 'no pings';
+
 # clean up our mess
 END {
-    my $dbh = eval { $schema->storage->_dbh };
-    $dbh->do('DROP TABLE artist') if $dbh;
+  if (my $dbh = eval { $schema->storage->_dbh }) {
+    eval { $dbh->do("DROP TABLE $_") }
+      for qw/artist bindtype_test money_test/;
+  }
 }
-
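
The $ping_count block near the top of this file is a generic wrap-and-count trick: swap a method for a closure that bumps a counter, then tail-call the original with goto so @_ and caller() survive intact. In isolation, against a hypothetical target class, it looks like this sketch:

  my $calls = 0;
  {
    no strict 'refs';
    no warnings 'redefine';
    my $orig = My::Target->can('do_work');    # hypothetical class and method
    *{'My::Target::do_work'} = sub {
      $calls++;
      goto $orig;    # tail call: the original sees the same @_ and caller
    };
  }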

Added: DBIx-Class/0.08/branches/extended_rels/t/747mssql_ado.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/747mssql_ado.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/extended_rels/t/747mssql_ado.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -0,0 +1,70 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MSSQL_ADO_${_}" } qw/DSN USER PASS/};
+
+plan skip_all => 'Set $ENV{DBICTEST_MSSQL_ADO_DSN}, _USER and _PASS to run this test'
+  unless ($dsn && $user);
+
+plan tests => 12;
+
+my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
+$schema->storage->ensure_connected;
+
+isa_ok( $schema->storage, 'DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server' );
+
+$schema->storage->dbh_do (sub {
+    my ($storage, $dbh) = @_;
+    eval { $dbh->do("DROP TABLE artist") };
+    $dbh->do(<<'SQL');
+CREATE TABLE artist (
+   artistid INT IDENTITY NOT NULL,
+   name VARCHAR(100),
+   rank INT NOT NULL DEFAULT '13',
+   charfield CHAR(10) NULL,
+   primary key(artistid)
+)
+SQL
+});
+
+my $new = $schema->resultset('Artist')->create({ name => 'foo' });
+ok($new->artistid > 0, 'Auto-PK worked');
+
+# make sure select works
+my $found = $schema->resultset('Artist')->search({ name => 'foo' })->first;
+is $found->artistid, $new->artistid, 'search works';
+
+# test large column list in select
+$found = $schema->resultset('Artist')->search({ name => 'foo' }, {
+  select => ['artistid', 'name', map "'foo' foo_$_", 0..50],
+  as     => ['artistid', 'name', map       "foo_$_", 0..50],
+})->first;
+is $found->artistid, $new->artistid, 'select with big column list';
+is $found->get_column('foo_50'), 'foo', 'last item in big column list';
+
+# create a few more rows
+for (1..6) {
+  $schema->resultset('Artist')->create({ name => 'Artist ' . $_ });
+}
+
+# test multiple active cursors
+my $rs1 = $schema->resultset('Artist')->search({}, { order_by => 'artistid' });
+my $rs2 = $schema->resultset('Artist')->search({}, { order_by => 'name' });
+
+while ($rs1->next) {
+  ok eval { $rs2->next }, 'multiple active cursors';
+}
+
+# clean up our mess
+END {
+  if (my $dbh = eval { $schema->storage->_dbh }) {
+    eval { $dbh->do("DROP TABLE $_") }
+      for qw/artist/;
+  }
+}
+# vim:sw=2 sts=2
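
The big-column-list test above leans on the select/as contract: every element of 'select' needs a matching slot in 'as', and computed values are read back through get_column since no accessor exists for them. Reduced to a single extra column, a sketch against the same test schema:

  my $first = $schema->resultset('Artist')->search({ name => 'foo' }, {
    select => [ 'artistid', "'foo' foo_0" ],   # literal value with an alias
    as     => [ 'artistid', 'foo_0' ],
  })->first;

  print $first->get_column('foo_0');   # prints 'foo'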

Modified: DBIx-Class/0.08/branches/extended_rels/t/74mssql.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/74mssql.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/74mssql.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -18,10 +18,6 @@
 plan skip_all => 'Set $ENV{DBICTEST_MSSQL_DSN}, _USER and _PASS to run this test'
   unless ($dsn);
 
-my $TESTS = 13;
-
-plan tests => $TESTS * 2;
-
 my @storage_types = (
   'DBI::Sybase::Microsoft_SQL_Server',
   'DBI::Sybase::Microsoft_SQL_Server::NoBindVars',
@@ -29,6 +25,7 @@
 my $storage_idx = -1;
 my $schema;
 
+my $NUMBER_OF_TESTS_IN_BLOCK = 18;
 for my $storage_type (@storage_types) {
   $storage_idx++;
 
@@ -44,7 +41,7 @@
 
   if ($storage_idx == 0 && ref($schema->storage) =~ /NoBindVars\z/) {
     my $tb = Test::More->builder;
-    $tb->skip('no placeholders') for 1..$TESTS;
+    $tb->skip('no placeholders') for 1..$NUMBER_OF_TESTS_IN_BLOCK;
     next;
   }
 
@@ -133,8 +130,56 @@
 
   is $rs->find($row->id)->amount,
     undef, 'updated money value to NULL round-trip';
+
+  $rs->create({ amount => 300 }) for (1..3);
+
+  # test multiple active statements
+  lives_ok {
+    my $artist_rs = $schema->resultset('Artist');
+    while (my $row = $rs->next) {
+      my $artist = $artist_rs->next;
+    }
+    $rs->reset;
+  } 'multiple active statements';
+
+  $rs->delete;
+
+  # test simple transaction with commit
+  lives_ok {
+    $schema->txn_do(sub {
+      $rs->create({ amount => 400 });
+    });
+  } 'simple transaction';
+
+  cmp_ok $rs->first->amount, '==', 400, 'committed';
+  $rs->reset;
+
+  $rs->delete;
+
+  # test rollback
+  throws_ok {
+    $schema->txn_do(sub {
+      $rs->create({ amount => 400 });
+      die 'mtfnpy';
+    });
+  } qr/mtfnpy/, 'simple failed txn';
+
+  is $rs->first, undef, 'rolled back';
+  $rs->reset;
 }
 
+# test op-induced autoconnect
+lives_ok (sub {
+
+  my $schema =  DBICTest::Schema->clone;
+  $schema->connection($dsn, $user, $pass);
+
+  my $artist = $schema->resultset ('Artist')->search ({}, { order_by => 'artistid' })->next;
+  is ($artist->id, 1, 'Artist retrieved successfully');
+}, 'Query-induced autoconnect works');
+
+done_testing;
+
 # clean up our mess
 END {
   if (my $dbh = eval { $schema->storage->dbh }) {
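
The new commit/rollback tests use txn_do with a closure; for reference, the guard-based equivalent of the committed case, a sketch reusing the test's $schema and $rs:

  {
    my $guard = $schema->txn_scope_guard;
    $rs->create({ amount => 400 });
    $guard->commit;   # leaving scope without this rolls the txn back
  }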

Modified: DBIx-Class/0.08/branches/extended_rels/t/79aliasing.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/79aliasing.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/79aliasing.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -52,7 +52,7 @@
   my $cd_rs = $schema->resultset('CD')->search({ 'artist.name' => 'Caterwauler McCrae' }, { join => 'artist' });
 
   my $cd = $cd_rs->find_or_new({ title => 'Huh?', year => 2006 });
-  ok(! $cd->in_storage, 'new CD not in storage yet');
+  is($cd->in_storage, 0, 'new CD not in storage yet');
   is($cd->title, 'Huh?', 'new CD title is correct');
   is($cd->year, 2006, 'new CD year is correct');
 }

Modified: DBIx-Class/0.08/branches/extended_rels/t/80unique.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/80unique.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/80unique.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -195,7 +195,7 @@
       { key => 'cd_artist_title' }
     );
 
-    ok(!$cd1->in_storage, 'CD is not in storage yet after update_or_new');
+    is($cd1->in_storage, 0, 'CD is not in storage yet after update_or_new');
     $cd1->insert;
     ok($cd1->in_storage, 'CD got added to storage after update_or_new && insert');
 

Modified: DBIx-Class/0.08/branches/extended_rels/t/81transactions.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/81transactions.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/81transactions.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -333,7 +333,10 @@
 {
   my $schema = DBICTest->init_schema();
 
-  warnings_exist (sub {
+  # something is really confusing Test::Warn here, no time to debug
+=begin comment
+  warnings_exist (
+    sub {
       my $guard = $schema->txn_scope_guard;
       $schema->resultset ('Artist')->create ({ name => 'bohhoo'});
 
@@ -345,6 +348,30 @@
     ],
     'proper warnings generated on out-of-scope+rollback failure'
   );
+=end comment
+
+=cut
+
+  my @want = (
+    qr/A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or error. Rolling back./,
+    qr/\*+ ROLLBACK FAILED\!\!\! \*+/,
+  );
+
+  my @w;
+  local $SIG{__WARN__} = sub {
+    if (grep {$_[0] =~ $_} (@want)) {
+      push @w, $_[0];
+    }
+    else {
+      warn $_[0];
+    }
+  };
+  {
+      my $guard = $schema->txn_scope_guard;
+      $schema->resultset ('Artist')->create ({ name => 'bohhoo'});
+
+      $schema->storage->disconnect;  # this should freak out the guard rollback
+  }
+
+  is (@w, 2, 'Both expected warnings found');
 }
 
 done_testing;
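
Since Test::Warn had to be sidestepped, the inline $SIG{__WARN__} capture could also be factored into a tiny reusable helper; a sketch, not part of the patch:

  sub capture_warnings {
    my ($code) = @_;
    my @w;
    local $SIG{__WARN__} = sub { push @w, $_[0] };
    $code->();
    return @w;
  }

  my @w = capture_warnings(sub { warn "guard went out of scope\n" });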

Modified: DBIx-Class/0.08/branches/extended_rels/t/86sqlt.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/86sqlt.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/86sqlt.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -6,10 +6,10 @@
 use DBICTest;
 
 BEGIN {
-  require DBIx::Class;
+  require DBIx::Class::Storage::DBI;
   plan skip_all =>
-      'Test needs SQL::Translator ' . DBIx::Class->_sqlt_minimum_version
-    if not DBIx::Class->_sqlt_version_ok;
+      'Test needs SQL::Translator ' . DBIx::Class::Storage::DBI->_sqlt_minimum_version
+    if not DBIx::Class::Storage::DBI->_sqlt_version_ok;
 }
 
 my $schema = DBICTest->init_schema (no_deploy => 1);

Modified: DBIx-Class/0.08/branches/extended_rels/t/93single_accessor_object.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/93single_accessor_object.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/93single_accessor_object.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -48,7 +48,7 @@
 	my $artist = $schema->resultset('Artist')->create({ artistid => 666, name => 'bad religion' });
 	my $cd = $schema->resultset('CD')->create({ cdid => 187, artist => 1, title => 'how could hell be any worse?', year => 1982, genreid => undef });
 
-	ok(!defined($cd->genreid), 'genreid is NULL');
+	ok(!defined($cd->get_column('genreid')), 'genreid is NULL');  #no accessor was defined for this column
 	ok(!defined($cd->genre), 'genre accessor returns undef');
 }
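
The switch to get_column here is forced by the accessor => undef change to CD.pm later in this commit: the column is still selected and stored, it just no longer generates a reader method. In short:

  my $genreid = $cd->get_column('genreid');   # fine: raw column access
  # $cd->genreid;   # would die; accessor => undef means no reader method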
 

Modified: DBIx-Class/0.08/branches/extended_rels/t/94versioning.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/94versioning.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/94versioning.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -5,7 +5,6 @@
 use Test::More;
 use File::Spec;
 use File::Copy;
-use Time::HiRes qw/time sleep/;
 
 #warn "$dsn $user $pass";
 my ($dsn, $user, $pass);
@@ -16,10 +15,14 @@
   plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
     unless ($dsn);
 
-  require DBIx::Class;
+  eval { require Time::HiRes }
+    || plan skip_all => 'Test needs Time::HiRes';
+  Time::HiRes->import(qw/time sleep/);
+
+  require DBIx::Class::Storage::DBI;
   plan skip_all =>
-      'Test needs SQL::Translator ' . DBIx::Class->_sqlt_minimum_version
-    if not DBIx::Class->_sqlt_version_ok;
+      'Test needs SQL::Translator ' . DBIx::Class::Storage::DBI->_sqlt_minimum_version
+    if not DBIx::Class::Storage::DBI->_sqlt_version_ok;
 }
 
 my $version_table_name = 'dbix_class_schema_versions';

Modified: DBIx-Class/0.08/branches/extended_rels/t/95sql_maker.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/95sql_maker.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/95sql_maker.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -70,8 +70,7 @@
 }
 
 # Make sure the carp/croak override in SQLA works (via SQLAHacks)
-my $file = __FILE__;
-$file = "\Q$file\E";
+my $file = quotemeta (__FILE__);
 throws_ok (sub {
   $schema->resultset ('Artist')->search ({}, { order_by => { -asc => 'stuff', -desc => 'staff' } } )->as_query;
 }, qr/$file/, 'Exception correctly croak()ed');

Modified: DBIx-Class/0.08/branches/extended_rels/t/95sql_maker_quote.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/95sql_maker_quote.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/95sql_maker_quote.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -6,13 +6,6 @@
 use lib qw(t/lib);
 use DBIC::SqlMakerTest;
 
-BEGIN {
-    eval "use DBD::SQLite";
-    plan $@
-        ? ( skip_all => 'needs DBD::SQLite for testing' )
-        : ( tests => 12 );
-}
-
 use_ok('DBICTest');
 
 my $schema = DBICTest->init_schema();
@@ -235,6 +228,36 @@
 );
 
 
+($sql, @bind) = $sql_maker->select(
+  [ { me => 'cd' }                  ],
+  [qw/ me.cdid me.artist me.title  /],
+  { cdid => \['rlike ?', [cdid => 'X'] ]       },
+  { group_by => 'title', having => \['count(me.artist) > ?', [ cnt => 2] ] },
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title` FROM `cd` `me` WHERE ( `cdid` rlike ? ) GROUP BY `title` HAVING count(me.artist) > ?/,
+  [ [ cdid => 'X'], ['cnt' => '2'] ],
+  'Quoting works with where/having arrayrefrefs',
+);
+
+
+($sql, @bind) = $sql_maker->select(
+  [ { me => 'cd' }                  ],
+  [qw/ me.cdid me.artist me.title  /],
+  { cdid => \'rlike X'              },
+  { group_by => 'title', having => \'count(me.artist) > 2' },
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title` FROM `cd` `me` WHERE ( `cdid` rlike X ) GROUP BY `title` HAVING count(me.artist) > 2/,
+  [],
+  'Quoting works with where/having scalarrefs',
+);
+
+
 ($sql, @bind) = $sql_maker->update(
           'group',
           {
@@ -330,3 +353,5 @@
   q/UPDATE [group] SET [name] = ?, [order] = ?/, [ ['name' => 'Bill'], ['order' => '12'] ],
   'bracket quoted table names for UPDATE'
 );
+
+done_testing;
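
The two new quoting tests cover the two literal-SQL escapes SQL::Abstract accepts in where/having: an arrayrefref carries bind values, while a bare scalarref is spliced in verbatim. Side by side at the resultset level, a sketch:

  # arrayrefref: literal SQL plus bind values
  my $bound = $schema->resultset('CD')->search({
    cdid => \[ 'rlike ?', [ cdid => 'X' ] ],
  });

  # scalarref: raw literal SQL, no binds
  my $raw = $schema->resultset('CD')->search({
    cdid => \'rlike X',
  });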

Modified: DBIx-Class/0.08/branches/extended_rels/t/99dbic_sqlt_parser.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/99dbic_sqlt_parser.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/99dbic_sqlt_parser.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -6,10 +6,10 @@
 use DBICTest;
 
 BEGIN {
-  require DBIx::Class;
+  require DBIx::Class::Storage::DBI;
   plan skip_all =>
-      'Test needs SQL::Translator ' . DBIx::Class->_sqlt_minimum_version
-    if not DBIx::Class->_sqlt_version_ok;
+      'Test needs SQL::Translator ' . DBIx::Class::Storage::DBI->_sqlt_minimum_version
+    if not DBIx::Class::Storage::DBI->_sqlt_version_ok;
 }
 
 my $schema = DBICTest->init_schema();

Modified: DBIx-Class/0.08/branches/extended_rels/t/count/prefetch.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/count/prefetch.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/count/prefetch.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -39,16 +39,9 @@
 
 # collapsing prefetch with distinct
 {
-  my $first_cd = $schema->resultset('Artist')->first->cds->first;
-  $first_cd->update ({
-    genreid => $first_cd->create_related (
-      genre => ({ name => 'vague genre' })
-    )->id
-  });
-
   my $rs = $schema->resultset("Artist")->search(undef, {distinct => 1})
             ->search_related('cds')->search_related('genre',
-                { 'genre.name' => { '!=', 'foo' } },
+                { 'genre.name' => 'emo' },
                 { prefetch => q(cds) },
             );
   is ($rs->all, 1, 'Correct number of objects');
@@ -60,15 +53,22 @@
       SELECT COUNT( * )
         FROM (
           SELECT genre.genreid
-            FROM artist me
-            JOIN cd cds ON cds.artist = me.artistid
+            FROM (
+              SELECT cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track
+                FROM (
+                  SELECT me.artistid, me.name, me.rank, me.charfield
+                    FROM artist me GROUP BY me.artistid, me.name, me.rank, me.charfield
+                ) me
+                LEFT JOIN cd cds ON cds.artist = me.artistid
+              GROUP BY cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track
+            ) cds
             JOIN genre genre ON genre.genreid = cds.genreid
             LEFT JOIN cd cds_2 ON cds_2.genreid = genre.genreid
-          WHERE ( genre.name != ? )
+          WHERE ( genre.name = ? )
           GROUP BY genre.genreid
         ) count_subq
     )',
-    [ [ 'genre.name' => 'foo' ] ],
+    [ [ 'genre.name' => 'emo' ] ],
   );
 }
 

Added: DBIx-Class/0.08/branches/extended_rels/t/count/search_related.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/count/search_related.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/extended_rels/t/count/search_related.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -0,0 +1,41 @@
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+my $cd_rs = $schema->resultset('CD')->search ({}, { rows => 1, order_by => 'cdid' });
+
+my $track_count = $cd_rs->first->tracks->count;
+
+cmp_ok ($track_count, '>', 1, 'First CD has several tracks');
+
+is ($cd_rs->search_related ('tracks')->count, $track_count, 'related->count returns correct number chained off a limited rs');
+is (scalar ($cd_rs->search_related ('tracks')->all), $track_count, 'related->all returns correct number of objects chained off a limited rs');
+
+
+my $joined_cd_rs = $cd_rs->search ({}, {
+  join => 'tracks', rows => 2, distinct => 1, having => \ 'count(tracks.trackid) > 2',
+});
+
+my $multiple_track_count = $schema->resultset('Track')->search ({
+  cd => { -in => $joined_cd_rs->get_column ('cdid')->as_query }
+})->count;
+
+
+is (
+  $joined_cd_rs->search_related ('tracks')->count,
+  $multiple_track_count,
+  'related->count returns correct number chained off a grouped rs',
+);
+is (
+  scalar ($joined_cd_rs->search_related ('tracks')->all),
+  $multiple_track_count,
+  'related->all returns correct number of objects chained off a grouped rs',
+);
+
+done_testing;
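
The chained counts above boil down to one rule: a limited or grouped resultset must first become a subselect before its related rows can be counted, which is exactly what the test's own cross-check spells out. The shape of that cross-check, as a sketch:

  my $limited = $schema->resultset('CD')->search({}, { rows => 1, order_by => 'cdid' });

  # count tracks belonging to just that one CD via an IN (subselect)
  my $n = $schema->resultset('Track')->search({
    cd => { -in => $limited->get_column('cdid')->as_query },
  })->count;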

Modified: DBIx-Class/0.08/branches/extended_rels/t/from_subquery.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/from_subquery.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/from_subquery.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -1,5 +1,5 @@
 use strict;
-use warnings FATAL => 'all';
+use warnings;
 
 use Test::More;
 

Added: DBIx-Class/0.08/branches/extended_rels/t/inflate/datetime_sybase.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/inflate/datetime_sybase.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/extended_rels/t/inflate/datetime_sybase.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -0,0 +1,85 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};
+
+if (not ($dsn && $user)) {
+  plan skip_all =>
+    'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' .
+    "\nWarning: This test drops and creates a table called 'track'";
+} else {
+  eval "use DateTime; use DateTime::Format::Sybase;";
+  if ($@) {
+    plan skip_all => 'needs DateTime and DateTime::Format::Sybase for testing';
+  }
+  else {
+    plan tests => (4 * 2 * 2) + 2; # (tests * dt_types * storage_types) + storage_tests
+  }
+}
+
+my @storage_types = (
+  'DBI::Sybase::ASE',
+  'DBI::Sybase::ASE::NoBindVars',
+);
+my $schema;
+
+for my $storage_type (@storage_types) {
+  $schema = DBICTest::Schema->clone;
+
+  unless ($storage_type eq 'DBI::Sybase::ASE') { # autodetect
+    $schema->storage_type("::$storage_type");
+  }
+  $schema->connection($dsn, $user, $pass, {
+    AutoCommit => 1,
+    on_connect_call => [ 'datetime_setup' ],
+  });
+
+  $schema->storage->ensure_connected;
+
+  isa_ok( $schema->storage, "DBIx::Class::Storage::$storage_type" );
+
+# coltype, col, date
+  my @dt_types = (
+    ['DATETIME', 'last_updated_at', '2004-08-21T14:36:48.080Z'],
+# minute precision
+    ['SMALLDATETIME', 'small_dt', '2004-08-21T14:36:00.000Z'],
+  );
+
+  for my $dt_type (@dt_types) {
+    my ($type, $col, $sample_dt) = @$dt_type;
+
+    eval { $schema->storage->dbh->do("DROP TABLE track") };
+    $schema->storage->dbh->do(<<"SQL");
+CREATE TABLE track (
+   trackid INT IDENTITY PRIMARY KEY,
+   cd INT,
+   position INT,
+   $col $type,
+)
+SQL
+    ok(my $dt = DateTime::Format::Sybase->parse_datetime($sample_dt));
+
+    my $row;
+    ok( $row = $schema->resultset('Track')->create({
+          $col => $dt,
+          cd => 1,
+        }));
+    ok( $row = $schema->resultset('Track')
+      ->search({ trackid => $row->trackid }, { select => [$col] })
+      ->first
+    );
+    is( $row->$col, $dt, 'DateTime roundtrip' );
+  }
+}
+
+# clean up our mess
+END {
+  if (my $dbh = eval { $schema->storage->_dbh }) {
+    $dbh->do('DROP TABLE track');
+  }
+}
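
The roundtrip depends on two pieces: the on_connect_call => 'datetime_setup' option visible in the connect block, and the Result class inflating datetime columns. The latter, in minimal form; a sketch with a hypothetical package name:

  package My::Schema::Result::Track;   # hypothetical
  use strict;
  use warnings;
  use base 'DBIx::Class';

  __PACKAGE__->load_components(qw/InflateColumn::DateTime Core/);
  __PACKAGE__->table('track');
  __PACKAGE__->add_columns(
    trackid         => { data_type => 'integer', is_auto_increment => 1 },
    last_updated_at => { data_type => 'datetime' },  # inflated to a DateTime
  );
  __PACKAGE__->set_primary_key('trackid');

  1;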

Modified: DBIx-Class/0.08/branches/extended_rels/t/inflate/hri.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/inflate/hri.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/inflate/hri.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -1,7 +1,7 @@
 use strict;
-use warnings;  
+use warnings;
 
-use Test::More qw(no_plan);
+use Test::More;
 use lib qw(t/lib);
 use DBICTest;
 my $schema = DBICTest->init_schema();
@@ -9,7 +9,7 @@
 # Under some versions of SQLite if the $rs is left hanging around it will lock
 # So we create a scope here cos I'm lazy
 {
-    my $rs = $schema->resultset('CD');
+    my $rs = $schema->resultset('CD')->search ({}, { order_by => 'cdid' });
 
     # get the defined columns
     my @dbic_cols = sort $rs->result_source->columns;
@@ -23,9 +23,11 @@
     my @hashref_cols = sort keys %$datahashref1;
 
     is_deeply( \@dbic_cols, \@hashref_cols, 'returned columns' );
+
+    my $cd1 = $rs->find ({cdid => 1});
+    is_deeply ( $cd1, $datahashref1, 'first/find return the same thing');
 }
 
-
 sub check_cols_of {
     my ($dbic_obj, $datahashref) = @_;
     
@@ -135,3 +137,5 @@
   [{ $artist->get_columns, cds => [] }],
   'nested has_many prefetch without entries'
 );
+
+done_testing;
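
For reference, the inflator under test is switched on per resultset; once set, first/find/all hand back plain hashrefs instead of row objects. A sketch:

  my $rs = $schema->resultset('CD')->search({}, { order_by => 'cdid' });
  $rs->result_class('DBIx::Class::ResultClass::HashRefInflator');

  my $href = $rs->first;   # a plain hashref now, not a row object
  print $href->{title};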

Modified: DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Artist.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Artist.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Artist.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -30,7 +30,7 @@
   },
 );
 __PACKAGE__->set_primary_key('artistid');
-__PACKAGE__->add_unique_constraint(['artistid']); # do not remove, part of a test
+__PACKAGE__->add_unique_constraint(artist => ['artistid']); # do not remove, part of a test
 
 __PACKAGE__->mk_classdata('field_name_for', {
     artistid    => 'primary key',

Modified: DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/CD.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/CD.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/CD.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -26,6 +26,7 @@
   'genreid' => { 
     data_type => 'integer',
     is_nullable => 1,
+    accessor => undef,
   },
   'single_track' => {
     data_type => 'integer',

Modified: DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Track.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Track.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Track.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -50,6 +50,19 @@
 __PACKAGE__->might_have( cd_single => 'DBICTest::Schema::CD', 'single_track' );
 __PACKAGE__->might_have( lyrics => 'DBICTest::Schema::Lyrics', 'track_id' );
 
+__PACKAGE__->belongs_to(
+    "year1999cd",
+    "DBICTest::Schema::Year1999CDs",
+    { "foreign.cdid" => "self.cd" },
+    { join_type => 'left' },  # the relationship is of course optional
+);
+__PACKAGE__->belongs_to(
+    "year2000cd",
+    "DBICTest::Schema::Year2000CDs",
+    { "foreign.cdid" => "self.cd" },
+    { join_type => 'left' },
+);
+
 __PACKAGE__->might_have (
     'next_track',
     __PACKAGE__,

Modified: DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Year1999CDs.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Year1999CDs.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Year1999CDs.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -9,7 +9,7 @@
 __PACKAGE__->table('year1999cds');
 __PACKAGE__->result_source_instance->is_virtual(1);
 __PACKAGE__->result_source_instance->view_definition(
-  "SELECT cdid, artist, title FROM cd WHERE year ='1999'"
+  "SELECT cdid, artist, title, single_track FROM cd WHERE year ='1999'"
 );
 __PACKAGE__->add_columns(
   'cdid' => {
@@ -23,9 +23,17 @@
     data_type => 'varchar',
     size      => 100,
   },
-
+  'single_track' => {
+    data_type => 'integer',
+    is_nullable => 1,
+    is_foreign_key => 1,
+  },
 );
 __PACKAGE__->set_primary_key('cdid');
 __PACKAGE__->add_unique_constraint([ qw/artist title/ ]);
 
+__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist' );
+__PACKAGE__->has_many( tracks => 'DBICTest::Schema::Track',
+    { "foreign.cd" => "self.cdid" });
+
 1;

Modified: DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Year2000CDs.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Year2000CDs.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest/Schema/Year2000CDs.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -1,30 +1,19 @@
-package # hide from PAUSE 
+package # hide from PAUSE
     DBICTest::Schema::Year2000CDs;
-## Used in 104view.t
 
-use base qw/DBICTest::BaseResult/;
+use base qw/DBICTest::Schema::CD/;
 
 __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
-
 __PACKAGE__->table('year2000cds');
-__PACKAGE__->result_source_instance->view_definition(
-  "SELECT cdid, artist, title FROM cd WHERE year ='2000'"
-);
-__PACKAGE__->add_columns(
-  'cdid' => {
-    data_type => 'integer',
-    is_auto_increment => 1,
-  },
-  'artist' => {
-    data_type => 'integer',
-  },
-  'title' => {
-    data_type => 'varchar',
-    size      => 100,
-  },
 
-);
-__PACKAGE__->set_primary_key('cdid');
-__PACKAGE__->add_unique_constraint([ qw/artist title/ ]);
+# need to operate on the instance for things to work
+__PACKAGE__->result_source_instance->view_definition( sprintf (
+  'SELECT %s FROM cd WHERE year = "2000"',
+  join (', ', __PACKAGE__->columns),
+));
 
+__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist' );
+__PACKAGE__->has_many( tracks => 'DBICTest::Schema::Track',
+    { "foreign.cd" => "self.cdid" });
+
 1;

Modified: DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest.pm
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest.pm	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/lib/DBICTest.pm	2009-12-01 10:59:18 UTC (rev 8006)
@@ -155,6 +155,11 @@
     my $self = shift;
     my $schema = shift;
 
+    $schema->populate('Genre', [
+      [qw/genreid name/],
+      [qw/1       emo  /],
+    ]);
+
     $schema->populate('Artist', [
         [ qw/artistid name/ ],
         [ 1, 'Caterwauler McCrae' ],
@@ -163,8 +168,8 @@
     ]);
 
     $schema->populate('CD', [
-        [ qw/cdid artist title year/ ],
-        [ 1, 1, "Spoonful of bees", 1999 ],
+        [ qw/cdid artist title year genreid/ ],
+        [ 1, 1, "Spoonful of bees", 1999, 1 ],
         [ 2, 1, "Forkful of bees", 2001 ],
         [ 3, 1, "Caterwaulin' Blues", 1997 ],
         [ 4, 2, "Generic Manufactured Singles", 2001 ],
@@ -243,7 +248,7 @@
     
     $schema->populate('TreeLike', [
         [ qw/id parent name/ ],
-        [ 1, undef, 'root' ],        
+        [ 1, undef, 'root' ],
         [ 2, 1, 'foo'  ],
         [ 3, 2, 'bar'  ],
         [ 6, 2, 'blop' ],

Modified: DBIx-Class/0.08/branches/extended_rels/t/lib/sqlite.sql
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/lib/sqlite.sql	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/lib/sqlite.sql	2009-12-01 10:59:18 UTC (rev 8006)
@@ -1,6 +1,6 @@
 -- 
 -- Created by SQL::Translator::Producer::SQLite
--- Created on Tue Aug 25 12:34:34 2009
+-- Created on Sun Nov 15 14:13:02 2009
 -- 
 
 
@@ -453,6 +453,6 @@
 -- View: year2000cds
 --
 CREATE VIEW year2000cds AS
-    SELECT cdid, artist, title FROM cd WHERE year ='2000';
+    SELECT cdid, artist, title, year, genreid, single_track FROM cd WHERE year = "2000";
 
 COMMIT;

Modified: DBIx-Class/0.08/branches/extended_rels/t/prefetch/grouped.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/prefetch/grouped.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/prefetch/grouped.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -217,11 +217,11 @@
     $rs->as_query,
     '(
       SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
-             tags.tagid, tags.cd, tags.tag 
+             tags.tagid, tags.cd, tags.tag
         FROM (
           SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
             FROM cd me
-          GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+          GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, cdid
           ORDER BY cdid
         ) me
         LEFT JOIN tags tags ON tags.cd = me.cdid
@@ -329,4 +329,28 @@
     );
 }
 
+{
+    my $rs = $schema->resultset('CD')->search({},
+        {
+           '+select' => [{ count => 'tags.tag' }],
+           '+as' => ['test_count'],
+           prefetch => ['tags'],
+           distinct => 1,
+           order_by => {'-asc' => 'tags.tag'},
+           rows => 1
+        }
+    );
+    is_same_sql_bind($rs->as_query, q{
+        (SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, me.test_count, tags.tagid, tags.cd, tags.tag
+          FROM (SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, COUNT( tags.tag ) AS test_count
+                FROM cd me LEFT JOIN tags tags ON tags.cd = me.cdid
+            GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, tags.tag
+            ORDER BY tags.tag ASC LIMIT 1)
+            me
+          LEFT JOIN tags tags ON tags.cd = me.cdid
+         ORDER BY tags.tag ASC, tags.cd, tags.tag
+        )
+    }, []);
+}
+
 done_testing;
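
The '+select'/'+as' pair exercised above appends to the default column list rather than replacing it, and the appended value comes back via get_column. In brief, a sketch:

  my $rs = $schema->resultset('CD')->search({}, {
    '+select' => [ { count => 'tags.tag' } ],   # added after the default columns
    '+as'     => [ 'test_count' ],
    join      => 'tags',
    distinct  => 1,
  });

  my $tag_count = $rs->first->get_column('test_count');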

Added: DBIx-Class/0.08/branches/extended_rels/t/prefetch/join_type.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/prefetch/join_type.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/extended_rels/t/prefetch/join_type.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -0,0 +1,48 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBIC::SqlMakerTest;
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+
+# a regular belongs_to prefetch
+my $cds = $schema->resultset('CD')->search ({}, { prefetch => 'artist' } );
+
+my $nulls = {
+  hashref => {},
+  arrayref => [],
+  undef => undef,
+};
+
+# make sure null-prefetches do not screw with the final sql:
+for my $type (keys %$nulls) {
+#  is_same_sql_bind (
+#    $cds->search({}, { prefetch => { artist => $nulls->{$type} } })->as_query,
+#    $cds->as_query,
+#    "same sql with null $type prefetch"
+#  );
+}
+
+# make sure left join is carried only starting from the first has_many
+is_same_sql_bind (
+  $cds->search({}, { prefetch => { artist => { cds => 'artist' } } })->as_query,
+  '(
+    SELECT  me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+            artist.artistid, artist.name, artist.rank, artist.charfield,
+            cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track,
+            artist_2.artistid, artist_2.name, artist_2.rank, artist_2.charfield
+      FROM cd me
+      JOIN artist artist ON artist.artistid = me.artist
+      LEFT JOIN cd cds ON cds.artist = artist.artistid
+      LEFT JOIN artist artist_2 ON artist_2.artistid = cds.artist
+    ORDER BY cds.artist, cds.year
+  )',
+  [],
+);
+
+done_testing;

Modified: DBIx-Class/0.08/branches/extended_rels/t/prefetch/via_search_related.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/prefetch/via_search_related.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/prefetch/via_search_related.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -37,7 +37,37 @@
 
 }, 'search_related prefetch with order_by works');
 
+TODO: { local $TODO = 'Unqualified columns in where clauses cannot be fixed without an SQLA rewrite' if SQL::Abstract->VERSION < 2;
+lives_ok ( sub {
+  my $no_prefetch = $schema->resultset('Track')->search_related(cd =>
+    {
+      'cd.year' => "2000",
+      'tagid' => 1,
+    },
+    {
+      join => 'tags',
+      rows => 1,
+    }
+  );
 
+  my $use_prefetch = $no_prefetch->search(
+    undef,
+    {
+      prefetch => 'tags',
+    }
+  );
+
+  is(
+    scalar ($use_prefetch->all),
+    scalar ($no_prefetch->all),
+    "Amount of returned rows is right"
+  );
+  is($use_prefetch->count, $no_prefetch->count, 'counts with and without prefetch match');
+
+}, 'search_related prefetch with condition referencing unqualified column of a joined table works');
+}
+
+
 lives_ok (sub {
     my $rs = $schema->resultset("Artwork")->search(undef, {distinct => 1})
               ->search_related('artwork_to_artist')->search_related('artist',

Modified: DBIx-Class/0.08/branches/extended_rels/t/relationship/core.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/relationship/core.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/relationship/core.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -5,6 +5,7 @@
 use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
+use DBIC::SqlMakerTest;
 
 my $schema = DBICTest->init_schema();
 my $sdebug = $schema->storage->debug;
@@ -132,7 +133,7 @@
   year => 2007,
 } );
 is( $cd->title, 'Greatest Hits 2: Louder Than Ever', 'find_or_new_related new record ok' );
-ok( ! $cd->in_storage, 'find_or_new_related on a new record: not in_storage' );
+is( $cd->in_storage, 0, 'find_or_new_related on a new record: not in_storage' );
 
 $cd->artist(undef);
 my $newartist = $cd->find_or_new_related( 'artist', {
@@ -258,8 +259,22 @@
 is($def_artist_cd->search_related('artist')->count, 0, 'closed search on null FK');
 
 # test undirected many-to-many relationship (e.g. "related artists")
-my $undir_maps = $schema->resultset("Artist")->find(1)->artist_undirected_maps;
+my $undir_maps = $schema->resultset("Artist")
+                          ->search ({artistid => 1})
+                            ->search_related ('artist_undirected_maps');
 is($undir_maps->count, 1, 'found 1 undirected map for artist 1');
+is_same_sql_bind (
+  $undir_maps->as_query,
+  '(
+    SELECT artist_undirected_maps.id1, artist_undirected_maps.id2
+      FROM artist me
+      LEFT JOIN artist_undirected_map artist_undirected_maps
+        ON artist_undirected_maps.id1 = me.artistid OR artist_undirected_maps.id2 = me.artistid
+    WHERE ( artistid = ? )
+  )',
+  [[artistid => 1]],
+  'expected join sql produced',
+);
 
 $undir_maps = $schema->resultset("Artist")->find(2)->artist_undirected_maps;
 is($undir_maps->count, 1, 'found 1 undirected map for artist 2');

Modified: DBIx-Class/0.08/branches/extended_rels/t/relationship/update_or_create_multi.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/relationship/update_or_create_multi.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/relationship/update_or_create_multi.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -76,8 +76,9 @@
 $schema->storage->debugcb(undef);
 $schema->storage->debug ($sdebug);
 
+my ($search_sql) = $sql[0] =~ /^(SELECT .+?)\:/;
 is_same_sql (
-  $sql[0],
+  $search_sql,
   'SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
     FROM cd me 
     WHERE ( me.artist = ? AND me.title = ? AND me.genreid = ? )
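
The new regexp above is needed because @sql is populated from the storage debug hook, whose trace output appends the bind values after a colon. A minimal sketch of that capture setup, assuming the surrounding test code looks roughly like this:

    my @sql;
    $schema->storage->debugcb(sub { push @sql, $_[1] });  # $_[1] is the traced SQL string
    $schema->storage->debug(1);

    # ... run the statements under test ...

    # trace lines look like "SELECT ... WHERE ...: '1', 'foo'",
    # so strip everything from the bind-value colon onwards
    my ($search_sql) = $sql[0] =~ /^(SELECT .+?)\:/;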

Modified: DBIx-Class/0.08/branches/extended_rels/t/resultset/as_query.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/resultset/as_query.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/resultset/as_query.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -1,7 +1,5 @@
-#!/usr/bin/perl
-
 use strict;
-use warnings FATAL => 'all';
+use warnings;
 
 use Test::More;
 

Added: DBIx-Class/0.08/branches/extended_rels/t/resultset/is_paged.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/resultset/is_paged.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/extended_rels/t/resultset/is_paged.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -0,0 +1,18 @@
+use strict;
+use warnings;
+
+use lib qw(t/lib);
+use Test::More;
+use Test::Exception;
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $tkfks = $schema->resultset('Artist');
+
+ok !$tkfks->is_paged, 'vanilla resultset is not paginated';
+
+my $paginated = $tkfks->search(undef, { page => 5 });
+ok $paginated->is_paged, 'resultset is paginated now';
+
+done_testing;
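
A short usage sketch to go with the new test: is_paged is what makes it safe to reach for the pager, which is only meaningful on a paginated resultset. The page/row numbers below are illustrative:

    my $rs = $schema->resultset('Artist')->search(undef, { page => 2, rows => 10 });

    if ($rs->is_paged) {
      my $pager = $rs->pager;   # a Data::Page object
      printf "showing page %d of %d\n", $pager->current_page, $pager->last_page;
    }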


Property changes on: DBIx-Class/0.08/branches/extended_rels/t/resultset/is_paged.t
___________________________________________________________________
Name: svn:mergeinfo
   + 
Name: svn:eol-style
   + native

Added: DBIx-Class/0.08/branches/extended_rels/t/resultset/plus_select.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/resultset/plus_select.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/extended_rels/t/resultset/plus_select.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -0,0 +1,63 @@
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $cd_rs = $schema->resultset('CD')->search ({genreid => { '!=', undef } }, { order_by => 'cdid' });
+my $track_cnt = $cd_rs->search({}, { rows => 1 })->search_related ('tracks')->count;
+
+my %basecols = $cd_rs->first->get_columns;
+
+# the current implementation of get_inflated_columns will "inflate"
+# relationships by simply calling the accessor when you have
+# identically named columns and relationships (which you shouldn't
+# have anyway). I consider this wrong, but appreciate the
+# ramifications of changing it. Thus the value override and the
+# TODO to go with it. Delete all of this if ever resolved.
+my %todo_rel_inflation_override = ( artist => $basecols{artist} );
+TODO: {
+  local $TODO = 'Treating relationships as inflatable data is wrong - see comment in ' . __FILE__;
+  ok (! keys %todo_rel_inflation_override);
+}
+
+my $plus_rs = $cd_rs->search (
+  {},
+  { join => 'tracks', distinct => 1, '+select' => { count => 'tracks.trackid' }, '+as' => 'tr_cnt' },
+);
+
+is_deeply (
+  { $plus_rs->first->get_columns },
+  { %basecols, tr_cnt => $track_cnt },
+  'extra columns returned by get_columns',
+);
+
+is_deeply (
+  { $plus_rs->first->get_inflated_columns, %todo_rel_inflation_override },
+  { %basecols, tr_cnt => $track_cnt },
+  'extra columns returned by get_inflated_columns without inflatable columns',
+);
+
+SKIP: {
+  eval { require DateTime };
+  skip "Need DateTime for +select/get_inflated_columns tests", 1 if $@;
+
+  $schema->class('CD')->inflate_column( 'year',
+    { inflate => sub { DateTime->new( year => shift ) },
+      deflate => sub { shift->year } }
+  );
+
+  $basecols{year} = DateTime->new ( year => $basecols{year} );
+
+  is_deeply (
+    { $plus_rs->first->get_inflated_columns, %todo_rel_inflation_override },
+    { %basecols, tr_cnt => $track_cnt },
+    'extra columns returned by get_inflated_columns',
+  );
+}
+
+done_testing;
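
For context on the '+select'/'+as' pair exercised above: both also accept arrayrefs, so several extra columns can be bolted onto the default selection at once. A hedged sketch (the second aggregate and both aliases are made up for illustration):

    my $rs = $schema->resultset('CD')->search({}, {
      join      => 'tracks',
      distinct  => 1,
      '+select' => [ { count => 'tracks.trackid' }, { max => 'tracks.position' } ],
      '+as'     => [ qw(tr_cnt max_pos) ],
    });

    my $row = $rs->first;
    print $row->get_column('tr_cnt'), "\n";  # extra columns come back via get_column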

Modified: DBIx-Class/0.08/branches/extended_rels/t/resultset/update_delete.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/resultset/update_delete.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/resultset/update_delete.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -79,8 +79,12 @@
 );
 
 # grouping on PKs only should pass
-$sub_rs->search ({}, { group_by => [ reverse $sub_rs->result_source->primary_columns ] })     # reverse to make sure the comaprison works
-          ->update ({ pilot_sequence => \ 'pilot_sequence + 1' });
+$sub_rs->search (
+  {},
+  {
+    group_by => [ reverse $sub_rs->result_source->primary_columns ],     # reverse to make sure the PK-list comparison works
+  },
+)->update ({ pilot_sequence => \ 'pilot_sequence + 1' });
 
 is_deeply (
   [ $tkfks->search ({ autopilot => [qw/a b x y/]}, { order_by => 'autopilot' })
@@ -90,6 +94,19 @@
   'Only two rows incremented',
 );
 
+# also make sure weird scalarref usage works (RT#51409)
+$tkfks->search (
+  \ 'pilot_sequence BETWEEN 11 AND 21',
+)->update ({ pilot_sequence => \ 'pilot_sequence + 1' });
+
+is_deeply (
+  [ $tkfks->search ({ autopilot => [qw/a b x y/]}, { order_by => 'autopilot' })
+            ->get_column ('pilot_sequence')->all 
+  ],
+  [qw/12 22 30 40/],
+  'Only two rows incremented (where => scalarref works)',
+);
+
 $sub_rs->delete;
 
 is ($tkfks->count, $tkfk_cnt -= 2, 'Only two rows deleted');
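
The RT#51409 case above passes a bare scalarref as the entire where clause. When the literal SQL needs placeholders, the \[ ... ] form carries the bind values along; a minimal sketch of the equivalent bound version:

    # same condition as above, but with bind values instead of inlined literals
    $tkfks->search(
      \[ 'pilot_sequence BETWEEN ? AND ?', 11, 21 ],
    )->update({ pilot_sequence => \'pilot_sequence + 1' });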

Modified: DBIx-Class/0.08/branches/extended_rels/t/storage/on_connect_call.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/storage/on_connect_call.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/storage/on_connect_call.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -3,67 +3,96 @@
 no warnings qw/once redefine/;
 
 use lib qw(t/lib);
+use DBI;
 use DBICTest;
+use DBICTest::Schema;
+use DBIx::Class::Storage::DBI;
 
-use Test::More tests => 9;
+# !!! do not replace this with done_testing - tests reside in the callbacks
+# !!! number of calls is important
+use Test::More tests => 16;
+# !!!
 
-use DBIx::Class::Storage::DBI;
-my $schema = DBICTest->init_schema(
-  no_connect  => 1,
-  no_deploy   => 1,
-);
+my $schema = DBICTest::Schema->clone;
 
-local *DBIx::Class::Storage::DBI::connect_call_foo = sub {
-  isa_ok $_[0], 'DBIx::Class::Storage::DBI',
-    'got storage in connect_call method';
-  is $_[1], 'bar', 'got param in connect_call method';
-};
+{
+  *DBIx::Class::Storage::DBI::connect_call_foo = sub {
+    isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+      'got storage in connect_call method';
+    is $_[1], 'bar', 'got param in connect_call method';
+  };
 
-local *DBIx::Class::Storage::DBI::disconnect_call_foo = sub {
-  isa_ok $_[0], 'DBIx::Class::Storage::DBI',
-    'got storage in disconnect_call method';
-};
+  *DBIx::Class::Storage::DBI::disconnect_call_foo = sub {
+    isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+      'got storage in disconnect_call method';
+  };
 
-ok $schema->connection(
-  DBICTest->_database,
-  {
-    on_connect_call => [
-        [ do_sql => 'create table test1 (id integer)' ],
-        [ do_sql => [ 'insert into test1 values (?)', {}, 1 ] ],
-        [ do_sql => sub { ['insert into test1 values (2)'] } ],
-        [ sub { $_[0]->dbh->do($_[1]) }, 'insert into test1 values (3)' ],
-        # this invokes $storage->connect_call_foo('bar') (above)
-        [ foo => 'bar' ],
-    ],
-    on_connect_do => 'insert into test1 values (4)',
-    on_disconnect_call => 'foo',
-  },
-), 'connection()';
+  ok $schema->connection(
+      DBICTest->_database,
+    {
+      on_connect_call => [
+          [ do_sql => 'create table test1 (id integer)' ],
+          [ do_sql => [ 'insert into test1 values (?)', {}, 1 ] ],
+          [ do_sql => sub { ['insert into test1 values (2)'] } ],
+          [ sub { $_[0]->dbh->do($_[1]) }, 'insert into test1 values (3)' ],
+          # this invokes $storage->connect_call_foo('bar') (above)
+          [ foo => 'bar' ],
+      ],
+      on_connect_do => 'insert into test1 values (4)',
+      on_disconnect_call => 'foo',
+    },
+  ), 'connection()';
 
-is_deeply (
-  $schema->storage->dbh->selectall_arrayref('select * from test1'),
-  [ [ 1 ], [ 2 ], [ 3 ], [ 4 ] ],
-  'on_connect_call/do actions worked'
-);
+  ok (! $schema->storage->connected, 'start disconnected');
 
-local *DBIx::Class::Storage::DBI::connect_call_foo = sub {
-  isa_ok $_[0], 'DBIx::Class::Storage::DBI',
-    'got storage in connect_call method';
-};
+  is_deeply (
+    $schema->storage->dbh->selectall_arrayref('select * from test1'),
+    [ [ 1 ], [ 2 ], [ 3 ], [ 4 ] ],
+    'on_connect_call/do actions worked'
+  );
 
-local *DBIx::Class::Storage::DBI::connect_call_bar = sub {
-  isa_ok $_[0], 'DBIx::Class::Storage::DBI',
-    'got storage in connect_call method';
-};
+  $schema->storage->disconnect;
+}
 
-$schema->storage->disconnect;
+{
+  *DBIx::Class::Storage::DBI::connect_call_foo = sub {
+    isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+      'got storage in connect_call method';
+  };
 
-ok $schema->connection(
-  DBICTest->_database,
-  {
-    # method list form
-    on_connect_call => [ 'foo', sub { ok 1, "coderef in list form" }, 'bar' ],
-  },
-), 'connection()';
+  *DBIx::Class::Storage::DBI::connect_call_bar = sub {
+    isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+      'got storage in connect_call method';
+  };
 
-$schema->storage->ensure_connected;
+
+  ok $schema->connection(
+    DBICTest->_database,
+    {
+      # method list form
+      on_connect_call => [ 'foo', sub { ok 1, "coderef in list form" }, 'bar' ],
+    },
+  ), 'connection()';
+
+  ok (! $schema->storage->connected, 'start disconnected');
+  $schema->storage->ensure_connected;
+  $schema->storage->disconnect; # this should not fire any tests
+}
+
+{
+  ok $schema->connection(
+    sub { DBI->connect(DBICTest->_database) },
+    {
+      # method list form
+      on_connect_call => [ sub { ok 1, "on_connect_call after DT parser" }, ],
+      on_disconnect_call => [ sub { ok 1, "on_disconnect_call after DT parser" }, ],
+    },
+  ), 'connection()';
+
+  ok (! $schema->storage->connected, 'start disconnected');
+
+  $schema->storage->_determine_driver;  # this should connect due to the coderef
+
+  ok ($schema->storage->connected, 'determine driver connects');
+  $schema->storage->disconnect;
+}
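
The rewritten test above exercises the shapes on_connect_call accepts. As a compact reference, a sketch of a realistic connection; the DSN variables and the PRAGMA are placeholders, and 'foo' assumes a connect_call_foo method like the ones defined in the test:

    my $schema = DBICTest::Schema->connect($dsn, $user, $pass, {
      on_connect_call => [
        [ do_sql => 'PRAGMA foreign_keys = ON' ],   # storage method name + args
        sub { $_[0]->dbh->do('...') },              # bare coderef, receives the storage
        'foo',                  # string form: invokes $storage->connect_call_foo()
      ],
    });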

Modified: DBIx-Class/0.08/branches/extended_rels/t/storage/on_connect_do.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/storage/on_connect_do.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/storage/on_connect_do.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -5,6 +5,7 @@
 
 use lib qw(t/lib);
 use base 'DBICTest';
+require DBI;
 
 
 my $schema = DBICTest->init_schema(
@@ -28,7 +29,7 @@
 $schema->storage->disconnect;
 
 ok $schema->connection(
-    DBICTest->_database,
+    sub { DBI->connect(DBICTest->_database) },
     {
         on_connect_do       => [
             'CREATE TABLE TEST_empty (id INTEGER)',
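
Switching this test to a coderef connect_info is deliberate: with a plain DSN the storage can guess its driver without connecting, while a coderef forces an actual connect during driver determination, which is exactly the code path being covered. A sketch of the coderef form in isolation (DSN assumed):

    $schema->connection(
      # DBIC receives a ready $dbh back and cannot inspect a DSN up front
      sub { DBI->connect('dbi:SQLite:dbname=:memory:', '', '', { RaiseError => 1 }) },
      { on_connect_do => 'PRAGMA synchronous = OFF' },
    );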

Modified: DBIx-Class/0.08/branches/extended_rels/t/storage/replication.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/storage/replication.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/storage/replication.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -66,7 +66,7 @@
 
     sub init_schema {
         # current SQLT SQLite producer does not handle DROP TABLE IF EXISTS, trap warnings here
-        local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/ };
+        local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/s };
 
         my ($class, $schema_method) = @_;
 

Modified: DBIx-Class/0.08/branches/extended_rels/t/zzzzzzz_sqlite_deadlock.t
===================================================================
--- DBIx-Class/0.08/branches/extended_rels/t/zzzzzzz_sqlite_deadlock.t	2009-12-01 10:18:48 UTC (rev 8005)
+++ DBIx-Class/0.08/branches/extended_rels/t/zzzzzzz_sqlite_deadlock.t	2009-12-01 10:59:18 UTC (rev 8006)
@@ -10,7 +10,7 @@
 use DBICTest::Schema;
 
 plan tests => 2;
-my $wait_for = 10;  # how many seconds to wait
+my $wait_for = 30;  # how many seconds to wait
 
 for my $close (0,1) {
 



