[Bast-commits] r7983 - in DBIx-Class/0.08/branches/prefetch: . lib/DBIx lib/DBIx/Class lib/DBIx/Class/CDBICompat lib/DBIx/Class/Manual lib/DBIx/Class/Schema lib/DBIx/Class/Storage lib/DBIx/Class/Storage/DBI lib/DBIx/Class/Storage/DBI/ADO lib/DBIx/Class/Storage/DBI/ODBC lib/DBIx/Class/Storage/DBI/Oracle lib/DBIx/Class/Storage/DBI/Sybase lib/DBIx/Class/Storage/DBI/Sybase/ASE lib/SQL/Translator/Parser/DBIx t t/count t/inflate t/lib t/lib/DBICTest/Schema t/prefetch t/relationship t/resultset t/storage

ribasushi at dev.catalyst.perl.org
Sun Nov 29 11:59:25 GMT 2009


Author: ribasushi
Date: 2009-11-29 11:59:25 +0000 (Sun, 29 Nov 2009)
New Revision: 7983

Added:
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/ADO.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/ADO/
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/ASE/
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBIHacks.pm
   DBIx-Class/0.08/branches/prefetch/t/747mssql_ado.t
   DBIx-Class/0.08/branches/prefetch/t/count/search_related.t
   DBIx-Class/0.08/branches/prefetch/t/inflate/datetime_sybase.t
   DBIx-Class/0.08/branches/prefetch/t/resultset/is_paged.t
   DBIx-Class/0.08/branches/prefetch/t/resultset/plus_select.t
Removed:
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm
Modified:
   DBIx-Class/0.08/branches/prefetch/
   DBIx-Class/0.08/branches/prefetch/Changes
   DBIx-Class/0.08/branches/prefetch/Makefile.PL
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/AccessorGroup.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/CDBICompat/Constructor.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Manual/FAQ.pod
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Manual/Troubleshooting.pod
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Ordered.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/ResultSet.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/ResultSource.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Row.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/SQLAHacks.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Schema.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Schema/Versioned.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/AutoCast.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/MSSQL.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Pg.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/SQLite.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase.pm
   DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/prefetch/lib/SQL/Translator/Parser/DBIx/Class.pm
   DBIx-Class/0.08/branches/prefetch/t/100populate.t
   DBIx-Class/0.08/branches/prefetch/t/101populate_rs.t
   DBIx-Class/0.08/branches/prefetch/t/104view.t
   DBIx-Class/0.08/branches/prefetch/t/60core.t
   DBIx-Class/0.08/branches/prefetch/t/71mysql.t
   DBIx-Class/0.08/branches/prefetch/t/72pg.t
   DBIx-Class/0.08/branches/prefetch/t/73oracle.t
   DBIx-Class/0.08/branches/prefetch/t/746mssql.t
   DBIx-Class/0.08/branches/prefetch/t/746sybase.t
   DBIx-Class/0.08/branches/prefetch/t/74mssql.t
   DBIx-Class/0.08/branches/prefetch/t/79aliasing.t
   DBIx-Class/0.08/branches/prefetch/t/80unique.t
   DBIx-Class/0.08/branches/prefetch/t/93single_accessor_object.t
   DBIx-Class/0.08/branches/prefetch/t/94versioning.t
   DBIx-Class/0.08/branches/prefetch/t/95sql_maker.t
   DBIx-Class/0.08/branches/prefetch/t/95sql_maker_quote.t
   DBIx-Class/0.08/branches/prefetch/t/count/prefetch.t
   DBIx-Class/0.08/branches/prefetch/t/from_subquery.t
   DBIx-Class/0.08/branches/prefetch/t/inflate/hri.t
   DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest.pm
   DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/CD.pm
   DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/Track.pm
   DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/Year1999CDs.pm
   DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/Year2000CDs.pm
   DBIx-Class/0.08/branches/prefetch/t/lib/sqlite.sql
   DBIx-Class/0.08/branches/prefetch/t/prefetch/grouped.t
   DBIx-Class/0.08/branches/prefetch/t/prefetch/via_search_related.t
   DBIx-Class/0.08/branches/prefetch/t/relationship/core.t
   DBIx-Class/0.08/branches/prefetch/t/relationship/update_or_create_multi.t
   DBIx-Class/0.08/branches/prefetch/t/resultset/as_query.t
   DBIx-Class/0.08/branches/prefetch/t/resultset/update_delete.t
   DBIx-Class/0.08/branches/prefetch/t/storage/on_connect_call.t
   DBIx-Class/0.08/branches/prefetch/t/storage/on_connect_do.t
   DBIx-Class/0.08/branches/prefetch/t/storage/replication.t
   DBIx-Class/0.08/branches/prefetch/t/zzzzzzz_sqlite_deadlock.t
Log:
 r7726 at Thesaurus (orig r7715):  ribasushi | 2009-09-21 16:26:07 +0200
 A test for an obscure join syntax - make sure we don't break it
 r7732 at Thesaurus (orig r7721):  ribasushi | 2009-09-22 12:58:09 +0200
 this would break in the future - sanitize sql fed to the tester
 r7735 at Thesaurus (orig r7724):  ribasushi | 2009-09-22 13:07:31 +0200
 The hack is no longer necessary with a recent sqla
 r7740 at Thesaurus (orig r7729):  caelum | 2009-09-24 23:44:01 +0200
 add test for multiple active statements in mssql over dbd::sybase
 r7741 at Thesaurus (orig r7730):  caelum | 2009-09-25 08:46:22 +0200
 test on_connect_do with a coderef connect_info too
 r7742 at Thesaurus (orig r7731):  caelum | 2009-09-25 23:26:52 +0200
 failing test for simple transaction with mssql via dbd::sybase
 r7765 at Thesaurus (orig r7753):  ribasushi | 2009-10-03 15:49:14 +0200
 Test reorg (no changes)
 r7766 at Thesaurus (orig r7754):  ribasushi | 2009-10-03 15:55:25 +0200
 Add failing tests for RT#50003
 r7767 at Thesaurus (orig r7755):  caelum | 2009-10-03 16:09:45 +0200
 fix on_connect_ with coderef connect_info
 r7771 at Thesaurus (orig r7759):  ribasushi | 2009-10-04 13:17:53 +0200
 Fix AutoCast's POD
 r7782 at Thesaurus (orig r7770):  ribasushi | 2009-10-09 06:57:20 +0200
  r7777 at Thesaurus (orig r7765):  frew | 2009-10-07 20:05:05 +0200
  add method to check if an rs is paginated
  r7778 at Thesaurus (orig r7766):  frew | 2009-10-07 20:31:02 +0200
  is_paginated method and test
  r7780 at Thesaurus (orig r7768):  frew | 2009-10-09 06:45:36 +0200
  change name of method
  r7781 at Thesaurus (orig r7769):  frew | 2009-10-09 06:47:31 +0200
  add message to changelog for is_paged
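
 For illustration, a minimal sketch of the pager check this adds
 ($schema and the resultset are hypothetical; is_paged is the method
 named above):

   my $rs = $schema->resultset('CD')->search({}, { rows => 10, page => 2 });

   if ($rs->is_paged) {
     my $pager = $rs->pager;   # a Data::Page object
     printf "page %d of %d\n", $pager->current_page, $pager->last_page;
   }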
 
 r7785 at Thesaurus (orig r7773):  ribasushi | 2009-10-09 11:00:36 +0200
 Ugh CRLF
 r7786 at Thesaurus (orig r7774):  ribasushi | 2009-10-09 11:04:35 +0200
 Skip versioning test on really old perls lacking Time::HiRes
 r7787 at Thesaurus (orig r7775):  ribasushi | 2009-10-09 11:04:50 +0200
 Changes
 r7788 at Thesaurus (orig r7776):  triode | 2009-10-09 22:32:04 +0200
 added troubleshooting case of excessive memory allocation involving TEXT/BLOB/etc
 columns and large LongReadLen
 
 r7789 at Thesaurus (orig r7777):  triode | 2009-10-09 22:44:21 +0200
 added my name to contributors list
 
 r7790 at Thesaurus (orig r7778):  ribasushi | 2009-10-10 18:49:15 +0200
 Whoops, this isn't right
 r7791 at Thesaurus (orig r7779):  ribasushi | 2009-10-11 15:44:18 +0200
 More ordered fixes
 r7793 at Thesaurus (orig r7781):  norbi | 2009-10-13 11:27:18 +0200
  r7982 at vger:  mendel | 2009-10-13 11:26:11 +0200
  Fixed a typo and a POD error.
 
 r7805 at Thesaurus (orig r7793):  ribasushi | 2009-10-16 14:28:35 +0200
 Fix test to stop failing when DT-support is not present
 r7811 at Thesaurus (orig r7799):  caelum | 2009-10-18 11:13:29 +0200
  r20728 at hlagh (orig r7703):  ribasushi | 2009-09-20 18:51:16 -0400
  Another try at a clean sybase branch
  r20730 at hlagh (orig r7705):  ribasushi | 2009-09-20 18:58:09 -0400
  Part one of the sybase work by Caelum (mostly reviewed)
  r20731 at hlagh (orig r7706):  ribasushi | 2009-09-20 19:18:40 -0400
  main sybase branch ready
  r21051 at hlagh (orig r7797):  caelum | 2009-10-18 04:57:43 -0400
   r20732 at hlagh (orig r7707):  ribasushi | 2009-09-20 19:20:00 -0400
   Branch for bulk insert
   r20733 at hlagh (orig r7708):  ribasushi | 2009-09-20 20:06:21 -0400
   All sybase bulk-insert code by Caelum
   r20750 at hlagh (orig r7725):  caelum | 2009-09-24 02:47:39 -0400
   clean up set_identity stuff
   r20751 at hlagh (orig r7726):  caelum | 2009-09-24 05:21:18 -0400
   minor cleanups, test update of blob to NULL
   r20752 at hlagh (orig r7727):  caelum | 2009-09-24 08:45:04 -0400
   remove some duplicate code
   r20753 at hlagh (orig r7728):  caelum | 2009-09-24 09:57:58 -0400
   fix insert with all defaults
   r20786 at hlagh (orig r7732):  caelum | 2009-09-25 21:17:16 -0400
   some cleanups
   r20804 at hlagh (orig r7736):  caelum | 2009-09-28 05:31:38 -0400
   minor changes
   r20805 at hlagh (orig r7737):  caelum | 2009-09-28 06:25:48 -0400
   fix DT stuff
   r20809 at hlagh (orig r7741):  caelum | 2009-09-28 22:25:55 -0400
   removed some dead code, added fix and test for _execute_array_empty
   r20811 at hlagh (orig r7743):  caelum | 2009-09-29 13:36:20 -0400
   minor changes after review
   r20812 at hlagh (orig r7744):  caelum | 2009-09-29 14:16:03 -0400
   do not clobber $rv from execute_array
   r20813 at hlagh (orig r7745):  caelum | 2009-09-29 14:38:14 -0400
   make insert_bulk atomic
   r20815 at hlagh (orig r7747):  caelum | 2009-09-29 20:35:26 -0400
   remove _exhaaust_statements
   r20816 at hlagh (orig r7748):  caelum | 2009-09-29 21:48:38 -0400
   fix insert_bulk when not using bulk api inside a txn
   r20831 at hlagh (orig r7749):  caelum | 2009-09-30 02:53:42 -0400
   added test for populate being atomic
   r20832 at hlagh (orig r7750):  caelum | 2009-09-30 03:00:59 -0400
   factor out subclass-specific _execute_array callback
   r20833 at hlagh (orig r7751):  caelum | 2009-10-01 11:59:30 -0400
   remove a piece of dead code
   r20840 at hlagh (orig r7758):  caelum | 2009-10-03 15:46:56 -0400
   remove _pretty_print
   r20842 at hlagh (orig r7760):  caelum | 2009-10-04 16:19:56 -0400
   minor optimization for insert_bulk
   r21050 at hlagh (orig r7796):  caelum | 2009-10-18 04:56:54 -0400
   error checking related to literal SQL for insert_bulk
  
 
 r7820 at Thesaurus (orig r7808):  caelum | 2009-10-21 03:10:39 +0200
 add test for populate with literal sql mixed with binds, improve error messages
 r7823 at Thesaurus (orig r7811):  ribasushi | 2009-10-21 16:33:45 +0200
 Show what's wrong with the current populate code
 r7824 at Thesaurus (orig r7812):  caelum | 2009-10-22 11:10:38 +0200
 stringify values passed to populate/insert_bulk
 r7825 at Thesaurus (orig r7813):  ribasushi | 2009-10-22 13:17:41 +0200
 Some smokers run the suite for 30 *minutes* - the timeout seems to be too short for them (boggle)
 r7826 at Thesaurus (orig r7814):  caelum | 2009-10-22 14:41:37 +0200
 a few extra tests can never hurt, right? :)
 r7827 at Thesaurus (orig r7815):  ribasushi | 2009-10-23 10:51:05 +0200
 Prevent sqlt from failing silently
 r7828 at Thesaurus (orig r7816):  ribasushi | 2009-10-23 10:52:49 +0200
 { is_foreign_key_constraint => 0, on_delete => undef } is a valid construct - no need to carp
 r7832 at Thesaurus (orig r7820):  robkinyon | 2009-10-26 20:11:22 +0100
 Fixed bad if-check in columns()
 r7840 at Thesaurus (orig r7828):  caelum | 2009-10-31 14:01:56 +0100
 change repository in meta to point to real svn url rather than svnweb
 r7842 at Thesaurus (orig r7830):  caelum | 2009-10-31 21:04:39 +0100
 pass sqlite_version to SQLT
 r7843 at Thesaurus (orig r7831):  caelum | 2009-10-31 21:22:37 +0100
 fix regex to numify sqlite_version
 r7844 at Thesaurus (orig r7832):  caelum | 2009-10-31 23:59:19 +0100
 work-around disconnect bug with DBD::Pg 2.15.1
 r7855 at Thesaurus (orig r7843):  ribasushi | 2009-11-04 10:55:51 +0100
  r7817 at Thesaurus (orig r7805):  rbuels | 2009-10-21 02:37:28 +0200
  making a branch, here we go again with the pg_unqualified_schema
  r7818 at Thesaurus (orig r7806):  rbuels | 2009-10-21 02:38:59 +0200
  more pg unqualified schema tests, which expose a gap in the coverage
  r7819 at Thesaurus (orig r7807):  rbuels | 2009-10-21 03:10:38 +0200
  gutted Pg storage driver's sequence discovery to just rely on DBD::Pg's last_insert_id.  this needs testing with older versions of DBD::Pg
  r7821 at Thesaurus (orig r7809):  rbuels | 2009-10-21 04:00:39 +0200
  more coverage in Pg sequence-discovery tests.  i think this shows why last_insert_id cannot be used.
  r7822 at Thesaurus (orig r7810):  rbuels | 2009-10-21 04:07:05 +0200
  reverted [7807], and just changed code to use the custom pg_catalog query, which is the only thing that works in the pathological case where DBIC is told a different primary key from the primary key that is set on the table in the DB ([7809] added testing for this)
  r7852 at Thesaurus (orig r7840):  rbuels | 2009-11-03 18:47:05 +0100
  added Changes line mentioning tweak to Pg auto-inc fix
  r7854 at Thesaurus (orig r7842):  ribasushi | 2009-11-04 10:55:35 +0100
  Cleanup exceptions
 
 r7858 at Thesaurus (orig r7846):  caelum | 2009-11-06 16:01:30 +0100
 transactions for MSSQL over DBD::Sybase
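
 As a rough illustration of what this enables (stock DBIx::Class
 transaction API; $schema is hypothetical):

   # commits on success, rolls back and rethrows on exception
   $schema->txn_do(sub {
     $schema->resultset('Artist')->create({ name => 'Foo' });
     $schema->resultset('Artist')->create({ name => 'Bar' });
   });
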
 r7861 at Thesaurus (orig r7849):  caelum | 2009-11-10 13:16:18 +0100
 made commit/rollback when disconnected an exception
 r7862 at Thesaurus (orig r7850):  robkinyon | 2009-11-10 17:19:57 +0100
 Added a note about select
 r7863 at Thesaurus (orig r7851):  ribasushi | 2009-11-10 18:23:10 +0100
 Changes
 r7867 at Thesaurus (orig r7855):  frew | 2009-11-11 21:56:37 +0100
 RT50874
 r7868 at Thesaurus (orig r7856):  frew | 2009-11-11 23:50:43 +0100
 RT50828
 r7869 at Thesaurus (orig r7857):  frew | 2009-11-11 23:54:15 +0100
 clearer test message
 r7870 at Thesaurus (orig r7858):  frew | 2009-11-12 00:37:27 +0100
 some cleanup for $rs->populate
 r7872 at Thesaurus (orig r7860):  ribasushi | 2009-11-12 01:35:36 +0100
 Fix find on resultset with custom result_class
 r7873 at Thesaurus (orig r7861):  ribasushi | 2009-11-12 01:40:14 +0100
 Fix return value of in_storage
 r7874 at Thesaurus (orig r7862):  ribasushi | 2009-11-12 01:43:48 +0100
 Extra FAQ entry
 r7875 at Thesaurus (orig r7863):  ribasushi | 2009-11-12 02:11:25 +0100
 Sanify _determine_driver handling in ::Storage::DBI
 r7876 at Thesaurus (orig r7864):  ribasushi | 2009-11-12 02:14:37 +0100
 Add mysql determine_driver test by Pedro Melo
 r7881 at Thesaurus (orig r7869):  ribasushi | 2009-11-12 11:10:04 +0100
 _cond_for_update_delete is hopelessly broken attempting to introspect SQLA1. Replace with a horrific but effective hack
 r7882 at Thesaurus (orig r7870):  ribasushi | 2009-11-12 11:15:12 +0100
 Clarifying comment
 r7884 at Thesaurus (orig r7872):  ribasushi | 2009-11-13 00:13:40 +0100
 The real fix for the non-introspectable condition bug, mst++
 r7885 at Thesaurus (orig r7873):  ribasushi | 2009-11-13 00:24:56 +0100
 Some cleanup
 r7887 at Thesaurus (orig r7875):  frew | 2009-11-13 10:01:37 +0100
 fix subtle bug with Sybase database type determination
 r7892 at Thesaurus (orig r7880):  frew | 2009-11-14 00:53:29 +0100
 release woo!
 r7894 at Thesaurus (orig r7882):  caelum | 2009-11-14 03:57:52 +0100
 fix oracle dep in Makefile.PL
 r7895 at Thesaurus (orig r7883):  caelum | 2009-11-14 04:20:53 +0100
 skip Oracle BLOB tests on DBD::Oracle == 1.23
 r7897 at Thesaurus (orig r7885):  caelum | 2009-11-14 09:40:01 +0100
  r7357 at pentium (orig r7355):  caelum | 2009-08-20 17:58:23 -0400
  branch to support MSSQL over ADO
  r7358 at pentium (orig r7356):  caelum | 2009-08-21 00:32:14 -0400
  something apparently working
  r7359 at pentium (orig r7357):  caelum | 2009-08-21 00:53:53 -0400
  slightly better mars test, still passes
 
 r7899 at Thesaurus (orig r7887):  caelum | 2009-11-14 09:41:54 +0100
  r7888 at pentium (orig r7886):  caelum | 2009-11-14 03:41:25 -0500
  add TODO test for large column list in select
 
 r7901 at Thesaurus (orig r7889):  caelum | 2009-11-14 09:47:16 +0100
 add ADO/MSSQL to Changes
 r7902 at Thesaurus (orig r7890):  caelum | 2009-11-14 10:27:29 +0100
 fix the large column list test for ADO/MSSQL, now passes
 r7904 at Thesaurus (orig r7892):  caelum | 2009-11-14 12:20:58 +0100
 fix Changes (ADO change in wrong release)
 r7905 at Thesaurus (orig r7893):  ribasushi | 2009-11-14 19:23:23 +0100
 Release 0.08114
 r7907 at Thesaurus (orig r7895):  ribasushi | 2009-11-15 12:09:17 +0100
 Failing test to highlight mssql autoconnect regression
 r7908 at Thesaurus (orig r7896):  ribasushi | 2009-11-15 12:20:25 +0100
 Fix plan
 r7913 at Thesaurus (orig r7901):  ribasushi | 2009-11-15 13:11:38 +0100
  r7773 at Thesaurus (orig r7761):  norbi | 2009-10-05 14:49:06 +0200
  Created branch 'prefetch_bug-unqualified_column_in_search_related_cond': A bug that manifests when a prefetched table's column is referenced without the table name in the condition of a search_related() on an M:N relationship.
  r7878 at Thesaurus (orig r7866):  ribasushi | 2009-11-12 02:36:08 +0100
  Factor some code out
  r7879 at Thesaurus (orig r7867):  ribasushi | 2009-11-12 09:11:03 +0100
  Factor out more stuff
  r7880 at Thesaurus (orig r7868):  ribasushi | 2009-11-12 09:21:04 +0100
  Saner naming/comments
  r7910 at Thesaurus (orig r7898):  ribasushi | 2009-11-15 12:39:29 +0100
  Move more code to DBIHacks, put back the update/delete rs check, just in case
  r7911 at Thesaurus (orig r7899):  ribasushi | 2009-11-15 13:01:34 +0100
  TODOify test until we get an AST
  r7912 at Thesaurus (orig r7900):  ribasushi | 2009-11-15 13:10:15 +0100
  Hide from pause
 
 r7921 at Thesaurus (orig r7909):  ribasushi | 2009-11-15 14:17:48 +0100
  r7871 at Thesaurus (orig r7859):  ribasushi | 2009-11-12 00:46:07 +0100
  Branches to test some ideas
  r7889 at Thesaurus (orig r7877):  abraxxa | 2009-11-13 12:05:50 +0100
  added rels to view result classes in test schema
  
  r7890 at Thesaurus (orig r7878):  abraxxa | 2009-11-13 13:05:45 +0100
  seems I found the bugger
  
  r7917 at Thesaurus (orig r7905):  ribasushi | 2009-11-15 13:29:23 +0100
  FK constraints towards a view don't quite work
  r7918 at Thesaurus (orig r7906):  ribasushi | 2009-11-15 14:10:10 +0100
  Turn into a straight-inheritance view class
  r7919 at Thesaurus (orig r7907):  ribasushi | 2009-11-15 14:11:03 +0100
  Extensive test of virtual and classic view relationships
  r7920 at Thesaurus (orig r7908):  ribasushi | 2009-11-15 14:17:23 +0100
  Fix non-sqlt schema file
 
 r7923 at Thesaurus (orig r7911):  caelum | 2009-11-15 18:31:37 +0100
 fix MSSQL via DBD::Sybase regression
 r7930 at Thesaurus (orig r7918):  ribasushi | 2009-11-16 19:15:45 +0100
  r7864 at Thesaurus (orig r7852):  edenc | 2009-11-10 20:15:15 +0100
  branching for fixes related to prefetch, distinct and group by
  r7865 at Thesaurus (orig r7853):  edenc | 2009-11-10 20:21:38 +0100
  added test case for ensuring a column mentioned in the order by clause is also included in the group by clause
  r7926 at Thesaurus (orig r7914):  ribasushi | 2009-11-16 08:09:30 +0100
  Make _resolve_column_info function without supplying column names
  r7927 at Thesaurus (orig r7915):  ribasushi | 2009-11-16 08:11:17 +0100
  Fix order_by/distinct bug
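
  A minimal sketch of the query shape this fixes (source and column
  names hypothetical):

    # with distinct => 1, any column used in order_by must also appear
    # in the generated GROUP BY - r7915 adds it automatically
    my $rs = $schema->resultset('CD')->search({}, {
      distinct => 1,
      order_by => { -desc => 'me.year' },
    });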
 
 r7937 at Thesaurus (orig r7925):  ribasushi | 2009-11-19 12:04:21 +0100
 Bail out early in Versioned if no versioning checks are requested
 r7938 at Thesaurus (orig r7926):  ribasushi | 2009-11-19 12:06:13 +0100
 POD fixes
 r7940 at Thesaurus (orig r7928):  caelum | 2009-11-22 11:03:33 +0100
 fix connection setup for Sybase
 r7943 at Thesaurus (orig r7931):  caelum | 2009-11-22 13:27:43 +0100
 override _run_connection_actions for internal connection setup in sybase stuff, much cleaner this way
 r7947 at Thesaurus (orig r7935):  ribasushi | 2009-11-23 01:18:28 +0100
 Whoops
 r7948 at Thesaurus (orig r7936):  ribasushi | 2009-11-23 01:28:50 +0100
 Fix ::Versioned regression introduced in r7925
 r7951 at Thesaurus (orig r7939):  caelum | 2009-11-23 12:32:10 +0100
 add subname to rdbms_specific_methods wrapper
 r7953 at Thesaurus (orig r7941):  caelum | 2009-11-23 13:23:14 +0100
  r21187 at hlagh (orig r7933):  ribasushi | 2009-11-22 18:38:34 -0500
  New sybase refactor branch
  r21188 at hlagh (orig r7934):  ribasushi | 2009-11-22 19:06:48 -0500
  refactor part1
  r21192 at hlagh (orig r7938):  ribasushi | 2009-11-22 19:30:05 -0500
  refactor part 2
  r21194 at hlagh (orig r7940):  caelum | 2009-11-23 07:06:46 -0500
  fix test
 
 r7955 at Thesaurus (orig r7943):  ribasushi | 2009-11-23 16:30:13 +0100
 Add missing Sub::Name invocations and improve the SQLA Carp overrides
 r7957 at Thesaurus (orig r7945):  ribasushi | 2009-11-24 10:12:49 +0100
  r7749 at Thesaurus (orig r7738):  norbi | 2009-09-28 22:01:39 +0200
  Created branch 'void_populate_resultset_cond': Fixing a bug: $rs->populate in void context does not use the conditions from $rs.
  r7751 at Thesaurus (orig r7740):  norbi | 2009-09-28 23:26:06 +0200
   r7935 at vger:  mendel | 2009-09-28 23:25:52 +0200
   Undid the previous tweaks to the already existing tests and added new tests instead.
  
  r7928 at Thesaurus (orig r7916):  ribasushi | 2009-11-16 08:48:42 +0100
  Change plan
  r7956 at Thesaurus (orig r7944):  ribasushi | 2009-11-24 10:10:49 +0100
  Better naming and a bit leaner implementation. Main idea remains the same
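
  Sketch of the behaviour being fixed (names hypothetical): in void
  context populate() now folds the resultset condition into each row,
  just as create() does:

    my $artist_cds = $schema->resultset('CD')->search({ artist => 7 });
    $artist_cds->populate([
      { title => 'First'  },
      { title => 'Second' },
    ]);   # both new rows get artist => 7 applied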
 
 r7959 at Thesaurus (orig r7947):  ribasushi | 2009-11-24 10:39:52 +0100
 Changes and prevent a spurious todo-pass
 r7962 at Thesaurus (orig r7950):  ribasushi | 2009-11-24 19:43:42 +0100
 Extra sqla quoting test
 r7963 at Thesaurus (orig r7951):  ribasushi | 2009-11-24 19:48:01 +0100
 Extra sqla quoting test(2)
 r7964 at Thesaurus (orig r7952):  ribasushi | 2009-11-25 21:24:10 +0100
 wtf
 r7967 at Thesaurus (orig r7955):  ribasushi | 2009-11-26 11:07:06 +0100
 cleanups
 r7968 at Thesaurus (orig r7956):  ribasushi | 2009-11-26 12:11:21 +0100
 Sanify search_related chaining code (no functional changes)
 r7969 at Thesaurus (orig r7957):  ribasushi | 2009-11-26 12:52:05 +0100
 Another count() quirk down
 r7970 at Thesaurus (orig r7958):  ribasushi | 2009-11-26 14:23:28 +0100
 Add a no-accessor column to generally test handling
 r7972 at Thesaurus (orig r7960):  ribasushi | 2009-11-26 15:32:17 +0100
 Whoops, wrong accessor (things still work though)
 r7977 at Thesaurus (orig r7965):  ribasushi | 2009-11-26 16:43:21 +0100
  r7971 at Thesaurus (orig r7959):  ribasushi | 2009-11-26 14:54:17 +0100
  New branch for get_inflated_column bugfix
  r7974 at Thesaurus (orig r7962):  ribasushi | 2009-11-26 15:56:20 +0100
  Fix for rt46953
  r7975 at Thesaurus (orig r7963):  ribasushi | 2009-11-26 16:05:17 +0100
  Make Test::More happy
  r7976 at Thesaurus (orig r7964):  ribasushi | 2009-11-26 16:43:09 +0100
  Changes
 
 r7980 at Thesaurus (orig r7968):  ribasushi | 2009-11-27 01:38:11 +0100
 Fix search_related wrt grouped resultsets (distinct is currently passed to the new resultset, this is probably wrong)
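
 Sketch of the chain this concerns (names hypothetical):

   my $limited = $schema->resultset('Artist')->search({}, { rows => 3 });
   my $cds     = $limited->search_related('cds');
   my $count   = $cds->count;   # must honour the rows => 3 limit
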
 r7987 at Thesaurus (orig r7975):  ribasushi | 2009-11-28 16:54:23 +0100
 Cleanup the s.c.o. index
 r7988 at Thesaurus (orig r7976):  ribasushi | 2009-11-28 16:57:04 +0100
 Test based on http://lists.scsys.co.uk/pipermail/dbix-class/2009-November/008599.html



Property changes on: DBIx-Class/0.08/branches/prefetch
___________________________________________________________________
Name: svk:merge
   - 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/cookbook_fixes:7657
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7237
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/autocast:7418
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/connect_info_hash:7435
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cookbook_fixes:7479
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_has_many_join:7382
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7566
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:7682
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulk_insert:7679
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:7712
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
   + 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/cookbook_fixes:7657
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/prefetch_bug-unqualified_column_in_search_related_cond:7959
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/void_populate_resultset_cond:7935
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7982
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/ado_mssql:7886
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/autocast:7418
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/connect_info_hash:7435
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cookbook_fixes:7479
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/get_inflated_columns_rt46953:7964
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_has_many_join:7382
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/is_resultset_paginated:7769
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7842
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch-group_by:7917
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_bug-unqualified_column_in_search_related_cond:7900
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:7682
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulk_insert:7679
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulkinsert_support:7796
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_refactor:7940
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_support:7797
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/view_rels:7908
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/void_populate_resultset_cond:7944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:7976
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510

Modified: DBIx-Class/0.08/branches/prefetch/Changes
===================================================================
--- DBIx-Class/0.08/branches/prefetch/Changes	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/Changes	2009-11-29 11:59:25 UTC (rev 7983)
@@ -1,5 +1,59 @@
 Revision history for DBIx::Class
 
+        - Fix distinct => 1 with non-selecting order_by (the columns
+          in order_by also need to be added to the resulting group_by)
+        - Do not attempt to deploy FK constraints pointing to a View
+        - Refactored Sybase storage driver into a central ::DBI::Sybase
+          dispatcher, and a sybase-specific ::DBI::Sybase::ASE
+        - Make sure populate() inherits the resultset conditions just
+          like create() does
+        - Fix count/objects from search_related on limited resultset
+        - Make get_inflated_columns behave identically to get_columns
+          wrt +select/+as (RT#46953)
+
+0.08114 2009-11-14 17:45:00 (UTC)
+        - Preliminary support for MSSQL via DBD::ADO
+        - Fix botched 0.08113 release (invalid tarball)
+
+0.08113 2009-11-13 23:13:00 (UTC)
+        - Fix populate with has_many bug
+          (RT #50828)
+        - Fix Oracle autoincrement broken for Resultsets with scalar refs
+          (RT #50874)
+        - Complete Sybase RDBMS support including:
+          - Support for TEXT/IMAGE columns
+          - Support for the 'money' datatype
+          - Transaction savepoints support
+          - DateTime inflation support
+          - Support for bind variables when connecting to a newer Sybase with
+             OpenClient libraries
+          - Support for connections via FreeTDS with CASTs for bind variables
+             when needed
+          - Support for interpolated variables with proper quoting when
+             connecting to an older Sybase and/or via FreeTDS
+          - bulk API support for populate()
+        - Transaction support for MSSQL via DBD::Sybase
+        - Add is_paged method to DBIx::Class::ResultSet so that we can
+          check whether we want a pager
+        - Skip versioning test on really old perls lacking Time::HiRes
+          (RT #50209)
+        - Fixed on_connect_do/call regression when used with a coderef
+          connector (RT #50003)
+        - A couple of fixes to Ordered to remedy subclassing issues
+        - Fixed another lingering problem with PostgreSQL
+          auto-increment support and its interaction with multiple
+          schemas
+        - Remove some IN workarounds, and require a recent version of
+          SQLA instead
+        - Improvements to populate's handling of mixed scalarref values
+        - Fixed regression losing result_class after $rs->find (introduced
+          in 0.08108)
+        - Fix in_storage() to return 1|0 as per existing documentation
+        - Centralize handling of _determine_driver calls prior to certain
+          ::Storage::DBI methods
+        - Fix update/delete arbitrary condition handling (RT#51409)
+        - POD improvements
+
 0.08112 2009-09-21 10:57:00 (UTC)
         - Remove the recommends from Makefile.PL, DBIx::Class is not
           supposed to have optional dependencies. ever.
@@ -102,7 +156,7 @@
           nonexisting prefetch
         - make_column_dirty() now overwrites the deflated value with an
           inflated one if such exists
-        - Fixed set_$rel with where restriction deleting rows outside 
+        - Fixed set_$rel with where restriction deleting rows outside
           the restriction
         - populate() returns the created objects or an arrayref of the
           created objects depending on scalar vs. list context
@@ -154,7 +208,7 @@
           side of the relation, to avoid duplicates
         - DBIC now properly handles empty inserts (invoking all default
           values from the DB, normally via INSERT INTO tbl DEFAULT VALUES
-        - Fix find_or_new/create to stop returning random rows when 
+        - Fix find_or_new/create to stop returning random rows when
           default value insert is requested (RT#28875)
         - Make IC::DT extra warning state the column name too
         - It is now possible to transparrently search() on columns
@@ -176,9 +230,9 @@
         - Change ->count code to work correctly with DISTINCT (distinct => 1)
           via GROUP BY
         - Removed interpolation of bind vars for as_query - placeholders
-          are preserved and nested query bind variables are properly 
+          are preserved and nested query bind variables are properly
           merged in the correct order
-        - Refactor DBIx::Class::Storage::DBI::Sybase to automatically 
+        - Refactor DBIx::Class::Storage::DBI::Sybase to automatically
           load a subclass, namely Microsoft_SQL_Server.pm
           (similar to DBIx::Class::Storage::DBI::ODBC)
         - Refactor InflateColumn::DateTime to allow components to
@@ -241,7 +295,7 @@
           - not try and insert things tagged on via new_related unless required
         - Possible to set locale in IC::DateTime extra => {} config
         - Calling the accessor of a belongs_to when the foreign_key
-          was NULL and the row was not stored would unexpectedly fail 
+          was NULL and the row was not stored would unexpectedly fail
         - Split sql statements for deploy only if SQLT::Producer returned a scalar
           containing all statements to be executed
         - Add as_query() for ResultSet and ResultSetColumn. This makes subqueries
@@ -269,8 +323,8 @@
         - new order_by => { -desc => 'colname' } syntax supported
         - PG array datatype supported
         - insert should use store_column, not set_column to avoid marking
-          clean just-stored values as dirty. New test for this 
-        - regression test for source_name 
+          clean just-stored values as dirty. New test for this
+        - regression test for source_name
 
 0.08099_05 2008-10-30 21:30:00 (UTC)
         - Rewrite of Storage::DBI::connect_info(), extended with an
@@ -284,7 +338,7 @@
         - Fixed up related resultsets and multi-create
         - Fixed superfluous connection in ODBC::_rebless
         - Fixed undef PK for first insert in ODBC::Microsoft_SQL_Server
-        - Added virtual method to Versioned so a user can create upgrade 
+        - Added virtual method to Versioned so a user can create upgrade
           path across multiple versions (jgoulah)
         - Better (and marginally faster) implementation of the HashRefInflator
           hash construction algorithm
@@ -293,7 +347,7 @@
 
 0.08099_04 2008-07-24 01:00:00
         - Functionality to storage to enable a sub to be run without FK checks
-        - Fixed $schema->clone bug which caused clone and source to share 
+        - Fixed $schema->clone bug which caused clone and source to share
           internal hash refs
         - Added register_extra_source methods for additional sources
         - Added datetime_undef_if_invalid for InflateColumn::DateTime to
@@ -319,11 +373,11 @@
         - Add warnings for non-unique ResultSet::find queries
         - Changed Storage::DBI::Replication to Storage::DBI::Replicated and
           refactored support.
-        - By default now deploy/diff et al. will ignore constraint and index 
+        - By default now deploy/diff et al. will ignore constraint and index
           names
         - Add ResultSet::_is_deterministic_value, make new_result filter the
           values passed to new to drop values that would generate invalid SQL.
-        - Use Sub::Name to name closures before installing them. Fixes 
+        - Use Sub::Name to name closures before installing them. Fixes
           incompatibility with Moose method modifiers on generated methods.
 
 0.08010 2008-03-01 10:30
@@ -332,7 +386,7 @@
 0.08009 2008-01-20 13:30
         - Made search_rs smarter about when to preserve the cache to fix
           mm prefetch usage
-        - Added Storage::DBI subclass for MSSQL over ODBC. 
+        - Added Storage::DBI subclass for MSSQL over ODBC.
         - Added freeze, thaw and dclone methods to Schema so that thawed
           objects will get re-attached to the schema.
         - Moved dbicadmin to JSON::Any wrapped JSON.pm for a sane API
@@ -346,20 +400,20 @@
           foreign and self parts the wrong way round in the condition
         - ResultSetColumn::func() now returns all results if called in list
           context; this makes things like func('DISTINCT') work as expected
-        - Many-to-many relationships now warn if the utility methods would 
+        - Many-to-many relationships now warn if the utility methods would
           clash
         - InflateColumn::DateTime now accepts an extra parameter of timezone
           to set timezone on the DT object (thanks Sergio Salvi)
-        - Added sqlt_deploy_hook to result classes so that indexes can be 
+        - Added sqlt_deploy_hook to result classes so that indexes can be
           added.
-        - Added startup checks to warn loudly if we appear to be running on 
+        - Added startup checks to warn loudly if we appear to be running on
           RedHat systems from perl-5.8.8-10 and up that have the bless/overload
           patch applied (badly) which causes 2x -> 100x performance penalty.
           (Jon Schutz)
-        - ResultSource::reverse_relationship_info can distinguish between 
+        - ResultSource::reverse_relationship_info can distinguish between
           sources using the same table
         - Row::insert will now not fall over if passed duplicate related objects
-        - Row::copy will not fall over if you have two relationships to the 
+        - Row::copy will not fall over if you have two relationships to the
           same source with a unique constraint on it
 
 0.08007 2007-09-04 19:36:00
@@ -371,7 +425,7 @@
         - Move to using Class::C3::Componentised
         - Remove warn statement from DBIx::Class::Row
 
-0.08005 2007-08-06 
+0.08005 2007-08-06
         - add timestamp fix re rt.cpan 26978 - no test yet but change
           clearly should cause no regressions
         - provide alias for related_resultset via local() so it's set
@@ -386,7 +440,7 @@
           (original fix from diz)
 
 0.08004 2007-08-06 19:00:00
-        - fix storage connect code to not trigger bug via auto-viv 
+        - fix storage connect code to not trigger bug via auto-viv
           (test from aherzog)
         - fixup cursor_class to be an 'inherited' attr for per-package defaults
         - add default_resultset_attributes entry to Schema

Modified: DBIx-Class/0.08/branches/prefetch/Makefile.PL
===================================================================
--- DBIx-Class/0.08/branches/prefetch/Makefile.PL	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/Makefile.PL	2009-11-29 11:59:25 UTC (rev 7983)
@@ -42,9 +42,10 @@
 requires 'Module::Find'             => '0.06';
 requires 'Path::Class'              => '0.16';
 requires 'Scope::Guard'             => '0.03';
-requires 'SQL::Abstract'            => '1.58';
+requires 'SQL::Abstract'            => '1.60';
 requires 'SQL::Abstract::Limit'     => '0.13';
 requires 'Sub::Name'                => '0.04';
+requires 'Data::Dumper::Concise'    => '1.000';
 
 my %replication_requires = (
   'Moose',                    => '0.87',
@@ -109,11 +110,17 @@
     ) : ()
   ,
 
-  $ENV{DBICTEST_ORACLE_DSN}
+  $ENV{DBICTEST_ORA_DSN}
     ? (
       'DateTime::Format::Oracle' => '0',
     ) : ()
   ,
+
+  $ENV{DBICTEST_SYBASE_DSN}
+    ? (
+      'DateTime::Format::Sybase' => 0,
+    ) : ()
+  ,
 );
 #************************************************************************#
 # Make ABSOLUTELY SURE that nothing on the list above is a real require, #
@@ -132,15 +139,20 @@
 
 resources 'IRC'         => 'irc://irc.perl.org/#dbix-class';
 resources 'license'     => 'http://dev.perl.org/licenses/';
-resources 'repository'  => 'http://dev.catalyst.perl.org/svnweb/bast/browse/DBIx-Class/';
+resources 'repository'  => 'http://dev.catalyst.perl.org/repos/bast/DBIx-Class/';
 resources 'MailingList' => 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class';
 
-no_index 'DBIx::Class::Storage::DBI::Sybase::Base';
 no_index 'DBIx::Class::SQLAHacks';
 no_index 'DBIx::Class::SQLAHacks::MSSQL';
+no_index 'DBIx::Class::SQLAHacks::OracleJoins';
 no_index 'DBIx::Class::Storage::DBI::AmbiguousGlob';
-no_index 'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server';
-no_index 'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars';
+no_index 'DBIx::Class::Storage::DBIHacks';
+no_index 'DBIx::Class::PK::Auto::DB2';
+no_index 'DBIx::Class::PK::Auto::MSSQL';
+no_index 'DBIx::Class::PK::Auto::MySQL';
+no_index 'DBIx::Class::PK::Auto::Oracle';
+no_index 'DBIx::Class::PK::Auto::Pg';
+no_index 'DBIx::Class::PK::Auto::SQLite';
 
 # re-build README and require extra modules for testing if we're in a checkout
 

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/AccessorGroup.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/AccessorGroup.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/AccessorGroup.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -17,8 +17,6 @@
 
 This class now exists in its own right on CPAN as Class::Accessor::Grouped
 
-1;
-
 =head1 AUTHORS
 
 Matt S. Trout <mst at shadowcatsystems.co.uk>

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/CDBICompat/Constructor.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/CDBICompat/Constructor.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/CDBICompat/Constructor.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -3,6 +3,8 @@
 
 use base qw(DBIx::Class::CDBICompat::ImaDBI);
 
+use Sub::Name();
+
 use strict;
 use warnings;
 
@@ -22,7 +24,7 @@
     return carp("$method already exists in $class")
             if *$meth{CODE};
 
-    *$meth = sub {
+    *$meth = Sub::Name::subname $meth => sub {
             my $self = shift;
             $self->sth_to_objects($self->sql_Retrieve($fragment), \@_);
     };

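The point of the subname change above, sketched in isolation (class and
method names hypothetical; subname is Sub::Name's documented interface):

  use Sub::Name ();

  # an anonymous sub normally shows up as __ANON__ in stack traces and
  # profilers; subname gives it a diagnosable name without changing
  # its behaviour
  my $code = Sub::Name::subname 'MyClass::retrieve_foo' => sub { 'hello' };
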
Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Manual/FAQ.pod
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Manual/FAQ.pod	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Manual/FAQ.pod	2009-11-29 11:59:25 UTC (rev 7983)
@@ -371,6 +371,9 @@
 
 =item .. insert many rows of data efficiently?
 
+The C<populate> method in L<DBIx::Class::ResultSet> provides
+efficient bulk inserts.
+
 =item .. update a collection of rows at the same time?
 
 Create a resultset using a search, to filter the rows of data you

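A sketch of the bulk-insert form the new FAQ entry points at (resultset
name hypothetical; arrayref-of-arrayrefs per the populate docs):

  # void context takes the fast path (execute_array / bulk API where
  # available); list context returns the created row objects
  $schema->resultset('Artist')->populate([
    [ qw/artistid name/ ],
    [ 1, 'Caterwauler McCrae' ],
    [ 2, 'Random Boy Band'    ],
  ]);
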
Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Manual/Troubleshooting.pod
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Manual/Troubleshooting.pod	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Manual/Troubleshooting.pod	2009-11-29 11:59:25 UTC (rev 7983)
@@ -156,5 +156,16 @@
 L<https://bugzilla.redhat.com/show_bug.cgi?id=460308> and
 L<http://rhn.redhat.com/errata/RHBA-2008-0876.html>
 
+=head2 Excessive Memory Allocation with TEXT/BLOB/etc. Columns and Large LongReadLen
+
+It has been observed, using L<DBD::ODBC>, that creating a L<DBIx::Class::Row>
+object which includes a column of data type TEXT/BLOB/etc. will allocate 
+LongReadLen bytes.  This allocation does not leak, but if LongReadLen 
+is large in size, and many such row objects are created, e.g. as the 
+output of a ResultSet query, the memory footprint of the Perl interpreter 
+can grow very large.
+
+The solution is to use the smallest practical value for LongReadLen.
+
 =cut
 

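A sketch of where that knob lives, assuming a conventional connect_info
hash (DSN, credentials and schema class hypothetical; LongReadLen itself
is a stock DBI attribute):

  my $schema = My::Schema->connect(
    'dbi:ODBC:mydsn', $user, $pass,
    { LongReadLen => 64 * 1024 },   # bound per-row TEXT/BLOB allocation
  );
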
Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Ordered.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Ordered.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Ordered.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -501,7 +501,7 @@
     }
     else {
       my $bumped_pos_val = $self->_position_value ($to_position);
-      my @between = ($to_position, $new_group_last_position);
+      my @between = map { $self->_position_value ($_) } ($to_position, $new_group_last_position);
       $self->_shift_siblings (1, @between);   #shift right
       $self->set_column( $position_column => $bumped_pos_val );
     }
@@ -682,27 +682,9 @@
 if you are working with preexisting non-normalised position data,
 or if you need to work with materialized path columns.
 
-=head2 _position
-
-  my $num_pos = $item->_position;
-
-Returns the B<absolute numeric position> of the current object, with the
-first object being at position 1, its sibling at position 2 and so on.
-By default simply returns the value of L</position_column>.
-
-=cut
-sub _position {
-    my $self = shift;
-
-#    #the right way to do this
-#    return $self->previous_siblings->count + 1;
-
-    return $self->get_column ($self->position_column);
-}
-
 =head2 _position_from_value
 
-  my $num_pos = $item->_position_of_value ( $pos_value )
+  my $num_pos = $item->_position_from_value ( $pos_value )
 
 Returns the B<absolute numeric position> of an object with a B<position
 value> set to C<$pos_value>. By default simply returns C<$pos_value>.
@@ -864,6 +846,19 @@
     );
 }
 
+=head2 _position
+
+  my $num_pos = $item->_position;
+
+Returns the B<absolute numeric position> of the current object, with the
+first object being at position 1, its sibling at position 2 and so on.
+
+=cut
+sub _position {
+    my $self = shift;
+    return $self->_position_from_value ($self->get_column ($self->position_column) );
+}
+
 =head2 _grouping_clause
 
 This method returns one or more name=>value pairs for limiting a search
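
To illustrate the split between position values and absolute positions, a
sketch of a hypothetical subclass storing positions spaced ten apart:

  package My::Schema::Result::SparseItem;
  use base qw/DBIx::Class/;

  __PACKAGE__->load_components(qw/Ordered Core/);
  __PACKAGE__->position_column('pos');

  # map between the absolute position (1,2,3...) and the
  # stored value (10,20,30...); _position now derives the
  # former from the latter via _position_from_value
  sub _position_value      { $_[1] * 10 }
  sub _position_from_value { $_[1] / 10 }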

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/ResultSet.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/ResultSet.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/ResultSet.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -357,9 +357,9 @@
   }
 
   my $rs = (ref $self)->new($self->result_source, $new_attrs);
-  if ($rows) {
-    $rs->set_cache($rows);
-  }
+
+  $rs->set_cache($rows) if ($rows);
+
   return $rs;
 }
 
@@ -519,7 +519,7 @@
     # in ::Relationship::Base::search_related (the row method), and furthermore
     # the relationship is of the 'single' type. This means that the condition
     # provided by the relationship (already attached to $self) is sufficient,
-    # as there can be only one row in the databse that would satisfy the 
+    # as there can be only one row in the database that would satisfy the
     # relationship
   }
   else {
@@ -530,7 +530,7 @@
   }
 
   # Run the query
-  my $rs = $self->search ($query, $attrs);
+  my $rs = $self->search ($query, {result_class => $self->result_class, %$attrs});
   if (keys %{$rs->_resolved_attrs->{collapse}}) {
     my $row = $rs->next;
     carp "Query returned more than one row" if $rs->next;
@@ -1204,7 +1204,7 @@
 
   my $tmp_attrs = { %$attrs };
 
-  # take off any limits, record_filter is cdbi, and no point of ordering a count 
+  # take off any limits, record_filter is cdbi, and no point of ordering a count
   delete $tmp_attrs->{$_} for (qw/select as rows offset order_by record_filter/);
 
   # overwrite the selector (supplied by the storage)
@@ -1212,7 +1212,7 @@
   $tmp_attrs->{as} = 'count';
 
   # read the comment on top of the actual function to see what this does
-  $tmp_attrs->{from} = $self->_switch_to_inner_join_if_needed (
+  $tmp_attrs->{from} = $self->result_source->schema->storage->_straight_join_to_node (
     $tmp_attrs->{from}, $tmp_attrs->{alias}
   );
 
@@ -1244,11 +1244,13 @@
   $sub_attrs->{select} = $rsrc->storage->_subq_count_select ($rsrc, $sub_attrs);
 
   # read the comment on top of the actual function to see what this does
-  $sub_attrs->{from} = $self->_switch_to_inner_join_if_needed (
+  $sub_attrs->{from} = $self->result_source->schema->storage->_straight_join_to_node (
     $sub_attrs->{from}, $sub_attrs->{alias}
   );
 
-  # this is so that ordering can be thrown away in things like Top limit
+  # this is so that the query can be simplified e.g.
+  # * non-limiting joins can be pruned
+  # * ordering can be thrown away in things like Top limit
   $sub_attrs->{-for_count_only} = 1;
 
   my $sub_rs = $rsrc->resultset_class->new ($rsrc, $sub_attrs);
@@ -1265,77 +1267,6 @@
   return $self->_count_rs ($attrs);
 }
 
-
-# The DBIC relationship chaining implementation is pretty simple - every
-# new related_relationship is pushed onto the {from} stack, and the {select}
-# window simply slides further in. This means that when we count somewhere
-# in the middle, we got to make sure that everything in the join chain is an
-# actual inner join, otherwise the count will come back with unpredictable
-# results (a resultset may be generated with _some_ rows regardless of if
-# the relation which the $rs currently selects has rows or not). E.g.
-# $artist_rs->cds->count - normally generates:
-# SELECT COUNT( * ) FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid
-# which actually returns the number of artists * (number of cds || 1)
-#
-# So what we do here is crawl {from}, determine if the current alias is at
-# the top of the stack, and if not - make sure the chain is inner-joined down
-# to the root.
-#
-sub _switch_to_inner_join_if_needed {
-  my ($self, $from, $alias) = @_;
-
-  # subqueries and other oddness is naturally not supported
-  return $from if (
-    ref $from ne 'ARRAY'
-      ||
-    @$from <= 1
-      ||
-    ref $from->[0] ne 'HASH'
-      ||
-    ! $from->[0]{-alias}
-      ||
-    $from->[0]{-alias} eq $alias
-  );
-
-  my $switch_branch;
-  JOINSCAN:
-  for my $j (@{$from}[1 .. $#$from]) {
-    if ($j->[0]{-alias} eq $alias) {
-      $switch_branch = $j->[0]{-join_path};
-      last JOINSCAN;
-    }
-  }
-
-  # something else went wrong
-  return $from unless $switch_branch;
-
-  # So it looks like we will have to switch some stuff around.
-  # local() is useless here as we will be leaving the scope
-  # anyway, and deep cloning is just too fucking expensive
-  # So replace the inner hashref manually
-  my @new_from = ($from->[0]);
-  my $sw_idx = { map { $_ => 1 } @$switch_branch };
-
-  for my $j (@{$from}[1 .. $#$from]) {
-    my $jalias = $j->[0]{-alias};
-
-    if ($sw_idx->{$jalias}) {
-      my %attrs = %{$j->[0]};
-      delete $attrs{-join_type};
-      push @new_from, [
-        \%attrs,
-        @{$j}[ 1 .. $#$j ],
-      ];
-    }
-    else {
-      push @new_from, $j;
-    }
-  }
-
-  return \@new_from;
-}
-
-
 sub _bool {
   return 1;
 }
@@ -1459,8 +1390,12 @@
 
   my $rsrc = $self->result_source;
 
+  # if a condition exists we need to strip all table qualifiers
+  # if this is not possible we'll force a subquery below
+  my $cond = $rsrc->schema->storage->_strip_cond_qualifiers ($self->{cond});
+
   my $needs_group_by_subq = $self->_has_resolved_attr (qw/collapse group_by -join/);
-  my $needs_subq = $self->_has_resolved_attr (qw/row offset/);
+  my $needs_subq = (not defined $cond) || $self->_has_resolved_attr(qw/row offset/);
 
   if ($needs_group_by_subq or $needs_subq) {
 
@@ -1508,70 +1443,11 @@
     return $rsrc->storage->$op(
       $rsrc,
       $op eq 'update' ? $values : (),
-      $self->_cond_for_update_delete,
+      $cond,
     );
   }
 }
 
-
-# _cond_for_update_delete
-#
-# update/delete require the condition to be modified to handle
-# the differing SQL syntax available.  This transforms the $self->{cond}
-# appropriately, returning the new condition.
-
-sub _cond_for_update_delete {
-  my ($self, $full_cond) = @_;
-  my $cond = {};
-
-  $full_cond ||= $self->{cond};
-  # No-op. No condition, we're updating/deleting everything
-  return $cond unless ref $full_cond;
-
-  if (ref $full_cond eq 'ARRAY') {
-    $cond = [
-      map {
-        my %hash;
-        foreach my $key (keys %{$_}) {
-          $key =~ /([^.]+)$/;
-          $hash{$1} = $_->{$key};
-        }
-        \%hash;
-      } @{$full_cond}
-    ];
-  }
-  elsif (ref $full_cond eq 'HASH') {
-    if ((keys %{$full_cond})[0] eq '-and') {
-      $cond->{-and} = [];
-      my @cond = @{$full_cond->{-and}};
-       for (my $i = 0; $i < @cond; $i++) {
-        my $entry = $cond[$i];
-        my $hash;
-        if (ref $entry eq 'HASH') {
-          $hash = $self->_cond_for_update_delete($entry);
-        }
-        else {
-          $entry =~ /([^.]+)$/;
-          $hash->{$1} = $cond[++$i];
-        }
-        push @{$cond->{-and}}, $hash;
-      }
-    }
-    else {
-      foreach my $key (keys %{$full_cond}) {
-        $key =~ /([^.]+)$/;
-        $cond->{$1} = $full_cond->{$key};
-      }
-    }
-  }
-  else {
-    $self->throw_exception("Can't update/delete on resultset with condition unless hash or array");
-  }
-
-  return $cond;
-}
-
-
 =head2 update
 
 =over 4
@@ -1758,10 +1634,19 @@
     }
     return wantarray ? @created : \@created;
   } else {
-    my ($first, @rest) = @$data;
+    my $first = $data->[0];
 
-    my @names = grep {!ref $first->{$_}} keys %$first;
-    my @rels = grep { $self->result_source->has_relationship($_) } keys %$first;
+    # if a column is a registered relationship, and is a non-blessed hash/array, consider
+    # it relationship data
+    my (@rels, @columns);
+    for (keys %$first) {
+      my $ref = ref $first->{$_};
+      $self->result_source->has_relationship($_) && ($ref eq 'ARRAY' or $ref eq 'HASH')
+        ? push @rels, $_
+        : push @columns, $_
+      ;
+    }
+
     my @pks = $self->result_source->primary_columns;
 
     ## do the belongs_to relationships
@@ -1790,17 +1675,21 @@
         delete $data->[$index]->{$rel};
         $data->[$index] = {%{$data->[$index]}, %$related};
 
-        push @names, keys %$related if $index == 0;
+        push @columns, keys %$related if $index == 0;
       }
     }
 
-    ## do bulk insert on current row
-    my @values = map { [ @$_{@names} ] } @$data;
+    ## inherit the data locked in the conditions of the resultset
+    my ($rs_data) = $self->_merge_cond_with_data({});
+    delete @{$rs_data}{@columns};
+    my @inherit_cols = keys %$rs_data;
+    my @inherit_data = values %$rs_data;
 
+    ## do bulk insert on current row
     $self->result_source->storage->insert_bulk(
       $self->result_source,
-      \@names,
-      \@values,
+      [@columns, @inherit_cols],
+      [ map { [ @$_{@columns}, @inherit_data ] } @$data ],
     );
 
     ## do the has_many relationships
@@ -1809,7 +1698,7 @@
       foreach my $rel (@rels) {
         next unless $item->{$rel} && ref $item->{$rel} eq "ARRAY";
 
-        my $parent = $self->find(map {{$_=>$item->{$_}} } @pks)
+        my $parent = $self->find({map { $_ => $item->{$_} } @pks})
      || $self->throw_exception('Cannot find the relating object.');
 
         my $child = $parent->$rel;
@@ -1937,46 +1826,66 @@
   $self->throw_exception( "new_result needs a hash" )
     unless (ref $values eq 'HASH');
 
-  my %new;
+  my ($merged_cond, $cols_from_relations) = $self->_merge_cond_with_data($values);
+
+  my %new = (
+    %$merged_cond,
+    @$cols_from_relations
+      ? (-cols_from_relations => $cols_from_relations)
+      : (),
+    -source_handle => $self->_source_handle,
+    -result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED
+  );
+
+  return $self->result_class->new(\%new);
+}
+
+# _merge_cond_with_data
+#
+# Takes a simple hash of K/V data and returns its copy merged with the
+# condition already present on the resultset. Additionally returns an
+# arrayref of value/condition names, which were inferred from related
+# objects (this is needed for in-memory related objects)
+sub _merge_cond_with_data {
+  my ($self, $data) = @_;
+
+  my (%new_data, @cols_from_relations);
+
   my $alias = $self->{attrs}{alias};
 
-  if (
-    defined $self->{cond}
-    && $self->{cond} eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION
-  ) {
-    %new = %{ $self->{attrs}{related_objects} || {} };  # nothing might have been inserted yet
-    $new{-from_resultset} = [ keys %new ] if keys %new;
-  } else {
+  if (! defined $self->{cond}) {
+    # just massage $data below
+  }
+  elsif ($self->{cond} eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION) {
+    %new_data = %{ $self->{attrs}{related_objects} || {} };  # nothing might have been inserted yet
+    @cols_from_relations = keys %new_data;
+  }
+  elsif (ref $self->{cond} ne 'HASH') {
     $self->throw_exception(
-      "Can't abstract implicit construct, condition not a hash"
-    ) if ($self->{cond} && !(ref $self->{cond} eq 'HASH'));
-
-    my $collapsed_cond = (
-      $self->{cond}
-        ? $self->_collapse_cond($self->{cond})
-        : {}
+      "Can't abstract implicit construct, resultset condition not a hash"
     );
-
+  }
+  else {
     # precedence must be given to passed values over values inherited from
     # the cond, so the order here is important.
-    my %implied =  %{$self->_remove_alias($collapsed_cond, $alias)};
-    while( my($col,$value) = each %implied ){
-      if(ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '='){
-        $new{$col} = $value->{'='};
+    my $collapsed_cond = $self->_collapse_cond($self->{cond});
+    my %implied = %{$self->_remove_alias($collapsed_cond, $alias)};
+
+    while ( my($col, $value) = each %implied ) {
+      if (ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '=') {
+        $new_data{$col} = $value->{'='};
         next;
       }
-      $new{$col} = $value if $self->_is_deterministic_value($value);
+      $new_data{$col} = $value if $self->_is_deterministic_value($value);
     }
   }
 
-  %new = (
-    %new,
-    %{ $self->_remove_alias($values, $alias) },
-    -source_handle => $self->_source_handle,
-    -result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED
+  %new_data = (
+    %new_data,
+    %{ $self->_remove_alias($data, $alias) },
   );
 
-  return $self->result_class->new(\%new);
+  return (\%new_data, \@cols_from_relations);
 }
 
 # _is_deterministic_value
@@ -2528,6 +2437,23 @@
   shift->set_cache(undef);
 }
 
+=head2 is_paged
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: true, if the resultset has been paginated
+
+=back
+
+=cut
+
+sub is_paged {
+  my ($self) = @_;
+  return !!$self->{attrs}{page};
+}
+
 =head2 related_resultset
 
 =over 4
@@ -2556,14 +2482,13 @@
         "' has no such relationship $rel")
       unless $rel_info;
 
-    my ($from,$seen) = $self->_chain_relationship($rel);
+    my $attrs = $self->_chain_relationship($rel);
 
-    my $join_count = $seen->{$rel};
+    my $join_count = $attrs->{seen_join}{$rel};
     my $alias = ($join_count > 1 ? join('_', $rel, $join_count) : $rel);
 
     #XXX - temp fix for result_class bug. There likely is a more elegant fix -groditi
-    my %attrs = %{$self->{attrs}||{}};
-    delete @attrs{qw(result_class alias)};
+    delete @{$attrs}{qw(result_class alias)};
 
     my $new_cache;
 
@@ -2584,20 +2509,14 @@
       # to work sanely (e.g. RestrictWithObject wants to be able to add
       # extra query restrictions, and these may need to be $alias.)
 
-      my $attrs = $rel_source->resultset_attributes;
-      local $attrs->{alias} = $alias;
+      my $rel_attrs = $rel_source->resultset_attributes;
+      local $rel_attrs->{alias} = $alias;
 
       $rel_source->resultset
                  ->search_rs(
                      undef, {
-                       %attrs,
-                       join => undef,
-                       prefetch => undef,
-                       select => undef,
-                       as => undef,
-                       where => $self->{cond},
-                       seen_join => $seen,
-                       from => $from,
+                       %$attrs,
+                       where => $attrs->{where},
                    });
     };
     $new->set_cache($new_cache) if $new_cache;
@@ -2655,37 +2574,58 @@
 # with a relation_chain_depth less than the depth of the
 # current prefetch is not considered)
 #
-# The increments happen in 1/2s to make it easier to correlate the
-# join depth with the join path. An integer means a relationship
-# specified via a search_related, whereas a fraction means an added
-# join/prefetch via attributes
+# The increments happen twice per join. An even number means a
+# relationship specified via a search_related, whereas an odd
+# number indicates a join/prefetch added via attributes
+#
+# Also this code will wrap the current resultset (the one we
+# chain to) in a subselect IFF it contains limiting attributes
 sub _chain_relationship {
   my ($self, $rel) = @_;
   my $source = $self->result_source;
-  my $attrs = $self->{attrs};
+  my $attrs = { %{$self->{attrs}||{}} };
 
-  my $from = [ @{
-      $attrs->{from}
-        ||
-      [{
-        -source_handle => $source->handle,
-        -alias => $attrs->{alias},
-        $attrs->{alias} => $source->from,
-      }]
-  }];
+  # we need to take the prefetch attrs into account before we
+  # ->_resolve_join as otherwise they get lost - captainL
+  my $join = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} );
 
-  my $seen = { %{$attrs->{seen_join} || {} } };
-  my $jpath = ($attrs->{seen_join} && keys %{$attrs->{seen_join}}) 
-    ? $from->[-1][0]{-join_path} 
-    : [];
+  delete @{$attrs}{qw/join prefetch collapse select as columns +select +as +columns/};
 
+  my $seen = { %{ (delete $attrs->{seen_join}) || {} } };
 
-  # we need to take the prefetch the attrs into account before we
-  # ->_resolve_join as otherwise they get lost - captainL
-  my $merged = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} );
+  my $from;
+  my @force_subq_attrs = qw/offset rows group_by having/;
 
+  if (
+    ($attrs->{from} && ref $attrs->{from} ne 'ARRAY')
+      ||
+    $self->_has_resolved_attr (@force_subq_attrs)
+  ) {
+    $from = [{
+      -source_handle => $source->handle,
+      -alias => $attrs->{alias},
+      $attrs->{alias} => $self->as_query,
+    }];
+    delete @{$attrs}{@force_subq_attrs, 'where'};
+    $seen->{-relation_chain_depth} = 0;
+  }
+  elsif ($attrs->{from}) {  #shallow copy suffices
+    $from = [ @{$attrs->{from}} ];
+  }
+  else {
+    $from = [{
+      -source_handle => $source->handle,
+      -alias => $attrs->{alias},
+      $attrs->{alias} => $source->from,
+    }];
+  }
+
+  my $jpath = ($seen->{-relation_chain_depth})
+    ? $from->[-1][0]{-join_path}
+    : [];
+
   my @requested_joins = $source->_resolve_join(
-    $merged,
+    $join,
     $attrs->{alias},
     $seen,
     $jpath,
@@ -2693,7 +2633,7 @@
 
   push @$from, @requested_joins;
 
-  $seen->{-relation_chain_depth} += 0.5;
+  $seen->{-relation_chain_depth}++;
 
   # if $self already had a join/prefetch specified on it, the requested
   # $rel might very well be already included. What we do in this case
@@ -2705,7 +2645,7 @@
   # we consider the last one thus reverse
   for my $j (reverse @requested_joins) {
     if ($rel eq $j->[0]{-join_path}[-1]) {
-      $j->[0]{-relation_chain_depth} += 0.5;
+      $j->[0]{-relation_chain_depth}++;
       $already_joined++;
       last;
     }
@@ -2715,7 +2655,7 @@
 #  for my $j (reverse @$from) {
 #    next unless ref $j eq 'ARRAY';
 #    if ($j->[0]{-join_path} && $j->[0]{-join_path}[-1] eq $rel) {
-#      $j->[0]{-relation_chain_depth} += 0.5;
+#      $j->[0]{-relation_chain_depth}++;
 #      $already_joined++;
 #      last;
 #    }
@@ -2730,9 +2670,9 @@
     );
   }
 
-  $seen->{-relation_chain_depth} += 0.5;
+  $seen->{-relation_chain_depth}++;
 
-  return ($from,$seen);
+  return {%$attrs, from => $from, seen_join => $seen};
 }
 
 # too many times we have to do $attrs = { %{$self->_resolved_attrs} }
@@ -2884,6 +2824,22 @@
     }
     else {
       $attrs->{group_by} = [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ];
+
+      # add any order_by parts that are not already present in the group_by
+      # we need to be careful not to add any named functions/aggregates
+      # i.e. select => [ ... { count => 'foo', -as => 'foocount' } ... ]
+      my %already_grouped = map { $_ => 1 } (@{$attrs->{group_by}});
+
+      my $storage = $self->result_source->schema->storage;
+      my $rs_column_list = $storage->_resolve_column_info ($attrs->{from});
+      my @chunks = $storage->sql_maker->_order_by_chunks ($attrs->{order_by});
+
+      for my $chunk (map { ref $_ ? @$_ : $_ } (@chunks) ) {
+        $chunk =~ s/\s+ (?: ASC|DESC ) \s* $//ix;
+        if ($rs_column_list->{$chunk} && not $already_grouped{$chunk}++) {
+          push @{$attrs->{group_by}}, $chunk;
+        }
+      }
     }
   }
 
@@ -2912,7 +2868,7 @@
   # even though it doesn't make much sense, this is what pre 081xx has
   # been doing
   if (my $page = delete $attrs->{page}) {
-    $attrs->{offset} = 
+    $attrs->{offset} =
       ($attrs->{rows} * ($page - 1))
             +
       ($attrs->{offset} || 0)
@@ -2930,8 +2886,8 @@
 
   my $cur_depth = $seen->{-relation_chain_depth} || 0;
 
-  if (int ($cur_depth) != $cur_depth) {
-    $self->throw_exception ("-relation_chain_depth is not an integer, something went horribly wrong ($cur_depth)");
+  if ($cur_depth % 2) {
+    $self->throw_exception ("-relation_chain_depth is not even, something went horribly wrong ($cur_depth)");
   }
 
   for my $j (@$fromspec) {
@@ -2942,7 +2898,7 @@
     my $jpath = $j->[0]{-join_path};
 
     my $p = $paths;
-    $p = $p->{$_} ||= {} for @{$jpath}[$cur_depth .. $#$jpath];
+    $p = $p->{$_} ||= {} for @{$jpath}[$cur_depth/2 .. $#$jpath]; #only even depths are actual jpath boundaries
     push @{$p->{-join_aliases} }, $j->[0]{-alias};
   }
 
@@ -3107,7 +3063,7 @@
 
 =back
 
-Which column(s) to order the results by. 
+Which column(s) to order the results by.
 
 [The full list of suitable values is documented in
 L<SQL::Abstract/"ORDER BY CLAUSES">; the following is a summary of
@@ -3202,6 +3158,9 @@
 attribute, the column names returned are storage-dependent. E.g. MySQL would
 return a column named C<count(employeeid)> in the above example.
 
+B<NOTE:> You will almost always need a corresponding 'as' entry when you use
+'select'.
+
 =head2 +select
 
 =over 4
@@ -3399,12 +3358,12 @@
 
 =over 4
 
-=item * 
+=item *
 
 Prefetch uses the L</cache> to populate the prefetched relationships. This
 may or may not be what you want.
 
-=item * 
+=item *
 
 If you specify a condition on a prefetched relationship, ONLY those
 rows that match the prefetched condition will be fetched into that relationship.
@@ -3516,8 +3475,8 @@
   # only return rows WHERE deleted IS NULL for all searches
   __PACKAGE__->resultset_attributes({ where => { deleted => undef } }); )
 
-Can be overridden by passing C<{ where => undef }> as an attribute
-to a resulset.
+Can be overridden by passing C<< { where => undef } >> as an attribute
+to a resultset.
 
 =back
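
A short usage sketch of the new C<is_paged> predicate (resultset name
hypothetical):

  my $rs = $schema->resultset('CD')->search(undef, { rows => 10, page => 2 });

  if ($rs->is_paged) {
    # safe to interrogate the pager
    printf "page %d of %d\n",
      $rs->pager->current_page, $rs->pager->last_page;
  }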
 

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/ResultSource.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/ResultSource.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/ResultSource.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -389,7 +389,7 @@
   my $self = shift;
   $self->throw_exception(
     "columns() is a read-only accessor, did you mean add_columns()?"
-  ) if (@_ > 1);
+  ) if @_;
   return @{$self->{_ordered_columns}||[]};
 }
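
A brief sketch of what the stricter check now catches (column names
hypothetical):

  __PACKAGE__->add_columns(qw/id name/);   # declaring columns

  my @cols = $source->columns;             # reading them back - fine
  $source->columns('nickname');            # now throws: read-only accessor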
 

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Row.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Row.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Row.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -155,7 +155,7 @@
     $new->result_source($source);
   }
 
-  if (my $related = delete $attrs->{-from_resultset}) {
+  if (my $related = delete $attrs->{-cols_from_relations}) {
     @{$new->{_ignore_at_insert}={}}{@$related} = ();
   }
 
@@ -424,7 +424,7 @@
 sub in_storage {
   my ($self, $val) = @_;
   $self->{_in_storage} = $val if @_ > 1;
-  return $self->{_in_storage};
+  return $self->{_in_storage} ? 1 : 0;
 }
 
 =head2 update
@@ -751,10 +751,27 @@
 
 sub get_inflated_columns {
   my $self = shift;
-  return map {
-    my $accessor = $self->column_info($_)->{'accessor'} || $_;
-    ($_ => $self->$accessor);
-  } grep $self->has_column_loaded($_), $self->columns;
+
+  my %loaded_colinfo = (map
+    { $_ => $self->column_info($_) }
+    (grep { $self->has_column_loaded($_) } $self->columns)
+  );
+
+  my %inflated;
+  for my $col (keys %loaded_colinfo) {
+    if (exists $loaded_colinfo{$col}{accessor}) {
+      my $acc = $loaded_colinfo{$col}{accessor};
+      if (defined $acc) {
+        $inflated{$col} = $self->$acc;
+      }
+    }
+    else {
+      $inflated{$col} = $self->$col;
+    }
+  }
+
+  # return all loaded columns with the inflations overlaid on top
+  return ($self->get_columns, %inflated);
 }
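
An illustrative sketch of the overlay behavior (column names hypothetical,
assuming C<created_on> is set up via InflateColumn::DateTime):

  my %data = $row->get_inflated_columns;

  # plain columns come back as raw get_columns() values, while
  # inflatable ones are overlaid with their inflated objects
  print ref $data{created_on};   # e.g. 'DateTime'
  print $data{title};            # plain scalar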
 
 =head2 set_column

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/SQLAHacks.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/SQLAHacks.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/SQLAHacks.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -9,6 +9,7 @@
 use strict;
 use warnings;
 use Carp::Clan qw/^DBIx::Class|^SQL::Abstract/;
+use Sub::Name();
 
 BEGIN {
   # reinstall the carp()/croak() functions imported into SQL::Abstract
@@ -18,17 +19,15 @@
   for my $f (qw/carp croak/) {
 
     my $orig = \&{"SQL::Abstract::$f"};
-    *{"SQL::Abstract::$f"} = sub {
-
-      local $Carp::CarpLevel = 1;   # even though Carp::Clan ignores this, $orig will not
-
-      if (Carp::longmess() =~ /DBIx::Class::SQLAHacks::[\w]+ .+? called \s at/x) {
-        __PACKAGE__->can($f)->(@_);
-      }
-      else {
-        $orig->(@_);
-      }
-    }
+    *{"SQL::Abstract::$f"} = Sub::Name::subname "SQL::Abstract::$f" =>
+      sub {
+        if (Carp::longmess() =~ /DBIx::Class::SQLAHacks::[\w]+ .+? called \s at/x) {
+          __PACKAGE__->can($f)->(@_);
+        }
+        else {
+          goto $orig;
+        }
+      };
   }
 }
 
@@ -47,53 +46,7 @@
   $self;
 }
 
-# Some databases (sqlite) do not handle multiple parenthesis
-# around in/between arguments. A tentative x IN ( (1, 2 ,3) )
-# is interpreted as x IN 1 or something similar.
-#
-# Since we currently do not have access to the SQLA AST, resort
-# to barbaric mutilation of any SQL supplied in literal form
-sub _strip_outer_paren {
-  my ($self, $arg) = @_;
 
-  return $self->_SWITCH_refkind ($arg, {
-    ARRAYREFREF => sub {
-      $$arg->[0] = __strip_outer_paren ($$arg->[0]);
-      return $arg;
-    },
-    SCALARREF => sub {
-      return \__strip_outer_paren( $$arg );
-    },
-    FALLBACK => sub {
-      return $arg
-    },
-  });
-}
-
-sub __strip_outer_paren {
-  my $sql = shift;
-
-  if ($sql and not ref $sql) {
-    while ($sql =~ /^ \s* \( (.*) \) \s* $/x ) {
-      $sql = $1;
-    }
-  }
-
-  return $sql;
-}
-
-sub _where_field_IN {
-  my ($self, $lhs, $op, $rhs) = @_;
-  $rhs = $self->_strip_outer_paren ($rhs);
-  return $self->SUPER::_where_field_IN ($lhs, $op, $rhs);
-}
-
-sub _where_field_BETWEEN {
-  my ($self, $lhs, $op, $rhs) = @_;
-  $rhs = $self->_strip_outer_paren ($rhs);
-  return $self->SUPER::_where_field_BETWEEN ($lhs, $op, $rhs);
-}
-
 # Slow but ANSI standard Limit/Offset support. DB2 uses this
 sub _RowNumberOver {
   my ($self, $sql, $order, $rows, $offset ) = @_;

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Schema/Versioned.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Schema/Versioned.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Schema/Versioned.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -472,9 +472,13 @@
   my ($self, $args) = @_;
 
   $args = {} unless $args;
+
   $self->{vschema} = DBIx::Class::Version->connect(@{$self->storage->connect_info()});
   my $vtable = $self->{vschema}->resultset('Table');
 
+  # useful when connecting from scripts etc
+  return if ($args->{ignore_version} || ($ENV{DBIC_NO_VERSION_CHECK} && !exists $args->{ignore_version}));
+
   # check for legacy versions table and move to new if exists
   my $vschema_compat = DBIx::Class::VersionCompat->connect(@{$self->storage->connect_info()});
   unless ($self->_source_exists($vtable)) {
@@ -486,8 +490,6 @@
     }
   }
 
-  # useful when connecting from scripts etc
-  return if ($args->{ignore_version} || ($ENV{DBIC_NO_VERSION_CHECK} && !exists $args->{ignore_version}));
   my $pversion = $self->get_db_version();
 
   if($pversion eq $self->schema_version)
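
A sketch of skipping the version check from a script, which this change
honors before any version-table work (connection details hypothetical):

  my $schema = My::Schema->connect(
    $dsn, $user, $pass,
    { ignore_version => 1 },
  );

  # or, for a one-off run:
  #   DBIC_NO_VERSION_CHECK=1 perl my_script.pl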

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Schema.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Schema.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Schema.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -406,12 +406,10 @@
 
 Set the storage class that will be instantiated when L</connect> is called.
 If the classname starts with C<::>, the prefix C<DBIx::Class::Storage> is
-assumed by L</connect>.  
+assumed by L</connect>.
 
 You want to use this to set subclasses of L<DBIx::Class::Storage::DBI>
-in cases where the appropriate subclass is not autodetected, such as
-when dealing with MSSQL via L<DBD::Sybase>, in which case you'd set it
-to C<::DBI::Sybase::MSSQL>.
+in cases where the appropriate subclass is not autodetected.
 
 If your storage type requires instantiation arguments, those are
 defined as a second argument in the form of a hashref and the entire
@@ -631,13 +629,13 @@
 This interface is preferred over using the individual methods L</txn_begin>,
 L</txn_commit>, and L</txn_rollback> below.
 
-WARNING: If you are connected with C<AutoCommit => 0> the transaction is
+WARNING: If you are connected with C<< AutoCommit => 0 >> the transaction is
 considered nested, and you will still need to call L</txn_commit> to write your
-changes when appropriate. You will also want to connect with C<auto_savepoint =>
-1> to get partial rollback to work, if the storage driver for your database
+changes when appropriate. You will also want to connect with C<< auto_savepoint =>
+1 >> to get partial rollback to work, if the storage driver for your database
 supports it.
 
-Connecting with C<AutoCommit => 1> is recommended.
+Connecting with C<< AutoCommit => 1 >> is recommended.
 
 =cut
 
@@ -910,7 +908,7 @@
     no strict 'refs';
     no warnings 'redefine';
     foreach my $meth (qw/class source resultset/) {
-      *{"${target}::${meth}"} =
+      *{"${target}::${meth}"} = Sub::Name::subname "${target}::${meth}" =>
         sub { shift->schema->$meth(@_) };
     }
   }
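
A sketch of the recommended transaction setup described above (schema class
and the work done inside the transaction are hypothetical):

  my $schema = My::Schema->connect(
    $dsn, $user, $pass,
    { AutoCommit => 1, auto_savepoint => 1 },
  );

  # commits on success; rolls back (to a savepoint when nested) on die
  $schema->txn_do(sub {
    $schema->resultset('User')->create({ name => 'alice' });
  });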

Added: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -0,0 +1,45 @@
+package DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server;
+
+use strict;
+use warnings;
+
+use base qw/
+  DBIx::Class::Storage::DBI::ADO
+  DBIx::Class::Storage::DBI::MSSQL
+/;
+use mro 'c3';
+
+sub _rebless {
+  my $self = shift;
+  $self->_identity_method('@@identity');
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server - Support for Microsoft
+SQL Server via DBD::ADO
+
+=head1 SYNOPSIS
+
+This subclass supports MSSQL server connections via L<DBD::ADO>.
+
+=head1 DESCRIPTION
+
+The MSSQL specific functionality is provided by
+L<DBIx::Class::Storage::DBI::MSSQL>.
+
+C<_identity_method> is set to C<@@identity>, as C<SCOPE_IDENTITY()> doesn't work
+with L<DBD::ADO>. See L<DBIx::Class::Storage::DBI::MSSQL/IMPLEMENTATION NOTES>
+for caveats regarding this.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Added: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/ADO.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/ADO.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/ADO.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -0,0 +1,42 @@
+package # hide from PAUSE
+    DBIx::Class::Storage::DBI::ADO;
+
+use base 'DBIx::Class::Storage::DBI';
+
+sub _rebless {
+  my $self = shift;
+
+# check for MSSQL
+# XXX This should be using an OpenSchema method of some sort, but I don't know
+# how.
+# Current version is stolen from Sybase.pm
+  my $dbtype = eval {
+    @{$self->_get_dbh
+      ->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})
+    }[2]
+  };
+
+  unless ($@) {
+    $dbtype =~ s/\W/_/gi;
+    my $subclass = "DBIx::Class::Storage::DBI::ADO::${dbtype}";
+    if ($self->load_optional_class($subclass) && !$self->isa($subclass)) {
+      bless $self, $subclass;
+      $self->_rebless;
+    }
+  }
+}
+
+# set cursor type here, if necessary
+#sub _dbh_sth {
+#  my ($self, $dbh, $sql) = @_;
+#
+#  my $sth = $self->disable_sth_caching
+#    ? $dbh->prepare($sql, { CursorType => 'adOpenStatic' })
+#    : $dbh->prepare_cached($sql, { CursorType => 'adOpenStatic' }, 3);
+#
+#  $self->throw_exception($dbh->errstr) if !$sth;
+#
+#  $sth;
+#}
+
+1;
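
A connection sketch that would exercise this autodetection (the DSN is
hypothetical; DBD::ADO embeds an OLE DB connection string after the
C<dbi:ADO:> prefix):

  my $schema = My::Schema->connect(
    'dbi:ADO:Provider=SQLOLEDB;Data Source=myhost;Initial Catalog=mydb',
    $user, $pass,
  );

  # the storage reblesses itself to ::ADO::Microsoft_SQL_Server
  # once sp_server_info reports an MSSQL server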

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/AutoCast.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/AutoCast.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/AutoCast.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -10,7 +10,7 @@
 
 =head1 NAME
 
-DBIx::Class::Storage::DBI::AutoCast
+DBIx::Class::Storage::DBI::AutoCast - Storage component for RDBMS requiring explicit placeholder typing
 
 =head1 SYNOPSIS
 
@@ -29,6 +29,10 @@
 
   CAST(? as $mapped_type)
 
+This option can also be enabled in L<DBIx::Class::Storage::DBI/connect_info> as:
+
+  on_connect_call => ['set_auto_cast']
+
 =cut
 
 sub _prep_for_execute {
@@ -60,7 +64,27 @@
   return ($sql, $bind);
 }
 
+=head2 connect_call_set_auto_cast
 
+Executes:
+
+  $schema->storage->auto_cast(1);
+
+on connection.
+
+Used as:
+
+    on_connect_call => ['set_auto_cast']
+
+in L<DBIx::Class::Storage::DBI/connect_info>.
+
+=cut
+
+sub connect_call_set_auto_cast {
+  my $self = shift;
+  $self->auto_cast(1);
+}
+
 =head1 AUTHOR
 
 See L<DBIx::Class/CONTRIBUTORS>
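
A minimal sketch of enabling the new connect_call (DSN hypothetical):

  my $schema = My::Schema->connect(
    $dsn, $user, $pass,
    { on_connect_call => ['set_auto_cast'] },
  );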

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/MSSQL.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/MSSQL.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/MSSQL.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -160,7 +160,7 @@
 
     # this should bring back the result of SELECT SCOPE_IDENTITY() we tacked
     # on in _prep_for_execute above
-    my ($identity) = $sth->fetchrow_array;
+    my ($identity) = eval { $sth->fetchrow_array };
 
     # SCOPE_IDENTITY failed, but we can do something else
     if ( (! $identity) && $self->_identity_method) {

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -61,7 +61,7 @@
   my $self = shift;
 
   if (ref($self->_dbi_connect_info->[0]) eq 'CODE') {
-    $self->throw_exception ('cannot set DBI attributes on a CODE ref connect_info');
+    $self->throw_exception ('Cannot set DBI attributes on a CODE ref connect_info');
   }
 
   my $dbi_attrs = $self->_dbi_connect_info->[-1];

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -53,8 +53,16 @@
 
   my $sth;
 
+  my $source_name;
+  if ( ref $source->name ne 'SCALAR' ) {
+      $source_name = $source->name;
+  }
+  else {
+      $source_name = ${$source->name};
+  }
+
   # check for fully-qualified name (eg. SCHEMA.TABLENAME)
-  if ( my ( $schema, $table ) = $source->name =~ /(\w+)\.(\w+)/ ) {
+  if ( my ( $schema, $table ) = $source_name =~ /(\w+)\.(\w+)/ ) {
     $sql = q{
       SELECT trigger_body FROM ALL_TRIGGERS t
       WHERE t.owner = ? AND t.table_name = ?
@@ -66,7 +74,7 @@
   }
   else {
     $sth = $dbh->prepare($sql);
-    $sth->execute( uc( $source->name ) );
+    $sth->execute( uc( $source_name ) );
   }
   while (my ($insert_trigger) = $sth->fetchrow_array) {
     return uc($1) if $insert_trigger =~ m!(\w+)\.nextval!i; # col name goes here???
@@ -223,7 +231,7 @@
 
 =cut
 
-sub source_bind_attributes 
+sub source_bind_attributes
 {
 	require DBD::Oracle;
 	my $self = shift;

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Pg.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Pg.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Pg.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -26,11 +26,11 @@
 
   for my $col (@cols) {
     my $seq = ( $source->column_info($col)->{sequence} ||= $self->dbh_do('_dbh_get_autoinc_seq', $source, $col) )
-      or $self->throw_exception( "could not determine sequence for "
-                                 . $source->name
-                                 . ".$col, please consider adding a "
-                                 . "schema-qualified sequence to its column info"
-                               );
+      or $self->throw_exception( sprintf(
+        'could not determine sequence for column %s.%s, please consider adding a schema-qualified sequence to its column info',
+          $source->name,
+          $col,
+      ));
 
     push @values, $self->_dbh_last_insert_id ($self->_dbh, $seq);
   }
@@ -61,22 +61,22 @@
     ( $schema, $table ) = ( $1, $2 );
   }
 
-  # use DBD::Pg to fetch the column info if it is recent enough to
-  # work. otherwise, use custom SQL
-  my $seq_expr =  $DBD::Pg::VERSION >= 2.015001
-      ? eval{ $dbh->column_info(undef,$schema,$table,$col)->fetchrow_hashref->{COLUMN_DEF} }
-      : $self->_dbh_get_column_default( $dbh, $schema, $table, $col );
+  # get the column default using a Postgres-specific pg_catalog query
+  my $seq_expr = $self->_dbh_get_column_default( $dbh, $schema, $table, $col );
 
   # if no default value is set on the column, or if we can't parse the
   # default value as a sequence, throw.
-  unless ( defined $seq_expr and $seq_expr =~ /^nextval\(+'([^']+)'::(?:text|regclass)\)/i ){
+  unless ( defined $seq_expr and $seq_expr =~ /^nextval\(+'([^']+)'::(?:text|regclass)\)/i ) {
     $seq_expr = '' unless defined $seq_expr;
     $schema = "$schema." if defined $schema && length $schema;
-    $self->throw_exception( "no sequence found for $schema$table.$col, check table definition, "
-                            . "or explicitly set the 'sequence' for this column in the "
-                            . $source->source_name
-                            . " class"
-                          );
+    $self->throw_exception( sprintf (
+      'no sequence found for %s%s.%s, check the RDBMS table definition or explicitly set the '.
+      "'sequence' for this column in %s",
+        $schema || '',   # $schema already carries its trailing dot from above
+        $table,
+        $col,
+        $source->source_name,
+    ));
   }
 
   return $1;
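
A sketch of the explicit escape hatch the new message points to (table and
sequence names hypothetical):

  __PACKAGE__->add_columns(
    id => {
      data_type         => 'integer',
      is_auto_increment => 1,
      # schema-qualified, so _dbh_get_autoinc_seq never has to guess
      sequence          => 'myapp.mytable_id_seq',
    },
  );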

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/SQLite.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/SQLite.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/SQLite.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -47,6 +47,22 @@
   return $backupfile;
 }
 
+sub deployment_statements {
+  my $self = shift;
+  my ($schema, $type, $version, $dir, $sqltargs, @rest) = @_;
+
+  $sqltargs ||= {};
+
+  my $sqlite_version = $self->_get_dbh->{sqlite_version};
+
+  # numify, SQLT does a numeric comparison
+  $sqlite_version =~ s/^(\d+) \. (\d+) (?: \. (\d+))? .*/${1}.${2}/x;
+
+  $sqltargs->{producer_args}{sqlite_version} = $sqlite_version;
+
+  $self->next::method($schema, $type, $version, $dir, $sqltargs, @rest);
+}
+
 sub datetime_parser_type { return "DateTime::Format::SQLite"; } 
 
 1;

Added: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -0,0 +1,102 @@
+package DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars;
+
+use base qw/
+  DBIx::Class::Storage::DBI::NoBindVars
+  DBIx::Class::Storage::DBI::Sybase::ASE
+/;
+use mro 'c3';
+use List::Util ();
+use Scalar::Util ();
+
+sub _init {
+  my $self = shift;
+  $self->disable_sth_caching(1);
+  $self->_identity_method('@@IDENTITY');
+  $self->next::method (@_);
+}
+
+sub _fetch_identity_sql { 'SELECT ' . $_[0]->_identity_method }
+
+my $number = sub { Scalar::Util::looks_like_number($_[0]) };
+
+my $decimal = sub { $_[0] =~ /^ [-+]? \d+ (?:\.\d*)? \z/x };
+
+my %noquote = (
+    int => sub { $_[0] =~ /^ [-+]? \d+ \z/x },
+    bit => sub { $_[0] =~ /^[01]\z/ },
+    money => sub { $_[0] =~ /^\$ \d+ (?:\.\d*)? \z/x },
+    float => $number,
+    real => $number,
+    double => $number,
+    decimal => $decimal,
+    numeric => $decimal,
+);
+
+sub interpolate_unquoted {
+  my $self = shift;
+  my ($type, $value) = @_;
+
+  return $self->next::method(@_) if not defined $value or not defined $type;
+
+  if (my $key = List::Util::first { $type =~ /$_/i } keys %noquote) {
+    return 1 if $noquote{$key}->($value);
+  }
+  elsif ($self->is_datatype_numeric($type) && $number->($value)) {
+    return 1;
+  }
+
+  return $self->next::method(@_);
+}
+
+sub _prep_interpolated_value {
+  my ($self, $type, $value) = @_;
+
+  if ($type =~ /money/i && defined $value) {
+    # change a ^ not followed by \$ to a \$
+    $value =~ s/^ (?! \$) /\$/x;
+  }
+
+  return $value;
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars - Storage::DBI subclass for
+Sybase ASE without placeholder support
+
+=head1 DESCRIPTION
+
+If you're using this driver, then your version of Sybase, or the libraries you
+use to connect to it, do not support placeholders.
+
+You can also enable this driver explicitly using:
+
+  my $schema = SchemaClass->clone;
+  $schema->storage_type('::DBI::Sybase::ASE::NoBindVars');
+  $schema->connect($dsn, $user, $pass, \%opts);
+
+See the discussion in L<< DBD::Sybase/Using ? Placeholders & bind parameters to
+$sth->execute >> for details on the pros and cons of using placeholders.
+
+One advantage of not using placeholders is that C<select @@identity> will work
+for obtaining the last insert id of an C<IDENTITY> column, instead of having to
+do C<select max(col)> in a transaction as the base Sybase driver does.
+
+When using this driver, bind variables will be interpolated (properly quoted of
+course) into the SQL query itself, without using placeholders.
+
+The caching of prepared statements is also explicitly disabled, as the
+interpolation renders it useless.
+
+=head1 AUTHORS
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+# vim:sts=2 sw=2:

Copied: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm (from rev 7359, DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase.pm)
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -0,0 +1,1132 @@
+package DBIx::Class::Storage::DBI::Sybase::ASE;
+
+use strict;
+use warnings;
+
+use base qw/
+    DBIx::Class::Storage::DBI::Sybase
+    DBIx::Class::Storage::DBI::AutoCast
+/;
+use mro 'c3';
+use Carp::Clan qw/^DBIx::Class/;
+use Scalar::Util();
+use List::Util();
+use Sub::Name();
+use Data::Dumper::Concise();
+
+__PACKAGE__->mk_group_accessors('simple' =>
+    qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
+       _bulk_storage _is_bulk_storage _began_bulk_work
+       _bulk_disabled_due_to_coderef_connect_info_warned
+       _identity_method/
+);
+
+my @also_proxy_to_extra_storages = qw/
+  connect_call_set_auto_cast auto_cast connect_call_blob_setup
+  connect_call_datetime_setup
+
+  disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
+  auto_savepoint unsafe cursor_class debug debugobj schema
+/;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase::ASE - Sybase ASE SQL Server support for
+DBIx::Class
+
+=head1 SYNOPSIS
+
+This subclass supports L<DBD::Sybase> for real (non-Microsoft) Sybase databases.
+
+=head1 DESCRIPTION
+
+If your version of Sybase does not support placeholders, then your storage will
+be reblessed to L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
+You can also enable that driver explicitly, see the documentation for more
+details.
+
+With this driver there is unfortunately no way to get the C<last_insert_id>
+without doing a C<SELECT MAX(col)>. This is done safely in a transaction
+(locking the table). See L</INSERTS WITH PLACEHOLDERS>.
+
+A recommended L<DBIx::Class::Storage::DBI/connect_info> setting:
+
+  on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]]
+
+=head1 METHODS
+
+=cut
+
+sub _rebless {
+  my $self = shift;
+
+  my $no_bind_vars = __PACKAGE__ . '::NoBindVars';
+
+  if ($self->using_freetds) {
+    carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
+
+You are using FreeTDS with Sybase.
+
+We will do our best to support this configuration, but please consider this
+support experimental.
+
+TEXT/IMAGE columns will definitely not work.
+
+You are encouraged to recompile DBD::Sybase with the Sybase Open Client libraries
+instead.
+
+See perldoc DBIx::Class::Storage::DBI::Sybase::ASE for more details.
+
+To turn off this warning set the DBIC_SYBASE_FREETDS_NOWARN environment
+variable.
+EOF
+
+    if (not $self->_typeless_placeholders_supported) {
+      if ($self->_placeholders_supported) {
+        $self->auto_cast(1);
+      }
+      else {
+        $self->ensure_class_loaded($no_bind_vars);
+        bless $self, $no_bind_vars;
+        $self->_rebless;
+      }
+    }
+  }
+
+  elsif (not $self->_get_dbh->{syb_dynamic_supported}) {
+    # not necessarily FreeTDS, but no placeholders nevertheless
+    $self->ensure_class_loaded($no_bind_vars);
+    bless $self, $no_bind_vars;
+    $self->_rebless;
+  }
+  # this is highly unlikely, but we check just in case
+  elsif (not $self->_typeless_placeholders_supported) {
+    $self->auto_cast(1);
+  }
+}
+
+sub _init {
+  my $self = shift;
+  $self->_set_max_connect(256);
+
+# create storage for insert/(update blob) transactions,
+# unless this is that storage
+  return if $self->_is_extra_storage;
+
+  my $writer_storage = (ref $self)->new;
+
+  $writer_storage->_is_extra_storage(1);
+  $writer_storage->connect_info($self->connect_info);
+  $writer_storage->auto_cast($self->auto_cast);
+
+  $self->_writer_storage($writer_storage);
+
+# create a bulk storage unless connect_info is a coderef
+  return if ref($self->_dbi_connect_info->[0]) eq 'CODE';
+
+  my $bulk_storage = (ref $self)->new;
+
+  $bulk_storage->_is_extra_storage(1);
+  $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
+  $bulk_storage->connect_info($self->connect_info);
+
+# this is why
+  $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
+
+  $self->_bulk_storage($bulk_storage);
+}
+
+for my $method (@also_proxy_to_extra_storages) {
+  no strict 'refs';
+  no warnings 'redefine';
+
+  my $replaced = __PACKAGE__->can($method);
+
+  *{$method} = Sub::Name::subname $method => sub {
+    my $self = shift;
+    $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
+    $self->_bulk_storage->$replaced(@_)   if $self->_bulk_storage;
+    return $self->$replaced(@_);
+  };
+}
+
+sub disconnect {
+  my $self = shift;
+
+# Even though we call $sth->finish for uses of the bulk API, there's still an
+# "active statement" warning on disconnect, which we throw away here.
+# This is due to the bug described in insert_bulk.
+# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
+  local $SIG{__WARN__} = sub {
+    warn $_[0] unless $_[0] =~ /active statement/i;
+  } if $self->_is_bulk_storage;
+
+# so that next transaction gets a dbh
+  $self->_began_bulk_work(0) if $self->_is_bulk_storage;
+
+  $self->next::method;
+}
+
+# Set up session settings for Sybase databases for the connection.
+#
+# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
+# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
+# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
+# only want when AutoCommit is off.
+#
+# Also SET TEXTSIZE for FreeTDS because LongReadLen doesn't work.
+sub _run_connection_actions {
+  my $self = shift;
+
+  if ($self->_is_bulk_storage) {
+# this should be cleared on every reconnect
+    $self->_began_bulk_work(0);
+    return;
+  }
+
+  if (not $self->using_freetds) {
+    $self->_dbh->{syb_chained_txn} = 1;
+  } else {
+    # based on LongReadLen in connect_info
+    $self->set_textsize;
+
+    if ($self->_dbh_autocommit) {
+      $self->_dbh->do('SET CHAINED OFF');
+    } else {
+      $self->_dbh->do('SET CHAINED ON');
+    }
+  }
+
+  $self->next::method(@_);
+}
+
+=head2 connect_call_blob_setup
+
+Used as:
+
+  on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ]
+
+Does C<< $dbh->{syb_binary_images} = 1; >> to return C<IMAGE> data as raw binary
+instead of as a hex string.
+
+Recommended.
+
+Also sets the C<log_on_update> value for blob write operations. The default is
+C<1>, but C<0> is better if your database is configured for it.
+
+See
+L<DBD::Sybase/Handling_IMAGE/TEXT_data_with_syb_ct_get_data()/syb_ct_send_data()>.
+
+=cut
+
+sub connect_call_blob_setup {
+  my $self = shift;
+  my %args = @_;
+  my $dbh = $self->_dbh;
+  $dbh->{syb_binary_images} = 1;
+
+  $self->_blob_log_on_update($args{log_on_update})
+    if exists $args{log_on_update};
+}
+
+sub _is_lob_type {
+  my $self = shift;
+  my $type = shift;
+  $type && $type =~ /(?:text|image|lob|bytea|binary|memo)/i;
+}
+
+sub _is_lob_column {
+  my ($self, $source, $column) = @_;
+
+  return $self->_is_lob_type($source->column_info($column)->{data_type});
+}
+
+sub _prep_for_execute {
+  my $self = shift;
+  my ($op, $extra_bind, $ident, $args) = @_;
+
+  my ($sql, $bind) = $self->next::method (@_);
+
+  my $table = Scalar::Util::blessed($ident) ? $ident->from : $ident;
+
+  my $bind_info = $self->_resolve_column_info(
+    $ident, [map $_->[0], @{$bind}]
+  );
+  my $bound_identity_col = List::Util::first
+    { $bind_info->{$_}{is_auto_increment} }
+    (keys %$bind_info)
+  ;
+  my $identity_col = Scalar::Util::blessed($ident) &&
+    List::Util::first
+    { $ident->column_info($_)->{is_auto_increment} }
+    $ident->columns
+  ;
+
+  if (($op eq 'insert' && $bound_identity_col) ||
+      ($op eq 'update' && exists $args->[0]{$identity_col})) {
+    $sql = join ("\n",
+      $self->_set_table_identity_sql($op => $table, 'on'),
+      $sql,
+      $self->_set_table_identity_sql($op => $table, 'off'),
+    );
+  }
+
+  if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
+      (not $self->{insert_bulk})) {
+    $sql =
+      "$sql\n" .
+      $self->_fetch_identity_sql($ident, $identity_col);
+  }
+
+  return ($sql, $bind);
+}
+
+sub _set_table_identity_sql {
+  my ($self, $op, $table, $on_off) = @_;
+
+  return sprintf 'SET IDENTITY_%s %s %s',
+    uc($op), $self->sql_maker->_quote($table), uc($on_off);
+}
+
+# Stolen from SQLT, with some modifications. This is a makeshift
+# solution before a sane type-mapping library is available, thus
+# the 'our' for easy overrides.
+our %TYPE_MAPPING  = (
+    number    => 'numeric',
+    money     => 'money',
+    varchar   => 'varchar',
+    varchar2  => 'varchar',
+    timestamp => 'datetime',
+    text      => 'varchar',
+    real      => 'double precision',
+    comment   => 'text',
+    bit       => 'bit',
+    tinyint   => 'smallint',
+    float     => 'double precision',
+    serial    => 'numeric',
+    bigserial => 'numeric',
+    boolean   => 'varchar',
+    long      => 'varchar',
+);
+
+sub _native_data_type {
+  my ($self, $type) = @_;
+
+  $type = lc $type;
+  $type =~ s/\s* identity//x;
+
+  return uc($TYPE_MAPPING{$type} || $type);
+}
+
+sub _fetch_identity_sql {
+  my ($self, $source, $col) = @_;
+
+  return sprintf ("SELECT MAX(%s) FROM %s",
+    map { $self->sql_maker->_quote ($_) } ($col, $source->from)
+  );
+}
+
+sub _execute {
+  my $self = shift;
+  my ($op) = @_;
+
+  my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);
+
+  if ($op eq 'insert') {
+    $self->_identity($sth->fetchrow_array);
+    $sth->finish;
+  }
+
+  return wantarray ? ($rv, $sth, @bind) : $rv;
+}
+
+sub last_insert_id { shift->_identity }
+
+# handles TEXT/IMAGE and transaction for last_insert_id
+sub insert {
+  my $self = shift;
+  my ($source, $to_insert) = @_;
+
+  my $identity_col = (List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns) || '';
+
+  # check for empty insert
+  # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
+  # try to insert explicit 'DEFAULT's instead (except for identity)
+  if (not %$to_insert) {
+    for my $col ($source->columns) {
+      next if $col eq $identity_col;
+      $to_insert->{$col} = \'DEFAULT';
+    }
+  }
+
+  my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
+
+  # do we need the horrific SELECT MAX(COL) hack?
+  my $dumb_last_insert_id =
+       $identity_col
+    && (not exists $to_insert->{$identity_col})
+    && ($self->_identity_method||'') ne '@@IDENTITY';
+
+  my $next = $self->next::can;
+
+  # we are already in a transaction, or there are no blobs
+  # and we don't need the PK - just (try to) do it
+  if ($self->{transaction_depth}
+        || (!$blob_cols && !$dumb_last_insert_id)
+  ) {
+    return $self->_insert (
+      $next, $source, $to_insert, $blob_cols, $identity_col
+    );
+  }
+
+  # otherwise use the _writer_storage to do the insert+transaction on another
+  # connection
+  my $guard = $self->_writer_storage->txn_scope_guard;
+
+  my $updated_cols = $self->_writer_storage->_insert (
+    $next, $source, $to_insert, $blob_cols, $identity_col
+  );
+
+  $self->_identity($self->_writer_storage->_identity);
+
+  $guard->commit;
+
+  return $updated_cols;
+}
+
+sub _insert {
+  my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;
+
+  my $updated_cols = $self->$next ($source, $to_insert);
+
+  my $final_row = {
+    ($identity_col ?
+      ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
+    %$to_insert,
+    %$updated_cols,
+  };
+
+  $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;
+
+  return $updated_cols;
+}
+
+sub update {
+  my $self = shift;
+  my ($source, $fields, $where, @rest) = @_;
+
+  my $wantarray = wantarray;
+
+  my $blob_cols = $self->_remove_blob_cols($source, $fields);
+
+  my $table = $source->name;
+
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
+
+  my $is_identity_update = $identity_col && defined $fields->{$identity_col};
+
+  return $self->next::method(@_) unless $blob_cols;
+
+# If there are any blobs in $where, Sybase will return a descriptive error
+# message.
+# XXX blobs can still be used with a LIKE query, and this should be handled.
+
+# update+blob update(s) done atomically on separate connection
+  $self = $self->_writer_storage;
+
+  my $guard = $self->txn_scope_guard;
+
+# First update the blob columns to be updated to '' (taken from $fields, where
+# it is originally put by _remove_blob_cols.)
+  my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
+
+# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
+
+  $self->next::method($source, \%blobs_to_empty, $where, @rest);
+
+# Now update the blobs before the other columns in case the update of other
+# columns makes the search condition invalid.
+  $self->_update_blobs($source, $blob_cols, $where);
+
+  my @res;
+  if (%$fields) {
+    if ($wantarray) {
+      @res    = $self->next::method(@_);
+    }
+    elsif (defined $wantarray) {
+      $res[0] = $self->next::method(@_);
+    }
+    else {
+      $self->next::method(@_);
+    }
+  }
+
+  $guard->commit;
+
+  return $wantarray ? @res : $res[0];
+}
+
+sub insert_bulk {
+  my $self = shift;
+  my ($source, $cols, $data) = @_;
+
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
+
+  my $is_identity_insert = (List::Util::first
+    { $_ eq $identity_col }
+    @{$cols}
+  ) ? 1 : 0;
+
+  my @source_columns = $source->columns;
+
+  my $use_bulk_api =
+    $self->_bulk_storage &&
+    $self->_get_dbh->{syb_has_blk};
+
+  if ((not $use_bulk_api)
+        &&
+      (ref($self->_dbi_connect_info->[0]) eq 'CODE')
+        &&
+      (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
+    carp <<'EOF';
+Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
+regular array inserts.
+EOF
+    $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
+  }
+
+  if (not $use_bulk_api) {
+    my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
+
+# _execute_array uses a txn anyway, but it ends too early in case we need to
+# select max(col) to get the identity for inserting blobs.
+    ($self, my $guard) = $self->{transaction_depth} == 0 ?
+      ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
+      :
+      ($self, undef);
+
+    local $self->{insert_bulk} = 1;
+
+    $self->next::method(@_);
+
+    if ($blob_cols) {
+      if ($is_identity_insert) {
+        $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
+      }
+      else {
+        my @cols_with_identities = (@$cols, $identity_col);
+
+        ## calculate identities
+        # XXX This assumes identities always increase by 1, which may or may not
+        # be true.
+        my ($last_identity) =
+          $self->_dbh->selectrow_array (
+            $self->_fetch_identity_sql($source, $identity_col)
+          );
+        my @identities = (($last_identity - @$data + 1) .. $last_identity);
+
+        my @data_with_identities = map [@$_, shift @identities], @$data;
+
+        $self->_insert_blobs_array (
+          $source, $blob_cols, \@cols_with_identities, \@data_with_identities
+        );
+      }
+    }
+
+    $guard->commit if $guard;
+
+    return;
+  }
+
+# otherwise, use the bulk API
+
+# rearrange @$data so that columns are in database order
+  my %orig_idx;
+  @orig_idx{@$cols} = 0..$#$cols;
+
+  my %new_idx;
+  @new_idx{@source_columns} = 0..$#source_columns;
+
+  my @new_data;
+  for my $datum (@$data) {
+    my $new_datum = [];
+    for my $col (@source_columns) {
+# identity data will be 'undef' if not $is_identity_insert
+# columns with defaults will also be 'undef'
+      $new_datum->[ $new_idx{$col} ] =
+        exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
+    }
+    push @new_data, $new_datum;
+  }
+
+# bcp identity index is 1-based
+  my $identity_idx = exists $new_idx{$identity_col} ?
+    $new_idx{$identity_col} + 1 : 0;
+
+## Set a client-side conversion error handler, straight from DBD::Sybase docs.
+# This ignores any data conversion errors detected by the client side libs, as
+# they are usually harmless.
+  my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
+    Sub::Name::subname insert_bulk => sub {
+      my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
+
+      return 1 if $errno == 36;
+
+      carp
+        "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
+        ($errmsg ? "\n$errmsg" : '') .
+        ($osmsg  ? "\n$osmsg"  : '')  .
+        ($blkmsg ? "\n$blkmsg" : '');
+
+      return 0;
+  });
+
+  eval {
+    my $bulk = $self->_bulk_storage;
+
+    my $guard = $bulk->txn_scope_guard;
+
+## XXX get this to work instead of our own $sth
+## will require SQLA or *Hacks changes for ordered columns
+#    $bulk->next::method($source, \@source_columns, \@new_data, {
+#      syb_bcp_attribs => {
+#        identity_flag   => $is_identity_insert,
+#        identity_column => $identity_idx,
+#      }
+#    });
+    my $sql = 'INSERT INTO ' .
+      $bulk->sql_maker->_quote($source->name) . ' (' .
+# colname list is ignored for BCP, but does no harm
+      (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
+      ' VALUES ('.  (join ', ', ('?') x @source_columns) . ')';
+
+## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
+## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
+## better yet the version above. Should be fixed in DBD::Sybase.
+    my $sth = $bulk->_get_dbh->prepare($sql,
+#      'insert', # op
+      {
+        syb_bcp_attribs => {
+          identity_flag   => $is_identity_insert,
+          identity_column => $identity_idx,
+        }
+      }
+    );
+
+    my @bind = do {
+      my $idx = 0;
+      map [ $_, $idx++ ], @source_columns;
+    };
+
+    $self->_execute_array(
+      $source, $sth, \@bind, \@source_columns, \@new_data, sub {
+        $guard->commit
+      }
+    );
+
+    $bulk->_query_end($sql);
+  };
+
+  my $exception = $@;
+  DBD::Sybase::set_cslib_cb($orig_cslib_cb);
+
+  if ($exception =~ /-Y option/) {
+    carp <<"EOF";
+
+Sybase bulk API operation failed due to character set incompatibility, reverting
+to regular array inserts:
+
+*** Try unsetting the LANG environment variable.
+
+$exception
+EOF
+    $self->_bulk_storage(undef);
+    unshift @_, $self;
+    goto \&insert_bulk;
+  }
+  elsif ($exception) {
+# rollback makes the bulkLogin connection unusable
+    $self->_bulk_storage->disconnect;
+    $self->throw_exception($exception);
+  }
+}
+
+sub _dbh_execute_array {
+  my ($self, $sth, $tuple_status, $cb) = @_;
+
+  my $rv = $self->next::method($sth, $tuple_status);
+  $cb->() if $cb;
+
+  return $rv;
+}
+
+# Make sure blobs are not bound as placeholders, and return any non-empty ones
+# as a hash.
+sub _remove_blob_cols {
+  my ($self, $source, $fields) = @_;
+
+  my %blob_cols;
+
+  for my $col (keys %$fields) {
+    if ($self->_is_lob_column($source, $col)) {
+      my $blob_val = delete $fields->{$col};
+      if (not defined $blob_val) {
+        $fields->{$col} = \'NULL';
+      }
+      else {
+        $fields->{$col} = \"''";
+        $blob_cols{$col} = $blob_val unless $blob_val eq '';
+      }
+    }
+  }
+
+  return %blob_cols ? \%blob_cols : undef;
+}
+
+# same for insert_bulk
+sub _remove_blob_cols_array {
+  my ($self, $source, $cols, $data) = @_;
+
+  my @blob_cols;
+
+  for my $i (0..$#$cols) {
+    my $col = $cols->[$i];
+
+    if ($self->_is_lob_column($source, $col)) {
+      for my $j (0..$#$data) {
+        my $blob_val = delete $data->[$j][$i];
+        if (not defined $blob_val) {
+          $data->[$j][$i] = \'NULL';
+        }
+        else {
+          $data->[$j][$i] = \"''";
+          $blob_cols[$j][$i] = $blob_val
+            unless $blob_val eq '';
+        }
+      }
+    }
+  }
+
+  return @blob_cols ? \@blob_cols : undef;
+}
+
+sub _update_blobs {
+  my ($self, $source, $blob_cols, $where) = @_;
+
+  my (@primary_cols) = $source->primary_columns;
+
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
+    unless @primary_cols;
+
+# check if we're updating a single row by PK
+  my $pk_cols_in_where = 0;
+  for my $col (@primary_cols) {
+    $pk_cols_in_where++ if defined $where->{$col};
+  }
+  my @rows;
+
+  if ($pk_cols_in_where == @primary_cols) {
+    my %row_to_update;
+    @row_to_update{@primary_cols} = @{$where}{@primary_cols};
+    @rows = \%row_to_update;
+  } else {
+    my $cursor = $self->select ($source, \@primary_cols, $where, {});
+    @rows = map {
+      my %row; @row{@primary_cols} = @$_; \%row
+    } $cursor->all;
+  }
+
+  for my $row (@rows) {
+    $self->_insert_blobs($source, $blob_cols, $row);
+  }
+}
+
+sub _insert_blobs {
+  my ($self, $source, $blob_cols, $row) = @_;
+  my $dbh = $self->_get_dbh;
+
+  my $table = $source->name;
+
+  my %row = %$row;
+  my (@primary_cols) = $source->primary_columns;
+
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
+    unless @primary_cols;
+
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
+    if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
+
+  for my $col (keys %$blob_cols) {
+    my $blob = $blob_cols->{$col};
+
+    my %where = map { ($_, $row{$_}) } @primary_cols;
+
+    my $cursor = $self->select ($source, [$col], \%where, {});
+    $cursor->next;
+    my $sth = $cursor->sth;
+
+    if (not $sth) {
+
+      $self->throw_exception(
+          "Could not find row in table '$table' for blob update:\n"
+        . Data::Dumper::Concise::Dumper (\%where)
+      );
+    }
+
+    eval {
+      do {
+        $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
+      } while $sth->fetch;
+
+      $sth->func('ct_prepare_send') or die $sth->errstr;
+
+      my $log_on_update = $self->_blob_log_on_update;
+      $log_on_update    = 1 if not defined $log_on_update;
+
+      $sth->func('CS_SET', 1, {
+        total_txtlen => length($blob),
+        log_on_update => $log_on_update
+      }, 'ct_data_info') or die $sth->errstr;
+
+      $sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr;
+
+      $sth->func('ct_finish_send') or die $sth->errstr;
+    };
+    my $exception = $@;
+    $sth->finish if $sth;
+    if ($exception) {
+      if ($self->using_freetds) {
+        $self->throw_exception (
+          'TEXT/IMAGE operation failed, probably because you are using FreeTDS: '
+          . $exception
+        );
+      } else {
+        $self->throw_exception($exception);
+      }
+    }
+  }
+}
+
+sub _insert_blobs_array {
+  my ($self, $source, $blob_cols, $cols, $data) = @_;
+
+  for my $i (0..$#$data) {
+    my $datum = $data->[$i];
+
+    my %row;
+    @row{ @$cols } = @$datum;
+
+    my %blob_vals;
+    for my $j (0..$#$cols) {
+      if (exists $blob_cols->[$i][$j]) {
+        $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
+      }
+    }
+
+    $self->_insert_blobs ($source, \%blob_vals, \%row);
+  }
+}
+
+=head2 connect_call_datetime_setup
+
+Used as:
+
+  on_connect_call => 'datetime_setup'
+
+In L<DBIx::Class::Storage::DBI/connect_info> to set:
+
+  $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
+  $dbh->do('set dateformat mdy');   # input fmt:  08/13/1979 18:08:55.080
+
+These are set on connection, for use with L<DBIx::Class::InflateColumn::DateTime>
+via L<DateTime::Format::Sybase>, which you will need to install.
+
+This works for both C<DATETIME> and C<SMALLDATETIME> columns, although
+C<SMALLDATETIME> columns only have minute precision.
+
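+A typical connection setup might look like this (the schema class name and
+DSN are hypothetical):
+
+  my $schema = MyApp::Schema->connect(
+    'dbi:Sybase:server=myserver;database=mydb',
+    $user, $pass,
+    { on_connect_call => 'datetime_setup' },
+  );
+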
+=cut
+
+{
+  my $old_dbd_warned = 0;
+
+  sub connect_call_datetime_setup {
+    my $self = shift;
+    my $dbh = $self->_get_dbh;
+
+    if ($dbh->can('syb_date_fmt')) {
+      # amazingly, this works with FreeTDS
+      $dbh->syb_date_fmt('ISO_strict');
+    } elsif (not $old_dbd_warned) {
+      carp "Your DBD::Sybase is too old to support ".
+      "DBIx::Class::InflateColumn::DateTime, please upgrade!";
+      $old_dbd_warned = 1;
+    }
+
+    $dbh->do('SET DATEFORMAT mdy');
+
+    1;
+  }
+}
+
+sub datetime_parser_type { "DateTime::Format::Sybase" }
+
+# ->begin_work and such have no effect with FreeTDS but we run them anyway to
+# let the DBD keep any state it needs to.
+#
+# If they ever do start working, the extra statements will do no harm (because
+# Sybase supports nested transactions.)
+
+sub _dbh_begin_work {
+  my $self = shift;
+
+# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
+# TRAN once. However, we need to make sure there's a $dbh.
+  return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
+
+  $self->next::method(@_);
+
+  if ($self->using_freetds) {
+    $self->_get_dbh->do('BEGIN TRAN');
+  }
+
+  $self->_began_bulk_work(1) if $self->_is_bulk_storage;
+}
+
+sub _dbh_commit {
+  my $self = shift;
+  if ($self->using_freetds) {
+    $self->_dbh->do('COMMIT');
+  }
+  return $self->next::method(@_);
+}
+
+sub _dbh_rollback {
+  my $self = shift;
+  if ($self->using_freetds) {
+    $self->_dbh->do('ROLLBACK');
+  }
+  return $self->next::method(@_);
+}
+
+# savepoint support using ASE syntax
+
+sub _svp_begin {
+  my ($self, $name) = @_;
+
+  $self->_get_dbh->do("SAVE TRANSACTION $name");
+}
+
+# A new SAVE TRANSACTION with the same name releases the previous one.
+sub _svp_release { 1 }
+
+sub _svp_rollback {
+  my ($self, $name) = @_;
+
+  $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
+}
+
+1;
+
+=head1 Schema::Loader Support
+
+There is an experimental branch of L<DBIx::Class::Schema::Loader> that will
+allow you to dump a schema from most (if not all) versions of Sybase.
+
+It is available via subversion from:
+
+  http://dev.catalyst.perl.org/repos/bast/branches/DBIx-Class-Schema-Loader/current/
+
+=head1 FreeTDS
+
+This driver supports L<DBD::Sybase> compiled against FreeTDS
+(L<http://www.freetds.org/>) to the best of our ability; however, it is
+recommended that you recompile L<DBD::Sybase> against the Sybase Open Client
+libraries, which are part of the Sybase ASE distribution.
+
+The Open Client FAQ is here:
+L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.
+
+Sybase ASE for Linux (which comes with the Open Client libraries) may be
+downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
+
+To see if you're using FreeTDS, check C<< $schema->storage->using_freetds >>, or run:
+
+  perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
+
+Some versions of the libraries involved will not support placeholders, in which
+case the storage will be reblessed to
+L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
+
+In some configurations, placeholders will work but will throw implicit type
+conversion errors for anything that's not expecting a string. In such a case,
+the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
+automatically set, and you may also enable it explicitly on connection with
+L<DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>. The type info
+for the C<CAST>s is taken from the L<DBIx::Class::ResultSource/data_type>
+definitions in your Result classes, and is mapped to a Sybase type (if it isn't
+one already) using a mapping based on L<SQL::Translator>.
+
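+As with C<datetime_setup> above, this can be enabled via
+L<DBIx::Class::Storage::DBI/connect_info>:
+
+  on_connect_call => 'set_auto_cast'
+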
+In other configurations, placeholders will work just as they do with the Sybase
+Open Client libraries.
+
+Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
+
+=head1 INSERTS WITH PLACEHOLDERS
+
+With placeholders enabled, inserts are done in a transaction so that there are
+no concurrency issues with getting the inserted identity value using
+C<SELECT MAX(col)>, which is the only way to get the C<IDENTITY> value in this
+mode.
+
+In addition, they are done on a separate connection so that it's possible to
+have active cursors when doing an insert.
+
+When using C<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>, transactions
+are disabled, as there are no concurrency issues with C<SELECT @@IDENTITY> as
+it's a session variable.
+
+=head1 TRANSACTIONS
+
+Due to limitations of the TDS protocol, L<DBD::Sybase>, or both, you cannot
+begin a transaction while there are active cursors, nor can you use multiple
+active cursors within a transaction. An active cursor is, for example, a
+L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
+C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
+
+For example, this will not work:
+
+  $schema->txn_do(sub {
+    my $rs = $schema->resultset('Book');
+    while (my $row = $rs->next) {
+      $schema->resultset('MetaData')->create({
+        book_id => $row->id,
+        ...
+      });
+    }
+  });
+
+This won't either:
+
+  my $first_row = $large_rs->first;
+  $schema->txn_do(sub { ... });
+
+Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
+are not affected, as they are done on an extra database handle.
+
+Some workarounds:
+
+=over 4
+
+=item * use L<DBIx::Class::Storage::DBI::Replicated>
+
+=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>
+
+=item * load the data from your cursor with L<DBIx::Class::ResultSet/all> (see the example below)
+
+=back
+
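+For example, the first failing snippet above can be fixed by loading the rows
+before the transaction starts:
+
+  # exhaust the cursor up front, so none remains active inside the txn
+  my @books = $schema->resultset('Book')->all;
+
+  $schema->txn_do(sub {
+    for my $row (@books) {
+      $schema->resultset('MetaData')->create({
+        book_id => $row->id,
+        ...
+      });
+    }
+  });
+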
+=head1 MAXIMUM CONNECTIONS
+
+The TDS protocol makes separate connections to the server for active statements
+in the background. By default the number of such connections is limited to 25,
+on both the client side and the server side.
+
+This is a bit too low for a complex L<DBIx::Class> application, so on connection
+the client side setting is set to C<256> (see L<DBD::Sybase/maxConnect>.) You
+can override it to whatever setting you like in the DSN.
+
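+For example (hypothetical server name):
+
+  dbi:Sybase:server=myserver;maxConnect=1024
+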
+See
+L<http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.ase_15.0.sag1/html/sag1/sag1272.htm>
+for information on changing the setting on the server side.
+
+=head1 DATES
+
+See L</connect_call_datetime_setup> to set up date formats
+for L<DBIx::Class::InflateColumn::DateTime>.
+
+=head1 TEXT/IMAGE COLUMNS
+
+L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
+C<TEXT/IMAGE> columns.
+
+Setting C<< $dbh->{LongReadLen} >> will also not work with FreeTDS. Use either:
+
+  $schema->storage->dbh->do("SET TEXTSIZE $bytes");
+
+or
+
+  $schema->storage->set_textsize($bytes);
+
+instead.
+
+However, the C<LongReadLen> you pass in
+L<DBIx::Class::Storage::DBI/connect_info> is used to execute the equivalent
+C<SET TEXTSIZE> command on connection.
+
+See L</connect_call_blob_setup> for a L<DBIx::Class::Storage::DBI/connect_info>
+setting you need to work with C<IMAGE> columns.
+
+=head1 BULK API
+
+The experimental L<DBD::Sybase> Bulk API support is used for
+L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
+on a separate connection.
+
+To use this feature effectively, use a large number of rows for each
+L<populate|DBIx::Class::ResultSet/populate> call, e.g.:
+
+  while (my $rows = $data_source->get_100_rows()) {
+    $rs->populate($rows);
+  }
+
+B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
+calls in your C<Result> classes B<must> list columns in database order for this
+to work. Also, you may have to unset the C<LANG> environment variable before
+loading your app, if it doesn't match the character set of your database.
+
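+A minimal sketch (the column names are hypothetical; the order is what
+matters):
+
+  # declared in the same order as in the database table
+  __PACKAGE__->add_columns(qw/artistid name rank/);
+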
+When inserting IMAGE columns using this method, you'll need to use
+L</connect_call_blob_setup> as well.
+
+=head1 TODO
+
+=over
+
+=item *
+
+Transitioning to AutoCommit=0 mode (i.e. starting a transaction) by exhausting
+any active cursors, using eager cursors.
+
+=item *
+
+Real limits and limited counts using stored procedures deployed on startup.
+
+=item *
+
+Adaptive Server Anywhere (ASA) support, with possible SQLA::Limit support.
+
+=item *
+
+Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
+
+=item *
+
+bulk_insert using prepare_cached (see comments.)
+
+=back
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+# vim:sts=2 sw=2:

Deleted: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -1,54 +0,0 @@
-package # hide from PAUSE
-    DBIx::Class::Storage::DBI::Sybase::Base;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::Storage::DBI/;
-use mro 'c3';
-
-=head1 NAME
-
-DBIx::Class::Storage::DBI::Sybase::Base - Common functionality for drivers using
-DBD::Sybase
-
-=cut
-
-sub _ping {
-  my $self = shift;
-
-  my $dbh = $self->_dbh or return 0;
-
-  local $dbh->{RaiseError} = 1;
-  eval {
-    $dbh->do('select 1');
-  };
-
-  return $@ ? 0 : 1;
-}
-
-sub _placeholders_supported {
-  my $self = shift;
-  my $dbh  = $self->_get_dbh;
-
-  return eval {
-# There's also $dbh->{syb_dynamic_supported} but it can be inaccurate for this
-# purpose.
-    local $dbh->{PrintError} = 0;
-    local $dbh->{RaiseError} = 1;
-# this specifically tests a bind that is NOT a string
-    $dbh->selectrow_array('select 1 where 1 = ?', {}, 1);
-  };
-}
-
-1;
-
-=head1 AUTHORS
-
-See L<DBIx::Class/CONTRIBUTORS>.
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -4,7 +4,7 @@
 use warnings;
 
 use base qw/
-  DBIx::Class::Storage::DBI::Sybase::Base
+  DBIx::Class::Storage::DBI::Sybase
   DBIx::Class::Storage::DBI::MSSQL
 /;
 use mro 'c3';
@@ -18,16 +18,39 @@
       'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars';
     $self->_rebless;
   }
+}
 
-# LongReadLen doesn't work with MSSQL through DBD::Sybase, and the default is
-# huge on some versions of SQL server and can cause memory problems, so we
-# fix it up here.
-  my $text_size = eval { $self->_dbi_connect_info->[-1]->{LongReadLen} } ||
-    32768; # the DBD::Sybase default
+sub _run_connection_actions {
+  my $self = shift;
 
-  $dbh->do("set textsize $text_size");
+  # LongReadLen doesn't work with MSSQL through DBD::Sybase, and the default is
+  # huge on some versions of SQL server and can cause memory problems, so we
+  # fix it up here (see ::DBI::Sybase.pm)
+  $self->set_textsize;
+
+  $self->next::method(@_);
 }
 
+sub _dbh_begin_work {
+  my $self = shift;
+
+  $self->_get_dbh->do('BEGIN TRAN');
+}
+
+sub _dbh_commit {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot COMMIT on a disconnected handle');
+  $dbh->do('COMMIT');
+}
+
+sub _dbh_rollback {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
+  $dbh->do('ROLLBACK');
+}
+
 1;
 
 =head1 NAME

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -3,62 +3,128 @@
 use strict;
 use warnings;
 
-use base qw/
-    DBIx::Class::Storage::DBI::Sybase::Base
-    DBIx::Class::Storage::DBI::NoBindVars
-/;
-use mro 'c3';
+use base qw/DBIx::Class::Storage::DBI/;
 
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase - Base class for drivers using
+L<DBD::Sybase>
+
+=head1 DESCRIPTION
+
+This is the base class/dispatcher for Storages designed to work with
+L<DBD::Sybase>.
+
+=head1 METHODS
+
+=cut
+
 sub _rebless {
-    my $self = shift;
+  my $self = shift;
 
-    my $dbtype = eval {
-      @{$self->_get_dbh
-        ->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})
-      }[2]
-    };
-    unless ( $@ ) {
-        $dbtype =~ s/\W/_/gi;
-        my $subclass = "DBIx::Class::Storage::DBI::Sybase::${dbtype}";
-        if ($self->load_optional_class($subclass) && !$self->isa($subclass)) {
-            bless $self, $subclass;
-            $self->_rebless;
-        }
+  my $dbtype = eval {
+    @{$self->_get_dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2]
+  };
+
+  $self->throw_exception("Unable to estable connection to determine database type: $@")
+    if $@;
+
+  if ($dbtype) {
+    $dbtype =~ s/\W/_/gi;
+
+    # saner class name
+    $dbtype = 'ASE' if $dbtype eq 'SQL_Server';
+
+    my $subclass = __PACKAGE__ . "::$dbtype";
+    if ($self->load_optional_class($subclass)) {
+      bless $self, $subclass;
+      $self->_rebless;
     }
+  }
 }
 
-sub _dbh_last_insert_id {
-    my ($self, $dbh, $source, $col) = @_;
-    return ($dbh->selectrow_array('select @@identity'))[0];
+sub _ping {
+  my $self = shift;
+
+  my $dbh = $self->_dbh or return 0;
+
+  local $dbh->{RaiseError} = 1;
+  local $dbh->{PrintError} = 0;
+
+  if ($dbh->{syb_no_child_con}) {
+# if extra connections are not allowed, then ->ping is reliable
+    my $ping = eval { $dbh->ping };
+    return $@ ? 0 : $ping;
+  }
+
+  eval {
+# XXX if the main connection goes stale, does opening another for this statement
+# really determine anything?
+    $dbh->do('select 1');
+  };
+
+  return $@ ? 0 : 1;
 }
 
-1;
+sub _set_max_connect {
+  my $self = shift;
+  my $val  = shift || 256;
 
-=head1 NAME
+  my $dsn = $self->_dbi_connect_info->[0];
 
-DBIx::Class::Storage::DBI::Sybase - Storage::DBI subclass for Sybase
+  return if ref($dsn) eq 'CODE';
 
-=head1 SYNOPSIS
+  if ($dsn !~ /maxConnect=/) {
+    $self->_dbi_connect_info->[0] = "$dsn;maxConnect=$val";
+    my $connected = defined $self->_dbh;
+    $self->disconnect;
+    $self->ensure_connected if $connected;
+  }
+}
 
-This subclass supports L<DBD::Sybase> for real Sybase databases.  If
-you are using an MSSQL database via L<DBD::Sybase>, see
-L<DBIx::Class::Storage::DBI::Sybase::MSSQL>.
+=head2 using_freetds
 
-=head1 CAVEATS
+Whether or not L<DBD::Sybase> was compiled against FreeTDS. If false, it means
+the Sybase OpenClient libraries were used.
 
-This storage driver uses L<DBIx::Class::Storage::DBI::NoBindVars> as a base.
-This means that bind variables will be interpolated (properly quoted of course)
-into the SQL query itself, without using bind placeholders.
+=cut
 
-More importantly this means that caching of prepared statements is explicitly
-disabled, as the interpolation renders it useless.
+sub using_freetds {
+  my $self = shift;
 
+  return $self->_get_dbh->{syb_oc_version} =~ /freetds/i;
+}
+
+=head2 set_textsize
+
+When using FreeTDS and/or MSSQL, C<< $dbh->{LongReadLen} >> is not available;
+use this function instead. It does:
+
+  $dbh->do("SET TEXTSIZE $bytes");
+
+Takes the number of bytes, or uses the C<LongReadLen> value from your
+L<DBIx::Class/connect_info> if omitted, and lastly falls back to C<32768>,
+the L<DBD::Sybase> default.
+
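+For example, to allow TEXT/IMAGE values of up to one megabyte:
+
+  $schema->storage->set_textsize(1024 * 1024);
+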
+=cut
+
+sub set_textsize {
+  my $self = shift;
+  my $text_size = shift ||
+    eval { $self->_dbi_connect_info->[-1]->{LongReadLen} } ||
+    32768; # the DBD::Sybase default
+
+  return unless defined $text_size;
+
+  $self->_dbh->do("SET TEXTSIZE $text_size");
+}
+
+1;
+
 =head1 AUTHORS
 
-Brandon L Black <blblack at gmail.com>
+See L<DBIx::Class/CONTRIBUTORS>.
 
-Justin Hunter <justin.d.hunter at gmail.com>
-
 =head1 LICENSE
 
 You may distribute this code under the same terms as Perl itself.


Property changes on: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI/Sybase.pm
___________________________________________________________________
Name: svn:eol-style
   - native

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBI.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -4,7 +4,7 @@
 use strict;
 use warnings;
 
-use base 'DBIx::Class::Storage';
+use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/;
 use mro 'c3';
 
 use Carp::Clan qw/^DBIx::Class/;
@@ -13,6 +13,8 @@
 use DBIx::Class::Storage::Statistics;
 use Scalar::Util();
 use List::Util();
+use Data::Dumper::Concise();
+use Sub::Name ();
 
 # what version of sqlt do we require if deploy() without a ddl_dir is invoked
 # when changing also adjust the corresponding author_require in Makefile.PL
@@ -40,6 +42,38 @@
 __PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks');
 
 
+# Each of these methods need _determine_driver called before itself
+# in order to function reliably. This is a purely DRY optimization
+my @rdbms_specific_methods = qw/
+  sqlt_type
+  build_datetime_parser
+  datetime_parser_type
+
+  insert
+  insert_bulk
+  update
+  delete
+  select
+  select_single
+/;
+
+for my $meth (@rdbms_specific_methods) {
+
+  my $orig = __PACKAGE__->can ($meth)
+    or next;
+
+  no strict qw/refs/;
+  no warnings qw/redefine/;
+  *{__PACKAGE__ ."::$meth"} = Sub::Name::subname $meth => sub {
+    if (not $_[0]->_driver_determined) {
+      $_[0]->_determine_driver;
+      goto $_[0]->can($meth);
+    }
+    $orig->(@_);
+  };
+}
+
+
 =head1 NAME
 
 DBIx::Class::Storage::DBI - DBI storage handler
@@ -712,7 +746,6 @@
 # Storage subclasses should override this
 sub with_deferred_fk_checks {
   my ($self, $sub) = @_;
-
   $sub->();
 }
 
@@ -878,13 +911,14 @@
   my ($self) = @_;
 
   if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
-    my $started_unconnected = 0;
+    my $started_connected = 0;
     local $self->{_in_determine_driver} = 1;
 
     if (ref($self) eq __PACKAGE__) {
       my $driver;
       if ($self->_dbh) { # we are connected
         $driver = $self->_dbh->{Driver}{Name};
+        $started_connected = 1;
       } else {
         # if connect_info is a CODEREF, we have no choice but to connect
         if (ref $self->_dbi_connect_info->[0] &&
@@ -896,7 +930,6 @@
           # try to use dsn to not require being connected, the driver may still
           # force a connection in _rebless to determine version
           ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
-          $started_unconnected = 1;
         }
       }
 
@@ -913,7 +946,7 @@
     $self->_init; # run driver-specific initializations
 
     $self->_run_connection_actions
-        if $started_unconnected && defined $self->_dbh;
+        if !$started_connected && defined $self->_dbh;
   }
 }
 
@@ -1144,7 +1177,6 @@
 sub txn_commit {
   my $self = shift;
   if ($self->{transaction_depth} == 1) {
-    my $dbh = $self->_dbh;
     $self->debugobj->txn_commit()
       if ($self->debug);
     $self->_dbh_commit;
@@ -1160,7 +1192,9 @@
 
 sub _dbh_commit {
   my $self = shift;
-  $self->_dbh->commit;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot COMMIT on a disconnected handle');
+  $dbh->commit;
 }
 
 sub txn_rollback {
@@ -1197,7 +1231,9 @@
 
 sub _dbh_rollback {
   my $self = shift;
-  $self->_dbh->rollback;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
+  $dbh->rollback;
 }
 
 # This used to be the top-half of _execute.  It was split out to make it
@@ -1300,12 +1336,6 @@
 sub insert {
   my ($self, $source, $to_insert) = @_;
 
-# redispatch to insert method of storage we reblessed into, if necessary
-  if (not $self->_driver_determined) {
-    $self->_determine_driver;
-    goto $self->can('insert');
-  }
-
   my $ident = $source->from;
   my $bind_attributes = $self->source_bind_attributes($source);
 
@@ -1337,22 +1367,103 @@
 sub insert_bulk {
   my ($self, $source, $cols, $data) = @_;
 
-# redispatch to insert_bulk method of storage we reblessed into, if necessary
-  if (not $self->_driver_determined) {
-    $self->_determine_driver;
-    goto $self->can('insert_bulk');
-  }
-
   my %colvalues;
-  my $table = $source->from;
   @colvalues{@$cols} = (0..$#$cols);
-  my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues);
 
-  $self->_query_start( $sql, @bind );
+  for my $i (0..$#$cols) {
+    my $first_val = $data->[0][$i];
+    next unless ref $first_val eq 'SCALAR';
+
+    $colvalues{ $cols->[$i] } = $first_val;
+  }
+
+  # check for bad data and stringify stringifiable objects
+  my $bad_slice = sub {
+    my ($msg, $col_idx, $slice_idx) = @_;
+    $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
+      $msg,
+      $cols->[$col_idx],
+      do {
+        local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any
+        Data::Dumper::Concise::Dumper({
+          map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
+        }),
+      }
+    );
+  };
+
+  for my $datum_idx (0..$#$data) {
+    my $datum = $data->[$datum_idx];
+
+    for my $col_idx (0..$#$cols) {
+      my $val            = $datum->[$col_idx];
+      my $sqla_bind      = $colvalues{ $cols->[$col_idx] };
+      my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
+
+      if ($is_literal_sql) {
+        if (not ref $val) {
+          $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx);
+        }
+        elsif ((my $reftype = ref $val) ne 'SCALAR') {
+          $bad_slice->("$reftype reference found where literal SQL expected",
+            $col_idx, $datum_idx);
+        }
+        elsif ($$val ne $$sqla_bind){
+          $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
+            $col_idx, $datum_idx);
+        }
+      }
+      elsif (my $reftype = ref $val) {
+        require overload;
+        if (overload::Method($val, '""')) {
+          $datum->[$col_idx] = "".$val;
+        }
+        else {
+          $bad_slice->("$reftype reference found where bind expected",
+            $col_idx, $datum_idx);
+        }
+      }
+    }
+  }
+
+  my ($sql, $bind) = $self->_prep_for_execute (
+    'insert', undef, $source, [\%colvalues]
+  );
+  my @bind = @$bind;
+
+  my $empty_bind = 1 if (not @bind) &&
+    (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols;
+
+  if ((not @bind) && (not $empty_bind)) {
+    $self->throw_exception(
+      'Cannot insert_bulk without support for placeholders'
+    );
+  }
+
+  $self->_query_start( $sql, ['__BULK__'] );
   my $sth = $self->sth($sql);
 
-#  @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+  my $rv = do {
+    if ($empty_bind) {
+      # bind_param_array doesn't work if there are no binds
+      $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
+    }
+    else {
+#      @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+      $self->_execute_array( $source, $sth, \@bind, $cols, $data );
+    }
+  };
 
+  $self->_query_end( $sql, ['__BULK__'] );
+
+  return (wantarray ? ($rv, $sth, @bind) : $rv);
+}
+
+sub _execute_array {
+  my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
+
+  my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0;
+
   ## This must be an arrayref, else nothing works!
   my $tuple_status = [];
 
@@ -1362,7 +1473,7 @@
   ## Bind the values and execute
   my $placeholder_index = 1;
 
-  foreach my $bound (@bind) {
+  foreach my $bound (@$bind) {
 
     my $attributes = {};
     my ($column_name, $data_index) = @$bound;
@@ -1377,63 +1488,89 @@
     $sth->bind_param_array( $placeholder_index, [@data], $attributes );
     $placeholder_index++;
   }
-  my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) };
-  if (my $err = $@) {
+
+  my $rv = eval {
+    $self->_dbh_execute_array($sth, $tuple_status, @extra);
+  };
+  my $err = $@ || $sth->errstr;
+
+# Statement must finish even if there was an exception.
+  eval { $sth->finish };
+  $err = $@ unless $err;
+
+  if ($err) {
     my $i = 0;
     ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
 
-    $self->throw_exception($sth->errstr || "Unexpected populate error: $err")
+    $self->throw_exception("Unexpected populate error: $err")
       if ($i > $#$tuple_status);
 
-    require Data::Dumper;
-    local $Data::Dumper::Terse = 1;
-    local $Data::Dumper::Indent = 1;
-    local $Data::Dumper::Useqq = 1;
-    local $Data::Dumper::Quotekeys = 0;
-    local $Data::Dumper::Sortkeys = 1;
-
     $self->throw_exception(sprintf "%s for populate slice:\n%s",
-      $tuple_status->[$i][1],
-      Data::Dumper::Dumper(
-        { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) }
-      ),
+      ($tuple_status->[$i][1] || $err),
+      Data::Dumper::Concise::Dumper({
+        map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols)
+      }),
     );
   }
-  $self->throw_exception($sth->errstr) if !$rv;
 
-  $self->_query_end( $sql, @bind );
-  return (wantarray ? ($rv, $sth, @bind) : $rv);
+  $guard->commit if $guard;
+
+  return $rv;
 }
 
+sub _dbh_execute_array {
+    my ($self, $sth, $tuple_status, @extra) = @_;
+
+    return $sth->execute_array({ArrayTupleStatus => $tuple_status});
+}
+
+sub _dbh_execute_inserts_with_no_binds {
+  my ($self, $sth, $count) = @_;
+
+  my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0;
+
+  eval {
+    my $dbh = $self->_get_dbh;
+    local $dbh->{RaiseError} = 1;
+    local $dbh->{PrintError} = 0;
+
+    $sth->execute foreach 1..$count;
+  };
+  my $exception = $@;
+
+# Make sure statement is finished even if there was an exception.
+  eval { $sth->finish };
+  $exception = $@ unless $exception;
+
+  $self->throw_exception($exception) if $exception;
+
+  $guard->commit if $guard;
+
+  return $count;
+}
+
 sub update {
   my ($self, $source, @args) = @_; 
 
-# redispatch to update method of storage we reblessed into, if necessary
-  if (not $self->_driver_determined) {
-    $self->_determine_driver;
-    goto $self->can('update');
-  }
+  my $bind_attrs = $self->source_bind_attributes($source);
 
-  my $bind_attributes = $self->source_bind_attributes($source);
-
-  return $self->_execute('update' => [], $source, $bind_attributes, @args);
+  return $self->_execute('update' => [], $source, $bind_attrs, @args);
 }
 
 
 sub delete {
-  my $self = shift @_;
-  my $source = shift @_;
-  $self->_determine_driver;
+  my ($self, $source, @args) = @_;
+
   my $bind_attrs = $self->source_bind_attributes($source);
 
-  return $self->_execute('delete' => [], $source, $bind_attrs, @_);
+  return $self->_execute('delete' => [], $source, $bind_attrs, @args);
 }
 
 # We were sent here because the $rs contains a complex search
 # which will require a subquery to select the correct rows
-# (i.e. joined or limited resultsets)
+# (i.e. joined or limited resultsets, or non-introspectable conditions)
 #
-# Genarating a single PK column subquery is trivial and supported
+# Generating a single PK column subquery is trivial and supported
 # by all RDBMS. However if we have a multicolumn PK, things get ugly.
 # Look at _multipk_update_delete()
 sub _subq_update_delete {
@@ -1442,14 +1579,19 @@
 
   my $rsrc = $rs->result_source;
 
-  # we already check this, but double check naively just in case. Should be removed soon
+  # quick check if we got a sane rs on our hands
+  my @pcols = $rsrc->primary_columns;
+
   my $sel = $rs->_resolved_attrs->{select};
   $sel = [ $sel ] unless ref $sel eq 'ARRAY';
-  my @pcols = $rsrc->primary_columns;
-  if (@$sel != @pcols) {
+
+  if (
+      join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
+        ne
+      join ("\x00", sort @$sel )
+  ) {
     $self->throw_exception (
-      'Subquery update/delete can not be called on resultsets selecting a'
-     .' number of columns different than the number of primary keys'
+      '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
     );
   }
 
@@ -1651,324 +1793,6 @@
   return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit);
 }
 
-#
-# This is the code producing joined subqueries like:
-# SELECT me.*, other.* FROM ( SELECT me.* FROM ... ) JOIN other ON ... 
-#
-sub _adjust_select_args_for_complex_prefetch {
-  my ($self, $from, $select, $where, $attrs) = @_;
-
-  $self->throw_exception ('Nothing to prefetch... how did we get here?!')
-    if not @{$attrs->{_prefetch_select}};
-
-  $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
-    if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY');
-
-
-  # generate inner/outer attribute lists, remove stuff that doesn't apply
-  my $outer_attrs = { %$attrs };
-  delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
-
-  my $inner_attrs = { %$attrs };
-  delete $inner_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
-
-
-  # bring over all non-collapse-induced order_by into the inner query (if any)
-  # the outer one will have to keep them all
-  delete $inner_attrs->{order_by};
-  if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
-    $inner_attrs->{order_by} = [
-      @{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
-    ];
-  }
-
-
-  # generate the inner/outer select lists
-  # for inside we consider only stuff *not* brought in by the prefetch
-  # on the outside we substitute any function for its alias
-  my $outer_select = [ @$select ];
-  my $inner_select = [];
-  for my $i (0 .. ( @$outer_select - @{$outer_attrs->{_prefetch_select}} - 1) ) {
-    my $sel = $outer_select->[$i];
-
-    if (ref $sel eq 'HASH' ) {
-      $sel->{-as} ||= $attrs->{as}[$i];
-      $outer_select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "inner_column_$i") );
-    }
-
-    push @$inner_select, $sel;
-  }
-
-  # normalize a copy of $from, so it will be easier to work with further
-  # down (i.e. promote the initial hashref to an AoH)
-  $from = [ @$from ];
-  $from->[0] = [ $from->[0] ];
-  my %original_join_info = map { $_->[0]{-alias} => $_->[0] } (@$from);
-
-
-  # decide which parts of the join will remain in either part of
-  # the outer/inner query
-
-  # First we compose a list of which aliases are used in restrictions
-  # (i.e. conditions/order/grouping/etc). Since we do not have
-  # introspectable SQLA, we fall back to ugly scanning of raw SQL for
-  # WHERE, and for pieces of ORDER BY in order to determine which aliases
-  # need to appear in the resulting sql.
-  # It may not be very efficient, but it's a reasonable stop-gap
-  # Also unqualified column names will not be considered, but more often
-  # than not this is actually ok
-  #
-  # In the same loop we enumerate part of the selection aliases, as
-  # it requires the same sqla hack for the time being
-  my ($restrict_aliases, $select_aliases, $prefetch_aliases);
-  {
-    # produce stuff unquoted, so it can be scanned
-    my $sql_maker = $self->sql_maker;
-    local $sql_maker->{quote_char};
-    my $sep = $self->_sql_maker_opts->{name_sep} || '.';
-    $sep = "\Q$sep\E";
-
-    my $non_prefetch_select_sql = $sql_maker->_recurse_fields ($inner_select);
-    my $prefetch_select_sql = $sql_maker->_recurse_fields ($outer_attrs->{_prefetch_select});
-    my $where_sql = $sql_maker->where ($where);
-    my $group_by_sql = $sql_maker->_order_by({
-      map { $_ => $inner_attrs->{$_} } qw/group_by having/
-    });
-    my @non_prefetch_order_by_chunks = (map
-      { ref $_ ? $_->[0] : $_ }
-      $sql_maker->_order_by_chunks ($inner_attrs->{order_by})
-    );
-
-
-    for my $alias (keys %original_join_info) {
-      my $seen_re = qr/\b $alias $sep/x;
-
-      for my $piece ($where_sql, $group_by_sql, @non_prefetch_order_by_chunks ) {
-        if ($piece =~ $seen_re) {
-          $restrict_aliases->{$alias} = 1;
-        }
-      }
-
-      if ($non_prefetch_select_sql =~ $seen_re) {
-          $select_aliases->{$alias} = 1;
-      }
-
-      if ($prefetch_select_sql =~ $seen_re) {
-          $prefetch_aliases->{$alias} = 1;
-      }
-
-    }
-  }
-
-  # Add any non-left joins to the restriction list (such joins are indeed restrictions)
-  for my $j (values %original_join_info) {
-    my $alias = $j->{-alias} or next;
-    $restrict_aliases->{$alias} = 1 if (
-      (not $j->{-join_type})
-        or
-      ($j->{-join_type} !~ /^left (?: \s+ outer)? $/xi)
-    );
-  }
-
-  # mark all join parents as mentioned
-  # (e.g.  join => { cds => 'tracks' } - tracks will need to bring cds too )
-  for my $collection ($restrict_aliases, $select_aliases) {
-    for my $alias (keys %$collection) {
-      $collection->{$_} = 1
-        for (@{ $original_join_info{$alias}{-join_path} || [] });
-    }
-  }
-
-  # construct the inner $from for the subquery
-  my %inner_joins = (map { %{$_ || {}} } ($restrict_aliases, $select_aliases) );
-  my @inner_from;
-  for my $j (@$from) {
-    push @inner_from, $j if $inner_joins{$j->[0]{-alias}};
-  }
-
-  # if a multi-type join was needed in the subquery ("multi" is indicated by
-  # presence in {collapse}) - add a group_by to simulate the collapse in the subq
-  unless ($inner_attrs->{group_by}) {
-    for my $alias (keys %inner_joins) {
-
-      # the dot comes from some weirdness in collapse
-      # remove after the rewrite
-      if ($attrs->{collapse}{".$alias"}) {
-        $inner_attrs->{group_by} ||= $inner_select;
-        last;
-      }
-    }
-  }
-
-  # demote the inner_from head
-  $inner_from[0] = $inner_from[0][0];
-
-  # generate the subquery
-  my $subq = $self->_select_args_to_query (
-    \@inner_from,
-    $inner_select,
-    $where,
-    $inner_attrs,
-  );
-
-  my $subq_joinspec = {
-    -alias => $attrs->{alias},
-    -source_handle => $inner_from[0]{-source_handle},
-    $attrs->{alias} => $subq,
-  };
-
-  # Generate the outer from - this is relatively easy (really just replace
-  # the join slot with the subquery), with a major caveat - we can not
-  # join anything that is non-selecting (not part of the prefetch), but at
-  # the same time is a multi-type relationship, as it will explode the result.
-  #
-  # There are two possibilities here
-  # - either the join is non-restricting, in which case we simply throw it away
-  # - it is part of the restrictions, in which case we need to collapse the outer
-  #   result by tackling yet another group_by to the outside of the query
-
-  # so first generate the outer_from, up to the substitution point
-  my @outer_from;
-  while (my $j = shift @$from) {
-    if ($j->[0]{-alias} eq $attrs->{alias}) { # time to swap
-      push @outer_from, [
-        $subq_joinspec,
-        @{$j}[1 .. $#$j],
-      ];
-      last; # we'll take care of what's left in $from below
-    }
-    else {
-      push @outer_from, $j;
-    }
-  }
-
-  # see what's left - throw away if not selecting/restricting
-  # also throw in a group_by if restricting to guard against
-  # cross-join explosions
-  #
-  while (my $j = shift @$from) {
-    my $alias = $j->[0]{-alias};
-
-    if ($select_aliases->{$alias} || $prefetch_aliases->{$alias}) {
-      push @outer_from, $j;
-    }
-    elsif ($restrict_aliases->{$alias}) {
-      push @outer_from, $j;
-
-      # FIXME - this should be obviated by SQLA2, as I'll be able to 
-      # have restrict_inner and restrict_outer... or something to that
-      # effect... I think...
-
-      # FIXME2 - I can't find a clean way to determine if a particular join
-      # is a multi - instead I am just treating everything as a potential
-      # explosive join (ribasushi)
-      #
-      # if (my $handle = $j->[0]{-source_handle}) {
-      #   my $rsrc = $handle->resolve;
-      #   ... need to bail out of the following if this is not a multi,
-      #       as it will be much easier on the db ...
-
-          $outer_attrs->{group_by} ||= $outer_select;
-      # }
-    }
-  }
-
-  # demote the outer_from head
-  $outer_from[0] = $outer_from[0][0];
-
-  # This is totally horrific - the $where ends up in both the inner and outer query
-  # Unfortunately not much can be done until SQLA2 introspection arrives, and even
-  # then if where conditions apply to the *right* side of the prefetch, you may have
-  # to both filter the inner select (e.g. to apply a limit) and then have to re-filter
-  # the outer select to exclude joins you didin't want in the first place
-  #
-  # OTOH it can be seen as a plus: <ash> (notes that this query would make a DBA cry ;)
-  return (\@outer_from, $outer_select, $where, $outer_attrs);
-}
-
-sub _resolve_ident_sources {
-  my ($self, $ident) = @_;
-
-  my $alias2source = {};
-  my $rs_alias;
-
-  # the reason this is so contrived is that $ident may be a {from}
-  # structure, specifying multiple tables to join
-  if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
-    # this is compat mode for insert/update/delete which do not deal with aliases
-    $alias2source->{me} = $ident;
-    $rs_alias = 'me';
-  }
-  elsif (ref $ident eq 'ARRAY') {
-
-    for (@$ident) {
-      my $tabinfo;
-      if (ref $_ eq 'HASH') {
-        $tabinfo = $_;
-        $rs_alias = $tabinfo->{-alias};
-      }
-      if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') {
-        $tabinfo = $_->[0];
-      }
-
-      $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-source_handle}->resolve
-        if ($tabinfo->{-source_handle});
-    }
-  }
-
-  return ($alias2source, $rs_alias);
-}
-
-# Takes $ident, \@column_names
-#
-# returns { $column_name => \%column_info, ... }
-# also note: this adds -result_source => $rsrc to the column info
-#
-# usage:
-#   my $col_sources = $self->_resolve_column_info($ident, @column_names);
-sub _resolve_column_info {
-  my ($self, $ident, $colnames) = @_;
-  my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident);
-
-  my $sep = $self->_sql_maker_opts->{name_sep} || '.';
-  $sep = "\Q$sep\E";
-
-  my (%return, %seen_cols);
-
-  # compile a global list of column names, to be able to properly
-  # disambiguate unqualified column names (if at all possible)
-  for my $alias (keys %$alias2src) {
-    my $rsrc = $alias2src->{$alias};
-    for my $colname ($rsrc->columns) {
-      push @{$seen_cols{$colname}}, $alias;
-    }
-  }
-
-  COLUMN:
-  foreach my $col (@$colnames) {
-    my ($alias, $colname) = $col =~ m/^ (?: ([^$sep]+) $sep)? (.+) $/x;
-
-    unless ($alias) {
-      # see if the column was seen exactly once (so we know which rsrc it came from)
-      if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1) {
-        $alias = $seen_cols{$colname}[0];
-      }
-      else {
-        next COLUMN;
-      }
-    }
-
-    my $rsrc = $alias2src->{$alias};
-    $return{$col} = $rsrc && {
-      %{$rsrc->column_info($colname)},
-      -result_source => $rsrc,
-      -source_alias => $alias,
-    };
-  }
-
-  return \%return;
-}
-
 # Returns a counting SELECT for a simple count
 # query. Abstracted so that a storage could override
 # this to { count => 'firstcol' } or whatever makes
@@ -1993,7 +1817,6 @@
   return @pcols ? \@pcols : [ 1 ];
 }
 
-
 sub source_bind_attributes {
   my ($self, $source) = @_;
 
@@ -2169,7 +1992,7 @@
 
 This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
 currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
-L<::Sybase|DBIx::Class::Storage::DBI::Sybase>.
+L<::Sybase::ASE|DBIx::Class::Storage::DBI::Sybase::ASE>.
 
 The default implementation returns C<undef>, implement in your Storage driver if
 you need this functionality.
@@ -2227,14 +2050,7 @@
 =cut
 
 sub sqlt_type {
-  my ($self) = @_;
-
-  if (not $self->_driver_determined) {
-    $self->_determine_driver;
-    goto $self->can ('sqlt_type');
-  }
-
-  $self->_get_dbh->{Driver}->{Name};
+  shift->_get_dbh->{Driver}->{Name};
 }
 
 =head2 bind_attribute_by_data_type
@@ -2508,7 +2324,11 @@
     parser => 'SQL::Translator::Parser::DBIx::Class',
     data => $schema,
   );
-  return $tr->translate;
+
+  my $ret = $tr->translate
+    or $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error);
+
+  return $ret;
 }
 
 sub deploy {
@@ -2574,11 +2394,6 @@
 =cut
 
 sub build_datetime_parser {
-  if (not $_[0]->_driver_determined) {
-    $_[0]->_determine_driver;
-    goto $_[0]->can('build_datetime_parser');
-  }
-
   my $self = shift;
   my $type = $self->datetime_parser_type(@_);
   $self->ensure_class_loaded ($type);
@@ -2611,10 +2426,10 @@
     return;
 }
 
-# SQLT version handling 
+# SQLT version handling
 {
-  my $_sqlt_version_ok;     # private 
-  my $_sqlt_version_error;  # private 
+  my $_sqlt_version_ok;     # private
+  my $_sqlt_version_error;  # private
 
   sub _sqlt_version_ok {
     if (!defined $_sqlt_version_ok) {

Added: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBIHacks.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBIHacks.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class/Storage/DBIHacks.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -0,0 +1,469 @@
+package   #hide from PAUSE
+  DBIx::Class::Storage::DBIHacks;
+
+#
+# This module contains code that should never have seen the light of day,
+# does not belong in the Storage, or is otherwise unfit for public
+# display. The arrival of SQLA2 should immediately obsolete 90% of this
+#
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class::Storage';
+use mro 'c3';
+
+use Carp::Clan qw/^DBIx::Class/;
+
+#
+# This is the code producing joined subqueries like:
+# SELECT me.*, other.* FROM ( SELECT me.* FROM ... ) JOIN other ON ... 
+#
+sub _adjust_select_args_for_complex_prefetch {
+  my ($self, $from, $select, $where, $attrs) = @_;
+
+  $self->throw_exception ('Nothing to prefetch... how did we get here?!')
+    if not @{$attrs->{_prefetch_select}};
+
+  $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
+    if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY');
+
+
+  # generate inner/outer attribute lists, remove stuff that doesn't apply
+  my $outer_attrs = { %$attrs };
+  delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
+
+  my $inner_attrs = { %$attrs };
+  delete $inner_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
+
+
+  # bring over all non-collapse-induced order_by into the inner query (if any)
+  # the outer one will have to keep them all
+  delete $inner_attrs->{order_by};
+  if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
+    $inner_attrs->{order_by} = [
+      @{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
+    ];
+  }
+
+
+  # generate the inner/outer select lists
+  # for inside we consider only stuff *not* brought in by the prefetch
+  # on the outside we substitute any function for its alias
+  my $outer_select = [ @$select ];
+  my $inner_select = [];
+  for my $i (0 .. ( @$outer_select - @{$outer_attrs->{_prefetch_select}} - 1) ) {
+    my $sel = $outer_select->[$i];
+
+    if (ref $sel eq 'HASH' ) {
+      $sel->{-as} ||= $attrs->{as}[$i];
+      $outer_select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "inner_column_$i") );
+    }
+
+    push @$inner_select, $sel;
+  }
+
+  # normalize a copy of $from, so it will be easier to work with further
+  # down (i.e. promote the initial hashref to an AoH)
+  $from = [ @$from ];
+  $from->[0] = [ $from->[0] ];
+  my %original_join_info = map { $_->[0]{-alias} => $_->[0] } (@$from);
+
+
+  # decide which parts of the join will remain in either part of
+  # the outer/inner query
+
+  # First we compose a list of which aliases are used in restrictions
+  # (i.e. conditions/order/grouping/etc). Since we do not have
+  # introspectable SQLA, we fall back to ugly scanning of raw SQL for
+  # WHERE, and for pieces of ORDER BY in order to determine which aliases
+  # need to appear in the resulting sql.
+  # It may not be very efficient, but it's a reasonable stop-gap
+  # Also unqualified column names will not be considered, but more often
+  # than not this is actually ok
+  #
+  # In the same loop we enumerate part of the selection aliases, as
+  # it requires the same sqla hack for the time being
+  my ($restrict_aliases, $select_aliases, $prefetch_aliases);
+  {
+    # produce stuff unquoted, so it can be scanned
+    my $sql_maker = $self->sql_maker;
+    local $sql_maker->{quote_char};
+    my $sep = $self->_sql_maker_opts->{name_sep} || '.';
+    $sep = "\Q$sep\E";
+
+    my $non_prefetch_select_sql = $sql_maker->_recurse_fields ($inner_select);
+    my $prefetch_select_sql = $sql_maker->_recurse_fields ($outer_attrs->{_prefetch_select});
+    my $where_sql = $sql_maker->where ($where);
+    my $group_by_sql = $sql_maker->_order_by({
+      map { $_ => $inner_attrs->{$_} } qw/group_by having/
+    });
+    my @non_prefetch_order_by_chunks = (map
+      { ref $_ ? $_->[0] : $_ }
+      $sql_maker->_order_by_chunks ($inner_attrs->{order_by})
+    );
+
+
+    for my $alias (keys %original_join_info) {
+      my $seen_re = qr/\b $alias $sep/x;
+
+      for my $piece ($where_sql, $group_by_sql, @non_prefetch_order_by_chunks ) {
+        if ($piece =~ $seen_re) {
+          $restrict_aliases->{$alias} = 1;
+        }
+      }
+
+      if ($non_prefetch_select_sql =~ $seen_re) {
+          $select_aliases->{$alias} = 1;
+      }
+
+      if ($prefetch_select_sql =~ $seen_re) {
+          $prefetch_aliases->{$alias} = 1;
+      }
+
+    }
+  }
+
+  # Add any non-left joins to the restriction list (such joins are indeed restrictions)
+  for my $j (values %original_join_info) {
+    my $alias = $j->{-alias} or next;
+    $restrict_aliases->{$alias} = 1 if (
+      (not $j->{-join_type})
+        or
+      ($j->{-join_type} !~ /^left (?: \s+ outer)? $/xi)
+    );
+  }
+
+  # mark all join parents as mentioned
+  # (e.g.  join => { cds => 'tracks' } - tracks will need to bring cds too )
+  for my $collection ($restrict_aliases, $select_aliases) {
+    for my $alias (keys %$collection) {
+      $collection->{$_} = 1
+        for (@{ $original_join_info{$alias}{-join_path} || [] });
+    }
+  }
+
+  # construct the inner $from for the subquery
+  my %inner_joins = (map { %{$_ || {}} } ($restrict_aliases, $select_aliases) );
+  my @inner_from;
+  for my $j (@$from) {
+    push @inner_from, $j if $inner_joins{$j->[0]{-alias}};
+  }
+
+  # if a multi-type join was needed in the subquery ("multi" is indicated by
+  # presence in {collapse}) - add a group_by to simulate the collapse in the subq
+  unless ($inner_attrs->{group_by}) {
+    for my $alias (keys %inner_joins) {
+
+      # the dot comes from some weirdness in collapse
+      # remove this workaround after the collapse rewrite
+      if ($attrs->{collapse}{".$alias"}) {
+        $inner_attrs->{group_by} ||= $inner_select;
+        last;
+      }
+    }
+  }
+
+  # demote the inner_from head
+  $inner_from[0] = $inner_from[0][0];
+
+  # generate the subquery
+  my $subq = $self->_select_args_to_query (
+    \@inner_from,
+    $inner_select,
+    $where,
+    $inner_attrs,
+  );
+
+  my $subq_joinspec = {
+    -alias => $attrs->{alias},
+    -source_handle => $inner_from[0]{-source_handle},
+    $attrs->{alias} => $subq,
+  };
+
+  # Generate the outer from - this is relatively easy (really just replace
+  # the join slot with the subquery), with a major caveat - we cannot
+  # join anything that is non-selecting (not part of the prefetch), but at
+  # the same time is a multi-type relationship, as it would explode the result.
+  #
+  # There are two possibilities here
+  # - either the join is non-restricting, in which case we simply throw it away
+  # - it is part of the restrictions, in which case we need to collapse the outer
+  #   result by tacking yet another group_by onto the outside of the query
+
+  # so first generate the outer_from, up to the substitution point
+  my @outer_from;
+  while (my $j = shift @$from) {
+    if ($j->[0]{-alias} eq $attrs->{alias}) { # time to swap
+      push @outer_from, [
+        $subq_joinspec,
+        @{$j}[1 .. $#$j],
+      ];
+      last; # we'll take care of what's left in $from below
+    }
+    else {
+      push @outer_from, $j;
+    }
+  }
+
+  # see what's left - throw away if not selecting/restricting
+  # also throw in a group_by if restricting to guard against
+  # cross-join explosions
+  #
+  while (my $j = shift @$from) {
+    my $alias = $j->[0]{-alias};
+
+    if ($select_aliases->{$alias} || $prefetch_aliases->{$alias}) {
+      push @outer_from, $j;
+    }
+    elsif ($restrict_aliases->{$alias}) {
+      push @outer_from, $j;
+
+      # FIXME - this should be obviated by SQLA2, as I'll be able to 
+      # have restrict_inner and restrict_outer... or something to that
+      # effect... I think...
+
+      # FIXME2 - I can't find a clean way to determine if a particular join
+      # is a multi - instead I am just treating everything as a potential
+      # explosive join (ribasushi)
+      #
+      # if (my $handle = $j->[0]{-source_handle}) {
+      #   my $rsrc = $handle->resolve;
+      #   ... need to bail out of the following if this is not a multi,
+      #       as it will be much easier on the db ...
+
+          $outer_attrs->{group_by} ||= $outer_select;
+      # }
+    }
+  }
+
+  # demote the outer_from head
+  $outer_from[0] = $outer_from[0][0];
+
+  # This is totally horrific - the $where ends up in both the inner and outer query
+  # Unfortunately not much can be done until SQLA2 introspection arrives, and even
+  # then if where conditions apply to the *right* side of the prefetch, you may have
+  # to both filter the inner select (e.g. to apply a limit) and then re-filter the
+  # outer select to exclude joins you didn't want in the first place
+  #
+  # OTOH it can be seen as a plus: <ash> (notes that this query would make a DBA cry ;)
+  return (\@outer_from, $outer_select, $where, $outer_attrs);
+}
+
+sub _resolve_ident_sources {
+  my ($self, $ident) = @_;
+
+  my $alias2source = {};
+  my $rs_alias;
+
+  # the reason this is so contrived is that $ident may be a {from}
+  # structure, specifying multiple tables to join
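+  # (illustrative shape only: [ { -alias => 'me', -source_handle => $h },
+  #                             [ { -alias => 'cds', -source_handle => $h2 }, $join_cond ] ])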
+  if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
+    # this is compat mode for insert/update/delete which do not deal with aliases
+    $alias2source->{me} = $ident;
+    $rs_alias = 'me';
+  }
+  elsif (ref $ident eq 'ARRAY') {
+
+    for (@$ident) {
+      my $tabinfo;
+      if (ref $_ eq 'HASH') {
+        $tabinfo = $_;
+        $rs_alias = $tabinfo->{-alias};
+      }
+      if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') {
+        $tabinfo = $_->[0];
+      }
+
+      $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-source_handle}->resolve
+        if ($tabinfo->{-source_handle});
+    }
+  }
+
+  return ($alias2source, $rs_alias);
+}
+
+# Takes $ident, \@column_names
+#
+# returns { $column_name => \%column_info, ... }
+# also note: this adds -result_source => $rsrc to the column info
+#
+# If no column names are supplied, returns info about *all* columns
+# for all sources
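+#
+# Illustrative shape only (keys depend on the actual column_info):
+#   {
+#     'me.name'   => { data_type => 'varchar', -result_source => $rsrc, -source_alias => 'me' },
+#     'cds.title' => { ... },
+#   }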
+sub _resolve_column_info {
+  my ($self, $ident, $colnames) = @_;
+  my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident);
+
+  my $sep = $self->_sql_maker_opts->{name_sep} || '.';
+  my $qsep = quotemeta $sep;
+
+  my (%return, %seen_cols, @auto_colnames);
+
+  # compile a global list of column names, to be able to properly
+  # disambiguate unqualified column names (if at all possible)
+  for my $alias (keys %$alias2src) {
+    my $rsrc = $alias2src->{$alias};
+    for my $colname ($rsrc->columns) {
+      push @{$seen_cols{$colname}}, $alias;
+      push @auto_colnames, "$alias$sep$colname" unless $colnames;
+    }
+  }
+
+  $colnames ||= [
+    @auto_colnames,
+    grep { @{$seen_cols{$_}} == 1 } (keys %seen_cols),
+  ];
+
+  COLUMN:
+  foreach my $col (@$colnames) {
+    my ($alias, $colname) = $col =~ m/^ (?: ([^$qsep]+) $qsep)? (.+) $/x;
+
+    unless ($alias) {
+      # see if the column was seen exactly once (so we know which rsrc it came from)
+      if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1) {
+        $alias = $seen_cols{$colname}[0];
+      }
+      else {
+        next COLUMN;
+      }
+    }
+
+    my $rsrc = $alias2src->{$alias};
+    $return{$col} = $rsrc && {
+      %{$rsrc->column_info($colname)},
+      -result_source => $rsrc,
+      -source_alias => $alias,
+    };
+  }
+
+  return \%return;
+}
+
+# The DBIC relationship chaining implementation is pretty simple - every
+# new related_relationship is pushed onto the {from} stack, and the {select}
+# window simply slides further in. This means that when we count somewhere
+# in the middle, we have to make sure that everything in the join chain is an
+# actual inner join, otherwise the count will come back with unpredictable
+# results (a resultset may be generated with _some_ rows regardless of whether
+# the relation which the $rs currently selects has rows or not). E.g.
+# $artist_rs->cds->count - normally generates:
+# SELECT COUNT( * ) FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid
+# which actually returns the number of artists * (number of cds || 1)
+#
+# So what we do here is crawl {from}, determine if the current alias is at
+# the top of the stack, and if not - make sure the chain is inner-joined down
+# to the root.
+#
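+# After the conversion the example above roughly becomes:
+# SELECT COUNT( * ) FROM artist me JOIN cd cds ON cds.artist = me.artistid
+# counting only artist/cd pairs that actually exist
+#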
+sub _straight_join_to_node {
+  my ($self, $from, $alias) = @_;
+
+  # subqueries and other oddness are naturally not supported
+  return $from if (
+    ref $from ne 'ARRAY'
+      ||
+    @$from <= 1
+      ||
+    ref $from->[0] ne 'HASH'
+      ||
+    ! $from->[0]{-alias}
+      ||
+    $from->[0]{-alias} eq $alias  # this last bit means $alias is the head of $from - nothing to do
+  );
+
+  # find the current $alias in the $from structure
+  my $switch_branch;
+  JOINSCAN:
+  for my $j (@{$from}[1 .. $#$from]) {
+    if ($j->[0]{-alias} eq $alias) {
+      $switch_branch = $j->[0]{-join_path};
+      last JOINSCAN;
+    }
+  }
+
+  # the alias was not found in the join structure - something went wrong, bail out
+  return $from unless $switch_branch;
+
+  # So it looks like we will have to switch some stuff around.
+  # local() is useless here as we will be leaving the scope
+  # anyway, and deep cloning is just too fucking expensive
+  # So replace the first hashref in the node arrayref manually 
+  my @new_from = ($from->[0]);
+  my $sw_idx = { map { $_ => 1 } @$switch_branch };
+
+  for my $j (@{$from}[1 .. $#$from]) {
+    my $jalias = $j->[0]{-alias};
+
+    if ($sw_idx->{$jalias}) {
+      my %attrs = %{$j->[0]};
+      delete $attrs{-join_type};
+      push @new_from, [
+        \%attrs,
+        @{$j}[ 1 .. $#$j ],
+      ];
+    }
+    else {
+      push @new_from, $j;
+    }
+  }
+
+  return \@new_from;
+}
+
+# Most databases do not allow aliasing of tables in UPDATE/DELETE. Thus
+# a condition containing 'me' or other table prefixes will not work
+# at all. What this code tries to do (badly) is introspect the condition
+# and remove all column qualifiers. If it bails out early (returns undef)
+# the calling code should try another approach (e.g. a subquery)
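+#
+# Illustrative only: a condition like
+#   { 'me.name' => 'foo', 'cds.year' => 1999 }
+# comes back as
+#   { name => 'foo', year => 1999 }
+# while anything unrecognized (e.g. a scalar ref of literal SQL) yields undef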
+sub _strip_cond_qualifiers {
+  my ($self, $where) = @_;
+
+  my $cond = {};
+
+  # No-op. No condition, we're updating/deleting everything
+  return $cond unless $where;
+
+  if (ref $where eq 'ARRAY') {
+    $cond = [
+      map {
+        my %hash;
+        foreach my $key (keys %{$_}) {
+          $key =~ /([^.]+)$/;
+          $hash{$1} = $_->{$key};
+        }
+        \%hash;
+      } @$where
+    ];
+  }
+  elsif (ref $where eq 'HASH') {
+    if ( (keys %$where) == 1 && ( (keys %{$where})[0] eq '-and' )) {
+      $cond->{-and} = [];
+      my @cond = @{$where->{-and}};
+      for (my $i = 0; $i < @cond; $i++) {
+        my $entry = $cond[$i];
+        my $hash;
+        if (ref $entry eq 'HASH') {
+          $hash = $self->_strip_cond_qualifiers($entry);
+        }
+        else {
+          $entry =~ /([^.]+)$/;
+          $hash->{$1} = $cond[++$i];
+        }
+        push @{$cond->{-and}}, $hash;
+      }
+    }
+    else {
+      foreach my $key (keys %$where) {
+        $key =~ /([^.]+)$/;
+        $cond->{$1} = $where->{$key};
+      }
+    }
+  }
+  else {
+    return undef;
+  }
+
+  return $cond;
+}
+
+
+1;

Modified: DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/DBIx/Class.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -24,7 +24,7 @@
 # Always remember to do all digits for the version even if they're 0
 # i.e. first release of 0.XX *must* be 0.XX000. This avoids fBSD ports
 # brain damage and presumably various other packaging systems too
-$VERSION = '0.08112';
+$VERSION = '0.08114';
 
 $VERSION = eval $VERSION; # numify for warning-free dev releases
 
@@ -115,7 +115,7 @@
   my $all_artists_rs = $schema->resultset('Artist');
 
   # Output all artists names
-  # $artist here is a DBIx::Class::Row, which has accessors 
+  # $artist here is a DBIx::Class::Row, which has accessors
   # for all its columns. Rows are also subclasses of your Result class.
   foreach $artist (@artists) {
     print $artist->name, "\n";
@@ -341,6 +341,8 @@
 
 Tom Hukins
 
+triode: Pete Gamache <gamache at cpan.org>
+
 typester: Daisuke Murase <typester at cpan.org>
 
 victori: Victor Igumnov <victori at cpan.org>

Modified: DBIx-Class/0.08/branches/prefetch/lib/SQL/Translator/Parser/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/lib/SQL/Translator/Parser/DBIx/Class.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/lib/SQL/Translator/Parser/DBIx/Class.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -86,7 +86,7 @@
         # support quoting properly to be signaled about this
         $table_name = $$table_name if ref $table_name eq 'SCALAR';
 
-        # Its possible to have multiple DBIC sources using the same table
+        # It's possible to have multiple DBIC sources using the same table
         next if $tables{$table_name};
 
         $tables{$table_name}{source} = $source;
@@ -141,6 +141,7 @@
             next unless ref $rel_info->{cond} eq 'HASH';
 
             my $othertable = $source->related_source($rel);
+            next if $othertable->isa('DBIx::Class::ResultSource::View');  # can't define constraints referencing a view
             my $rel_table = $othertable->name;
 
             # FIXME - this isn't the right way to do it, but sqlt does not
@@ -184,7 +185,7 @@
                     if ($fk_constraint) {
                         $cascade->{$c} = $rel_info->{attrs}{"on_$c"};
                     }
-                    else {
+                    elsif ( $rel_info->{attrs}{"on_$c"} ) {
                         carp "SQLT attribute 'on_$c' was supplied for relationship '$moniker/$rel', which does not appear to be a foreign constraint. "
                             . "If you are sure that SQLT must generate a constraint for this relationship, add 'is_foreign_key_constraint => 1' to the attributes.\n";
                     }
@@ -200,7 +201,7 @@
                 next unless $fk_constraint;
 
                 # Make sure we dont create the same foreign key constraint twice
-                my $key_test = join("\x00", @keys);
+                my $key_test = join("\x00", sort @keys);
                 next if $created_FK_rels{$rel_table}->{$key_test};
 
                 if (scalar(@keys)) {
@@ -214,7 +215,6 @@
                   if (! $is_deferrable and $rel_table ne $table_name) {
                     $tables{$table_name}{foreign_table_deps}{$rel_table}++;
                   }
-
                   $table->add_constraint(
                                     type             => 'foreign_key',
                                     name             => join('_', $table_name, 'fk', @keys),

Modified: DBIx-Class/0.08/branches/prefetch/t/100populate.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/100populate.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/100populate.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -5,9 +5,8 @@
 use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
+use Path::Class::File ();
 
-plan tests => 23;
-
 my $schema = DBICTest->init_schema();
 
 # The map below generates stuff like:
@@ -116,3 +115,205 @@
 is($link7->url, undef, 'Link 7 url');
 is($link7->title, 'gtitle', 'Link 7 title');
 
+my $rs = $schema->resultset('Artist');
+$rs->delete;
+
+# test _execute_array_empty (insert_bulk with all literal sql)
+
+$rs->populate([
+    (+{
+        name => \"'DT'",
+        rank => \500,
+        charfield => \"'mtfnpy'",
+    }) x 5
+]);
+
+is((grep {
+  $_->name eq 'DT' &&
+  $_->rank == 500  &&
+  $_->charfield eq 'mtfnpy'
+} $rs->all), 5, 'populate with all literal SQL');
+
+$rs->delete;
+
+# test mixed binds with literal sql
+
+$rs->populate([
+    (+{
+        name => \"'DT'",
+        rank => 500,
+        charfield => \"'mtfnpy'",
+    }) x 5
+]);
+
+is((grep {
+  $_->name eq 'DT' &&
+  $_->rank == 500  &&
+  $_->charfield eq 'mtfnpy'
+} $rs->all), 5, 'populate with mixed literal SQL and binds');
+
+$rs->delete;
+
+###
+
+throws_ok {
+    $rs->populate([
+        {
+            artistid => 1,
+            name => 'foo1',
+        },
+        {
+            artistid => 'foo', # this dies
+            name => 'foo2',
+        },
+        {
+            artistid => 3,
+            name => 'foo3',
+        },
+    ]);
+} qr/slice/, 'bad slice';
+
+is($rs->count, 0, 'populate is atomic');
+
+# Trying to use a column marked as a bind in the first slice with literal sql in
+# a later slice should throw.
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => 1,
+      name => \"'foo'",
+    },
+    {
+      artistid => \2,
+      name => \"'foo'",
+    }
+  ]);
+} qr/bind expected/, 'literal sql where bind expected throws';
+
+# ... and vice-versa.
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => \1,
+      name => \"'foo'",
+    },
+    {
+      artistid => 2,
+      name => \"'foo'",
+    }
+  ]);
+} qr/literal SQL expected/i, 'bind where literal sql expected throws';
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => 1,
+      name => \"'foo'",
+    },
+    {
+      artistid => 2,
+      name => \"'bar'",
+    }
+  ]);
+} qr/inconsistent/, 'literal sql must be the same in all slices';
+
+# the stringification has nothing to do with the artist name
+# this is solely for testing consistency
+my $fn = Path::Class::File->new ('somedir/somefilename.tmp');
+my $fn2 = Path::Class::File->new ('somedir/someotherfilename.tmp');
+
+lives_ok {
+  $rs->populate([
+    {
+      name => 'supplied before stringifying object',
+    },
+    {
+      name => $fn,
+    }
+  ]);
+} 'stringifying objects pass through';
+
+# ... and vice-versa.
+
+lives_ok {
+  $rs->populate([
+    {
+      name => $fn2,
+    },
+    {
+      name => 'supplied after stringifying object',
+    },
+  ]);
+} 'stringifying objects pass through';
+
+for (
+  $fn,
+  $fn2,
+  'supplied after stringifying object',
+  'supplied before stringifying object'
+) {
+  my $row = $rs->find ({name => $_});
+  ok ($row, "Stringification test row '$_' properly inserted");
+}
+
+$rs->delete;
+
+# test stringification with ->create rather than Storage::insert_bulk as well
+
+lives_ok {
+  my @dummy = $rs->populate([
+    {
+      name => 'supplied before stringifying object',
+    },
+    {
+      name => $fn,
+    }
+  ]);
+} 'stringifying objects pass through';
+
+# ... and vice-versa.
+
+lives_ok {
+  my @dummy = $rs->populate([
+    {
+      name => $fn2,
+    },
+    {
+      name => 'supplied after stringifying object',
+    },
+  ]);
+} 'stringifying objects pass through';
+
+for (
+  $fn,
+  $fn2,
+  'supplied after stringifying object',
+  'supplied before stringifying object'
+) {
+  my $row = $rs->find ({name => $_});
+  ok ($row, "Stringification test row '$_' properly inserted");
+}
+
+lives_ok {
+   $schema->resultset('TwoKeys')->populate([{
+      artist => 1,
+      cd     => 5,
+      fourkeys_to_twokeys => [{
+            f_foo => 1,
+            f_bar => 1,
+            f_hello => 1,
+            f_goodbye => 1,
+            autopilot => 'a',
+      },{
+            f_foo => 2,
+            f_bar => 2,
+            f_hello => 2,
+            f_goodbye => 2,
+            autopilot => 'b',
+      }]
+   }])
+} 'multicol-PK has_many populate works';
+
+done_testing;

Modified: DBIx-Class/0.08/branches/prefetch/t/101populate_rs.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/101populate_rs.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/101populate_rs.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -15,9 +15,7 @@
 use lib qw(t/lib);
 use DBICTest;
 
-plan tests => 142;
 
-
 ## ----------------------------------------------------------------------------
 ## Get a Schema and some ResultSets we can play with.
 ## ----------------------------------------------------------------------------
@@ -26,6 +24,8 @@
 my $art_rs	= $schema->resultset('Artist');
 my $cd_rs	= $schema->resultset('CD');
 
+my $restricted_art_rs	= $art_rs->search({rank => 42});
+
 ok( $schema, 'Got a Schema object');
 ok( $art_rs, 'Got Good Artist Resultset');
 ok( $cd_rs, 'Got Good CD Resultset');
@@ -333,6 +333,18 @@
 		is($cdB->artist->name, 'Fred BloggsD', 'Set Artist to FredD');
 		ok($cdB->artist->artistid == $aid, "Got Expected Artist ID");
 	}
+
+  WITH_COND_FROM_RS: {
+  
+    my ($more_crap) = $restricted_art_rs->populate([
+      {
+        name => 'More Manufactured Crap',
+      },
+    ]);
+    
+    ## Did it use the condition in the resultset?
+    cmp_ok( $more_crap->rank, '==', 42, "Got Correct rank for result object");
+  } 
 }
 
 
@@ -601,6 +613,21 @@
 		ok( $cd2->title eq "VOID_Yet More Tweeny-Pop crap", "Got Expected CD Title");
 	}
 
+  WITH_COND_FROM_RS: {
+  
+    $restricted_art_rs->populate([
+      {
+        name => 'VOID More Manufactured Crap',
+      },
+    ]);
+
+    my $more_crap = $art_rs->search({
+      name => 'VOID More Manufactured Crap'
+    })->first;
+    
+    ## Did it use the condition in the resultset?
+    cmp_ok( $more_crap->rank, '==', 42, "Got Correct rank for result object");
+  } 
 }
 
 ARRAYREF_OF_ARRAYREF_STYLE: {
@@ -619,7 +646,7 @@
   is $jumped->name, 'A singer that jumped the shark two albums ago', 'Correct Name';
   is $cool->name, 'An actually cool singer.', 'Correct Name';
   
-  my ($cooler, $lamer) = $art_rs->populate([
+  my ($cooler, $lamer) = $restricted_art_rs->populate([
     [qw/artistid name/],
     [1003, 'Cooler'],
     [1004, 'Lamer'],	
@@ -627,4 +654,36 @@
   
   is $cooler->name, 'Cooler', 'Correct Name';
   is $lamer->name, 'Lamer', 'Correct Name';  
-}
\ No newline at end of file
+
+  cmp_ok $cooler->rank, '==', 42, 'Correct Rank';
+
+  ARRAY_CONTEXT_WITH_COND_FROM_RS: {
+  
+    my ($mega_lamer) = $restricted_art_rs->populate([
+      {
+        name => 'Mega Lamer',
+      },
+    ]);
+
+    ## Did it use the condition in the resultset?
+    cmp_ok( $mega_lamer->rank, '==', 42, "Got Correct rank for result object");
+  } 
+
+  VOID_CONTEXT_WITH_COND_FROM_RS: {
+  
+    $restricted_art_rs->populate([
+      {
+        name => 'VOID Mega Lamer',
+      },
+    ]);
+
+    my $mega_lamer = $art_rs->search({
+      name => 'VOID Mega Lamer'
+    })->first;
+    
+    ## Did it use the condition in the resultset?
+    cmp_ok( $mega_lamer->rank, '==', 42, "Got Correct rank for result object");
+  } 
+}
+
+done_testing;

Modified: DBIx-Class/0.08/branches/prefetch/t/104view.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/104view.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/104view.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -1,5 +1,5 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
 use Test::Exception;
@@ -8,8 +8,6 @@
 
 my $schema = DBICTest->init_schema();
 
-plan tests => 2;
-
 ## Real view
 my $cds_rs_2000 = $schema->resultset('CD')->search( { year => 2000 });
 my $year2kcds_rs = $schema->resultset('Year2000CDs');
@@ -24,5 +22,50 @@
 is($cds_rs_1999->count, $year1999cds_rs->count, 'View Year1999CDs sees all CDs in year 1999');
 
 
+# Test if relationships work correctly
+is_deeply (
+  [
+    $schema->resultset('Year1999CDs')->search (
+      {},
+      {
+        result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+        prefetch => ['artist', { tracks => [qw/cd year1999cd year2000cd/] } ],
+      },
+    )->all
+  ],
+  [
+    $schema->resultset('CD')->search (
+      { 'me.year' => '1999'},
+      {
+        result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+        prefetch => ['artist', { tracks => [qw/cd year1999cd year2000cd/] } ],
+        columns => [qw/cdid single_track title/],   # to match the columns retrieved by the virtview
+      },
+    )->all
+  ],
+  'Prefetch over virtual view gives expected result',
+);
 
+is_deeply (
+  [
+    $schema->resultset('Year2000CDs')->search (
+      {},
+      {
+        result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+        prefetch => ['artist', { tracks => [qw/cd year1999cd year2000cd/] } ],
+      },
+    )->all
+  ],
+  [
+    $schema->resultset('CD')->search (
+      { 'me.year' => '2000'},
+      {
+        result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+        prefetch => ['artist', { tracks => [qw/cd year1999cd year2000cd/] } ],
+      },
+    )->all
+  ],
+  'Prefetch over regular view gives expected result',
+);
 
+done_testing;

Modified: DBIx-Class/0.08/branches/prefetch/t/60core.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/60core.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/60core.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -1,5 +1,5 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
 use Test::Exception;
@@ -65,7 +65,7 @@
 
 is(@art, 2, 'And then there were two');
 
-ok(!$art->in_storage, "It knows it's dead");
+is($art->in_storage, 0, "It knows it's dead");
 
 dies_ok ( sub { $art->delete }, "Can't delete twice");
 
@@ -144,7 +144,7 @@
   });
 
   is($new_obj->name, 'find_or_new', 'find_or_new: instantiated a new artist');
-  ok(! $new_obj->in_storage, 'new artist is not in storage');
+  is($new_obj->in_storage, 0, 'new artist is not in storage');
 }
 
 my $cd = $schema->resultset("CD")->find(1);

Modified: DBIx-Class/0.08/branches/prefetch/t/71mysql.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/71mysql.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/71mysql.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -25,7 +25,7 @@
 
 $dbh->do("DROP TABLE IF EXISTS cd;");
 
-$dbh->do("CREATE TABLE cd (cdid INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, artist INTEGER, title TEXT, year INTEGER, genreid INTEGER, single_track INTEGER);");
+$dbh->do("CREATE TABLE cd (cdid INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, artist INTEGER, title TEXT, year DATE, genreid INTEGER, single_track INTEGER);");
 
 $dbh->do("DROP TABLE IF EXISTS producer;");
 
@@ -160,8 +160,6 @@
 
     my $type_info = $schema->storage->columns_info_for('artist');
     is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
-
-
 }
 
 my $cd = $schema->resultset ('CD')->create ({});
@@ -227,4 +225,61 @@
       => 'Nothing Found!';
 }
 
+ZEROINSEARCH: {
+  my $cds_per_year = {
+    2001 => 2,
+    2002 => 1,
+    2005 => 3,
+  };
+
+  my $rs = $schema->resultset ('CD');
+  $rs->delete;
+  for my $y (keys %$cds_per_year) {
+    for my $c (1 .. $cds_per_year->{$y} ) {
+      $rs->create ({ title => "CD $y-$c", artist => 1, year => "$y-01-01" });
+    }
+  }
+
+  is ($rs->count, 6, 'CDs created successfully');
+
+  $rs = $rs->search ({}, {
+    select => [ {year => 'year'} ], as => ['y'], distinct => 1, order_by => 'year',
+  });
+
+  is_deeply (
+    [ $rs->get_column ('y')->all ],
+    [ sort keys %$cds_per_year ],
+    'Years group successfully',
+  );
+
+  $rs->create ({ artist => 1, year => '0-1-1', title => 'Jesus Rap' });
+
+  is_deeply (
+    [ $rs->get_column ('y')->all ],
+    [ 0, sort keys %$cds_per_year ],
+    'Zero-year groups successfully',
+  );
+
+  # convoluted search taken verbatim from the mailing list
+  my $restrict_rs = $rs->search({ -and => [
+    year => { '!=', 0 },
+    year => { '!=', undef }
+  ]});
+
+  is_deeply (
+    [ $restrict_rs->get_column('y')->all ],
+    [ $rs->get_column ('y')->all ],
+    'Zero year was correctly excluded from resultset',
+  );
+}
+
+## If find() is the first query after connect()
+## DBI::Storage::sql_maker() will be called before
+## _determine_driver() and so the ::SQLAHacks class for MySQL
+## will not be used
+
+my $schema2 = DBICTest::Schema->connect($dsn, $user, $pass);
+$schema2->resultset("Artist")->find(4);
+isa_ok($schema2->storage->sql_maker, 'DBIx::Class::SQLAHacks::MySQL');
+
 done_testing;

Modified: DBIx-Class/0.08/branches/prefetch/t/72pg.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/72pg.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/72pg.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -472,6 +472,7 @@
 
 my @eapk_schemas;
 BEGIN{ @eapk_schemas = map "dbic_apk_$_", 0..5 }
+my %seqs; #< hash of schema.table.col => currval of its (DBIC) primary key sequence
 
 sub run_extended_apk_tests {
   my $schema = shift;
@@ -489,10 +490,18 @@
         for @eapk_schemas;
 
     $dbh->do("CREATE SEQUENCE $eapk_schemas[5].fooseq");
+    $dbh->do("SELECT setval('$eapk_schemas[5].fooseq',400)");
+    $seqs{"$eapk_schemas[1].apk.id2"} = 400;
+
     $dbh->do("CREATE SEQUENCE $eapk_schemas[4].fooseq");
+    $dbh->do("SELECT setval('$eapk_schemas[4].fooseq',300)");
+    $seqs{"$eapk_schemas[3].apk.id2"} = 300;
+
     $dbh->do("CREATE SEQUENCE $eapk_schemas[3].fooseq");
+    $dbh->do("SELECT setval('$eapk_schemas[3].fooseq',200)");
+    $seqs{"$eapk_schemas[4].apk.id2"} = 200;
 
-    $dbh->do("SET search_path = ".join ',', @eapk_schemas );
+    $dbh->do("SET search_path = ".join ',', reverse @eapk_schemas );
   });
 
   # clear our search_path cache
@@ -519,12 +528,14 @@
                qualify_table => 4,
              );
 
+  eapk_poke( $schema );
   eapk_poke( $schema, 0 );
   eapk_poke( $schema, 2 );
   eapk_poke( $schema, 4 );
   eapk_poke( $schema, 1 );
   eapk_poke( $schema, 0 );
   eapk_poke( $schema, 1 );
+  eapk_poke( $schema );
   eapk_poke( $schema, 4 );
   eapk_poke( $schema, 3 );
   eapk_poke( $schema, 1 );
@@ -538,8 +549,6 @@
 # do a DBIC create on the apk table in the given schema number (which is an
 # index of @eapk_schemas)
 
-my %seqs; #< sanity-check hash of schema.table.col => currval of its sequence
-
 sub eapk_poke {
   my ($s, $schema_num) = @_;
 
@@ -547,7 +556,7 @@
       ? $eapk_schemas[$schema_num]
       : '';
 
-  my $schema_name_actual = $schema_name || eapk_get_search_path($s)->[0];
+  my $schema_name_actual = $schema_name || eapk_find_visible_schema($s);
 
   $s->source('ExtAPK')->name($schema_name ? $schema_name.'.apk' : 'apk');
   #< clear sequence name cache
@@ -558,12 +567,13 @@
   lives_ok {
     my $new;
     for my $inc (1,2,3) {
-      $new = $schema->resultset('ExtAPK')->create({});
+      $new = $schema->resultset('ExtAPK')->create({ id1 => 1});
       my $proper_seqval = ++$seqs{"$schema_name_actual.apk.id2"};
       is( $new->id2, $proper_seqval, "$schema_name_actual.apk.id2 correct inc $inc" )
           or eapk_seq_diag($s,$schema_name);
       $new->discard_changes;
-      for my $id (grep $_ ne 'id2', @eapk_id_columns) {
+      is( $new->id1, 1 );
+      for my $id ('id3','id4') {
         my $proper_seqval = ++$seqs{"$schema_name_actual.apk.$id"};
         is( $new->$id, $proper_seqval, "$schema_name_actual.apk.$id correct inc $inc" )
             or eapk_seq_diag($s,$schema_name);
@@ -577,7 +587,7 @@
 # class
 sub eapk_seq_diag {
     my $s = shift;
-    my $schema = shift || eapk_get_search_path($s)->[0];
+    my $schema = shift || eapk_find_visible_schema($s);
 
     diag "$schema.apk sequences: ",
         join(', ',
@@ -633,13 +643,13 @@
         local $_[1]->{Warn} = 0;
 
         my $id_def = $a{nextval}
-            ? "integer primary key not null default nextval('$a{nextval}'::regclass)"
-            : 'serial primary key';
+            ? "integer not null default nextval('$a{nextval}'::regclass)"
+            : 'serial';
         $dbh->do(<<EOS);
 CREATE TABLE $table_name (
   id1 serial
   , id2 $id_def
-  , id3 serial
+  , id3 serial primary key
   , id4 serial
 )
 EOS
@@ -667,3 +677,19 @@
 
     });
 }
+
+sub eapk_find_visible_schema {
+    my ($s) = @_;
+
+    my ($schema) =
+        $s->storage->dbh_do(sub {
+            $_[1]->selectrow_array(<<EOS);
+SELECT n.nspname
+FROM pg_catalog.pg_namespace n
+JOIN pg_catalog.pg_class c ON c.relnamespace = n.oid
+WHERE c.relname = 'apk'
+  AND pg_catalog.pg_table_is_visible(c.oid)
+EOS
+        });
+    return $schema;
+}

Modified: DBIx-Class/0.08/branches/prefetch/t/73oracle.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/73oracle.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/73oracle.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -26,7 +26,7 @@
 }
 
 use strict;
-use warnings;  
+use warnings;
 
 use Test::Exception;
 use Test::More;
@@ -40,7 +40,7 @@
   ' as well as following sequences: \'pkid1_seq\', \'pkid2_seq\' and \'nonpkid_seq\''
   unless ($dsn && $user && $pass);
 
-plan tests => 35;
+plan tests => 36;
 
 DBICTest::Schema->load_classes('ArtistFQN');
 my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
@@ -49,6 +49,7 @@
 
 eval {
   $dbh->do("DROP SEQUENCE artist_seq");
+  $dbh->do("DROP SEQUENCE cd_seq");
   $dbh->do("DROP SEQUENCE pkid1_seq");
   $dbh->do("DROP SEQUENCE pkid2_seq");
   $dbh->do("DROP SEQUENCE nonpkid_seq");
@@ -58,6 +59,7 @@
   $dbh->do("DROP TABLE track");
 };
 $dbh->do("CREATE SEQUENCE artist_seq START WITH 1 MAXVALUE 999999 MINVALUE 0");
+$dbh->do("CREATE SEQUENCE cd_seq START WITH 1 MAXVALUE 999999 MINVALUE 0");
 $dbh->do("CREATE SEQUENCE pkid1_seq START WITH 1 MAXVALUE 999999 MINVALUE 0");
 $dbh->do("CREATE SEQUENCE pkid2_seq START WITH 10 MAXVALUE 999999 MINVALUE 0");
 $dbh->do("CREATE SEQUENCE nonpkid_seq START WITH 20 MAXVALUE 999999 MINVALUE 0");
@@ -67,6 +69,7 @@
 $dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12), position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at DATE, small_dt DATE)");
 
 $dbh->do("ALTER TABLE artist ADD (CONSTRAINT artist_pk PRIMARY KEY (artistid))");
+$dbh->do("ALTER TABLE cd ADD (CONSTRAINT cd_pk PRIMARY KEY (cdid))");
 $dbh->do("ALTER TABLE sequence_test ADD (CONSTRAINT sequence_test_constraint PRIMARY KEY (pkid1, pkid2))");
 $dbh->do(qq{
   CREATE OR REPLACE TRIGGER artist_insert_trg
@@ -80,6 +83,18 @@
     END IF;
   END;
 });
+$dbh->do(qq{
+  CREATE OR REPLACE TRIGGER cd_insert_trg
+  BEFORE INSERT ON cd
+  FOR EACH ROW
+  BEGIN
+    IF :new.cdid IS NULL THEN
+      SELECT cd_seq.nextval
+      INTO :new.cdid
+      FROM DUAL;
+    END IF;
+  END;
+});
 
 {
     # Swiped from t/bindtype_columns.t to avoid creating my own Resultset.
@@ -88,7 +103,7 @@
     eval { $dbh->do('DROP TABLE bindtype_test') };
 
     $dbh->do(qq[
-        CREATE TABLE bindtype_test 
+        CREATE TABLE bindtype_test
         (
             id              integer      NOT NULL   PRIMARY KEY,
             bytea           integer      NULL,
@@ -108,13 +123,15 @@
 my $new = $schema->resultset('Artist')->create({ name => 'foo' });
 is($new->artistid, 1, "Oracle Auto-PK worked");
 
+my $cd = $schema->resultset('CD')->create({ artist => 1, title => 'EP C', year => '2003' });
+is($cd->cdid, 1, "Oracle Auto-PK worked - using scalar ref as table name");
+
 # test again with fully-qualified table name
 $new = $schema->resultset('ArtistFQN')->create( { name => 'bar' } );
 is( $new->artistid, 2, "Oracle Auto-PK worked with fully-qualified tablename" );
 
 # test join with row count ambiguity
 
-my $cd = $schema->resultset('CD')->create({ cdid => 1, artist => 1, title => 'EP C', year => '2003' });
 my $track = $schema->resultset('Track')->create({ trackid => 1, cd => 1,
     position => 1, title => 'Track1' });
 my $tjoin = $schema->resultset('Track')->search({ 'me.title' => 'Track1'},
@@ -149,7 +166,7 @@
 
 $tcount = $schema->resultset('Track')->search(
   {},
-  { 
+  {
      group_by => [ qw/position title/ ]
   }
 );
@@ -186,7 +203,10 @@
 my $st = $schema->resultset('SequenceTest')->create({ name => 'foo', pkid1 => 55 });
 is($st->pkid1, 55, "Oracle Auto-PK without trigger: First primary key set manually");
 
-{
+SKIP: {
+        skip 'buggy BLOB support in DBD::Oracle 1.23', 8
+          if $DBD::Oracle::VERSION == 1.23;
+
 	my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
 	$binstr{'large'} = $binstr{'small'} x 1024;
 
@@ -212,6 +232,7 @@
 END {
     if($schema && ($dbh = $schema->storage->dbh)) {
         $dbh->do("DROP SEQUENCE artist_seq");
+        $dbh->do("DROP SEQUENCE cd_seq");
         $dbh->do("DROP SEQUENCE pkid1_seq");
         $dbh->do("DROP SEQUENCE pkid2_seq");
         $dbh->do("DROP SEQUENCE nonpkid_seq");

Modified: DBIx-Class/0.08/branches/prefetch/t/746mssql.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/746mssql.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/746mssql.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -143,14 +143,11 @@
     my ($storage, $dbh) = @_;
     eval { $dbh->do("DROP TABLE money_test") };
     $dbh->do(<<'SQL');
-
 CREATE TABLE money_test (
    id INT IDENTITY PRIMARY KEY,
    amount MONEY NULL
 )
-
 SQL
-
 });
 
 my $rs = $schema->resultset('Money');

Modified: DBIx-Class/0.08/branches/prefetch/t/746sybase.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/746sybase.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/746sybase.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -1,5 +1,6 @@
 use strict;
-use warnings;
+use warnings;
+no warnings 'uninitialized';
 
 use Test::More;
 use Test::Exception;
@@ -8,84 +9,580 @@
 
 my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};
 
-plan skip_all => 'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test'
-  unless ($dsn && $user);
+my $TESTS = 63 + 2;
 
-plan tests => 13;
+if (not ($dsn && $user)) {
+  plan skip_all =>
+    'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' .
+    "\nWarning: This test drops and creates the tables " .
+    "'artist', 'money_test' and 'bindtype_test'";
+} else {
+  plan tests => $TESTS*2 + 1;
+}
 
-my $schema = DBICTest::Schema->connect($dsn, $user, $pass, {AutoCommit => 1});
+my @storage_types = (
+  'DBI::Sybase::ASE',
+  'DBI::Sybase::ASE::NoBindVars',
+);
+eval "require DBIx::Class::Storage::$_;" for @storage_types;
 
-# start disconnected to test reconnection
-$schema->storage->ensure_connected;
-$schema->storage->_dbh->disconnect;
+my $schema;
+my $storage_idx = -1;
 
-isa_ok( $schema->storage, 'DBIx::Class::Storage::DBI::Sybase' );
+sub get_schema {
+  DBICTest::Schema->connect($dsn, $user, $pass, {
+    on_connect_call => [
+      [ blob_setup => log_on_update => 1 ], # this is a safer option
+    ],
+  });
+}
 
-my $dbh;
-lives_ok (sub {
-  $dbh = $schema->storage->dbh;
-}, 'reconnect works');
+my $ping_count = 0;
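+# wrap ::ASE::_ping with a counting shim (goto preserves the original
+# behavior) so the 'no pings' test at the end can verify that nothing
+# triggered an implicit connection check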
+{
+  my $ping = DBIx::Class::Storage::DBI::Sybase::ASE->can('_ping');
+  *DBIx::Class::Storage::DBI::Sybase::ASE::_ping = sub {
+    $ping_count++;
+    goto $ping;
+  };
+}
 
-$schema->storage->dbh_do (sub {
-    my ($storage, $dbh) = @_;
-    eval { $dbh->do("DROP TABLE artist") };
-    $dbh->do(<<'SQL');
+for my $storage_type (@storage_types) {
+  $storage_idx++;
 
+  unless ($storage_type eq 'DBI::Sybase::ASE') { # autodetect
+    DBICTest::Schema->storage_type("::$storage_type");
+  }
+
+  $schema = get_schema();
+
+  $schema->storage->ensure_connected;
+
+  if ($storage_idx == 0 &&
+      $schema->storage->isa('DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars')) {
+# no placeholders in this version of Sybase or DBD::Sybase (or using FreeTDS)
+      my $tb = Test::More->builder;
+      $tb->skip('no placeholders') for 1..$TESTS;
+      next;
+  }
+
+  isa_ok( $schema->storage, "DBIx::Class::Storage::$storage_type" );
+
+  $schema->storage->_dbh->disconnect;
+  lives_ok (sub { $schema->storage->dbh }, 'reconnect works');
+
+  $schema->storage->dbh_do (sub {
+      my ($storage, $dbh) = @_;
+      eval { $dbh->do("DROP TABLE artist") };
+      $dbh->do(<<'SQL');
 CREATE TABLE artist (
-   artistid INT IDENTITY NOT NULL,
+   artistid INT IDENTITY PRIMARY KEY,
    name VARCHAR(100),
    rank INT DEFAULT 13 NOT NULL,
-   charfield CHAR(10) NULL,
-   primary key(artistid)
+   charfield CHAR(10) NULL
 )
-
 SQL
+  });
 
-});
+  my %seen_id;
 
-my %seen_id;
+# so we start unconnected
+  $schema->storage->disconnect;
 
-# fresh $schema so we start unconnected
-$schema = DBICTest::Schema->connect($dsn, $user, $pass, {AutoCommit => 1});
-
 # test primary key handling
-my $new = $schema->resultset('Artist')->create({ name => 'foo' });
-ok($new->artistid > 0, "Auto-PK worked");
+  my $new = $schema->resultset('Artist')->create({ name => 'foo' });
+  ok($new->artistid > 0, "Auto-PK worked");
 
-$seen_id{$new->artistid}++;
+  $seen_id{$new->artistid}++;
 
-# test LIMIT support
-for (1..6) {
+# check redispatch to storage-specific insert when auto-detected storage
+  if ($storage_type eq 'DBI::Sybase::ASE') {
+    DBICTest::Schema->storage_type('::DBI');
+    $schema = get_schema();
+  }
+
+  $new = $schema->resultset('Artist')->create({ name => 'Artist 1' });
+  is ( $seen_id{$new->artistid}, undef, 'id for Artist 1 is unique' );
+  $seen_id{$new->artistid}++;
+
+# inserts happen in a txn, so we make sure it still works inside a txn too
+  $schema->txn_begin;
+
+  for (2..6) {
     $new = $schema->resultset('Artist')->create({ name => 'Artist ' . $_ });
     is ( $seen_id{$new->artistid}, undef, "id for Artist $_ is unique" );
     $seen_id{$new->artistid}++;
-}
+  }
 
-my $it;
+  $schema->txn_commit;
 
-$it = $schema->resultset('Artist')->search( {}, {
+# test simple count
+  is ($schema->resultset('Artist')->count, 7, 'count(*) of whole table ok');
+
+# test LIMIT support
+  my $it = $schema->resultset('Artist')->search({
+    artistid => { '>' => 0 }
+  }, {
     rows => 3,
     order_by => 'artistid',
-});
+  });
 
-TODO: {
-    local $TODO = 'Sybase is very very fucked in the limit department';
+  is( $it->count, 3, "LIMIT count ok" );
 
-    is( $it->count, 3, "LIMIT count ok" );
-}
+  is( $it->next->name, "foo", "iterator->next ok" );
+  $it->next;
+  is( $it->next->name, "Artist 2", "iterator->next ok" );
+  is( $it->next, undef, "next past end of resultset ok" );
 
-# The iterator still works correctly with rows => 3, even though the sql is
-# fucked, very interesting.
+# now try with offset
+  $it = $schema->resultset('Artist')->search({}, {
+    rows => 3,
+    offset => 3,
+    order_by => 'artistid',
+  });
 
-is( $it->next->name, "foo", "iterator->next ok" );
-$it->next;
-is( $it->next->name, "Artist 2", "iterator->next ok" );
-is( $it->next, undef, "next past end of resultset ok" );
+  is( $it->count, 3, "LIMIT with offset count ok" );
 
+  is( $it->next->name, "Artist 3", "iterator->next ok" );
+  $it->next;
+  is( $it->next->name, "Artist 5", "iterator->next ok" );
+  is( $it->next, undef, "next past end of resultset ok" );
 
+# now try a grouped count
+  $schema->resultset('Artist')->create({ name => 'Artist 6' })
+    for (1..6);
+
+  $it = $schema->resultset('Artist')->search({}, {
+    group_by => 'name'
+  });
+
+  is( $it->count, 7, 'COUNT of GROUP_BY ok' );
+
+# do an IDENTITY_INSERT
+  {
+    no warnings 'redefine';
+
+    my @debug_out;
+    local $schema->storage->{debug} = 1;
+    local $schema->storage->debugobj->{callback} = sub {
+      push @debug_out, $_[1];
+    };
+
+    my $txn_used = 0;
+    my $txn_commit = \&DBIx::Class::Storage::DBI::txn_commit;
+    local *DBIx::Class::Storage::DBI::txn_commit = sub {
+      $txn_used = 1;
+      goto &$txn_commit;
+    };
+
+    $schema->resultset('Artist')
+      ->create({ artistid => 999, name => 'mtfnpy' });
+
+    ok((grep /IDENTITY_INSERT/i, @debug_out), 'IDENTITY_INSERT used');
+
+    SKIP: {
+      skip 'not testing lack of txn on IDENTITY_INSERT with NoBindVars', 1
+        if $storage_type =~ /NoBindVars/i;
+
+      is $txn_used, 0, 'no txn on insert with IDENTITY_INSERT';
+    }
+  }
+
+# do an IDENTITY_UPDATE
+  {
+    my @debug_out;
+    local $schema->storage->{debug} = 1;
+    local $schema->storage->debugobj->{callback} = sub {
+      push @debug_out, $_[1];
+    };
+
+    lives_and {
+      $schema->resultset('Artist')
+        ->find(999)->update({ artistid => 555 });
+      ok((grep /IDENTITY_UPDATE/i, @debug_out));
+    } 'IDENTITY_UPDATE used';
+    $ping_count-- if $@;
+  }
+
+  my $bulk_rs = $schema->resultset('Artist')->search({
+    name => { -like => 'bulk artist %' }
+  });
+
+# test insert_bulk using populate.
+  SKIP: {
+    skip 'insert_bulk not supported', 4
+      if $storage_type =~ /NoBindVars/i;
+
+    lives_ok {
+      $schema->resultset('Artist')->populate([
+        {
+          name => 'bulk artist 1',
+          charfield => 'foo',
+        },
+        {
+          name => 'bulk artist 2',
+          charfield => 'foo',
+        },
+        {
+          name => 'bulk artist 3',
+          charfield => 'foo',
+        },
+      ]);
+    } 'insert_bulk via populate';
+
+    is $bulk_rs->count, 3, 'correct number inserted via insert_bulk';
+
+    is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
+      'column set correctly via insert_bulk');
+
+    my %bulk_ids;
+    @bulk_ids{map $_->artistid, $bulk_rs->all} = ();
+
+    is ((scalar keys %bulk_ids), 3,
+      'identities generated correctly in insert_bulk');
+
+    $bulk_rs->delete;
+  }
+
+# make sure insert_bulk works a second time on the same connection
+  SKIP: {
+    skip 'insert_bulk not supported', 3
+      if $storage_type =~ /NoBindVars/i;
+
+    lives_ok {
+      $schema->resultset('Artist')->populate([
+        {
+          name => 'bulk artist 1',
+          charfield => 'bar',
+        },
+        {
+          name => 'bulk artist 2',
+          charfield => 'bar',
+        },
+        {
+          name => 'bulk artist 3',
+          charfield => 'bar',
+        },
+      ]);
+    } 'insert_bulk via populate called a second time';
+
+    is $bulk_rs->count, 3,
+      'correct number inserted via insert_bulk';
+
+    is ((grep $_->charfield eq 'bar', $bulk_rs->all), 3,
+      'column set correctly via insert_bulk');
+
+    $bulk_rs->delete;
+  }
+
+# test invalid insert_bulk (missing required column)
+#
+# There should be a rollback, reconnect and the next valid insert_bulk should
+# succeed.
+  throws_ok {
+    $schema->resultset('Artist')->populate([
+      {
+        charfield => 'foo',
+      }
+    ]);
+  } qr/no value or default|does not allow null|placeholders/i,
+# The second pattern is the error from fallback to regular array insert on
+# incompatible charset.
+# The third is for ::NoBindVars with no syb_has_blk.
+  'insert_bulk with missing required column throws error';
+
+# now test insert_bulk with IDENTITY_INSERT
+  SKIP: {
+    skip 'insert_bulk not supported', 3
+      if $storage_type =~ /NoBindVars/i;
+
+    lives_ok {
+      $schema->resultset('Artist')->populate([
+        {
+          artistid => 2001,
+          name => 'bulk artist 1',
+          charfield => 'foo',
+        },
+        {
+          artistid => 2002,
+          name => 'bulk artist 2',
+          charfield => 'foo',
+        },
+        {
+          artistid => 2003,
+          name => 'bulk artist 3',
+          charfield => 'foo',
+        },
+      ]);
+    } 'insert_bulk with IDENTITY_INSERT via populate';
+
+    is $bulk_rs->count, 3,
+      'correct number inserted via insert_bulk with IDENTITY_INSERT';
+
+    is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
+      'column set correctly via insert_bulk with IDENTITY_INSERT');
+
+    $bulk_rs->delete;
+  }
+
+# test correlated subquery
+  my $subq = $schema->resultset('Artist')->search({ artistid => { '>' => 3 } })
+    ->get_column('artistid')
+    ->as_query;
+  my $subq_rs = $schema->resultset('Artist')->search({
+    artistid => { -in => $subq }
+  });
+  is $subq_rs->count, 11, 'correlated subquery';
+
+# mostly stolen from the blob stuff Nniuq wrote for t/73oracle.t
+  SKIP: {
+    skip 'TEXT/IMAGE support does not work with FreeTDS', 22
+      if $schema->storage->using_freetds;
+
+    my $dbh = $schema->storage->_dbh;
+    {
+      local $SIG{__WARN__} = sub {};
+      eval { $dbh->do('DROP TABLE bindtype_test') };
+
+      $dbh->do(qq[
+        CREATE TABLE bindtype_test 
+        (
+          id    INT   IDENTITY PRIMARY KEY,
+          bytea IMAGE NULL,
+          blob  IMAGE NULL,
+          clob  TEXT  NULL
+        )
+      ],{ RaiseError => 1, PrintError => 0 });
+    }
+
+    my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
+    $binstr{'large'} = $binstr{'small'} x 1024;
+
+    my $maxloblen = length $binstr{'large'};
+    
+    if (not $schema->storage->using_freetds) {
+      $dbh->{'LongReadLen'} = $maxloblen * 2;
+    } else {
+      $dbh->do("set textsize ".($maxloblen * 2));
+    }
+
+    my $rs = $schema->resultset('BindType');
+    my $last_id;
+
+    foreach my $type (qw(blob clob)) {
+      foreach my $size (qw(small large)) {
+        no warnings 'uninitialized';
+
+        my $created;
+        lives_ok {
+          $created = $rs->create( { $type => $binstr{$size} } )
+        } "inserted $size $type without dying";
+
+        $last_id = $created->id if $created;
+
+        lives_and {
+          ok($rs->find($last_id)->$type eq $binstr{$size})
+        } "verified inserted $size $type";
+      }
+    }
+
+    $rs->delete;
+
+    # blob insert with explicit PK
+    # also a good opportunity to test IDENTITY_INSERT
+    lives_ok {
+      $rs->create( { id => 1, blob => $binstr{large} } )
+    } 'inserted large blob without dying with manual PK';
+
+    lives_and {
+      ok($rs->find(1)->blob eq $binstr{large})
+    } 'verified inserted large blob with manual PK';
+
+    # try a blob update
+    my $new_str = $binstr{large} . 'mtfnpy';
+
+    # check redispatch to storage-specific update when auto-detected storage
+    if ($storage_type eq 'DBI::Sybase::ASE') {
+      DBICTest::Schema->storage_type('::DBI');
+      $schema = get_schema();
+    }
+
+    lives_ok {
+      $rs->search({ id => 1 })->update({ blob => $new_str })
+    } 'updated blob successfully';
+
+    lives_and {
+      ok($rs->find(1)->blob eq $new_str)
+    } 'verified updated blob';
+
+    # try a blob update with IDENTITY_UPDATE
+    lives_and {
+      $new_str = $binstr{large} . 'hlagh';
+      $rs->find(1)->update({ id => 999, blob => $new_str });
+      ok($rs->find(999)->blob eq $new_str);
+    } 'verified updated blob with IDENTITY_UPDATE';
+
+    ## try multi-row blob update
+    # first insert some blobs
+    $new_str = $binstr{large} . 'foo';
+    lives_and {
+      $rs->delete;
+      $rs->create({ blob => $binstr{large} }) for (1..2);
+      $rs->update({ blob => $new_str });
+      is((grep $_->blob eq $new_str, $rs->all), 2);
+    } 'multi-row blob update';
+
+    $rs->delete;
+
+    # now try insert_bulk with blobs and only blobs
+    $new_str = $binstr{large} . 'bar';
+    lives_ok {
+      $rs->populate([
+        {
+          bytea => 1,
+          blob => $binstr{large},
+          clob => $new_str,
+        },
+        {
+          bytea => 1,
+          blob => $binstr{large},
+          clob => $new_str,
+        },
+      ]);
+    } 'insert_bulk with blobs does not die';
+
+    is((grep $_->blob eq $binstr{large}, $rs->all), 2,
+      'IMAGE column set correctly via insert_bulk');
+
+    is((grep $_->clob eq $new_str, $rs->all), 2,
+      'TEXT column set correctly via insert_bulk');
+
+    # now try insert_bulk with blobs and a non-blob which also happens to be an
+    # identity column
+    SKIP: {
+      skip 'no insert_bulk without placeholders', 4
+        if $storage_type =~ /NoBindVars/i;
+
+      $rs->delete;
+      $new_str = $binstr{large} . 'bar';
+      lives_ok {
+        $rs->populate([
+          {
+            id => 1,
+            bytea => 1,
+            blob => $binstr{large},
+            clob => $new_str,
+          },
+          {
+            id => 2,
+            bytea => 1,
+            blob => $binstr{large},
+            clob => $new_str,
+          },
+        ]);
+      } 'insert_bulk with blobs and explicit identity does NOT die';
+
+      is((grep $_->blob eq $binstr{large}, $rs->all), 2,
+        'IMAGE column set correctly via insert_bulk with identity');
+
+      is((grep $_->clob eq $new_str, $rs->all), 2,
+        'TEXT column set correctly via insert_bulk with identity');
+
+      is_deeply [ map $_->id, $rs->all ], [ 1,2 ],
+        'explicit identities set correctly via insert_bulk with blobs';
+    }
+
+    lives_and {
+      $rs->delete;
+      $rs->create({ blob => $binstr{large} }) for (1..2);
+      $rs->update({ blob => undef });
+      is((grep !defined($_->blob), $rs->all), 2);
+    } 'blob update to NULL';
+  }
+
+# test MONEY column support (and some other misc. stuff)
+  $schema->storage->dbh_do (sub {
+      my ($storage, $dbh) = @_;
+      eval { $dbh->do("DROP TABLE money_test") };
+      $dbh->do(<<'SQL');
+CREATE TABLE money_test (
+   id INT IDENTITY PRIMARY KEY,
+   amount MONEY DEFAULT $999.99 NULL
+)
+SQL
+  });
+
+  my $rs = $schema->resultset('Money');
+
+# test insert with defaults
+  lives_and {
+    $rs->create({});
+    is((grep $_->amount == 999.99, $rs->all), 1);
+  } 'insert with all defaults works';
+  $rs->delete;
+
+# test insert transaction when there's an active cursor
+  {
+    my $artist_rs = $schema->resultset('Artist');
+    $artist_rs->first;
+    lives_ok {
+      my $row = $schema->resultset('Money')->create({ amount => 100 });
+      $row->delete;
+    } 'inserted a row with an active cursor';
+    $ping_count-- if $@; # dbh_do calls ->connected
+  }
+
+# test insert in an outer transaction when there's an active cursor
+  TODO: {
+    local $TODO = 'this should work once we have eager cursors';
+
+# clear state, or we get a deadlock on $row->delete
+# XXX figure out why this happens
+    $schema->storage->disconnect;
+
+    lives_ok {
+      $schema->txn_do(sub {
+        my $artist_rs = $schema->resultset('Artist');
+        $artist_rs->first;
+        my $row = $schema->resultset('Money')->create({ amount => 100 });
+        $row->delete;
+      });
+    } 'inserted a row with an active cursor in outer txn';
+    $ping_count-- if $@; # dbh_do calls ->connected
+  }
+
+# Now test money values.
+  my $row;
+  lives_ok {
+    $row = $rs->create({ amount => 100 });
+  } 'inserted a money value';
+
+  is eval { $rs->find($row->id)->amount }, 100, 'money value round-trip';
+
+  lives_ok {
+    $row->update({ amount => 200 });
+  } 'updated a money value';
+
+  is eval { $rs->find($row->id)->amount },
+    200, 'updated money value round-trip';
+
+  lives_ok {
+    $row->update({ amount => undef });
+  } 'updated a money value to NULL';
+
+  my $null_amount = eval { $rs->find($row->id)->amount };
+  ok(
+    ( (! defined $null_amount) && (not $@) ),
+    'updated money value to NULL round-trip'
+  );
+  diag $@ if $@;
+}
+
+is $ping_count, 0, 'no pings';
+
 # clean up our mess
 END {
-    my $dbh = eval { $schema->storage->_dbh };
-    $dbh->do('DROP TABLE artist') if $dbh;
+  if (my $dbh = eval { $schema->storage->_dbh }) {
+    eval { $dbh->do("DROP TABLE $_") }
+      for qw/artist bindtype_test money_test/;
+  }
 }
-

Added: DBIx-Class/0.08/branches/prefetch/t/747mssql_ado.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/747mssql_ado.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/prefetch/t/747mssql_ado.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -0,0 +1,70 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MSSQL_ADO_${_}" } qw/DSN USER PASS/};
+
+plan skip_all => 'Set $ENV{DBICTEST_MSSQL_ADO_DSN}, _USER and _PASS to run this test'
+  unless ($dsn && $user);
+
+plan tests => 12;
+
+my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
+$schema->storage->ensure_connected;
+
+isa_ok( $schema->storage, 'DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server' );
+
+$schema->storage->dbh_do (sub {
+    my ($storage, $dbh) = @_;
+    eval { $dbh->do("DROP TABLE artist") };
+    $dbh->do(<<'SQL');
+CREATE TABLE artist (
+   artistid INT IDENTITY NOT NULL,
+   name VARCHAR(100),
+   rank INT NOT NULL DEFAULT '13',
+   charfield CHAR(10) NULL,
+   primary key(artistid)
+)
+SQL
+});
+
+my $new = $schema->resultset('Artist')->create({ name => 'foo' });
+ok($new->artistid > 0, 'Auto-PK worked');
+
+# make sure select works
+my $found = $schema->resultset('Artist')->search({ name => 'foo' })->first;
+is $found->artistid, $new->artistid, 'search works';
+
+# test large column list in select
+$found = $schema->resultset('Artist')->search({ name => 'foo' }, {
+  select => ['artistid', 'name', map "'foo' foo_$_", 0..50],
+  as     => ['artistid', 'name', map       "foo_$_", 0..50],
+})->first;
+is $found->artistid, $new->artistid, 'select with big column list';
+is $found->get_column('foo_50'), 'foo', 'last item in big column list';
+
+# create a few more rows
+for (1..6) {
+  $schema->resultset('Artist')->create({ name => 'Artist ' . $_ });
+}
+
+# test multiple active cursors
+my $rs1 = $schema->resultset('Artist')->search({}, { order_by => 'artistid' });
+my $rs2 = $schema->resultset('Artist')->search({}, { order_by => 'name' });
+
+while ($rs1->next) {
+  ok eval { $rs2->next }, 'multiple active cursors';
+}
+
+# clean up our mess
+END {
+  if (my $dbh = eval { $schema->storage->_dbh }) {
+    eval { $dbh->do("DROP TABLE $_") }
+      for qw/artist/;
+  }
+}
+# vim:sw=2 sts=2

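The "multiple active cursors" loop at the end of this new test is worth a note: it holds two statements open on the same connection and advances them in lock-step, which fails outright on drivers without multiple-active-statement support. The pattern in isolation (a sketch, assuming any connected schema with an Artist source):

  my $by_id   = $schema->resultset('Artist')->search({}, { order_by => 'artistid' });
  my $by_name = $schema->resultset('Artist')->search({}, { order_by => 'name' });

  while (my $row = $by_id->next) {   # first open statement
    my $other = $by_name->next;      # second statement on the same connection
  }
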
Modified: DBIx-Class/0.08/branches/prefetch/t/74mssql.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/74mssql.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/74mssql.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -18,10 +18,6 @@
 plan skip_all => 'Set $ENV{DBICTEST_MSSQL_DSN}, _USER and _PASS to run this test'
   unless ($dsn);
 
-my $TESTS = 13;
-
-plan tests => $TESTS * 2;
-
 my @storage_types = (
   'DBI::Sybase::Microsoft_SQL_Server',
   'DBI::Sybase::Microsoft_SQL_Server::NoBindVars',
@@ -29,6 +25,7 @@
 my $storage_idx = -1;
 my $schema;
 
+my $NUMBER_OF_TESTS_IN_BLOCK = 18;
 for my $storage_type (@storage_types) {
   $storage_idx++;
 
@@ -44,7 +41,7 @@
 
   if ($storage_idx == 0 && ref($schema->storage) =~ /NoBindVars\z/) {
     my $tb = Test::More->builder;
-    $tb->skip('no placeholders') for 1..$TESTS;
+    $tb->skip('no placeholders') for 1..$NUMBER_OF_TESTS_IN_BLOCK;
     next;
   }
 
@@ -133,8 +130,56 @@
 
   is $rs->find($row->id)->amount,
     undef, 'updated money value to NULL round-trip';
+
+  $rs->create({ amount => 300 }) for (1..3);
+
+  # test multiple active statements
+  lives_ok {
+    my $artist_rs = $schema->resultset('Artist');
+    while (my $row = $rs->next) {
+      my $artist = $artist_rs->next;
+    }
+    $rs->reset;
+  } 'multiple active statements';
+
+  $rs->delete;
+
+  # test simple transaction with commit
+  lives_ok {
+    $schema->txn_do(sub {
+      $rs->create({ amount => 400 });
+    });
+  } 'simple transaction';
+
+  cmp_ok $rs->first->amount, '==', 400, 'committed';
+  $rs->reset;
+
+  $rs->delete;
+
+  # test rollback
+  throws_ok {
+    $schema->txn_do(sub {
+      $rs->create({ amount => 400 });
+      die 'mtfnpy';
+    });
+  } qr/mtfnpy/, 'simple failed txn';
+
+  is $rs->first, undef, 'rolled back';
+  $rs->reset;
 }
 
+# test op-induced autoconnect
+lives_ok (sub {
+
+  my $schema =  DBICTest::Schema->clone;
+  $schema->connection($dsn, $user, $pass);
+
+  my $artist = $schema->resultset ('Artist')->search ({}, { order_by => 'artistid' })->next;
+  is ($artist->id, 1, 'Artist retrieved successfully');
+}, 'Query-induced autoconnect works');
+
+done_testing;
+
 # clean up our mess
 END {
   if (my $dbh = eval { $schema->storage->dbh }) {

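For context on the transaction tests added above: txn_do() runs its coderef inside a transaction, commits on normal return, and rolls back before rethrowing on any exception -- exactly what the throws_ok/qr/mtfnpy/ pair asserts. A minimal sketch, with $rs standing in for the Money resultset from the test:

  # commits on normal return
  $schema->txn_do(sub { $rs->create({ amount => 400 }) });

  # rolls back, then rethrows the original exception
  eval {
    $schema->txn_do(sub {
      $rs->create({ amount => 500 });
      die "mtfnpy\n";
    });
  };
  warn "rolled back: $@" if $@;
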
Modified: DBIx-Class/0.08/branches/prefetch/t/79aliasing.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/79aliasing.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/79aliasing.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -52,7 +52,7 @@
   my $cd_rs = $schema->resultset('CD')->search({ 'artist.name' => 'Caterwauler McCrae' }, { join => 'artist' });
 
   my $cd = $cd_rs->find_or_new({ title => 'Huh?', year => 2006 });
-  ok(! $cd->in_storage, 'new CD not in storage yet');
+  is($cd->in_storage, 0, 'new CD not in storage yet');
   is($cd->title, 'Huh?', 'new CD title is correct');
   is($cd->year, 2006, 'new CD year is correct');
 }

Modified: DBIx-Class/0.08/branches/prefetch/t/80unique.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/80unique.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/80unique.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -195,7 +195,7 @@
       { key => 'cd_artist_title' }
     );
 
-    ok(!$cd1->in_storage, 'CD is not in storage yet after update_or_new');
+    is($cd1->in_storage, 0, 'CD is not in storage yet after update_or_new');
     $cd1->insert;
     ok($cd1->in_storage, 'CD got added to storage after update_or_new && insert');
 

Modified: DBIx-Class/0.08/branches/prefetch/t/93single_accessor_object.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/93single_accessor_object.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/93single_accessor_object.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -48,7 +48,7 @@
 	my $artist = $schema->resultset('Artist')->create({ artistid => 666, name => 'bad religion' });
 	my $cd = $schema->resultset('CD')->create({ cdid => 187, artist => 1, title => 'how could hell be any worse?', year => 1982, genreid => undef });
 
-	ok(!defined($cd->genreid), 'genreid is NULL');
+	ok(!defined($cd->get_column('genreid')), 'genreid is NULL');  #no accessor was defined for this column
 	ok(!defined($cd->genre), 'genre accessor returns undef');
 }
 

Modified: DBIx-Class/0.08/branches/prefetch/t/94versioning.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/94versioning.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/94versioning.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -5,7 +5,6 @@
 use Test::More;
 use File::Spec;
 use File::Copy;
-use Time::HiRes qw/time sleep/;
 
 #warn "$dsn $user $pass";
 my ($dsn, $user, $pass);
@@ -16,6 +15,10 @@
   plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
     unless ($dsn);
 
+  eval { require Time::HiRes }
+    || plan skip_all => 'Test needs Time::HiRes';
+  Time::HiRes->import(qw/time sleep/);
+
   require DBIx::Class::Storage::DBI;
   plan skip_all =>
       'Test needs SQL::Translator ' . DBIx::Class::Storage::DBI->_sqlt_minimum_version

Modified: DBIx-Class/0.08/branches/prefetch/t/95sql_maker.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/95sql_maker.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/95sql_maker.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -70,8 +70,7 @@
 }
 
 # Make sure the carp/croak override in SQLA works (via SQLAHacks)
-my $file = __FILE__;
-$file = "\Q$file\E";
+my $file = quotemeta (__FILE__);
 throws_ok (sub {
   $schema->resultset ('Artist')->search ({}, { order_by => { -asc => 'stuff', -desc => 'staff' } } )->as_query;
 }, qr/$file/, 'Exception correctly croak()ed');

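The \Q...\E to quotemeta() change above is purely cosmetic; the two are equivalent ways of escaping regex metacharacters in a file path before interpolating it into a pattern. For illustration:

  my $file = quotemeta('t/95sql_maker.t');   # becomes 't\/95sql_maker\.t'
  'croaked at t/95sql_maker.t line 73' =~ /$file/ or die 'no match';
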
Modified: DBIx-Class/0.08/branches/prefetch/t/95sql_maker_quote.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/95sql_maker_quote.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/95sql_maker_quote.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -6,13 +6,6 @@
 use lib qw(t/lib);
 use DBIC::SqlMakerTest;
 
-BEGIN {
-    eval "use DBD::SQLite";
-    plan $@
-        ? ( skip_all => 'needs DBD::SQLite for testing' )
-        : ( tests => 12 );
-}
-
 use_ok('DBICTest');
 
 my $schema = DBICTest->init_schema();
@@ -235,6 +228,36 @@
 );
 
 
+($sql, @bind) = $sql_maker->select(
+  [ { me => 'cd' }                  ],
+  [qw/ me.cdid me.artist me.title  /],
+  { cdid => \['rlike ?', [cdid => 'X'] ]       },
+  { group_by => 'title', having => \['count(me.artist) > ?', [ cnt => 2] ] },
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title` FROM `cd` `me` WHERE ( `cdid` rlike ? ) GROUP BY `title` HAVING count(me.artist) > ?/,
+  [ [ cdid => 'X'], ['cnt' => '2'] ],
+  'Quoting works with where/having arrayrefrefs',
+);
+
+
+($sql, @bind) = $sql_maker->select(
+  [ { me => 'cd' }                  ],
+  [qw/ me.cdid me.artist me.title  /],
+  { cdid => \'rlike X'              },
+  { group_by => 'title', having => \'count(me.artist) > 2' },
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title` FROM `cd` `me` WHERE ( `cdid` rlike X ) GROUP BY `title` HAVING count(me.artist) > 2/,
+  [],
+  'Quoting works with where/having scalarrefs',
+);
+
+
 ($sql, @bind) = $sql_maker->update(
           'group',
           {
@@ -330,3 +353,5 @@
   q/UPDATE [group] SET [name] = ?, [order] = ?/, [ ['name' => 'Bill'], ['order' => '12'] ],
   'bracket quoted table names for UPDATE'
 );
+
+done_testing;

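The two new quoting tests cover the literal-SQL escape hatches in conditions: a scalarref is spliced into the generated SQL verbatim (no bind values), while a ref-to-an-arrayref carries placeholders together with their binds. In resultset terms (a sketch, with $rs standing in for any CD-like resultset):

  # verbatim SQL fragment, no bind values
  $rs->search({ cdid => \ 'rlike X' });

  # placeholder plus bind value travel with the fragment
  $rs->search({ cdid => \ [ 'rlike ?', [ cdid => 'X' ] ] });
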
Modified: DBIx-Class/0.08/branches/prefetch/t/count/prefetch.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/count/prefetch.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/count/prefetch.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -39,16 +39,9 @@
 
 # collapsing prefetch with distinct
 {
-  my $first_cd = $schema->resultset('Artist')->first->cds->first;
-  $first_cd->update ({
-    genreid => $first_cd->create_related (
-      genre => ({ name => 'vague genre' })
-    )->id
-  });
-
   my $rs = $schema->resultset("Artist")->search(undef, {distinct => 1})
             ->search_related('cds')->search_related('genre',
-                { 'genre.name' => { '!=', 'foo' } },
+                { 'genre.name' => 'emo' },
                 { prefetch => q(cds) },
             );
   is ($rs->all, 1, 'Correct number of objects');
@@ -60,15 +53,22 @@
       SELECT COUNT( * )
         FROM (
           SELECT genre.genreid
-            FROM artist me
-            JOIN cd cds ON cds.artist = me.artistid
+            FROM (
+              SELECT cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track
+                FROM (
+                  SELECT me.artistid, me.name, me.rank, me.charfield
+                    FROM artist me GROUP BY me.artistid, me.name, me.rank, me.charfield
+                ) me
+                LEFT JOIN cd cds ON cds.artist = me.artistid
+              GROUP BY cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track
+            ) cds
             JOIN genre genre ON genre.genreid = cds.genreid
             LEFT JOIN cd cds_2 ON cds_2.genreid = genre.genreid
-          WHERE ( genre.name != ? )
+          WHERE ( genre.name = ? )
           GROUP BY genre.genreid
         ) count_subq
     )',
-    [ [ 'genre.name' => 'foo' ] ],
+    [ [ 'genre.name' => 'emo' ] ],
   );
 }
 

Added: DBIx-Class/0.08/branches/prefetch/t/count/search_related.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/count/search_related.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/prefetch/t/count/search_related.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -0,0 +1,41 @@
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+my $cd_rs = $schema->resultset('CD')->search ({}, { rows => 1, order_by => 'cdid' });
+
+my $track_count = $cd_rs->first->tracks->count;
+
+cmp_ok ($track_count, '>', 1, 'First CD has several tracks');
+
+is ($cd_rs->search_related ('tracks')->count, $track_count, 'related->count returns correct number chained off a limited rs');
+is (scalar ($cd_rs->search_related ('tracks')->all), $track_count, 'related->all returns correct number of objects chained off a limited rs');
+
+
+my $joined_cd_rs = $cd_rs->search ({}, {
+  join => 'tracks', rows => 2, distinct => 1, having => \ 'count(tracks.trackid) > 2',
+});
+
+my $multiple_track_count = $schema->resultset('Track')->search ({
+  cd => { -in => $joined_cd_rs->get_column ('cdid')->as_query }
+})->count;
+
+
+is (
+  $joined_cd_rs->search_related ('tracks')->count,
+  $multiple_track_count,
+  'related->count returns correct number chained off a grouped rs',
+);
+is (
+  scalar ($joined_cd_rs->search_related ('tracks')->all),
+  $multiple_track_count,
+  'related->all returns correct number of objects chained off a grouped rs',
+);
+
+done_testing;

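The test above also demonstrates as_query() as a correlation tool: a resultset column's as_query returns a subselect that can sit inside an -in condition, so the expected track count is computed without materializing the CD ids client-side. The core pattern, assuming the DBICTest CD/Track sources:

  my $cd_ids = $joined_cd_rs->get_column('cdid')->as_query;  # subselect + binds
  my $count  = $schema->resultset('Track')
                 ->search({ cd => { -in => $cd_ids } })
                 ->count;
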
Modified: DBIx-Class/0.08/branches/prefetch/t/from_subquery.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/from_subquery.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/from_subquery.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -1,5 +1,5 @@
 use strict;
-use warnings FATAL => 'all';
+use warnings;
 
 use Test::More;
 

Added: DBIx-Class/0.08/branches/prefetch/t/inflate/datetime_sybase.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/inflate/datetime_sybase.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/prefetch/t/inflate/datetime_sybase.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -0,0 +1,85 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};
+
+if (not ($dsn && $user)) {
+  plan skip_all =>
+    'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' .
+    "\nWarning: This test drops and creates a table called 'track'";
+} else {
+  eval "use DateTime; use DateTime::Format::Sybase;";
+  if ($@) {
+    plan skip_all => 'needs DateTime and DateTime::Format::Sybase for testing';
+  }
+  else {
+    plan tests => (4 * 2 * 2) + 2; # (tests * dt_types * storage_types) + storage_tests
+  }
+}
+
+my @storage_types = (
+  'DBI::Sybase::ASE',
+  'DBI::Sybase::ASE::NoBindVars',
+);
+my $schema;
+
+for my $storage_type (@storage_types) {
+  $schema = DBICTest::Schema->clone;
+
+  unless ($storage_type eq 'DBI::Sybase::ASE') { # autodetect
+    $schema->storage_type("::$storage_type");
+  }
+  $schema->connection($dsn, $user, $pass, {
+    AutoCommit => 1,
+    on_connect_call => [ 'datetime_setup' ],
+  });
+
+  $schema->storage->ensure_connected;
+
+  isa_ok( $schema->storage, "DBIx::Class::Storage::$storage_type" );
+
+# coltype, col, date
+  my @dt_types = (
+    ['DATETIME', 'last_updated_at', '2004-08-21T14:36:48.080Z'],
+# minute precision
+    ['SMALLDATETIME', 'small_dt', '2004-08-21T14:36:00.000Z'],
+  );
+
+  for my $dt_type (@dt_types) {
+    my ($type, $col, $sample_dt) = @$dt_type;
+
+    eval { $schema->storage->dbh->do("DROP TABLE track") };
+    $schema->storage->dbh->do(<<"SQL");
+CREATE TABLE track (
+   trackid INT IDENTITY PRIMARY KEY,
+   cd INT,
+   position INT,
+   $col $type
+)
+SQL
+    ok(my $dt = DateTime::Format::Sybase->parse_datetime($sample_dt));
+
+    my $row;
+    ok( $row = $schema->resultset('Track')->create({
+          $col => $dt,
+          cd => 1,
+        }));
+    ok( $row = $schema->resultset('Track')
+      ->search({ trackid => $row->trackid }, { select => [$col] })
+      ->first
+    );
+    is( $row->$col, $dt, 'DateTime roundtrip' );
+  }
+}
+
+# clean up our mess
+END {
+  if (my $dbh = eval { $schema->storage->_dbh }) {
+    $dbh->do('DROP TABLE track');
+  }
+}

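For readers adapting this test elsewhere: the moving parts are the datetime_setup connect-call, which configures the session's date handling on connect, and DateTime::Format::Sybase, which does the actual parsing for the round-trip. A minimal connection sketch with the same pieces ($dsn/$user/$pass are placeholders):

  my $schema = DBICTest::Schema->connect($dsn, $user, $pass, {
    AutoCommit      => 1,
    on_connect_call => [ 'datetime_setup' ],
  });
  my $dt = DateTime::Format::Sybase->parse_datetime('2004-08-21T14:36:48.080Z');
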
Modified: DBIx-Class/0.08/branches/prefetch/t/inflate/hri.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/inflate/hri.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/inflate/hri.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -1,7 +1,7 @@
 use strict;
-use warnings;  
+use warnings;
 
-use Test::More qw(no_plan);
+use Test::More;
 use lib qw(t/lib);
 use DBICTest;
 my $schema = DBICTest->init_schema();
@@ -9,7 +9,7 @@
 # Under some versions of SQLite if the $rs is left hanging around it will lock
 # So we create a scope here cos I'm lazy
 {
-    my $rs = $schema->resultset('CD');
+    my $rs = $schema->resultset('CD')->search ({}, { order_by => 'cdid' });
 
     # get the defined columns
     my @dbic_cols = sort $rs->result_source->columns;
@@ -23,9 +23,11 @@
     my @hashref_cols = sort keys %$datahashref1;
 
     is_deeply( \@dbic_cols, \@hashref_cols, 'returned columns' );
+
+    my $cd1 = $rs->find ({cdid => 1});
+    is_deeply ( $cd1, $datahashref1, 'first/find return the same thing');
 }
 
-
 sub check_cols_of {
     my ($dbic_obj, $datahashref) = @_;
     
@@ -135,3 +137,5 @@
   [{ $artist->get_columns, cds => [] }],
   'nested has_many prefetch without entries'
 );
+
+done_testing;

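This file exercises DBIx::Class::ResultClass::HashRefInflator; the added assertion pins down that first() and find() return identical hashrefs once it is active. Typical usage, for reference:

  my $rs = $schema->resultset('CD')->search({}, { order_by => 'cdid' });
  $rs->result_class('DBIx::Class::ResultClass::HashRefInflator');
  my $hashref = $rs->first;   # a plain hashref instead of a row object
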
Modified: DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/CD.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/CD.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/CD.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -26,6 +26,7 @@
   'genreid' => { 
     data_type => 'integer',
     is_nullable => 1,
+    accessor => undef,
   },
   'single_track' => {
     data_type => 'integer',

Modified: DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/Track.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/Track.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/Track.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -50,4 +50,17 @@
 __PACKAGE__->might_have( cd_single => 'DBICTest::Schema::CD', 'single_track' );
 __PACKAGE__->might_have( lyrics => 'DBICTest::Schema::Lyrics', 'track_id' );
 
+__PACKAGE__->belongs_to(
+    "year1999cd",
+    "DBICTest::Schema::Year1999CDs",
+    { "foreign.cdid" => "self.cd" },
+    { join_type => 'left' },  # the relationship is of course optional
+);
+__PACKAGE__->belongs_to(
+    "year2000cd",
+    "DBICTest::Schema::Year2000CDs",
+    { "foreign.cdid" => "self.cd" },
+    { join_type => 'left' },
+);
+
 1;

Modified: DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/Year1999CDs.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/Year1999CDs.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/Year1999CDs.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -9,7 +9,7 @@
 __PACKAGE__->table('year1999cds');
 __PACKAGE__->result_source_instance->is_virtual(1);
 __PACKAGE__->result_source_instance->view_definition(
-  "SELECT cdid, artist, title FROM cd WHERE year ='1999'"
+  "SELECT cdid, artist, title, single_track FROM cd WHERE year ='1999'"
 );
 __PACKAGE__->add_columns(
   'cdid' => {
@@ -23,9 +23,17 @@
     data_type => 'varchar',
     size      => 100,
   },
-
+  'single_track' => {
+    data_type => 'integer',
+    is_nullable => 1,
+    is_foreign_key => 1,
+  },
 );
 __PACKAGE__->set_primary_key('cdid');
 __PACKAGE__->add_unique_constraint([ qw/artist title/ ]);
 
+__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist' );
+__PACKAGE__->has_many( tracks => 'DBICTest::Schema::Track',
+    { "foreign.cd" => "self.cdid" });
+
 1;

Modified: DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/Year2000CDs.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/Year2000CDs.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest/Schema/Year2000CDs.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -1,30 +1,19 @@
-package # hide from PAUSE 
+package # hide from PAUSE
     DBICTest::Schema::Year2000CDs;
-## Used in 104view.t
 
-use base qw/DBICTest::BaseResult/;
+use base qw/DBICTest::Schema::CD/;
 
 __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
-
 __PACKAGE__->table('year2000cds');
-__PACKAGE__->result_source_instance->view_definition(
-  "SELECT cdid, artist, title FROM cd WHERE year ='2000'"
-);
-__PACKAGE__->add_columns(
-  'cdid' => {
-    data_type => 'integer',
-    is_auto_increment => 1,
-  },
-  'artist' => {
-    data_type => 'integer',
-  },
-  'title' => {
-    data_type => 'varchar',
-    size      => 100,
-  },
 
-);
-__PACKAGE__->set_primary_key('cdid');
-__PACKAGE__->add_unique_constraint([ qw/artist title/ ]);
+# need to operate on the instance for things to work
+__PACKAGE__->result_source_instance->view_definition( sprintf (
+  'SELECT %s FROM cd WHERE year = "2000"',
+  join (', ', __PACKAGE__->columns),
+));
 
+__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist' );
+__PACKAGE__->has_many( tracks => 'DBICTest::Schema::Track',
+    { "foreign.cd" => "self.cdid" });
+
 1;

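Two view styles are now in play: Year1999CDs remains a virtual view (is_virtual(1) means it is never deployed, only ever inlined as a subselect), while Year2000CDs is a real deployed view whose definition is generated from the inherited column list, so class and DDL cannot drift apart. Once declared, either kind queries like any other source (sketch):

  my @cds_2000 = $schema->resultset('Year2000CDs')
                   ->search({}, { prefetch => 'tracks' });
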
Modified: DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest.pm
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest.pm	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/lib/DBICTest.pm	2009-11-29 11:59:25 UTC (rev 7983)
@@ -155,6 +155,11 @@
     my $self = shift;
     my $schema = shift;
 
+    $schema->populate('Genre', [
+      [qw/genreid name/],
+      [qw/1       emo  /],
+    ]);
+
     $schema->populate('Artist', [
         [ qw/artistid name/ ],
         [ 1, 'Caterwauler McCrae' ],
@@ -163,8 +168,8 @@
     ]);
 
     $schema->populate('CD', [
-        [ qw/cdid artist title year/ ],
-        [ 1, 1, "Spoonful of bees", 1999 ],
+        [ qw/cdid artist title year genreid/ ],
+        [ 1, 1, "Spoonful of bees", 1999, 1 ],
         [ 2, 1, "Forkful of bees", 2001 ],
         [ 3, 1, "Caterwaulin' Blues", 1997 ],
         [ 4, 2, "Generic Manufactured Singles", 2001 ],
@@ -243,7 +248,7 @@
     
     $schema->populate('TreeLike', [
         [ qw/id parent name/ ],
-        [ 1, undef, 'root' ],        
+        [ 1, undef, 'root' ],
         [ 2, 1, 'foo'  ],
         [ 3, 2, 'bar'  ],
         [ 6, 2, 'blop' ],

Modified: DBIx-Class/0.08/branches/prefetch/t/lib/sqlite.sql
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/lib/sqlite.sql	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/lib/sqlite.sql	2009-11-29 11:59:25 UTC (rev 7983)
@@ -1,6 +1,6 @@
 -- 
 -- Created by SQL::Translator::Producer::SQLite
--- Created on Mon Sep 21 00:11:34 2009
+-- Created on Sun Nov 15 14:13:02 2009
 -- 
 
 
@@ -453,6 +453,6 @@
 -- View: year2000cds
 --
 CREATE VIEW year2000cds AS
-    SELECT cdid, artist, title FROM cd WHERE year ='2000';
+    SELECT cdid, artist, title, year, genreid, single_track FROM cd WHERE year = "2000";
 
 COMMIT;

Modified: DBIx-Class/0.08/branches/prefetch/t/prefetch/grouped.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/prefetch/grouped.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/prefetch/grouped.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -217,11 +217,11 @@
     $rs->as_query,
     '(
       SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
-             tags.tagid, tags.cd, tags.tag 
+             tags.tagid, tags.cd, tags.tag
         FROM (
           SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
             FROM cd me
-          GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+          GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, cdid
           ORDER BY cdid
         ) me
         LEFT JOIN tags tags ON tags.cd = me.cdid
@@ -329,4 +329,28 @@
     );
 }
 
+{
+    my $rs = $schema->resultset('CD')->search({},
+        {
+           '+select' => [{ count => 'tags.tag' }],
+           '+as' => ['test_count'],
+           prefetch => ['tags'],
+           distinct => 1,
+           order_by => {'-asc' => 'tags.tag'},
+           rows => 1
+        }
+    );
+    is_same_sql_bind($rs->as_query, q{
+        (SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, me.test_count, tags.tagid, tags.cd, tags.tag
+          FROM (SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, COUNT( tags.tag ) AS test_count
+                FROM cd me LEFT JOIN tags tags ON tags.cd = me.cdid
+            GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, tags.tag
+            ORDER BY tags.tag ASC LIMIT 1)
+            me
+          LEFT JOIN tags tags ON tags.cd = me.cdid
+         ORDER BY tags.tag ASC, tags.cd, tags.tag
+        )
+    }, []);
+}
+
 done_testing;

Modified: DBIx-Class/0.08/branches/prefetch/t/prefetch/via_search_related.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/prefetch/via_search_related.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/prefetch/via_search_related.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -37,7 +37,37 @@
 
 }, 'search_related prefetch with order_by works');
 
+TODO: {
+local $TODO = 'Unqualified columns in where clauses can not be fixed without an SQLA rewrite'
+  if SQL::Abstract->VERSION < 2;
+lives_ok ( sub {
+  my $no_prefetch = $schema->resultset('Track')->search_related(cd =>
+    {
+      'cd.year' => "2000",
+      'tagid' => 1,
+    },
+    {
+      join => 'tags',
+      rows => 1,
+    }
+  );
 
+  my $use_prefetch = $no_prefetch->search(
+    undef,
+    {
+      prefetch => 'tags',
+    }
+  );
+
+  is(
+    scalar ($use_prefetch->all),
+    scalar ($no_prefetch->all),
+    "Amount of returned rows is right"
+  );
+  is($use_prefetch->count, $no_prefetch->count, 'counts with and without prefetch match');
+
+}, 'search_related prefetch with condition referencing unqualified column of a joined table works');
+}
+
+
 lives_ok (sub {
     my $rs = $schema->resultset("Artwork")->search(undef, {distinct => 1})
               ->search_related('artwork_to_artist')->search_related('artist',

Modified: DBIx-Class/0.08/branches/prefetch/t/relationship/core.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/relationship/core.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/relationship/core.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -5,12 +5,11 @@
 use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
+use DBIC::SqlMakerTest;
 
 my $schema = DBICTest->init_schema();
 my $sdebug = $schema->storage->debug;
 
-plan tests => 79;
-
 # has_a test
 my $cd = $schema->resultset("CD")->find(4);
 my ($artist) = ($INC{'DBICTest/HelperRels'}
@@ -134,7 +133,7 @@
   year => 2007,
 } );
 is( $cd->title, 'Greatest Hits 2: Louder Than Ever', 'find_or_new_related new record ok' );
-ok( ! $cd->in_storage, 'find_or_new_related on a new record: not in_storage' );
+is( $cd->in_storage, 0, 'find_or_new_related on a new record: not in_storage' );
 
 $cd->artist(undef);
 my $newartist = $cd->find_or_new_related( 'artist', {
@@ -260,8 +259,22 @@
 is($def_artist_cd->search_related('artist')->count, 0, 'closed search on null FK');
 
 # test undirected many-to-many relationship (e.g. "related artists")
-my $undir_maps = $schema->resultset("Artist")->find(1)->artist_undirected_maps;
+my $undir_maps = $schema->resultset("Artist")
+                          ->search ({artistid => 1})
+                            ->search_related ('artist_undirected_maps');
 is($undir_maps->count, 1, 'found 1 undirected map for artist 1');
+is_same_sql_bind (
+  $undir_maps->as_query,
+  '(
+    SELECT artist_undirected_maps.id1, artist_undirected_maps.id2
+      FROM artist me
+      LEFT JOIN artist_undirected_map artist_undirected_maps
+        ON artist_undirected_maps.id1 = me.artistid OR artist_undirected_maps.id2 = me.artistid
+    WHERE ( artistid = ? )
+  )',
+  [[artistid => 1]],
+  'expected join sql produced',
+);
 
 $undir_maps = $schema->resultset("Artist")->find(2)->artist_undirected_maps;
 is($undir_maps->count, 1, 'found 1 undirected map for artist 2');
@@ -310,3 +323,5 @@
 
 $cds = $schema->resultset("CD")->search({ 'me.cdid' => 5 }, { join => { single_track => { cd => {} } } });
 is($cds->count, 1, "subjoins under left joins force_left (hashref)");
+
+done_testing;

Modified: DBIx-Class/0.08/branches/prefetch/t/relationship/update_or_create_multi.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/relationship/update_or_create_multi.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/relationship/update_or_create_multi.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -76,8 +76,9 @@
 $schema->storage->debugcb(undef);
 $schema->storage->debug ($sdebug);
 
+my ($search_sql) = $sql[0] =~ /^(SELECT .+?)\:/;
 is_same_sql (
-  $sql[0],
+  $search_sql,
   'SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
     FROM cd me 
     WHERE ( me.artist = ? AND me.title = ? AND me.genreid = ? )

Modified: DBIx-Class/0.08/branches/prefetch/t/resultset/as_query.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/resultset/as_query.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/resultset/as_query.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -1,7 +1,5 @@
-#!/usr/bin/perl
-
 use strict;
-use warnings FATAL => 'all';
+use warnings;
 
 use Test::More;
 

Added: DBIx-Class/0.08/branches/prefetch/t/resultset/is_paged.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/resultset/is_paged.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/prefetch/t/resultset/is_paged.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -0,0 +1,18 @@
+use strict;
+use warnings;
+
+use lib qw(t/lib);
+use Test::More;
+use Test::Exception;
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $tkfks = $schema->resultset('Artist');
+
+ok !$tkfks->is_paged, 'vanilla resultset is not paginated';
+
+my $paginated = $tkfks->search(undef, { page => 5 });
+ok $paginated->is_paged, 'resultset is paginated now';
+
+done_testing;


Property changes on: DBIx-Class/0.08/branches/prefetch/t/resultset/is_paged.t
___________________________________________________________________
Name: svn:mergeinfo
   + 
Name: svn:eol-style
   + native

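is_paged() is simply a predicate over the page attribute; pagination itself is plain search attributes, with pager() exposing a Data::Page object on demand. A minimal sketch:

  my $page5 = $schema->resultset('Artist')->search(undef, { page => 5, rows => 10 });
  print "paged\n" if $page5->is_paged;
  my $total = $page5->pager->total_entries;   # runs a count behind the scenes
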
Added: DBIx-Class/0.08/branches/prefetch/t/resultset/plus_select.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/resultset/plus_select.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/prefetch/t/resultset/plus_select.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -0,0 +1,63 @@
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $cd_rs = $schema->resultset('CD')->search ({genreid => { '!=', undef } }, { order_by => 'cdid' });
+my $track_cnt = $cd_rs->search({}, { rows => 1 })->search_related ('tracks')->count;
+
+my %basecols = $cd_rs->first->get_columns;
+
+# The current implementation of get_inflated_columns will "inflate"
+# relationships by simply calling the accessor when a column and a
+# relationship share a name (which you should avoid anyway).
+# I consider this wrong, but at the same time appreciate the
+# ramifications of changing it. Thus the value override and the
+# TODO to go with it. Delete all of this if ever resolved.
+my %todo_rel_inflation_override = ( artist => $basecols{artist} );
+TODO: {
+  local $TODO = 'Treating relationships as inflatable data is wrong - see comment in ' . __FILE__;
+  ok (! keys %todo_rel_inflation_override);
+}
+
+my $plus_rs = $cd_rs->search (
+  {},
+  { join => 'tracks', distinct => 1, '+select' => { count => 'tracks.trackid' }, '+as' => 'tr_cnt' },
+);
+
+is_deeply (
+  { $plus_rs->first->get_columns },
+  { %basecols, tr_cnt => $track_cnt },
+  'extra columns returned by get_columns',
+);
+
+is_deeply (
+  { $plus_rs->first->get_inflated_columns, %todo_rel_inflation_override },
+  { %basecols, tr_cnt => $track_cnt },
+  'extra columns returned by get_inflated_columns without inflatable columns',
+);
+
+SKIP: {
+  eval { require DateTime };
+  skip "Need DateTime for +select/get_inflated_columns tests", 1 if $@;
+
+  $schema->class('CD')->inflate_column( 'year',
+    { inflate => sub { DateTime->new( year => shift ) },
+      deflate => sub { shift->year } }
+  );
+
+  $basecols{year} = DateTime->new ( year => $basecols{year} );
+
+  is_deeply (
+    { $plus_rs->first->get_inflated_columns, %todo_rel_inflation_override },
+    { %basecols, tr_cnt => $track_cnt },
+    'extra columns returned by get_inflated_columns',
+  );
+}
+
+done_testing;

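The +select/+as pair used above appends to the default column list instead of replacing it, and the extra value comes back through get_column() rather than a generated accessor. Condensed from the test:

  my $rs = $schema->resultset('CD')->search({}, {
    join      => 'tracks',
    distinct  => 1,
    '+select' => { count => 'tracks.trackid' },
    '+as'     => 'tr_cnt',
  });
  my $track_count = $rs->first->get_column('tr_cnt');
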
Modified: DBIx-Class/0.08/branches/prefetch/t/resultset/update_delete.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/resultset/update_delete.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/resultset/update_delete.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -79,8 +79,12 @@
 );
 
 # grouping on PKs only should pass
-$sub_rs->search ({}, { group_by => [ reverse $sub_rs->result_source->primary_columns ] })     # reverse to make sure the comaprison works
-          ->update ({ pilot_sequence => \ 'pilot_sequence + 1' });
+$sub_rs->search (
+  {},
+  {
+    group_by => [ reverse $sub_rs->result_source->primary_columns ],     # reverse to make sure the PK-list comparison works
+  },
+)->update ({ pilot_sequence => \ 'pilot_sequence + 1' });
 
 is_deeply (
   [ $tkfks->search ({ autopilot => [qw/a b x y/]}, { order_by => 'autopilot' })
@@ -90,6 +94,19 @@
   'Only two rows incremented',
 );
 
+# also make sure weird scalarref usage works (RT#51409)
+$tkfks->search (
+  \ 'pilot_sequence BETWEEN 11 AND 21',
+)->update ({ pilot_sequence => \ 'pilot_sequence + 1' });
+
+is_deeply (
+  [ $tkfks->search ({ autopilot => [qw/a b x y/]}, { order_by => 'autopilot' })
+            ->get_column ('pilot_sequence')->all 
+  ],
+  [qw/12 22 30 40/],
+  'Only two rows incremented (where => scalarref works)',
+);
+
 $sub_rs->delete;
 
 is ($tkfks->count, $tkfk_cnt -= 2, 'Only two rows deleted');

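The RT#51409 case being locked down above: a bare scalarref is accepted as the entire WHERE clause, and the same literal-SQL form works on the SET side of update(). In isolation:

  $tkfks->search(\ 'pilot_sequence BETWEEN 11 AND 21')
        ->update({ pilot_sequence => \ 'pilot_sequence + 1' });
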
Modified: DBIx-Class/0.08/branches/prefetch/t/storage/on_connect_call.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/storage/on_connect_call.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/storage/on_connect_call.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -3,67 +3,96 @@
 no warnings qw/once redefine/;
 
 use lib qw(t/lib);
+use DBI;
 use DBICTest;
+use DBICTest::Schema;
+use DBIx::Class::Storage::DBI;
 
-use Test::More tests => 9;
+# !!! do not replace this with done_testing - tests reside in the callbacks
+# !!! number of calls is important
+use Test::More tests => 16;
+# !!!
 
-use DBIx::Class::Storage::DBI;
-my $schema = DBICTest->init_schema(
-  no_connect  => 1,
-  no_deploy   => 1,
-);
+my $schema = DBICTest::Schema->clone;
 
-local *DBIx::Class::Storage::DBI::connect_call_foo = sub {
-  isa_ok $_[0], 'DBIx::Class::Storage::DBI',
-    'got storage in connect_call method';
-  is $_[1], 'bar', 'got param in connect_call method';
-};
+{
+  *DBIx::Class::Storage::DBI::connect_call_foo = sub {
+    isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+      'got storage in connect_call method';
+    is $_[1], 'bar', 'got param in connect_call method';
+  };
 
-local *DBIx::Class::Storage::DBI::disconnect_call_foo = sub {
-  isa_ok $_[0], 'DBIx::Class::Storage::DBI',
-    'got storage in disconnect_call method';
-};
+  *DBIx::Class::Storage::DBI::disconnect_call_foo = sub {
+    isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+      'got storage in disconnect_call method';
+  };
 
-ok $schema->connection(
-  DBICTest->_database,
-  {
-    on_connect_call => [
-        [ do_sql => 'create table test1 (id integer)' ],
-        [ do_sql => [ 'insert into test1 values (?)', {}, 1 ] ],
-        [ do_sql => sub { ['insert into test1 values (2)'] } ],
-        [ sub { $_[0]->dbh->do($_[1]) }, 'insert into test1 values (3)' ],
-        # this invokes $storage->connect_call_foo('bar') (above)
-        [ foo => 'bar' ],
-    ],
-    on_connect_do => 'insert into test1 values (4)',
-    on_disconnect_call => 'foo',
-  },
-), 'connection()';
+  ok $schema->connection(
+      DBICTest->_database,
+    {
+      on_connect_call => [
+          [ do_sql => 'create table test1 (id integer)' ],
+          [ do_sql => [ 'insert into test1 values (?)', {}, 1 ] ],
+          [ do_sql => sub { ['insert into test1 values (2)'] } ],
+          [ sub { $_[0]->dbh->do($_[1]) }, 'insert into test1 values (3)' ],
+          # this invokes $storage->connect_call_foo('bar') (above)
+          [ foo => 'bar' ],
+      ],
+      on_connect_do => 'insert into test1 values (4)',
+      on_disconnect_call => 'foo',
+    },
+  ), 'connection()';
 
-is_deeply (
-  $schema->storage->dbh->selectall_arrayref('select * from test1'),
-  [ [ 1 ], [ 2 ], [ 3 ], [ 4 ] ],
-  'on_connect_call/do actions worked'
-);
+  ok (! $schema->storage->connected, 'start disconnected');
 
-local *DBIx::Class::Storage::DBI::connect_call_foo = sub {
-  isa_ok $_[0], 'DBIx::Class::Storage::DBI',
-    'got storage in connect_call method';
-};
+  is_deeply (
+    $schema->storage->dbh->selectall_arrayref('select * from test1'),
+    [ [ 1 ], [ 2 ], [ 3 ], [ 4 ] ],
+    'on_connect_call/do actions worked'
+  );
 
-local *DBIx::Class::Storage::DBI::connect_call_bar = sub {
-  isa_ok $_[0], 'DBIx::Class::Storage::DBI',
-    'got storage in connect_call method';
-};
+  $schema->storage->disconnect;
+}
 
-$schema->storage->disconnect;
+{
+  *DBIx::Class::Storage::DBI::connect_call_foo = sub {
+    isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+      'got storage in connect_call method';
+  };
 
-ok $schema->connection(
-  DBICTest->_database,
-  {
-    # method list form
-    on_connect_call => [ 'foo', sub { ok 1, "coderef in list form" }, 'bar' ],
-  },
-), 'connection()';
+  *DBIx::Class::Storage::DBI::connect_call_bar = sub {
+    isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+      'got storage in connect_call method';
+  };
 
-$schema->storage->ensure_connected;
+
+  ok $schema->connection(
+    DBICTest->_database,
+    {
+      # method list form
+      on_connect_call => [ 'foo', sub { ok 1, "coderef in list form" }, 'bar' ],
+    },
+  ), 'connection()';
+
+  ok (! $schema->storage->connected, 'start disconnected');
+  $schema->storage->ensure_connected;
+  $schema->storage->disconnect; # this should not fire any tests
+}
+
+{
+  ok $schema->connection(
+    sub { DBI->connect(DBICTest->_database) },
+    {
+      # method list form
+      on_connect_call => [ sub { ok 1, "on_connect_call after DT parser" }, ],
+      on_disconnect_call => [ sub { ok 1, "on_disconnect_call after DT parser" }, ],
+    },
+  ), 'connection()';
+
+  ok (! $schema->storage->connected, 'start disconnected');
+
+  $schema->storage->_determine_driver;  # this should connect due to the coderef
+
+  ok ($schema->storage->connected, 'determine driver connects');
+  $schema->storage->disconnect;
+}

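The reworked test walks through all three invocation forms accepted by on_connect_call, which can be mixed freely in one arrayref. For reference (a sketch, relying on a connect_call_foo method installed as in the test above):

  $schema->connection(DBICTest->_database, {
    on_connect_call => [
      [ do_sql => 'insert into test1 values (1)' ],   # method name plus args
      [ foo    => 'bar' ],                            # calls connect_call_foo('bar')
      sub { my $storage = shift; $storage->dbh },     # bare coderef gets the storage
    ],
  });
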
Modified: DBIx-Class/0.08/branches/prefetch/t/storage/on_connect_do.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/storage/on_connect_do.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/storage/on_connect_do.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -5,6 +5,7 @@
 
 use lib qw(t/lib);
 use base 'DBICTest';
+require DBI;
 
 
 my $schema = DBICTest->init_schema(
@@ -28,7 +29,7 @@
 $schema->storage->disconnect;
 
 ok $schema->connection(
-    DBICTest->_database,
+    sub { DBI->connect(DBICTest->_database) },
     {
         on_connect_do       => [
             'CREATE TABLE TEST_empty (id INTEGER)',

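The switch from a plain DSN to sub { DBI->connect(...) } is deliberate: a coderef connect_info hides the driver name, so storage must actually connect during driver determination -- the behaviour the new on_connect_call.t block verifies. For reference:

  $schema->connection(
    sub { DBI->connect(DBICTest->_database) },   # coderef instead of DSN/user/pass
    { on_connect_do => [ 'CREATE TABLE TEST_empty (id INTEGER)' ] },
  );
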
Modified: DBIx-Class/0.08/branches/prefetch/t/storage/replication.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/storage/replication.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/storage/replication.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -66,7 +66,7 @@
 
     sub init_schema {
         # current SQLT SQLite producer does not handle DROP TABLE IF EXISTS, trap warnings here
-        local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/ };
+        local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/s };
 
         my ($class, $schema_method) = @_;
 

Modified: DBIx-Class/0.08/branches/prefetch/t/zzzzzzz_sqlite_deadlock.t
===================================================================
--- DBIx-Class/0.08/branches/prefetch/t/zzzzzzz_sqlite_deadlock.t	2009-11-29 11:58:50 UTC (rev 7982)
+++ DBIx-Class/0.08/branches/prefetch/t/zzzzzzz_sqlite_deadlock.t	2009-11-29 11:59:25 UTC (rev 7983)
@@ -10,7 +10,7 @@
 use DBICTest::Schema;
 
 plan tests => 2;
-my $wait_for = 10;  # how many seconds to wait
+my $wait_for = 30;  # how many seconds to wait
 
 for my $close (0,1) {
 
