[Bast-commits] r8720 - in DBIx-Class/0.08/branches/run_file_against_storage: . examples/Schema examples/Schema/MyDatabase/Main/Result lib/DBIx lib/DBIx/Class lib/DBIx/Class/Admin lib/DBIx/Class/CDBICompat lib/DBIx/Class/InflateColumn lib/DBIx/Class/Manual lib/DBIx/Class/Optional lib/DBIx/Class/PK lib/DBIx/Class/Relationship lib/DBIx/Class/ResultSource lib/DBIx/Class/ResultSourceProxy lib/DBIx/Class/SQLAHacks lib/DBIx/Class/Schema lib/DBIx/Class/Serialize lib/DBIx/Class/Storage lib/DBIx/Class/Storage/DBI lib/DBIx/Class/Storage/DBI/ADO lib/DBIx/Class/Storage/DBI/ODBC lib/DBIx/Class/Storage/DBI/Oracle lib/DBIx/Class/Storage/DBI/Replicated lib/DBIx/Class/Storage/DBI/Replicated/Balancer lib/DBIx/Class/Storage/DBI/Role lib/DBIx/Class/Storage/DBI/Sybase lib/DBIx/Class/Storage/DBI/Sybase/ASE lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server lib/SQL/Translator/Parser/DBIx lib/SQL/Translator/Producer/DBIx/Class maint script t t/admin t/bind t/cdbi t/cdbi/abstract t/cdbi/testlib t/cdbi/testlib/DBIC/Test t/count t/delete t/inflate t/lib t/lib/DBIC t/lib/DBICNSTest/Bogus t/lib/DBICNSTest/OtherRslt t/lib/DBICNSTest/Result t/lib/DBICNSTest/Rslt t/lib/DBICNSTest/RtBug41083/Schema t/lib/DBICNSTest/RtBug41083/Schema_A t/lib/DBICTest t/lib/DBICTest/ResultSetManager t/lib/DBICTest/Schema t/multi_create t/prefetch t/relationship t/resultset t/schema t/search t/sqlahacks t/sqlahacks/limit_dialects t/sqlahacks/quotes t/sqlahacks/sql_maker t/storage

ribasushi at dev.catalyst.perl.org
Tue Feb 16 10:26:13 GMT 2010


Author: ribasushi
Date: 2010-02-16 10:26:12 +0000 (Tue, 16 Feb 2010)
New Revision: 8720

Added:
   DBIx-Class/0.08/branches/run_file_against_storage/.gitignore
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Admin.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Admin/
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Admin/Types.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Optional/
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Optional/Dependencies.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks/MSSQL.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ADO.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ADO/
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/AmbiguousGlob.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/AutoCast.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Informix.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/SQL_Anywhere.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/SQLAnywhere.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/ASE/
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBIHacks.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/06notabs.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/07eol.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/10optional_deps.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/747mssql_ado.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/748informix.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/749sybase_asa.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/93autocast.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/admin/
   DBIx-Class/0.08/branches/run_file_against_storage/t/admin/01load.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/admin/02ddl.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/admin/03data.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/admin/10script.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/count/search_related.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/delete/complex.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_determine_parser.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_mssql.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_oracle.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_sybase.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_sybase_asa.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/ArtistGUID.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/ComputedColumn.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/CustomSql.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Money.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersion_v1.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersion_v2.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersion_v3.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/diamond.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/existing_in_chain.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/has_many.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/multilev_single_PKeqFK.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/count.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/grouped.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/incomplete.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/join_type.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/one_to_many_to_one.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/via_search_related.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/as_subselect_rs.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/is_ordered.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/is_paged.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/nulls_only.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/plus_select.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/schema/
   DBIx-Class/0.08/branches/run_file_against_storage/t/schema/anon.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/schema/clone.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/search/related_strip_prefetch.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/search/select_chains.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/
   DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/limit_dialects/
   DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/limit_dialects/toplimit.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/quotes/
   DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/quotes/quotes.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/quotes/quotes_newstyle.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/sql_maker/
   DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/sql_maker/sql_maker.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/sql_maker/sql_maker_quote.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/base.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/dbh_do.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/dbi_coderef.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/debug.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/disable_sth_caching.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/error.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/exception.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/on_connect_call.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/on_connect_do.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/ping_count.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/reconnect.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/replicated.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/storage/stats.t
Removed:
   DBIx-Class/0.08/branches/run_file_against_storage/t/18inserterror.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/19quotes.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/19quotes_newstyle.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/31stats.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/32connect_code_ref.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/33storage_reconnect.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/35disable_sth_caching.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/36datetime.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/42toplimit.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/73oracle_inflate.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/89dbicadmin.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/91debug.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/92storage.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/92storage_on_connect_do.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/93storage_replication.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/95sql_maker.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/95sql_maker_quote.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Binary.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/PgBase.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/dbh_do.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersionNew.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersionOrig.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/multilev_might_have_PKeqFK.t
Modified:
   DBIx-Class/0.08/branches/run_file_against_storage/
   DBIx-Class/0.08/branches/run_file_against_storage/Changes
   DBIx-Class/0.08/branches/run_file_against_storage/Features_09
   DBIx-Class/0.08/branches/run_file_against_storage/MANIFEST.SKIP
   DBIx-Class/0.08/branches/run_file_against_storage/Makefile.PL
   DBIx-Class/0.08/branches/run_file_against_storage/TODO
   DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/MyDatabase/Main/Result/Artist.pm
   DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/MyDatabase/Main/Result/Cd.pm
   DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/MyDatabase/Main/Result/Track.pm
   DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/insertdb.pl
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/AccessorGroup.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/AbstractSearch.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ColumnCase.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ColumnGroups.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Constructor.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Copy.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ImaDBI.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Iterator.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/LazyLoading.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Relationship.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Relationships.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Retrieve.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/TempColumns.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Componentised.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Core.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Cursor.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/DB.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Exception.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/InflateColumn.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/InflateColumn/DateTime.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/InflateColumn/File.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Component.pod
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Cookbook.pod
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/DocMap.pod
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Example.pod
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/FAQ.pod
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Intro.pod
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Joining.pod
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Reading.pod
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Troubleshooting.pod
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Ordered.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/PK.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/PK/Auto.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/Accessor.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/Base.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/BelongsTo.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/CascadeActions.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/HasMany.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/HasOne.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/ManyToMany.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/ProxyMethods.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSet.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSetColumn.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSource.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSource/Table.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSource/View.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSourceHandle.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSourceProxy.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSourceProxy/Table.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Row.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks/MySQL.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks/OracleJoins.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Schema.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Schema/Versioned.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Serialize/Storable.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/StartupCheck.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Cursor.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/DB2.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/MSSQL.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/MultiColumnIn.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/NoBindVars.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Oracle.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Oracle/WhereJoins.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Pg.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Types.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Role/QueryCounter.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/SQLite.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/MSSQL.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/mysql.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/Statistics.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/TxnScopeGuard.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/UTF8Columns.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/SQL/Translator/Parser/DBIx/Class.pm
   DBIx-Class/0.08/branches/run_file_against_storage/lib/SQL/Translator/Producer/DBIx/Class/File.pm
   DBIx-Class/0.08/branches/run_file_against_storage/maint/gen-schema.pl
   DBIx-Class/0.08/branches/run_file_against_storage/maint/svn-log.perl
   DBIx-Class/0.08/branches/run_file_against_storage/script/dbicadmin
   DBIx-Class/0.08/branches/run_file_against_storage/t/02pod.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/03podcoverage.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/05components.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/100populate.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/101populate_rs.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/103many_to_many_warning.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/104view.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/20setuperrors.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/26dumper.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/46where_attribute.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/51threads.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/51threadtxn.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/52cycle.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/60core.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/71mysql.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/72pg.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/73oracle.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/745db2.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/746db2_400.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/746mssql.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/746sybase.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/74mssql.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/76joins.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/76select.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/79aliasing.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/80unique.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/81transactions.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/83cache.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/85utf8.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/86might_have.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/86sqlt.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/87ordered.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/88result_set_column.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/90join_torture.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/93nobindvars.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/93single_accessor_object.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/94versioning.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/98savepoints.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/99dbic_sqlt_parser.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/bind/attribute.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/bind/bindtype_columns.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/01-columns.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/02-Film.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/03-subclassing.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/04-lazy.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/06-hasa.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/09-has_many.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/11-triggers.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/12-filter.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/13-constraint.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/14-might_have.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/15-accessor.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/18-has_a.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/19-set_sql.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/21-iterator.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/22-deflate_order.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/26-mutator.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/30-pager.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/98-failure.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/abstract/search_where.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Actor.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/ActorAlias.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Blurb.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/DBIC/Test/SQLite.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Director.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Film.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Lazy.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Log.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyBase.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyFilm.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyFoo.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyStar.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyStarLink.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyStarLinkMCPK.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Order.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/OtherFilm.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/count/count_rs.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/count/distinct.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/count/grouped_pager.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/count/in_subquery.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/count/joined.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/count/prefetch.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/from_subquery.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/core.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_pg.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/file_column.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/hri.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/serialize.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBIC/SqlMakerTest.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Bogus/A.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Bogus/B.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/OtherRslt/D.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Result/A.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Result/B.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Rslt/A.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Rslt/B.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/RtBug41083/Schema/Foo.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/RtBug41083/Schema_A/A.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/AuthorCheck.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/BaseResult.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/ResultSetManager/Foo.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Artist.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Artwork.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Bookmark.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/CD.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Event.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/EventTZPg.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/ForceForeign.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Serialized.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Track.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Year1999CDs.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Year2000CDs.pm
   DBIx-Class/0.08/branches/run_file_against_storage/t/lib/sqlite.sql
   DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/m2m.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/standard.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/attrs_untouched.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/diamond.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/double_prefetch.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/multiple_hasmany.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/standard.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/with_limit.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/after_update.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/core.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/doesnt_exist.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/update_or_create_multi.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/update_or_create_single.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/as_query.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/update_delete.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/search/preserve_original_rs.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/search/subquery.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/zzzzzzz_perl_perf_bug.t
   DBIx-Class/0.08/branches/run_file_against_storage/t/zzzzzzz_sqlite_deadlock.t
Log:
 r6813 at Thesaurus (orig r6812):  castaway | 2009-06-28 06:11:08 +0200
 Tests for grouping with prefetch
 
 r6820 at Thesaurus (orig r6819):  ribasushi | 2009-06-28 13:00:03 +0200
 The prefetch+group_by is a complex problem - branch
 r6844 at Thesaurus (orig r6843):  abraxxa | 2009-06-29 11:02:17 +0200
 fixed typo in test
 
 r6848 at Thesaurus (orig r6847):  ribasushi | 2009-06-29 19:09:00 +0200
 Minor Ordered optimization (don't use count)
 r6856 at Thesaurus (orig r6855):  caelum | 2009-06-29 23:42:11 +0200
  r5451 at hlagh (orig r6605):  caelum | 2009-06-10 09:23:44 -0700
  new branch to implement on_connect_call
  r5484 at hlagh (orig r6633):  caelum | 2009-06-11 11:03:10 -0700
  on_connect_call implementation and set_datetime_format support for Oracle
  r5492 at hlagh (orig r6641):  caelum | 2009-06-11 16:39:28 -0700
  connect_call_set_datetime_format for Oracle - I have no idea why this didn't get committed before...
  r5504 at hlagh (orig r6655):  caelum | 2009-06-12 17:28:06 -0700
  finished up on_connect_call stuff
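  For context, a minimal sketch (assumed names and credentials, not code from these commits) of how the new on_connect_call hook is supplied through connect_info - the string form dispatches to a storage method named connect_call_<name>, so 'datetime_setup' resolves to the Oracle connect_call_datetime_format support implemented here:

      # hedged sketch: wiring up on_connect_call via connect_info
      my $schema = My::Schema->connect(
          $dsn, $user, $pass,
          {
              on_connect_call => 'datetime_setup',   # string form
              # a coderef receiving the storage object is also accepted:
              # on_connect_call => sub { my $storage = shift; ... },
          },
      );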
  r5507 at hlagh (orig r6658):  caelum | 2009-06-13 04:03:36 -0700
  fixup _setup_connect_do, other minor cleanups
  r5508 at hlagh (orig r6659):  caelum | 2009-06-13 04:35:33 -0700
  make the on_(dis)?connect_do accessors return the original structure
  r5509 at hlagh (orig r6660):  caelum | 2009-06-13 08:31:52 -0700
  allow undef for _setup_connect_do
  r5522 at hlagh (orig r6679):  caelum | 2009-06-14 09:56:40 -0700
  rename connect_do store
  r5621 at hlagh (orig r6769):  caelum | 2009-06-23 07:38:33 -0700
  minor doc update
  r5628 at hlagh (orig r6777):  caelum | 2009-06-23 16:36:12 -0700
  properly test nanosecond precision with oracle and datetime_setup
  r5669 at hlagh (orig r6784):  caelum | 2009-06-24 10:49:25 -0700
  IC::DT does support timestamp with timezone
  r5768 at hlagh (orig r6846):  caelum | 2009-06-29 08:20:32 -0700
  remove DateTime from 73oracle.t
  r5781 at hlagh (orig r6849):  caelum | 2009-06-29 13:07:43 -0700
  remove the _store stuff for on_connect_do
  r5785 at hlagh (orig r6853):  ribasushi | 2009-06-29 14:38:30 -0700
  Some beautification
 
 r6871 at Thesaurus (orig r6870):  ribasushi | 2009-06-30 10:09:03 +0200
 Cleanup dependency handling a bit
 r6875 at Thesaurus (orig r6874):  ribasushi | 2009-06-30 12:39:06 +0200
 Allow broken resultsource-class-derived objects to still work
 r6876 at Thesaurus (orig r6875):  ribasushi | 2009-06-30 12:40:46 +0200
 clarify
 r6878 at Thesaurus (orig r6877):  ash | 2009-06-30 13:48:13 +0200
 Update POD on Dynamic sub-classing
 
 r6883 at Thesaurus (orig r6882):  ribasushi | 2009-06-30 17:36:38 +0200
  r6815 at Thesaurus (orig r6814):  ribasushi | 2009-06-28 10:32:42 +0200
  Branch to explore double joins on search_related
  r6816 at Thesaurus (orig r6815):  ribasushi | 2009-06-28 10:34:16 +0200
  The test case that started it all
  r6817 at Thesaurus (orig r6816):  ribasushi | 2009-06-28 10:35:11 +0200
  The proposed fix (do not add an extra join if it is already present in the topmost join)
  r6818 at Thesaurus (orig r6817):  ribasushi | 2009-06-28 11:04:26 +0200
  Minor omission
  r6819 at Thesaurus (orig r6818):  ribasushi | 2009-06-28 11:07:33 +0200
  Adjust a couple of tests for the new behavior (thus all of this might be backwards-incompatible to the point of being useless):
  The counts in t/90join_torture.t are now 5*3, not 5*3*3, as a second join is no longer induced by search_related.
  The raw SQL scan in t/prefetch/standard.t is just silly; I won't even try to understand it.
  Just to maintain the TreeLike folding, I added a 3rd children join, which search_related used to insert before the code changes.
 
 r6889 at Thesaurus (orig r6888):  ribasushi | 2009-06-30 19:36:11 +0200
 Todoify test for now
 r6890 at Thesaurus (orig r6889):  ribasushi | 2009-06-30 19:37:05 +0200
 Todoify test for now (2)
 r6892 at Thesaurus (orig r6891):  ribasushi | 2009-06-30 19:52:31 +0200
 Todoify test for now (3)
 r6903 at Thesaurus (orig r6902):  ribasushi | 2009-07-01 08:46:12 +0200
 Fixed deadlock test
 r6904 at Thesaurus (orig r6903):  ribasushi | 2009-07-01 12:22:00 +0200
 Clarify exception text
 r6907 at Thesaurus (orig r6906):  ribasushi | 2009-07-01 13:23:46 +0200
  r6821 at Thesaurus (orig r6820):  ribasushi | 2009-06-28 13:09:11 +0200
  Branch for prefetch+group play
  r6823 at Thesaurus (orig r6822):  ribasushi | 2009-06-28 14:38:36 +0200
  Normalize group_by
  r6824 at Thesaurus (orig r6823):  ribasushi | 2009-06-28 14:39:54 +0200
  Proper prefetch+group test
  r6826 at Thesaurus (orig r6825):  ribasushi | 2009-06-28 14:42:48 +0200
  Whoops
  r6828 at Thesaurus (orig r6827):  ribasushi | 2009-06-28 15:06:57 +0200
  Lose the literal SQL bits - castaway is right, it's silly to support those
  r6833 at Thesaurus (orig r6832):  ribasushi | 2009-06-28 22:38:43 +0200
  Rogue comments
  r6837 at Thesaurus (orig r6836):  ribasushi | 2009-06-29 09:44:25 +0200
  A couple of test fixes
  r6838 at Thesaurus (orig r6837):  ribasushi | 2009-06-29 09:46:13 +0200
  Support for -select/-as in SQLAHacks field selection
  r6839 at Thesaurus (orig r6838):  ribasushi | 2009-06-29 09:49:53 +0200
  This is tested elsewhere
  r6840 at Thesaurus (orig r6839):  ribasushi | 2009-06-29 09:50:43 +0200
  This is tested elsewhere (2)
  r6841 at Thesaurus (orig r6840):  ribasushi | 2009-06-29 10:07:09 +0200
  Test cleanups
  r6842 at Thesaurus (orig r6841):  ribasushi | 2009-06-29 10:11:13 +0200
  Most of the grouped prefetch solution
  r6843 at Thesaurus (orig r6842):  ribasushi | 2009-06-29 10:14:45 +0200
  clearer
  r6845 at Thesaurus (orig r6844):  ribasushi | 2009-06-29 12:05:37 +0200
  And score! (all works)
  r6882 at Thesaurus (orig r6881):  ribasushi | 2009-06-30 16:23:06 +0200
  rs->get_column now properly recognizes prefetch and collapses if at all possible
  r6886 at Thesaurus (orig r6885):  ribasushi | 2009-06-30 17:39:58 +0200
  Whoops
 
 r6910 at Thesaurus (orig r6909):  ribasushi | 2009-07-01 13:27:15 +0200
 Optimize set_column on uninserted objects
 r6921 at Thesaurus (orig r6920):  caelum | 2009-07-01 17:40:32 +0200
  r5859 at hlagh (orig r6912):  caelum | 2009-07-01 06:21:30 -0700
  new connected() for dbd::sybase users
  r5860 at hlagh (orig r6913):  caelum | 2009-07-01 06:25:46 -0700
  add a couple of dbd::sybase reconnection tests
  r5861 at hlagh (orig r6914):  caelum | 2009-07-01 06:35:07 -0700
  better connection test
  r5862 at hlagh (orig r6915):  caelum | 2009-07-01 06:45:05 -0700
  use dbh->do for connected instead of prepare_cached
  r5863 at hlagh (orig r6916):  ribasushi | 2009-07-01 06:55:21 -0700
  Segfault
  r5864 at hlagh (orig r6917):  caelum | 2009-07-01 07:03:22 -0700
  use ->do instead of ->prepare_cached in oracle's connected() too
  r5865 at hlagh (orig r6918):  caelum | 2009-07-01 08:20:52 -0700
  fix segfault with old DBD::Sybase
  r5866 at hlagh (orig r6919):  caelum | 2009-07-01 08:39:18 -0700
  move connection tests into _ping()
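  To illustrate the pattern (an illustrative sketch, not the committed code): connected() stays generic, while each storage class supplies a cheap driver-specific round-trip in _ping():

      # sketch of a driver-specific _ping() override; the real
      # Sybase version differs, but the shape is the same
      sub _ping {
          my $self = shift;

          my $dbh = $self->_dbh or return 0;

          local $dbh->{RaiseError} = 1;
          local $dbh->{PrintError} = 0;

          # any cheap statement will do; failure means "gone away"
          return eval { $dbh->do('select 1'); 1 } ? 1 : 0;
      }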
 
 r6924 at Thesaurus (orig r6923):  ijw | 2009-07-01 19:34:32 +0200
 Added a test for a resultset to related-resultset join for 0 related records
 r6928 at Thesaurus (orig r6927):  ijw | 2009-07-01 20:04:16 +0200
 Additional tests on prefetch - illustrating the bug with left-join has_many (NULL row returned) and the one that results from the trivial fix (prefetch gives no artist)
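 A Test::More-style sketch of the zero-related-records case these tests cover, using the stock DBICTest Artist/CD relationship (assumed here, not copied from the new tests):

     # an artist with no cds must come back as exactly one row
     # under a prefetched left join, with an empty related resultset
     my $artist = $schema->resultset('Artist')->create({ name => 'CD-less' });

     my $rs = $schema->resultset('Artist')->search(
         { 'me.artistid' => $artist->id },
         { prefetch => 'cds' },   # LEFT JOIN, collapsed client-side
     );

     is( $rs->count, 1, 'count not inflated by the NULL cd row' );
     is( $rs->first->cds->count, 0, 'no phantom related records' );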
 r6932 at Thesaurus (orig r6931):  ribasushi | 2009-07-02 08:08:33 +0200
 Another candidate for somethingawful.com (fix left join-ed count)
 r6934 at Thesaurus (orig r6933):  ribasushi | 2009-07-02 09:04:13 +0200
 Changelog
 r6935 at Thesaurus (orig r6934):  ribasushi | 2009-07-02 11:23:48 +0200
 cleanup
 r6936 at Thesaurus (orig r6935):  ijw | 2009-07-02 12:41:01 +0200
 Check fetched rows == count for related resultsets
 r6937 at Thesaurus (orig r6936):  ijw | 2009-07-02 12:43:47 +0200
 Confirm prefetch doesn't affect main row fetch, and main row fetch works with and without counting
 r6938 at Thesaurus (orig r6937):  ribasushi | 2009-07-02 12:52:51 +0200
 More fail (fix is known but needs work)
 r6939 at Thesaurus (orig r6938):  ribasushi | 2009-07-02 13:07:22 +0200
 And more fail
 r6940 at Thesaurus (orig r6939):  ribasushi | 2009-07-02 13:16:46 +0200
 These tests are in prefetch/count.t
 r6941 at Thesaurus (orig r6940):  ribasushi | 2009-07-02 13:38:31 +0200
 cleanup
 r6942 at Thesaurus (orig r6941):  ribasushi | 2009-07-02 13:38:49 +0200
 Solve more prefetch inflation crap
 r6943 at Thesaurus (orig r6942):  ribasushi | 2009-07-02 13:47:41 +0200
 Make the code readable
 r6944 at Thesaurus (orig r6943):  ribasushi | 2009-07-02 15:52:35 +0200
 Everything works, just need to fix join-path chaining over search_related (to guard against obscure db quirks)
 r6946 at Thesaurus (orig r6945):  caelum | 2009-07-02 21:06:32 +0200
 add sybase reconnect test
 r6948 at Thesaurus (orig r6947):  ribasushi | 2009-07-02 22:20:21 +0200
 Last part of the join handling puzzle
 r6951 at Thesaurus (orig r6950):  ribasushi | 2009-07-03 00:14:50 +0200
  r6360 at Thesaurus (orig r6359):  arcanez | 2009-05-21 20:18:52 +0200
  branch to work on prefetch/select
  r6361 at Thesaurus (orig r6360):  arcanez | 2009-05-21 20:32:46 +0200
  failing test
  r6373 at Thesaurus (orig r6372):  ribasushi | 2009-05-22 11:07:26 +0200
  Simplify unresolvable test by arcanez
  r6905 at Thesaurus (orig r6904):  ribasushi | 2009-07-01 12:54:03 +0200
  Extend test
  r6950 at Thesaurus (orig r6949):  ribasushi | 2009-07-03 00:14:09 +0200
  Apparent fix - simply delay the in_storage flagging of the main object until all prefetched objects are inflated. The rest of the changes are just cosmetics, preparing for the collapse_result rewrite
 
 r6953 at Thesaurus (orig r6952):  ribasushi | 2009-07-03 00:17:22 +0200
 Changes
 r6965 at Thesaurus (orig r6964):  ribasushi | 2009-07-03 13:19:27 +0200
 Add set_ansi_mode on_connect_call for mysql
 Also switch to _do_query instead of plain dbh->do (shows up in the trace)
 r6966 at Thesaurus (orig r6965):  ribasushi | 2009-07-03 13:37:06 +0200
 Capitalize mysql commands
 r6967 at Thesaurus (orig r6966):  ribasushi | 2009-07-03 15:07:49 +0200
 Double an existing might_have test as has_one
 r6968 at Thesaurus (orig r6967):  ribasushi | 2009-07-03 16:36:32 +0200
 Extra test to demonstrate has_one working, and a POD clarification of multicreate
 r6973 at Thesaurus (orig r6972):  ribasushi | 2009-07-03 20:20:42 +0200
  r6554 at Thesaurus (orig r6553):  frew | 2009-06-09 00:06:42 +0200
  branch for mssql top issues
  r6572 at Thesaurus (orig r6571):  frew | 2009-06-09 23:18:46 +0200
  more tests for SQL Server!
  r6573 at Thesaurus (orig r6572):  frew | 2009-06-09 23:49:10 +0200
  Added AmbiguousGlob.pm for silly servers like mssql and mysql.  See docs for more info
  r6574 at Thesaurus (orig r6573):  frew | 2009-06-09 23:55:22 +0200
  fix plan
  r6602 at Thesaurus (orig r6601):  frew | 2009-06-10 17:03:30 +0200
  more failing tests
  r6608 at Thesaurus (orig r6607):  frew | 2009-06-10 20:05:53 +0200
  don't use eval!
  r6610 at Thesaurus (orig r6609):  frew | 2009-06-10 20:07:49 +0200
  beginning of DWIM for IDENTITY_INSERT
  r6628 at Thesaurus (orig r6627):  frew | 2009-06-11 18:13:02 +0200
  still busted :-(
  r6631 at Thesaurus (orig r6630):  frew | 2009-06-11 19:39:00 +0200
  general function to go from column names and ident to result source
  r6632 at Thesaurus (orig r6631):  frew | 2009-06-11 19:40:11 +0200
  Use new _resolve_column_sources method and begin insert_bulk method
  r6635 at Thesaurus (orig r6634):  frew | 2009-06-11 20:12:38 +0200
  updated _resolve_column_source to _resolve_column_info as per ribasushi's suggestion
  r6650 at Thesaurus (orig r6649):  frew | 2009-06-12 17:13:32 +0200
  Now I just need to check if the actual values are set...
  r6651 at Thesaurus (orig r6650):  frew | 2009-06-12 17:26:53 +0200
  Insert Identity works!
  r6652 at Thesaurus (orig r6651):  frew | 2009-06-12 17:34:13 +0200
  silly warns.
  r6684 at Thesaurus (orig r6683):  frew | 2009-06-15 16:49:00 +0200
  failing test
  r6686 at Thesaurus (orig r6685):  ribasushi | 2009-06-15 18:10:26 +0200
  make all resolved attrs visible to sqla
  r6698 at Thesaurus (orig r6697):  ribasushi | 2009-06-17 02:31:37 +0200
  Half way working stuff, needs a LOT of tweaking still
  r6729 at Thesaurus (orig r6728):  ribasushi | 2009-06-19 19:49:27 +0200
  Merge badness
  r6730 at Thesaurus (orig r6729):  ribasushi | 2009-06-19 19:49:40 +0200
  fix eol
  r6731 at Thesaurus (orig r6730):  ribasushi | 2009-06-19 19:55:47 +0200
  augment inheritance
  r6735 at Thesaurus (orig r6734):  ribasushi | 2009-06-20 10:34:42 +0200
  Maybe I've nailed it
  r6746 at Thesaurus (orig r6745):  ribasushi | 2009-06-20 23:53:55 +0200
  Test and merge fixes
  r6747 at Thesaurus (orig r6746):  ribasushi | 2009-06-21 00:01:09 +0200
  Really fix tests
  r6748 at Thesaurus (orig r6747):  ribasushi | 2009-06-21 00:01:54 +0200
  Really fix tests
  r6749 at Thesaurus (orig r6748):  ribasushi | 2009-06-21 00:18:33 +0200
  Now really final
  r6750 at Thesaurus (orig r6749):  ribasushi | 2009-06-21 00:22:23 +0200
  whoops
  r6751 at Thesaurus (orig r6750):  ribasushi | 2009-06-21 00:42:18 +0200
  That should be all
  r6752 at Thesaurus (orig r6751):  ribasushi | 2009-06-21 08:54:00 +0200
  Make sure quoting works
  r6755 at Thesaurus (orig r6754):  ribasushi | 2009-06-21 15:21:23 +0200
  Groundwork for sanification of the toplimit test
  r6863 at Thesaurus (orig r6862):  ribasushi | 2009-06-30 01:13:49 +0200
  Make sure storage classes use c3, just like the rest of dbic (tested on 5.8 as well)
  r6869 at Thesaurus (orig r6868):  ribasushi | 2009-06-30 09:53:27 +0200
  Some fixes after review
  r6874 at Thesaurus (orig r6873):  ribasushi | 2009-06-30 11:54:34 +0200
  Fix borked next invocation
  r6896 at Thesaurus (orig r6895):  frew | 2009-06-30 21:38:26 +0200
  silly misspells and trailing whitespace
  r6955 at Thesaurus (orig r6954):  ribasushi | 2009-07-03 01:21:28 +0200
  Some hack consolidation
  r6962 at Thesaurus (orig r6961):  ribasushi | 2009-07-03 12:06:57 +0200
  Fix some mssql shortcomings when confronted with the new subqueried prefetch sql
  r6963 at Thesaurus (orig r6962):  ribasushi | 2009-07-03 12:47:57 +0200
  Ask for newer DBD::Pg in author mode, suggest the newer version otherwise (proper array support). Make test more resilient as well
  r6964 at Thesaurus (orig r6963):  ribasushi | 2009-07-03 12:49:16 +0200
  Switch to C3 mro throughout the ::Storage hierarchy (DBIx::Class brings in MRO::Compat, and all ::Storage classes are based on it; tested on 5.8)
  r6969 at Thesaurus (orig r6968):  ribasushi | 2009-07-03 19:54:04 +0200
  Duh
  r6970 at Thesaurus (orig r6969):  frew | 2009-07-03 19:59:48 +0200
  fix tests for new codez
  r6971 at Thesaurus (orig r6970):  ribasushi | 2009-07-03 20:18:53 +0200
  detabify
  r6972 at Thesaurus (orig r6971):  ribasushi | 2009-07-03 20:20:07 +0200
  changes
 
 r6980 at Thesaurus (orig r6979):  ribasushi | 2009-07-04 11:34:08 +0200
 Hide devel documentation from the indexer
 r6981 at Thesaurus (orig r6980):  ribasushi | 2009-07-04 11:37:25 +0200
 Add set_ansi_mode POD
 r6982 at Thesaurus (orig r6981):  ribasushi | 2009-07-04 11:45:24 +0200
 Backout mysql changes for further polishing
 r6985 at Thesaurus (orig r6984):  ribasushi | 2009-07-04 12:08:16 +0200
 Missing newline
 r6986 at Thesaurus (orig r6985):  ribasushi | 2009-07-04 12:11:18 +0200
 typo
 r6987 at Thesaurus (orig r6986):  ribasushi | 2009-07-04 12:40:47 +0200
 Fix POD
 r6988 at Thesaurus (orig r6987):  ribasushi | 2009-07-04 13:09:39 +0200
 todos are shorter now
 r6990 at Thesaurus (orig r6989):  castaway | 2009-07-05 22:00:55 +0200
 Added Pod::Inherit use to Makefile.PL at author-time; comments/suggestions as to whether it's too "noisy" are welcome.
 
 r6991 at Thesaurus (orig r6990):  ribasushi | 2009-07-06 00:06:52 +0200
 Couple of makefile fixes:
 use() happens at compile time - use require() instead
 recommends is for distro maintainers only; push the dependency into the authors hash (it is not to be executed by mere mortals)
 
 r6992 at Thesaurus (orig r6991):  ribasushi | 2009-07-06 00:55:36 +0200
 Forgotten pod exclusions
 r6993 at Thesaurus (orig r6992):  ribasushi | 2009-07-06 01:07:05 +0200
 Temporarily backout Pod::Inherit changes
 r6994 at Thesaurus (orig r6993):  ribasushi | 2009-07-06 01:10:22 +0200
 Put Pod::Inherit stuff back after proper copy
 r7010 at Thesaurus (orig r7009):  ribasushi | 2009-07-09 12:45:02 +0200
  r6995 at Thesaurus (orig r6994):  ribasushi | 2009-07-06 01:12:57 +0200
  Where 08108 will come from
 
 r7028 at Thesaurus (orig r7027):  caelum | 2009-07-10 23:56:57 +0200
 fix PodInherit call in Makefile.PL
 r7030 at Thesaurus (orig r7029):  robkinyon | 2009-07-11 00:03:07 +0200
 Applied patch from kados regarding use of a DateTime::Format class to validate
 r7031 at Thesaurus (orig r7030):  caelum | 2009-07-11 11:26:40 +0200
 reword IC::DT doc patch
 r7038 at Thesaurus (orig r7037):  dandv | 2009-07-13 14:06:08 +0200
 PK::Auto has moved into Core since 2007
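 That is, result classes only need the following (a reminder-level sketch, not the commit's diff):

     # PK::Auto no longer needs listing - Core already includes it
     __PACKAGE__->load_components(qw/Core/);

     # instead of the pre-2007 style:
     # __PACKAGE__->load_components(qw/PK::Auto Core/);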
 r7039 at Thesaurus (orig r7038):  dandv | 2009-07-13 14:15:13 +0200
 Fixed has_many example in Intro.pod
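 For reference, the canonical shape of such a declaration (the usual Artist/CD example names, not the exact POD text):

     # one Artist row has many CD rows, linked via cds.artist_id
     __PACKAGE__->has_many(
         cds => 'My::Schema::Result::CD',
         'artist_id',
     );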
 r7040 at Thesaurus (orig r7039):  dandv | 2009-07-13 22:58:45 +0200
 Fixed run-on sentences in FAQ
 r7041 at Thesaurus (orig r7040):  dandv | 2009-07-13 23:18:11 +0200
 Minor POD fixes in Example.pod
 r7042 at Thesaurus (orig r7041):  dandv | 2009-07-13 23:48:18 +0200
 Favored using ->single to get the topmost result over less readable ->slice(0)
 r7043 at Thesaurus (orig r7042):  dandv | 2009-07-14 00:56:31 +0200
 Minor POD fixes in Cookbook
 r7046 at Thesaurus (orig r7045):  ribasushi | 2009-07-14 13:30:55 +0200
 Minor logic cleanup
 r7047 at Thesaurus (orig r7046):  ribasushi | 2009-07-14 14:07:11 +0200
 grouped prefetch fix
 r7054 at Thesaurus (orig r7053):  ijw | 2009-07-15 18:55:35 +0200
 Added SQLA link for more comprehensive documentation of order_by options available
 r7057 at Thesaurus (orig r7056):  caelum | 2009-07-16 00:54:22 +0200
 add "smalldatetime" support to IC::DT
 r7060 at Thesaurus (orig r7059):  ribasushi | 2009-07-16 06:29:41 +0200
  r7013 at Thesaurus (orig r7012):  jnapiorkowski | 2009-07-09 17:00:22 +0200
  new branch
  r7014 at Thesaurus (orig r7013):  jnapiorkowski | 2009-07-09 20:06:44 +0200
  changed the way transactions are detected for replication to work with the standard way to do this; minor doc updates; fixed the force pool so you can force a particular slave; changed the way the debugging is created
  r7015 at Thesaurus (orig r7014):  jnapiorkowski | 2009-07-09 20:17:03 +0200
  more changes to the way debug output works
  r7016 at Thesaurus (orig r7015):  jnapiorkowski | 2009-07-09 22:26:47 +0200
  big update to the test suite so that we now check to make sure the storage that was expected was actually used
  r7017 at Thesaurus (orig r7016):  jnapiorkowski | 2009-07-09 23:23:37 +0200
  set correct number of tests, changed the debugging output to not warn on DDL, minor change to a test resultclass so we can deploy to mysql properly
  r7018 at Thesaurus (orig r7017):  jnapiorkowski | 2009-07-09 23:26:59 +0200
  corrected the number of skipped tests
  r7019 at Thesaurus (orig r7018):  jnapiorkowski | 2009-07-09 23:52:22 +0200
  fixed test resultclass formatting, added a few more DBIC::Storage::DBI methods that I might need to delegate.
  r7020 at Thesaurus (orig r7019):  jnapiorkowski | 2009-07-10 01:23:07 +0200
  some documentation updates and changed the way we find paths for the sqlite dbfiles to use File::Spec, which I hope will solve some of the Win32 error messages
  r7023 at Thesaurus (orig r7022):  jnapiorkowski | 2009-07-10 18:00:38 +0200
  pod cleanup, fixed broken pod links, and new Introduction pod
  r7024 at Thesaurus (orig r7023):  jnapiorkowski | 2009-07-10 19:10:57 +0200
  updated Changes file to reflect work completed
  r7025 at Thesaurus (orig r7024):  jnapiorkowski | 2009-07-10 19:37:53 +0200
  a few more Moose Type related fixes and added diag to the replication test to report the moose and types version used, to help us debug some of the moose related errors being reported
  r7058 at Thesaurus (orig r7057):  ribasushi | 2009-07-16 06:28:44 +0200
  A couple of typos, and general whitespace cleanup (ick)
 
 r7063 at Thesaurus (orig r7062):  jnapiorkowski | 2009-07-16 17:03:32 +0200
 increased Moose version requirements due to changes in the way type constraints get validated, which is not backward compatible
 r7064 at Thesaurus (orig r7063):  dandv | 2009-07-17 03:37:28 +0200
 Minor POD grammar: it's -> its where appropriate
 r7075 at Thesaurus (orig r7074):  tomboh | 2009-07-20 18:20:37 +0200
 Fix POD changes from r7040.
 r7078 at Thesaurus (orig r7077):  norbi | 2009-07-21 00:59:30 +0200
 
 r7079 at Thesaurus (orig r7078):  norbi | 2009-07-21 00:59:58 +0200
  r7232 at vger:  mendel | 2009-07-21 00:58:12 +0200
  Fixed documentation and added test for the "Arbitrary SQL through a custom ResultSource" Cookbook alternate (subclassing) recipe.
 
 r7080 at Thesaurus (orig r7079):  norbi | 2009-07-21 01:05:32 +0200
  r7235 at vger:  mendel | 2009-07-21 01:05:18 +0200
  Fixed 'typo' (removed a word that I left there by accident).
 
 r7081 at Thesaurus (orig r7080):  norbi | 2009-07-21 10:06:21 +0200
  r7237 at vger:  mendel | 2009-07-21 10:06:05 +0200
  Fixing what my svk client screwed up.
 
 r7082 at Thesaurus (orig r7081):  caelum | 2009-07-21 16:51:55 +0200
 update Storage::Replicated prereqs
 r7083 at Thesaurus (orig r7082):  caelum | 2009-07-21 18:16:34 +0200
 show Oracle datetime_setup alter session statements in debug output
 r7086 at Thesaurus (orig r7085):  ribasushi | 2009-07-22 03:50:57 +0200
 Lazy folks do not run the whole test suite before merging >:( 
 r7100 at Thesaurus (orig r7097):  caelum | 2009-07-23 20:14:11 +0200
  r6092 at hlagh (orig r7090):  caelum | 2009-07-23 08:24:39 -0400
  new branch for fixing the MONEY type in MSSQL
  r6093 at hlagh (orig r7091):  caelum | 2009-07-23 08:34:01 -0400
  add test
  r6283 at hlagh (orig r7093):  caelum | 2009-07-23 10:31:08 -0400
  fix money columns
  r6284 at hlagh (orig r7094):  caelum | 2009-07-23 10:34:06 -0400
  minor change
  r6285 at hlagh (orig r7095):  caelum | 2009-07-23 11:01:37 -0400
  add test for updating money value to NULL
  r6286 at hlagh (orig r7096):  caelum | 2009-07-23 14:09:26 -0400
  add money type tests to dbd::sybase+mssql tests
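  The net effect (a hedged sketch with assumed names; the new t/lib/DBICTest/Schema/Money.pm test class added above is the authoritative version) is that money columns round-trip like any other numeric column:

      __PACKAGE__->add_columns(
          amount => { data_type => 'money', is_nullable => 1 },
      );

      # both now work against MSSQL:
      $row->update({ amount => 100.00 });
      $row->update({ amount => undef });   # the new NULL test case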
 
 r7129 at Thesaurus (orig r7126):  caelum | 2009-07-28 02:03:47 +0200
 add postgres "timestamp without time zone" support
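 That is, IC::DT now recognizes the full Postgres type name (sketch; the column name is assumed):

     __PACKAGE__->load_components('InflateColumn::DateTime');
     __PACKAGE__->add_columns(
         created_at => { data_type => 'timestamp without time zone' },
     );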
 r7143 at Thesaurus (orig r7140):  caelum | 2009-07-30 14:46:04 +0200
 update sqlite test schema
 r7148 at Thesaurus (orig r7145):  robkinyon | 2009-07-30 16:13:21 +0200
 Added prefetch caveats
 r7149 at Thesaurus (orig r7146):  robkinyon | 2009-07-30 16:20:02 +0200
 Fixed caveats
 r7152 at Thesaurus (orig r7149):  caelum | 2009-07-30 17:56:01 +0200
 make ::Oracle::Generic load without DBD::Oracle
 r7153 at Thesaurus (orig r7150):  caelum | 2009-07-30 18:04:47 +0200
 make sure DBD::Oracle is loaded when using constants from it
 r7157 at Thesaurus (orig r7154):  castaway | 2009-07-30 22:17:33 +0200
 Mangled Rob's example somewhat; it still needs explaining which circumstances exactly cause the broken results
 
 r7161 at Thesaurus (orig r7158):  mo | 2009-07-31 12:51:20 +0200
 POD fix
 r7162 at Thesaurus (orig r7159):  mo | 2009-07-31 12:52:42 +0200
 undo that attributes merge stuff
 r7169 at Thesaurus (orig r7166):  castaway | 2009-08-02 12:41:25 +0200
 Mention ResultSet, ResultSource and Row in synopsis
 
 r7170 at Thesaurus (orig r7167):  castaway | 2009-08-02 14:10:53 +0200
 Docs: Explanations of result sources and how to find them
 
 r7175 at Thesaurus (orig r7172):  ribasushi | 2009-08-03 11:01:44 +0200
 Disable Pod::Inherit makefile calls, until we get to version 0.02
 r7179 at Thesaurus (orig r7176):  ribasushi | 2009-08-03 11:51:42 +0200
  r6983 at Thesaurus (orig r6982):  ribasushi | 2009-07-04 11:46:57 +0200
  New branch to experiment with a sanifying mysql on_connect_call
  r6984 at Thesaurus (orig r6983):  ribasushi | 2009-07-04 11:49:44 +0200
  Initial set_ansi_mode code - make sure to utilize _do_query instead of dbh->do, so the result is visible in the trace
  r6987 at Thesaurus (orig r6986):  ribasushi | 2009-07-04 12:40:47 +0200
  Fix POD
  r7178 at Thesaurus (orig r7175):  ribasushi | 2009-08-03 11:51:15 +0200
  Wrap up set_strict_mode for mysql
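  As with the other connect calls, the string form dispatches by method name, so enabling it looks like this (sketch; DSN and credentials are placeholders):

      my $schema = My::Schema->connect(
          'dbi:mysql:database=mydb', $user, $pass,
          { on_connect_call => 'set_strict_mode' },
      );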
 
 r7181 at Thesaurus (orig r7178):  ribasushi | 2009-08-03 12:41:32 +0200
 Sanify unqualified column bindtype handling
 Silence a warning when using a custom {from}
 r7201 at Thesaurus (orig r7198):  caelum | 2009-08-04 22:18:27 +0200
 update Changes
 r7208 at Thesaurus (orig r7205):  ribasushi | 2009-08-05 08:34:25 +0200
 Bump dependencies:
 Test::More for the new no_plan/done_testing goodies
 File::Temp as per RT#48431
 r7210 at Thesaurus (orig r7207):  ribasushi | 2009-08-05 08:36:32 +0200
  r7156 at Thesaurus (orig r7153):  robkinyon | 2009-07-30 20:06:04 +0200
  Create prefetch_redux branch
  r7164 at Thesaurus (orig r7161):  robkinyon | 2009-07-31 22:41:01 +0200
  Added MooseX::Traits to Makefile.PL
  r7172 at Thesaurus (orig r7169):  robkinyon | 2009-08-03 05:49:59 +0200
  Added two tests and marked one todo_skip
  r7187 at Thesaurus (orig r7184):  ribasushi | 2009-08-03 17:24:41 +0200
  Use goto to preserve correct error-at-line reporting
  r7189 at Thesaurus (orig r7186):  ribasushi | 2009-08-04 12:34:58 +0200
  Add an extra test specifically for distinct/prefetch
  Remove duplicate test in count/prefetch
  
  Switch to as_query instead of debug overloading
  r7190 at Thesaurus (orig r7187):  ribasushi | 2009-08-04 12:35:57 +0200
  Fix how a distinct-induced group_by is calculated, taking into consideration the new prefetch mechanism
  r7197 at Thesaurus (orig r7194):  ribasushi | 2009-08-04 17:31:33 +0200
  Traits not needed by anything currently in dbic
  r7198 at Thesaurus (orig r7195):  ribasushi | 2009-08-04 17:41:14 +0200
  Move around tests a bit
  r7199 at Thesaurus (orig r7196):  mo | 2009-08-04 21:10:57 +0200
  prefetch-grouped fails, again
  r7204 at Thesaurus (orig r7201):  ribasushi | 2009-08-04 22:50:51 +0200
  Split the search_related prefetch tests into a standalone testfile
  r7205 at Thesaurus (orig r7202):  ribasushi | 2009-08-04 23:05:03 +0200
  Move norbi's test to prefetch_redux - it's the same idea
  r7209 at Thesaurus (orig r7206):  ribasushi | 2009-08-05 08:35:48 +0200
  Tadaaaa (even more prefetch insanity)
 
 r7212 at Thesaurus (orig r7209):  ribasushi | 2009-08-05 08:38:41 +0200
  r7107 at Thesaurus (orig r7104):  caelum | 2009-07-24 06:51:57 +0200
  new branch to move common mssql functionality into the base class, and other tweaks
  r7109 at Thesaurus (orig r7106):  caelum | 2009-07-24 07:28:11 +0200
  moved code to ::DBI::MSSQL and added DT inflation test
  r7112 at Thesaurus (orig r7109):  caelum | 2009-07-24 08:46:16 +0200
  merge in some more MSSQL code, including odbc dynamic cursor support
  r7113 at Thesaurus (orig r7110):  caelum | 2009-07-24 08:49:54 +0200
  fix a warning in SQLAHacks
  r7114 at Thesaurus (orig r7111):  caelum | 2009-07-24 09:22:33 +0200
  add placeholder support detection for mssql through dbd::sybase
  r7118 at Thesaurus (orig r7115):  caelum | 2009-07-24 16:39:06 +0200
  minor doc clarification
  r7122 at Thesaurus (orig r7119):  caelum | 2009-07-25 16:10:30 +0200
  move placeholder support detection into ::Sybase::Base
  r7123 at Thesaurus (orig r7120):  caelum | 2009-07-25 16:12:01 +0200
  add a comment
  r7127 at Thesaurus (orig r7124):  caelum | 2009-07-26 18:04:29 +0200
  SAVEPOINT methods for MSSQL
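  For illustration, a minimal sketch of how savepoints surface through the
  public API, assuming auto_savepoint is enabled in connect_info (the
  resultset name is hypothetical):
 
    my $schema = My::Schema->connect($dsn, $user, $pass, { auto_savepoint => 1 });
 
    $schema->txn_do(sub {
      $schema->resultset('Artist')->create({ name => 'outer' });
      $schema->txn_do(sub {   # the nested transaction maps onto a SAVEPOINT
        $schema->resultset('Artist')->create({ name => 'inner' });
      });
    });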
  r7140 at Thesaurus (orig r7137):  caelum | 2009-07-30 10:12:45 +0200
  better tests for "smalldatetime" support in MSSQL
  r7142 at Thesaurus (orig r7139):  caelum | 2009-07-30 13:29:19 +0200
  MSSQL GUID support
  r7147 at Thesaurus (orig r7144):  caelum | 2009-07-30 15:38:33 +0200
  update sqlite test schema
  r7150 at Thesaurus (orig r7147):  caelum | 2009-07-30 16:26:47 +0200
  make sure the new mssql insert method works on an un-reblessed storage
  r7151 at Thesaurus (orig r7148):  caelum | 2009-07-30 16:55:35 +0200
  better rebless check for insert
  r7154 at Thesaurus (orig r7151):  caelum | 2009-07-30 18:57:22 +0200
  add missing file
  r7155 at Thesaurus (orig r7152):  caelum | 2009-07-30 19:00:40 +0200
  fix syntax error
  r7163 at Thesaurus (orig r7160):  caelum | 2009-07-31 15:52:41 +0200
  fix a bug in _determine_driver
  r7166 at Thesaurus (orig r7163):  caelum | 2009-08-01 18:10:23 +0200
  default collist for storage _resolve_column_info
  r7182 at Thesaurus (orig r7179):  caelum | 2009-08-03 13:42:31 +0200
  check that dynamic cursors are functional if enabled
  r7184 at Thesaurus (orig r7181):  ribasushi | 2009-08-03 14:23:37 +0200
  Adjust expected sql to match the new 'Track' table definition
  r7186 at Thesaurus (orig r7183):  ribasushi | 2009-08-03 15:16:10 +0200
  Simplify code and add some comments
  r7200 at Thesaurus (orig r7197):  caelum | 2009-08-04 21:31:16 +0200
  update oracle tests for new "track" table
  r7203 at Thesaurus (orig r7200):  caelum | 2009-08-04 22:39:57 +0200
  update Changes
 
 r7214 at Thesaurus (orig r7211):  ribasushi | 2009-08-05 08:40:39 +0200
  r7213 at Thesaurus (orig r7210):  ribasushi | 2009-08-05 08:40:20 +0200
  Really sanify _resolve_column_info
 
 r7216 at Thesaurus (orig r7213):  ribasushi | 2009-08-05 10:19:37 +0200
 Reminder about discard_changes and friends
 r7217 at Thesaurus (orig r7214):  ribasushi | 2009-08-05 10:26:20 +0200
 Reformat and fill-in changes
 r7218 at Thesaurus (orig r7215):  caelum | 2009-08-05 10:37:12 +0200
 rename connect_call_use_mars to connect_call_use_MARS
 r7219 at Thesaurus (orig r7216):  ribasushi | 2009-08-05 10:38:14 +0200
 Silence a TODO test
 r7220 at Thesaurus (orig r7217):  caelum | 2009-08-05 10:46:11 +0200
 minor Changes update
 r7230 at Thesaurus (orig r7227):  castaway | 2009-08-05 14:57:52 +0200
 Minty's conversion of cookbook "arbitrary sql" to use ResultSource::View, plus some examples in ::View itself.
 Some style tweaks of mine
 
 r7231 at Thesaurus (orig r7228):  ribasushi | 2009-08-05 15:41:28 +0200
 Dynamically load necessary table classes
 r7236 at Thesaurus (orig r7233):  caelum | 2009-08-05 19:49:51 +0200
 fix rounding issues in mssql money tests
 r7237 at Thesaurus (orig r7234):  caelum | 2009-08-05 20:09:03 +0200
 better money value comparison in tests
 r7239 at Thesaurus (orig r7236):  frew | 2009-08-05 20:53:32 +0200
 whitespace cleanup
 r7240 at Thesaurus (orig r7237):  frew | 2009-08-05 20:54:41 +0200
 Fix testing bug.  Windows only.
 r7256 at Thesaurus (orig r7253):  ribasushi | 2009-08-07 11:19:35 +0200
  r7232 at Thesaurus (orig r7229):  jnapiorkowski | 2009-08-05 16:56:32 +0200
  added test for the new default force pool behavior in PK->discard_changes and cleaned up the related tests a bit to give more meaningful info
  r7233 at Thesaurus (orig r7230):  jnapiorkowski | 2009-08-05 16:57:45 +0200
  oops - typo in test status messages
  r7234 at Thesaurus (orig r7231):  jnapiorkowski | 2009-08-05 17:03:46 +0200
  added the default attrs to solve the failing test recently committed
  r7235 at Thesaurus (orig r7232):  jnapiorkowski | 2009-08-05 17:58:44 +0200
  added test to make sure you can override the default attributes to discard_changes
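  A minimal sketch of the behavior under test - discard_changes now defaults
  to the master pool, and the attributes can be overridden per call (the
  replicant identifier is a placeholder):
 
    $row->discard_changes;                               # re-reads from 'master'
    $row->discard_changes({ force_pool => $replicant }); # explicit override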
  r7241 at Thesaurus (orig r7238):  jnapiorkowski | 2009-08-05 22:00:58 +0200
  added replication as an optional feature to make installing it easier
  r7253 at Thesaurus (orig r7250):  ribasushi | 2009-08-07 11:06:41 +0200
  Streamline makefile dep handling
  r7254 at Thesaurus (orig r7251):  ribasushi | 2009-08-07 11:07:14 +0200
  Switch to done_testing
  r7255 at Thesaurus (orig r7252):  ribasushi | 2009-08-07 11:19:13 +0200
  Move discard_changes code to Row.pm, better docs
 
 r7257 at Thesaurus (orig r7254):  ribasushi | 2009-08-07 11:21:35 +0200
 Remove merged branch
 r7259 at Thesaurus (orig r7256):  ribasushi | 2009-08-07 14:16:13 +0200
 Fix bogus POD
 r7261 at Thesaurus (orig r7258):  ribasushi | 2009-08-07 17:22:58 +0200
 per mst: no optional deps
 r7262 at Thesaurus (orig r7259):  ribasushi | 2009-08-08 17:02:39 +0200
 Stop using discard_changes() in Ordered (had I known it would be *that* complex I would not have touched it)
 r7265 at Thesaurus (orig r7262):  ribasushi | 2009-08-08 17:49:19 +0200
  r7032 at Thesaurus (orig r7031):  caelum | 2009-07-11 11:28:52 +0200
  new branch to reduce connected() calls
  r7033 at Thesaurus (orig r7032):  caelum | 2009-07-11 13:07:41 +0200
  added failing test
  r7034 at Thesaurus (orig r7033):  caelum | 2009-07-11 14:36:53 +0200
  minor optimization
  r7048 at Thesaurus (orig r7047):  caelum | 2009-07-14 15:09:47 +0200
  substantially reduced ping count, dynamic cursors support for mssql through odbc
  r7050 at Thesaurus (orig r7049):  caelum | 2009-07-14 16:06:39 +0200
  a couple more options for odbc/mssql
  r7052 at Thesaurus (orig r7051):  caelum | 2009-07-15 00:14:09 +0200
  unfuck ensure_connected for odbc/mssql
  r7055 at Thesaurus (orig r7054):  caelum | 2009-07-15 21:10:27 +0200
  rename _scope_identity to _identity for odbc/mssql
  r7056 at Thesaurus (orig r7055):  caelum | 2009-07-16 00:41:45 +0200
  add IC::DT tests for odbc/mssql
  r7069 at Thesaurus (orig r7068):  caelum | 2009-07-17 11:47:31 +0200
  don't run connection actions if ->_rebless does not connect
  r7108 at Thesaurus (orig r7105):  caelum | 2009-07-24 07:26:13 +0200
  moving test to another branch
  r7110 at Thesaurus (orig r7107):  caelum | 2009-07-24 07:52:33 +0200
  revert odbc/mssql code to trunk and move it to another branch
  r7111 at Thesaurus (orig r7108):  caelum | 2009-07-24 08:13:35 +0200
  revert t/746mssql.t to trunk and move to another branch
  r7224 at Thesaurus (orig r7221):  caelum | 2009-08-05 11:48:04 +0200
  update branch after pull
  r7225 at Thesaurus (orig r7222):  ribasushi | 2009-08-05 12:09:07 +0200
  Rename last_dbh and turn it into a public method
  r7226 at Thesaurus (orig r7223):  ribasushi | 2009-08-05 12:12:20 +0200
  Whoopsie - more renames
  r7227 at Thesaurus (orig r7224):  ribasushi | 2009-08-05 12:32:09 +0200
  Changes and a deploy() fix
  r7228 at Thesaurus (orig r7225):  ribasushi | 2009-08-05 12:36:01 +0200
  We do not count pings during deploy - they are expected
  r7229 at Thesaurus (orig r7226):  ribasushi | 2009-08-05 12:49:06 +0200
  Clarify autocommit default
  r7238 at Thesaurus (orig r7235):  caelum | 2009-08-05 20:39:47 +0200
  fix up txn_begin and the ping_count test
  r7263 at Thesaurus (orig r7260):  ribasushi | 2009-08-08 17:40:19 +0200
  A more straightforward txn_begin fix, some more test fixes
 
 r7270 at Thesaurus (orig r7267):  ribasushi | 2009-08-09 00:34:31 +0200
  r6822 at Thesaurus (orig r6821):  caelum | 2009-06-28 14:38:12 +0200
  branch
  r6825 at Thesaurus (orig r6824):  caelum | 2009-06-28 14:40:37 +0200
  ->table(\"table")
  r6827 at Thesaurus (orig r6826):  caelum | 2009-06-28 14:55:06 +0200
  revert
  r6829 at Thesaurus (orig r6828):  caelum | 2009-06-28 15:57:40 +0200
   r5742 at hlagh (orig r6819):  ribasushi | 2009-06-28 04:00:03 -0700
   The prefetch+group_by is a complex problem - branch
  
  r6834 at Thesaurus (orig r6833):  caelum | 2009-06-28 23:24:47 +0200
  ->table(\"foo") now works
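  For illustration, a minimal sketch of the feature (class and table names
  are hypothetical) - a scalar ref passed to table() goes into the generated
  SQL verbatim, which allows subselect-style virtual sources:
 
    package My::Schema::Result::CDSubselect;
    use base 'DBIx::Class::Core';
 
    __PACKAGE__->table(\'(SELECT cdid, title FROM cd) cd_sub');
    __PACKAGE__->add_columns(qw/cdid title/);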
  r6835 at Thesaurus (orig r6834):  caelum | 2009-06-29 03:54:31 +0200
  another test
  r6849 at Thesaurus (orig r6848):  caelum | 2009-06-29 21:39:26 +0200
  separated table ref test out, changed CDTableRef to a view with less rels
  r6852 at Thesaurus (orig r6851):  caelum | 2009-06-29 22:56:45 +0200
  changed CD to ->table(\"cd")
  r6853 at Thesaurus (orig r6852):  caelum | 2009-06-29 23:13:48 +0200
  fix t/80unique.t
  r6857 at Thesaurus (orig r6856):  caelum | 2009-06-29 23:45:19 +0200
  branch pushed, removing
  r6858 at Thesaurus (orig r6857):  caelum | 2009-06-29 23:46:54 +0200
  removing debug statement
  r6860 at Thesaurus (orig r6859):  ribasushi | 2009-06-30 00:03:21 +0200
  Minor fixes
  r6861 at Thesaurus (orig r6860):  ribasushi | 2009-06-30 00:25:27 +0200
  This is sloppy, but sqlt is sloppy too. All tests pass now; all we really need is to intercept name() set-calls and use a virtual view (the only legit setter is the new() call in ResultSourceProxy::Table)
  r6867 at Thesaurus (orig r6866):  caelum | 2009-06-30 03:34:02 +0200
  forgot to use Scalar::Util ()
  r7007 at Thesaurus (orig r7006):  caelum | 2009-07-09 07:37:22 +0200
   r5766 at hlagh (orig r6843):  abraxxa | 2009-06-29 02:02:17 -0700
   fixed typo in test
   
   r5779 at hlagh (orig r6847):  ribasushi | 2009-06-29 10:09:00 -0700
   Minor Ordered optimization (don't use count)
   r5787 at hlagh (orig r6855):  caelum | 2009-06-29 14:42:11 -0700
    r5451 at hlagh (orig r6605):  caelum | 2009-06-10 09:23:44 -0700
    new branch to implement on_connect_call
    r5484 at hlagh (orig r6633):  caelum | 2009-06-11 11:03:10 -0700
    on_connect_call implementation and set_datetime_format support for Oracle
    r5492 at hlagh (orig r6641):  caelum | 2009-06-11 16:39:28 -0700
    connect_call_set_datetime_format for Oracle, I have no idea why this didn't get committed before...
    r5504 at hlagh (orig r6655):  caelum | 2009-06-12 17:28:06 -0700
    finished up on_connect_call stuff
    r5507 at hlagh (orig r6658):  caelum | 2009-06-13 04:03:36 -0700
    fixup _setup_connect_do, other minor cleanups
    r5508 at hlagh (orig r6659):  caelum | 2009-06-13 04:35:33 -0700
    make the on_(dis)?connect_do accessors return the original structure
    r5509 at hlagh (orig r6660):  caelum | 2009-06-13 08:31:52 -0700
    allow undef for _setup_connect_do
    r5522 at hlagh (orig r6679):  caelum | 2009-06-14 09:56:40 -0700
    rename connect_do store
    r5621 at hlagh (orig r6769):  caelum | 2009-06-23 07:38:33 -0700
    minor doc update
    r5628 at hlagh (orig r6777):  caelum | 2009-06-23 16:36:12 -0700
    properly test nanosecond precision with oracle and datetime_setup
    r5669 at hlagh (orig r6784):  caelum | 2009-06-24 10:49:25 -0700
    IC::DT does support timestamp with timezone
    r5768 at hlagh (orig r6846):  caelum | 2009-06-29 08:20:32 -0700
    remove DateTime from 73oracle.t
    r5781 at hlagh (orig r6849):  caelum | 2009-06-29 13:07:43 -0700
    remove the _store stuff for on_connect_do
    r5785 at hlagh (orig r6853):  ribasushi | 2009-06-29 14:38:30 -0700
    Some beautification
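    For illustration, a sketch of how the finished call might be enabled
    (DSN and credentials are placeholders):
 
      my $schema = My::Schema->connect($dsn, $user, $pass, {
        on_connect_call => 'datetime_setup',  # sets the Oracle NLS date/timestamp formats
      });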
   
   r5802 at hlagh (orig r6870):  ribasushi | 2009-06-30 01:09:03 -0700
   Cleanup dependency handling a bit
   r5806 at hlagh (orig r6874):  ribasushi | 2009-06-30 03:39:06 -0700
   Allow broken resultsource-class-derived objects to still work
   r5807 at hlagh (orig r6875):  ribasushi | 2009-06-30 03:40:46 -0700
   clarify
   r5835 at hlagh (orig r6877):  ash | 2009-06-30 04:48:13 -0700
   Update POD on Dynamic sub-classing
   
   r5837 at hlagh (orig r6882):  ribasushi | 2009-06-30 08:36:38 -0700
    r6815 at Thesaurus (orig r6814):  ribasushi | 2009-06-28 10:32:42 +0200
    Branch to explore double joins on search_related
    r6816 at Thesaurus (orig r6815):  ribasushi | 2009-06-28 10:34:16 +0200
    The test case that started it all
    r6817 at Thesaurus (orig r6816):  ribasushi | 2009-06-28 10:35:11 +0200
    The proposed fix (do not add an extra join if it is already present in the topmost join)
    r6818 at Thesaurus (orig r6817):  ribasushi | 2009-06-28 11:04:26 +0200
    Minor omission
    r6819 at Thesaurus (orig r6818):  ribasushi | 2009-06-28 11:07:33 +0200
    Adjust a couple of tests for new behavior (thus all of this might be backwards incompatible to the point of being useless):
    The counts in t/90join_torture.t are now 5*3, not 5*3*3, as a second join is not induced by search_related
    The raw sql scan in t/prefetch/standard.t is just silly, won't even try to understand it
    Just to maintain the TreeLike folding, I add a 3rd children join which was inserted by search_related before the code changes
   
   r5843 at hlagh (orig r6888):  ribasushi | 2009-06-30 10:36:11 -0700
   Todoify test for now
   r5844 at hlagh (orig r6889):  ribasushi | 2009-06-30 10:37:05 -0700
   Todoify test for now (2)
   r5846 at hlagh (orig r6891):  ribasushi | 2009-06-30 10:52:31 -0700
   Todoify test for now (3)
   r5850 at hlagh (orig r6902):  ribasushi | 2009-06-30 23:46:12 -0700
   Fixed deadlock test
   r5851 at hlagh (orig r6903):  ribasushi | 2009-07-01 03:22:00 -0700
   Clarify exception text
   r5854 at hlagh (orig r6906):  ribasushi | 2009-07-01 04:23:46 -0700
    r6821 at Thesaurus (orig r6820):  ribasushi | 2009-06-28 13:09:11 +0200
    Branch for prefetch+group play
    r6823 at Thesaurus (orig r6822):  ribasushi | 2009-06-28 14:38:36 +0200
    Normalize group_by
    r6824 at Thesaurus (orig r6823):  ribasushi | 2009-06-28 14:39:54 +0200
    Proper prefetch+group test
    r6826 at Thesaurus (orig r6825):  ribasushi | 2009-06-28 14:42:48 +0200
    Whoops
    r6828 at Thesaurus (orig r6827):  ribasushi | 2009-06-28 15:06:57 +0200
    Lose the literal sql bits - castaway is right it's silly to support those
    r6833 at Thesaurus (orig r6832):  ribasushi | 2009-06-28 22:38:43 +0200
    Rogue comments
    r6837 at Thesaurus (orig r6836):  ribasushi | 2009-06-29 09:44:25 +0200
    A couple of test fixes
    r6838 at Thesaurus (orig r6837):  ribasushi | 2009-06-29 09:46:13 +0200
    Support for -select/-as in SQLAHacks field selection
    r6839 at Thesaurus (orig r6838):  ribasushi | 2009-06-29 09:49:53 +0200
    This is tested elsewhere
    r6840 at Thesaurus (orig r6839):  ribasushi | 2009-06-29 09:50:43 +0200
    This is tested elsewhere (2)
    r6841 at Thesaurus (orig r6840):  ribasushi | 2009-06-29 10:07:09 +0200
    Test cleanups
    r6842 at Thesaurus (orig r6841):  ribasushi | 2009-06-29 10:11:13 +0200
    Most of the grouped prefetch solution
    r6843 at Thesaurus (orig r6842):  ribasushi | 2009-06-29 10:14:45 +0200
    clearer
    r6845 at Thesaurus (orig r6844):  ribasushi | 2009-06-29 12:05:37 +0200
    And score! (all works)
    r6882 at Thesaurus (orig r6881):  ribasushi | 2009-06-30 16:23:06 +0200
    rs->get_column now properly recognizes prefetch and collapses if at all possible
    r6886 at Thesaurus (orig r6885):  ribasushi | 2009-06-30 17:39:58 +0200
    Whoops
   
   r5857 at hlagh (orig r6909):  ribasushi | 2009-07-01 04:27:15 -0700
   Optimize set_column on uninserted objects
   r5867 at hlagh (orig r6920):  caelum | 2009-07-01 08:40:32 -0700
    r5859 at hlagh (orig r6912):  caelum | 2009-07-01 06:21:30 -0700
    new connected() for dbd::sybase users
    r5860 at hlagh (orig r6913):  caelum | 2009-07-01 06:25:46 -0700
    add a couple of dbd::sybase reconnection tests
    r5861 at hlagh (orig r6914):  caelum | 2009-07-01 06:35:07 -0700
    better connection test
    r5862 at hlagh (orig r6915):  caelum | 2009-07-01 06:45:05 -0700
    use dbh->do for connected instead of prepare_cached
    r5863 at hlagh (orig r6916):  ribasushi | 2009-07-01 06:55:21 -0700
    Segfault
    r5864 at hlagh (orig r6917):  caelum | 2009-07-01 07:03:22 -0700
    use ->do instead of ->prepare_cached in oracle's connected() too
    r5865 at hlagh (orig r6918):  caelum | 2009-07-01 08:20:52 -0700
    fix segfault with old DBD::Sybase
    r5866 at hlagh (orig r6919):  caelum | 2009-07-01 08:39:18 -0700
    move connection tests into _ping()
   
   r5873 at hlagh (orig r6923):  ijw | 2009-07-01 10:34:32 -0700
   Added a test for a resultset to related-resultset join for 0 related records
   r5874 at hlagh (orig r6927):  ijw | 2009-07-01 11:04:16 -0700
   Additional tests on prefetch - illustrates the bug with left-join has_many (NULL row returned) and the one that results from the trivial fix (prefetch gives no artist)
   r5876 at hlagh (orig r6931):  ribasushi | 2009-07-01 23:08:33 -0700
   Another candidate for somethingawful.com (fix left-joined count)
   r5877 at hlagh (orig r6933):  ribasushi | 2009-07-02 00:04:13 -0700
   Changelog
   r5878 at hlagh (orig r6934):  ribasushi | 2009-07-02 02:23:48 -0700
   cleanup
   r5879 at hlagh (orig r6935):  ijw | 2009-07-02 03:41:01 -0700
   Check fetched rows == count for related resultsets
   r5880 at hlagh (orig r6936):  ijw | 2009-07-02 03:43:47 -0700
   Confirm prefetch doesn't affect main row fetch, and main row fetch works with and without counting
   r5881 at hlagh (orig r6937):  ribasushi | 2009-07-02 03:52:51 -0700
   More fail (fix is known but needs work)
   r5882 at hlagh (orig r6938):  ribasushi | 2009-07-02 04:07:22 -0700
   And more fail
   r5883 at hlagh (orig r6939):  ribasushi | 2009-07-02 04:16:46 -0700
   These tests are in prefetch/count.t
   r5884 at hlagh (orig r6940):  ribasushi | 2009-07-02 04:38:31 -0700
   cleanup
   r5885 at hlagh (orig r6941):  ribasushi | 2009-07-02 04:38:49 -0700
   Solve more prefetch inflation crap
   r5886 at hlagh (orig r6942):  ribasushi | 2009-07-02 04:47:41 -0700
   Make the code readable
   r5887 at hlagh (orig r6943):  ribasushi | 2009-07-02 06:52:35 -0700
   Everything works, just need to fix join-path chaining over search_related (to guard against obscure db quirks)
   r5889 at hlagh (orig r6945):  caelum | 2009-07-02 12:06:32 -0700
   add sybase reconnect test
   r5891 at hlagh (orig r6947):  ribasushi | 2009-07-02 13:20:21 -0700
   Last part of the join handling puzzle
   r5894 at hlagh (orig r6950):  ribasushi | 2009-07-02 15:14:50 -0700
    r6360 at Thesaurus (orig r6359):  arcanez | 2009-05-21 20:18:52 +0200
    branch to work on prefetch/select
    r6361 at Thesaurus (orig r6360):  arcanez | 2009-05-21 20:32:46 +0200
    failing test
    r6373 at Thesaurus (orig r6372):  ribasushi | 2009-05-22 11:07:26 +0200
    Simplify unresolvable test by arcanez
    r6905 at Thesaurus (orig r6904):  ribasushi | 2009-07-01 12:54:03 +0200
    Extend test
    r6950 at Thesaurus (orig r6949):  ribasushi | 2009-07-03 00:14:09 +0200
    Apparent fix - simply delay the in_storage flagging of the main object until all prefetched objects are inflated. The rest of the changes are just cosmetics, preparing for the collapse_result rewrite
   
   r5896 at hlagh (orig r6952):  ribasushi | 2009-07-02 15:17:22 -0700
   Changes
   r5909 at hlagh (orig r6964):  ribasushi | 2009-07-03 04:19:27 -0700
   Add set_ansi_mode on_connect_call for mysql
   Also switch to _do_query instead of plain dbh->do (shows up in the trace)
   r5910 at hlagh (orig r6965):  ribasushi | 2009-07-03 04:37:06 -0700
   Capitalize mysql commands
   r5911 at hlagh (orig r6966):  ribasushi | 2009-07-03 06:07:49 -0700
   Double an existing might_have test as has_one
   r5912 at hlagh (orig r6967):  ribasushi | 2009-07-03 07:36:32 -0700
   Extra test to demonstrate has_one working, and a POD clarification of multicreate
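   As a rough illustration of the clarified multicreate behavior, might_have
   and has_one rows can be created inline with the main row (names here are
   hypothetical):
 
     $schema->resultset('CD')->create({
       title       => 'Some CD',
       year        => 2009,
       artist      => { name => 'Some Artist' },     # belongs_to
       liner_notes => { notes => 'some notes' },     # might_have / has_one
     });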
   r5917 at hlagh (orig r6972):  ribasushi | 2009-07-03 11:20:42 -0700
    r6554 at Thesaurus (orig r6553):  frew | 2009-06-09 00:06:42 +0200
    branch for mssql top issues
    r6572 at Thesaurus (orig r6571):  frew | 2009-06-09 23:18:46 +0200
    more tests for SQL Server!
    r6573 at Thesaurus (orig r6572):  frew | 2009-06-09 23:49:10 +0200
    Added AmbiguousGlob.pm for silly servers like mssql and mysql.  See docs for more info
    r6574 at Thesaurus (orig r6573):  frew | 2009-06-09 23:55:22 +0200
    fix plan
    r6602 at Thesaurus (orig r6601):  frew | 2009-06-10 17:03:30 +0200
    more failing tests
    r6608 at Thesaurus (orig r6607):  frew | 2009-06-10 20:05:53 +0200
    don't use eval!
    r6610 at Thesaurus (orig r6609):  frew | 2009-06-10 20:07:49 +0200
    beginning of DWIM for IDENTITY_INSERT
    r6628 at Thesaurus (orig r6627):  frew | 2009-06-11 18:13:02 +0200
    still busted :-(
    r6631 at Thesaurus (orig r6630):  frew | 2009-06-11 19:39:00 +0200
    general function to go from column names and ident to result source
    r6632 at Thesaurus (orig r6631):  frew | 2009-06-11 19:40:11 +0200
    Use new _resolve_column_sources method and begin insert_bulk method
    r6635 at Thesaurus (orig r6634):  frew | 2009-06-11 20:12:38 +0200
    updated _resolve_column_source to _resolve_column_info as per ribasushi's suggestion
    r6650 at Thesaurus (orig r6649):  frew | 2009-06-12 17:13:32 +0200
    Now I just need to check if the actual values are set...
    r6651 at Thesaurus (orig r6650):  frew | 2009-06-12 17:26:53 +0200
    Insert Identity works!
    r6652 at Thesaurus (orig r6651):  frew | 2009-06-12 17:34:13 +0200
    silly warns.
    r6684 at Thesaurus (orig r6683):  frew | 2009-06-15 16:49:00 +0200
    failing test
    r6686 at Thesaurus (orig r6685):  ribasushi | 2009-06-15 18:10:26 +0200
    make all resolved attrs visible to sqla
    r6698 at Thesaurus (orig r6697):  ribasushi | 2009-06-17 02:31:37 +0200
    Halfway-working stuff, still needs a LOT of tweaking
    r6729 at Thesaurus (orig r6728):  ribasushi | 2009-06-19 19:49:27 +0200
    Merge badness
    r6730 at Thesaurus (orig r6729):  ribasushi | 2009-06-19 19:49:40 +0200
    fix eol
    r6731 at Thesaurus (orig r6730):  ribasushi | 2009-06-19 19:55:47 +0200
    augment inheritance
    r6735 at Thesaurus (orig r6734):  ribasushi | 2009-06-20 10:34:42 +0200
    Maybe I've nailed it
    r6746 at Thesaurus (orig r6745):  ribasushi | 2009-06-20 23:53:55 +0200
    Test and merge fixes
    r6747 at Thesaurus (orig r6746):  ribasushi | 2009-06-21 00:01:09 +0200
    Really fix tests
    r6748 at Thesaurus (orig r6747):  ribasushi | 2009-06-21 00:01:54 +0200
    Really fix tests
    r6749 at Thesaurus (orig r6748):  ribasushi | 2009-06-21 00:18:33 +0200
    Now really final
    r6750 at Thesaurus (orig r6749):  ribasushi | 2009-06-21 00:22:23 +0200
    whoops
    r6751 at Thesaurus (orig r6750):  ribasushi | 2009-06-21 00:42:18 +0200
    That should be all
    r6752 at Thesaurus (orig r6751):  ribasushi | 2009-06-21 08:54:00 +0200
    Make sure quoting works
    r6755 at Thesaurus (orig r6754):  ribasushi | 2009-06-21 15:21:23 +0200
    Groundwork for sanification of the toplimit test
    r6863 at Thesaurus (orig r6862):  ribasushi | 2009-06-30 01:13:49 +0200
    Make sure storage classes use c3, just like the rest of dbic (tested on 5.8 as well)
    r6869 at Thesaurus (orig r6868):  ribasushi | 2009-06-30 09:53:27 +0200
    Some fixes after review
    r6874 at Thesaurus (orig r6873):  ribasushi | 2009-06-30 11:54:34 +0200
    Fix borked next invocation
    r6896 at Thesaurus (orig r6895):  frew | 2009-06-30 21:38:26 +0200
    silly misspells and trailing whitespace
    r6955 at Thesaurus (orig r6954):  ribasushi | 2009-07-03 01:21:28 +0200
    Some hack consolidation
    r6962 at Thesaurus (orig r6961):  ribasushi | 2009-07-03 12:06:57 +0200
    Fix some mssql shortcomings when confronted with the new subqueried prefetch sql
    r6963 at Thesaurus (orig r6962):  ribasushi | 2009-07-03 12:47:57 +0200
    Ask for newer DBD::Pg in author mode, suggest the newer version otherwise (proper array support). Make test more resilient as well
    r6964 at Thesaurus (orig r6963):  ribasushi | 2009-07-03 12:49:16 +0200
    Switch to C3 mro throughout the ::Storage hierarchy (DBIx::Class brings in MRO::Compat, and all ::Storage classes are based on it; tested on 5.8)
    r6969 at Thesaurus (orig r6968):  ribasushi | 2009-07-03 19:54:04 +0200
    Duh
    r6970 at Thesaurus (orig r6969):  frew | 2009-07-03 19:59:48 +0200
    fix tests for new codez
    r6971 at Thesaurus (orig r6970):  ribasushi | 2009-07-03 20:18:53 +0200
    detabify
    r6972 at Thesaurus (orig r6971):  ribasushi | 2009-07-03 20:20:07 +0200
    changes
   
   r5920 at hlagh (orig r6979):  ribasushi | 2009-07-04 02:34:08 -0700
   Hide devel documentation from the indexer
   r5921 at hlagh (orig r6980):  ribasushi | 2009-07-04 02:37:25 -0700
   Add set_ansi_mode POD
   r5922 at hlagh (orig r6981):  ribasushi | 2009-07-04 02:45:24 -0700
    Back out mysql changes for further polishing
   r5925 at hlagh (orig r6984):  ribasushi | 2009-07-04 03:08:16 -0700
   Missing newline
   r5926 at hlagh (orig r6985):  ribasushi | 2009-07-04 03:11:18 -0700
   typo
   r5927 at hlagh (orig r6986):  ribasushi | 2009-07-04 03:40:47 -0700
   Fix POD
   r5928 at hlagh (orig r6987):  ribasushi | 2009-07-04 04:09:39 -0700
   todos are shorter now
   r5929 at hlagh (orig r6989):  castaway | 2009-07-05 13:00:55 -0700
   Added Pod::Inherit use to Makefile.PL at author-time; comments/suggestions as to whether it's too "noisy" welcome.
   
   r5930 at hlagh (orig r6990):  ribasushi | 2009-07-05 15:06:52 -0700
   Couple of makefile fixes:
   'use' is compile time - use 'require' instead
   'recommends' is for distro maintainers only - push the dependency into the authors hash (it is not to be executed by mere mortals)
   
   r5931 at hlagh (orig r6991):  ribasushi | 2009-07-05 15:55:36 -0700
   Forgotten pod exclusions
   r5932 at hlagh (orig r6992):  ribasushi | 2009-07-05 16:07:05 -0700
   Temporarily back out Pod::Inherit changes
   r5933 at hlagh (orig r6993):  ribasushi | 2009-07-05 16:10:22 -0700
   Put Pod::Inherit stuff back after proper copy
  
  r7027 at Thesaurus (orig r7026):  caelum | 2009-07-10 23:25:56 +0200
   r5941 at hlagh (orig r7009):  ribasushi | 2009-07-09 03:45:02 -0700
    r6995 at Thesaurus (orig r6994):  ribasushi | 2009-07-06 01:12:57 +0200
    Where 08108 will come from
   
  
  r7029 at Thesaurus (orig r7028):  caelum | 2009-07-10 23:59:31 +0200
   r5959 at hlagh (orig r7027):  caelum | 2009-07-10 14:56:57 -0700
   fix PodInherit call in Makefile.PL
  
  r7067 at Thesaurus (orig r7066):  caelum | 2009-07-17 10:18:24 +0200
   r5961 at hlagh (orig r7029):  robkinyon | 2009-07-10 18:03:07 -0400
   Applied patch from kados regarding use of a DateTime::Format class to validate
   r5962 at hlagh (orig r7030):  caelum | 2009-07-11 05:26:40 -0400
   reword IC::DT doc patch
   r6009 at hlagh (orig r7037):  dandv | 2009-07-13 08:06:08 -0400
   PK::Auto has been in Core since 2007
   r6010 at hlagh (orig r7038):  dandv | 2009-07-13 08:15:13 -0400
   Fixed has_many example in Intro.pod
   r6011 at hlagh (orig r7039):  dandv | 2009-07-13 16:58:45 -0400
   Fixed run-on sentences in FAQ
   r6012 at hlagh (orig r7040):  dandv | 2009-07-13 17:18:11 -0400
   Minor POD fixes in Example.pod
   r6013 at hlagh (orig r7041):  dandv | 2009-07-13 17:48:18 -0400
   Favored using ->single to get the topmost result over less readable ->slice(0)
   r6014 at hlagh (orig r7042):  dandv | 2009-07-13 18:56:31 -0400
   Minor POD fixes in Cookbook
   r6015 at hlagh (orig r7045):  ribasushi | 2009-07-14 07:30:55 -0400
   Minor logic cleanup
   r6016 at hlagh (orig r7046):  ribasushi | 2009-07-14 08:07:11 -0400
   grouped prefetch fix
   r6023 at hlagh (orig r7053):  ijw | 2009-07-15 12:55:35 -0400
   Added SQLA link for more comprehensive documentation of order_by options available
   r6026 at hlagh (orig r7056):  caelum | 2009-07-15 18:54:22 -0400
   add "smalldatetime" support to IC::DT
   r6029 at hlagh (orig r7059):  ribasushi | 2009-07-16 00:29:41 -0400
    r7013 at Thesaurus (orig r7012):  jnapiorkowski | 2009-07-09 17:00:22 +0200
    new branch
    r7014 at Thesaurus (orig r7013):  jnapiorkowski | 2009-07-09 20:06:44 +0200
    changed the way transactions are detected for replication so it works with the standard way of doing this; minor doc updates; a fix to the force pool so you can force a particular slave; changes to the way the debug output is created
    r7015 at Thesaurus (orig r7014):  jnapiorkowski | 2009-07-09 20:17:03 +0200
    more changes to the way debug output works
    r7016 at Thesaurus (orig r7015):  jnapiorkowski | 2009-07-09 22:26:47 +0200
    big update to the test suite so that we now check that the expected storage was actually used
    r7017 at Thesaurus (orig r7016):  jnapiorkowski | 2009-07-09 23:23:37 +0200
    set correct number of tests, changed the debugging output to not warn on DDL, minor change to a test resultclass so we can deploy to mysql properly
    r7018 at Thesaurus (orig r7017):  jnapiorkowski | 2009-07-09 23:26:59 +0200
    corrected the number of skipped tests
    r7019 at Thesaurus (orig r7018):  jnapiorkowski | 2009-07-09 23:52:22 +0200
    fixed test resultclass formatting, added a few more DBIC::Storage::DBI methods that I might need to delegate.
    r7020 at Thesaurus (orig r7019):  jnapiorkowski | 2009-07-10 01:23:07 +0200
    some documentation updates, and changed the way we find paths for the sqlite dbfiles to use File::Spec, which I hope will solve some of the Win32 error messages
    r7023 at Thesaurus (orig r7022):  jnapiorkowski | 2009-07-10 18:00:38 +0200
    pod cleanup, fixed broken pod links, and new Introduction pod
    r7024 at Thesaurus (orig r7023):  jnapiorkowski | 2009-07-10 19:10:57 +0200
    updated Changes file to reflect work completed
    r7025 at Thesaurus (orig r7024):  jnapiorkowski | 2009-07-10 19:37:53 +0200
    a few more Moose Type related fixes and added diag to the replication test to report the moose and types version used, to help us debug some of the moose related errors being reported
    r7058 at Thesaurus (orig r7057):  ribasushi | 2009-07-16 06:28:44 +0200
    A couple of typos, and general whitespace cleanup (ick)
   
   r6031 at hlagh (orig r7062):  jnapiorkowski | 2009-07-16 11:03:32 -0400
   increased Moose version requirements due to changes in the way type constraints get validated, which are not backward compatible
   r6032 at hlagh (orig r7063):  dandv | 2009-07-16 21:37:28 -0400
   Minor POD grammar: it's -> its where appropriate
  
  r7105 at Thesaurus (orig r7102):  caelum | 2009-07-24 06:34:56 +0200
   r6075 at hlagh (orig r7074):  tomboh | 2009-07-20 12:20:37 -0400
   Fix POD changes from r7040.
   r6081 at hlagh (orig r7077):  norbi | 2009-07-20 18:59:30 -0400
   
   r6082 at hlagh (orig r7078):  norbi | 2009-07-20 18:59:58 -0400
    r7232 at vger:  mendel | 2009-07-21 00:58:12 +0200
    Fixed documentation and added test for the "Arbitrary SQL through a custom ResultSource" Cookbook alternate (subclassing) recipe.
   
   r6083 at hlagh (orig r7079):  norbi | 2009-07-20 19:05:32 -0400
    r7235 at vger:  mendel | 2009-07-21 01:05:18 +0200
    Fixed 'typo' (removed a word that I left there by accident).
   
   r6084 at hlagh (orig r7080):  norbi | 2009-07-21 04:06:21 -0400
    r7237 at vger:  mendel | 2009-07-21 10:06:05 +0200
    Fixing what my svk client screwed up.
   
   r6085 at hlagh (orig r7081):  caelum | 2009-07-21 10:51:55 -0400
   update Storage::Replicated prereqs
   r6086 at hlagh (orig r7082):  caelum | 2009-07-21 12:16:34 -0400
   show Oracle datetime_setup alter session statements in debug output
   r6088 at hlagh (orig r7085):  ribasushi | 2009-07-21 21:50:57 -0400
   Lazy folks do not run the whole test suite before merging >:( 
   r6287 at hlagh (orig r7097):  caelum | 2009-07-23 14:14:11 -0400
    r6092 at hlagh (orig r7090):  caelum | 2009-07-23 08:24:39 -0400
    new branch for fixing the MONEY type in MSSQL
    r6093 at hlagh (orig r7091):  caelum | 2009-07-23 08:34:01 -0400
    add test
    r6283 at hlagh (orig r7093):  caelum | 2009-07-23 10:31:08 -0400
    fix money columns
    r6284 at hlagh (orig r7094):  caelum | 2009-07-23 10:34:06 -0400
    minor change
    r6285 at hlagh (orig r7095):  caelum | 2009-07-23 11:01:37 -0400
    add test for updating money value to NULL
    r6286 at hlagh (orig r7096):  caelum | 2009-07-23 14:09:26 -0400
    add money type tests to dbd::sybase+mssql tests
   
  
  r7135 at Thesaurus (orig r7132):  caelum | 2009-07-28 19:10:40 +0200
   r6365 at hlagh (orig r7126):  caelum | 2009-07-27 20:03:47 -0400
   add postgres "timestamp without time zone" support
  
  r7244 at Thesaurus (orig r7241):  caelum | 2009-08-06 17:12:49 +0200
  add warning for custom resultsources through ->name(SCALARREF) on ->deploy
  r7245 at Thesaurus (orig r7242):  caelum | 2009-08-06 17:54:33 +0200
  improve the ->name(REF) warning code
  r7268 at Thesaurus (orig r7265):  ribasushi | 2009-08-09 00:23:24 +0200
  Clarify POD and clean up the ->name-hack warning
  r7269 at Thesaurus (orig r7266):  ribasushi | 2009-08-09 00:34:09 +0200
  Fix a corner case and improve comments
 
 r7279 at Thesaurus (orig r7276):  ribasushi | 2009-08-09 15:25:34 +0200
  r6535 at Thesaurus (orig r6534):  ribasushi | 2009-06-06 11:12:03 +0200
  Let's try again
  r6536 at Thesaurus (orig r6535):  ribasushi | 2009-06-06 11:32:00 +0200
  Two failing MC tests
  r6624 at Thesaurus (orig r6623):  ribasushi | 2009-06-11 16:54:09 +0200
  Another multicreate failing test - has_many should not do find_or_create
  r6625 at Thesaurus (orig r6624):  ribasushi | 2009-06-11 16:54:49 +0200
   r6538 at Thesaurus (orig r6537):  ribasushi | 2009-06-07 23:07:55 +0200
   Fix for mysql subquery problem
   r6539 at Thesaurus (orig r6538):  ribasushi | 2009-06-07 23:36:43 +0200
   Make empty/default inserts use standard SQL
   r6540 at Thesaurus (orig r6539):  ribasushi | 2009-06-08 00:59:21 +0200
   Add mysql empty insert SQL override
   Make SQLAHacks parts loadable at runtime via ensure_class_loaded
   r6541 at Thesaurus (orig r6540):  ribasushi | 2009-06-08 01:03:04 +0200
   Make podcoverage happy
   r6542 at Thesaurus (orig r6541):  ribasushi | 2009-06-08 01:24:06 +0200
   Fix find_or_new/create to stop returning random rows when default value insert is requested
   r6543 at Thesaurus (orig r6542):  ribasushi | 2009-06-08 11:36:56 +0200
   Simplify order_by/_virtual_order_by handling
   r6553 at Thesaurus (orig r6552):  ribasushi | 2009-06-08 23:56:41 +0200
   duh
   r6557 at Thesaurus (orig r6556):  ash | 2009-06-09 12:20:34 +0200
   Adjust bug to show problem with rows => 1 + child rel
   
   r6558 at Thesaurus (orig r6557):  ribasushi | 2009-06-09 13:12:46 +0200
   Require a recent bugfixed Devel::Cycle
   r6560 at Thesaurus (orig r6559):  ash | 2009-06-09 15:07:30 +0200
   Make IC::DT extra warning state the column name too
   
   r6575 at Thesaurus (orig r6574):  ribasushi | 2009-06-10 00:19:48 +0200
   AuthorCheck fixes
   r6579 at Thesaurus (orig r6578):  ribasushi | 2009-06-10 00:52:17 +0200
    r6522 at Thesaurus (orig r6521):  ribasushi | 2009-06-05 19:27:55 +0200
    New branch to try resultsource related stuff
    r6545 at Thesaurus (orig r6544):  ribasushi | 2009-06-08 13:00:54 +0200
    First stab at adding resultsources to each join in select - works won-der-ful-ly
    r6546 at Thesaurus (orig r6545):  ribasushi | 2009-06-08 13:14:08 +0200
    Commit failing test and thoughts on search arg deflation
    r6576 at Thesaurus (orig r6575):  ribasushi | 2009-06-10 00:31:55 +0200
    Todoify DT in search deflation test until after 0.09
    r6577 at Thesaurus (orig r6576):  ribasushi | 2009-06-10 00:48:07 +0200
    Factor out the $ident resolver
   
   r6581 at Thesaurus (orig r6580):  ribasushi | 2009-06-10 01:21:50 +0200
   Move as_query out of the cursor
   r6582 at Thesaurus (orig r6581):  ribasushi | 2009-06-10 01:27:19 +0200
   Think before commit
   r6583 at Thesaurus (orig r6582):  ribasushi | 2009-06-10 09:37:19 +0200
   Clarify and disable rows/prefetch test - fix is easy, but architecturally unsound - need more time
   r6591 at Thesaurus (orig r6590):  ribasushi | 2009-06-10 13:33:37 +0200
    r6544 at Thesaurus (orig r6543):  ribasushi | 2009-06-08 11:44:59 +0200
    Attempt to figure out why we repeat joins on complex search_related
    r6586 at Thesaurus (orig r6585):  ribasushi | 2009-06-10 11:22:05 +0200
    Move the rs preservation test to a more suitable place
    r6589 at Thesaurus (orig r6588):  ribasushi | 2009-06-10 13:15:48 +0200
    Finally commit a truly failing test
    r6590 at Thesaurus (orig r6589):  ribasushi | 2009-06-10 13:33:14 +0200
    Duh, this was a pretty simple bug
   
   r6593 at Thesaurus (orig r6592):  ribasushi | 2009-06-10 13:43:31 +0200
   What was I thinking - resultsource does not have an ->alias
   r6598 at Thesaurus (orig r6597):  ribasushi | 2009-06-10 14:48:39 +0200
   Adjust changelog
   r6601 at Thesaurus (orig r6600):  ribasushi | 2009-06-10 15:50:43 +0200
   Release 0.08104
   r6615 at Thesaurus (orig r6614):  ribasushi | 2009-06-11 14:29:48 +0200
   Move around inflation tests
   r6616 at Thesaurus (orig r6615):  ribasushi | 2009-06-11 14:32:07 +0200
   explicitly remove manifest on author mode make
   r6617 at Thesaurus (orig r6616):  ribasushi | 2009-06-11 15:02:41 +0200
   IC::DT changes:
   Switch SQLite storage to DT::F::SQLite
   Fix exception when undef_if_invalid and timezone are both set on a column
   Split t/89inflate_datetime into separate tests
   Adjust makefile author dependencies
   r6618 at Thesaurus (orig r6617):  ribasushi | 2009-06-11 15:07:41 +0200
   Move file_column test to inflate/ too
   r6621 at Thesaurus (orig r6620):  ribasushi | 2009-06-11 16:16:20 +0200
    r5713 at Thesaurus (orig r5712):  ribasushi | 2009-03-08 23:53:28 +0100
    Branch for datatype-aware updates
    r6604 at Thesaurus (orig r6603):  ribasushi | 2009-06-10 18:08:25 +0200
    Test for type-aware update
    r6607 at Thesaurus (orig r6606):  ribasushi | 2009-06-10 19:57:04 +0200
    Datatype aware update works
    r6609 at Thesaurus (orig r6608):  ribasushi | 2009-06-10 20:06:40 +0200
    Whoops
    r6614 at Thesaurus (orig r6613):  ribasushi | 2009-06-11 09:23:54 +0200
    Add attribute doc
    r6620 at Thesaurus (orig r6619):  ribasushi | 2009-06-11 16:15:53 +0200
    Use equality, not comparison
   
   r6623 at Thesaurus (orig r6622):  ribasushi | 2009-06-11 16:21:53 +0200
   Changes
  
  r6626 at Thesaurus (orig r6625):  ribasushi | 2009-06-11 17:00:06 +0200
  Adjust renamed relationship
  r6646 at Thesaurus (orig r6645):  ribasushi | 2009-06-12 09:00:02 +0200
  This is not update_or_create - create any non-belongs_to without asking many questions
  r7194 at Thesaurus (orig r7191):  ribasushi | 2009-08-04 15:20:35 +0200
  fix merge fallout
  r7195 at Thesaurus (orig r7192):  ribasushi | 2009-08-04 15:39:05 +0200
  Remove bogus test - the real test is in t/multi_create/has_many.t
  r7196 at Thesaurus (orig r7193):  ribasushi | 2009-08-04 15:48:33 +0200
  Separate the diamond MC test
  Use the new Test::More no_plan ability
  r7274 at Thesaurus (orig r7271):  ribasushi | 2009-08-09 14:39:29 +0200
  Fix an arcane case with pk==fk tables (use the relationship direction specification if it is available)
  r7275 at Thesaurus (orig r7272):  ribasushi | 2009-08-09 14:45:20 +0200
  Optimize handling of {_rel_in_storage}, greatly reducing the amount of find_or_create calls (as indicated by the TODOs in t/multi_create/reentrance_count.t)
  r7277 at Thesaurus (orig r7274):  ribasushi | 2009-08-09 15:23:24 +0200
  Comment and todoify remaining test - too much of an undertaking / needs discussion
  r7278 at Thesaurus (orig r7275):  ribasushi | 2009-08-09 15:24:58 +0200
  newline
 
 r7282 at Thesaurus (orig r7279):  ribasushi | 2009-08-09 16:17:03 +0200
 Whoops, missed a line
 r7283 at Thesaurus (orig r7280):  mo | 2009-08-09 19:10:56 +0200
 added TODO test: call accessors when create()ing a row
 r7284 at Thesaurus (orig r7281):  ribasushi | 2009-08-10 08:01:59 +0200
 Fix bogus test
 r7291 at Thesaurus (orig r7288):  caelum | 2009-08-10 10:13:19 +0200
 make _determine_driver more reentrant
 r7297 at Thesaurus (orig r7294):  michaelr | 2009-08-10 22:40:33 +0200
 Added exception when resultset called without an argument
 
 
 r7298 at Thesaurus (orig r7295):  andyg | 2009-08-11 00:34:13 +0200
 Add failing test for RT 47779, group_by as a scalar ref
 r7301 at Thesaurus (orig r7298):  ribasushi | 2009-08-11 09:52:03 +0200
 Extra intro pod
 r7302 at Thesaurus (orig r7299):  mo | 2009-08-11 13:20:37 +0200
 removed TODO test
 r7303 at Thesaurus (orig r7300):  ribasushi | 2009-08-11 14:16:28 +0200
 Sanify group_by handling in complex prefetch rewrites
 r7304 at Thesaurus (orig r7301):  ribasushi | 2009-08-11 17:52:49 +0200
 cleanup
 r7305 at Thesaurus (orig r7302):  ribasushi | 2009-08-11 19:40:59 +0200
 Whitespace
 r7306 at Thesaurus (orig r7303):  ribasushi | 2009-08-11 20:00:11 +0200
 Fix an obscure regression when inserting an object with a serialize-deflating column set
 r7314 at Thesaurus (orig r7311):  ribasushi | 2009-08-12 16:11:24 +0200
 Remove needless inflate in Ordered
 r7315 at Thesaurus (orig r7312):  ribasushi | 2009-08-12 16:13:48 +0200
 Remove leftovers from frew's tests
 r7316 at Thesaurus (orig r7313):  ribasushi | 2009-08-12 16:16:08 +0200
 Grrrr
 r7317 at Thesaurus (orig r7314):  ribasushi | 2009-08-13 07:40:44 +0200
 Caelum was right to make _get_dbh private - reverting (and some code refactoring)
 r7318 at Thesaurus (orig r7315):  ribasushi | 2009-08-13 07:41:43 +0200
 Add a db/txn_do retry debugger (interesting results)
 r7319 at Thesaurus (orig r7316):  ribasushi | 2009-08-13 07:42:51 +0200
 Adjust the storage DESTROY and the tests to accommodate the new global RaiseError=1
 r7320 at Thesaurus (orig r7317):  ribasushi | 2009-08-13 08:12:08 +0200
 Last bit
 r7322 at Thesaurus (orig r7319):  ribasushi | 2009-08-17 11:09:39 +0200
 Allow select AS specification for functions only via the -as hash-key (no pod yet)
 r7323 at Thesaurus (orig r7320):  ribasushi | 2009-08-17 11:41:08 +0200
 Cookbook entry for -as and syntax tests
 r7324 at Thesaurus (orig r7321):  ribasushi | 2009-08-17 11:51:21 +0200
 Changes
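 For illustration, a sketch of the -as syntax the cookbook entry covers
 (resultset, relationship and alias names are hypothetical):
 
   my $rs = $schema->resultset('CD')->search({}, {
     join     => 'tracks',
     select   => [ 'me.cdid', { count => 'tracks.trackid', -as => 'track_count' } ],
     as       => [ 'cdid', 'track_count' ],
     group_by => [ 'me.cdid' ],
   });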
 r7326 at Thesaurus (orig r7323):  ribasushi | 2009-08-17 12:37:14 +0200
 examples should be correct
 r7332 at Thesaurus (orig r7329):  caelum | 2009-08-18 06:19:12 +0200
 always reconnect in odbc:mssql:connect_call_use_dynamic_cursors
 r7333 at Thesaurus (orig r7330):  caelum | 2009-08-18 06:43:35 +0200
 minor change
 r7335 at Thesaurus (orig r7332):  ribasushi | 2009-08-18 08:51:20 +0200
  r7248 at Thesaurus (orig r7245):  rbuels | 2009-08-06 21:39:05 +0200
  making topic branch for "currval undefined" problem when not qualifying tables with their schema names
  r7249 at Thesaurus (orig r7246):  rbuels | 2009-08-06 21:40:39 +0200
  failing (crashing, really) test for this strange pg thing.  could not figure out a way to make a non-crashing test
  r7250 at Thesaurus (orig r7247):  rbuels | 2009-08-06 21:42:30 +0200
  fix for pg non-schema-qualified thing, with a nice vague commit message.  performance should be the same as before, for the common (schema-qualified) case
  r7251 at Thesaurus (orig r7248):  rbuels | 2009-08-06 22:41:19 +0200
  woops, pg search path fix needed support for quoted schema names in search paths
  r7295 at Thesaurus (orig r7292):  rbuels | 2009-08-10 20:45:50 +0200
  added caching of pg search path in Pg storage object
  r7296 at Thesaurus (orig r7293):  rbuels | 2009-08-10 22:37:31 +0200
  added test for empty table before non-schema-qualified pg sequence test in 72pg.t
  r7299 at Thesaurus (orig r7296):  rbuels | 2009-08-11 00:46:35 +0200
  added blurb to Changes for pg_unqualified_schema branch
  r7300 at Thesaurus (orig r7297):  rbuels | 2009-08-11 00:48:53 +0200
  added me (rbuels) to contributors
  r7328 at Thesaurus (orig r7325):  rbuels | 2009-08-17 23:46:21 +0200
  added POD section about schema support to DBIx::Class::Storage::Pg
  r7329 at Thesaurus (orig r7326):  rbuels | 2009-08-17 23:51:40 +0200
  added more tests for multi-schema support in 72pg.t
  r7334 at Thesaurus (orig r7331):  ribasushi | 2009-08-18 08:49:03 +0200
  Un-plan test and fix authorship
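  Where the autodetection cannot determine the right sequence, it can still
  be specified explicitly in the column info - a minimal sketch (sequence
  and column names are hypothetical):
 
    __PACKAGE__->add_columns(
      id => {
        data_type         => 'integer',
        is_auto_increment => 1,
        sequence          => 'some_schema.some_table_id_seq',
      },
    );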
 
 r7341 at Thesaurus (orig r7338):  ribasushi | 2009-08-18 10:55:23 +0200
  r7337 at Thesaurus (orig r7334):  ribasushi | 2009-08-18 09:00:03 +0200
  Pre-release branch
  r7338 at Thesaurus (orig r7335):  ribasushi | 2009-08-18 10:32:13 +0200
  Disambiguate POD
  r7339 at Thesaurus (orig r7336):  ribasushi | 2009-08-18 10:32:53 +0200
  Release 0.08109
 
 r7346 at Thesaurus (orig r7343):  robkinyon | 2009-08-19 21:44:48 +0200
 Applied doc patch by spb
 r7347 at Thesaurus (orig r7344):  ribasushi | 2009-08-20 07:50:49 +0200
 Fix a weird-ass sqlt invocation in deployment_statements()
 r7348 at Thesaurus (orig r7345):  ribasushi | 2009-08-20 08:19:07 +0200
 Apply pod patch by arthas (slightly modified)
 r7353 at Thesaurus (orig r7350):  abraxxa | 2009-08-20 15:07:29 +0200
 pod patch for 'Tracing SQL' examples
 
 r7356 at Thesaurus (orig r7353):  spb | 2009-08-20 19:53:02 +0200
 Minor fix to the previous doc patch
 r7357 at Thesaurus (orig r7354):  frew | 2009-08-20 23:54:04 +0200
 add some basic guards to get rid of warnings
 r7361 at Thesaurus (orig r7358):  ribasushi | 2009-08-21 11:18:43 +0200
 Because prefetch uses the cache system, it is not possible to set HRI on a prefetched rs without upsetting the tests - don't compare
 r7372 at Thesaurus (orig r7369):  caelum | 2009-08-24 12:32:57 +0200
 bump CAG dep
 r7391 at Thesaurus (orig r7388):  ribasushi | 2009-08-25 13:43:38 +0200
 typo
 r7392 at Thesaurus (orig r7389):  ribasushi | 2009-08-25 14:29:37 +0200
  r7354 at Thesaurus (orig r7351):  abraxxa | 2009-08-20 17:46:06 +0200
  new branch grouped_has_many_join
  
  r7382 at Thesaurus (orig r7379):  ribasushi | 2009-08-24 22:50:13 +0200
  Seems like abraxxa's bug is fixed
  r7385 at Thesaurus (orig r7382):  ribasushi | 2009-08-25 11:33:40 +0200
  One more test
 
 r7396 at Thesaurus (orig r7393):  ribasushi | 2009-08-26 18:07:51 +0200
 Stop testing deprecated json::syck
 r7397 at Thesaurus (orig r7394):  ribasushi | 2009-08-26 18:08:24 +0200
 Make sure sqlt_type gets called after determining driver
 r7398 at Thesaurus (orig r7395):  ribasushi | 2009-08-26 18:21:53 +0200
 Make POD::Coverage happy... again
 r7399 at Thesaurus (orig r7396):  ribasushi | 2009-08-26 18:31:54 +0200
 Clarify
 r7400 at Thesaurus (orig r7397):  frew | 2009-08-26 22:24:19 +0200
 Remove dead sketchtowne link
 r7404 at Thesaurus (orig r7401):  ribasushi | 2009-08-27 18:50:12 +0200
 Changes
 r7406 at Thesaurus (orig r7403):  ribasushi | 2009-08-28 00:11:29 +0200
 Add a test proving how dumb I am
 r7407 at Thesaurus (orig r7404):  ribasushi | 2009-08-28 16:34:46 +0200
 Warning to spare mst explanations
 r7422 at Thesaurus (orig r7419):  caelum | 2009-08-29 08:34:07 +0200
  r7381 at hlagh (orig r7380):  ribasushi | 2009-08-24 17:07:58 -0400
  Branch to add autocast support as a standalone piece of code
  r7382 at hlagh (orig r7381):  ribasushi | 2009-08-25 05:06:43 -0400
  Move storage tests to their own dir
  r7385 at hlagh (orig r7384):  ribasushi | 2009-08-25 06:35:19 -0400
  Switch storage class loading to ensure_class_loaded
  r7386 at hlagh (orig r7385):  ribasushi | 2009-08-25 06:37:48 -0400
  Change a datatype for test purposes
  r7387 at hlagh (orig r7386):  ribasushi | 2009-08-25 06:45:35 -0400
  Fix two storage tests
  r7388 at hlagh (orig r7387):  ribasushi | 2009-08-25 06:45:52 -0400
  Actual autocast code
  r18697 at hlagh (orig r7416):  caelum | 2009-08-29 01:42:29 -0400
  rename method and add docs
  r18698 at hlagh (orig r7417):  ribasushi | 2009-08-29 02:07:18 -0400
  Make sure arrays work
  r18699 at hlagh (orig r7418):  caelum | 2009-08-29 02:11:14 -0400
  rename _map_data_type to _native_data_type
 
 r7425 at Thesaurus (orig r7422):  ribasushi | 2009-08-29 08:55:12 +0200
 Make podcoverage happy
 r7426 at Thesaurus (orig r7423):  ribasushi | 2009-08-29 09:06:07 +0200
 Reduce the number of heavy dbh_do calls
 r7439 at Thesaurus (orig r7436):  ribasushi | 2009-08-30 08:54:10 +0200
  r7435 at Thesaurus (orig r7432):  caelum | 2009-08-30 02:53:21 +0200
  new branch
  r7436 at Thesaurus (orig r7433):  caelum | 2009-08-30 03:14:36 +0200
  add dbh_maker option to connect_info hash
  r7437 at Thesaurus (orig r7434):  ribasushi | 2009-08-30 08:51:14 +0200
  Minor cleanup and test enhancement
  r7438 at Thesaurus (orig r7435):  ribasushi | 2009-08-30 08:53:59 +0200
  Changes
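  A minimal sketch of the new option (DSN and credentials are placeholders) -
  the handle constructor can now be passed inside the attribute hash instead
  of as a plain coderef connect_info:
 
    my $schema = My::Schema->connect({
      dbh_maker => sub { DBI->connect($dsn, $user, $pass, { RaiseError => 1 }) },
    });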
 
 r7444 at Thesaurus (orig r7441):  ribasushi | 2009-08-30 09:53:04 +0200
 Sanify 03podcoverage.t, allow wildcard skipping
 r7449 at Thesaurus (orig r7446):  caelum | 2009-08-31 04:36:08 +0200
 support coderef connect_infos for replicated storage
 r7450 at Thesaurus (orig r7447):  caelum | 2009-08-31 04:58:43 +0200
 make replicant dsn detection a bit nicer
 r7451 at Thesaurus (orig r7448):  caelum | 2009-08-31 17:30:37 +0200
 fix case where replicant coderef dsn does not connect
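 For illustration, a sketch of a coderef connect_info handed to the
 replicant pool (DSN and credentials are placeholders):
 
   $schema->storage->connect_replicants(
     [ sub { DBI->connect($replicant_dsn, $user, $pass) } ],
   );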
 r7452 at Thesaurus (orig r7449):  arcanez | 2009-08-31 23:13:50 +0200
 remove . from end of =head links
 r7455 at Thesaurus (orig r7452):  ribasushi | 2009-09-01 10:38:37 +0200
 Quote deps, avoid floating-point version problems
 r7456 at Thesaurus (orig r7453):  ribasushi | 2009-09-01 11:10:11 +0200
 Fix misleading FAQ entry
 r7464 at Thesaurus (orig r7461):  ribasushi | 2009-09-01 16:51:58 +0200
 Fix insert_bulk with rebless
 r7465 at Thesaurus (orig r7462):  ribasushi | 2009-09-01 16:52:39 +0200
 Comment
 r7466 at Thesaurus (orig r7463):  matthewt | 2009-09-01 17:17:08 +0200
 clearer copyright
 r7467 at Thesaurus (orig r7464):  matthewt | 2009-09-01 17:18:31 +0200
 split copyright and license
 r7469 at Thesaurus (orig r7466):  frew | 2009-09-01 20:27:36 +0200
 pod describing strife with MSSQL
 r7483 at Thesaurus (orig r7480):  ribasushi | 2009-09-02 11:07:04 +0200
 Streamline pg test-schemas cleanup
 r7484 at Thesaurus (orig r7481):  ribasushi | 2009-09-02 11:20:25 +0200
 Centralize handling of minimum sqlt version to DBIx::Class
 Bump version to the latest unborked sqlt (still just a recommend)
 r7485 at Thesaurus (orig r7482):  ribasushi | 2009-09-02 11:31:50 +0200
 Some cleanup... don't remember where it came from
 r7486 at Thesaurus (orig r7483):  ribasushi | 2009-09-02 12:19:11 +0200
 First part of mysql insanity
 r7487 at Thesaurus (orig r7484):  ribasushi | 2009-09-02 12:25:35 +0200
 Invoke default_join_type only on undefined types
 r7488 at Thesaurus (orig r7485):  ribasushi | 2009-09-02 12:42:39 +0200
 No fancy methods for the default_jointype, as we don't have proper sqlahacks inheritance and they are... well hacks
 r7489 at Thesaurus (orig r7486):  ribasushi | 2009-09-02 13:00:07 +0200
 Mysql v3 support (ick)
 r7494 at Thesaurus (orig r7491):  rbuels | 2009-09-02 20:33:47 +0200
 POD patch, corrected erroneous usage of dbh_do in Storage::DBI synopsis
 r7500 at Thesaurus (orig r7497):  ribasushi | 2009-09-03 11:11:29 +0200
 POD lists the storable hooks, but does not load them
 r7501 at Thesaurus (orig r7498):  ribasushi | 2009-09-03 11:11:50 +0200
 Storable sanification
 r7502 at Thesaurus (orig r7499):  ribasushi | 2009-09-03 11:24:17 +0200
 Storable is now in Core
 r7503 at Thesaurus (orig r7500):  ribasushi | 2009-09-03 11:36:58 +0200
 Make sure mysql is fixed
 r7506 at Thesaurus (orig r7503):  ribasushi | 2009-09-03 17:16:17 +0200
 Add podcoverage skip
 r7507 at Thesaurus (orig r7504):  ribasushi | 2009-09-03 17:23:19 +0200
 Consolidate _verify_pid calls
 r7511 at Thesaurus (orig r7508):  matthewt | 2009-09-03 20:12:53 +0200
 get the COPYRIGHT in the right place to not confuse META.yml generation
 r7513 at Thesaurus (orig r7510):  ribasushi | 2009-09-03 20:41:22 +0200
 
 r7514 at Thesaurus (orig r7511):  ribasushi | 2009-09-03 20:41:34 +0200
  r7472 at Thesaurus (orig r7469):  norbi | 2009-09-01 21:43:08 +0200
   r7635 at vger:  mendel | 2009-09-01 21:02:23 +0200
   Added pointer to 'SQL functions on the lhs' to the 'using stored procs' section.
  
 
 r7515 at Thesaurus (orig r7512):  ribasushi | 2009-09-03 20:41:44 +0200
  r7473 at Thesaurus (orig r7470):  norbi | 2009-09-01 21:43:19 +0200
   r7636 at vger:  mendel | 2009-09-01 21:09:43 +0200
    Mentions the possibility of creating indexes on SQL function return values.
  
 
 r7516 at Thesaurus (orig r7513):  ribasushi | 2009-09-03 20:41:52 +0200
  r7474 at Thesaurus (orig r7471):  norbi | 2009-09-01 21:43:31 +0200
   r7637 at vger:  mendel | 2009-09-01 21:19:14 +0200
   Rewrote 'SQL functions on the lhs' to use the new SQLA literal SQL + bind feature.
  
 
 r7517 at Thesaurus (orig r7514):  ribasushi | 2009-09-03 20:41:59 +0200
  r7475 at Thesaurus (orig r7472):  norbi | 2009-09-01 21:43:42 +0200
   r7638 at vger:  mendel | 2009-09-01 21:20:17 +0200
   Added a comment to the example code to stress that it does not work.
  
 
 r7518 at Thesaurus (orig r7515):  ribasushi | 2009-09-03 20:42:10 +0200
  r7476 at Thesaurus (orig r7473):  norbi | 2009-09-01 21:43:54 +0200
   r7639 at vger:  mendel | 2009-09-01 21:28:18 +0200
   Added pointer to DBIx::Class::DynamicSubclass.
  
 
 r7519 at Thesaurus (orig r7516):  ribasushi | 2009-09-03 20:42:15 +0200
  r7477 at Thesaurus (orig r7474):  norbi | 2009-09-01 21:44:03 +0200
   r7640 at vger:  mendel | 2009-09-01 21:30:13 +0200
   Replaced deprecated \'colname DESC' order_by syntax with { -desc => 'colname' } syntax.
  
 
 r7520 at Thesaurus (orig r7517):  ribasushi | 2009-09-03 20:42:22 +0200
  r7478 at Thesaurus (orig r7475):  norbi | 2009-09-01 21:44:17 +0200
   r7641 at vger:  mendel | 2009-09-01 21:32:48 +0200
   Rewrote 'SQL functions on the lhs' to use the new SQLA literal SQL + bind feature.
  
 
 r7521 at Thesaurus (orig r7518):  ribasushi | 2009-09-03 20:42:26 +0200
  r7479 at Thesaurus (orig r7476):  norbi | 2009-09-01 21:44:28 +0200
   r7642 at vger:  mendel | 2009-09-01 21:42:25 +0200
   Added many-to-many add_to_*() example to stress that it returns the related row and not the linking table row.
  
 
 r7522 at Thesaurus (orig r7519):  ribasushi | 2009-09-03 20:42:32 +0200
  r7480 at Thesaurus (orig r7477):  norbi | 2009-09-01 22:14:25 +0200
   r7653 at vger:  mendel | 2009-09-01 22:14:11 +0200
   Fixed wrong literal SQL + bind examples (missing operator and placeholders).
  
 
 r7523 at Thesaurus (orig r7520):  ribasushi | 2009-09-03 20:42:37 +0200
  r7481 at Thesaurus (orig r7478):  norbi | 2009-09-01 22:30:48 +0200
   r7655 at vger:  mendel | 2009-09-01 22:30:35 +0200
   Fixed the bind value column names in the SQL literal + bind examples.
  
 
 r7524 at Thesaurus (orig r7521):  ribasushi | 2009-09-03 20:42:45 +0200
  r7482 at Thesaurus (orig r7479):  norbi | 2009-09-01 22:52:21 +0200
   r7657 at vger:  mendel | 2009-09-01 22:52:09 +0200
   Further improvement in the bind value column names in the SQL literal + bind examples.
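   For illustration, the shape of the rewritten 'SQL functions on the lhs'
   recipe using the literal SQL + bind feature (table and column names are
   illustrative, and the bind "column name" is just a label):
 
     my @people = $schema->resultset('Person')->search(
       \[ 'YEAR(date_of_birth) = ?', [ plain_value => 1979 ] ],
     )->all;
 
     # and the order_by form that replaces the deprecated \'colname DESC':
     my $sorted_rs = $rs->search({}, { order_by => { -desc => 'colname' } });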
  
 
 r7549 at Thesaurus (orig r7546):  ribasushi | 2009-09-04 08:47:19 +0200
 Stop connecting to determine dt-parser (test is in pg branch)
 r7553 at Thesaurus (orig r7550):  ribasushi | 2009-09-04 11:20:48 +0200
 Require sqla with bool support
 r7560 at Thesaurus (orig r7557):  ribasushi | 2009-09-04 19:17:32 +0200
 Dumper follies
 r7561 at Thesaurus (orig r7558):  ribasushi | 2009-09-04 19:27:50 +0200
 Even better sqla
 r7570 at Thesaurus (orig r7567):  ribasushi | 2009-09-04 20:49:53 +0200
  r7459 at Thesaurus (orig r7456):  rbuels | 2009-09-01 12:46:46 +0200
  making another pg_unqualified_schema branch, for real this time
  r7460 at Thesaurus (orig r7457):  rbuels | 2009-09-01 12:51:31 +0200
  reworked tests for pg last_insert_id in presence of un-schema-qualified things. adds some todo tests, including a case for which it does not seem to be possible to correctly guess the sequence to use for the last_insert_id
  r7461 at Thesaurus (orig r7458):  rbuels | 2009-09-01 12:54:34 +0200
  in Pg storage, added a warning for the case when the nextval sequence is not schema-qualified
  r7462 at Thesaurus (orig r7459):  rbuels | 2009-09-01 13:01:31 +0200
  tweak to Pg test, warnings_like -> warnings_exist
  r7463 at Thesaurus (orig r7460):  ribasushi | 2009-09-01 13:34:59 +0200
  Rewrap todo properly
  r7490 at Thesaurus (orig r7487):  ribasushi | 2009-09-02 14:16:01 +0200
  Make pg sequence autodetect deterministic (or throw exceptions). Test needs adjusting
  r7491 at Thesaurus (orig r7488):  rbuels | 2009-09-02 19:15:01 +0200
  some reorganization and cleanup of pg-specific tests
  r7492 at Thesaurus (orig r7489):  rbuels | 2009-09-02 20:08:31 +0200
  more cleanup of 72pg.t
  r7495 at Thesaurus (orig r7492):  rbuels | 2009-09-02 20:48:12 +0200
  more cleanup of pg tests, added cascade to drop function, cleaned up create and drop of schemas to use dbh_do
  r7496 at Thesaurus (orig r7493):  rbuels | 2009-09-02 20:50:42 +0200
  oops, missed something screwed up by the pull
  r7525 at Thesaurus (orig r7522):  rbuels | 2009-09-03 20:45:53 +0200
  added __END__ before pod in Pg storage
  r7526 at Thesaurus (orig r7523):  rbuels | 2009-09-03 20:46:00 +0200
  renamed pg test schemas to be more organized
  r7531 at Thesaurus (orig r7528):  rbuels | 2009-09-04 00:28:11 +0200
  more pg test cleanup
  r7532 at Thesaurus (orig r7529):  rbuels | 2009-09-04 00:28:17 +0200
  more pg test cleanup
  r7533 at Thesaurus (orig r7530):  rbuels | 2009-09-04 00:28:25 +0200
  starting work on extended set of Pg auto-pk tests
  r7534 at Thesaurus (orig r7531):  rbuels | 2009-09-04 00:28:31 +0200
  more work on extended set of Pg auto-pk tests
  r7535 at Thesaurus (orig r7532):  rbuels | 2009-09-04 00:28:39 +0200
  more work on pg tests
  r7536 at Thesaurus (orig r7533):  rbuels | 2009-09-04 00:28:45 +0200
  more work on extended set of Pg auto-pk tests
  r7537 at Thesaurus (orig r7534):  rbuels | 2009-09-04 00:28:50 +0200
  added .gitignore for users of git-svn
  r7538 at Thesaurus (orig r7535):  rbuels | 2009-09-04 00:28:58 +0200
  more work on extended set of Pg auto-pk tests
  r7539 at Thesaurus (orig r7536):  rbuels | 2009-09-04 00:29:04 +0200
  added darcs and git to MANIFEST.SKIP version control skipping section
  r7540 at Thesaurus (orig r7537):  rbuels | 2009-09-04 00:41:26 +0200
  more work on extended set of Pg auto-pk tests
  r7541 at Thesaurus (orig r7538):  rbuels | 2009-09-04 00:41:32 +0200
  more work on extended set of Pg auto-pk tests
  r7542 at Thesaurus (orig r7539):  rbuels | 2009-09-04 00:41:38 +0200
  more work on extended set of Pg auto-pk tests
  r7543 at Thesaurus (orig r7540):  rbuels | 2009-09-04 02:20:23 +0200
  more work on extended set of Pg auto-pk tests
  r7544 at Thesaurus (orig r7541):  rbuels | 2009-09-04 02:20:32 +0200
  rewrote autoinc fetcher as a query into the pg_catalog.  all the old tests pass now, but not my new tests.  the new tests might be buggy
  r7545 at Thesaurus (orig r7542):  rbuels | 2009-09-04 02:20:39 +0200
  oops, forgot to put the drop for the extended tests back in the pg tests
  r7546 at Thesaurus (orig r7543):  rbuels | 2009-09-04 02:41:56 +0200
  couple of comment/documentation tweaks to pg storage driver
  r7547 at Thesaurus (orig r7544):  rbuels | 2009-09-04 02:42:02 +0200
  fixed my tests
  r7548 at Thesaurus (orig r7545):  rbuels | 2009-09-04 02:42:09 +0200
  clarified the POD in Pg storage driver regarding multi-schema support
  r7551 at Thesaurus (orig r7548):  ribasushi | 2009-09-04 08:51:30 +0200
  Proper unconnected test
  r7554 at Thesaurus (orig r7551):  ribasushi | 2009-09-04 11:26:12 +0200
  Fixes to pg test after review:
  - Move the store_column test to 60core.t
  - Streamline the select ... for update test
  - Disable all exception warnings for normal test runs
  
  r7555 at Thesaurus (orig r7552):  ribasushi | 2009-09-04 11:56:00 +0200
  Rewrite selector using sqla
  r7562 at Thesaurus (orig r7559):  rbuels | 2009-09-04 19:42:52 +0200
  moved search_path querying function from Pg storage driver into tests
  r7563 at Thesaurus (orig r7560):  rbuels | 2009-09-04 19:43:00 +0200
  refactored how Pg storage driver calls sequence search, made error message more informative when query into pg_catalog fails
  r7564 at Thesaurus (orig r7561):  rbuels | 2009-09-04 19:43:08 +0200
  tweaked pg sequence discovery error message a bit more
  r7565 at Thesaurus (orig r7562):  rbuels | 2009-09-04 19:43:17 +0200
  added big block comment explaining Pg sequence discovery strategy
  r7566 at Thesaurus (orig r7563):  rbuels | 2009-09-04 20:35:10 +0200
  added code to use DBD::Pg column_info to fetch column default if recent enough
  r7567 at Thesaurus (orig r7564):  rbuels | 2009-09-04 20:35:18 +0200
  tweaked comment
  r7568 at Thesaurus (orig r7565):  rbuels | 2009-09-04 20:35:30 +0200
  oops, DBD::Pg 2.15.1 should be included in working versions
 
 r7572 at Thesaurus (orig r7569):  ribasushi | 2009-09-04 21:32:01 +0200
 Stop double-caching datetime_parser - keep it in the storage only
 r7573 at Thesaurus (orig r7570):  ribasushi | 2009-09-04 21:36:39 +0200
 No Serialize::Storable in core
 r7574 at Thesaurus (orig r7571):  ribasushi | 2009-09-04 21:49:54 +0200
 Changes
 r7580 at Thesaurus (orig r7577):  ribasushi | 2009-09-06 12:28:44 +0200
 Add mysterious exception test
 r7582 at Thesaurus (orig r7579):  ribasushi | 2009-09-06 15:43:10 +0200
 No connection - no cleanup
 r7583 at Thesaurus (orig r7580):  ribasushi | 2009-09-06 15:45:51 +0200
 Streamline test
 r7584 at Thesaurus (orig r7581):  ribasushi | 2009-09-06 17:39:03 +0200
 Test cleanup:
 Benchmark and Data::Dumper have been in core forever
 Make POD testing conditional as shown in http://use.perl.org/~Alias/journal/38822
 Remove some dead cdbi test files
 Stop openly giving contributors an option to override the authorcheck
 
 r7585 at Thesaurus (orig r7582):  ribasushi | 2009-09-06 17:48:32 +0200
 Done long time ago
 r7586 at Thesaurus (orig r7583):  ribasushi | 2009-09-06 17:56:27 +0200
 Release 0.08110
 r7588 at Thesaurus (orig r7585):  ribasushi | 2009-09-06 18:33:46 +0200
 Stop eating exceptions in ::Storage::DBI::DESTROY
 r7589 at Thesaurus (orig r7586):  ribasushi | 2009-09-06 20:35:30 +0200
 Centralize identity insert control for mssql (it seems that issuing an OFF is not necessary)
 r7590 at Thesaurus (orig r7587):  ribasushi | 2009-09-06 20:45:41 +0200
 Clearer MSSQL error message
 r7591 at Thesaurus (orig r7588):  ribasushi | 2009-09-06 23:58:22 +0200
 Fix mssql pod
 r7592 at Thesaurus (orig r7589):  ribasushi | 2009-09-07 09:06:05 +0200
 Release 0.08111
 r7598 at Thesaurus (orig r7595):  wreis | 2009-09-07 15:31:38 +0200
 improved warn for Storable hooks in ResultSourceHandle
 r7600 at Thesaurus (orig r7597):  ribasushi | 2009-09-07 16:26:59 +0200
 Whoops - last_insert_id allows for multiple autoinc columns - support it in pg
 r7601 at Thesaurus (orig r7598):  ribasushi | 2009-09-07 16:46:14 +0200
 Prune duplicate constraints from the find() condition
 r7606 at Thesaurus (orig r7603):  frew | 2009-09-08 20:13:29 +0200
 Turn IDENTITY_INSERT back off after inserts
 r7616 at Thesaurus (orig r7613):  ribasushi | 2009-09-09 14:16:12 +0200
 Fix warning
 r7617 at Thesaurus (orig r7614):  ribasushi | 2009-09-09 14:42:49 +0200
 Really sanify exception text
 r7624 at Thesaurus (orig r7621):  mo | 2009-09-10 18:53:32 +0200
 added test to make sure that store_column is called even for non-dirty columns
 r7625 at Thesaurus (orig r7622):  bluefeet | 2009-09-10 19:03:21 +0200
 Fix RSC->reset() to no longer return $self, which fixes Cursor::Cached + RSC.
 r7626 at Thesaurus (orig r7623):  ribasushi | 2009-09-10 19:32:03 +0200
 The real fix
 r7627 at Thesaurus (orig r7624):  matthewt | 2009-09-11 02:33:17 +0200
 make it clear that we are not supposed to have optional deps
 r7628 at Thesaurus (orig r7625):  ribasushi | 2009-09-11 06:30:03 +0200
 Changes so far
 r7629 at Thesaurus (orig r7626):  ribasushi | 2009-09-11 06:39:45 +0200
 Fix borked makefile
 r7630 at Thesaurus (orig r7627):  ribasushi | 2009-09-11 15:39:42 +0200
 Fixed minor problem with txn scope guard - rollback exceptions were never reported
 r7632 at Thesaurus (orig r7629):  ribasushi | 2009-09-11 23:06:54 +0200
 Extend prefetch tests
 r7633 at Thesaurus (orig r7630):  ribasushi | 2009-09-11 23:13:45 +0200
 Reverting http://dev.catalyst.perl.org/svnweb/bast/revision?rev=4278 - it seems to pass fine now
 r7634 at Thesaurus (orig r7631):  ribasushi | 2009-09-12 00:15:50 +0200
 Add single() to RSC
 r7635 at Thesaurus (orig r7632):  ribasushi | 2009-09-12 00:44:01 +0200
 This is how the txnguard should really work
 r7636 at Thesaurus (orig r7633):  ribasushi | 2009-09-12 00:58:21 +0200
 Fix borked example
 r7637 at Thesaurus (orig r7634):  ribasushi | 2009-09-12 00:58:58 +0200
 scopeguard almost done
 r7638 at Thesaurus (orig r7635):  brunov | 2009-09-12 01:25:12 +0200
 Update DBIx::Class::Manual::Example.pod to reflect previous changes in examples/Schema/insertdb.pl
 
 r7639 at Thesaurus (orig r7636):  brunov | 2009-09-12 01:27:17 +0200
 Added Bruno Vecchi to the Contributors section in DBIx/Class.pm
 
 
 r7640 at Thesaurus (orig r7637):  ribasushi | 2009-09-12 01:31:16 +0200
 Final scopeguard tweak (?)
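
 The guard idiom in question, roughly (the resultset name is hypothetical):

     my $guard = $schema->txn_scope_guard;
     $schema->resultset('Artist')->create({ name => 'Foo' });
     $guard->commit;   # if $guard is destroyed uncommitted (e.g. on an
                       # exception) the transaction is rolled back
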
 r7644 at Thesaurus (orig r7641):  ribasushi | 2009-09-12 12:46:51 +0200
 Even better localization of $@, and don't use Test::Warn for the time being, as something is freaking out Sub::UpLevel
 r7670 at Thesaurus (orig r7659):  ribasushi | 2009-09-14 18:24:44 +0200
 Someone claimed this is a problem...
 r7673 at Thesaurus (orig r7662):  ribasushi | 2009-09-15 09:43:46 +0200
 Warn when distinct is used with group_by
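
 The now-warning combination, sketched (source and column are hypothetical):

     $schema->resultset('CD')->search({}, {
       distinct => 1,                # generates its own GROUP BY
       group_by => [ 'me.artist' ],  # ...so also passing one is ambiguous
     });
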
 r7674 at Thesaurus (orig r7663):  rbuels | 2009-09-15 22:45:32 +0200
 doc patch, clarified warning about using find_or_create() and friends on tables with auto-increment or similar columns
 r7675 at Thesaurus (orig r7664):  rbuels | 2009-09-15 22:55:15 +0200
 another doc clarification regarding auto-inc columns with find_or_create() and such functions
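
 A sketch of the pitfall those docs describe (columns are hypothetical):
 when the arguments do not fully specify a unique constraint, the find()
 half can never match and a row is created on every call:

     # 'title' alone is not unique here => this always inserts
     $rs->find_or_create({ title => 'Spoonful of bees' });
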
 r7683 at Thesaurus (orig r7672):  ribasushi | 2009-09-17 13:54:44 +0200
 Fix left-join chaining
 r7694 at Thesaurus (orig r7683):  ribasushi | 2009-09-18 12:36:42 +0200
  r6389 at Thesaurus (orig r6388):  caelum | 2009-05-23 22:48:06 +0200
  recreating Sybase branch
  r6395 at Thesaurus (orig r6394):  caelum | 2009-05-24 01:47:32 +0200
  try not to fuck mssql with the sybase crap
  r6488 at Thesaurus (orig r6487):  caelum | 2009-06-03 17:31:24 +0200
  resolve conflict
  r6490 at Thesaurus (orig r6489):  caelum | 2009-06-03 18:25:36 +0200
  add missing files to sybase branch
  r6492 at Thesaurus (orig r6491):  caelum | 2009-06-04 01:51:39 +0200
  fix Sybase DT stuff and storage bases
  r6493 at Thesaurus (orig r6492):  caelum | 2009-06-04 02:10:45 +0200
  fix base for mssql (can't be a sybase anymore)
  r6494 at Thesaurus (orig r6493):  caelum | 2009-06-04 02:20:37 +0200
  test sybase SMALLDATETIME inflation
  r6495 at Thesaurus (orig r6494):  caelum | 2009-06-04 04:52:31 +0200
  update Sybase docs
  r6501 at Thesaurus (orig r6500):  caelum | 2009-06-04 14:50:49 +0200
  sybase limit count without offset now works
  r6504 at Thesaurus (orig r6503):  caelum | 2009-06-04 18:03:01 +0200
  use TOP for sybase limit count thanks to refactored count
  r6505 at Thesaurus (orig r6504):  caelum | 2009-06-04 18:41:54 +0200
  back to counting rows for Sybase LIMIT counts
  r6506 at Thesaurus (orig r6505):  caelum | 2009-06-04 19:07:48 +0200
  minor sybase count fix
  r6512 at Thesaurus (orig r6511):  caelum | 2009-06-05 01:02:48 +0200
  test sybase group_by count, works
  r6513 at Thesaurus (orig r6512):  caelum | 2009-06-05 01:28:18 +0200
  set date format on _rebless correctly
  r6516 at Thesaurus (orig r6515):  caelum | 2009-06-05 02:24:46 +0200
  manually merged in sybase_noquote branch
  r6518 at Thesaurus (orig r6517):  caelum | 2009-06-05 06:34:25 +0200
  shit doesn't work yet
  r6520 at Thesaurus (orig r6519):  caelum | 2009-06-05 16:55:41 +0200
  update sybase types which shouldn't be quoted
  r6525 at Thesaurus (orig r6524):  caelum | 2009-06-06 04:40:51 +0200
  tweaks to sybase types
  r6527 at Thesaurus (orig r6526):  caelum | 2009-06-06 05:36:03 +0200
  temporary sybase noquote hack
  r6595 at Thesaurus (orig r6594):  caelum | 2009-06-10 13:46:37 +0200
  Sybase::NoBindVars now correctly quotes
  r6596 at Thesaurus (orig r6595):  caelum | 2009-06-10 14:04:19 +0200
  cache rsrc in NoBindVars, use name_sep
  r6597 at Thesaurus (orig r6596):  caelum | 2009-06-10 14:35:52 +0200
  Sybase count by first pk, if available
  r6599 at Thesaurus (orig r6598):  caelum | 2009-06-10 15:00:42 +0200
  cache rsrc in NoBindVars correctly
  r6600 at Thesaurus (orig r6599):  caelum | 2009-06-10 15:27:41 +0200
  handle unknown rsrc in NoBindVars and Sybase::NoBindVars
  r6605 at Thesaurus (orig r6604):  caelum | 2009-06-10 18:17:31 +0200
  cache rsrc properly in NoBindVars, return undef if no rsrc
  r6658 at Thesaurus (orig r6657):  caelum | 2009-06-13 05:57:40 +0200
  switch to DateTime::Format::Sybase
  r6700 at Thesaurus (orig r6699):  caelum | 2009-06-17 16:25:28 +0200
  rename and document dt setup method, will be an on_connect_call at later merge point
  r6701 at Thesaurus (orig r6700):  caelum | 2009-06-17 16:30:08 +0200
  more dt docs reorg
  r6715 at Thesaurus (orig r6714):  caelum | 2009-06-19 01:28:17 +0200
  todo tests for text/image columns in sybase
  r6716 at Thesaurus (orig r6715):  caelum | 2009-06-19 01:46:56 +0200
  added connect_call_blob_setup for Sybase
  r6724 at Thesaurus (orig r6723):  caelum | 2009-06-19 17:12:20 +0200
  cleanups
  r6771 at Thesaurus (orig r6770):  caelum | 2009-06-23 16:42:32 +0200
  minor changes
  r6788 at Thesaurus (orig r6787):  caelum | 2009-06-25 05:31:06 +0200
  fixup POD, comment out count
  r6811 at Thesaurus (orig r6810):  caelum | 2009-06-28 02:14:56 +0200
  prototype blob implementation
  r6857 at Thesaurus (orig r6856):  caelum | 2009-06-29 23:45:19 +0200
  branch pushed, removing
  r6868 at Thesaurus (orig r6867):  caelum | 2009-06-30 03:39:51 +0200
  merge on_connect_call updates
  r6877 at Thesaurus (orig r6876):  caelum | 2009-06-30 12:46:43 +0200
  code cleanups
  r6957 at Thesaurus (orig r6956):  caelum | 2009-07-03 02:32:48 +0200
  minor changes
  r6959 at Thesaurus (orig r6958):  caelum | 2009-07-03 05:04:12 +0200
  fix sybase mro
  r7001 at Thesaurus (orig r7000):  caelum | 2009-07-07 13:34:23 +0200
  fix sybase rebless to NoBindVars
  r7021 at Thesaurus (orig r7020):  caelum | 2009-07-10 12:52:13 +0200
  fix NoBindVars
  r7053 at Thesaurus (orig r7052):  caelum | 2009-07-15 01:39:02 +0200
  set maxConnect in DSN and add docs
  r7065 at Thesaurus (orig r7064):  caelum | 2009-07-17 09:39:54 +0200
  make insertion of blobs into tables with identity columns work, other minor fixes
  r7070 at Thesaurus (orig r7069):  caelum | 2009-07-17 23:30:13 +0200
  some compatibility updated for older DBD::Sybase versions, some initial work on _select_args for blobs
  r7072 at Thesaurus (orig r7071):  caelum | 2009-07-19 23:57:11 +0200
  mangling _select_args turned out to be unnecessary
  r7073 at Thesaurus (orig r7072):  caelum | 2009-07-20 01:02:19 +0200
  minor cleanups
  r7074 at Thesaurus (orig r7073):  caelum | 2009-07-20 15:47:48 +0200
  blob update now works
  r7076 at Thesaurus (orig r7075):  caelum | 2009-07-20 19:06:46 +0200
  change the (incorrect) version check to a check for FreeTDS
  r7077 at Thesaurus (orig r7076):  caelum | 2009-07-20 19:13:25 +0200
  better check for FreeTDS thanks to arcanez
  r7089 at Thesaurus (orig r7086):  caelum | 2009-07-22 07:09:21 +0200
  minor cleanups
  r7091 at Thesaurus (orig r7088):  caelum | 2009-07-22 17:05:37 +0200
  remove unnecessary test Result class
  r7092 at Thesaurus (orig r7089):  caelum | 2009-07-23 00:47:14 +0200
  fix doc for how to check for FreeTDS
  r7095 at Thesaurus (orig r7092):  caelum | 2009-07-23 14:35:53 +0200
  doc tweak
  r7115 at Thesaurus (orig r7112):  caelum | 2009-07-24 09:58:24 +0200
  add support for IDENTITY_INSERT
  r7117 at Thesaurus (orig r7114):  caelum | 2009-07-24 16:19:08 +0200
  savepoint support
  r7120 at Thesaurus (orig r7117):  caelum | 2009-07-24 20:35:37 +0200
  fix race condition in last_insert_id with placeholders
  r7121 at Thesaurus (orig r7118):  caelum | 2009-07-24 21:22:25 +0200
  code cleanup
  r7124 at Thesaurus (orig r7121):  caelum | 2009-07-25 16:19:58 +0200
  use _resolve_column_info in NoBindVars
  r7125 at Thesaurus (orig r7122):  caelum | 2009-07-25 21:23:49 +0200
  make insert work as a nested transaction too
  r7126 at Thesaurus (orig r7123):  caelum | 2009-07-25 22:52:17 +0200
  add money type support
  r7128 at Thesaurus (orig r7125):  caelum | 2009-07-27 03:48:35 +0200
  better FreeTDS support
  r7130 at Thesaurus (orig r7127):  caelum | 2009-07-28 06:23:54 +0200
  minor refactoring, cleanups, doc updates
  r7131 at Thesaurus (orig r7128):  caelum | 2009-07-28 09:32:45 +0200
  forgot to set mro in dbi::cursor
  r7141 at Thesaurus (orig r7138):  caelum | 2009-07-30 10:21:20 +0200
  better test for "smalldatetime" in Sybase
  r7146 at Thesaurus (orig r7143):  caelum | 2009-07-30 15:37:18 +0200
  update sqlite test schema
  r7207 at Thesaurus (orig r7204):  caelum | 2009-08-04 23:40:16 +0200
  update Changes
  r7222 at Thesaurus (orig r7219):  caelum | 2009-08-05 11:02:26 +0200
  fix a couple minor issues after pull from trunk
  r7260 at Thesaurus (orig r7257):  caelum | 2009-08-07 14:45:18 +0200
  add note about where to get Schema::Loader
  r7273 at Thesaurus (orig r7270):  ribasushi | 2009-08-09 01:19:49 +0200
  Changes and minor code rewrap
  r7285 at Thesaurus (orig r7282):  ribasushi | 2009-08-10 08:08:06 +0200
  pesky whitespace
  r7286 at Thesaurus (orig r7283):  ribasushi | 2009-08-10 08:11:46 +0200
  privatize dormant method - it may be useful for sybase at *some* point
  r7287 at Thesaurus (orig r7284):  ribasushi | 2009-08-10 08:19:55 +0200
  Whoops
  r7289 at Thesaurus (orig r7286):  caelum | 2009-08-10 08:44:51 +0200
  document placeholders_with_type_conversion_supported and add a redispatch to reblessed storage in DBI::update
  r7290 at Thesaurus (orig r7287):  caelum | 2009-08-10 10:07:45 +0200
  fix and test redispatch to reblessed storage insert/update
  r7292 at Thesaurus (orig r7289):  caelum | 2009-08-10 10:32:37 +0200
  rename get_connected_schema to get_schema in sybase test
  r7345 at Thesaurus (orig r7342):  ribasushi | 2009-08-18 22:45:06 +0200
  Fix Changes
  r7367 at Thesaurus (orig r7364):  ribasushi | 2009-08-23 10:00:34 +0200
  Minor speedup
  r7368 at Thesaurus (orig r7365):  ribasushi | 2009-08-23 10:01:10 +0200
  Generalize and hide placeholder support check
  r7369 at Thesaurus (orig r7366):  ribasushi | 2009-08-23 10:04:26 +0200
  Rename the common sybase driver
  r7373 at Thesaurus (orig r7370):  caelum | 2009-08-24 13:21:51 +0200
  make insert only use a txn if needed, add connect_call_unsafe_insert
  r7374 at Thesaurus (orig r7371):  caelum | 2009-08-24 14:42:57 +0200
  add test for IDENTITY_INSERT
  r7378 at Thesaurus (orig r7375):  caelum | 2009-08-24 15:51:48 +0200
  use debugobj->callback instead of local *_query_start in test to capture query
  r7379 at Thesaurus (orig r7376):  caelum | 2009-08-24 17:19:46 +0200
  remove duplicate oracle method and fix an mssql method call
  r7417 at Thesaurus (orig r7414):  caelum | 2009-08-29 07:23:45 +0200
  update link to Schema::Loader branch
  r7427 at Thesaurus (orig r7424):  caelum | 2009-08-29 09:31:41 +0200
  switch to ::DBI::AutoCast
  r7428 at Thesaurus (orig r7425):  ribasushi | 2009-08-29 13:36:22 +0200
  Cleanup:
  Added commented method signatures for easier debugging
  privatize transform_unbound_value as _prep_bind_value
  Replace \@_ splices with simple shifts
  Exposed TYPE_MAPPING used by native_data_type via our
  Removed use of txn_do - internal code uses the scope guard
  Renamed some variables, whitespace cleanup, the works
  r7429 at Thesaurus (orig r7426):  ribasushi | 2009-08-29 13:40:48 +0200
  Varname was absolutely correct
  r7430 at Thesaurus (orig r7427):  caelum | 2009-08-29 14:09:13 +0200
  minor changes for tests to pass again
  r7431 at Thesaurus (orig r7428):  caelum | 2009-08-29 21:08:51 +0200
  fix inserts with active cursors
  r7432 at Thesaurus (orig r7429):  caelum | 2009-08-29 22:53:02 +0200
  remove extra connection
  r7434 at Thesaurus (orig r7431):  caelum | 2009-08-30 00:02:20 +0200
  test correlated subquery
  r7442 at Thesaurus (orig r7439):  ribasushi | 2009-08-30 09:07:00 +0200
  Put the comment back
  r7443 at Thesaurus (orig r7440):  ribasushi | 2009-08-30 09:15:41 +0200
  Change should_quote_value to interpolate_unquoted to make it harder to stop quoting by accident (it's easier to return an undef by accident than a 1)
  r7446 at Thesaurus (orig r7443):  caelum | 2009-08-30 18:19:46 +0200
  added txn_scope_guards for blob operations
  r7447 at Thesaurus (orig r7444):  ribasushi | 2009-08-30 18:56:43 +0200
  Rename insert_txn to unsafe_insert
  r7512 at Thesaurus (orig r7509):  ribasushi | 2009-09-03 20:24:14 +0200
  Minor cleanups
  r7575 at Thesaurus (orig r7572):  caelum | 2009-09-05 07:23:57 +0200
  pending review by mpeppler
  r7593 at Thesaurus (orig r7590):  ribasushi | 2009-09-07 09:10:05 +0200
  Release 0.08111 tag
  r7594 at Thesaurus (orig r7591):  ribasushi | 2009-09-07 09:14:33 +0200
  Whoops this should not have committed
  r7602 at Thesaurus (orig r7599):  caelum | 2009-09-07 21:31:38 +0200
  fix _insert_dbh code to only connect when needed, doc update
  r7607 at Thesaurus (orig r7604):  caelum | 2009-09-09 02:15:54 +0200
  remove unsafe_insert
  r7608 at Thesaurus (orig r7605):  ribasushi | 2009-09-09 09:14:20 +0200
  Localisation ain't free, we don't do it unless we have to
  r7609 at Thesaurus (orig r7606):  ribasushi | 2009-09-09 09:40:29 +0200
  Much simpler
  r7610 at Thesaurus (orig r7607):  ribasushi | 2009-09-09 10:38:41 +0200
  Reduce amount of perl-golf :)
  r7611 at Thesaurus (orig r7608):  ribasushi | 2009-09-09 10:41:15 +0200
  This should not have worked - I guess we lack tests?
  r7614 at Thesaurus (orig r7611):  caelum | 2009-09-09 12:08:36 +0200
  test multi-row blob update
  r7619 at Thesaurus (orig r7616):  caelum | 2009-09-09 18:01:15 +0200
  remove Sub::Name hack for method dispatch, pass $next instead
  r7620 at Thesaurus (orig r7617):  caelum | 2009-09-10 02:16:03 +0200
  do blob update over _insert_dbh
  r7661 at Thesaurus (orig r7650):  caelum | 2009-09-13 10:27:44 +0200
  change _insert_dbh to _insert_storage
  r7663 at Thesaurus (orig r7652):  caelum | 2009-09-13 11:52:20 +0200
  make sure _init doesn't loop, steal insert_bulk from mssql, add some insert_bulk tests
  r7664 at Thesaurus (orig r7653):  caelum | 2009-09-13 13:27:51 +0200
  allow subclassing of methods proxied to _writer_storage
  r7666 at Thesaurus (orig r7655):  caelum | 2009-09-14 15:09:21 +0200
  sybase bulk API support stuff (no blobs yet, coming soon...)
  r7667 at Thesaurus (orig r7656):  caelum | 2009-09-14 15:33:14 +0200
  add another test for sybase bulk stuff (passes)
  r7668 at Thesaurus (orig r7657):  caelum | 2009-09-14 15:44:06 +0200
  minor change (fix inverted boolean for warning)
  r7669 at Thesaurus (orig r7658):  caelum | 2009-09-14 15:48:52 +0200
  remove @args from DBI::sth, use full arg list
  r7676 at Thesaurus (orig r7665):  caelum | 2009-09-16 15:06:35 +0200
  use execute_array for insert_bulk, test insert_bulk with blobs, clean up blob tests a bit
  r7680 at Thesaurus (orig r7669):  ribasushi | 2009-09-16 19:36:19 +0200
  Remove branched changes
  r7682 at Thesaurus (orig r7671):  caelum | 2009-09-17 03:03:34 +0200
  I'll rewrite this bit tomorrow to be less retarded
  r7684 at Thesaurus (orig r7673):  caelum | 2009-09-18 04:03:15 +0200
  fix yesterday's stuff, identity_update works, blob updates are better
  r7686 at Thesaurus (orig r7675):  caelum | 2009-09-18 04:22:38 +0200
  column no longer necessary in test
  r7688 at Thesaurus (orig r7677):  caelum | 2009-09-18 08:33:14 +0200
  fix freetds
  r7691 at Thesaurus (orig r7680):  ribasushi | 2009-09-18 12:25:42 +0200
   r7678 at Thesaurus (orig r7667):  ribasushi | 2009-09-16 19:31:14 +0200
   New subbranch
   r7679 at Thesaurus (orig r7668):  ribasushi | 2009-09-16 19:34:29 +0200
   Caelum's work so far
   r7690 at Thesaurus (orig r7679):  caelum | 2009-09-18 11:10:16 +0200
   support for blobs in insert_bulk fallback
  
  r7692 at Thesaurus (orig r7681):  ribasushi | 2009-09-18 12:28:09 +0200
  Rollback all bulk insert code before merge
 
 r7699 at Thesaurus (orig r7688):  ribasushi | 2009-09-18 14:12:05 +0200
 Cleanup exception handling
 r7700 at Thesaurus (orig r7689):  ribasushi | 2009-09-18 14:22:02 +0200
 duh
 r7701 at Thesaurus (orig r7690):  ribasushi | 2009-09-18 14:25:06 +0200
 Minor cleanup of RSC with has_many joins
 r7702 at Thesaurus (orig r7691):  ribasushi | 2009-09-18 14:32:15 +0200
 Changes and dev notes in makefile
 r7705 at Thesaurus (orig r7694):  ribasushi | 2009-09-18 14:52:26 +0200
 Nothing says the grouping column can not be nullable
 r7706 at Thesaurus (orig r7695):  ribasushi | 2009-09-18 14:53:33 +0200
 Changes
 r7707 at Thesaurus (orig r7696):  ribasushi | 2009-09-18 20:09:04 +0200
 This code belongs in Storage::DBI
 r7708 at Thesaurus (orig r7697):  ribasushi | 2009-09-18 20:38:26 +0200
 Clear up some legacy cruft and straighten inheritance
 r7710 at Thesaurus (orig r7699):  ribasushi | 2009-09-21 00:25:20 +0200
 Backout sybase changes
 r7713 at Thesaurus (orig r7702):  ribasushi | 2009-09-21 00:46:32 +0200
 Missed a part of the revert
 r7720 at Thesaurus (orig r7709):  ribasushi | 2009-09-21 02:49:11 +0200
 Oops
 r7721 at Thesaurus (orig r7710):  ribasushi | 2009-09-21 11:02:14 +0200
 Changes
 r7722 at Thesaurus (orig r7711):  ribasushi | 2009-09-21 12:49:30 +0200
 Undocument the from attribute (the description was mostly outdated anyway)
 r7723 at Thesaurus (orig r7712):  ribasushi | 2009-09-21 12:58:58 +0200
 Release 0.08112
 r7726 at Thesaurus (orig r7715):  ribasushi | 2009-09-21 16:26:07 +0200
 A test for an obscure join syntax - make sure we don't break it
 r7732 at Thesaurus (orig r7721):  ribasushi | 2009-09-22 12:58:09 +0200
 this would break in the future - sanitize sql fed to the tester
 r7735 at Thesaurus (orig r7724):  ribasushi | 2009-09-22 13:07:31 +0200
 The hack is no longer necessary with a recent sqla
 r7740 at Thesaurus (orig r7729):  caelum | 2009-09-24 23:44:01 +0200
 add test for multiple active statements in mssql over dbd::sybase
 r7741 at Thesaurus (orig r7730):  caelum | 2009-09-25 08:46:22 +0200
 test on_connect_do with a coderef connect_info too
 r7742 at Thesaurus (orig r7731):  caelum | 2009-09-25 23:26:52 +0200
 failing test for simple transaction with mssql via dbd::sybase
 r7765 at Thesaurus (orig r7753):  ribasushi | 2009-10-03 15:49:14 +0200
 Test reorg (no changes)
 r7766 at Thesaurus (orig r7754):  ribasushi | 2009-10-03 15:55:25 +0200
 Add failing tests for RT#50003
 r7767 at Thesaurus (orig r7755):  caelum | 2009-10-03 16:09:45 +0200
 fix on_connect_ with coderef connect_info
 r7771 at Thesaurus (orig r7759):  ribasushi | 2009-10-04 13:17:53 +0200
 Fix AutoCast's POD
 r7782 at Thesaurus (orig r7770):  ribasushi | 2009-10-09 06:57:20 +0200
  r7777 at Thesaurus (orig r7765):  frew | 2009-10-07 20:05:05 +0200
  add method to check if an rs is paginated
  r7778 at Thesaurus (orig r7766):  frew | 2009-10-07 20:31:02 +0200
  is_paginated method and test
  r7780 at Thesaurus (orig r7768):  frew | 2009-10-09 06:45:36 +0200
  change name of method
  r7781 at Thesaurus (orig r7769):  frew | 2009-10-09 06:47:31 +0200
  add message to changelog for is_paged
 
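 Usage of the new method, roughly (do_pagination_links() is made up):

     my $rs = $schema->resultset('CD')->search({}, { page => 1, rows => 10 });
     do_pagination_links() if $rs->is_paged;
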
 r7785 at Thesaurus (orig r7773):  ribasushi | 2009-10-09 11:00:36 +0200
 Ugh CRLF
 r7786 at Thesaurus (orig r7774):  ribasushi | 2009-10-09 11:04:35 +0200
 Skip versioning test on really old perls lacking Time::HiRes
 r7787 at Thesaurus (orig r7775):  ribasushi | 2009-10-09 11:04:50 +0200
 Changes
 r7788 at Thesaurus (orig r7776):  triode | 2009-10-09 22:32:04 +0200
 added troubleshooting case of excessive memory allocation involving TEXT/BLOB/etc
 columns and large LongReadLen
 
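 The troubleshooting scenario, sketched (the schema class and value are
 illustrative): some drivers pre-allocate LongReadLen bytes per long
 column, so large values multiply quickly across wide selects:

     my $schema = My::Schema->connect($dsn, $user, $pass, {
       LongReadLen => 64 * 1024,   # keep as small as your data allows
     });
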
 r7789 at Thesaurus (orig r7777):  triode | 2009-10-09 22:44:21 +0200
 added my name to contributors list
 
 r7790 at Thesaurus (orig r7778):  ribasushi | 2009-10-10 18:49:15 +0200
 Whoops, this isn't right
 r7791 at Thesaurus (orig r7779):  ribasushi | 2009-10-11 15:44:18 +0200
 More ordered fixes
 r7793 at Thesaurus (orig r7781):  norbi | 2009-10-13 11:27:18 +0200
  r7982 at vger:  mendel | 2009-10-13 11:26:11 +0200
  Fixed a typo and a POD error.
 
 r7805 at Thesaurus (orig r7793):  ribasushi | 2009-10-16 14:28:35 +0200
 Fix test to stop failing when DT-support is not present
 r7811 at Thesaurus (orig r7799):  caelum | 2009-10-18 11:13:29 +0200
  r20728 at hlagh (orig r7703):  ribasushi | 2009-09-20 18:51:16 -0400
  Another try at a clean sybase branch
  r20730 at hlagh (orig r7705):  ribasushi | 2009-09-20 18:58:09 -0400
  Part one of the sybase work by Caelum (mostly reviewed)
  r20731 at hlagh (orig r7706):  ribasushi | 2009-09-20 19:18:40 -0400
  main sybase branch ready
  r21051 at hlagh (orig r7797):  caelum | 2009-10-18 04:57:43 -0400
   r20732 at hlagh (orig r7707):  ribasushi | 2009-09-20 19:20:00 -0400
   Branch for bulk insert
   r20733 at hlagh (orig r7708):  ribasushi | 2009-09-20 20:06:21 -0400
   All sybase bulk-insert code by Caelum
   r20750 at hlagh (orig r7725):  caelum | 2009-09-24 02:47:39 -0400
   clean up set_identity stuff
   r20751 at hlagh (orig r7726):  caelum | 2009-09-24 05:21:18 -0400
   minor cleanups, test update of blob to NULL
   r20752 at hlagh (orig r7727):  caelum | 2009-09-24 08:45:04 -0400
   remove some duplicate code
   r20753 at hlagh (orig r7728):  caelum | 2009-09-24 09:57:58 -0400
   fix insert with all defaults
   r20786 at hlagh (orig r7732):  caelum | 2009-09-25 21:17:16 -0400
   some cleanups
   r20804 at hlagh (orig r7736):  caelum | 2009-09-28 05:31:38 -0400
   minor changes
   r20805 at hlagh (orig r7737):  caelum | 2009-09-28 06:25:48 -0400
   fix DT stuff
   r20809 at hlagh (orig r7741):  caelum | 2009-09-28 22:25:55 -0400
   removed some dead code, added fix and test for _execute_array_empty
   r20811 at hlagh (orig r7743):  caelum | 2009-09-29 13:36:20 -0400
   minor changes after review
   r20812 at hlagh (orig r7744):  caelum | 2009-09-29 14:16:03 -0400
   do not clobber $rv from execute_array
   r20813 at hlagh (orig r7745):  caelum | 2009-09-29 14:38:14 -0400
   make insert_bulk atomic
   r20815 at hlagh (orig r7747):  caelum | 2009-09-29 20:35:26 -0400
   remove _exhaaust_statements
   r20816 at hlagh (orig r7748):  caelum | 2009-09-29 21:48:38 -0400
   fix insert_bulk when not using bulk api inside a txn
   r20831 at hlagh (orig r7749):  caelum | 2009-09-30 02:53:42 -0400
   added test for populate being atomic
   r20832 at hlagh (orig r7750):  caelum | 2009-09-30 03:00:59 -0400
   factor out subclass-specific _execute_array callback
   r20833 at hlagh (orig r7751):  caelum | 2009-10-01 11:59:30 -0400
   remove a piece of dead code
   r20840 at hlagh (orig r7758):  caelum | 2009-10-03 15:46:56 -0400
   remove _pretty_print
   r20842 at hlagh (orig r7760):  caelum | 2009-10-04 16:19:56 -0400
   minor optimization for insert_bulk
   r21050 at hlagh (orig r7796):  caelum | 2009-10-18 04:56:54 -0400
   error checking related to literal SQL for insert_bulk
  
 
 r7820 at Thesaurus (orig r7808):  caelum | 2009-10-21 03:10:39 +0200
 add test for populate with literal sql mixed with binds, improve error messages
 r7823 at Thesaurus (orig r7811):  ribasushi | 2009-10-21 16:33:45 +0200
 Show what's wrong with the current populate code
 r7824 at Thesaurus (orig r7812):  caelum | 2009-10-22 11:10:38 +0200
 stringify values passed to populate/insert_bulk
 r7825 at Thesaurus (orig r7813):  ribasushi | 2009-10-22 13:17:41 +0200
 Some smokers run the suite for 30 *minutes* - the timeout seems to be too short for them (boggle)
 r7826 at Thesaurus (orig r7814):  caelum | 2009-10-22 14:41:37 +0200
 a few extra tests can never hurt, right? :)
 r7827 at Thesaurus (orig r7815):  ribasushi | 2009-10-23 10:51:05 +0200
 Prevent sqlt from failing silently
 r7828 at Thesaurus (orig r7816):  ribasushi | 2009-10-23 10:52:49 +0200
 { is_foreign_key_constraint => 0, on_delete => undef } is a valid construct - no need to carp
 r7832 at Thesaurus (orig r7820):  robkinyon | 2009-10-26 20:11:22 +0100
 Fixed bad if-check in columns()
 r7840 at Thesaurus (orig r7828):  caelum | 2009-10-31 14:01:56 +0100
 change repository in meta to point to real svn url rather than svnweb
 r7842 at Thesaurus (orig r7830):  caelum | 2009-10-31 21:04:39 +0100
 pass sqlite_version to SQLT
 r7843 at Thesaurus (orig r7831):  caelum | 2009-10-31 21:22:37 +0100
 fix regex to numify sqlite_version
 r7844 at Thesaurus (orig r7832):  caelum | 2009-10-31 23:59:19 +0100
 work-around disconnect bug with DBD::Pg 2.15.1
 r7855 at Thesaurus (orig r7843):  ribasushi | 2009-11-04 10:55:51 +0100
  r7817 at Thesaurus (orig r7805):  rbuels | 2009-10-21 02:37:28 +0200
  making a branch, here we go again with the pg_unqualified_schema
  r7818 at Thesaurus (orig r7806):  rbuels | 2009-10-21 02:38:59 +0200
  more pg unqualified schema tests, which expose a gap in the coverage
  r7819 at Thesaurus (orig r7807):  rbuels | 2009-10-21 03:10:38 +0200
  gutted Pg storage driver's sequence discovery to just rely on DBD::Pg's last_insert_id.  this needs testing with older versions of DBD::Pg
  r7821 at Thesaurus (orig r7809):  rbuels | 2009-10-21 04:00:39 +0200
  more coverage in Pg sequence-discovery tests.  I think this shows why last_insert_id cannot be used.
  r7822 at Thesaurus (orig r7810):  rbuels | 2009-10-21 04:07:05 +0200
  reverted [7807], and just changed code to use the custom pg_catalog query, which is the only thing that works in the pathological case where DBIC is told a different primary key from the primary key that is set on the table in the DB ([7809] added testing for this)
  r7852 at Thesaurus (orig r7840):  rbuels | 2009-11-03 18:47:05 +0100
  added Changes line mentioning tweak to Pg auto-inc fix
  r7854 at Thesaurus (orig r7842):  ribasushi | 2009-11-04 10:55:35 +0100
  Cleanup exceptions
 
 r7858 at Thesaurus (orig r7846):  caelum | 2009-11-06 16:01:30 +0100
 transactions for MSSQL over DBD::Sybase
 r7861 at Thesaurus (orig r7849):  caelum | 2009-11-10 13:16:18 +0100
 made commit/rollback when disconnected an exception
 r7862 at Thesaurus (orig r7850):  robkinyon | 2009-11-10 17:19:57 +0100
 Added a note about select
 r7863 at Thesaurus (orig r7851):  ribasushi | 2009-11-10 18:23:10 +0100
 Changes
 r7867 at Thesaurus (orig r7855):  frew | 2009-11-11 21:56:37 +0100
 RT50874
 r7868 at Thesaurus (orig r7856):  frew | 2009-11-11 23:50:43 +0100
 RT50828
 r7869 at Thesaurus (orig r7857):  frew | 2009-11-11 23:54:15 +0100
 clearer test message
 r7870 at Thesaurus (orig r7858):  frew | 2009-11-12 00:37:27 +0100
 some cleanup for $rs->populate
 r7872 at Thesaurus (orig r7860):  ribasushi | 2009-11-12 01:35:36 +0100
 Fix find on resultset with custom result_class
 r7873 at Thesaurus (orig r7861):  ribasushi | 2009-11-12 01:40:14 +0100
 Fix return value of in_storage
 r7874 at Thesaurus (orig r7862):  ribasushi | 2009-11-12 01:43:48 +0100
 Extra FAQ entry
 r7875 at Thesaurus (orig r7863):  ribasushi | 2009-11-12 02:11:25 +0100
 Sanify _determine_driver handling in ::Storage::DBI
 r7876 at Thesaurus (orig r7864):  ribasushi | 2009-11-12 02:14:37 +0100
 Add mysql determine_driver test by Pedro Melo
 r7881 at Thesaurus (orig r7869):  ribasushi | 2009-11-12 11:10:04 +0100
 _cond_for_update_delete is hopelessly broken attempting to introspect SQLA1. Replace with a horrific but effective hack
 r7882 at Thesaurus (orig r7870):  ribasushi | 2009-11-12 11:15:12 +0100
 Clarifying comment
 r7884 at Thesaurus (orig r7872):  ribasushi | 2009-11-13 00:13:40 +0100
 The real fix for the non-introspectable condition bug, mst++
 r7885 at Thesaurus (orig r7873):  ribasushi | 2009-11-13 00:24:56 +0100
 Some cleanup
 r7887 at Thesaurus (orig r7875):  frew | 2009-11-13 10:01:37 +0100
 fix subtle bug with Sybase database type determination
 r7892 at Thesaurus (orig r7880):  frew | 2009-11-14 00:53:29 +0100
 release woo!
 r7894 at Thesaurus (orig r7882):  caelum | 2009-11-14 03:57:52 +0100
 fix oracle dep in Makefile.PL
 r7895 at Thesaurus (orig r7883):  caelum | 2009-11-14 04:20:53 +0100
 skip Oracle BLOB tests on DBD::Oracle == 1.23
 r7897 at Thesaurus (orig r7885):  caelum | 2009-11-14 09:40:01 +0100
  r7357 at pentium (orig r7355):  caelum | 2009-08-20 17:58:23 -0400
  branch to support MSSQL over ADO
  r7358 at pentium (orig r7356):  caelum | 2009-08-21 00:32:14 -0400
  something apparently working
  r7359 at pentium (orig r7357):  caelum | 2009-08-21 00:53:53 -0400
  slightly better mars test, still passes
 
 r7899 at Thesaurus (orig r7887):  caelum | 2009-11-14 09:41:54 +0100
  r7888 at pentium (orig r7886):  caelum | 2009-11-14 03:41:25 -0500
  add TODO test for large column list in select
 
 r7901 at Thesaurus (orig r7889):  caelum | 2009-11-14 09:47:16 +0100
 add ADO/MSSQL to Changes
 r7902 at Thesaurus (orig r7890):  caelum | 2009-11-14 10:27:29 +0100
 fix the large column list test for ADO/MSSQL, now passes
 r7904 at Thesaurus (orig r7892):  caelum | 2009-11-14 12:20:58 +0100
 fix Changes (ADO change in wrong release)
 r7905 at Thesaurus (orig r7893):  ribasushi | 2009-11-14 19:23:23 +0100
 Release 0.08114
 r7907 at Thesaurus (orig r7895):  ribasushi | 2009-11-15 12:09:17 +0100
 Failing test to highlight mssql autoconnect regression
 r7908 at Thesaurus (orig r7896):  ribasushi | 2009-11-15 12:20:25 +0100
 Fix plan
 r7913 at Thesaurus (orig r7901):  ribasushi | 2009-11-15 13:11:38 +0100
  r7773 at Thesaurus (orig r7761):  norbi | 2009-10-05 14:49:06 +0200
  Created branch 'prefetch_bug-unqualified_column_in_search_related_cond': A bug that manifests when a prefetched table's column is referenced without the table name in the condition of a search_related() on an M:N relationship.
  r7878 at Thesaurus (orig r7866):  ribasushi | 2009-11-12 02:36:08 +0100
  Factor some code out
  r7879 at Thesaurus (orig r7867):  ribasushi | 2009-11-12 09:11:03 +0100
  Factor out more stuff
  r7880 at Thesaurus (orig r7868):  ribasushi | 2009-11-12 09:21:04 +0100
  Saner naming/comments
  r7910 at Thesaurus (orig r7898):  ribasushi | 2009-11-15 12:39:29 +0100
  Move more code to DBIHacks, put back the update/delete rs check, just in case
  r7911 at Thesaurus (orig r7899):  ribasushi | 2009-11-15 13:01:34 +0100
  TODOify test until we get an AST
  r7912 at Thesaurus (orig r7900):  ribasushi | 2009-11-15 13:10:15 +0100
  Hide from pause
 
 r7921 at Thesaurus (orig r7909):  ribasushi | 2009-11-15 14:17:48 +0100
  r7871 at Thesaurus (orig r7859):  ribasushi | 2009-11-12 00:46:07 +0100
  Branches to test some ideas
  r7889 at Thesaurus (orig r7877):  abraxxa | 2009-11-13 12:05:50 +0100
  added rels to view result classes in test schema
  
  r7890 at Thesaurus (orig r7878):  abraxxa | 2009-11-13 13:05:45 +0100
  seems I found the bugger
  
  r7917 at Thesaurus (orig r7905):  ribasushi | 2009-11-15 13:29:23 +0100
  FK constraints towards a view don't quite work
  r7918 at Thesaurus (orig r7906):  ribasushi | 2009-11-15 14:10:10 +0100
  Turn into a straight-inheritance view class
  r7919 at Thesaurus (orig r7907):  ribasushi | 2009-11-15 14:11:03 +0100
  Extensive test of virtual and classic view relationships
  r7920 at Thesaurus (orig r7908):  ribasushi | 2009-11-15 14:17:23 +0100
  Fix non-sqlt schema file
 
 r7923 at Thesaurus (orig r7911):  caelum | 2009-11-15 18:31:37 +0100
 fix MSSQL via DBD::Sybase regression
 r7930 at Thesaurus (orig r7918):  ribasushi | 2009-11-16 19:15:45 +0100
  r7864 at Thesaurus (orig r7852):  edenc | 2009-11-10 20:15:15 +0100
  branching for fixes related to prefetch, distinct and group by
  r7865 at Thesaurus (orig r7853):  edenc | 2009-11-10 20:21:38 +0100
  added test case for ensuring a column mentioned in the order by clause is also included in the group by clause
  r7926 at Thesaurus (orig r7914):  ribasushi | 2009-11-16 08:09:30 +0100
  Make _resolve_column_info function without supplying column names
  r7927 at Thesaurus (orig r7915):  ribasushi | 2009-11-16 08:11:17 +0100
  Fix order_by/distinct bug
 
 r7937 at Thesaurus (orig r7925):  ribasushi | 2009-11-19 12:04:21 +0100
 Bail out early in Versioned if no versioning checks are requested
 r7938 at Thesaurus (orig r7926):  ribasushi | 2009-11-19 12:06:13 +0100
 POD fixes
 r7940 at Thesaurus (orig r7928):  caelum | 2009-11-22 11:03:33 +0100
 fix connection setup for Sybase
 r7943 at Thesaurus (orig r7931):  caelum | 2009-11-22 13:27:43 +0100
 override _run_connection_actions for internal connection setup in sybase stuff, much cleaner this way
 r7947 at Thesaurus (orig r7935):  ribasushi | 2009-11-23 01:18:28 +0100
 Whoops
 r7948 at Thesaurus (orig r7936):  ribasushi | 2009-11-23 01:28:50 +0100
 Fix ::Versioned regression introduced in r7925
 r7951 at Thesaurus (orig r7939):  caelum | 2009-11-23 12:32:10 +0100
 add subname to rdbms_specific_methods wrapper
 r7953 at Thesaurus (orig r7941):  caelum | 2009-11-23 13:23:14 +0100
  r21187 at hlagh (orig r7933):  ribasushi | 2009-11-22 18:38:34 -0500
  New sybase refactor branch
  r21188 at hlagh (orig r7934):  ribasushi | 2009-11-22 19:06:48 -0500
  refactor part1
  r21192 at hlagh (orig r7938):  ribasushi | 2009-11-22 19:30:05 -0500
  refactor part 2
  r21194 at hlagh (orig r7940):  caelum | 2009-11-23 07:06:46 -0500
  fix test
 
 r7955 at Thesaurus (orig r7943):  ribasushi | 2009-11-23 16:30:13 +0100
 Add missing Sub::Name invocations and improve the SQLA Carp overrides
 r7957 at Thesaurus (orig r7945):  ribasushi | 2009-11-24 10:12:49 +0100
  r7749 at Thesaurus (orig r7738):  norbi | 2009-09-28 22:01:39 +0200
  Created branch 'void_populate_resultset_cond': Fixing a bug: $rs->populate in void context does not use the conditions from $rs.
  r7751 at Thesaurus (orig r7740):  norbi | 2009-09-28 23:26:06 +0200
   r7935 at vger:  mendel | 2009-09-28 23:25:52 +0200
   Undid the previous tweaks to the already existing tests and added new tests instead.
  
  r7928 at Thesaurus (orig r7916):  ribasushi | 2009-11-16 08:48:42 +0100
  Change plan
  r7956 at Thesaurus (orig r7944):  ribasushi | 2009-11-24 10:10:49 +0100
  Better naming and a bit leaner implementation. Main idea remains the same
 
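 The bug being fixed, sketched (source and columns are hypothetical): a
 void-context populate() takes the bulk-insert path, and previously lost
 the condition of the resultset it was called on:

     my $rs = $schema->resultset('Artist')->search({ label_id => 1 });
     $rs->populate([ { name => 'Foo' }, { name => 'Bar' } ]);
     # with the fix both new rows carry label_id => 1
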
 r7959 at Thesaurus (orig r7947):  ribasushi | 2009-11-24 10:39:52 +0100
 Changes and prevent a spurious todo-pass
 r7962 at Thesaurus (orig r7950):  ribasushi | 2009-11-24 19:43:42 +0100
 Extra sqla quoting test
 r7963 at Thesaurus (orig r7951):  ribasushi | 2009-11-24 19:48:01 +0100
 Extra sqla quoting test(2)
 r7964 at Thesaurus (orig r7952):  ribasushi | 2009-11-25 21:24:10 +0100
 wtf
 r7967 at Thesaurus (orig r7955):  ribasushi | 2009-11-26 11:07:06 +0100
 cleanups
 r7968 at Thesaurus (orig r7956):  ribasushi | 2009-11-26 12:11:21 +0100
 Sanify search_related chaining code (no functional changes)
 r7969 at Thesaurus (orig r7957):  ribasushi | 2009-11-26 12:52:05 +0100
 Another count() quirk down
 r7970 at Thesaurus (orig r7958):  ribasushi | 2009-11-26 14:23:28 +0100
 Add a no-accessor column to generally test handling
 r7972 at Thesaurus (orig r7960):  ribasushi | 2009-11-26 15:32:17 +0100
 Whoops, wrong accessor (things still work though)
 r7977 at Thesaurus (orig r7965):  ribasushi | 2009-11-26 16:43:21 +0100
  r7971 at Thesaurus (orig r7959):  ribasushi | 2009-11-26 14:54:17 +0100
  New branch for get_inflated_column bugfix
  r7974 at Thesaurus (orig r7962):  ribasushi | 2009-11-26 15:56:20 +0100
  Fix for rt46953
  r7975 at Thesaurus (orig r7963):  ribasushi | 2009-11-26 16:05:17 +0100
  Make Test::More happy
  r7976 at Thesaurus (orig r7964):  ribasushi | 2009-11-26 16:43:09 +0100
  Changes
 
 r7980 at Thesaurus (orig r7968):  ribasushi | 2009-11-27 01:38:11 +0100
 Fix search_related wrt grouped resultsets (distinct is currently passed to the new resultset, this is probably wrong)
 r7987 at Thesaurus (orig r7975):  ribasushi | 2009-11-28 16:54:23 +0100
 Cleanup the s.c.o. index
 r7988 at Thesaurus (orig r7976):  ribasushi | 2009-11-28 16:57:04 +0100
 Test based on http://lists.scsys.co.uk/pipermail/dbix-class/2009-November/008599.html
 r8007 at Thesaurus (orig r7995):  castaway | 2009-11-30 16:20:19 +0100
 Remove over-emphasis on +select/+as. Add docs on prefetch and other ways to get related data, with caveats etc. 
 
 r8009 at Thesaurus (orig r7997):  dew | 2009-11-30 19:37:00 +0100
 Alter the docs for has_many relationships to make them a little easier to grok
 r8021 at Thesaurus (orig r8009):  castaway | 2009-12-02 14:19:40 +0100
 Added note about prefetch and has_many related objects
 
 r8029 at Thesaurus (orig r8017):  ribasushi | 2009-12-03 13:24:04 +0100
 Source sanity check on subqueried update/delete
 r8030 at Thesaurus (orig r8018):  ribasushi | 2009-12-03 14:39:37 +0100
 Sanify populate arg handling
 r8040 at Thesaurus (orig r8028):  ribasushi | 2009-12-04 02:46:20 +0100
  r7935 at Thesaurus (orig r7923):  ribasushi | 2009-11-19 11:05:04 +0100
  Branches for RTs
  r7965 at Thesaurus (orig r7953):  ribasushi | 2009-11-26 00:19:21 +0100
  Test and fix scalarref in an inflatable slot corner-case
  r7966 at Thesaurus (orig r7954):  ribasushi | 2009-11-26 00:24:23 +0100
  Looks like we nailed a todo
  r8038 at Thesaurus (orig r8026):  ribasushi | 2009-12-04 02:45:40 +0100
  Changes
  r8039 at Thesaurus (orig r8027):  ribasushi | 2009-12-04 02:46:08 +0100
  Changes(2)
 
 r8055 at Thesaurus (orig r8043):  ribasushi | 2009-12-07 15:11:25 +0100
 Forgotten auto-savepoint example patch
 r8057 at Thesaurus (orig r8045):  ribasushi | 2009-12-08 14:13:38 +0100
 Weird test case
 r8058 at Thesaurus (orig r8046):  ribasushi | 2009-12-08 14:23:31 +0100
 Fix the test - code is correct
 r8063 at Thesaurus (orig r8051):  ribasushi | 2009-12-09 02:33:30 +0100
 It's almost 2010 - load_components ('Core') is like ewwww
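
 For contrast, the two spellings (inside a result class body):

     # the old way
     use base 'DBIx::Class';
     __PACKAGE__->load_components(qw/Core/);

     # the current way
     use base 'DBIx::Class::Core';
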
 r8067 at Thesaurus (orig r8055):  caelum | 2009-12-09 18:13:33 +0100
 workaround for evil ADO bug
 r8068 at Thesaurus (orig r8056):  ribasushi | 2009-12-09 23:13:59 +0100
  r8022 at Thesaurus (orig r8010):  frew | 2009-12-02 17:57:17 +0100
  branch for replacing TOP with RNO in MSSQL
  r8027 at Thesaurus (orig r8015):  frew | 2009-12-03 02:48:36 +0100
  Switch to RowNumberOver for MSSQL
  r8028 at Thesaurus (orig r8016):  ribasushi | 2009-12-03 10:03:18 +0100
  The correct top100 mssql solution and test
  r8031 at Thesaurus (orig r8019):  frew | 2009-12-03 15:56:35 +0100
  fix RNO for MSSQL to not use a kludgy regexp
  r8032 at Thesaurus (orig r8020):  frew | 2009-12-04 01:33:28 +0100
  initial (broken) version of 42rno.t
  r8033 at Thesaurus (orig r8021):  frew | 2009-12-04 01:37:06 +0100
  first shot at moving stuff around
  r8034 at Thesaurus (orig r8022):  frew | 2009-12-04 01:45:42 +0100
  rename files to get rid of numbers and use folders
  r8035 at Thesaurus (orig r8023):  frew | 2009-12-04 01:48:00 +0100
  missed toplimit
  r8036 at Thesaurus (orig r8024):  frew | 2009-12-04 01:52:44 +0100
  still broken rno test, but now it actually tests mssql
  r8042 at Thesaurus (orig r8030):  ribasushi | 2009-12-04 09:34:56 +0100
  Variable clash
  r8043 at Thesaurus (orig r8031):  ribasushi | 2009-12-04 11:44:47 +0100
  The complex prefetch rewrite actually takes care of this as cleanly as possible
  r8044 at Thesaurus (orig r8032):  ribasushi | 2009-12-04 11:47:22 +0100
  Smarter implementation of the select top 100pct subselect handling
  r8045 at Thesaurus (orig r8033):  ribasushi | 2009-12-04 12:07:05 +0100
  Add support for unordered limited resultsets
  Rename the limit helper to signify it is MS specific
  Make sure we don't lose group_by/having clauses
  r8046 at Thesaurus (orig r8034):  ribasushi | 2009-12-04 12:07:56 +0100
  Un-todoify mssql limit tests - no changes necessary (throw away the obsolete generated sql checks)
  r8047 at Thesaurus (orig r8035):  ribasushi | 2009-12-04 12:24:13 +0100
  Tests for bindvar propagation and Changes
  r8049 at Thesaurus (orig r8037):  ribasushi | 2009-12-04 15:01:32 +0100
  KISS - a select(1) makes perfect ordering criteria
  r8050 at Thesaurus (orig r8038):  ribasushi | 2009-12-04 15:06:11 +0100
  Unify the MSSQL and DB2 RNO implementations - they are the same
  r8051 at Thesaurus (orig r8039):  ribasushi | 2009-12-05 10:29:50 +0100
  Wrap mssql selects in yet another subquery to make limited right-ordered join resultsets possible
  r8052 at Thesaurus (orig r8040):  ribasushi | 2009-12-05 10:46:41 +0100
  Better not touch Top - it's too complex at this point
  r8053 at Thesaurus (orig r8041):  ribasushi | 2009-12-05 11:03:00 +0100
  Extend test just a bit more
  r8054 at Thesaurus (orig r8042):  ribasushi | 2009-12-05 11:44:25 +0100
  DB2 and MSSQL have different default order syntaxes
  r8056 at Thesaurus (orig r8044):  frew | 2009-12-08 02:10:06 +0100
  add version check for mssql 2005 and greater
  r8059 at Thesaurus (orig r8047):  frew | 2009-12-08 16:15:50 +0100
  real exception instead of die
  r8061 at Thesaurus (orig r8049):  ribasushi | 2009-12-09 00:19:49 +0100
  Test for immediate connection with known storage type
  r8062 at Thesaurus (orig r8050):  frew | 2009-12-09 01:24:45 +0100
  fix mssql version check so it's lazier
  r8064 at Thesaurus (orig r8052):  ribasushi | 2009-12-09 02:40:51 +0100
  Fix comment
  r8066 at Thesaurus (orig r8054):  caelum | 2009-12-09 16:12:56 +0100
  fix _get_mssql_version for ODBC
 
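 Roughly the shape of SQL the RowNumberOver dialect emits (table, columns
 and window ordering are illustrative only):

     my $sql = q{
       SELECT id, name FROM (
         SELECT me.id, me.name,
                ROW_NUMBER() OVER( ORDER BY me.id ) AS rno__row__index
           FROM artist me
       ) me
       WHERE rno__row__index BETWEEN 11 AND 20
     };
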
 r8071 at Thesaurus (orig r8059):  frew | 2009-12-10 00:32:55 +0100
 fail nicely if user doesn't have perms for xp_msver
 r8073 at Thesaurus (orig r8061):  ribasushi | 2009-12-10 09:36:21 +0100
 Changes
 r8074 at Thesaurus (orig r8062):  ribasushi | 2009-12-10 09:53:38 +0100
 First half of distinct cleanup
 r8075 at Thesaurus (orig r8063):  frew | 2009-12-10 16:04:37 +0100
 release 0.08115
 r8076 at Thesaurus (orig r8064):  ribasushi | 2009-12-12 12:31:12 +0100
 Even clearer unloaded FK exception
 r8078 at Thesaurus (orig r8066):  ribasushi | 2009-12-12 14:27:18 +0100
 As clear as it gets
 r8141 at Thesaurus (orig r8129):  ovid | 2009-12-16 17:40:50 +0100
 Have has_one/might_have warn if set on nullable columns.
 
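 The case that now warns, sketched (classes and columns are hypothetical):
 the join column of a has_one/might_have is declared nullable, which those
 relationship types do not expect:

     # 'cd_id' has is_nullable => 1 in the related source => warning
     __PACKAGE__->might_have(
       liner_notes => 'MySchema::Result::LinerNotes', 'cd_id'
     );
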
 r8143 at Thesaurus (orig r8131):  caelum | 2009-12-17 13:30:10 +0100
 somewhat better fix for ADO
 r8144 at Thesaurus (orig r8132):  caelum | 2009-12-17 13:34:20 +0100
 minor changes
 r8146 at Thesaurus (orig r8134):  caelum | 2009-12-17 17:44:34 +0100
 cleanup source_bind_attributes for ADO
 r8147 at Thesaurus (orig r8135):  caelum | 2009-12-17 18:09:55 +0100
 more types for ADO fix, and documentation
 r8148 at Thesaurus (orig r8136):  abraxxa | 2009-12-17 19:54:55 +0100
 Cookbook POD fix for add_drop_table instead of add_drop_tables
 
 r8158 at Thesaurus (orig r8146):  ribasushi | 2009-12-18 14:55:53 +0100
  r8150 at Thesaurus (orig r8138):  abraxxa | 2009-12-17 23:22:07 +0100
  Views without a view_definition won't be added to the SQL::Translator::Schema by the parser + tests
  
  r8151 at Thesaurus (orig r8139):  abraxxa | 2009-12-17 23:23:33 +0100
  test cleanups
  
  r8153 at Thesaurus (orig r8141):  abraxxa | 2009-12-18 14:34:14 +0100
  throw_exception if view_definition is missing instead of silent skipping + test changes
  
  r8154 at Thesaurus (orig r8142):  abraxxa | 2009-12-18 14:40:32 +0100
  use Test::Exception
  
  r8155 at Thesaurus (orig r8143):  abraxxa | 2009-12-18 14:42:00 +0100
  fixed Changes
  
  r8156 at Thesaurus (orig r8144):  abraxxa | 2009-12-18 14:44:52 +0100
  test cleanups
  
  r8157 at Thesaurus (orig r8145):  ribasushi | 2009-12-18 14:46:26 +0100
  Another bit
 
 r8160 at Thesaurus (orig r8148):  ribasushi | 2009-12-18 15:04:34 +0100
 Fix no_index entries
 r8162 at Thesaurus (orig r8150):  abraxxa | 2009-12-18 15:59:58 +0100
 Schema POD improvement for dclone
 
 r8163 at Thesaurus (orig r8151):  abraxxa | 2009-12-18 16:07:27 +0100
 link to DBIx::Class::Row
 
 r8164 at Thesaurus (orig r8152):  abraxxa | 2009-12-18 16:08:56 +0100
 fixed typo in Changes
 
 r8165 at Thesaurus (orig r8153):  abraxxa | 2009-12-18 16:14:47 +0100
 dclone pod take #2
 
 r8169 at Thesaurus (orig r8157):  ribasushi | 2009-12-19 18:47:42 +0100
 detabify
 r8170 at Thesaurus (orig r8158):  ribasushi | 2009-12-19 19:41:42 +0100
 Fix RT52812
 r8171 at Thesaurus (orig r8159):  caelum | 2009-12-23 07:16:29 +0100
 minor POD fixes
 r8175 at Thesaurus (orig r8163):  ribasushi | 2009-12-24 09:59:52 +0100
 Fix deployment_statements context sensitivity regression
 r8176 at Thesaurus (orig r8164):  ribasushi | 2009-12-24 10:13:37 +0100
 Don't call the PK setter if no PK
 r8204 at Thesaurus (orig r8192):  caelum | 2009-12-30 22:58:47 +0100
 bump CAG dep
 r8231 at Thesaurus (orig r8219):  matthewt | 2010-01-02 01:41:12 +0100
 fix typo in variable name
 r8238 at Thesaurus (orig r8226):  rafl | 2010-01-02 18:46:40 +0100
 Merge branch 'native_traits'
 
 * native_traits:
   Port replicated storage from MXAH to native traits.
   Create branch native_traits
 r8244 at Thesaurus (orig r8232):  caelum | 2010-01-04 00:30:51 +0100
 fix _rebless into sybase/mssql/nobindvars
 r8247 at Thesaurus (orig r8235):  caelum | 2010-01-05 13:54:56 +0100
  r22328 at hlagh (orig r8201):  caelum | 2009-12-31 12:29:51 -0500
  new branch to fix table aliases in queries over the 30char limit
  r22329 at hlagh (orig r8202):  caelum | 2009-12-31 12:55:50 -0500
  failing test
  r22330 at hlagh (orig r8203):  caelum | 2009-12-31 13:00:35 -0500
  switch oracle tests to done_testing()
  r22331 at hlagh (orig r8204):  caelum | 2009-12-31 15:02:50 -0500
  got something working
  r22332 at hlagh (orig r8205):  caelum | 2009-12-31 15:08:30 -0500
  POD touchups
  r22343 at hlagh (orig r8216):  caelum | 2010-01-01 07:42:03 -0500
  fix uninitialized warning and a bug in ResultSet
  r22419 at hlagh (orig r8234):  caelum | 2010-01-05 07:53:18 -0500
  append half of a base64 MD5 to shortened table aliases for Oracle
 
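 The idea behind the alias shortening, sketched (not the exact production
 code, the helper name is made up):

     use Digest::MD5 'md5_base64';

     # fit an alias into Oracle's 30-char identifier limit while keeping
     # distinct long aliases distinct
     sub _shorten_alias {
       my ($alias, $max_len) = @_;
       return $alias if length($alias) <= $max_len;
       my $tag = substr( md5_base64($alias), 0, 8 );
       $tag =~ tr{+/}{pq};   # base64 chars not valid in identifiers
       return substr( $alias, 0, $max_len - length($tag) - 1 ) . '_' . $tag;
     }
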
 r8249 at Thesaurus (orig r8237):  caelum | 2010-01-05 15:27:40 +0100
 minor change: use more of the hash if possible for oracle table alias shortening
 r8251 at Thesaurus (orig r8239):  caelum | 2010-01-06 02:20:17 +0100
 bump perl_version to 5.8.1
 r8252 at Thesaurus (orig r8240):  caelum | 2010-01-06 02:21:41 +0100
 remove alignment mark on base64 md5
 r8260 at Thesaurus (orig r8248):  ribasushi | 2010-01-07 11:21:55 +0100
 5.8.1 is minimum required perl
 r8261 at Thesaurus (orig r8249):  ribasushi | 2010-01-07 11:22:42 +0100
 Minor optimization
 r8262 at Thesaurus (orig r8250):  ribasushi | 2010-01-07 11:23:35 +0100
 Wrong title
 r8265 at Thesaurus (orig r8253):  ribasushi | 2010-01-08 17:48:50 +0100
 Resolve problem reported by http://lists.scsys.co.uk/pipermail/dbix-class/2009-December/008699.html
 r8266 at Thesaurus (orig r8254):  ribasushi | 2010-01-08 17:52:01 +0100
 Put utf8columns in line with the store_column fix
 r8267 at Thesaurus (orig r8255):  ribasushi | 2010-01-08 19:03:26 +0100
 Tests while hunting for something else
 r8268 at Thesaurus (orig r8256):  ribasushi | 2010-01-08 19:14:42 +0100
 Make test look even more like http://lists.scsys.co.uk/pipermail/dbix-class/2009-November/008599.html
 r8277 at Thesaurus (orig r8265):  ribasushi | 2010-01-09 02:16:14 +0100
  r8263 at Thesaurus (orig r8251):  ribasushi | 2010-01-08 15:43:38 +0100
  New branch to find a leak
  r8264 at Thesaurus (orig r8252):  ribasushi | 2010-01-08 15:52:46 +0100
  Weird test failures
  r8272 at Thesaurus (orig r8260):  ribasushi | 2010-01-09 01:24:56 +0100
  Proper invocation
  r8273 at Thesaurus (orig r8261):  ribasushi | 2010-01-09 01:35:34 +0100
  Test for the real leak reason
  r8274 at Thesaurus (orig r8262):  ribasushi | 2010-01-09 01:37:33 +0100
  Void ctx as it should be
  r8275 at Thesaurus (orig r8263):  ribasushi | 2010-01-09 02:10:13 +0100
  A "fix" for sqlt-related schema leaks
  r8276 at Thesaurus (orig r8264):  ribasushi | 2010-01-09 02:15:53 +0100
  Changes
 
 r8287 at Thesaurus (orig r8275):  caelum | 2010-01-10 11:29:06 +0100
  r22483 at hlagh (orig r8272):  caelum | 2010-01-09 05:52:15 -0500
  new branch to add "normalize_connect_info" class method to Storage::DBI
  r22495 at hlagh (orig r8274):  caelum | 2010-01-10 05:27:42 -0500
  split connect_info parser out into private _normalize_connect_info
 
 r8289 at Thesaurus (orig r8277):  caelum | 2010-01-10 12:04:52 +0100
 fix connection details in ::DBI::Replicated docs
 r8291 at Thesaurus (orig r8279):  ribasushi | 2010-01-11 09:50:21 +0100
  r8077 at Thesaurus (orig r8065):  ribasushi | 2009-12-12 14:24:30 +0100
  Branch for yet another mssql ordered prefetch problem
  r8079 at Thesaurus (orig r8067):  ribasushi | 2009-12-12 14:37:48 +0100
  prefetch does not get disassembled properly
  r8112 at Thesaurus (orig r8100):  ribasushi | 2009-12-13 00:07:00 +0100
  Extra test to highlight search_related inefficiency
  r8113 at Thesaurus (orig r8101):  ribasushi | 2009-12-13 00:17:44 +0100
  Real test for search_related and prefetch
  r8114 at Thesaurus (orig r8102):  ribasushi | 2009-12-13 00:19:57 +0100
  Fix corner case regression on search_related on a prefetching rs
  r8115 at Thesaurus (orig r8103):  ribasushi | 2009-12-13 00:21:05 +0100
  Isolate prefetch heads using RNO with a subquery
  r8116 at Thesaurus (orig r8104):  ribasushi | 2009-12-13 00:23:46 +0100
  Changes
  r8125 at Thesaurus (orig r8113):  ribasushi | 2009-12-15 13:06:26 +0100
  Extend mssql limited prefetch tests
  r8126 at Thesaurus (orig r8114):  ribasushi | 2009-12-15 13:08:56 +0100
  Add extra test to prove Alan wrong :)
  r8132 at Thesaurus (orig r8120):  ribasushi | 2009-12-16 00:38:04 +0100
  Do not realias tables in the RNO subqueries
  r8133 at Thesaurus (orig r8121):  ribasushi | 2009-12-16 00:50:52 +0100
  Deliberately disturb alphabetical order
  r8134 at Thesaurus (orig r8122):  ribasushi | 2009-12-16 10:26:43 +0100
  Got a failing test
  r8135 at Thesaurus (orig r8123):  ribasushi | 2009-12-16 10:49:10 +0100
  Cleanup
  r8136 at Thesaurus (orig r8124):  ribasushi | 2009-12-16 10:51:58 +0100
  More moving around
  r8137 at Thesaurus (orig r8125):  ribasushi | 2009-12-16 11:25:37 +0100
  The real mssql problem - it's... bad
  r8138 at Thesaurus (orig r8126):  ribasushi | 2009-12-16 11:29:20 +0100
  Clearer debug
  r8139 at Thesaurus (orig r8127):  ribasushi | 2009-12-16 11:47:48 +0100
  This is horrific but the tests pass... maybe someone will figure out something better
  r8140 at Thesaurus (orig r8128):  ribasushi | 2009-12-16 16:45:47 +0100
  cleanup tests
  r8187 at Thesaurus (orig r8175):  ribasushi | 2009-12-24 16:22:30 +0100
  Ordered subqueries do not work in mssql after all
  r8271 at Thesaurus (orig r8259):  ribasushi | 2010-01-08 23:58:13 +0100
  Cleaner RNO sql
  r8279 at Thesaurus (orig r8267):  ribasushi | 2010-01-09 10:13:16 +0100
  Subqueries no longer experimental
  r8280 at Thesaurus (orig r8268):  ribasushi | 2010-01-09 11:26:46 +0100
  Close the book on mssql ordered subqueries
  r8281 at Thesaurus (orig r8269):  ribasushi | 2010-01-09 11:36:36 +0100
  Changes and typos
  r8283 at Thesaurus (orig r8271):  ribasushi | 2010-01-09 11:42:21 +0100
  Highlight the real problem
  r8285 at Thesaurus (orig r8273):  ribasushi | 2010-01-10 10:07:10 +0100
  Rename subquery to subselect and rewrite POD (per castaway)
  r8290 at Thesaurus (orig r8278):  ribasushi | 2010-01-10 17:01:24 +0100
  rename as per mst
 
 r8295 at Thesaurus (orig r8283):  caelum | 2010-01-11 23:42:30 +0100
 make a public ::Schema::unregister_source
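
A hedged usage sketch of the newly public method (the schema class,
connection details and the 'Artist' source name are examples only):

    my $schema = MyApp::Schema->connect($dsn, $user, $pass);
    $schema->unregister_source('Artist');   # detach this result source
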
 r8298 at Thesaurus (orig r8286):  abraxxa | 2010-01-12 18:04:18 +0100
 fixed a typo in Changes
 more detailed explanation for the warning about has_one/might_have rels on nullable columns
 
 r8307 at Thesaurus (orig r8295):  abraxxa | 2010-01-13 17:28:05 +0100
 added the sources parser arg to the example code
 
 r8327 at Thesaurus (orig r8315):  ribasushi | 2010-01-15 01:25:39 +0100
  r8167 at Thesaurus (orig r8155):  ribasushi | 2009-12-19 12:50:13 +0100
  New branch for null-only-result fix
  r8168 at Thesaurus (orig r8156):  ribasushi | 2009-12-19 12:51:21 +0100
  Failing test
  r8322 at Thesaurus (orig r8310):  ribasushi | 2010-01-15 00:48:09 +0100
  Correct test order
  r8323 at Thesaurus (orig r8311):  ribasushi | 2010-01-15 01:15:33 +0100
  Generalize the to-node inner-join-er to apply to all related_resultset calls, not just counts
  r8324 at Thesaurus (orig r8312):  ribasushi | 2010-01-15 01:16:05 +0100
  Adjust sql-emitter tests
  r8326 at Thesaurus (orig r8314):  ribasushi | 2010-01-15 01:25:10 +0100
  One more sql-test fix and changes
 
 r8328 at Thesaurus (orig r8316):  ribasushi | 2010-01-15 01:31:58 +0100
 Strict mysql bugfix
 r8329 at Thesaurus (orig r8317):  ribasushi | 2010-01-15 01:38:53 +0100
 Better description of mysql strict option
 r8331 at Thesaurus (orig r8319):  ribasushi | 2010-01-15 03:12:13 +0100
 Update troubleshooting doc
 r8337 at Thesaurus (orig r8325):  ribasushi | 2010-01-15 17:13:28 +0100
 RT52674
 r8346 at Thesaurus (orig r8334):  ribasushi | 2010-01-17 09:41:49 +0100
 No method aliasing in OO code, *ever*
 r8373 at Thesaurus (orig r8360):  ribasushi | 2010-01-18 11:54:51 +0100
 Adjust my email
 r8387 at Thesaurus (orig r8374):  ribasushi | 2010-01-19 13:07:07 +0100
  r8340 at Thesaurus (orig r8328):  abraxxa | 2010-01-15 19:21:20 +0100
  added branch no_duplicate_indexes_for_pk_cols with test and fix
  
  r8343 at Thesaurus (orig r8331):  abraxxa | 2010-01-15 19:32:16 +0100
  don't use eq_set in test
  
  r8344 at Thesaurus (orig r8332):  abraxxa | 2010-01-15 19:44:04 +0100
  don't sort the primary columns because order matters for indexes
  
  r8345 at Thesaurus (orig r8333):  abraxxa | 2010-01-15 19:56:46 +0100
  don't sort the key columns because the order of columns is important for indexes
  
  r8372 at Thesaurus (orig r8359):  abraxxa | 2010-01-18 10:22:09 +0100
  don't sort the columns in the tests either
  
  r8378 at Thesaurus (orig r8365):  abraxxa | 2010-01-18 15:39:28 +0100
  added pod section for parser args
  
  r8379 at Thesaurus (orig r8366):  abraxxa | 2010-01-18 15:53:08 +0100
  better pod thanks to ribasushi
  
  r8380 at Thesaurus (orig r8367):  abraxxa | 2010-01-18 16:04:34 +0100
  test and pod fixes
  
  r8383 at Thesaurus (orig r8370):  abraxxa | 2010-01-19 12:38:44 +0100
  fixed Authors section
  added License section
  fixed t/86sqlt.t tests
  
  r8384 at Thesaurus (orig r8371):  ribasushi | 2010-01-19 12:59:52 +0100
  Regenerated under new parser
  r8385 at Thesaurus (orig r8372):  ribasushi | 2010-01-19 13:03:51 +0100
  Minor style change and white space trim
  r8386 at Thesaurus (orig r8373):  ribasushi | 2010-01-19 13:06:54 +0100
  Changes abraxxa++
 
 r8390 at Thesaurus (orig r8377):  ribasushi | 2010-01-19 13:41:03 +0100
 Some minor test refactor and tab cleanups
 r8394 at Thesaurus (orig r8381):  frew | 2010-01-19 17:34:10 +0100
 add test to ensure no tabs in perl files
 
 r8397 at Thesaurus (orig r8384):  frew | 2010-01-19 18:00:12 +0100
 fix test to be an author dep
 r8398 at Thesaurus (orig r8385):  ribasushi | 2010-01-19 18:19:40 +0100
 First round of detabification
 r8399 at Thesaurus (orig r8386):  frew | 2010-01-19 23:42:50 +0100
 Add EOL test
 
 r8401 at Thesaurus (orig r8388):  ribasushi | 2010-01-20 08:32:39 +0100
 Fix minor RSC bug
 r8402 at Thesaurus (orig r8389):  roman | 2010-01-20 15:47:26 +0100
 Added a FAQ entry titled: How do I override a run time method (e.g. a relationship accessor)?
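
One common shape such an override takes - shown here as an illustration, not
as the FAQ text itself; the Result class and the 'artist' relationship
accessor are hypothetical:

    package MyApp::Schema::Result::CD;
    use Class::Method::Modifiers 'around';

    around artist => sub {
        my ($orig, $self, @args) = @_;
        my $artist = $self->$orig(@args);   # call the generated accessor
        # ... post-process $artist here ...
        return $artist;
    };
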
 r8403 at Thesaurus (orig r8390):  roman | 2010-01-20 16:31:41 +0100
 Added myself as a contributor.
 r8408 at Thesaurus (orig r8395):  jhannah | 2010-01-21 06:48:14 +0100
 Added FAQ: Custom methods in Result classes
 
 r8413 at Thesaurus (orig r8400):  frew | 2010-01-22 04:17:20 +0100
 add _is_numeric to ::Row
 r8418 at Thesaurus (orig r8405):  ribasushi | 2010-01-22 11:00:05 +0100
 Generalize autoinc/count test
 r8420 at Thesaurus (orig r8407):  ribasushi | 2010-01-22 11:11:49 +0100
 Final round of detabify
 r8421 at Thesaurus (orig r8408):  ribasushi | 2010-01-22 11:12:54 +0100
 Temporarily disable whitespace checkers
 r8426 at Thesaurus (orig r8413):  ribasushi | 2010-01-22 11:35:15 +0100
 Move failing regression test away from trunk
 r8431 at Thesaurus (orig r8418):  frew | 2010-01-22 17:05:12 +0100
 fix name of _is_numeric to _is_column_numeric
 
 r8437 at Thesaurus (orig r8424):  ribasushi | 2010-01-26 09:33:42 +0100
 Switch to Test::Exception
 r8438 at Thesaurus (orig r8425):  ribasushi | 2010-01-26 09:48:30 +0100
 Test txn_scope_guard regression
 r8439 at Thesaurus (orig r8426):  ribasushi | 2010-01-26 10:10:11 +0100
 Fix txn_begin on external non-AC coderef regression
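
For reference, the txn_scope_guard pattern being regression-tested here
(the resultset name and data are hypothetical):

    my $guard = $schema->txn_scope_guard;
    $schema->resultset('User')->create({ name => 'bob' });
    $guard->commit;   # an uncommitted guard rolls back on destruction
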
 r8443 at Thesaurus (orig r8430):  ribasushi | 2010-01-26 14:19:50 +0100
  r8304 at Thesaurus (orig r8292):  nigel | 2010-01-13 16:05:48 +0100
  Branch to extend ::Schema::Versioned to handle series of upgrades
  r8320 at Thesaurus (orig r8308):  nigel | 2010-01-14 16:52:50 +0100
  Changes to support multiple step schema version updates
  r8321 at Thesaurus (orig r8309):  nigel | 2010-01-14 17:05:21 +0100
  Changelog for Changes to support multiple step schema version updates
  r8393 at Thesaurus (orig r8380):  ribasushi | 2010-01-19 13:59:51 +0100
  Botched merge (tests still fail)
  r8395 at Thesaurus (orig r8382):  ribasushi | 2010-01-19 17:37:07 +0100
  More cleanup
  r8396 at Thesaurus (orig r8383):  ribasushi | 2010-01-19 17:48:09 +0100
  Fix the last remaining breakage and un-TODO the quick cycle
  r8442 at Thesaurus (orig r8429):  ribasushi | 2010-01-26 14:18:53 +0100
  No need for 2 statements to get the version
 
 r8445 at Thesaurus (orig r8432):  ribasushi | 2010-01-26 14:22:16 +0100
  r8161 at Thesaurus (orig r8149):  ovid | 2009-12-18 15:59:56 +0100
  Prefetch queries make inefficient SQL when combined with a pager.  This branch
  is to try to isolate some of the join conditions and figure out if we can fix
  this.
  
  r8166 at Thesaurus (orig r8154):  ovid | 2009-12-18 18:17:55 +0100
  Refactor internals to expose some join logic. Awful method and args :(
  
  r8319 at Thesaurus (orig r8307):  ovid | 2010-01-14 15:37:35 +0100
  Attempt to factor out alias handling has mostly failed.
  
  r8330 at Thesaurus (orig r8318):  ribasushi | 2010-01-15 03:02:21 +0100
  Better refactor
  r8332 at Thesaurus (orig r8320):  ribasushi | 2010-01-15 03:14:39 +0100
  Better varnames
  r8347 at Thesaurus (orig r8335):  ribasushi | 2010-01-17 11:33:55 +0100
  More mangling
  r8348 at Thesaurus (orig r8336):  ribasushi | 2010-01-17 13:44:00 +0100
  Getting warmer
  r8349 at Thesaurus (orig r8337):  ribasushi | 2010-01-17 14:00:20 +0100
  That was tricky :)
  r8352 at Thesaurus (orig r8340):  ribasushi | 2010-01-17 15:57:06 +0100
  Turned out to be much trickier
  r8354 at Thesaurus (orig r8342):  ribasushi | 2010-01-17 16:29:20 +0100
  This is made out of awesome
  r8355 at Thesaurus (orig r8343):  ribasushi | 2010-01-17 16:46:02 +0100
  Changes
  r8400 at Thesaurus (orig r8387):  ribasushi | 2010-01-20 08:17:44 +0100
  Whoops - need to disable quoting
 
 r8459 at Thesaurus (orig r8446):  ribasushi | 2010-01-27 11:56:15 +0100
 Clean up some stuff
 r8463 at Thesaurus (orig r8450):  ribasushi | 2010-01-27 12:08:04 +0100
 Merge some cleanups from the prefetch branch
 r8466 at Thesaurus (orig r8453):  ribasushi | 2010-01-27 12:33:33 +0100
 DSNs can not be empty
 r8471 at Thesaurus (orig r8458):  frew | 2010-01-27 21:38:42 +0100
 fix silly multipk bug
 r8472 at Thesaurus (orig r8459):  ribasushi | 2010-01-28 11:13:16 +0100
 Consolidate insert_bulk guards (and make them show up correctly in the trace)
 r8473 at Thesaurus (orig r8460):  ribasushi | 2010-01-28 11:28:30 +0100
 Fix bogus test DDL
 r8480 at Thesaurus (orig r8467):  ribasushi | 2010-01-28 22:11:59 +0100
  r8381 at Thesaurus (orig r8368):  moses | 2010-01-18 16:41:38 +0100
  Test commit
  r8425 at Thesaurus (orig r8412):  ribasushi | 2010-01-22 11:25:01 +0100
  Informix test + cleanups
  r8428 at Thesaurus (orig r8415):  ribasushi | 2010-01-22 11:59:25 +0100
  Initial informix support
 
 r8482 at Thesaurus (orig r8469):  ribasushi | 2010-01-28 22:19:23 +0100
 Informix changes
 r8483 at Thesaurus (orig r8470):  ribasushi | 2010-01-29 12:01:41 +0100
 Require non-warning-spewing MooseX::Types
 r8484 at Thesaurus (orig r8471):  ribasushi | 2010-01-29 12:15:15 +0100
 Enhance warning test a bit (seems to fail on 5.8)
 r8485 at Thesaurus (orig r8472):  ribasushi | 2010-01-29 13:00:54 +0100
 Fugly 5.8 workaround
 r8494 at Thesaurus (orig r8481):  frew | 2010-01-31 06:47:42 +0100
 cleanup (3 arg open, 1 grep instead of 3)
 r8496 at Thesaurus (orig r8483):  ribasushi | 2010-01-31 10:04:43 +0100
 better skip message
 r8510 at Thesaurus (orig r8497):  caelum | 2010-02-01 12:07:13 +0100
 throw exception on attempt to insert a blob with DBD::Oracle == 1.23
 r8511 at Thesaurus (orig r8498):  caelum | 2010-02-01 12:12:48 +0100
 add RT link for Oracle blob bug in DBD::Oracle == 1.23
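
A minimal sketch of the guard described above; the $has_blob_bind flag is
hypothetical and the real check lives inside the Oracle storage driver:

    if ( $DBD::Oracle::VERSION == 1.23 && $has_blob_bind ) {
        die 'DBD::Oracle 1.23 corrupts inserted LOBs - '
          . 'please use a different version (see the RT ticket)';
    }
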
 r8527 at Thesaurus (orig r8514):  caelum | 2010-02-02 23:20:17 +0100
  r22968 at hlagh (orig r8502):  caelum | 2010-02-02 05:30:47 -0500
  branch to support Sybase SQL Anywhere
  r22971 at hlagh (orig r8505):  caelum | 2010-02-02 07:21:13 -0500
  ASA last_insert_id and limit support, still needs BLOB support
  r22972 at hlagh (orig r8506):  caelum | 2010-02-02 08:33:57 -0500
  deref table name if needed, check all columns for identity column not just PK
  r22973 at hlagh (orig r8507):  caelum | 2010-02-02 08:48:11 -0500
  test blobs, they work, didn't have to do anything
  r22974 at hlagh (orig r8508):  caelum | 2010-02-02 09:15:44 -0500
  fix stupid identity bug, test empty insert (works), test DTs (not working yet)
  r22976 at hlagh (orig r8510):  caelum | 2010-02-02 14:31:00 -0500
  rename ::Sybase::ASA to ::SQLAnywhere, per mst
  r22978 at hlagh (orig r8512):  caelum | 2010-02-02 17:02:29 -0500
  DT inflation now works
  r22979 at hlagh (orig r8513):  caelum | 2010-02-02 17:18:06 -0500
  minor POD update
 
 r8528 at Thesaurus (orig r8515):  caelum | 2010-02-02 23:23:26 +0100
  r22895 at hlagh (orig r8473):  caelum | 2010-01-30 03:57:26 -0500
  branch to fix computed columns in Sybase ASE
  r22911 at hlagh (orig r8489):  caelum | 2010-01-31 07:18:33 -0500
  empty insert into a Sybase table with computed columns and either data_type => undef or default_value => SCALARREF works now
  r22912 at hlagh (orig r8490):  caelum | 2010-01-31 07:39:32 -0500
  add POD about computed columns and timestamps for Sybase
  r22918 at hlagh (orig r8496):  caelum | 2010-02-01 05:09:07 -0500
  update POD about Schema::Loader for Sybase
 
 r8531 at Thesaurus (orig r8518):  ribasushi | 2010-02-02 23:57:27 +0100
  r8512 at Thesaurus (orig r8499):  boghead | 2010-02-01 23:38:13 +0100
  - Creating a branch for adding _post_inflate_datetime and _pre_deflate_datetime to
    InflateColumn::DateTime
  
  r8513 at Thesaurus (orig r8500):  boghead | 2010-02-01 23:42:14 +0100
  - Add _post_inflate_datetime and _pre_deflate_datetime to InflateColumn::DateTime to allow
    for modifying DateTime objects after inflation or before deflation.
  
  r8524 at Thesaurus (orig r8511):  boghead | 2010-02-02 22:59:28 +0100
  - Simplify by allowing the deprecated column_info {extra}{timezone} data
    to be moved to {timezone} (and the same with locale)
  
 
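
A hedged sketch of overriding one of the new hooks in a Result class (the
method signature is assumed from the description above):

    sub _post_inflate_datetime {
        my ($self, $dt, $info) = @_;
        $dt->set_time_zone('UTC');   # e.g. normalize every inflated value
        return $dt;
    }
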
 r8533 at Thesaurus (orig r8520):  caelum | 2010-02-03 05:19:59 +0100
 support for Sybase SQL Anywhere through ODBC
 r8536 at Thesaurus (orig r8523):  ribasushi | 2010-02-03 08:27:54 +0100
 Changes
 r8537 at Thesaurus (orig r8524):  ribasushi | 2010-02-03 08:31:20 +0100
 Quote fail
 r8538 at Thesaurus (orig r8525):  caelum | 2010-02-03 13:21:37 +0100
 test DT inflation for Sybase SQL Anywhere over ODBC too
 r8539 at Thesaurus (orig r8526):  caelum | 2010-02-03 17:36:39 +0100
 minor code cleanup for SQL Anywhere last_insert_id
 r8540 at Thesaurus (orig r8527):  ribasushi | 2010-02-04 11:28:33 +0100
 Fix bug reported by tommyt
 r8548 at Thesaurus (orig r8535):  ribasushi | 2010-02-04 14:34:45 +0100
 Prepare for new SQLA release
 r8560 at Thesaurus (orig r8547):  ribasushi | 2010-02-05 08:59:04 +0100
 Refactor some evil code
 r8565 at Thesaurus (orig r8552):  ribasushi | 2010-02-05 17:00:12 +0100
 Looks like RSC is finally (halfway) fixed
 r8566 at Thesaurus (orig r8553):  ribasushi | 2010-02-05 17:07:13 +0100
 RSC subquery can not include the prefetch
 r8567 at Thesaurus (orig r8554):  ribasushi | 2010-02-05 17:10:29 +0100
 Fix typo and borked test
 r8569 at Thesaurus (orig r8556):  ribasushi | 2010-02-05 17:33:12 +0100
 Release 0.08116
 r8571 at Thesaurus (orig r8558):  ribasushi | 2010-02-05 18:01:33 +0100
 No idea how I missed all these fails...
 r8572 at Thesaurus (orig r8559):  ribasushi | 2010-02-05 18:13:34 +0100
 Release 0.08117
 r8574 at Thesaurus (orig r8561):  ribasushi | 2010-02-05 18:51:12 +0100
 Try to distinguish trunk from official versions
 r8580 at Thesaurus (orig r8567):  gshank | 2010-02-05 22:29:24 +0100
 add doc on 'where' attribute
 
 r8587 at Thesaurus (orig r8574):  frew | 2010-02-07 21:07:03 +0100
 add as_subselect_rs
 r8588 at Thesaurus (orig r8575):  frew | 2010-02-07 21:13:04 +0100
 fix longstanding unmentioned bug ("me")
 r8589 at Thesaurus (orig r8576):  frew | 2010-02-08 06:17:43 +0100
 another example of as_subselect_rs
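
A hedged usage sketch: as_subselect_rs wraps the current resultset in a
subselect, so attributes such as group_by can no longer collide with later
chaining (schema and column names are hypothetical):

    my $grouped = $schema->resultset('CD')
                         ->search({}, { group_by => 'me.artist_id' });
    my $rs = $grouped->as_subselect_rs
                     ->search({ 'me.artist_id' => 1 });
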
 r8590 at Thesaurus (orig r8577):  frew | 2010-02-08 06:23:58 +0100
 fix bug in UTF8Columns
 r8591 at Thesaurus (orig r8578):  ribasushi | 2010-02-08 09:31:01 +0100
 Extend utf8columns test to trap fixed bug
 r8592 at Thesaurus (orig r8579):  ribasushi | 2010-02-08 12:03:23 +0100
 Cleanup rel accessor type handling
 r8593 at Thesaurus (orig r8580):  ribasushi | 2010-02-08 12:20:47 +0100
 Fix some fallout
 r8595 at Thesaurus (orig r8582):  ribasushi | 2010-02-08 12:38:19 +0100
 Merge some obsolete code cleanup from the prefetch branch
 r8596 at Thesaurus (orig r8583):  ribasushi | 2010-02-08 12:42:09 +0100
 Merge fix of RT54039 from prefetch branch
 r8598 at Thesaurus (orig r8585):  ribasushi | 2010-02-08 12:48:31 +0100
 Release 0.08118
 r8600 at Thesaurus (orig r8587):  ribasushi | 2010-02-08 12:52:33 +0100
 Bump trunk version
 r8606 at Thesaurus (orig r8593):  ribasushi | 2010-02-08 16:16:44 +0100
 cheaper lookup
 r8609 at Thesaurus (orig r8596):  ribasushi | 2010-02-10 12:40:37 +0100
 Consolidate last_insert_id handling with a fallback-attempt on DBI::last_insert_id
 r8614 at Thesaurus (orig r8601):  caelum | 2010-02-10 21:29:51 +0100
 workaround for Moose bug affecting Replicated storage
 r8615 at Thesaurus (orig r8602):  caelum | 2010-02-10 21:40:07 +0100
 revert Moose bug workaround, bump Moose dep for Replicated to 0.98
 r8616 at Thesaurus (orig r8603):  caelum | 2010-02-10 22:48:34 +0100
 add a couple proxy methods to Replicated so it can run
 r8628 at Thesaurus (orig r8615):  caelum | 2010-02-11 11:35:01 +0100
  r21090 at hlagh (orig r7836):  caelum | 2009-11-02 06:40:52 -0500
  new branch to fix unhandled methods in Storage::DBI::Replicated
  r21091 at hlagh (orig r7837):  caelum | 2009-11-02 06:42:00 -0500
  add test to display unhandled methods
  r21092 at hlagh (orig r7838):  caelum | 2009-11-02 06:55:34 -0500
  minor fix to last committed test
  r21093 at hlagh (orig r7839):  caelum | 2009-11-02 09:26:00 -0500
  minor test code cleanup
  r23125 at hlagh (orig r8607):  caelum | 2010-02-10 19:25:51 -0500
  add unimplemented Storage::DBI methods to ::DBI::Replicated
  r23130 at hlagh (orig r8612):  ribasushi | 2010-02-11 05:12:48 -0500
  Podtesting exclusion
 
 r8630 at Thesaurus (orig r8617):  frew | 2010-02-11 11:45:54 +0100
 Changes (from a while ago)
 r8631 at Thesaurus (orig r8618):  caelum | 2010-02-11 11:46:58 +0100
 savepoints for SQLAnywhere
 r8640 at Thesaurus (orig r8627):  ribasushi | 2010-02-11 12:33:19 +0100
  r8424 at Thesaurus (orig r8411):  ribasushi | 2010-01-22 11:19:40 +0100
  Chaining POC test
 
 r8641 at Thesaurus (orig r8628):  ribasushi | 2010-02-11 12:34:19 +0100
  r8426 at Thesaurus (orig r8413):  ribasushi | 2010-01-22 11:35:15 +0100
  Move failing regression test away from trunk
 
 r8642 at Thesaurus (orig r8629):  ribasushi | 2010-02-11 12:34:56 +0100
 
 r8643 at Thesaurus (orig r8630):  ribasushi | 2010-02-11 12:35:03 +0100
  r8507 at Thesaurus (orig r8494):  frew | 2010-02-01 04:33:08 +0100
  small refactor to put select/as/+select/+as etc merging in its own function
 
 r8644 at Thesaurus (orig r8631):  ribasushi | 2010-02-11 12:35:11 +0100
  r8514 at Thesaurus (orig r8501):  frew | 2010-02-02 05:12:29 +0100
  revert actual changes from yesterday as per ribasushi's advice
 
 r8645 at Thesaurus (orig r8632):  ribasushi | 2010-02-11 12:35:16 +0100
  r8522 at Thesaurus (orig r8509):  frew | 2010-02-02 19:39:33 +0100
  delete +stuff if stuff exists
 
 r8646 at Thesaurus (orig r8633):  ribasushi | 2010-02-11 12:35:23 +0100
  r8534 at Thesaurus (orig r8521):  frew | 2010-02-03 06:14:44 +0100
  change deletion/overriding to fix t/76
 
 r8647 at Thesaurus (orig r8634):  ribasushi | 2010-02-11 12:35:30 +0100
  r8535 at Thesaurus (orig r8522):  frew | 2010-02-03 06:57:15 +0100
  some basic readability factorings (aka, fewer nested ternaries and long maps)
 
 r8648 at Thesaurus (orig r8635):  ribasushi | 2010-02-11 12:36:01 +0100
  r8558 at Thesaurus (orig r8545):  frew | 2010-02-04 20:32:54 +0100
  fix incorrect test in t/76select.t and posit an incorrect solution
 
 r8649 at Thesaurus (orig r8636):  ribasushi | 2010-02-11 12:38:47 +0100
 
 r8650 at Thesaurus (orig r8637):  ribasushi | 2010-02-11 12:38:57 +0100
  r8578 at Thesaurus (orig r8565):  ribasushi | 2010-02-05 19:11:09 +0100
  Should not be needed
 
 r8651 at Thesaurus (orig r8638):  ribasushi | 2010-02-11 12:39:03 +0100
  r8579 at Thesaurus (orig r8566):  ribasushi | 2010-02-05 19:13:24 +0100
  SQLA now fixed
 
 r8652 at Thesaurus (orig r8639):  ribasushi | 2010-02-11 12:39:10 +0100
  r8624 at Thesaurus (orig r8611):  ribasushi | 2010-02-11 10:31:08 +0100
  MOAR testing
 
 r8653 at Thesaurus (orig r8640):  ribasushi | 2010-02-11 12:39:17 +0100
  r8626 at Thesaurus (orig r8613):  frew | 2010-02-11 11:16:30 +0100
  fix bad test
 
 r8654 at Thesaurus (orig r8641):  ribasushi | 2010-02-11 12:39:23 +0100
  r8627 at Thesaurus (orig r8614):  frew | 2010-02-11 11:21:52 +0100
  fix t/76, break rsc tests
 
 r8655 at Thesaurus (orig r8642):  ribasushi | 2010-02-11 12:39:30 +0100
  r8632 at Thesaurus (orig r8619):  frew | 2010-02-11 11:53:50 +0100
  fix incorrect test
 
 r8656 at Thesaurus (orig r8643):  ribasushi | 2010-02-11 12:39:35 +0100
  r8633 at Thesaurus (orig r8620):  frew | 2010-02-11 11:54:49 +0100
  make t/76s and t/88 pass by deleting from the correct attr hash
 
 r8657 at Thesaurus (orig r8644):  ribasushi | 2010-02-11 12:39:40 +0100
  r8634 at Thesaurus (orig r8621):  frew | 2010-02-11 11:55:41 +0100
  fix a test due to ordering issues
 
 r8658 at Thesaurus (orig r8645):  ribasushi | 2010-02-11 12:39:45 +0100
  r8635 at Thesaurus (orig r8622):  frew | 2010-02-11 11:58:23 +0100
  this is why you run tests before you commit them.
 
 r8659 at Thesaurus (orig r8646):  ribasushi | 2010-02-11 12:39:51 +0100
  r8636 at Thesaurus (orig r8623):  frew | 2010-02-11 12:00:59 +0100
  fix another ordering issue
 
 r8660 at Thesaurus (orig r8647):  ribasushi | 2010-02-11 12:39:57 +0100
  r8637 at Thesaurus (orig r8624):  frew | 2010-02-11 12:11:31 +0100
  fix for search/select_chains
 
 r8661 at Thesaurus (orig r8648):  ribasushi | 2010-02-11 12:40:03 +0100
 
 r8662 at Thesaurus (orig r8649):  caelum | 2010-02-11 12:40:07 +0100
 test nanosecond precision for SQLAnywhere
 r8663 at Thesaurus (orig r8650):  ribasushi | 2010-02-11 12:40:09 +0100
  r8639 at Thesaurus (orig r8626):  ribasushi | 2010-02-11 12:33:03 +0100
  Changes and small omission
 
 r8666 at Thesaurus (orig r8653):  ribasushi | 2010-02-11 18:16:45 +0100
 Changes
 r8674 at Thesaurus (orig r8661):  ribasushi | 2010-02-12 09:12:45 +0100
 Fix moose dep
 r8680 at Thesaurus (orig r8667):  dew | 2010-02-12 18:05:11 +0100
 Add is_ordered to DBIC::ResultSet
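
A hedged usage sketch of the new predicate (the resultset source is
hypothetical):

    my $rs = $schema->resultset('CD')->search({}, { order_by => 'me.year' });
    warn "already ordered\n" if $rs->is_ordered;   # an order_by is in effect
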
 r8688 at Thesaurus (orig r8675):  ribasushi | 2010-02-13 09:36:29 +0100
  r8667 at Thesaurus (orig r8654):  ribasushi | 2010-02-11 18:17:35 +0100
  Try a dep-handling idea
  r8675 at Thesaurus (orig r8662):  ribasushi | 2010-02-12 12:46:11 +0100
  Move optional deps out of the Makefile
  r8676 at Thesaurus (orig r8663):  ribasushi | 2010-02-12 13:40:53 +0100
  Support methods to verify group dependencies
  r8677 at Thesaurus (orig r8664):  ribasushi | 2010-02-12 13:45:18 +0100
  Move sqlt dephandling to Optional::Deps
  r8679 at Thesaurus (orig r8666):  ribasushi | 2010-02-12 14:03:17 +0100
  Move replicated to Opt::Deps
  r8684 at Thesaurus (orig r8671):  ribasushi | 2010-02-13 02:47:52 +0100
  Auto-POD for Optional Deps
  r8685 at Thesaurus (orig r8672):  ribasushi | 2010-02-13 02:53:20 +0100
  Privatize the full list method
  r8686 at Thesaurus (orig r8673):  ribasushi | 2010-02-13 02:59:51 +0100
  Scary warning
  r8687 at Thesaurus (orig r8674):  ribasushi | 2010-02-13 09:35:01 +0100
  Changes
 
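
A hedged sketch of how an extension might consult the new dependency manager
(the 'deploy' group follows the sqlt handling mentioned above; method names
are recalled, not verified against this exact revision):

    require DBIx::Class::Optional::Dependencies;

    if ( DBIx::Class::Optional::Dependencies->req_ok_for('deploy') ) {
        # SQL::Translator and friends are present - safe to deploy()
    }
    else {
        warn DBIx::Class::Optional::Dependencies->req_missing_for('deploy');
    }
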
 r8691 at Thesaurus (orig r8678):  ribasushi | 2010-02-13 10:07:15 +0100
 Autogen comment for Dependencies.pod
 r8692 at Thesaurus (orig r8679):  ribasushi | 2010-02-13 10:11:24 +0100
 Ask for newer M::I
 r8698 at Thesaurus (orig r8685):  ribasushi | 2010-02-13 11:11:10 +0100
 Add author/license to pod
 r8699 at Thesaurus (orig r8686):  arcanez | 2010-02-13 13:43:22 +0100
 fix typo per nuba on irc
 r8705 at Thesaurus (orig r8692):  ribasushi | 2010-02-13 15:15:33 +0100
  r8001 at Thesaurus (orig r7989):  goraxe | 2009-11-30 01:14:47 +0100
  Branch for dbicadmin script refactor
  
  r8003 at Thesaurus (orig r7991):  goraxe | 2009-11-30 01:26:39 +0100
  add DBIx::Class::Admin
  r8024 at Thesaurus (orig r8012):  goraxe | 2009-12-02 22:49:27 +0100
  get deployment tests to pass
  r8025 at Thesaurus (orig r8013):  goraxe | 2009-12-02 22:50:42 +0100
  get deployment tests to pass
  r8026 at Thesaurus (orig r8014):  goraxe | 2009-12-02 23:52:40 +0100
  all ddl tests now pass
  r8083 at Thesaurus (orig r8071):  goraxe | 2009-12-12 17:01:11 +0100
  add quiet attribute to DBIx::Class::Admin
  r8086 at Thesaurus (orig r8074):  goraxe | 2009-12-12 17:36:58 +0100
  add tests for data manipulation ported from 89dbicadmin.t
  r8088 at Thesaurus (orig r8076):  goraxe | 2009-12-12 17:38:07 +0100
  add sleep 1 to t/admin/02ddl.t so insert into upgrade table does not happen too quickly
  r8089 at Thesaurus (orig r8077):  goraxe | 2009-12-12 17:40:33 +0100
  update DBIx::Class::Admin data manip functions to pass the test
  r8095 at Thesaurus (orig r8083):  goraxe | 2009-12-12 19:36:22 +0100
  change passing of preversion to be a parameter
  r8096 at Thesaurus (orig r8084):  goraxe | 2009-12-12 19:38:26 +0100
  add some pod to DBIx::Class::Admin
  r8103 at Thesaurus (orig r8091):  goraxe | 2009-12-12 22:08:55 +0100
  some changes to make DBIx::Class::Admin more compatible with dbicadmin interface
  r8104 at Thesaurus (orig r8092):  goraxe | 2009-12-12 22:09:39 +0100
  commit refactored dbicadmin script and very minor changes to its existing test suite
  r8107 at Thesaurus (orig r8095):  goraxe | 2009-12-12 22:34:35 +0100
  add compatibility for --op for dbicadmin, revert test suite
  r8127 at Thesaurus (orig r8115):  goraxe | 2009-12-15 22:14:20 +0100
  dep check to end of module
  r8128 at Thesaurus (orig r8116):  goraxe | 2009-12-15 23:15:25 +0100
  add namespace::autoclean to DBIx::Class::Admin
  r8129 at Thesaurus (orig r8117):  goraxe | 2009-12-15 23:16:00 +0100
  update test suite to skip if cannot load DBIx::Class::Admin
  r8130 at Thesaurus (orig r8118):  goraxe | 2009-12-15 23:18:35 +0100
  add deps check for 89dbicadmin.t
  r8131 at Thesaurus (orig r8119):  goraxe | 2009-12-15 23:19:01 +0100
  include deps for dbicadmin DBIx::Class::Admin to Makefile.PL
  r8149 at Thesaurus (orig r8137):  goraxe | 2009-12-17 23:21:50 +0100
  use DBICTest::_database over creating a schema object to steal conn info
  r8338 at Thesaurus (orig r8326):  goraxe | 2010-01-15 19:00:17 +0100
  change white space to not be tabs
  r8339 at Thesaurus (orig r8327):  goraxe | 2010-01-15 19:10:42 +0100
  remove Module::Load from test suite
  r8358 at Thesaurus (orig r8346):  ribasushi | 2010-01-17 17:52:10 +0100
  Real detabify
  r8359 at Thesaurus (orig r8347):  ribasushi | 2010-01-17 18:01:53 +0100
  Fix POD (spacing matters)
  r8360 at Thesaurus (orig r8348):  ribasushi | 2010-01-17 21:57:53 +0100
  More detabification
  r8361 at Thesaurus (orig r8349):  ribasushi | 2010-01-17 22:33:12 +0100
  Test cleanup
  r8362 at Thesaurus (orig r8350):  ribasushi | 2010-01-17 22:41:11 +0100
  More test cleanup
  r8363 at Thesaurus (orig r8351):  ribasushi | 2010-01-17 22:43:57 +0100
  And more cleanup
  r8364 at Thesaurus (orig r8352):  ribasushi | 2010-01-17 22:51:21 +0100
  Disallow mucking with INC
  r8365 at Thesaurus (orig r8353):  ribasushi | 2010-01-17 23:23:15 +0100
  More cleanup
  r8366 at Thesaurus (orig r8354):  ribasushi | 2010-01-17 23:27:49 +0100
  Add lib path to ENV so that $^X can see it
  r8367 at Thesaurus (orig r8355):  ribasushi | 2010-01-17 23:33:10 +0100
  Move script-test
  r8368 at Thesaurus (orig r8356):  goraxe | 2010-01-17 23:35:03 +0100
  change warns/dies -> carp/throw_exception
  r8369 at Thesaurus (orig r8357):  goraxe | 2010-01-17 23:53:54 +0100
  add goraxe to contributors
  r8370 at Thesaurus (orig r8358):  goraxe | 2010-01-17 23:54:15 +0100
  remove comment headers 
  r8404 at Thesaurus (orig r8391):  caelum | 2010-01-20 20:54:29 +0100
  minor fixups
  r8405 at Thesaurus (orig r8392):  goraxe | 2010-01-20 21:13:24 +0100
  add private types to coerce
  r8406 at Thesaurus (orig r8393):  goraxe | 2010-01-20 21:17:19 +0100
  remove un-needed coerce from schema_class of type Str
  r8411 at Thesaurus (orig r8398):  caelum | 2010-01-21 23:36:25 +0100
  minor documentation updates
  r8436 at Thesaurus (orig r8423):  caelum | 2010-01-25 02:56:30 +0100
  this code never runs anyway
  r8440 at Thesaurus (orig r8427):  caelum | 2010-01-26 14:05:53 +0100
  prefer JSON::DWIW for barekey support
  r8693 at Thesaurus (orig r8680):  ribasushi | 2010-02-13 10:27:18 +0100
  dbicadmin dependencies
  r8694 at Thesaurus (orig r8681):  ribasushi | 2010-02-13 10:28:04 +0100
  Some cleanup, make use of Text::CSV
  r8695 at Thesaurus (orig r8682):  ribasushi | 2010-02-13 10:34:19 +0100
  We use Try::Tiny in a single spot, not grounds for inclusion in deps
  r8696 at Thesaurus (orig r8683):  ribasushi | 2010-02-13 10:37:30 +0100
  POD section
  r8697 at Thesaurus (orig r8684):  ribasushi | 2010-02-13 11:05:17 +0100
  Switch tests to Optional::Deps
  r8700 at Thesaurus (orig r8687):  ribasushi | 2010-02-13 14:32:50 +0100
  Switch Admin/dbicadmin to Opt::Deps
  r8702 at Thesaurus (orig r8689):  ribasushi | 2010-02-13 14:39:24 +0100
  JSON dep is needed for Admin.pm itself
  r8703 at Thesaurus (orig r8690):  ribasushi | 2010-02-13 15:06:28 +0100
  Test fixes
  r8704 at Thesaurus (orig r8691):  ribasushi | 2010-02-13 15:13:31 +0100
  Changes
 
 r8707 at Thesaurus (orig r8694):  ribasushi | 2010-02-13 16:37:57 +0100
 Test for optional deps manager
 r8710 at Thesaurus (orig r8697):  caelum | 2010-02-14 05:22:03 +0100
 add doc on maximum cursors for SQLAnywhere
 r8711 at Thesaurus (orig r8698):  ribasushi | 2010-02-14 09:23:09 +0100
 Cleanup dependencies / Admin inheritance
 r8712 at Thesaurus (orig r8699):  ribasushi | 2010-02-14 09:28:29 +0100
 Some formatting
 r8715 at Thesaurus (orig r8702):  ribasushi | 2010-02-14 10:46:51 +0100
 This is Moose, so use CMOP
 r8720 at Thesaurus (orig r8707):  ribasushi | 2010-02-15 10:28:22 +0100
 Final POD touches
 r8721 at Thesaurus (orig r8708):  ribasushi | 2010-02-15 10:31:38 +0100
 Spellcheck (jawnsy++)
 r8722 at Thesaurus (orig r8709):  ribasushi | 2010-02-15 10:32:24 +0100
 One more
 r8723 at Thesaurus (orig r8710):  ribasushi | 2010-02-15 14:49:26 +0100
 Release 0.08119
 r8725 at Thesaurus (orig r8712):  ribasushi | 2010-02-15 14:50:56 +0100
 Bump trunk version
 r8726 at Thesaurus (orig r8713):  rafl | 2010-02-15 15:49:55 +0100
 Make sure we actually run all tests, given we're using done_testing.
 r8727 at Thesaurus (orig r8714):  rafl | 2010-02-15 15:50:01 +0100
 Make sure overriding deployment_statements is possible from within schemas.
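
A hedged sketch of the schema-level override this makes reliable (the schema
class and the appended DDL are hypothetical):

    package MyApp::Schema;
    use base 'DBIx::Class::Schema';

    sub deployment_statements {
        my $self = shift;
        my @statements = $self->next::method(@_);
        push @statements, 'CREATE INDEX artist_name_idx ON artist (name)';
        return @statements;
    }
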
 r8728 at Thesaurus (orig r8715):  rafl | 2010-02-15 15:56:06 +0100
 Changelogging.
 r8729 at Thesaurus (orig r8716):  rafl | 2010-02-15 15:58:09 +0100
 Make some cookbook code compile.
 r8730 at Thesaurus (orig r8717):  nuba | 2010-02-15 16:11:52 +0100
 spelling fixes in the documaentation, sholud be gud now ;)
 r8732 at Thesaurus (orig r8719):  caelum | 2010-02-16 11:09:58 +0100
 use OO interface of Hash::Merge for ::DBI::Replicated
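
A hedged illustration of the Hash::Merge OO interface now used; the hashes
and the precedence choice are arbitrary examples:

    use Hash::Merge;

    my %defaults  = ( PrintError => 0 );
    my %overrides = ( PrintError => 1, RaiseError => 1 );

    my $merger = Hash::Merge->new('LEFT_PRECEDENT');
    my $merged = $merger->merge( \%overrides, \%defaults );
    # LEFT_PRECEDENT: on conflict, values from the first (left) hash win
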



Property changes on: DBIx-Class/0.08/branches/run_file_against_storage
___________________________________________________________________
Name: svk:merge
   - 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:5969
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:5651
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:6805
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
   + 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/cookbook_fixes:7657
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/prefetch_bug-unqualified_column_in_search_related_cond:7959
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/void_populate_resultset_cond:7935
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7982
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/ado_mssql:7886
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/autocast:7418
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/chaining_fixes:8626
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/connect_info_hash:7435
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/connected_schema_leak:8264
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cookbook_fixes:7479
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/create_scalarref_rt51559:8027
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/dbicadmin_refactor:8691
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/dephandling:8674
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/get_inflated_columns_rt46953:7964
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_has_many_join:7382
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/handle_all_storage_methods_in_replicated:8612
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/ic_dt_post_inflate:8517
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/informix:8434
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/is_resultset_paginated:7769
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_limit_regression:8278
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_rno_pagination:8054
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multiple_version_upgrade:8429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/no_duplicate_indexes_for_pk_cols:8373
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/normalize_connect_info:8274
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/null_column_regression:8314
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_shorten_aliases:8234
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7842
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch-group_by:7917
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_bug-unqualified_column_in_search_related_cond:7900
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_pager:8431
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqlt_parser_view:8145
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:7682
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_asa:8513
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulk_insert:7679
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulkinsert_support:7796
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_computed_columns:8496
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_refactor:7940
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_support:7797
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/view_rels:7908
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/void_populate_resultset_cond:7944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:8719
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510

Added: DBIx-Class/0.08/branches/run_file_against_storage/.gitignore
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/.gitignore	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/.gitignore	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,7 @@
+META.yml
+Makefile
+README
+blib/
+inc/
+pm_to_blib
+t/var/

Modified: DBIx-Class/0.08/branches/run_file_against_storage/Changes
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/Changes	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/Changes	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,16 +1,245 @@
 Revision history for DBIx::Class
 
+        - Make sure possibly overridden deployment_statements methods in
+          schemas get called on $schema->deploy.
+
+0.08119 2010-02-15 09:36:00 (UTC)
+        - Add $rs->is_ordered to test for existing order_by on a resultset
+        - Add as_subselect_rs to DBIC::ResultSet from
+          DBIC::Helper::ResultSet::VirtualView::as_virtual_view
+        - Refactor dbicadmin adding DDL manipulation capabilities
+        - New optional dependency manager to aid extension writers
+        - Depend on newest bugfixed Moose
+        - Make resultset chaining consistent wrt selection specification
+        - Storage::DBI::Replicated cleanup
+        - Fix autoinc PKs without an autoinc flag on Sybase ASA
+
+0.08118 2010-02-08 11:53:00 (UTC)
+        - Fix a bug causing UTF8 columns not to be decoded (RT#54395)
+        - Fix bug in One->Many->One prefetch-collapse handling (RT#54039)
+        - Cleanup handling of relationship accessor types
+
+0.08117 2010-02-05 17:10:00 (UTC)
+        - Perl 5.8.1 is now the minimum supported version
+        - Massive optimization of the join resolution code - now joins
+          will be removed from the resulting SQL if DBIC can prove they
+          are not referenced by anything
+        - Subqueries no longer marked experimental
+        - Support for Informix RDBMS (limit/offset and auto-inc columns)
+        - Support for Sybase SQLAnywhere, both native and via ODBC
+        - might_have/has_one now warn if the calling class's column
+          has is_nullable set to true.
+        - Fixed regression in deploy() with a {sources} table limit applied
+          (RT#52812)
+        - Views without a view_definition will throw an exception when
+          parsed by SQL::Translator::Parser::DBIx::Class
+        - Stop the SQLT parser from auto-adding indexes identical to the
+          Primary Key
+        - InflateColumn::DateTime refactoring to allow fine grained method
+          overloads
+        - Fix ResultSetColumn improperly selecting more than the requested
+          column when +columns/+select is present
+        - Fix failure on update/delete of resultsets with complex WHERE
+          SQLA structures
+        - Fix regression in context sensitivity of deployment_statements
+        - Fix regression resulting in overcomplicated query on
+          search_related from prefetching resultsets
+        - Fix regression on all-null returning searches (properly switch
+          LEFT JOIN to JOIN in order to distinguish between both cases)
+        - Fix regression in grouped resultset count() used on strict-mode
+          MySQL connections
+        - Better isolation of RNO-limited queries from the rest of a
+          prefetching resultset
+        - New MSSQL specific resultset attribute to allow hacky ordered
+          subquery support
+        - Fix nasty schema/dbhandle leak due to SQL::Translator
+        - Initial implementation of a mechanism for Schema::Version to
+          apply multiple step upgrades
+        - Fix regression on externally supplied $dbh with AutoCommit=0
+        - FAQ "Custom methods in Result classes"
+        - Cookbook POD fix for add_drop_table instead of add_drop_tables
+        - Schema POD improvement for dclone
+
+0.08115 2009-12-10 09:02:00 (CST)
+        - Real limit/offset support for MSSQL server (via Row_Number)
+        - Fix distinct => 1 with non-selecting order_by (the columns
+          in order_by also need to be added to the resulting group_by)
+        - Do not attempt to deploy FK constraints pointing to a View
+        - Fix count/objects from search_related on limited resultset
+        - Stop propagating distinct => 1 over search_related chains
+        - Make sure populate() inherits the resultset conditions just
+          like create() does
+        - Make get_inflated_columns behave identically to get_columns
+          wrt +select/+as (RT#46953)
+        - Fix problems with scalarrefs under InflateColumn (RT#51559)
+        - Throw exception on delete/update of PK-less resultsets
+        - Refactored Sybase storage driver into a central ::DBI::Sybase
+          dispatcher, and a sybase-specific ::DBI::Sybase::ASE
+        - Fixed an atrocious DBD::ADO bind-value bug
+        - Cookbook/Intro POD improvements
+
+0.08114 2009-11-14 17:45:00 (UTC)
+        - Preliminary support for MSSQL via DBD::ADO
+        - Fix botched 0.08113 release (invalid tarball)
+
+0.08113 2009-11-13 23:13:00 (UTC)
+        - Fix populate with has_many bug
+          (RT #50828)
+        - Fix Oracle autoincrement broken for Resultsets with scalar refs
+          (RT #50874)
+        - Complete Sybase RDBMS support including:
+          - Support for TEXT/IMAGE columns
+          - Support for the 'money' datatype
+          - Transaction savepoints support
+          - DateTime inflation support
+          - Support for bind variables when connecting to a newer Sybase with
+             OpenClient libraries
+          - Support for connections via FreeTDS with CASTs for bind variables
+             when needed
+          - Support for interpolated variables with proper quoting when
+             connecting to an older Sybase and/or via FreeTDS
+          - bulk API support for populate()
+        - Transaction support for MSSQL via DBD::Sybase
+        - Add is_paged method to DBIx::Class::ResultSet so that we can
+          check whether we have a pager
+        - Skip versioning test on really old perls lacking Time::HiRes
+          (RT #50209)
+        - Fixed on_connect_do/call regression when used with a coderef
+          connector (RT #50003)
+        - A couple of fixes to Ordered to remedy subclassing issues
+        - Fixed another lingering problem with PostgreSQL
+          auto-increment support and its interaction with multiple
+          schemas
+        - Remove some IN workarounds, and require a recent version of
+          SQLA instead
+        - Improvements to populate's handling of mixed scalarref values
+        - Fixed regression losing result_class after $rs->find (introduced
+          in 0.08108)
+        - Fix in_storage() to return 1|0 as per existing documentation
+        - Centralize handling of _determine_driver calls prior to certain
+          ::Storage::DBI methods
+        - Fix update/delete arbitrary condition handling (RT#51409)
+        - POD improvements
+
+0.08112 2009-09-21 10:57:00 (UTC)
+        - Remove the recommends from Makefile.PL, DBIx::Class is not
+          supposed to have optional dependencies. ever.
+        - Mangle the DBIx/Class.pm POD to be more clear about
+          copyright and license
+        - Put back PG's multiple autoinc per table support, accidentally
+          dropped during the serial-autodetection rewrite
+        - Make sure ResultSetColumn does not depend on the (undefined)
+          return value of ->cursor->reset()
+        - Add single() to ResultSetColumn (same semantics as ResultSet)
+        - Make sure to turn off IDENTITY_INSERT after insert() on MSSQL
+          tables that needed it
+        - More informative exception on failing _resolve_relationship
+        - Allow undef/NULL as the sole grouping value in Ordered
+        - Fix unreported rollback exceptions in TxnScopeGuard
+        - Fix overly-eager left-join chain enforcing code
+        - Warn about using distinct with an existing group_by
+        - Warn about attempting to $rs->get_column a non-unique column
+          when has_many joins are added to resultset
+        - Refactor of the exception handling system (now everything is a
+          DBIx::Class::Exception object)
+
+0.08111 2009-09-06 21:58:00 (UTC)
+        - The hashref to connect_info now accepts a 'dbh_maker'
+          coderef, allowing better integration with Catalyst
+        - Fixed a complex prefetch + regular join regression introduced
+          in 0.08108
+        - Fixed insert_bulk rebless handling
+        - Fixed Storable roundtrip regression, and general serialization
+          cleanup
+        - SQLT related fixes:
+          - sqlt_type is now called on the correct storage object
+          - hooks can now see the correct producer_type (RT#47891)
+          - optional SQLT requirements for e.g. deploy() bumped to 0.11002
+        - Really fixed (and greatly cleaned up) postgresql autoinc sequence
+          autodetection
+        - Automatically detect MySQL v3 and use INNER JOIN instead of JOIN
+        - POD improvements (including RT#48769)
+        - Test suite tweaks (including fixes for recent CPANTS fails)
+        - Better support for MSSQL IDENTITY_INSERT ON
+
+0.08109 2009-08-18 08:35:00 (UTC)
+        - Replication updates:
+          - Improved the replication tests so that they are more reliable
+            and accurate, and hopefully solve some cross platform issues.
+          - Bugfixes related to naming particular replicants in a
+            'force_pool' attribute.
+          - Lots of documentation updates, including a new Introduction.pod
+            file.
+          - Fixed the way we detect transactions to make this more
+            reliable and forward-looking.
+          - Fixed some trouble with the way Moose Types are used.
+          - Made discard_changes/get_from_storage replication aware (they
+            now read from the master storage by default)
+        - Refactor of MSSQL storage drivers, with some new features:
+          - Support for placeholders for MSSQL via DBD::Sybase with proper
+            autodetection
+          - 'uniqueidentifier' support with auto newid()
+          - Dynamic cursor support and other MARS options for ODBC
+          - savepoints with auto_savepoint => 1
+        - Support for MSSQL 'money' type
+        - Support for 'smalldatetime' type used in MSSQL and Sybase for
+          InflateColumn::DateTime
+        - Support for Postgres 'timestamp without timezone' type in
+          InflateColumn::DateTime (RT#48389)
+        - Added new MySQL specific on_connect_call macro 'set_strict_mode'
+          (also known as make_mysql_not_suck_as_much)
+        - Multiple prefetch-related fixes:
+          - Adjust overly aggressive subquery join-chain pruning
+          - Always preserve the outer join-chain - fixes numerous
+            problems with search_related chaining
+          - Deal with the distinct => 1 attribute properly when using
+            prefetch
+        - An extension of the select-hashref syntax, allowing labeled
+          SQL-side aliasing: select => [ { max => 'foo', -as => 'bar' } ]
+        - Massive optimization of the DBI storage layer - reduced the
+          number of connected() ping-calls
+        - Some fixes of multi-create corner cases
+        - Multiple POD improvements
+        - Added exception when resultset is called without an argument
+        - Improved support for non-schema-qualified tables under
+          Postgres (fixed last_insert_id sequence name auto-detection)
+
+0.08108 2009-07-05 23:15:00 (UTC)
+        - Fixed the has_many prefetch with limit/group deficiency -
+          it is now possible to select "top 5 commenters" while
+          prefetching all their comments
+        - New resultset method count_rs, returns a ::ResultSetColumn
+          which in turn returns a single count value
+        - Even better support of count with limit
+        - New on_connect_call/on_disconnect_call functionality (check
+          POD of Storage::DBI)
+        - Automatic datetime handling environment/session setup for
+          Oracle via connect_call_datetime_setup()
+        - count/all on related left-joined empty resultsets now correctly
+          returns 0/()
         - Fixed regression when both page and offset are specified on
           a resultset
         - Fixed HRI returning too many empty results on multilevel
           nonexisting prefetch
-        - Fixed the prefetch with limit bug
-        - New resultsed method count_rs, returns a ::ResultSetColumn
-          which in turn returns a single count value
         - make_column_dirty() now overwrites the deflated value with an
           inflated one if such exists
-        - Fixed set_$rel with where restriction deleting rows outside 
+        - Fixed set_$rel with where restriction deleting rows outside
           the restriction
+        - populate() returns the created objects or an arrayref of the
+          created objects depending on scalar vs. list context
+        - Fixed find_related on 'single' relationships - the former
+          implementation would overspecify the WHERE condition, reporting
+          no related objects when in fact there is one
+        - SQL::Translator::Parser::DBIx::Class now attaches tables to the
+          central schema object in relationship dependency order
+        - Fixed regression in set_column() preventing sourceless object
+          manipulations
+        - Fixed a bug in search_related doubling a join if the original
+          $rs already joins/prefetches the same relation
+        - Storage::DBI::connected() improvements for Oracle and Sybase
+        - Fixed prefetch+incomplete select regression introduced in
+          0.08100
+        - MSSQL limit (TOP emulation) fixes and improvements
 
 0.08107 2009-06-14 08:21:00 (UTC)
         - Fix serialization regression introduced in 0.08103 (affects
@@ -27,8 +256,8 @@
         - Update of numeric columns now properly uses != to determine
           dirtiness instead of the usual eq
         - Fixes to IC::DT tests
-        - Fixed exception when undef_if_invalid and timezone are both set on 
-          an invalid datetime column
+        - Fixed exception when undef_if_invalid and timezone are both set
+          on an invalid datetime column
 
 0.08104 2009-06-10 13:38:00 (UTC)
         - order_by now can take \[$sql, @bind] as in
@@ -46,7 +275,7 @@
           side of the relation, to avoid duplicates
         - DBIC now properly handles empty inserts (invoking all default
           values from the DB, normally via INSERT INTO tbl DEFAULT VALUES
-        - Fix find_or_new/create to stop returning random rows when 
+        - Fix find_or_new/create to stop returning random rows when
           default value insert is requested (RT#28875)
         - Make IC::DT extra warning state the column name too
         - It is now possible to transparently search() on columns
@@ -68,9 +297,9 @@
         - Change ->count code to work correctly with DISTINCT (distinct => 1)
           via GROUP BY
         - Removed interpolation of bind vars for as_query - placeholders
-          are preserved and nested query bind variables are properly 
+          are preserved and nested query bind variables are properly
           merged in the correct order
-        - Refactor DBIx::Class::Storage::DBI::Sybase to automatically 
+        - Refactor DBIx::Class::Storage::DBI::Sybase to automatically
           load a subclass, namely Microsoft_SQL_Server.pm
           (similar to DBIx::Class::Storage::DBI::ODBC)
         - Refactor InflateColumn::DateTime to allow components to
@@ -133,7 +362,7 @@
           - not try and insert things tagged on via new_related unless required
         - Possible to set locale in IC::DateTime extra => {} config
         - Calling the accessor of a belongs_to when the foreign_key
-          was NULL and the row was not stored would unexpectedly fail 
+          was NULL and the row was not stored would unexpectedly fail
         - Split sql statements for deploy only if SQLT::Producer returned a scalar
           containing all statements to be executed
         - Add as_query() for ResultSet and ResultSetColumn. This makes subqueries
@@ -161,8 +390,8 @@
         - new order_by => { -desc => 'colname' } syntax supported
         - PG array datatype supported
         - insert should use store_column, not set_column to avoid marking
-          clean just-stored values as dirty. New test for this 
-        - regression test for source_name 
+          clean just-stored values as dirty. New test for this
+        - regression test for source_name
 
 0.08099_05 2008-10-30 21:30:00 (UTC)
         - Rewrite of Storage::DBI::connect_info(), extended with an
@@ -176,7 +405,7 @@
         - Fixed up related resultsets and multi-create
         - Fixed superfluous connection in ODBC::_rebless
         - Fixed undef PK for first insert in ODBC::Microsoft_SQL_Server
-        - Added virtual method to Versioned so a user can create upgrade 
+        - Added virtual method to Versioned so a user can create upgrade
           path across multiple versions (jgoulah)
         - Better (and marginally faster) implementation of the HashRefInflator
           hash construction algorithm
@@ -185,7 +414,7 @@
 
 0.08099_04 2008-07-24 01:00:00
         - Functionality to storage to enable a sub to be run without FK checks
-        - Fixed $schema->clone bug which caused clone and source to share 
+        - Fixed $schema->clone bug which caused clone and source to share
           internal hash refs
         - Added register_extra_source methods for additional sources
         - Added datetime_undef_if_invalid for InflateColumn::DateTime to
@@ -211,11 +440,11 @@
         - Add warnings for non-unique ResultSet::find queries
         - Changed Storage::DBI::Replication to Storage::DBI::Replicated and
           refactored support.
-        - By default now deploy/diff et al. will ignore constraint and index 
+        - By default now deploy/diff et al. will ignore constraint and index
           names
         - Add ResultSet::_is_deterministic_value, make new_result filter the
           values passed to new to drop values that would generate invalid SQL.
-        - Use Sub::Name to name closures before installing them. Fixes 
+        - Use Sub::Name to name closures before installing them. Fixes
           incompatibility with Moose method modifiers on generated methods.
 
 0.08010 2008-03-01 10:30
@@ -224,7 +453,7 @@
 0.08009 2008-01-20 13:30
         - Made search_rs smarter about when to preserve the cache to fix
           mm prefetch usage
-        - Added Storage::DBI subclass for MSSQL over ODBC. 
+        - Added Storage::DBI subclass for MSSQL over ODBC.
         - Added freeze, thaw and dclone methods to Schema so that thawed
           objects will get re-attached to the schema.
         - Moved dbicadmin to JSON::Any wrapped JSON.pm for a sane API
@@ -238,20 +467,20 @@
           foreign and self parts the wrong way round in the condition
         - ResultSetColumn::func() now returns all results if called in list
           context; this makes things like func('DISTINCT') work as expected
-        - Many-to-many relationships now warn if the utility methods would 
+        - Many-to-many relationships now warn if the utility methods would
           clash
         - InflateColumn::DateTime now accepts an extra parameter of timezone
           to set timezone on the DT object (thanks Sergio Salvi)
-        - Added sqlt_deploy_hook to result classes so that indexes can be 
+        - Added sqlt_deploy_hook to result classes so that indexes can be
           added.
-        - Added startup checks to warn loudly if we appear to be running on 
+        - Added startup checks to warn loudly if we appear to be running on
           RedHat systems from perl-5.8.8-10 and up that have the bless/overload
           patch applied (badly) which causes 2x -> 100x performance penalty.
           (Jon Schutz)
-        - ResultSource::reverse_relationship_info can distinguish between 
+        - ResultSource::reverse_relationship_info can distinguish between
           sources using the same table
         - Row::insert will now not fall over if passed duplicate related objects
-        - Row::copy will not fall over if you have two relationships to the 
+        - Row::copy will not fall over if you have two relationships to the
           same source with a unique constraint on it
 
 0.08007 2007-09-04 19:36:00
@@ -263,7 +492,7 @@
         - Move to using Class::C3::Componentised
         - Remove warn statement from DBIx::Class::Row
 
-0.08005 2007-08-06 
+0.08005 2007-08-06
         - add timestamp fix re rt.cpan 26978 - no test yet but change
           clearly should cause no regressions
         - provide alias for related_resultset via local() so it's set
@@ -278,7 +507,7 @@
           (original fix from diz)
 
 0.08004 2007-08-06 19:00:00
-        - fix storage connect code to not trigger bug via auto-viv 
+        - fix storage connect code to not trigger bug via auto-viv
           (test from aherzog)
         - fixup cursor_class to be an 'inherited' attr for per-package defaults
         - add default_resultset_attributes entry to Schema

Modified: DBIx-Class/0.08/branches/run_file_against_storage/Features_09
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/Features_09	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/Features_09	2010-02-16 10:26:12 UTC (rev 8720)
@@ -14,12 +14,8 @@
  - "belongs_to" to "contains/refers/something"
 
 Using inflated objects/references as values in searches
- - Goes together with subselects above
  - should deflate then run search
 
-FilterColumn - like Inflate, only for changing scalar values
- - This seems to be vaporware atm..
-
 SQL/API feature complete?
  - UNION
  - proper join conditions!
@@ -27,17 +23,16 @@
 
 Moosification - ouch
 
+Metamodel stuff - introspection
+
 Prefetch improvements
  - slow on mysql, speedup?
  - multi has_many prefetch
- - paging working with prefetch
 
 Magically "discover" needed joins/prefetches and add them
  - eg $books->search({ 'author.name' => 'Fred'}), autoadds: join => 'author'
  - also guess aliases when supplying column names that are on joined/related tables
 
-Metamodel stuff - introspection
-
 Storage API/restructure
  - call update/insert etc on the ResultSource, which then calls to storage
  - handle different storages/db-specific code better
@@ -52,4 +47,3 @@
 Documentation - improvements
  - better indexing for finding of stuff in general
  - more cross-referencing of docs
-

Modified: DBIx-Class/0.08/branches/run_file_against_storage/MANIFEST.SKIP
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/MANIFEST.SKIP	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/MANIFEST.SKIP	2010-02-16 10:26:12 UTC (rev 8720)
@@ -6,6 +6,9 @@
 \bCVS\b
 ,v$
 \B\.svn\b
+\B\.git\b
+\B\.gitignore\b
+\b_darcs\b
 
 # Avoid Makemaker generated and utility files.
 \bMakefile$

Modified: DBIx-Class/0.08/branches/run_file_against_storage/Makefile.PL
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/Makefile.PL	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/Makefile.PL	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,105 +1,86 @@
-use inc::Module::Install 0.89;
+use inc::Module::Install 0.93;
 use strict;
 use warnings;
 use POSIX ();
 
-use 5.006001; # delete this line if you want to send patches for earlier.
+use 5.008001;
 
-name     'DBIx-Class';
-perl_version '5.006001';
-all_from 'lib/DBIx/Class.pm';
+use FindBin;
+use lib "$FindBin::Bin/lib";
 
-requires 'DBD::SQLite'              => 1.25;
-requires 'Data::Page'               => 2.00;
-requires 'SQL::Abstract'            => 1.56;
-requires 'SQL::Abstract::Limit'     => 0.13;
-requires 'Class::C3::Componentised' => 1.0005;
-requires 'Carp::Clan'               => 6.0;
-requires 'DBI'                      => 1.605;
-requires 'Module::Find'             => 0.06;
-requires 'Class::Inspector'         => 1.24;
-requires 'Class::Accessor::Grouped' => 0.08003;
-requires 'JSON::Any'                => 1.18;
-requires 'Scope::Guard'             => 0.03;
-requires 'Path::Class'              => 0.16;
-requires 'Sub::Name'                => 0.04;
-requires 'MRO::Compat'              => 0.09;
+###
+### DO NOT ADD OPTIONAL DEPENDENCIES HERE, EVEN AS recommends()
+### All of them should go to DBIx::Class::Optional::Dependencies
+###
 
-# Core
-requires 'List::Util'               => 0;
-requires 'Scalar::Util'             => 0;
-requires 'Storable'                 => 0;
 
-# Perl 5.8.0 doesn't have utf8::is_utf8()
-requires 'Encode'                   => 0 if ($] <= 5.008000);
+name     'DBIx-Class';
+perl_version '5.008001';
+all_from 'lib/DBIx/Class.pm';
 
-test_requires 'Test::More'          => 0.82;
-test_requires 'Test::Builder'       => 0.33;
-test_requires 'Test::Warn'          => 0.11;
-test_requires 'Test::Exception'     => 0;
-test_requires 'Test::Deep'          => 0;
+my $build_requires = {
+  'DBD::SQLite'              => '1.25',
+};
 
-recommends 'SQL::Translator'        => 0.09004;
+my $test_requires = {
+  'File::Temp'               => '0.22',
+  'Test::Builder'            => '0.33',
+  'Test::Deep'               => '0',
+  'Test::Exception'          => '0',
+  'Test::More'               => '0.92',
+  'Test::Warn'               => '0.21',
+};
 
-install_script (qw|
-    script/dbicadmin
-|);
+my $runtime_requires = {
+  # Core
+  'List::Util'               => '0',
+  'Scalar::Util'             => '0',
+  'Storable'                 => '0',
 
-tests_recursive (qw|
-    t
-|);
+  # Dependencies
+  'Carp::Clan'               => '6.0',
+  'Class::Accessor::Grouped' => '0.09002',
+  'Class::C3::Componentised' => '1.0005',
+  'Class::Inspector'         => '1.24',
+  'Data::Page'               => '2.00',
+  'DBI'                      => '1.609',
+  'MRO::Compat'              => '0.09',
+  'Module::Find'             => '0.06',
+  'Path::Class'              => '0.18',
+  'Scope::Guard'             => '0.03',
+  'SQL::Abstract'            => '1.61',
+  'SQL::Abstract::Limit'     => '0.13',
+  'Sub::Name'                => '0.04',
+  'Data::Dumper::Concise'    => '1.000',
+};
 
-resources 'IRC'         => 'irc://irc.perl.org/#dbix-class';
-resources 'license'     => 'http://dev.perl.org/licenses/';
-resources 'repository'  => 'http://dev.catalyst.perl.org/svnweb/bast/browse/DBIx-Class/';
-resources 'MailingList' => 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class';
+# this is so we can order requires alphabetically
+# copies are needed for author requires injection
+my $reqs = {
+  build_requires => { %$build_requires },
+  requires => { %$runtime_requires },
+  test_requires => { %$test_requires },
+};
 
-
 # re-build README and require extra modules for testing if we're in a checkout
+if ($Module::Install::AUTHOR) {
 
-my %force_requires_if_author = (
-  'Test::Pod::Coverage'       => 1.04,
-  'SQL::Translator'           => 0.09007,
+  print "Regenerating README\n";
+  system('pod2text lib/DBIx/Class.pm > README');
 
-  # CDBI-compat related
-  'DBIx::ContextualFetch'     => 0,
-  'Class::DBI::Plugin::DeepAbstractSearch' => 0,
-  'Class::Trigger'            => 0,
-  'Time::Piece::MySQL'        => 0,
-  'Clone'                     => 0,
-  'Date::Simple'              => 3.03,
+  if (-f 'MANIFEST') {
+    print "Removing MANIFEST\n";
+    unlink 'MANIFEST';
+  }
 
-  # t/52cycle.t
-  'Test::Memory::Cycle'       => 0,
-  'Devel::Cycle'              => 1.10,
+  print "Regenerating Optional/Dependencies.pod\n";
+  require DBIx::Class::Optional::Dependencies;
+  DBIx::Class::Optional::Dependencies->_gen_pod;
 
-  # t/inflate/datetime*.t
-  # t/72.pg
-  # t/36datetime.t
-  # t/60core.t
-  'DateTime::Format::SQLite'  => 0,
-  'DateTime::Format::MySQL'   => 0,
-  'DateTime::Format::Pg'      => 0,
+# FIXME Disabled due to unsolved issues, ask theorbtwo
+#  require Module::Install::Pod::Inherit;
+#  PodInherit();
 
-  # t/96_is_deteministic_value.t
-  'DateTime::Format::Strptime' => 0,
-
-  # t/72pg.t
-  $ENV{DBICTEST_PG_DSN}
-    ? ('Sys::SigAction'=> 0)
-    : ()
-  ,
-
-  # t/93storage_replication.t
-  'Moose',                        => 0.77,
-  'MooseX::AttributeHelpers'      => 0.12,
-  'MooseX::Types',                => 0.10,
-  'namespace::clean'              => 0.11,
-  'Hash::Merge',                  => 0.11,
-
-);
-
-if ($Module::Install::AUTHOR) {
   warn <<'EOW';
 ******************************************************************************
 ******************************************************************************
@@ -111,29 +92,72 @@
 
 EOW
 
-  foreach my $module (keys %force_requires_if_author) {
-    build_requires ($module => $force_requires_if_author{$module});
-  }
+  $reqs->{test_requires} = {
+    %{$reqs->{test_requires}},
+    %{DBIx::Class::Optional::Dependencies->_all_optional_requirements},
+  };
+}
 
-  print "Regenerating README\n";
-  system('pod2text lib/DBIx/Class.pm > README');
+# compose final req list, for alphabetical ordering
+my %final_req;
+for my $rtype (keys %$reqs) {
+  for my $mod (keys %{$reqs->{$rtype}} ) {
 
-  if (-f 'MANIFEST') {
-    print "Removing MANIFEST\n";
-    unlink 'MANIFEST';
+    # sanity check req duplications
+    if ($final_req{$mod}) {
+      die "$mod specified as both a '$rtype' and a '$final_req{$mod}[0]'\n";
+    }
+
+    $final_req{$mod} = [ $rtype, $reqs->{$rtype}{$mod}||0 ];
   }
 }
 
+# actual require
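+# ($rtype is the name of a Module::Install DSL sub - requires(),
+# build_requires() or test_requires() - invoked via symbolic reference)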
+for my $mod (sort keys %final_req) {
+  my ($rtype, $ver) = @{$final_req{$mod}};
+  no strict 'refs';
+  $rtype->($mod, $ver);
+}
+
+install_script (qw|
+    script/dbicadmin
+|);
+
+tests_recursive (qw|
+    t
+|);
+
+resources 'IRC'         => 'irc://irc.perl.org/#dbix-class';
+resources 'license'     => 'http://dev.perl.org/licenses/';
+resources 'repository'  => 'http://dev.catalyst.perl.org/repos/bast/DBIx-Class/';
+resources 'MailingList' => 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class';
+
+# Deprecated/internal modules need no exposure
+no_index directory => $_ for (qw|
+  lib/DBIx/Class/SQLAHacks
+  lib/DBIx/Class/PK/Auto
+|);
+no_index package => $_ for (qw/
+  DBIx::Class::Storage::DBI::AmbiguousGlob
+  DBIx::Class::SQLAHacks DBIx::Class::Storage::DBIHacks
+/);
+
+
 auto_install();
 
 WriteAll();
 
+
 # Re-write META.yml to _exclude_ all forced requires (we do not want to ship this)
 if ($Module::Install::AUTHOR) {
 
-  Meta->{values}{build_requires} = [ grep 
-    { not exists $force_requires_if_author{$_->[0]} }
-    ( @{Meta->{values}{build_requires}} )
+  # FIXME test_requires is not yet part of META
+  my %original_build_requires = ( %$build_requires, %$test_requires );
+
+  print "Regenerating META with author requires excluded\n";
+  Meta->{values}{build_requires} = [ grep
+    { exists $original_build_requires{$_->[0]} }
+   ( @{Meta->{values}{build_requires}} )
   ];
 
   Meta->write;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/TODO
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/TODO	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/TODO	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,12 +3,8 @@
   - ResultSource objects caching ->resultset causes interesting problems
   - find why XSUB dumper kills schema in Catalyst (may be Pg only?)
 
-2006-04-11 by castaway
- - docs of copy() should say that is_auto_increment is essential for auto_incrementing keys
-
 2006-03-25 by mst
   - find a way to un-wantarray search without breaking compat
-  - audit logging component
   - delay relationship setup if done via ->load_classes
   - double-sided relationships
   - make short form of class specifier in relationships work
@@ -21,9 +17,6 @@
    We should still support the old inflate/deflate syntax, but this new 
    way should be recommended. 
 
-2006-02-07 by castaway
- - Extract DBIC::SQL::Abstract into a separate module for CPAN
-
 2006-03-18 by bluefeet
  - Support table locking.
 
@@ -32,13 +25,6 @@
    __PACKAGE__->table(__PACKAGE__->table()); for the result set to 
    return the correct object type.
 
-2006-03-27 by mst
- Add the ability for deploy to be given a directory and grab <dbname>.sql 
- out of there if available. Try SQL::Translator if not. If none of the above, 
- cry (and die()).  Then you can have a script that pre-gens for all available 
- SQLT modules so an app can do its own deploy without SQLT on the target 
- system
-
 2006-05-25 by mst (TODOed by bluefeet)
  Add the search attributes "limit" and "rows_per_page".
  limit: work as expected just like offset does
@@ -47,18 +33,8 @@
        if you haven't specified one of the others
 
 2008-10-30 by ribasushi
- Leftovers for next dev-release
   - Rewrite the test suite to rely on $schema->deploy, allowing for seamless
     testing of various RDBMS using the same tests
-  - Proper support of default create (i.e. create({}) ), with proper workarounds
-    for different Storage's
   - Automatically infer quote_char/name_sep from $schema->storage
-  - Finally incorporate View support (needs real tests)
   - Fix and properly test chained search attribute merging
-
-2008-11-07 by ribasushi
-  - Be loud when a relationship resolution fails because we did not select/as
-    a neccessary pk
   - Recursive update() (all code seems to be already available)
-  - $rs->populate changes its syntax depending on wantarray context (BAD)
-    Also the interface differs from $schema->populate (not so good)

Modified: DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/MyDatabase/Main/Result/Artist.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/MyDatabase/Main/Result/Artist.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/MyDatabase/Main/Result/Artist.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,9 +1,16 @@
 package MyDatabase::Main::Result::Artist;
-use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
+
+use warnings;
+use strict;
+
+use base qw/DBIx::Class::Core/;
+
 __PACKAGE__->table('artist');
+
 __PACKAGE__->add_columns(qw/ artistid name /);
+
 __PACKAGE__->set_primary_key('artistid');
+
 __PACKAGE__->has_many('cds' => 'MyDatabase::Main::Result::Cd');
 
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/MyDatabase/Main/Result/Cd.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/MyDatabase/Main/Result/Cd.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/MyDatabase/Main/Result/Cd.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,9 +1,16 @@
 package MyDatabase::Main::Result::Cd;
-use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
+
+use warnings;
+use strict;
+
+use base qw/DBIx::Class::Core/;
+
 __PACKAGE__->table('cd');
+
 __PACKAGE__->add_columns(qw/ cdid artist title/);
+
 __PACKAGE__->set_primary_key('cdid');
+
 __PACKAGE__->belongs_to('artist' => 'MyDatabase::Main::Result::Artist');
 __PACKAGE__->has_many('tracks' => 'MyDatabase::Main::Result::Track');
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/MyDatabase/Main/Result/Track.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/MyDatabase/Main/Result/Track.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/MyDatabase/Main/Result/Track.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,9 +1,16 @@
 package MyDatabase::Main::Result::Track;
-use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
+
+use warnings;
+use strict;
+
+use base qw/DBIx::Class::Core/;
+
 __PACKAGE__->table('track');
+
 __PACKAGE__->add_columns(qw/ trackid cd title/);
+
 __PACKAGE__->set_primary_key('trackid');
+
 __PACKAGE__->belongs_to('cd' => 'MyDatabase::Main::Result::Cd');
 
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/insertdb.pl
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/insertdb.pl	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/examples/Schema/insertdb.pl	2010-02-16 10:26:12 UTC (rev 8720)
@@ -23,10 +23,10 @@
 
 my @cds;
 foreach my $lp (keys %albums) {
-    my $artist = $schema->resultset('Artist')->search({
+    my $artist = $schema->resultset('Artist')->find({
         name => $albums{$lp}
     });
-    push @cds, [$lp, $artist->first];
+    push @cds, [$lp, $artist->id];
 }
 
 $schema->populate('Cd', [
@@ -47,10 +47,10 @@
 
 my @tracks;
 foreach my $track (keys %tracks) {
-    my $cdname = $schema->resultset('Cd')->search({
+    my $cd = $schema->resultset('Cd')->find({
         title => $tracks{$track},
     });
-    push @tracks, [$cdname->first, $track];
+    push @tracks, [$cd->id, $track];
 }
 
 $schema->populate('Track',[

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/AccessorGroup.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/AccessorGroup.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/AccessorGroup.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -17,8 +17,6 @@
 
 This class now exists in its own right on CPAN as Class::Accessor::Grouped
 
-1;
-
 =head1 AUTHORS
 
 Matt S. Trout <mst at shadowcatsystems.co.uk>

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Admin/Types.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Admin/Types.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Admin/Types.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,48 @@
+package # hide from PAUSE
+    DBIx::Class::Admin::Types;
+
+use MooseX::Types -declare => [qw(
+    DBICConnectInfo
+    DBICArrayRef
+    DBICHashRef
+)];
+use MooseX::Types::Moose qw/Int HashRef ArrayRef Str Any Bool/;
+use MooseX::Types::JSON qw(JSON);
+
+subtype DBICArrayRef,
+    as ArrayRef;
+
+subtype DBICHashRef,
+    as HashRef;
+
+coerce DBICArrayRef,
+  from JSON,
+  via { _json_to_data ($_) };
+
+coerce DBICHashRef,
+  from JSON,
+  via { _json_to_data($_) };
+
+subtype DBICConnectInfo,
+  as ArrayRef;
+
+coerce DBICConnectInfo,
+  from JSON,
+   via { return _json_to_data($_) } ;
+
+coerce DBICConnectInfo,
+  from Str,
+    via { return _json_to_data($_) };
+
+coerce DBICConnectInfo,
+  from HashRef,
+   via { [ $_ ] };
+
+sub _json_to_data {
+  my ($json_str) = @_;
+  my $json = JSON::Any->new(allow_barekey => 1, allow_singlequote => 1, relaxed=>1);
+  my $ret = $json->jsonToObj($json_str);
+  return $ret;
+}
+
+1;

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Admin.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Admin.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Admin.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,568 @@
+package DBIx::Class::Admin;
+
+# check deps
+BEGIN {
+  use Carp::Clan qw/^DBIx::Class/;
+  use DBIx::Class;
+  croak('The following modules are required for DBIx::Class::Admin ' . DBIx::Class::Optional::Dependencies->req_missing_for ('admin') )
+    unless DBIx::Class::Optional::Dependencies->req_ok_for ('admin');
+}
+
+use Moose;
+use MooseX::Types::Moose qw/Int Str Any Bool/;
+use DBIx::Class::Admin::Types qw/DBICConnectInfo DBICHashRef/;
+use MooseX::Types::JSON qw(JSON);
+use MooseX::Types::Path::Class qw(Dir File);
+use Try::Tiny;
+use JSON::Any qw(DWIW XS JSON);
+use namespace::autoclean;
+
+=head1 NAME
+
+DBIx::Class::Admin - Administration object for schemas
+
+=head1 SYNOPSIS
+
+  $ dbicadmin --help
+
+  $ dbicadmin --schema=MyApp::Schema \
+    --connect='["dbi:SQLite:my.db", "", ""]' \
+    --deploy
+
+  $ dbicadmin --schema=MyApp::Schema --class=Employee \
+    --connect='["dbi:SQLite:my.db", "", ""]' \
+    --op=update --set='{ "name": "New_Employee" }'
+
+  use DBIx::Class::Admin;
+
+  # ddl manipulation
+  my $admin = DBIx::Class::Admin->new(
+    schema_class=> 'MY::Schema',
+    sql_dir=> $sql_dir,
+    connect_info => { dsn => $dsn, user => $user, password => $pass },
+  );
+
+  # create SQLite sql
+  $admin->create('SQLite');
+
+  # create SQL diff for an upgrade
+  $admin->create('SQLite', {} , "1.0");
+
+  # upgrade a database
+  $admin->upgrade();
+
+  # install a version for an unversioned schema
+  $admin->install("3.0");
+
+=head1 REQUIREMENTS
+
+The Admin interface has additional requirements not currently part of
+L<DBIx::Class>. See L<DBIx::Class::Optional::Dependencies> for more details.
+
+=head1 ATTRIBUTES
+
+=head2 schema_class
+
+the class of the schema to load
+
+=cut
+
+has 'schema_class' => (
+  is  => 'ro',
+  isa => Str,
+);
+
+
+=head2 schema
+
+A pre-connected schema object can be provided for manipulation
+
+=cut
+
+has 'schema' => (
+  is          => 'ro',
+  isa         => 'DBIx::Class::Schema',
+  lazy_build  => 1,
+);
+
+sub _build_schema {
+  my ($self)  = @_;
+  require Class::MOP;
+  Class::MOP::load_class($self->schema_class);
+
+  $self->connect_info->[3]->{ignore_version} = 1;
+  return $self->schema_class->connect(@{$self->connect_info()} ); # ,  $self->connect_info->[3], { ignore_version => 1} );
+}
+
+
+=head2 resultset
+
+a resultset from the schema to operate on
+
+=cut
+
+has 'resultset' => (
+  is  => 'rw',
+  isa => Str,
+);
+
+
+=head2 where
+
+a hash ref or json string to be used for identifying data to manipulate
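+
+For example (the column name is illustrative):
+
+  # a JSON string (coerced to a hashref) or a plain hashref both work
+  where => '{ "name": "New_Employee" }'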
+
+=cut
+
+has 'where' => (
+  is      => 'rw',
+  isa     => DBICHashRef,
+  coerce  => 1,
+);
+
+
+=head2 set
+
+a hash ref or json string to be used for inserting or updating data
+
+=cut
+
+has 'set' => (
+  is      => 'rw',
+  isa     => DBICHashRef,
+  coerce  => 1,
+);
+
+
+=head2 attrs
+
+a hash ref or json string to be used for passing additional info to the ->search call
+
+=cut
+
+has 'attrs' => (
+  is      => 'rw',
+  isa     => DBICHashRef,
+  coerce  => 1,
+);
+
+
+=head2 connect_info
+
+the arguments to provide to the connect call of the schema_class
+
+=cut
+
+has 'connect_info' => (
+  is          => 'ro',
+  isa         => DBICConnectInfo,
+  lazy_build  => 1,
+  coerce      => 1,
+);
+
+sub _build_connect_info {
+  my ($self) = @_;
+  return $self->_find_stanza($self->config, $self->config_stanza);
+}
+
+
+=head2 config_file
+
+provide a config_file to read connect_info from; if this is provided,
+config_stanza should also be provided to locate where the connect_info is
+in the config.  The config file should be in a format readable by
+Config::Any.
+
+=cut
+
+has config_file => (
+  is      => 'ro',
+  isa     => File,
+  coerce  => 1,
+);
+
+
+=head2 config_stanza
+
+for use with config_file, should be a '::' delimited 'path' to the
+connection information in the config; designed for use with Catalyst
+config files
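+
+For example (the stanza name is illustrative):
+
+  # resolves to $config->{Model}{DB}{connect_info}
+  config_stanza => 'Model::DB::connect_info'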
+
+=cut
+
+has 'config_stanza' => (
+  is  => 'ro',
+  isa => Str,
+);
+
+
+=head2 config
+
+Instead of loading from a file, the configuration can be provided directly
+as a hash ref.  Please note config_stanza will still be required.
+
+=cut
+
+has config => (
+  is          => 'ro',
+  isa         => DBICHashRef,
+  lazy_build  => 1,
+);
+
+sub _build_config {
+  my ($self) = @_;
+
+  eval { require Config::Any }
+    or die ("Config::Any is required to parse the config file.\n");
+
+  my $cfg = Config::Any->load_files ( {files => [$self->config_file], use_ext =>1, flatten_to_hash=>1});
+
+  # just grab the config from the config file
+  $cfg = $cfg->{$self->config_file};
+  return $cfg;
+}
+
+
+=head2 sql_dir
+
+The location where sql ddl files should be created or found for an upgrade.
+
+=cut
+
+has 'sql_dir' => (
+  is      => 'ro',
+  isa     => Dir,
+  coerce  => 1,
+);
+
+
+=head2 version
+
+Used for install, the version which will be 'installed' in the schema
+
+=cut
+
+has version => (
+  is  => 'rw',
+  isa => Str,
+);
+
+
+=head2 preversion
+
+Previous version of the schema to create an upgrade diff for; the full SQL
+for that version must be in the sql_dir
+
+=cut
+
+has preversion => (
+  is  => 'rw',
+  isa => Str,
+);
+
+
+=head2 force
+
+Try to force certain operations.
+
+=cut
+
+has force => (
+  is  => 'rw',
+  isa => Bool,
+);
+
+
+=head2 quiet
+
+Be less verbose about actions
+
+=cut
+
+has quiet => (
+  is  => 'rw',
+  isa => Bool,
+);
+
+has '_confirm' => (
+  is  => 'bare',
+  isa => Bool,
+);
+
+
+=head1 METHODS
+
+=head2 create
+
+=over 4
+
+=item Arguments: $sqlt_type, \%sqlt_args, $preversion
+
+=back
+
+L<create> will generate sql for the supplied schema_class in sql_dir.  The
+flavour of sql to generate can be controlled by supplying a sqlt_type, which
+should be a L<SQL::Translator> name.
+
+Arguments for L<SQL::Translator> can be supplied in the sqlt_args hashref.
+
+Optional preversion can be supplied to generate a diff to be used by upgrade.
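+
+For example (the producer name and sqlt_args are illustrative):
+
+  # write PostgreSQL DDL for the current schema version into sql_dir
+  $admin->create('PostgreSQL', { add_drop_table => 0 });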
+
+=cut
+
+sub create {
+  my ($self, $sqlt_type, $sqlt_args, $preversion) = @_;
+
+  $preversion ||= $self->preversion();
+
+  my $schema = $self->schema();
+  # create the dir if does not exist
+  $self->sql_dir->mkpath() if ( ! -d $self->sql_dir);
+
+  $schema->create_ddl_dir( $sqlt_type, (defined $schema->schema_version ? $schema->schema_version : ""), $self->sql_dir->stringify, $preversion, $sqlt_args );
+}
+
+
+=head2 upgrade
+
+=over 4
+
+=item Arguments: <none>
+
+=back
+
+upgrade will attempt to upgrade the connected database to the same version as the schema_class.
+B<MAKE SURE YOU BACKUP YOUR DB FIRST>
+
+=cut
+
+sub upgrade {
+  my ($self) = @_;
+  my $schema = $self->schema();
+  if (!$schema->get_db_version()) {
+    # schema is unversioned
+    $schema->throw_exception ("Could not determin current schema version, please either install() or deploy().\n");
+  } else {
+    my $ret = $schema->upgrade();
+    return $ret;
+  }
+}
+
+
+=head2 install
+
+=over 4
+
+=item Arguments: $version
+
+=back
+
+install is here to help when you want to move to
+L<DBIx::Class::Schema::Versioned> and have an existing database.  install
+will take a version, add the version tracking tables and 'install' the
+version.  No further ddl modification takes place.  Setting the force
+attribute to a true value will allow overriding of already versioned
+databases.
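+
+For example:
+
+  # mark an existing, unversioned database as being at version 1.0
+  $admin->install('1.0');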
+
+=cut
+
+sub install {
+  my ($self, $version) = @_;
+
+  my $schema = $self->schema();
+  $version ||= $self->version();
+  if (!$schema->get_db_version() ) {
+    # schema is unversioned
+    print "Going to install schema version\n";
+    my $ret = $schema->install($version);
+    print "retun is $ret\n";
+  }
+  elsif ($schema->get_db_version() and $self->force ) {
+    carp "Forcing install may not be a good idea";
+    if($self->_confirm() ) {
+      $self->schema->_set_db_version({ version => $version});
+    }
+  }
+  else {
+    $schema->throw_exception ("Schema already has a version. Try upgrade instead.\n");
+  }
+
+}
+
+
+=head2 deploy
+
+=over 4
+
+=item Arguments: $args
+
+=back
+
+deploy will create the schema at the connected database.  C<$args> are passed straight to 
+L<DBIx::Class::Schema/deploy>.
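+
+For example (the sqltargs key is illustrative):
+
+  $admin->deploy({ add_drop_table => 0 });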
+
+=cut
+
+sub deploy {
+  my ($self, $args) = @_;
+  my $schema = $self->schema();
+  if (!$schema->get_db_version() ) {
+    # schema is unversioned
+    $schema->deploy( $args, $self->sql_dir)
+      or $schema->throw_exception ("Could not deploy schema.\n"); # FIXME deploy() does not return 1/0 on success/fail
+  } else {
+    $schema->throw_exception("A versioned schema has already been deployed, try upgrade instead.\n");
+  }
+}
+
+=head2 insert
+
+=over 4
+
+=item Arguments: $rs, $set
+
+=back
+
+insert takes the name of a resultset from the schema_class and a hashref of data to insert
+into that resultset.
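+
+For example (resultset and column names are illustrative):
+
+  $admin->insert('Employee', { name => 'New_Employee' });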
+
+=cut
+
+sub insert {
+  my ($self, $rs, $set) = @_;
+
+  $rs ||= $self->resultset();
+  $set ||= $self->set();
+  my $resultset = $self->schema->resultset($rs);
+  my $obj = $resultset->create( $set );
+  print ''.ref($resultset).' ID: '.join(',',$obj->id())."\n" if (!$self->quiet);
+}
+
+
+=head2 update
+
+=over 4
+
+=item Arguments: $rs, $set, $where
+
+=back
+
+update takes the name of a resultset from the schema_class, a hashref of
+data to update and a where hashref used to form the search for the rows to
+update.
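+
+For example (resultset and column names are illustrative):
+
+  # renames all employees currently called New_Employee
+  $admin->update('Employee', { name => 'Renamed' }, { name => 'New_Employee' });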
+
+=cut
+
+sub update {
+  my ($self, $rs, $set, $where) = @_;
+
+  $rs ||= $self->resultset();
+  $where ||= $self->where();
+  $set ||= $self->set();
+  my $resultset = $self->schema->resultset($rs);
+  $resultset = $resultset->search( ($where||{}) );
+
+  my $count = $resultset->count();
+  print "This action will modify $count ".ref($resultset)." records.\n" if (!$self->quiet);
+
+  if ( $self->force || $self->_confirm() ) {
+    $resultset->update_all( $set );
+  }
+}
+
+
+=head2 delete
+
+=over 4
+
+=item Arguments: $rs, $where, $attrs
+
+=back
+
+delete takes the name of a resultset from the schema_class, a where hashref
+and an attrs hashref to pass to ->search.  The found data is deleted and
+cannot be recovered.
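+
+For example (resultset and column names are illustrative):
+
+  $admin->delete('Employee', { name => 'Renamed' });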
+
+=cut
+
+sub delete {
+  my ($self, $rs, $where, $attrs) = @_;
+
+  $rs ||= $self->resultset();
+  $where ||= $self->where();
+  $attrs ||= $self->attrs();
+  my $resultset = $self->schema->resultset($rs);
+  $resultset = $resultset->search( ($where||{}), ($attrs||()) );
+
+  my $count = $resultset->count();
+  print "This action will delete $count ".ref($resultset)." records.\n" if (!$self->quiet);
+
+  if ( $self->force || $self->_confirm() ) {
+    $resultset->delete_all();
+  }
+}
+
+
+=head2 select
+
+=over 4
+
+=item Arguments: $rs, $where, $attrs
+
+=back
+
+select takes the name of a resultset from the schema_class, a where hashref
+and an attrs hashref to pass to ->search.  The found data is returned in an
+array ref where the first row will be the columns list.
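+
+For example (resultset and column names are illustrative):
+
+  # e.g. [ [ 'id', 'name' ], [ 1, 'Renamed' ], ... ]
+  my $data = $admin->select('Employee', { name => 'Renamed' });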
+
+=cut
+
+sub select {
+  my ($self, $rs, $where, $attrs) = @_;
+
+  $rs ||= $self->resultset();
+  $where ||= $self->where();
+  $attrs ||= $self->attrs();
+  my $resultset = $self->schema->resultset($rs);
+  $resultset = $resultset->search( ($where||{}), ($attrs||()) );
+
+  my @data;
+  my @columns = $resultset->result_source->columns();
+  push @data, [@columns]; # first row is the column list
+
+  while (my $row = $resultset->next()) {
+    my @fields;
+    foreach my $column (@columns) {
+      push( @fields, $row->get_column($column) );
+    }
+    push @data, [@fields];
+  }
+
+  return \@data;
+}
+
+sub _confirm {
+  my ($self) = @_;
+  print "Are you sure you want to do this? (type YES to confirm) \n";
+  # mainly here for testing
+  return 1 if ($self->meta->get_attribute('_confirm')->get_value($self));
+  my $response = <STDIN>;
+  return 1 if ($response=~/^YES/);
+  return;
+}
+
+sub _find_stanza {
+  my ($self, $cfg, $stanza) = @_;
+  my @path = split /::/, $stanza;
+  while (my $path = shift @path) {
+    if (exists $cfg->{$path}) {
+      $cfg = $cfg->{$path};
+    }
+    else {
+      die ("Could not find $stanza in config, $path does not seem to exist.\n");
+    }
+  }
+  return $cfg;
+}
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself
+
+=cut
+
+1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/AbstractSearch.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/AbstractSearch.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/AbstractSearch.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -10,7 +10,7 @@
 
 =head1 SYNOPSIS
 
-See DBIx::Class::CDBICompat for directions for use.
+See DBIx::Class::CDBICompat for usage directions.
 
 =head1 DESCRIPTION
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ColumnCase.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ColumnCase.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ColumnCase.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -16,10 +16,10 @@
 
 sub has_a {
     my($self, $col, @rest) = @_;
-    
+
     $self->_declare_has_a(lc $col, @rest);
     $self->_mk_inflated_column_accessor($col);
-    
+
     return 1;
 }
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ColumnGroups.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ColumnGroups.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ColumnGroups.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -73,7 +73,7 @@
 
   sub _has_custom_accessor {
     my($class, $name) = @_;
-    
+
     no strict 'refs';
     my $existing_accessor = *{$class .'::'. $name}{CODE};
     return $existing_accessor && !$our_accessors{$existing_accessor};
@@ -90,7 +90,7 @@
       my $fullname = join '::', $class, $name;
       *$fullname = Sub::Name::subname $fullname, $accessor;
     }
-    
+
     $our_accessors{$accessor}++;
 
     return 1;
@@ -120,7 +120,7 @@
     # warn "  $field $alias\n";
     {
       no strict 'refs';
-      
+
       $class->_deploy_accessor($name,  $accessor);
       $class->_deploy_accessor($alias, $accessor);
     }

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -11,7 +11,7 @@
 
 =head1 SYNOPSIS
 
-See DBIx::Class::CDBICompat for directions for use.
+See DBIx::Class::CDBICompat for usage directions.
 
 =head1 DESCRIPTION
 
@@ -39,16 +39,16 @@
     my $class = shift;
 
     my $new = $class->next::method(@_);
-    
+
     $new->_make_columns_as_hash;
-    
+
     return $new;
 }
 
 
 sub _make_columns_as_hash {
     my $self = shift;
-    
+
     for my $col ($self->columns) {
         if( exists $self->{$col} ) {
             warn "Skipping mapping $col to a hash key because it exists";

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Constructor.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Constructor.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Constructor.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,6 +3,8 @@
 
 use base qw(DBIx::Class::CDBICompat::ImaDBI);
 
+use Sub::Name();
+
 use strict;
 use warnings;
 
@@ -22,7 +24,7 @@
     return carp("$method already exists in $class")
             if *$meth{CODE};
 
-    *$meth = sub {
+    *$meth = Sub::Name::subname $meth => sub {
             my $self = shift;
             $self->sth_to_objects($self->sql_Retrieve($fragment), \@_);
     };

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Copy.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Copy.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Copy.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -12,7 +12,7 @@
 
 =head1 SYNOPSIS
 
-See DBIx::Class::CDBICompat for directions for use.
+See DBIx::Class::CDBICompat for usage directions.
 
 =head1 DESCRIPTION
 
@@ -25,7 +25,7 @@
 sub copy {
     my($self, $arg) = @_;
     return $self->next::method($arg) if ref $arg;
-    
+
     my @primary_columns = $self->primary_columns;
     croak("Need hash-ref to edit copied column values")
         if @primary_columns > 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ImaDBI.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ImaDBI.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/ImaDBI.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -59,7 +59,7 @@
             $rel_obj->{cond}, $to, $from) );
         return $join;
       }
-        
+
   } );
 
 sub db_Main {
@@ -115,7 +115,7 @@
 
 sub transform_sql {
   my ($class, $sql, @args) = @_;
-  
+
   my $tclass = $class->sql_transformer_class;
   $class->ensure_class_loaded($tclass);
   my $t = $tclass->new($class, $sql, @args);

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Iterator.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Iterator.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Iterator.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -10,7 +10,7 @@
 
 =head1 SYNOPSIS
 
-See DBIx::Class::CDBICompat for directions for use.
+See DBIx::Class::CDBICompat for usage directions.
 
 =head1 DESCRIPTION
 
@@ -25,7 +25,7 @@
 
 sub _init_result_source_instance {
   my $class = shift;
-  
+
   my $table = $class->next::method(@_);
   $table->resultset_class("DBIx::Class::CDBICompat::Iterator::ResultSet");
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/LazyLoading.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/LazyLoading.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/LazyLoading.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -16,12 +16,12 @@
 # request in case the database modifies the new value (say, via a trigger)
 sub update {
     my $self = shift;
-    
+
     my @dirty_columns = keys %{$self->{_dirty_columns}};
-    
+
     my $ret = $self->next::method(@_);
     $self->_clear_column_data(@dirty_columns);
-    
+
     return $ret;
 }
 
@@ -30,12 +30,12 @@
 sub create {
     my $class = shift;
     my($data) = @_;
-    
+
     my @columns = keys %$data;
-    
+
     my $obj = $class->next::method(@_);
     return $obj unless defined $obj;
-    
+
     my %primary_cols = map { $_ => 1 } $class->primary_columns;
     my @data_cols = grep !$primary_cols{$_}, @columns;
     $obj->_clear_column_data(@data_cols);
@@ -46,7 +46,7 @@
 
 sub _clear_column_data {
     my $self = shift;
-    
+
     delete $self->{_column_data}{$_}     for @_;
     delete $self->{_inflated_column}{$_} for @_;
 }
@@ -71,7 +71,7 @@
   for my $col ($self->primary_columns) {
     $changes->{$col} = undef unless exists $changes->{$col};
   }
-  
+
   return $self->next::method($changes);
 }
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -20,9 +20,9 @@
 
 sub nocache {
     my $class = shift;
-    
+
     return $class->__nocache(@_) if @_;
-    
+
     return 1 if $Class::DBI::Weaken_Is_Available == 0;
     return $class->__nocache;
 }
@@ -74,9 +74,9 @@
 sub inflate_result {
   my ($class, @rest) = @_;
   my $new = $class->next::method(@rest);
-  
+
   return $new if $new->nocache;
-  
+
   if (my $key = $new->ID) {
     #warn "Key $key";
     my $live = $class->live_object_index;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Relationship.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Relationship.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Relationship.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -25,7 +25,7 @@
 
 sub new {
     my($class, $args) = @_;
-    
+
     return bless $args, $class;
 }
 
@@ -34,7 +34,7 @@
     my $code = sub {
         $_[0]->{$key};
     };
-    
+
     no strict 'refs';
     *{$method} = Sub::Name::subname $method, $code;
 }

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Relationships.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Relationships.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Relationships.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -24,10 +24,10 @@
 
 sub has_a {
     my($self, $col, @rest) = @_;
-    
+
     $self->_declare_has_a($col, @rest);
     $self->_mk_inflated_column_accessor($col);
-    
+
     return 1;
 }
 
@@ -37,7 +37,7 @@
   $self->throw_exception( "No such column ${col}" )
    unless $self->has_column($col);
   $self->ensure_class_loaded($f_class);
-  
+
   my $rel_info;
 
   if ($args{'inflate'} || $args{'deflate'}) { # Non-database has_a
@@ -50,7 +50,7 @@
       $args{'deflate'} = sub { shift->$meth; };
     }
     $self->inflate_column($col, \%args);
-    
+
     $rel_info = {
         class => $f_class
     };
@@ -59,9 +59,9 @@
     $self->belongs_to($col, $f_class);
     $rel_info = $self->result_source_instance->relationship_info($col);
   }
-  
+
   $rel_info->{args} = \%args;
-  
+
   $self->_extend_meta(
     has_a => $col,
     $rel_info
@@ -72,7 +72,7 @@
 
 sub _mk_inflated_column_accessor {
     my($class, $col) = @_;
-    
+
     return $class->mk_group_accessors('inflated_column' => $col);
 }
 
@@ -137,7 +137,7 @@
 
 sub might_have {
   my ($class, $rel, $f_class, @columns) = @_;
-  
+
   my $ret;
   if (ref $columns[0] || !defined $columns[0]) {
     $ret = $class->next::method($rel, $f_class, @columns);
@@ -153,7 +153,7 @@
     might_have => $rel,
     $rel_info
   );
-  
+
   return $ret;
 }
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Retrieve.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Retrieve.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/Retrieve.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -74,7 +74,7 @@
     my $class = shift;
     my $obj = $class->resultset_instance->new_result(@_);
     $obj->in_storage(1);
-    
+
     return $obj;
 }
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/TempColumns.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/TempColumns.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat/TempColumns.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -11,7 +11,7 @@
 
 sub _add_column_group {
   my ($class, $group, @cols) = @_;
-  
+
   return $class->next::method($group, @cols) unless $group eq 'TEMP';
 
   my %new_cols = map { $_ => 1 } @cols;
@@ -61,11 +61,11 @@
 
 sub set {
   my($self, %data) = @_;
-  
+
   my $temp_data = $self->_extract_temp_data(\%data);
-  
+
   $self->set_temp($_, $temp_data->{$_}) for keys %$temp_data;
-  
+
   return $self->next::method(%data);
 }
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/CDBICompat.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -11,7 +11,7 @@
     DBIx::ContextualFetch
     Clone
 );
-                
+
 my @didnt_load;
 for my $module (@Extra_Modules) {
     push @didnt_load, $module unless eval qq{require $module};
@@ -91,7 +91,7 @@
 
 =head2 Choosing Features
 
-In fact, this class is just a receipe containing all the features emulated.
+In fact, this class is just a recipe containing all the features emulated.
 If you like, you can choose which features to emulate by building your 
 own class and loading it like this:
 
@@ -145,17 +145,17 @@
 
 =item Relationships
 
-Relationships between tables (has_a, has_many...) must be delcared after all tables in the relationship have been declared.  Thus the usual CDBI idiom of declaring columns and relationships for each class together will not work.  They must instead be done like so:
+Relationships between tables (has_a, has_many...) must be declared after all tables in the relationship have been declared.  Thus the usual CDBI idiom of declaring columns and relationships for each class together will not work.  They must instead be done like so:
 
     package Foo;
     use base qw(Class::DBI);
-    
+
     Foo->table("foo");
     Foo->columns( All => qw(this that bar) );
 
     package Bar;
     use base qw(Class::DBI);
-    
+
     Bar->table("bar");
     Bar->columns( All => qw(up down) );
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Componentised.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Componentised.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Componentised.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -5,30 +5,39 @@
 use warnings;
 
 use base 'Class::C3::Componentised';
-use Carp::Clan qw/^DBIx::Class/;
+use Carp::Clan qw/^DBIx::Class|^Class::C3::Componentised/;
+use mro 'c3';
 
+# this warns of subtle bugs introduced by UTF8Columns' hacky handling of store_column
 sub inject_base {
-  my ($class, $target, @to_inject) = @_;
-  {
-    no strict 'refs';
-    foreach my $to (reverse @to_inject) {
-      my @comps = qw(DigestColumns ResultSetManager Ordered UTF8Columns);
-           # Add components here that need to be loaded before Core
-      foreach my $first_comp (@comps) {
-        if ($to eq 'DBIx::Class::Core' &&
-            $target->isa("DBIx::Class::${first_comp}")) {
-          carp "Possible incorrect order of components in ".
-               "${target}::load_components($first_comp) call: Core loaded ".
-               "before $first_comp. See the documentation for ".
-               "DBIx::Class::$first_comp for more information";
-        }
+  my $class = shift;
+  my $target = shift;
+
+  my @present_components = (@{mro::get_linear_isa ($target)||[]});
+
+  no strict 'refs';
+  for my $comp (reverse @_) {
+
+    if ($comp->isa ('DBIx::Class::UTF8Columns') ) {
+      require B;
+      my @broken;
+
+      for (@present_components) {
+        my $cref = $_->can ('store_column')
+         or next;
+        push @broken, $_ if B::svref_2object($cref)->STASH->NAME ne 'DBIx::Class::Row';
       }
-      unshift( @{"${target}::ISA"}, $to )
-        unless ($target eq $to || $target->isa($to));
+
+      carp "Incorrect loading order of $comp by ${target} will affect other components overriding store_column ("
+          . join (', ', @broken)
+          .'). Refer to the documentation of DBIx::Class::UTF8Columns for more info'
+       if @broken;
     }
+
+    unshift @present_components, $comp;
   }
 
-  $class->next::method($target, @to_inject);
+  $class->next::method($target, @_);
 }
 
 1;

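A sketch of the situation the new carp detects, using a hypothetical +My::FilterStoreColumn component that also overrides store_column; depending on load order, UTF8Columns' own store_column handling can silently bypass it:

    package My::Schema::Result::Artist;
    use base 'DBIx::Class::Core';

    # both components override store_column - their relative order here
    # decides whose implementation runs, hence the new warning
    __PACKAGE__->load_components(qw/+My::FilterStoreColumn UTF8Columns/);
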
Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Core.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Core.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Core.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -2,7 +2,6 @@
 
 use strict;
 use warnings;
-no warnings 'qw';
 
 use base qw/DBIx::Class/;
 
@@ -12,7 +11,8 @@
   PK::Auto
   PK
   Row
-  ResultSourceProxy::Table/);
+  ResultSourceProxy::Table
+/);
 
 1;
 
@@ -22,8 +22,8 @@
 
 =head1 SYNOPSIS
 
-  # In your table classes
-  __PACKAGE__->load_components(qw/Core/);
+  # In your result (table) classes
+  use base 'DBIx::Class::Core';
 
 =head1 DESCRIPTION
 
@@ -34,8 +34,6 @@
 
 =over 4
 
-=item L<DBIx::Class::Serialize::Storable>
-
 =item L<DBIx::Class::InflateColumn>
 
 =item L<DBIx::Class::Relationship>

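Under the new synopsis a complete result class now looks roughly like this (hypothetical table and columns):

    package My::Schema::Result::Artist;
    use base 'DBIx::Class::Core';

    __PACKAGE__->table('artist');
    __PACKAGE__->add_columns(qw/artistid name/);
    __PACKAGE__->set_primary_key('artistid');

    1;
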
Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Cursor.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Cursor.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Cursor.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,6 +3,8 @@
 use strict;
 use warnings;
 
+use base qw/DBIx::Class/;
+
 =head1 NAME
 
 DBIx::Class::Cursor - Abstract object representing a query cursor on a

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/DB.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/DB.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/DB.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -174,7 +174,7 @@
 sub result_source_instance {
   my $class = shift;
   $class = ref $class || $class;
-  
+
   if (@_) {
     my $source = $_[0];
     $class->_result_source_instance([$source, $class]);
@@ -186,7 +186,7 @@
   return unless Scalar::Util::blessed($source);
 
   if ($result_class ne $class) {  # new class
-    # Give this new class it's own source and register it.
+    # Give this new class its own source and register it.
     $source = $source->new({ 
         %$source, 
         source_name  => $class,

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Exception.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Exception.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Exception.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -61,7 +61,7 @@
     else {
         $msg = Carp::longmess($msg);
     }
-    
+
     my $self = { msg => $msg };
     bless $self => $class;
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/InflateColumn/DateTime.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/InflateColumn/DateTime.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/InflateColumn/DateTime.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -15,14 +15,14 @@
 columns to be of the datetime, timestamp or date datatype.
 
   package Event;
-  __PACKAGE__->load_components(qw/InflateColumn::DateTime Core/);
+  use base 'DBIx::Class::Core';
+
+  __PACKAGE__->load_components(qw/InflateColumn::DateTime/);
   __PACKAGE__->add_columns(
     starts_when => { data_type => 'datetime' },
+    create_date => { data_type => 'date' },
   );
 
-NOTE: You B<must> load C<InflateColumn::DateTime> B<before> C<Core>. See
-L<DBIx::Class::Manual::Component> for details.
-
 Then you can treat the specified column as a L<DateTime> object.
 
   print "This event starts the month of ".
@@ -40,17 +40,26 @@
   __PACKAGE__->add_columns(
     starts_when => { data_type => 'varchar', inflate_datetime => 1 }
   );
-  
+
   __PACKAGE__->add_columns(
     starts_when => { data_type => 'varchar', inflate_date => 1 }
   );
 
 It's also possible to explicitly skip inflation:
-  
+
   __PACKAGE__->add_columns(
     starts_when => { data_type => 'datetime', inflate_datetime => 0 }
   );
 
+NOTE: Don't rely on C<InflateColumn::DateTime> to parse date strings for you.
+The column is set directly for any non-references and C<InflateColumn::DateTime>
+is completely bypassed.  Instead, use an input parser to create a DateTime
+object. For instance, if your user input comes as a 'YYYY-MM-DD' string, you can
+use C<DateTime::Format::ISO8601> like this:
+
+  use DateTime::Format::ISO8601;
+  my $dt = DateTime::Format::ISO8601->parse_datetime('YYYY-MM-DD');
+
 =head1 DESCRIPTION
 
 This module figures out the type of DateTime::Format::* class to 
@@ -60,14 +69,22 @@
 that this feature is new as of 0.07, so it may not be perfect yet - bug
 reports to the list very much welcome).
 
+If the data_type of a field is C<date>, C<datetime> or C<timestamp> (or
+a derivative of these datatypes, e.g. C<timestamp with timezone>), this
+module will automatically call the appropriate parse/format method for
+deflation/inflation as defined in the storage class. For instance, for
+a C<datetime> field the methods C<parse_datetime> and C<format_datetime>
+would be called on deflation/inflation. If the storage class does not
+provide a specialized inflator/deflator, C<[parse|format]_datetime> will
+be used as a fallback. See L<DateTime::Format> for more information on
+date formatting.
+
 For more help with using components, see L<DBIx::Class::Manual::Component/USING>.
 
 =cut
 
 __PACKAGE__->load_components(qw/InflateColumn/);
 
-__PACKAGE__->mk_group_accessors('simple' => '__datetime_parser');
-
 =head2 register_column
 
 Chains with the L<DBIx::Class::Row/register_column> method, and sets
@@ -77,7 +94,7 @@
 In the case of an invalid date, L<DateTime> will throw an exception.  To
 bypass these exceptions and just have the inflation return undef, use
 the C<datetime_undef_if_invalid> option in the column info:
-  
+
     "broken_date",
     {
         data_type => "datetime",
@@ -110,25 +127,26 @@
     if ($type eq "timestamp with time zone" || $type eq "timestamptz") {
       $type = "timestamp";
       $info->{_ic_dt_method} ||= "timestamp_with_timezone";
+    } elsif ($type eq "timestamp without time zone") {
+      $type = "timestamp";
+      $info->{_ic_dt_method} ||= "timestamp_without_timezone";
+    } elsif ($type eq "smalldatetime") {
+      $type = "datetime";
+      $info->{_ic_dt_method} ||= "datetime";
     }
   }
 
-  my $timezone;
   if ( defined $info->{extra}{timezone} ) {
     carp "Putting timezone into extra => { timezone => '...' } has been deprecated, ".
          "please put it directly into the '$column' column definition.";
-    $timezone = $info->{extra}{timezone};
+    $info->{timezone} = $info->{extra}{timezone} unless defined $info->{timezone};
   }
 
-  my $locale;
   if ( defined $info->{extra}{locale} ) {
     carp "Putting locale into extra => { locale => '...' } has been deprecated, ".
          "please put it directly into the '$column' column definition.";
-    $locale = $info->{extra}{locale};
+    $info->{locale} = $info->{extra}{locale} unless defined $info->{locale};
   }
-  
-  $locale   = $info->{locale}   if defined $info->{locale};
-  $timezone = $info->{timezone} if defined $info->{timezone};
 
   my $undef_if_invalid = $info->{datetime_undef_if_invalid};
 
@@ -155,21 +173,12 @@
               $self->throw_exception ("Error while inflating ${value} for ${column} on ${self}: $err");
             }
 
-            $dt->set_time_zone($timezone) if $timezone;
-            $dt->set_locale($locale) if $locale;
-            return $dt;
+            return $obj->_post_inflate_datetime( $dt, \%info );
           },
           deflate => sub {
             my ($value, $obj) = @_;
-            if ($timezone) {
-                carp "You're using a floating timezone, please see the documentation of"
-                  . " DBIx::Class::InflateColumn::DateTime for an explanation"
-                  if ref( $value->time_zone ) eq 'DateTime::TimeZone::Floating'
-                      and not $info{floating_tz_ok}
-                      and not $ENV{DBIC_FLOATING_TZ_OK};
-                $value->set_time_zone($timezone);
-                $value->set_locale($locale) if $locale;
-            }
+
+            $value = $obj->_pre_deflate_datetime( $value, \%info );
             $obj->_deflate_from_datetime( $value, \%info );
           },
         }
@@ -198,12 +207,34 @@
 }
 
 sub _datetime_parser {
-  my $self = shift;
-  if (my $parser = $self->__datetime_parser) {
-    return $parser;
+  shift->result_source->storage->datetime_parser (@_);
+}
+
+sub _post_inflate_datetime {
+  my( $self, $dt, $info ) = @_;
+
+  $dt->set_time_zone($info->{timezone}) if defined $info->{timezone};
+  $dt->set_locale($info->{locale}) if defined $info->{locale};
+
+  return $dt;
+}
+
+sub _pre_deflate_datetime {
+  my( $self, $dt, $info ) = @_;
+
+  if (defined $info->{timezone}) {
+    carp "You're using a floating timezone, please see the documentation of"
+      . " DBIx::Class::InflateColumn::DateTime for an explanation"
+      if ref( $dt->time_zone ) eq 'DateTime::TimeZone::Floating'
+          and not $info->{floating_tz_ok}
+          and not $ENV{DBIC_FLOATING_TZ_OK};
+
+    $dt->set_time_zone($info->{timezone});
   }
-  my $parser = $self->result_source->storage->datetime_parser(@_);
-  return $self->__datetime_parser($parser);
+
+  $dt->set_locale($info->{locale}) if defined $info->{locale};
+
+  return $dt;
 }
 
 1;

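With the deprecation above, timezone and locale now belong directly in the column definition rather than under extra; a minimal sketch:

    __PACKAGE__->add_columns(
      starts_when => {
        data_type => 'datetime',
        timezone  => 'Europe/Berlin',  # formerly extra => { timezone => ... }
        locale    => 'de_DE',          # formerly extra => { locale => ... }
      },
    );
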
Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/InflateColumn/File.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/InflateColumn/File.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/InflateColumn/File.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -58,7 +58,7 @@
 
 sub insert {
     my $self = shift;
- 
+
     # cache our file columns so we can write them to the fs
     # -after- we have a PK
     my %file_column;
@@ -113,8 +113,10 @@
 
 In your L<DBIx::Class> table class:
 
-    __PACKAGE__->load_components( "PK::Auto", "InflateColumn::File", "Core" );
-    
+    use base 'DBIx::Class::Core';
+
+    __PACKAGE__->load_components(qw/InflateColumn::File/);
+
     # define your columns
     __PACKAGE__->add_columns(
         "id",
@@ -136,8 +138,8 @@
             size                => 255,
         },
     );
-    
 
+
 In your L<Catalyst::Controller> class:
 
 FileColumn requires a hash that contains L<IO::File> as handle and the file's
@@ -152,15 +154,15 @@
         body => '....'
     });
     $c->stash->{entry}=$entry;
-    
 
+
 And Place the following in your TT template
-    
+
     Article Subject: [% entry.subject %]
     Uploaded File: 
     <a href="/static/files/[% entry.id %]/[% entry.filename.filename %]">File</a>
     Body: [% entry.body %]
-    
+
 The file will be stored on the filesystem for later retrieval.  Calling delete
 on your resultset will delete the file from the filesystem.  Retrieval of the
 record automatically inflates the column back to the set hash with the
@@ -174,7 +176,7 @@
 
 =head2 _file_column_callback ($file,$ret,$target)
 
-method made to be overridden for callback purposes.
+Method made to be overridden for callback purposes.
 
 =cut
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/InflateColumn.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/InflateColumn.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/InflateColumn.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -26,7 +26,7 @@
 
 It can be used, for example, to automatically convert to and from
 L<DateTime> objects for your date and time fields. There's a
-conveniece component to actually do that though, try
+convenience component to actually do that though, try
 L<DBIx::Class::InflateColumn::DateTime>.
 
 It will handle all types of references except scalar references. It
@@ -79,7 +79,8 @@
   $self->throw_exception("inflate_column needs attr hashref")
     unless ref $attrs eq 'HASH';
   $self->column_info($col)->{_inflate_info} = $attrs;
-  $self->mk_group_accessors('inflated_column' => [$self->column_info($col)->{accessor} || $col, $col]);
+  my $acc = $self->column_info($col)->{accessor};
+  $self->mk_group_accessors('inflated_column' => [ (defined $acc ? $acc : $col), $col]);
   return 1;
 }
 
@@ -113,7 +114,7 @@
 
 Fetch a column value in its inflated state.  This is directly
 analogous to L<DBIx::Class::Row/get_column> in that it only fetches a
-column already retreived from the database, and then inflates it.
+column already retrieved from the database, and then inflates it.
 Throws an exception if the column requested is not an inflated column.
 
 =cut
@@ -124,8 +125,11 @@
     unless exists $self->column_info($col)->{_inflate_info};
   return $self->{_inflated_column}{$col}
     if exists $self->{_inflated_column}{$col};
-  return $self->{_inflated_column}{$col} =
-           $self->_inflated_column($col, $self->get_column($col));
+
+  my $val = $self->get_column($col);
+  return $val if ref $val eq 'SCALAR';  # that would be a not-yet-reloaded scalarref update
+
+  return $self->{_inflated_column}{$col} = $self->_inflated_column($col, $val);
 }
 
 =head2 set_inflated_column
@@ -175,7 +179,7 @@
 =over 4
 
 =item L<DBIx::Class::Core> - This component is loaded as part of the
-      "core" L<DBIx::Class> components; generally there is no need to
+      C<core> L<DBIx::Class> components; generally there is no need to
       load it directly
 
 =back

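For context, the inflate_column() call whose accessor handling is adjusted above is typically used like so (a sketch; DateTime::Format::Pg stands in for whatever formatter matches your column):

    __PACKAGE__->inflate_column('insert_time', {
      inflate => sub { DateTime::Format::Pg->parse_datetime(shift) },
      deflate => sub { DateTime::Format::Pg->format_datetime(shift) },
    });
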
Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Component.pod
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Component.pod	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Component.pod	2010-02-16 10:26:12 UTC (rev 8720)
@@ -12,31 +12,29 @@
 
 =head1 USING
 
-Components are loaded using the load_components() method within your 
+Components are loaded using the load_components() method within your
 DBIx::Class classes.
 
   package My::Thing;
-  use base qw( DBIx::Class );
-  __PACKAGE__->load_components(qw/ PK::Auto Core /);
+  use base qw( DBIx::Class::Core );
+  __PACKAGE__->load_components(qw/InflateColumn::DateTime TimeStamp/);
 
-Generally you do not want to specify the full package name 
-of a component, instead take off the DBIx::Class:: part of 
-it and just include the rest.  If you do want to load a 
-component outside of the normal namespace you can do so 
+Generally you do not want to specify the full package name
+of a component, instead take off the DBIx::Class:: part of
+it and just include the rest.  If you do want to load a
+component outside of the normal namespace you can do so
 by prepending the component name with a +.
 
   __PACKAGE__->load_components(qw/ +My::Component /);
 
-Once a component is loaded all of it's methods, or otherwise, 
+Once a component is loaded all of its methods, or otherwise,
 that it provides will be available in your class.
 
-The order in which is you load the components may be 
-very important, depending on the component.  The general 
-rule of thumb is to first load extra components and then 
-load core ones last.  If you are not sure, then read the 
-docs for the components you are using and see if they 
-mention anything about the order in which you should load 
-them.
+The order in which you load the components may be very
+important, depending on the component. If you are not sure,
+then read the docs for the components you are using and see
+if they mention anything about the order in which you should
+load them.
 
 =head1 CREATING COMPONENTS
 
@@ -47,11 +45,11 @@
   # Create methods, accessors, load other components, etc.
   1;
 
-When a component is loaded it is included in the calling 
-class' inheritance chain using L<Class::C3>.  As well as 
-providing custom utility methods, a component may also 
-override methods provided by other core components, like 
-L<DBIx::Class::Row> and others.  For example, you 
+When a component is loaded it is included in the calling
+class' inheritance chain using L<Class::C3>.  As well as
+providing custom utility methods, a component may also
+override methods provided by other core components, like
+L<DBIx::Class::Row> and others.  For example, you
 could override the insert and delete methods.
 
   sub insert {
@@ -84,6 +82,8 @@
 These components provide extra functionality beyond 
 basic functionality that you can't live without.
 
+L<DBIx::Class::Serialize::Storable> - Hooks for Storable freeze/thaw.
+
 L<DBIx::Class::CDBICompat> - Class::DBI Compatibility layer.
 
 L<DBIx::Class::FormTools> - Build forms with multiple interconnected objects.
@@ -106,26 +106,22 @@
 
 =head2 Experimental
 
-These components are under development, there interfaces may 
-change, they may not work, etc.  So, use them if you want, but 
+These components are under development, their interfaces may
+change, they may not work, etc.  So, use them if you want, but
 be warned.
 
-L<DBIx::Class::Serialize> - Hooks for Storable freeze/thaw.
-
-L<DBIx::Class::Serialize::Storable> - Hooks for Storable freeze/thaw.
-
 L<DBIx::Class::Validation> - Validate all data before submitting to your database.
 
 =head2 Core
 
-These are the components that all, or nearly all, people will use 
-without even knowing it.  These components provide most of 
+These are the components that all, or nearly all, people will use
+without even knowing it.  These components provide most of
 DBIx::Class' functionality.
 
-L<DBIx::Class::AccessorGroup> - Lets you build groups of accessors.
-
 L<DBIx::Class::Core> - Loads various components that "most people" would want.
 
+L<DBIx::Class::AccessorGroup> - Lets you build groups of accessors.
+
 L<DBIx::Class::DB> - Non-recommended classdata schema component.
 
 L<DBIx::Class::InflateColumn> - Automatically create objects from column data.
@@ -145,4 +141,3 @@
 =head1 AUTHOR
 
 Aran Clary Deltac <bluefeet at cpan.org>
-

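The insert/delete overriding described in the pod above follows the usual next::method pattern; a minimal sketch of such a component:

    package My::Component;
    use base 'DBIx::Class';

    sub insert {
      my $self = shift;
      # do something before the row is inserted
      my $ret = $self->next::method(@_);
      # do something after
      return $ret;
    }

    1;
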
Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Cookbook.pod
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Cookbook.pod	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Cookbook.pod	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,4 +1,4 @@
-=head1 NAME 
+=head1 NAME
 
 DBIx::Class::Manual::Cookbook - Miscellaneous recipes
 
@@ -19,6 +19,8 @@
 
   return $rs->all(); # all records for page 1
 
+  return $rs->page(2); # resultset for page 2
+
 You can get a L<Data::Page> object for the resultset (suitable for use
 in e.g. a template) using the C<pager> method:
 
@@ -35,8 +37,11 @@
 
 This results in something like the following C<WHERE> clause:
 
-  WHERE artist LIKE '%Lamb%' AND title LIKE '%Fear of Fours%'
+  WHERE artist LIKE ? AND title LIKE ?
 
+And the following bind values for the placeholders: C<'%Lamb%'>, C<'%Fear of
+Fours%'>.
+
 Other queries might require slightly more complex logic:
 
   my @albums = $schema->resultset('Album')->search({
@@ -59,28 +64,30 @@
 
 =head2 Retrieve one and only one row from a resultset
 
-Sometimes you need only the first "top" row of a resultset. While this can be
-easily done with L<< $rs->first|DBIx::Class::ResultSet/first >>, it is suboptimal,
-as a full blown cursor for the resultset will be created and then immediately
-destroyed after fetching the first row object. 
-L<< $rs->single|DBIx::Class::ResultSet/single >> is
-designed specifically for this case - it will grab the first returned result
-without even instantiating a cursor. 
+Sometimes you need only the first "top" row of a resultset. While this
+can be easily done with L<< $rs->first|DBIx::Class::ResultSet/first
+>>, it is suboptimal, as a full blown cursor for the resultset will be
+created and then immediately destroyed after fetching the first row
+object.  L<< $rs->single|DBIx::Class::ResultSet/single >> is designed
+specifically for this case - it will grab the first returned result
+without even instantiating a cursor.
 
-Before replacing all your calls to C<first()> with C<single()> please observe the 
+Before replacing all your calls to C<first()> with C<single()> please observe the
 following CAVEATS:
 
 =over
 
 =item *
+
 While single() takes a search condition just like search() does, it does
 _not_ accept search attributes. However one can always chain a single() to
 a search():
 
-  my $top_cd = $cd_rs -> search({}, { order_by => 'rating' }) -> single;
+  my $top_cd = $cd_rs->search({}, { order_by => 'rating' })->single;
 
 
 =item *
+
 Since single() is the engine behind find(), it is designed to fetch a
 single row per database query. Thus a warning will be issued when the
 underlying SELECT returns more than one row. Sometimes however this usage
@@ -88,7 +95,7 @@
 at the top of the charts at any given time. If you know what you are doing,
 you can silence the warning by explicitly limiting the resultset size:
 
-  my $top_cd = $cd_rs -> search ({}, { order_by => 'rating', rows => 1 }) -> single;
+  my $top_cd = $cd_rs->search ({}, { order_by => 'rating', rows => 1 })->single;
 
 =back
 
@@ -96,80 +103,63 @@
 
 Sometimes you have to run arbitrary SQL because your query is too complex
 (e.g. it contains Unions, Sub-Selects, Stored Procedures, etc.) or has to
-be optimized for your database in a special way, but you still want to 
-get the results as a L<DBIx::Class::ResultSet>. 
-The recommended way to accomplish this is by defining a separate ResultSource 
-for your query. You can then inject complete SQL statements using a scalar 
-reference (this is a feature of L<SQL::Abstract>).
+be optimized for your database in a special way, but you still want to
+get the results as a L<DBIx::Class::ResultSet>.
 
-Say you want to run a complex custom query on your user data, here's what
-you have to add to your User class:
+This is accomplished by defining a
+L<ResultSource::View|DBIx::Class::ResultSource::View> for your query,
+almost like you would define a regular ResultSource.
 
-  package My::Schema::Result::User;
-  
-  use base qw/DBIx::Class/;
-  
-  # ->load_components, ->table, ->add_columns, etc.
+  package My::Schema::Result::UserFriendsComplex;
+  use strict;
+  use warnings;
+  use base qw/DBIx::Class::Core/;
 
-  # Make a new ResultSource based on the User class
-  my $source = __PACKAGE__->result_source_instance();
-  my $new_source = $source->new( $source );
-  $new_source->source_name( 'UserFriendsComplex' );
-  
-  # Hand in your query as a scalar reference
-  # It will be added as a sub-select after FROM,
-  # so pay attention to the surrounding brackets!
-  $new_source->name( \<<SQL );
-  ( SELECT u.* FROM user u 
-  INNER JOIN user_friends f ON u.id = f.user_id 
-  WHERE f.friend_user_id = ?
-  UNION 
-  SELECT u.* FROM user u 
-  INNER JOIN user_friends f ON u.id = f.friend_user_id 
-  WHERE f.user_id = ? )
-  SQL 
+  __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
 
-  # Finally, register your new ResultSource with your Schema
-  My::Schema->register_extra_source( 'UserFriendsComplex' => $new_source );
+  # ->table, ->add_columns, etc.
 
+  # do not attempt to deploy() this view
+  __PACKAGE__->result_source_instance->is_virtual(1);
+
+  __PACKAGE__->result_source_instance->view_definition(q[
+    SELECT u.* FROM user u
+    INNER JOIN user_friends f ON u.id = f.user_id
+    WHERE f.friend_user_id = ?
+    UNION
+    SELECT u.* FROM user u
+    INNER JOIN user_friends f ON u.id = f.friend_user_id
+    WHERE f.user_id = ?
+  ]);
+
 Next, you can execute your complex query using bind parameters like this:
 
-  my $friends = [ $schema->resultset( 'UserFriendsComplex' )->search( {}, 
+  my $friends = $schema->resultset( 'UserFriendsComplex' )->search( {},
     {
       bind  => [ 12345, 12345 ]
     }
-  ) ];
-  
+  );
+
 ... and you'll get back a perfect L<DBIx::Class::ResultSet> (except, of course,
-that you cannot modify the rows it contains, ie. cannot call L</update>,
+that you cannot modify the rows it contains, e.g. cannot call L</update>,
 L</delete>, ...  on it).
 
-If you prefer to have the definitions of these custom ResultSources in separate
-files (instead of stuffing all of them into the same resultset class), you can
-achieve the same with subclassing the resultset class and defining the
-ResultSource there:
+Note that you cannot have bind parameters unless C<is_virtual> is set to true.
 
-  package My::Schema::Result::UserFriendsComplex;
+=over
 
-  use My::Schema::Result::User;
-  use base qw/My::Schema::Result::User/;
+=item * NOTE
 
-  __PACKAGE__->table('dummy');  # currently must be called before anything else
+If you're using the old deprecated C<< $rsrc_instance->name(\'( SELECT ...') >>
+method for custom SQL execution, you are highly encouraged to update your code 
+to use a virtual view as above. If you do not want to change your code, and just
+want to suppress the deprecation warning when you call
+L<DBIx::Class::Schema/deploy>, add this line to your source definition, so that
+C<deploy> will exclude this "table":
 
-  # Hand in your query as a scalar reference
-  # It will be added as a sub-select after FROM,
-  # so pay attention to the surrounding brackets!
-  __PACKAGE__->name( \<<SQL );
-  ( SELECT u.* FROM user u
-  INNER JOIN user_friends f ON u.id = f.user_id
-  WHERE f.friend_user_id = ?
-  UNION
-  SELECT u.* FROM user u
-  INNER JOIN user_friends f ON u.id = f.friend_user_id
-  WHERE f.user_id = ? )
-  SQL
+  sub sqlt_deploy_hook { $_[1]->schema->drop_table ($_[1]) }
 
-TIMTOWDI.
+=back
 
 =head2 Using specific columns
 
@@ -211,13 +201,34 @@
   # SELECT name name, LENGTH( name )
   # FROM artist
 
-Note that the C< as > attribute has absolutely nothing to with the sql
-syntax C< SELECT foo AS bar > (see the documentation in
-L<DBIx::Class::ResultSet/ATTRIBUTES>).  If your alias exists as a
-column in your base class (i.e. it was added with C<add_columns>), you
-just access it as normal. Our C<Artist> class has a C<name> column, so
-we just use the C<name> accessor:
+Note that the C<as> attribute B<has absolutely nothing to do> with the SQL
+syntax C< SELECT foo AS bar > (see the documentation in 
+L<DBIx::Class::ResultSet/ATTRIBUTES>). You can control the C<AS> part of the
+generated SQL via the C<-as> field attribute as follows:
 
+  my $rs = $schema->resultset('Artist')->search(
+    {},
+    {
+      join => 'cds',
+      distinct => 1,
+      '+select' => [ { count => 'cds.cdid', -as => 'amount_of_cds' } ],
+      '+as' => [qw/num_cds/],
+      order_by => { -desc => 'amount_of_cds' },
+    }
+  );
+
+  # Equivalent SQL
+  # SELECT me.artistid, me.name, me.rank, me.charfield, COUNT( cds.cdid ) AS amount_of_cds 
+  #   FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid 
+  # GROUP BY me.artistid, me.name, me.rank, me.charfield 
+  # ORDER BY amount_of_cds DESC 
+
+
+If your alias exists as a column in your base class (i.e. it was added with
+L<add_columns|DBIx::Class::ResultSource/add_columns>), you just access it as
+normal. Our C<Artist> class has a C<name> column, so we just use the C<name>
+accessor:
+
   my $artist = $rs->first();
   my $name = $artist->name();
 
@@ -231,10 +242,12 @@
 
   # Define accessor manually:
   sub name_length { shift->get_column('name_length'); }
-    
+
   # Or use DBIx::Class::AccessorGroup:
   __PACKAGE__->mk_group_accessors('column' => 'name_length');
 
+See also L</Using SQL functions on the left hand side of a comparison>.
+
 =head2 SELECT DISTINCT with multiple columns
 
   my $rs = $schema->resultset('Artist')->search(
@@ -242,7 +255,7 @@
     {
       columns => [ qw/artist_id name rank/ ],
       distinct => 1
-    } 
+    }
   );
 
   my $rs = $schema->resultset('Artist')->search(
@@ -279,7 +292,7 @@
   my $count = $rs->count;
 
   # Equivalent SQL:
-  # SELECT COUNT( * ) FROM (SELECT me.name FROM artist me GROUP BY me.name) count_subq: 
+  # SELECT COUNT( * ) FROM (SELECT me.name FROM artist me GROUP BY me.name) count_subq:
 
 =head2 Grouping results
 
@@ -304,7 +317,7 @@
 are in any way unsure about the use of the attributes above (C< join
 >, C< select >, C< as > and C< group_by >).
 
-=head2 Subqueries (EXPERIMENTAL)
+=head2 Subqueries
 
 You can write subqueries relatively easily in DBIC.
 
@@ -316,13 +329,13 @@
     artist_id => { 'IN' => $inside_rs->get_column('id')->as_query },
   });
 
-The usual operators ( =, !=, IN, NOT IN, etc) are supported.
+The usual operators ( =, !=, IN, NOT IN, etc.) are supported.
 
 B<NOTE>: You have to explicitly use '=' when doing an equality comparison.
 The following will B<not> work:
 
   my $rs = $schema->resultset('CD')->search({
-    artist_id => $inside_rs->get_column('id')->as_query,
+    artist_id => $inside_rs->get_column('id')->as_query,  # does NOT work
   });
 
 =head3 Support
@@ -352,14 +365,10 @@
        WHERE artist_id = me.artist_id
       )
 
-=head3 EXPERIMENTAL
-
-Please note that subqueries are considered an experimental feature.
-
 =head2 Predefined searches
 
 You can write your own L<DBIx::Class::ResultSet> class by inheriting from it
-and define often used searches as methods:
+and defining often used searches as methods:
 
   package My::DBIC::ResultSet::CD;
   use strict;
@@ -377,11 +386,16 @@
 
   1;
 
-To use your resultset, first tell DBIx::Class to create an instance of it
-for you, in your My::DBIC::Schema::CD class:
+If you're using L<DBIx::Class::Schema/load_namespaces>, simply place the file
+into the C<ResultSet> directory next to your C<Result> directory, and it will
+be automatically loaded.
 
+If however you are still using L<DBIx::Class::Schema/load_classes>, first tell
+DBIx::Class to create an instance of the ResultSet class for you, in your
+My::DBIC::Schema::CD class:
+
   # class definition as normal
-  __PACKAGE__->load_components(qw/ Core /);
+  use base 'DBIx::Class::Core';
   __PACKAGE__->table('cd');
 
   # tell DBIC to use the custom ResultSet class
@@ -395,8 +409,10 @@
 
 =head2 Using SQL functions on the left hand side of a comparison
 
-Using SQL functions on the left hand side of a comparison is generally
-not a good idea since it requires a scan of the entire table.  However,
+Using SQL functions on the left hand side of a comparison is generally not a
+good idea since it requires a scan of the entire table. (Unless your RDBMS
+supports indexes on expressions - including return values of functions - and
+you create an index on the return value of the function in question.) However,
 it can be accomplished with C<DBIx::Class> when necessary.
 
 If you do not have quoting on, simply include the function in your search
@@ -404,25 +420,30 @@
 
   $rs->search({ 'YEAR(date_of_birth)' => 1979 });
 
-With quoting on, or for a more portable solution, use the C<where>
-attribute:
+With quoting on, or for a more portable solution, use literal SQL values with
+placeholders:
 
-  $rs->search({}, { where => \'YEAR(date_of_birth) = 1979' });
+  $rs->search(\[ 'YEAR(date_of_birth) = ?', [ plain_value => 1979 ] ]);
 
-=begin hidden
+  # Equivalent SQL:
+  # SELECT * FROM employee WHERE YEAR(date_of_birth) = ?
 
-(When the bind args ordering bug is fixed, this technique will be better
-and can replace the one above.)
+  $rs->search({
+    name => 'Bob',
+    -nest => \[ 'YEAR(date_of_birth) = ?', [ plain_value => 1979 ] ],
+  });
 
-With quoting on, or for a more portable solution, use the C<where> and
-C<bind> attributes:
+  # Equivalent SQL:
+  # SELECT * FROM employee WHERE name = ? AND YEAR(date_of_birth) = ?
 
-  $rs->search({}, {
-      where => \'YEAR(date_of_birth) = ?',
-      bind  => [ 1979 ]
-  });
+Note: the C<plain_value> string in the C<< [ plain_value => 1979 ] >> part
+should either be the same as the name of the column (do this if the return
+type of the function matches the type of the column) or, failing that, an
+essentially arbitrary dummy string (use C<plain_value> as a habit). It is
+used by L<DBIx::Class> to handle special column types.
 
-=end hidden
+See also L<SQL::Abstract/Literal SQL with placeholders and bind values
+(subqueries)>.
 
 =head1 JOINS AND PREFETCHING
 
@@ -439,7 +460,7 @@
 
   my $rs = $schema->resultset('CD')->search(
     {
-      'artists.name' => 'Bob Marley'    
+      'artists.name' => 'Bob Marley'
     },
     {
       join => 'artists', # join the artist table
@@ -452,7 +473,7 @@
   # WHERE artist.name = 'Bob Marley'
 
 In that example both the join and the condition use the relationship name rather than the table name
-(see DBIx::Class::Manual::Joining for more details on aliasing ).
+(see L<DBIx::Class::Manual::Joining> for more details on aliasing).
 
 If required, you can now sort on any column in the related tables by including
 it in your C<order_by> attribute, (again using the aliased relation name rather than table name) :
@@ -673,7 +694,7 @@
 
   my $schema = $cd->result_source->schema;
   # use the schema as normal:
-  my $artist_rs = $schema->resultset('Artist'); 
+  my $artist_rs = $schema->resultset('Artist');
 
 This can be useful when you don't want to pass around a Schema object to every
 method.
@@ -693,7 +714,7 @@
 
 =head2 Stringification
 
-Employ the standard stringification technique by using the C<overload>
+Employ the standard stringification technique by using the L<overload>
 module.
 
 To make an object stringify itself as a single column, use something
@@ -741,17 +762,17 @@
     # do whatever else you wanted if it was a new row
   }
 
-=head2 Static sub-classing DBIx::Class result classes 
+=head2 Static sub-classing DBIx::Class result classes
 
 AKA adding additional relationships/methods/etc. to a model for a
 specific usage of the (shared) model.
 
-B<Schema definition> 
- 
-    package My::App::Schema; 
-     
-    use base DBIx::Class::Schema; 
+B<Schema definition>
 
+    package My::App::Schema;
+
+    use base 'DBIx::Class::Schema';
+
     # load subclassed classes from My::App::Schema::Result/ResultSet
     __PACKAGE__->load_namespaces;
 
@@ -763,37 +784,37 @@
         /]});
 
     1;
- 
-B<Result-Subclass definition> 
- 
+
+B<Result-Subclass definition>
+
     package My::App::Schema::Result::Baz;
-     
-    use strict; 
-    use warnings; 
-    use base My::Shared::Model::Result::Baz; 
-    
+
+    use strict;
+    use warnings;
+    use base 'My::Shared::Model::Result::Baz';
+
     # WARNING: Make sure you call table() again in your subclass,
     # otherwise DBIx::Class::ResultSourceProxy::Table will not be called
     # and the class name is not correctly registered as a source
-    __PACKAGE__->table('baz'); 
-     
-    sub additional_method { 
-        return "I'm an additional method only needed by this app"; 
+    __PACKAGE__->table('baz');
+
+    sub additional_method {
+        return "I'm an additional method only needed by this app";
     }
 
     1;
-     
-=head2 Dynamic Sub-classing DBIx::Class proxy classes 
 
+=head2 Dynamic Sub-classing DBIx::Class proxy classes
+
 AKA multi-class object inflation from one table
- 
+
 L<DBIx::Class> classes are proxy classes, therefore some different
 techniques need to be employed for more than basic subclassing.  In
 this example we have a single user table that carries a boolean bit
 for admin.  We would like to give the admin users
-objects(L<DBIx::Class::Row>) the same methods as a regular user but
+objects (L<DBIx::Class::Row>) the same methods as a regular user but
 also special admin only methods.  It doesn't make sense to create two
-seperate proxy-class files for this.  We would be copying all the user
+separate proxy-class files for this.  We would be copying all the user
 methods into the Admin class.  There is a cleaner way to accomplish
 this.
 
@@ -803,125 +824,129 @@
 grab the object being returned, inspect the values we are looking for,
 bless it if it's an admin object, and then return it.  See the example
 below:
- 
-B<Schema Definition> 
- 
-    package My::Schema; 
-     
-    use base qw/DBIx::Class::Schema/; 
- 
+
+B<Schema Definition>
+
+    package My::Schema;
+
+    use base qw/DBIx::Class::Schema/;
+
     __PACKAGE__->load_namespaces;
 
     1;
- 
- 
-B<Proxy-Class definitions> 
- 
-    package My::Schema::Result::User; 
-     
-    use strict; 
-    use warnings; 
-    use base qw/DBIx::Class/; 
-     
-    ### Defined what our admin class is for ensure_class_loaded 
-    my $admin_class = __PACKAGE__ . '::Admin'; 
-     
-    __PACKAGE__->load_components(qw/Core/); 
-     
-    __PACKAGE__->table('users'); 
-     
-    __PACKAGE__->add_columns(qw/user_id   email    password  
-                                firstname lastname active 
-                                admin/); 
-     
-    __PACKAGE__->set_primary_key('user_id'); 
-     
-    sub inflate_result { 
-        my $self = shift;  
-        my $ret = $self->next::method(@_); 
-        if( $ret->admin ) {### If this is an admin rebless for extra functions  
-            $self->ensure_class_loaded( $admin_class ); 
-            bless $ret, $admin_class; 
-        } 
-        return $ret; 
-    } 
-     
-    sub hello { 
-        print "I am a regular user.\n"; 
-        return ; 
-    } 
-    
+
+
+B<Proxy-Class definitions>
+
+    package My::Schema::Result::User;
+
+    use strict;
+    use warnings;
+    use base qw/DBIx::Class::Core/;
+
+    ### Define what our admin class is, for ensure_class_loaded()
+    my $admin_class = __PACKAGE__ . '::Admin';
+
+    __PACKAGE__->table('users');
+
+    __PACKAGE__->add_columns(qw/user_id   email    password
+                                firstname lastname active
+                                admin/);
+
+    __PACKAGE__->set_primary_key('user_id');
+
+    sub inflate_result {
+        my $self = shift;
+        my $ret = $self->next::method(@_);
+        if( $ret->admin ) {### If this is an admin, rebless for extra functions
+            $self->ensure_class_loaded( $admin_class );
+            bless $ret, $admin_class;
+        }
+        return $ret;
+    }
+
+    sub hello {
+        print "I am a regular user.\n";
+        return ;
+    }
+
     1;
 
-     
-    package My::Schema::Result::User::Admin; 
-     
-    use strict; 
-    use warnings; 
-    use base qw/My::Schema::Result::User/; 
-     
-    sub hello 
-    { 
-        print "I am an admin.\n"; 
-        return; 
-    } 
-     
-    sub do_admin_stuff 
-    { 
-        print "I am doing admin stuff\n"; 
-        return ; 
+
+    package My::Schema::Result::User::Admin;
+
+    use strict;
+    use warnings;
+    use base qw/My::Schema::Result::User/;
+
+    # This line is important
+    __PACKAGE__->table('users');
+
+    sub hello
+    {
+        print "I am an admin.\n";
+        return;
     }
 
+    sub do_admin_stuff
+    {
+        print "I am doing admin stuff\n";
+        return ;
+    }
+
     1;
- 
-B<Test File> test.pl 
- 
-    use warnings; 
-    use strict; 
-    use My::Schema; 
-     
-    my $user_data = { email    => 'someguy at place.com',  
-                      password => 'pass1',  
-                      admin    => 0 }; 
-                           
-    my $admin_data = { email    => 'someadmin at adminplace.com',  
-                       password => 'pass2',  
-                       admin    => 1 }; 
-                           
-    my $schema = My::Schema->connection('dbi:Pg:dbname=test'); 
-     
-    $schema->resultset('User')->create( $user_data ); 
-    $schema->resultset('User')->create( $admin_data ); 
-     
-    ### Now we search for them 
-    my $user = $schema->resultset('User')->single( $user_data ); 
-    my $admin = $schema->resultset('User')->single( $admin_data ); 
-     
-    print ref $user, "\n"; 
-    print ref $admin, "\n"; 
-     
-    print $user->password , "\n"; # pass1 
-    print $admin->password , "\n";# pass2; inherited from User 
-    print $user->hello , "\n";# I am a regular user. 
-    print $admin->hello, "\n";# I am an admin. 
- 
-    ### The statement below will NOT print 
-    print "I can do admin stuff\n" if $user->can('do_admin_stuff'); 
-    ### The statement below will print 
-    print "I can do admin stuff\n" if $admin->can('do_admin_stuff'); 
 
+B<Test File> test.pl
+
+    use warnings;
+    use strict;
+    use My::Schema;
+
+    my $user_data = { email    => 'someguy at place.com',
+                      password => 'pass1',
+                      admin    => 0 };
+
+    my $admin_data = { email    => 'someadmin at adminplace.com',
+                       password => 'pass2',
+                       admin    => 1 };
+
+    my $schema = My::Schema->connection('dbi:Pg:dbname=test');
+
+    $schema->resultset('User')->create( $user_data );
+    $schema->resultset('User')->create( $admin_data );
+
+    ### Now we search for them
+    my $user = $schema->resultset('User')->single( $user_data );
+    my $admin = $schema->resultset('User')->single( $admin_data );
+
+    print ref $user, "\n";
+    print ref $admin, "\n";
+
+    print $user->password , "\n"; # pass1
+    print $admin->password , "\n";# pass2; inherited from User
+    print $user->hello , "\n";# I am a regular user.
+    print $admin->hello, "\n";# I am an admin.
+
+    ### The statement below will NOT print
+    print "I can do admin stuff\n" if $user->can('do_admin_stuff');
+    ### The statement below will print
+    print "I can do admin stuff\n" if $admin->can('do_admin_stuff');
+
+Alternatively you can use L<DBIx::Class::DynamicSubclass>, which implements
+exactly the above functionality.
+
 =head2 Skip row object creation for faster results
 
 DBIx::Class is not built for speed, it's built for convenience and
 ease of use, but sometimes you just need to get the data, and skip the
 fancy objects.
-  
+
 To do this simply use L<DBIx::Class::ResultClass::HashRefInflator>.
-  
+
  my $rs = $schema->resultset('CD');
- 
+
  $rs->result_class('DBIx::Class::ResultClass::HashRefInflator');
- 
+
  my $hash_ref = $rs->find(1);
 
 Wasn't that easy?
@@ -965,7 +990,7 @@
 
   my $rs = $schema->resultset('Items')->search(
     {},
-    { 
+    {
        select => [ { sum => 'Cost' } ],
        as     => [ 'total_cost' ], # remember this 'as' is for DBIx::Class::ResultSet not SQL
     }
@@ -994,7 +1019,7 @@
     print $c;
   }
 
-C<ResultSetColumn> only has a limited number of built-in functions, if
+C<ResultSetColumn> only has a limited number of built-in functions. If
 you need one that it doesn't have, then you can use the C<func> method
 instead:
 
@@ -1009,7 +1034,7 @@
 
 =head2 Creating a result set from a set of rows
 
-Sometimes you have a (set of) row objects that you want to put into a 
+Sometimes you have a (set of) row objects that you want to put into a
 resultset without the need to hit the DB again. You can do that by using the
 L<set_cache|DBIx::Class::ResultSet/set_cache> method:
 
@@ -1044,13 +1069,13 @@
 
 =head2 Ordering a relationship result set
 
-If you always want a relation to be ordered, you can specify this when you 
+If you always want a relation to be ordered, you can specify this when you
 create the relationship.
 
 To order C<< $book->pages >> by descending page_number, create the relation
 as follows:
 
-  __PACKAGE__->has_many('pages' => 'Page', 'book', { order_by => \'page_number DESC'} );
+  __PACKAGE__->has_many('pages' => 'Page', 'book', { order_by => { -desc => 'page_number'} } );
 
 =head2 Filtering a relationship result set
 
@@ -1063,8 +1088,7 @@
 This is straightforward using L<ManyToMany|DBIx::Class::Relationship/many_to_many>:
 
   package My::User;
-  use base 'DBIx::Class';
-  __PACKAGE__->load_components('Core');
+  use base 'DBIx::Class::Core';
   __PACKAGE__->table('user');
   __PACKAGE__->add_columns(qw/id name/);
   __PACKAGE__->set_primary_key('id');
@@ -1072,8 +1096,7 @@
   __PACKAGE__->many_to_many('addresses' => 'user_address', 'address');
 
   package My::UserAddress;
-  use base 'DBIx::Class';
-  __PACKAGE__->load_components('Core');
+  use base 'DBIx::Class::Core';
   __PACKAGE__->table('user_address');
   __PACKAGE__->add_columns(qw/user address/);
   __PACKAGE__->set_primary_key(qw/user address/);
@@ -1081,8 +1104,7 @@
   __PACKAGE__->belongs_to('address' => 'My::Address');
 
   package My::Address;
-  use base 'DBIx::Class';
-  __PACKAGE__->load_components('Core');
+  use base 'DBIx::Class::Core';
   __PACKAGE__->table('address');
   __PACKAGE__->add_columns(qw/id street town area_code country/);
   __PACKAGE__->set_primary_key('id');
@@ -1092,6 +1114,16 @@
   $rs = $user->addresses(); # get all addresses for a user
   $rs = $address->users(); # get all users for an address
 
+  my $address = $user->add_to_addresses(    # returns a My::Address instance,
+                                            # NOT a My::UserAddress instance!
+    {
+      country => 'United Kingdom',
+      area_code => 'XYZ',
+      town => 'London',
+      street => 'Sesame',
+    }
+  );
+
 =head2 Relationships across DB schemas
 
 Mapping relationships across L<DB schemas|DBIx::Class::Manual::Glossary/DB schema>
@@ -1103,11 +1135,10 @@
 declaration, like so...
 
   package MyDatabase::Main::Artist;
-  use base qw/DBIx::Class/;
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
-  
+  use base qw/DBIx::Class::Core/;
+
   __PACKAGE__->table('database1.artist'); # will use "database1.artist" in FROM clause
-  
+
   __PACKAGE__->add_columns(qw/ artist_id name /);
   __PACKAGE__->set_primary_key('artist_id');
   __PACKAGE__->has_many('cds' => 'MyDatabase::Main::Cd');
@@ -1128,16 +1159,16 @@
 
   package MyDatabase::Schema;
   use Moose;
-  
+
   extends 'DBIx::Class::Schema';
-  
+
   around connection => sub {
     my ( $inner, $self, $dsn, $username, $pass, $attr ) = ( shift, @_ );
-   
+
     my $postfix = delete $attr->{schema_name_postfix};
-    
+
     $inner->(@_);
-    
+
     if ( $postfix ) {
         $self->append_db_name($postfix);
     }
@@ -1145,18 +1176,18 @@
 
   sub append_db_name {
     my ( $self, $postfix ) = @_;
-    
-    my @sources_with_db 
-        = grep 
-            { $_->name =~ /^\w+\./mx } 
-            map 
-                { $self->source($_) } 
+
+    my @sources_with_db
+        = grep
+            { $_->name =~ /^\w+\./mx }
+            map
+                { $self->source($_) }
                 $self->sources;
-    
+
     foreach my $source (@sources_with_db) {
         my $name = $source->name;
         $name =~ s{^(\w+)\.}{${1}${postfix}\.}mx;
-        
+
         $source->name($name);
     }
   }
@@ -1168,17 +1199,17 @@
 then simply iterate over all the Schema's ResultSources, renaming them as
 needed.
 
-To use this facility, simply add or modify the \%attr hashref that is passed to 
+To use this facility, simply add or modify the \%attr hashref that is passed to
 L<connection|DBIx::Class::Schema/connect>, as follows:
 
-  my $schema 
+  my $schema
     = MyDatabase::Schema->connect(
-      $dsn, 
-      $user, 
+      $dsn,
+      $user,
       $pass,
       {
         schema_name_postfix => '_dev'
-        # ... Other options as desired ... 
+        # ... Other options as desired ...
       })
 
 Obviously, one could accomplish even more advanced mapping via a hash map or a
@@ -1220,18 +1251,110 @@
 Nested transactions will work as expected. That is, only the outermost
 transaction will actually issue a commit to the $dbh, and a rollback
 at any level of any transaction will cause the entire nested
-transaction to fail. Support for savepoints and for true nested
-transactions (for databases that support them) will hopefully be added
-in the future.
+transaction to fail.
+
+=head2 Nested transactions and auto-savepoints
 
-=head1 SQL 
+If savepoints are supported by your RDBMS, it is possible to achieve true
+nested transactions with minimal effort. To enable auto-savepoints via nested
+transactions, supply the C<< auto_savepoint => 1 >> connection attribute.
 
+Here is an example of true nested transactions. In the example, we start a big
+task which will create several rows. Generation of data for each row is a
+fragile operation and might fail. If we fail creating something, depending on
+the type of failure, we want to abort the whole task, or only skip the failed
+row.
+
+  my $schema = MySchema->connect("dbi:Pg:dbname=my_db");
+
+  # Start a transaction. Every database change from here on will only be 
+  # committed into the database if the eval block succeeds.
+  eval {
+    $schema->txn_do(sub {
+      # SQL: BEGIN WORK;
+
+      my $job = $schema->resultset('Job')->create({ name=> 'big job' });
+      # SQL: INSERT INTO job ( name) VALUES ( 'big job' );
+
+      for (1..10) {
+
+        # Start a nested transaction, which in fact sets a savepoint.
+        eval {
+          $schema->txn_do(sub {
+            # SQL: SAVEPOINT savepoint_0;
+
+            my $thing = $schema->resultset('Thing')->create({ job=>$job->id });
+            # SQL: INSERT INTO thing ( job) VALUES ( 1 );
+
+            if (rand > 0.8) {
+              # This will generate an error, thus setting $@
+
+              $thing->update({force_fail=>'foo'});
+              # SQL: UPDATE thing SET force_fail = 'foo'
+              #      WHERE ( id = 42 );
+            }
+          });
+        };
+        if ($@) {
+          # SQL: ROLLBACK TO SAVEPOINT savepoint_0;
+
+          # There was an error while creating a $thing. Depending on the error
+          # we want to abort the whole transaction, or only rollback the
+          # changes related to the creation of this $thing
+
+          # Abort the whole job
+          if ($@ =~ /horrible_problem/) {
+            print "something horrible happend, aborting job!";
+            die $@;                # rethrow error
+          }
+
+          # Ignore this $thing, report the error, and continue with the
+          # next $thing
+          print "Cannot create thing: $@";
+        }
+        # There was no error, so save all changes since the last 
+        # savepoint.
+
+        # SQL: RELEASE SAVEPOINT savepoint_0;
+      }
+    });
+  };
+  if ($@) {
+    # There was an error while handling the $job. Rollback all changes
+    # since the transaction started, including the already committed
+    # ('released') savepoints. There will be neither a new $job nor any
+    # $thing entry in the database.
+
+    # SQL: ROLLBACK;
+
+    print "ERROR: $@\n";
+  }
+  else {
+    # There was no error while handling the $job. Commit all changes.
+    # Only now other connections can see the newly created $job and
+    # @things.
+
+    # SQL: COMMIT;
+
+    print "Ok\n";
+  }
+
+In this example it might be hard to see where the rollbacks, releases and
+commits are happening, but it works just the same as for plain C<txn_do>: If
+the C<eval>-block around C<txn_do> fails, a rollback is issued. If the C<eval>
+succeeds, the transaction is committed (or the savepoint released).
+
+While you can get more fine-grained control using C<svp_begin>, C<svp_release>
+and C<svp_rollback>, it is strongly recommended to use C<txn_do> with coderefs.
+
+=head1 SQL
+
 =head2 Creating Schemas From An Existing Database
 
-L<DBIx::Class::Schema::Loader> will connect to a database and create a 
+L<DBIx::Class::Schema::Loader> will connect to a database and create a
 L<DBIx::Class::Schema> and associated sources by examining the database.
 
-The recommend way of achieving this is to use the 
+The recommended way of achieving this is to use the
 L<make_schema_at|DBIx::Class::Schema::Loader/make_schema_at> method:
 
   perl -MDBIx::Class::Schema::Loader=make_schema_at,dump_to_dir:./lib \
@@ -1259,7 +1382,7 @@
 To create a new database using the schema:
 
  my $schema = My::Schema->connect($dsn);
- $schema->deploy({ add_drop_tables => 1});
+ $schema->deploy({ add_drop_table => 1});
 
 To import created .sql files using the mysql client:
 
@@ -1293,45 +1416,44 @@
 your database.
 
 Make a table class as you would for any other table
-                                                                               
+
   package MyAppDB::Dual;
   use strict;
   use warnings;
-  use base 'DBIx::Class';
-  __PACKAGE__->load_components("Core");
+  use base 'DBIx::Class::Core';
   __PACKAGE__->table("Dual");
   __PACKAGE__->add_columns(
     "dummy",
     { data_type => "VARCHAR2", is_nullable => 0, size => 1 },
   );
- 
+
 Once you've loaded your table class select from it using C<select>
 and C<as> instead of C<columns>
- 
+
   my $rs = $schema->resultset('Dual')->search(undef,
    { select => [ 'sysdate' ],
       as     => [ 'now' ]
     },
   );
- 
+
 All you have to do now is be careful how you access your resultset; the below
 will not work because there is no column called 'now' in the Dual table class:
- 
+
   while (my $dual = $rs->next) {
     print $dual->now."\n";
   }
   # Can't locate object method "now" via package "MyAppDB::Dual" at headshot.pl line 23.
- 
+
 You could of course use 'dummy' in C<as> instead of 'now', or C<add_columns> to
 your Dual class for whatever you wanted to select from dual, but that's just
 silly, instead use C<get_column>
- 
+
   while (my $dual = $rs->next) {
     print $dual->get_column('now')."\n";
   }
- 
+
 Or use C<cursor>
- 
+
   my $cursor = $rs->cursor;
   while (my @vals = $cursor->next) {
     print $vals[0]."\n";
@@ -1348,48 +1470,48 @@
        parser_args    => { sources => [ grep $_ ne 'Dual', $schema->sources ] },
     };
     $schema->create_ddl_dir( [qw/Oracle/], undef, './sql', undef, $sqlt_args );
- 
+
 Or use L<DBIx::Class::ResultClass::HashRefInflator>
- 
+
   $rs->result_class('DBIx::Class::ResultClass::HashRefInflator');
   while ( my $dual = $rs->next ) {
     print $dual->{now}."\n";
   }
- 
+
 Here are some example C<select> conditions to illustrate the different syntax
-you could use for doing stuff like 
+you could use for doing stuff like
 C<oracles.heavily(nested(functions_can('take', 'lots'), OF), 'args')>
- 
+
   # get a sequence value
   select => [ 'A_SEQ.nextval' ],
- 
+
   # get create table sql
   select => [ { 'dbms_metadata.get_ddl' => [ "'TABLE'", "'ARTIST'" ]} ],
- 
+
   # get a random num between 0 and 100
   select => [ { "trunc" => [ { "dbms_random.value" => [0,100] } ]} ],
- 
+
   # what year is it?
   select => [ { 'extract' => [ \'year from sysdate' ] } ],
- 
+
   # do some math
   select => [ {'round' => [{'cos' => [ \'180 * 3.14159265359/180' ]}]}],
- 
+
   # which day of the week were you born on?
   select => [{'to_char' => [{'to_date' => [ "'25-DEC-1980'", "'dd-mon-yyyy'" ]}, "'day'"]}],
- 
+
   # select 16 rows from dual
   select   => [ "'hello'" ],
   as       => [ 'world' ],
   group_by => [ 'cube( 1, 2, 3, 4 )' ],
- 
- 
 
+
+
 =head2 Adding Indexes And Functions To Your SQL
 
 Often you will want indexes on columns on your table to speed up searching. To
-do this, create a method called C<sqlt_deploy_hook> in the relevant source 
-class (refer to the advanced 
+do this, create a method called C<sqlt_deploy_hook> in the relevant source
+class (refer to the advanced
 L<callback system|DBIx::Class::ResultSource/sqlt_deploy_callback> if you wish
 to share a hook between multiple sources):
 
@@ -1406,13 +1528,13 @@
 
  1;
 
-Sometimes you might want to change the index depending on the type of the 
+Sometimes you might want to change the index depending on the type of the
 database for which SQL is being generated:
 
   (my $db_type = $sqlt_table->schema->translator->producer_type)
     =~ s/^SQL::Translator::Producer:://;
 
-You can also add hooks to the schema level to stop certain tables being 
+You can also add hooks to the schema level to stop certain tables being
 created:
 
  package My::Schema;
@@ -1499,30 +1621,30 @@
 Add the L<DBIx::Class::Schema::Versioned> schema component to your
 Schema class. This will add a new table to your database called
 C<dbix_class_schema_version> which will keep track of which version is installed
-and warn if the user trys to run a newer schema version than the
+and warn if the user tries to run a newer schema version than the
 database thinks it has.
 
-Alternatively, you can send the conversion sql scripts to your
+Alternatively, you can send the conversion SQL scripts to your
 customers as above.
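
 A minimal sketch of the versioned-schema setup described above (the version
 number and upgrade directory below are illustrative):

   package My::Schema;
   use base 'DBIx::Class::Schema';

   our $VERSION = '0.001';

   __PACKAGE__->load_components('Schema::Versioned');
   __PACKAGE__->upgrade_directory('./sql-upgrades/');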
 
-=head2 Setting quoting for the generated SQL. 
+=head2 Setting quoting for the generated SQL
 
 If the database contains column names with spaces and/or reserved words, they
 need to be quoted in the SQL queries. This is done using:
 
- __PACKAGE__->storage->sql_maker->quote_char([ qw/[ ]/] );
- __PACKAGE__->storage->sql_maker->name_sep('.');
+ $schema->storage->sql_maker->quote_char([ qw/[ ]/] );
+ $schema->storage->sql_maker->name_sep('.');
 
 The first sets the quote characters. Either a pair of matching
 brackets, or a C<"> or C<'>:
-  
- __PACKAGE__->storage->sql_maker->quote_char('"');
 
+ $schema->storage->sql_maker->quote_char('"');
+
 Check the documentation of your database for the correct quote
 characters to use. C<name_sep> needs to be set to allow the SQL
 generator to put the quotes in the correct place.
 
-In most cases you should set these as part of the arguments passed to 
+In most cases you should set these as part of the arguments passed to
 L<DBIx::Class::Schema/connect>:
 
  my $schema = My::Schema->connect(
@@ -1535,6 +1657,17 @@
   }
  )
 
+In some cases, quoting will be required for all users of a schema. To enforce
+this, you can also overload the C<connection> method for your schema class:
+
+ sub connection {
+     my $self = shift;
+     my $rv = $self->next::method( @_ );
+     $rv->storage->sql_maker->quote_char([ qw/[ ]/ ]);
+     $rv->storage->sql_maker->name_sep('.');
+     return $rv;
+ }
+
 =head2 Setting limit dialect for SQL::Abstract::Limit
 
 In some cases, SQL::Abstract::Limit cannot determine the dialect of
@@ -1550,7 +1683,7 @@
 The JDBC bridge is one way of getting access to a MSSQL server from a platform
 that Microsoft doesn't deliver native client libraries for (e.g. Linux).
 
-The limit dialect can also be set at connect time by specifying a 
+The limit dialect can also be set at connect time by specifying a
 C<limit_dialect> key in the final hash as shown above.
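
 For example, a sketch forcing the MSSQL-style C<Top> dialect at connect time:

  my $schema = My::Schema->connect(
    $dsn, $user, $pass,
    { limit_dialect => 'Top' },
  );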
 
 =head2 Working with PostgreSQL array types
@@ -1573,7 +1706,7 @@
     }
   );
 
-In conditions (eg. C<\%cond> in the L<DBIx::Class::ResultSet/search> family of
+In conditions (e.g. C<\%cond> in the L<DBIx::Class::ResultSet/search> family of
 methods) you cannot directly use array references (since this is interpreted as
 a list of values to be C<OR>ed), but you can use the following syntax to force
 passing them as bind values:
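
 For example, a sketch (assuming an integer-array column named C<arrayfield>):

  $rs->search({ arrayfield => \[ '= ?', [ arrayfield => [ 3, 4 ] ] ] });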
@@ -1591,7 +1724,7 @@
 arrayrefs together with the column name, like this: C<< [column_name => value]
 >>.
 
-=head1 BOOTSTRAPPING/MIGRATING 
+=head1 BOOTSTRAPPING/MIGRATING
 
 =head2 Easy migration from class-based to schema-based setup
 
@@ -1602,10 +1735,10 @@
 
   use MyDB;
   use SQL::Translator;
-  
+
   my $schema = MyDB->schema_instance;
-  
-  my $translator           =  SQL::Translator->new( 
+
+  my $translator           =  SQL::Translator->new(
       debug                => $debug          ||  0,
       trace                => $trace          ||  0,
       no_comments          => $no_comments    ||  0,
@@ -1619,13 +1752,13 @@
           'prefix'         => 'My::Schema',
                          },
   );
-  
+
   $translator->parser('SQL::Translator::Parser::DBIx::Class');
   $translator->producer('SQL::Translator::Producer::DBIx::Class::File');
-  
+
   my $output = $translator->translate(@args) or die
           "Error: " . $translator->error;
-  
+
   print $output;
 
 You could use L<Module::Find> to search for all subclasses in the MyDB::*
@@ -1654,16 +1787,16 @@
     return $new;
   }
 
-For more information about C<next::method>, look in the L<Class::C3> 
+For more information about C<next::method>, look in the L<Class::C3>
 documentation. See also L<DBIx::Class::Manual::Component> for more
 ways to write your own base classes to do this.
 
 People looking for ways to do "triggers" with DBIx::Class are probably
-just looking for this. 
+just looking for this.
 
 =head2 Changing one field whenever another changes
 
-For example, say that you have three columns, C<id>, C<number>, and 
+For example, say that you have three columns, C<id>, C<number>, and
 C<squared>.  You would like to make changes to C<number> and have
 C<squared> be automagically set to the value of C<number> squared.
 You can accomplish this by overriding C<store_column>:
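
 A sketch of such an override (the column accessors follow the description
 above; the full Cookbook example may differ):

   sub store_column {
     my ( $self, $name, $value ) = @_;
     $self->squared( $value * $value ) if $name eq 'number';
     return $self->next::method( $name, $value );
   }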
@@ -1681,7 +1814,7 @@
 
 =head2 Automatically creating related objects
 
-You might have a class C<Artist> which has many C<CD>s.  Further, if you
+You might have a class C<Artist> which has many C<CD>s.  Further, you
 want to create a C<CD> object every time you insert an C<Artist> object.
 You can accomplish this by overriding C<insert> on your objects:
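
 A sketch of such an override (assuming a C<cds> has_many relationship; the
 default title is illustrative):

   sub insert {
     my ( $self, @args ) = @_;
     $self->next::method(@args);
     $self->create_related( 'cds', { title => 'Greatest Hits' } );
     return $self;
   }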
 
@@ -1872,15 +2005,15 @@
 
 Typically L<DBIx::Class> result classes start off with
 
-    use base qw/DBIx::Class/;
-    __PACKAGE__->load_components(qw/InflateColumn::DateTime Core/);
+    use base qw/DBIx::Class::Core/;
+    __PACKAGE__->load_components(qw/InflateColumn::DateTime/);
 
 If this preamble is moved into a common base class:-
 
     package MyDBICbase;
-    
-    use base qw/DBIx::Class/;
-    __PACKAGE__->load_components(qw/InflateColumn::DateTime Core/);
+
+    use base qw/DBIx::Class::Core/;
+    __PACKAGE__->load_components(qw/InflateColumn::DateTime/);
     1;
 
 and each result class then uses this as a base:-
@@ -1899,7 +2032,7 @@
 to load the result classes. This will use L<Module::Find|Module::Find>
 to find and load the appropriate modules. Explicitly defining the
 classes you wish to load will remove the overhead of
-L<Module::Find|Module::Find> and the related directory operations:-
+L<Module::Find|Module::Find> and the related directory operations:
 
     __PACKAGE__->load_classes(qw/ CD Artist Track /);
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/DocMap.pod
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/DocMap.pod	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/DocMap.pod	2010-02-16 10:26:12 UTC (rev 8720)
@@ -40,8 +40,6 @@
 
 =item L<DBIx::Class::Core> - Set of standard components to load.
 
-=item L<DBIx::Class::Serialize::Storable> - ?
-
 =item L<DBIx::Class::InflateColumn> - Making objects out of your columns.
 
 =item L<DBIx::Class::InflateColumn::DateTime> - Magically turn your datetime or timestamp columns into DateTime objects.

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Example.pod
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Example.pod	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Example.pod	2010-02-16 10:26:12 UTC (rev 8720)
@@ -27,7 +27,7 @@
 
 Installing DBIx::Class via CPAN should be sufficient.
 
-=head3 Create the database/tables.
+=head3 Create the database/tables
 
 First make and change the directory:
 
@@ -43,7 +43,7 @@
 
   CREATE TABLE artist (
     artistid INTEGER PRIMARY KEY,
-    name TEXT NOT NULL 
+    name TEXT NOT NULL
   );
 
   CREATE TABLE cd (
@@ -58,9 +58,9 @@
     title TEXT NOT NULL
   );
 
-and create the sqlite database file:
+and create the SQLite database file:
 
-sqlite3 example.db < example.sql
+  sqlite3 example.db < example.sql
 
 =head3 Set up DBIx::Class::Schema
 
@@ -78,7 +78,7 @@
 Then, create the following DBIx::Class::Schema classes:
 
 MyDatabase/Main.pm:
-    
+
   package MyDatabase::Main;
   use base qw/DBIx::Class::Schema/;
   __PACKAGE__->load_namespaces;
@@ -89,8 +89,7 @@
 MyDatabase/Main/Result/Artist.pm:
 
   package MyDatabase::Main::Result::Artist;
-  use base qw/DBIx::Class/;
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  use base qw/DBIx::Class::Core/;
   __PACKAGE__->table('artist');
   __PACKAGE__->add_columns(qw/ artistid name /);
   __PACKAGE__->set_primary_key('artistid');
@@ -102,8 +101,8 @@
 MyDatabase/Main/Result/Cd.pm:
 
   package MyDatabase::Main::Result::Cd;
-  use base qw/DBIx::Class/;
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  use base qw/DBIx::Class::Core/;
+  __PACKAGE__->load_components(qw/InflateColumn::DateTime/);
   __PACKAGE__->table('cd');
   __PACKAGE__->add_columns(qw/ cdid artist title/);
   __PACKAGE__->set_primary_key('cdid');
@@ -116,17 +115,16 @@
 MyDatabase/Main/Result/Track.pm:
 
   package MyDatabase::Main::Result::Track;
-  use base qw/DBIx::Class/;
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  use base qw/DBIx::Class::Core/;
   __PACKAGE__->table('track');
-  __PACKAGE__->add_columns(qw/ trackid cd title/);
+  __PACKAGE__->add_columns(qw/ trackid cd title /);
   __PACKAGE__->set_primary_key('trackid');
   __PACKAGE__->belongs_to('cd' => 'MyDatabase::Main::Result::Cd');
 
   1;
 
 
-=head3 Write a script to insert some records.
+=head3 Write a script to insert some records
 
 insertdb.pl
 
@@ -137,7 +135,7 @@
 
   my $schema = MyDatabase::Main->connect('dbi:SQLite:db/example.db');
 
-  #  here's some of the sql that is going to be generated by the schema
+  #  here's some of the SQL that is going to be generated by the schema
   #  INSERT INTO artist VALUES (NULL,'Michael Jackson');
   #  INSERT INTO artist VALUES (NULL,'Eminem');
 
@@ -155,10 +153,10 @@
 
   my @cds;
   foreach my $lp (keys %albums) {
-    my $artist = $schema->resultset('Artist')->search({
+    my $artist = $schema->resultset('Artist')->find({
       name => $albums{$lp}
     });
-    push @cds, [$lp, $artist->first];
+    push @cds, [$lp, $artist->id];
   }
 
   $schema->populate('Cd', [
@@ -179,10 +177,10 @@
 
   my @tracks;
   foreach my $track (keys %tracks) {
-    my $cdname = $schema->resultset('Cd')->search({
+    my $cdname = $schema->resultset('Cd')->find({
       title => $tracks{$track},
     });
-    push @tracks, [$cdname->first, $track];
+    push @tracks, [$cdname->id, $track];
   }
 
   $schema->populate('Track',[
@@ -200,7 +198,7 @@
   use strict;
 
   my $schema = MyDatabase::Main->connect('dbi:SQLite:db/example.db');
-  # for other DSNs, e.g. MySql, see the perldoc for the relevant dbd
+  # for other DSNs, e.g. MySQL, see the perldoc for the relevant DBD
   # driver, e.g. perldoc L<DBD::mysql>.
 
   get_tracks_by_cd('Bad');
@@ -248,8 +246,8 @@
     }
     print "\n";
   }
-  
-  
+
+
   sub get_cd_by_track {
     my $tracktitle = shift;
     print "get_cd_by_track($tracktitle):\n";
@@ -264,7 +262,7 @@
     my $cd = $rs->first;
     print $cd->title . "\n\n";
   }
-  
+
   sub get_cds_by_artist {
     my $artistname = shift;
     print "get_cds_by_artist($artistname):\n";
@@ -347,22 +345,22 @@
 
 =head1 Notes
 
-A reference implentation of the database and scripts in this example
+A reference implementation of the database and scripts in this example
 are available in the main distribution for DBIx::Class under the
-directory t/examples/Schema
+directory F<t/examples/Schema>.
 
 With these scripts we're relying on @INC looking in the current
 working directory.  You may want to add the MyDatabase namespaces to
 @INC in a different way when it comes to deployment.
 
-The testdb.pl script is an excellent start for testing your database
+The F<testdb.pl> script is an excellent start for testing your database
 model.
 
-This example uses load_namespaces to load in the appropriate Row classes
-from the MyDatabase::Main::Result namespace, and any required resultset
-classes from the MyDatabase::Main::ResultSet namespace (although we
-created the directory in the directions above we did not add, or need to
-add, any resultset classes).
+This example uses L<DBIx::Class::Schema/load_namespaces> to load in the
+appropriate L<Row|DBIx::Class::Row> classes from the MyDatabase::Main::Result namespace,
+and any required resultset classes from the MyDatabase::Main::ResultSet
+namespace (although we created the directory in the directions above we
+did not add, or need to add, any resultset classes).
 
 =head1 TODO
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/FAQ.pod
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/FAQ.pod	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/FAQ.pod	2010-02-16 10:26:12 UTC (rev 8720)
@@ -26,8 +26,7 @@
 
 Next, spend some time defining which data you need to store, and how
 it relates to the other data you have. For some help on normalisation,
-go to L<http://b62.tripod.com/doc/dbbase.htm> or
-L<http://209.197.234.36/db/simple.html>.
+go to L<http://b62.tripod.com/doc/dbbase.htm>.
 
 Now, decide whether you want to have the database itself be the
 definitive source of information about the data layout, or your
@@ -87,7 +86,7 @@
 to connect with rights to read/write all the schemas/tables as
 necessary.
 
-=back 
+=back
 
 =head2 Relationships
 
@@ -112,7 +111,7 @@
 Create a C<belongs_to> relationship for the field containing the
 foreign key.  See L<DBIx::Class::Relationship/belongs_to>.
 
-=item .. define a foreign key relationship where the key field may contain NULL?  
+=item .. define a foreign key relationship where the key field may contain NULL?
 
 Just create a C<belongs_to> relationship, as above. If the column is
 NULL then the inflation to the foreign object will not happen. This
@@ -217,10 +216,10 @@
 
  ->search({'created_time' => { '>=', '2006-06-01 00:00:00' } })
 
-Note that to use a function here you need to make the whole value into
-a scalar reference:
+Note that to use a function here you need to make it a scalar
+reference:
 
- ->search({'created_time' => \'>= yesterday()' })
+ ->search({'created_time' => { '>=', \'yesterday()' } })
 
 =item .. search in several tables simultaneously?
 
@@ -244,34 +243,18 @@
 query, which can be accessed similarly to a table, see your database
 documentation for details.
 
-=item .. search using greater-than or less-than and database functions?
-
-To use functions or literal SQL with conditions other than equality
-you need to supply the entire condition, for example:
-
- my $interval = "< now() - interval '12 hours'";
- ->search({last_attempt => \$interval})
-
-and not:
-
- my $interval = "now() - interval '12 hours'";
- ->search({last_attempt => { '<' => \$interval } })
-
 =item .. search with an SQL function on the left hand side?
 
 To use an SQL function on the left hand side of a comparison:
 
- ->search({}, { where => \'YEAR(date_of_birth)=1979' });
+ ->search({ -nest => \[ 'YEAR(date_of_birth) = ?', [ plain_value => 1979 ] ] });
 
-=begin hidden
+Note: the C<plain_value> string in the C<< [ plain_value => 1979 ] >> part
+should either be the same as the name of the column (do this if the return
+type of the function matches the type of the column) or otherwise it is
+currently just a placeholder string (make a habit of using C<plain_value>).
+It is used by L<DBIx::Class> to handle special column types.
 
-(When the bind arg ordering bug is fixed, the previous example can be
-replaced with the following.)
-
- ->search({}, { where => \'YEAR(date_of_birth)=?', bind => [ 1979 ] });
-
-=end hidden
-
 Or, if you have quoting off:
 
  ->search({ 'YEAR(date_of_birth)' => 1979 });
@@ -307,8 +290,8 @@
 
 =item .. fetch a whole column of data instead of a row?
 
-Call C<get_column> on a L<DBIx::Class::ResultSet>, this returns a
-L<DBIx::Class::ResultSetColumn>, see it's documentation and the
+Call C<get_column> on a L<DBIx::Class::ResultSet>. This returns a
+L<DBIx::Class::ResultSetColumn>. See its documentation and the
 L<Cookbook|DBIx::Class::Manual::Cookbook> for details.
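
 For example:

   my @titles = $schema->resultset('CD')->get_column('title')->all;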
 
 =item .. fetch a formatted column?
@@ -324,22 +307,17 @@
 
 =item .. fetch a single (or topmost) row?
 
-Sometimes you many only want a single record back from a search. A quick
-way to get that single row is to first run your search as usual:
+See L<DBIx::Class::Manual::Cookbook/Retrieve_one_and_only_one_row_from_a_resultset>.
 
-  ->search->(undef, { order_by => "id DESC" })
+A less readable way is to ask a regular search to return 1 row, using
+L<DBIx::Class::ResultSet/slice>:
 
-Then call L<DBIx::Class::ResultSet/slice> and ask it only to return 1 row:
-
-  ->slice(0)
-
-These two calls can be combined into a single statement:
-
   ->search(undef, { order_by => "id DESC" })->slice(0)
 
-Why slice instead of L<DBIx::Class::ResultSet/first> or L<DBIx::Class::ResultSet/single>?
-If supported by the database, slice will use LIMIT/OFFSET to hint to the database that we
-really only need one row. This can result in a significant speed improvement.
+which (if supported by the database) will use LIMIT/OFFSET to hint to the
+database that we really only need one row. This can result in a significant
+speed improvement. The method using L<DBIx::Class::ResultSet/single> mentioned
+in the cookbook can do the same if you pass a C<rows> attribute to the search.
 
 =item .. refresh a row from storage?
 
@@ -393,6 +371,9 @@
 
 =item .. insert many rows of data efficiently?
 
+The C<populate> method in L<DBIx::Class::ResultSet> provides
+efficient bulk inserts.
+
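+For example, a sketch using the arrayref-of-arrayrefs form (the column names
+are illustrative):
+
+  $schema->resultset('Artist')->populate([
+    [ qw/artistid name/ ],
+    [ 1, 'Michael Jackson' ],
+    [ 2, 'Eminem' ],
+  ]);
+
+When called in void context like this, C<populate> skips row object creation
+and uses the faster bulk-insert codepath.
+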
 =item .. update a collection of rows at the same time?
 
 Create a resultset using a search, to filter the rows of data you
@@ -410,17 +391,17 @@
 
 But note that when using a scalar reference the column in the database
 will be updated, but when you read the value from the object with e.g.
- 
+
  ->somecolumn()
- 
+
 you still get back the scalar reference to the string, B<not> the new
 value in the database. To get that you must refresh the row from storage
 using C<discard_changes()>. Or chain your function calls like this:
 
   ->update->discard_changes
- 
- to update the database and refresh the object in one step.
- 
+
+to update the database and refresh the object in one step.
+
 =item .. store JSON/YAML in a column and have it deflate/inflate automatically?
 
 You can use L<DBIx::Class::InflateColumn> to accomplish YAML/JSON storage transparently.
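
 For example, a minimal JSON sketch (assuming a TEXT column named C<data> and
 the L<JSON::Any> module):

   use JSON::Any;
   my $json = JSON::Any->new;

   __PACKAGE__->inflate_column('data', {
     inflate => sub { $json->jsonToObj(shift) },
     deflate => sub { $json->objToJson(shift) },
   });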
@@ -452,6 +433,38 @@
 
 =back
 
+=head2 Custom methods in Result classes
+
+You can add custom methods that do arbitrary things, even to unrelated tables. 
+For example, to provide a C<< $book->foo() >> method which searches the 
+cd table, you could add this to Book.pm:
+
+  sub foo {
+    my ($self, $col_data) = @_;
+    return $self->result_source->schema->resultset('cd')->search($col_data);
+  }
+
+And invoke that on any Book Result object like so:
+
+  my $rs = $book->foo({ title => 'Down to Earth' });
+
+When two tables ARE related, L<DBIx::Class::Relationship::Base> provides many
+methods to find or create data in related tables for you. But if you want to
+write your own methods, you can.
+
+For example, to provide a C<< $book->foo() >> method to manually implement
+what create_related() from L<DBIx::Class::Relationship::Base> does, you could 
+add this to Book.pm:
+
+  sub foo {
+    my ($self, $relname, $col_data) = @_;
+    return $self->related_resultset($relname)->create($col_data);
+  }
+
+Invoked like this:
+
+  my $author = $book->foo('author', { name => 'Fred' });
+
 =head2 Misc
 
 =over 4
@@ -474,7 +487,7 @@
 	package MyTable;
 
 	use Moose; # import Moose
-	use Moose::Util::TypeConstraint; # import Moose accessor type constraints 
+	use Moose::Util::TypeConstraint; # import Moose accessor type constraints
 
 	extends 'DBIx::Class'; # Moose changes the way we define our parent (base) package
 
@@ -486,7 +499,7 @@
 
 	my $row;
 
-	# assume that some where in here $row will get assigned to a MyTable row
+	# assume that somewhere in here $row will get assigned to a MyTable row
 
 	$row->non_column_data('some string'); # would set the non_column_data accessor
 
@@ -494,7 +507,7 @@
 
 	$row->update(); # would not inline the non_column_data accessor into the update
 
-	
+
 =item How do I use DBIx::Class objects in my TT templates?
 
 Like normal objects, mostly. However you need to watch out for TT
@@ -536,9 +549,68 @@
 =item How do I reduce the overhead of database queries?
 
 You can reduce the overhead of object creation within L<DBIx::Class>
-using the tips in L<DBIx::Class::Manual::Cookbook/"Skip row object creation for faster results"> 
+using the tips in L<DBIx::Class::Manual::Cookbook/"Skip row object creation for faster results">
+and L<DBIx::Class::Manual::Cookbook/"Get raw data for blindingly fast results">.
 
+=item How do I override a run time method (e.g. a relationship accessor)?
+
+If you need access to the original accessor, then you must "wrap around" the original method.
+You can do that either with L<Moose::Manual::MethodModifiers> or L<Class::Method::Modifiers>.
+The code example works for both modules:
+
+    package Your::Schema::Group;
+    use Class::Method::Modifiers;
+    
+    # ... declare columns ...
+    
+    __PACKAGE__->has_many('group_servers', 'Your::Schema::GroupServer', 'group_id');
+    __PACKAGE__->many_to_many('servers', 'group_servers', 'server');
+    
+    # if the server group is a "super group", then return all servers
+    # otherwise return only servers that belong to the given group
+    around 'servers' => sub {
+        my $orig = shift;
+        my $self = shift;
+
+        return $self->$orig(@_) unless $self->is_super_group;
+        return $self->result_source->schema->resultset('Server')->all;
+    };
+
+If you just want to override the original method, and don't care about the data
+from the original accessor, then you have two options. Either use
+L<Method::Signatures::Simple> that does most of the work for you, or do
+it the "dirty way".
+
+L<Method::Signatures::Simple> way:
+
+    package Your::Schema::Group;
+    use Method::Signatures::Simple;
+    
+    # ... declare columns ...
+    
+    __PACKAGE__->has_many('group_servers', 'Your::Schema::GroupServer', 'group_id');
+    __PACKAGE__->many_to_many('servers', 'group_servers', 'server');
+    
+    # The method keyword automatically injects the annoying my $self = shift; for you.
+    method servers {
+        return $self->result_source->schema->resultset('Server')->search({ ... });
+    }
+
+The dirty way:
+
+    package Your::Schema::Group;
+    use Sub::Name;
+    
+    # ... declare columns ...
+    
+    __PACKAGE__->has_many('group_servers', 'Your::Schema::GroupServer', 'group_id');
+    __PACKAGE__->many_to_many('servers', 'group_servers', 'server');
+    
+    *servers = subname servers => sub {
+        my $self = shift;
+        return $self->result_source->schema->resultset('Server')->search({ ... });
+    };
+    
 =back
 
 =head2 Notes for CDBI users
@@ -569,7 +641,7 @@
 second one will use a default port of 5433, while L<DBD::Pg> is compiled with a
 default port of 5432.
 
-You can chance the port setting in C<postgresql.conf>.
+You can change the port setting in C<postgresql.conf>.
 
 =item I've lost or forgotten my mysql password
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Intro.pod
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Intro.pod	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Intro.pod	2010-02-16 10:26:12 UTC (rev 8720)
@@ -11,7 +11,7 @@
 =head1 THE DBIx::Class WAY
 
 Here are a few simple tips that will help you get your bearings with
-DBIx::Class.  
+DBIx::Class.
 
 =head2 Tables become Result classes
 
@@ -29,7 +29,7 @@
 =head2 It's all about the ResultSet
 
 So, we've got some ResultSources defined.  Now, we want to actually use those
-definitions to help us translate the queries we need into handy perl objects!  
+definitions to help us translate the queries we need into handy perl objects!
 
 Let's say we defined a ResultSource for an "album" table with three columns:
 "albumid", "artist", and "title".  Any time we want to query this table, we'll
@@ -39,18 +39,18 @@
   SELECT albumid, artist, title FROM album;
 
 Would be retrieved by creating a ResultSet object from the album table's
-ResultSource, likely by using the "search" method.  
+ResultSource, likely by using the "search" method.
 
 DBIx::Class doesn't limit you to creating only simple ResultSets -- if you
 wanted to do something like:
 
   SELECT title FROM album GROUP BY title;
 
-You could easily achieve it. 
+You could easily achieve it.
 
-The important thing to understand: 
+The important thing to understand:
 
-  Any time you would reach for a SQL query in DBI, you are 
+  Any time you would reach for a SQL query in DBI, you are
   creating a DBIx::Class::ResultSet.
 
 =head2 Search is like "prepare"
@@ -105,24 +105,22 @@
 Next, create each of the classes you want to load as specified above:
 
   package My::Schema::Result::Album;
-  use base qw/DBIx::Class/;
+  use base qw/DBIx::Class::Core/;
 
-Load any components required by each class with the load_components() method.
-This should consist of "Core" plus any additional components you want to use.
-For example, if you want serial/auto-incrementing primary keys:
+Load any additional components you may need with the load_components() method,
+and provide component configuration if required. For example, if you want
+automatic row ordering:
 
-  __PACKAGE__->load_components(qw/ PK::Auto Core /);
+  __PACKAGE__->load_components(qw/ Ordered /);
+  __PACKAGE__->position_column('rank');
 
-C<PK::Auto> is supported for many databases; see L<DBIx::Class::Storage::DBI>
-for more information.
-
 Set the table for your class:
 
   __PACKAGE__->table('album');
 
 Add columns to your class:
 
-  __PACKAGE__->add_columns(qw/ albumid artist title /);
+  __PACKAGE__->add_columns(qw/ albumid artist title rank /);
 
 Each column can also be set up with its own accessor, data_type and other pieces
 of information that it may be useful to have -- just pass C<add_columns> a hash:
@@ -142,19 +140,26 @@
                               is_auto_increment => 0,
                               default_value => '',
                             },
-                          title  => 
+                          title  =>
                             { data_type => 'varchar',
                               size      => 256,
                               is_nullable => 0,
                               is_auto_increment => 0,
                               default_value => '',
+                            },
+                          rank =>
+                            { data_type => 'integer',
+                              size      => 16,
+                              is_nullable => 0,
+                              is_auto_increment => 0,
+                              default_value => '',
                             }
                          );
 
 DBIx::Class doesn't directly use most of this data yet, but various related
 modules such as L<DBIx::Class::WebForm> make use of it. Also it allows you to
 create your database tables from your Schema, instead of the other way around.
-See L<SQL::Translator> for details.
+See L<DBIx::Class::Schema/deploy> for details.
 
 See L<DBIx::Class::ResultSource> for more details of the possible column
 attributes.
@@ -176,7 +181,8 @@
 make a predefined accessor for fetching objects that contain this Table's
 foreign key:
 
-  __PACKAGE__->has_many('albums', 'My::Schema::Result::Artist', 'album_id');
+  # in My::Schema::Result::Artist
+  __PACKAGE__->has_many('albums', 'My::Schema::Result::Album', 'artist');
 
 See L<DBIx::Class::Relationship> for more information about the various types of
 available relationships and how you can design your own.
@@ -202,9 +208,13 @@
 
 =head2 Connecting
 
-To connect to your Schema, you need to provide the connection details.  The
-arguments are the same as for L<DBI/connect>:
+To connect to your Schema, you need to provide the connection details or a
+database handle.
 
+=head3 Via connection details
+
+The arguments are the same as for L<DBI/connect>:
+
   my $schema = My::Schema->connect('dbi:SQLite:/home/me/myapp/my.db');
 
 You can create as many different schema instances as you need. So if you have a
@@ -215,7 +225,7 @@
 Note that L<DBIx::Class::Schema> does not cache connections for you. If you use
 multiple connections, you need to do this manually.
 
-To execute some sql statements on every connect you can add them as an option in
+To execute some SQL statements on every connect you can add them as an option in
 a special fifth argument to connect:
 
   my $another_schema = My::Schema->connect(
@@ -229,6 +239,16 @@
 See L<DBIx::Class::Schema::Storage::DBI/connect_info> for more information about
 this and other special C<connect>-time options.
 
+=head3 Via a database handle
+
+The supplied coderef is expected to return a single connected database handle
+(e.g. a L<DBI> C<$dbh>):
+
+  my $schema = My::Schema->connect (
+    sub { Some::DBH::Factory->connect },
+    \%extra_attrs,
+  );
+
 =head2 Basic usage
 
 Once you've defined the basic classes, either manually or using
@@ -255,8 +275,8 @@
   $album->set_column('title', 'Presence');
   $title = $album->get_column('title');
 
-Just like with L<Class::DBI>, you call C<update> to commit your changes to the
-database:
+Just like with L<Class::DBI>, you call C<update> to save your changes to the
+database (by executing the actual C<UPDATE> statement):
 
   $album->update;
 
@@ -273,7 +293,7 @@
 returns an instance of C<My::Schema::Result::Album> that can be used to access the data
 in the new record:
 
-  my $new_album = $schema->resultset('Album')->create({ 
+  my $new_album = $schema->resultset('Album')->create({
     title  => 'Wish You Were Here',
     artist => 'Pink Floyd'
   });

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Joining.pod
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Joining.pod	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Joining.pod	2010-02-16 10:26:12 UTC (rev 8720)
@@ -17,7 +17,7 @@
 But I'll explain anyway. Assuming you have created your database in a
 more or less sensible way, you will end up with several tables that
 contain C<related> information. For example, you may have a table
-containing information about C<CDs>, containing the CD title and it's
+containing information about C<CD>s, with the CD title and its
 year of publication, and another table containing all the C<Track>s
 for the CDs, one track per row.
 
@@ -34,7 +34,8 @@
 So, joins are a way of extending simple select statements to include
 fields from other, related, tables. There are various types of joins,
 depending on which combination of the data you wish to retrieve; see
-MySQL's doc on JOINs: L<http://dev.mysql.com/doc/refman/5.0/en/join.html>.
+MySQL's doc on JOINs:
+L<http://dev.mysql.com/doc/refman/5.0/en/join.html>.
 
 =head1 DEFINING JOINS AND RELATIONSHIPS
 
@@ -42,7 +43,7 @@
 be defined in the L<ResultSource|DBIx::Class::Manual::Glossary/ResultSource> for the
 table. If the relationship needs to be accessed in both directions
 (i.e. Fetch all tracks of a CD, and fetch the CD data for a Track),
-then it needs to be defined in both tables.
+then it needs to be defined for both tables.
 
 For the CDs/Tracks example, that means writing, in C<MySchema::CD>:
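
 A sketch of the two declarations (class and column names assumed from the
 example):

   # in MySchema::CD
   __PACKAGE__->has_many('tracks', 'MySchema::Track', 'cd');

   # in MySchema::Track
   __PACKAGE__->belongs_to('cd', 'MySchema::CD', 'cd');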
 
@@ -68,14 +69,15 @@
 
 When performing either a L<search|DBIx::Class::ResultSet/search> or a
 L<find|DBIx::Class::ResultSet/find> operation, you can specify which
-C<relations> to also fetch data from (or sort by), using the
+C<relations> to also refine your results based on, using the
 L<join|DBIx::Class::ResultSet/join> attribute, like this:
 
   $schema->resultset('CD')->search(
-    { 'Title' => 'Funky CD' },
+    { 'Title' => 'Funky CD',
+      'tracks.Name' => { like => 'T%' }
+    },
     { join      => 'tracks',
-      '+select' => [ 'tracks.Name', 'tracks.Artist' ],
-      '+as'     => [ 'TrackName', 'ArtistName' ]
+      order_by  => ['tracks.id'],
     }
   );
 
@@ -84,18 +86,125 @@
 L<DBIx::Class::ResultSet/ATTRIBUTES>, but here's a quick break down:
 
 The first argument to search is a hashref of the WHERE attributes, in
-this case a simple restriction on the Title column. The second
-argument is a hashref of attributes to the search, '+select' adds
-extra columns to the select (from the joined table(s) or from
-calculations), and '+as' gives aliases to those fields.
+this case a restriction on the Title column in the CD table, and a
+restriction on the name of the track in the Tracks table, but ONLY for
+tracks actually related to the chosen CD(s). The second argument is a
+hashref of attributes to the search; the results will be returned
+sorted by the C<id> of the related tracks.
 
-'join' specifies which C<relationships> to include in the query. The
-distinction between C<relationships> and C<tables> is important here,
-only the C<relationship> names are valid.
+The special 'join' attribute specifies which C<relationships> to
+include in the query. The distinction between C<relationships> and
+C<tables> is important here, only the C<relationship> names are valid.
 
-This example should magically produce SQL like the second select in
-L</WHAT ARE JOINS> above.
+This slightly nonsensical example will produce SQL similar to:
 
+  SELECT cd.ID, cd.Title, cd.Year FROM CD cd JOIN Tracks tracks ON cd.ID = tracks.CDID WHERE cd.Title = 'Funky CD' AND tracks.Name LIKE 'T%' ORDER BY tracks.id;
+
+=head1 FETCHING RELATED DATA
+
+Another common use for joining to related tables is to fetch the data
+from both tables in one query, preventing extra round-trips to the
+database. See the example above in L</WHAT ARE JOINS>.
+
+Three techniques are described here. Of the three, only the
+C<prefetch> technique will deal sanely with fetching related objects
+over a C<has_many> relation. The others work fine for 1 to 1 type
+relationships.
+
+=head2 Whole related objects
+
+To fetch entire related objects, e.g. CDs and all Track data, use the
+'prefetch' attribute:
+
+  $schema->resultset('CD')->search(
+    { 'Title' => 'Funky CD',
+    },
+    { prefetch      => 'tracks',
+      order_by  => ['tracks.id'],
+    }
+  );
+
+This will produce SQL similar to the following:
+
+  SELECT cd.ID, cd.Title, cd.Year, tracks.id, tracks.Name, tracks.Artist FROM CD JOIN Tracks ON CD.ID = tracks.CDID WHERE cd.Title = 'Funky CD' ORDER BY tracks.id;
+
+The syntax of 'prefetch' is the same as 'join' and implies the
+joining, so there is no need to use both together.
+
+=head2 Subset of related fields
+
+To fetch a subset of the related fields, the '+select' and '+as'
+attributes can be used. For example, if the CD data is required and
+just the track name from the Tracks table:
+
+  $schema->resultset('CD')->search(
+    { 'Title' => 'Funky CD',
+    },
+    { join      => 'tracks',
+      '+select' => ['tracks.Name'],
+      '+as'     => ['track_name'],
+      order_by  => ['tracks.id'],
+    }
+  );
+
+Which will produce the query:
+
+  SELECT cd.ID, cd.Title, cd.Year, tracks.Name FROM CD JOIN Tracks ON CD.ID = tracks.CDID WHERE cd.Title = 'Funky CD' ORDER BY tracks.id;
+
+Note that the '+as' does not produce an SQL 'AS' keyword in the
+output; see L<DBIx::Class::Manual::FAQ> for an explanation.
+
+This type of column restriction has a downside: the resulting $row
+object will have no 'track_name' accessor:
+
+  while(my $row = $search_rs->next) {
+     print $row->track_name; ## ERROR
+  }
+
+Instead C<get_column> must be used:
+
+  while(my $row = $search_rs->next) {
+     print $row->get_column('track_name'); ## WORKS
+  }
+
+=head2 Incomplete related objects
+
+In rare circumstances, you may also wish to fetch related data as
+incomplete objects. The usual reason to do so is when the related table
+has a very large field you don't need for the current data
+output. This is better solved by storing that field in a separate
+table which you only join to when needed.
+
+To fetch an incomplete related object, supply the dotted notation to the '+as' attribute: 
+
+  $schema->resultset('CD')->search(
+    { 'Title' => 'Funky CD',
+    },
+    { join      => 'tracks',
+      '+select' => ['tracks.Name'],
+      '+as'     => ['tracks.Name'], 
+      order_by  => ['tracks.id'],
+    }
+  );
+
+Which will produce the same query as above:
+
+  SELECT cd.ID, cd.Title, cd.Year, tracks.Name FROM CD JOIN Tracks ON CD.ID = tracks.CDID WHERE cd.Title = 'Funky CD' ORDER BY tracks.id;
+
+Now you can access the result using the relationship accessor:
+
+  while(my $row = $search_rs->next) {
+     print $row->tracks->name; ## WORKS
+  }
+
+However, this will produce broken objects. If the tracks id column is
+not fetched, the object will not be usable for any operation other
+than reading its data. Use the L</Whole related objects> method as
+much as possible to avoid confusion in your code later.
+
+Broken means: updates will not work, fetching other related objects
+will not work, and deleting the object will not work.
+
 =head1 COMPLEX JOINS AND STUFF
 
 =head2 Across multiple relations
@@ -114,18 +223,16 @@
   $schema->resultset('CD')->search(
     { 'Title' => 'Funky CD' },
     { join      => { 'tracks' => 'artist' },
-      '+select' => [ 'tracks.Name', 'artist.Artist' ],
-      '+as'     => [ 'TrackName', 'ArtistName' ]
     }
   );
 
 Which is:
 
-  SELECT me.ID, me.Title, me.Year, tracks.Name, artist.Artist FROM CD me JOIN Tracks tracks ON CD.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD';
+  SELECT me.ID, me.Title, me.Year FROM CD me JOIN Tracks tracks ON me.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD';
 
 To perform joins using relations of the tables you are joining to, use
 a hashref to indicate the join depth. This can theoretically go as
-deep as you like (warning, contrived examples!): 
+deep as you like (warning: contrived examples!): 
 
   join => { room => { table => 'leg' } }
 
@@ -147,12 +254,10 @@
     { 'Title' => 'Funky CD' },
     { join      => { 'tracks' => 'artist' },
       order_by  => [ 'tracks.Name', 'artist.Artist' ],
-      '+select' => [ 'tracks.Name', 'artist.Artist' ],
-      '+as'     => [ 'TrackName', 'ArtistName' ]
     }
   );
 
-  SELECT me.ID, me.Title, me.Year, tracks.Name, artist.Artist FROM CD me JOIN Tracks tracks ON CD.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD' ORDER BY tracks.Name, artist.Artist;
+  SELECT me.ID, me.Title, me.Year FROM CD me JOIN Tracks tracks ON me.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD' ORDER BY tracks.Name, artist.Artist;
 
 This is essential if any of your tables have columns with the same names.
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Reading.pod
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Reading.pod	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Reading.pod	2010-02-16 10:26:12 UTC (rev 8720)
@@ -17,14 +17,14 @@
 Methods should be documented in the files which also contain the code
 for the method, or that file should be hidden from PAUSE completely,
 in which case the methods are documented in the file which loads
-it. Methods may also be documented and refered to in files
+it. Methods may also be documented and referred to in files
 representing the major objects or components on which they can be
 called.
 
 For example, L<DBIx::Class::Relationship> documents the methods
 actually coded in the helper relationship classes like
 DBIx::Class::Relationship::BelongsTo. The BelongsTo file itself is
-hidden from pause as it has no documentation. The accessors created by
+hidden from PAUSE as it has no documentation. The accessors created by
 relationships should be mentioned in L<DBIx::Class::Row>, the major
 object that they will be called on.
 
@@ -46,7 +46,7 @@
 what the method returns.
 
 The first item provides a list of all possible values for the
-arguments of the method in order, separated by C<, >, preceeded by the
+arguments of the method in order, separated by C<, >, preceded by the
 text "Arguments: "
 
 Example (for the belongs_to relationship):
@@ -145,10 +145,10 @@
 =item *
 
 The argument list is followed by some examples of how to use the
-method, using it's various types of arguments.
+method, using its various types of arguments.
 
 The examples can also include ways to use the results if
-applicable. For instance if the documentation is for a relationship
+applicable. For instance, if the documentation is for a relationship
 type, the examples can include how to call the resulting relation
 accessor, how to use the relation name in a search and so on.
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Troubleshooting.pod
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Troubleshooting.pod	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Manual/Troubleshooting.pod	2010-02-16 10:26:12 UTC (rev 8720)
@@ -17,13 +17,13 @@
 
 Alternatively use the C<< storage->debug >> class method:-
 
-  $class->storage->debug(1);
+  $schema->storage->debug(1);
 
 To send the output somewhere else set debugfh:-
 
-  $class->storage->debugfh(IO::File->new('/tmp/trace.out', 'w');
+  $schema->storage->debugfh(IO::File->new('/tmp/trace.out', 'w'));
 
-Alternatively you can do this with the environment variable too:-
+Alternatively you can do this with the environment variable, too:-
 
   export DBIC_TRACE="1=/tmp/trace.out"
 
@@ -51,9 +51,8 @@
 
 There's likely a syntax error in the table class referred to elsewhere
 in this error message.  In particular make sure that the package
-declaration is correct, so for a schema C< MySchema > you need to
-specify a fully qualified namespace: C< package MySchema::MyTable; >
-for example.
+declaration is correct. For example, for a schema C< MySchema > 
+you need to specify a fully qualified namespace: C< package MySchema::MyTable; >.
 
 =head2 syntax error at or near "<something>" ...
 
@@ -100,29 +99,21 @@
 L<DBIx::Class::Manual::Cookbook/Setting_quoting_for_the_generated_SQL> for
 details.
 
-Note that quoting may lead to problems with C<order_by> clauses, see
-L<... column "foo DESC" does not exist ...> for info on avoiding those.
-
 =head2 column "foo DESC" does not exist ...
 
-This can happen if you've turned on quoting and then done something like
-this:
+This can happen if you are still using the obsolete order hack, and also
+happen to turn on SQL-quoting.
 
   $rs->search( {}, { order_by => [ 'name DESC' ] } );
 
-This results in SQL like this:
+Since L<DBIx::Class> >= 0.08100 and L<SQL::Abstract> >= 1.50 the above
+should be written as:
 
-  ... ORDER BY "name DESC"
+  $rs->search( {}, { order_by => { -desc => 'name' } } );
 
-The solution is to pass your order_by items as scalar references to avoid
-quoting:
+For more ways to express order clauses refer to
+L<SQL::Abstract/ORDER_BY_CLAUSES>.
 
-  $rs->search( {}, { order_by => [ \'name DESC' ] } );
-
-Now you'll get SQL like this:
-
-  ... ORDER BY name DESC
-
 =head2 Perl Performance Issues on Red Hat Systems
 
 There is a problem with slow performance of certain DBIx::Class
@@ -141,20 +132,31 @@
   Fedora 8     - perl-5.8.8-41.fc8
   RHEL5        - perl-5.8.8-15.el5_2.1
 
-The issue is due to perl doing an exhaustive search of blessed objects
+This issue is due to perl doing an exhaustive search of blessed objects
 under certain circumstances.  The problem shows up as performance
-degredation exponential to the number of L<DBIx::Class> row objects in
-memory, so can be unoticeable with certain data sets, but with huge
+degradation exponential to the number of L<DBIx::Class> row objects in
+memory, so can be unnoticeable with certain data sets, but with huge
 performance impacts on other datasets.
 
-A pair of tests for susceptability to the issue, and performance effects
+A pair of tests for susceptibility to the issue and performance effects
 of the bless/overload problem can be found in the L<DBIx::Class> test
-suite in the file C<t/99rh_perl_perf_bug.t>
+suite, in the C<t/99rh_perl_perf_bug.t> file.
 
 Further information on this issue can be found in
 L<https://bugzilla.redhat.com/show_bug.cgi?id=379791>,
 L<https://bugzilla.redhat.com/show_bug.cgi?id=460308> and
 L<http://rhn.redhat.com/errata/RHBA-2008-0876.html>
 
+=head2 Excessive Memory Allocation with TEXT/BLOB/etc. Columns and Large LongReadLen
+
+It has been observed, using L<DBD::ODBC>, that creating a L<DBIx::Class::Row> 
+object which includes a column of data type TEXT/BLOB/etc. will allocate 
+LongReadLen bytes.  This allocation does not leak, but if LongReadLen 
+is large in size, and many such row objects are created, e.g. as the 
+output of a ResultSet query, the memory footprint of the Perl interpreter 
+can grow very large.
+
+The solution is to use the smallest practical value for LongReadLen.
+
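+For example, a sketch of setting it at connect time (the value below is
+illustrative):
+
+  my $schema = My::Schema->connect(
+    $dsn, $user, $pass,
+    { LongReadLen => 64 * 1024 }, # smallest size that fits your data
+  );
+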
 =cut
 


Property changes on: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Optional
___________________________________________________________________
Name: svn:ignore
   + Dependencies.pod


Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Optional/Dependencies.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Optional/Dependencies.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Optional/Dependencies.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,396 @@
+package DBIx::Class::Optional::Dependencies;
+
+use warnings;
+use strict;
+
+use Carp;
+
+# NO EXTERNAL NON-5.8.1 CORE DEPENDENCIES EVER (e.g. C::A::G)
+# This module is to be loaded by Makefile.PL on a pristine system
+
+# POD is generated automatically by calling _gen_pod from the
+# Makefile.PL in $AUTHOR mode
+
+my $moose_basic = {
+  'Moose'                      => '0.98',
+  'MooseX::Types'              => '0.21',
+};
+
+my $admin_basic = {
+  %$moose_basic,
+  'MooseX::Types::Path::Class' => '0.05',
+  'MooseX::Types::JSON'        => '0.02',
+  'JSON::Any'                  => '1.22',
+  'namespace::autoclean'       => '0.09',
+};
+
+my $reqs = {
+  dist => {
+    #'Module::Install::Pod::Inherit' => '0.01',
+  },
+
+  replicated => {
+    req => {
+      %$moose_basic,
+      'namespace::clean'          => '0.11',
+      'Hash::Merge'               => '0.12',
+    },
+    pod => {
+      title => 'Storage::Replicated',
+      desc => 'Modules required for L<DBIx::Class::Storage::DBI::Replicated>',
+    },
+  },
+
+  admin => {
+    req => {
+      %$admin_basic,
+    },
+    pod => {
+      title => 'DBIx::Class::Admin',
+      desc => 'Modules required for the DBIx::Class administrative library',
+    },
+  },
+
+  admin_script => {
+    req => {
+      %$moose_basic,
+      %$admin_basic,
+      'Getopt::Long::Descriptive' => '0.081',
+      'Text::CSV'                 => '1.16',
+    },
+    pod => {
+      title => 'dbicadmin',
+      desc => 'Modules required for the CLI DBIx::Class interface dbicadmin',
+    },
+  },
+
+  deploy => {
+    req => {
+      'SQL::Translator'           => '0.11002',
+    },
+    pod => {
+      title => 'Storage::DBI::deploy()',
+      desc => 'Modules required for L<DBIx::Class::Storage::DBI/deploy> and L<DBIx::Class::Storage::DBI/deployment_statements>',
+    },
+  },
+
+  author => {
+    req => {
+      'Test::Pod'                 => '1.26',
+      'Test::Pod::Coverage'       => '1.08',
+      'Pod::Coverage'             => '0.20',
+      #'Test::NoTabs'              => '0.9',
+      #'Test::EOL'                 => '0.6',
+    },
+  },
+
+  core => {
+    req => {
+      # t/52cycle.t
+      'Test::Memory::Cycle'       => '0',
+      'Devel::Cycle'              => '1.10',
+
+      # t/36datetime.t
+      # t/60core.t
+      'DateTime::Format::SQLite'  => '0',
+
+      # t/96_is_deteministic_value.t
+      'DateTime::Format::Strptime'=> '0',
+    },
+  },
+
+  cdbicompat => {
+    req => {
+      'DBIx::ContextualFetch'     => '0',
+      'Class::DBI::Plugin::DeepAbstractSearch' => '0',
+      'Class::Trigger'            => '0',
+      'Time::Piece::MySQL'        => '0',
+      'Clone'                     => '0',
+      'Date::Simple'              => '3.03',
+    },
+  },
+
+  rdbms_pg => {
+    req => {
+      $ENV{DBICTEST_PG_DSN}
+        ? (
+          'Sys::SigAction'        => '0',
+          'DBD::Pg'               => '2.009002',
+          'DateTime::Format::Pg'  => '0',
+        ) : ()
+    },
+  },
+
+  rdbms_mysql => {
+    req => {
+      $ENV{DBICTEST_MYSQL_DSN}
+        ? (
+          'DateTime::Format::MySQL' => '0',
+          'DBD::mysql'              => '0',
+        ) : ()
+    },
+  },
+
+  rdbms_oracle => {
+    req => {
+      $ENV{DBICTEST_ORA_DSN}
+        ? (
+          'DateTime::Format::Oracle' => '0',
+        ) : ()
+    },
+  },
+
+  rdbms_ase => {
+    req => {
+      $ENV{DBICTEST_SYBASE_DSN}
+        ? (
+          'DateTime::Format::Sybase' => 0,
+        ) : ()
+    },
+  },
+
+  rdbms_asa => {
+    req => {
+      (scalar grep $_, @ENV{qw/DBICTEST_SYBASE_ASA_DSN DBICTEST_SYBASE_ASA_ODBC_DSN/})
+        ? (
+          'DateTime::Format::Strptime' => 0,
+        ) : ()
+    },
+  },
+};
+
+
+sub _all_optional_requirements {
+  return { map { %{ $reqs->{$_}{req} || {} } } (keys %$reqs) };
+}
+
+sub req_list_for {
+  my ($class, $group) = @_;
+
+  croak "req_list_for() expects a requirement group name"
+    unless $group;
+
+  my $deps = $reqs->{$group}{req}
+    or croak "Requirement group '$group' does not exist";
+
+  return { %$deps };
+}
+
+
+our %req_availability_cache;
+sub req_ok_for {
+  my ($class, $group) = @_;
+
+  croak "req_ok_for() expects a requirement group name"
+    unless $group;
+
+  $class->_check_deps ($group) unless $req_availability_cache{$group};
+
+  return $req_availability_cache{$group}{status};
+}
+
+sub req_missing_for {
+  my ($class, $group) = @_;
+
+  croak "req_missing_for() expects a requirement group name"
+    unless $group;
+
+  $class->_check_deps ($group) unless $req_availability_cache{$group};
+
+  return $req_availability_cache{$group}{missing};
+}
+
+sub req_errorlist_for {
+  my ($class, $group) = @_;
+
+  croak "req_errorlist_for() expects a requirement group name"
+    unless $group;
+
+  $class->_check_deps ($group) unless $req_availability_cache{$group};
+
+  return $req_availability_cache{$group}{errorlist};
+}
+
+sub _check_deps {
+  my ($class, $group) = @_;
+
+  my $deps = $class->req_list_for ($group);
+
+  my %errors;
+  for my $mod (keys %$deps) {
+    if (my $ver = $deps->{$mod}) {
+      eval "use $mod $ver ()";
+    }
+    else {
+      eval "require $mod";
+    }
+
+    $errors{$mod} = $@ if $@;
+  }
+
+  if (keys %errors) {
+    my $missing = join (', ', map { $deps->{$_} ? "$_ >= $deps->{$_}" : $_ } (sort keys %errors) );
+    $missing .= " (see $class for details)" if $reqs->{$group}{pod};
+    $req_availability_cache{$group} = {
+      status => 0,
+      errorlist => { %errors },
+      missing => $missing,
+    };
+  }
+  else {
+    $req_availability_cache{$group} = {
+      status => 1,
+      errorlist => {},
+      missing => '',
+    };
+  }
+}
+
+# This is to be called by the author only (automatically in Makefile.PL)
+sub _gen_pod {
+  my $class = shift;
+  my $modfn = __PACKAGE__ . '.pm';
+  $modfn =~ s/\:\:/\//g;
+
+  require DBIx::Class;
+  my $distver = DBIx::Class->VERSION;
+
+  my @chunks = (
+    <<"EOC",
+#########################################################################
+#####################  A U T O G E N E R A T E D ########################
+#########################################################################
+#
+# The contents of this POD file are auto-generated.  Any changes you make
+# will be lost. If you need to change the generated text, edit _gen_pod()
+# at the end of $modfn
+#
+EOC
+    '=head1 NAME',
+    "$class - Optional module dependency specifications (for module authors)",
+    '=head1 SYNOPSIS (EXPERIMENTAL)',
+    <<EOS,
+B<THE USAGE SHOWN HERE IS EXPERIMENTAL>
+
+Somewhere in your build-file (e.g. L<Module::Install>'s Makefile.PL):
+
+  ...
+
+  configure_requires 'DBIx::Class' => '$distver';
+
+  require $class;
+
+  my \$deploy_deps = $class->req_list_for ('deploy');
+
+  for (keys %\$deploy_deps) {
+    requires \$_ => \$deploy_deps->{\$_};
+  }
+
+  ...
+
+Note that there are some caveats regarding C<configure_requires()>; more info
+can be found at L<Module::Install/configure_requires>.
+EOS
+    '=head1 DESCRIPTION',
+    <<'EOD',
+Some of the less-frequently used features of L<DBIx::Class> have external
+module dependencies on their own. In order not to burden the average user
+with modules he will never use, these optional dependencies are not included
+in the base Makefile.PL. Instead an exception with a descriptive message is
+thrown when a specific feature is missing one or several modules required for
+its operation. This module is the central holding place for the current list
+of such dependencies, for DBIx::Class core authors, and DBIx::Class extension
+authors alike.
+EOD
+    '=head1 CURRENT REQUIREMENT GROUPS',
+    <<'EOD',
+Dependencies are organized in C<groups> and each group can list one or more
+required modules, with an optional minimum version (or 0 for any version).
+The group name can be used in the methods described below.
+EOD
+  );
+
+  for my $group (sort keys %$reqs) {
+    my $p = $reqs->{$group}{pod}
+      or next;
+
+    my $modlist = $reqs->{$group}{req}
+      or next;
+
+    next unless keys %$modlist;
+
+    push @chunks, (
+      "=head2 $p->{title}",
+      "$p->{desc}",
+      '=over',
+      ( map { "=item * $_" . ($modlist->{$_} ? " >= $modlist->{$_}" : '') } (sort keys %$modlist) ),
+      '=back',
+      "Requirement group: B<$group>",
+    );
+  }
+
+  push @chunks, (
+    '=head1 METHODS',
+    '=head2 req_list_for',
+    '=over',
+    '=item Arguments: $group_name',
+    '=item Returns: \%list_of_module_version_pairs',
+    '=back',
+    <<EOD,
+This method should be used by DBIx::Class extension authors, to determine the
+version of modules a specific feature requires in the B<current> version of
+DBIx::Class. See the L<SYNOPSIS|/SYNOPSIS (EXPERIMENTAL)> for a real-world
+example.
+EOD
+
+    '=head2 req_ok_for',
+    '=over',
+    '=item Arguments: $group_name',
+    '=item Returns: 1|0',
+    '=back',
+    'Returns true or false depending on whether all modules required by C<$group_name> are present on the system and loadable',
+
+    '=head2 req_missing_for',
+    '=over',
+    '=item Arguments: $group_name',
+    '=item Returns: $error_message_string',
+    '=back',
+    <<EOD,
+Returns a single line string suitable for inclusion in larger error messages.
+This method would normally be used by a DBIx::Class core-module author, to
+indicate to the user that he needs to install specific modules before he will
+be able to use a specific feature.
+
+For example if the requirements for C<replicated> are not available, the
+returned string would look like:
+
+ Moose >= 0.98, MooseX::Types >= 0.21, namespace::clean (see $class for details)
+
+The author is expected to prepend the necessary text to this message before
+returning the actual error seen by the user.
+EOD
+
+    '=head2 req_errorlist_for',
+    '=over',
+    '=item Arguments: $group_name',
+    '=item Returns: \%list_of_loaderrors_per_module',
+    '=back',
+    <<'EOD',
+Returns a hashref containing the actual errors that occurred while attempting
+to load each module in the requirement group.
+EOD
+    '=head1 AUTHOR',
+    'See L<DBIx::Class/CONTRIBUTORS>.',
+    '=head1 LICENSE',
+    'You may distribute this code under the same terms as Perl itself',
+  );
+
+  my $fn = __FILE__;
+  $fn =~ s/\.pm$/\.pod/;
+
+  open (my $fh, '>', $fn) or croak "Unable to write to $fn: $!";
+  print $fh join ("\n\n", @chunks);
+  close ($fh);
+}
+
+1;
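
A quick usage sketch for reviewers (the 'deploy' group name comes from the
SYNOPSIS above; the surrounding application code is hypothetical and not
part of the patch):

  use DBIx::Class::Optional::Dependencies;

  # test the group before attempting to use the optional feature
  if ( DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
    $schema->deploy;
  }
  else {
    # req_missing_for() returns the one-line summary described in the POD
    die 'Unable to deploy: '
      . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy');
  }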

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Ordered.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Ordered.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Ordered.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -60,20 +60,20 @@
 
   #!/use/bin/perl
   use My::Item;
-  
+
   my $item = My::Item->create({ name=>'Matt S. Trout' });
   # If using grouping_column:
   my $item = My::Item->create({ name=>'Matt S. Trout', group_id=>1 });
-  
+
   my $rs = $item->siblings();
   my @siblings = $item->siblings();
-  
+
   my $sibling;
   $sibling = $item->first_sibling();
   $sibling = $item->last_sibling();
   $sibling = $item->previous_sibling();
   $sibling = $item->next_sibling();
-  
+
   $item->move_previous();
   $item->move_next();
   $item->move_first();
@@ -127,7 +127,7 @@
 This method specifies a value of L</position_column> which B<would
 never be assigned to a row> during normal operation. When
 a row is moved, its position is set to this value temporarily, so
-that any unique constrainst can not be violated. This value defaults
+that any unique constraints can not be violated. This value defaults
 to 0, which should work for all cases except when your positions do
 indeed start from 0.
 
@@ -272,14 +272,14 @@
     return defined $lsib ? $lsib : 0;
 }
 
-# an optimised method to get the last sibling position without inflating a row object
-sub _last_sibling_pos {
+# an optimized method to get the last sibling position value without inflating a row object
+sub _last_sibling_posval {
     my $self = shift;
     my $position_column = $self->position_column;
 
     my $cursor = $self->next_siblings->search(
         {},
-        { rows => 1, order_by => { '-desc' => $position_column }, columns => $position_column },
+        { rows => 1, order_by => { '-desc' => $position_column }, select => $position_column },
     )->cursor;
 
     my ($pos) = $cursor->next;
@@ -313,7 +313,7 @@
 
 sub move_next {
     my $self = shift;
-    return 0 unless $self->next_siblings->count;
+    return 0 unless defined $self->_last_sibling_posval;  # quick way to check for no more siblings
     return $self->move_to ($self->_position + 1);
 }
 
@@ -341,7 +341,11 @@
 
 sub move_last {
     my $self = shift;
-    return $self->move_to( $self->_group_rs->count );
+    my $last_posval = $self->_last_sibling_posval;
+
+    return 0 unless defined $last_posval;
+
+    return $self->move_to( $self->_position_from_value ($last_posval) );
 }
 
 =head2 move_to
@@ -358,38 +362,58 @@
     my( $self, $to_position ) = @_;
     return 0 if ( $to_position < 1 );
 
-    my $from_position = $self->_position;
-    return 0 if ( $from_position == $to_position );
-
     my $position_column = $self->position_column;
 
-    {
-        my $guard = $self->result_source->schema->txn_scope_guard;
+    my $guard;
 
-        my ($direction, @between);
-        if ( $from_position < $to_position ) {
-            $direction = -1;
-            @between = map { $self->_position_value ($_) } ( $from_position + 1, $to_position );
-        }
-        else {
-            $direction = 1;
-            @between = map { $self->_position_value ($_) } ( $to_position, $from_position - 1 );
-        }
+    if ($self->is_column_changed ($position_column) ) {
+      # something changed our position, we have no idea where we
+      # used to be - requery without using discard_changes
+      # (we need only a specific column back)
 
-        my $new_pos_val = $self->_position_value ($to_position);                              # record this before the shift
+      $guard = $self->result_source->schema->txn_scope_guard;
 
-        # we need to null-position the moved row if the position column is part of a constraint
-        if (grep { $_ eq $position_column } ( map { @$_ } (values %{{ $self->result_source->unique_constraints }} ) ) ) {
-            $self->_ordered_internal_update({ $position_column => $self->null_position_value });
-        }
+      my $cursor = $self->result_source->resultset->search(
+        $self->ident_condition,
+        { select => $position_column },
+      )->cursor;
 
-        $self->_shift_siblings ($direction, @between);
-        $self->_ordered_internal_update({ $position_column => $new_pos_val });
+      my ($pos) = $cursor->next;
+      $self->$position_column ($pos);
+      delete $self->{_dirty_columns}{$position_column};
+    }
 
-        $guard->commit;
+    my $from_position = $self->_position;
 
-        return 1;
+    if ( $from_position == $to_position ) {   # FIXME this will not work for non-numeric order
+      $guard->commit if $guard;
+      return 0;
     }
+
+    $guard ||= $self->result_source->schema->txn_scope_guard;
+
+    my ($direction, @between);
+    if ( $from_position < $to_position ) {
+      $direction = -1;
+      @between = map { $self->_position_value ($_) } ( $from_position + 1, $to_position );
+    }
+    else {
+      $direction = 1;
+      @between = map { $self->_position_value ($_) } ( $to_position, $from_position - 1 );
+    }
+
+    my $new_pos_val = $self->_position_value ($to_position);  # record this before the shift
+
+    # we need to null-position the moved row if the position column is part of a constraint
+    if (grep { $_ eq $position_column } ( map { @$_ } (values %{{ $self->result_source->unique_constraints }} ) ) ) {
+      $self->_ordered_internal_update({ $position_column => $self->null_position_value });
+    }
+
+    $self->_shift_siblings ($direction, @between);
+    $self->_ordered_internal_update({ $position_column => $new_pos_val });
+
+    $guard->commit;
+    return 1;
 }
 
 =head2 move_to_group
@@ -410,10 +434,7 @@
 sub move_to_group {
     my( $self, $to_group, $to_position ) = @_;
 
-    $self->throw_exception ('move_to_group() expects a group specification')
-        unless defined $to_group;
-
-    # if we're given a string, turn it into a hashref
+    # if we're given a single value, turn it into a hashref
     unless (ref $to_group eq 'HASH') {
         my @gcols = $self->_grouping_columns;
 
@@ -424,40 +445,72 @@
     my $position_column = $self->position_column;
 
     return 0 if ( defined($to_position) and $to_position < 1 );
+
+    # check if someone changed the _grouping_columns - this will
+    # prevent _is_in_group from working, so we need to requery the db
+    # for the original values
+    my (@dirty_cols, %values, $guard);
+    for ($self->_grouping_columns) {
+      $values{$_} = $self->get_column ($_);
+      push @dirty_cols, $_ if $self->is_column_changed ($_);
+    }
+
+    # re-query only the dirty columns, and restore them on the
+    # object (subsequent code will update them to the correct
+    # after-move values)
+    if (@dirty_cols) {
+      $guard = $self->result_source->schema->txn_scope_guard;
+
+      my $cursor = $self->result_source->resultset->search(
+        $self->ident_condition,
+        { select => \@dirty_cols },
+      )->cursor;
+
+      my @original_values = $cursor->next;
+      $self->set_inflated_columns ({ %values, map { $_ => shift @original_values } (@dirty_cols) });
+      delete $self->{_dirty_columns}{$_} for (@dirty_cols);
+    }
+
     if ($self->_is_in_group ($to_group) ) {
-        return 0 if not defined $to_position;
-        return $self->move_to ($to_position);
+      my $ret;
+      if (defined $to_position) {
+        $ret = $self->move_to ($to_position);
+      }
+
+      $guard->commit if $guard;
+      return $ret||0;
     }
 
-    {
-        my $guard = $self->result_source->schema->txn_scope_guard;
+    $guard ||= $self->result_source->schema->txn_scope_guard;
 
-        # Move to end of current group to adjust siblings
-        $self->move_last;
+    # Move to end of current group to adjust siblings
+    $self->move_last;
 
-        $self->set_inflated_columns({ %$to_group, $position_column => undef });
-        my $new_group_count = $self->_group_rs->count;
+    $self->set_inflated_columns({ %$to_group, $position_column => undef });
+    my $new_group_last_posval = $self->_last_sibling_posval;
+    my $new_group_last_position = $self->_position_from_value (
+      $new_group_last_posval
+    );
 
-        if ( not defined($to_position) or $to_position > $new_group_count) {
-            $self->set_column(
-                $position_column => $new_group_count
-                    ? $self->_next_position_value ( $self->_last_sibling_pos )
-                    : $self->_initial_position_value
-            );
-        }
-        else {
-            my $bumped_pos_val = $self->_position_value ($to_position);
-            my @between = ($to_position, $new_group_count);
-            $self->_shift_siblings (1, @between);   #shift right
-            $self->set_column( $position_column => $bumped_pos_val );
-        }
+    if ( not defined($to_position) or $to_position > $new_group_last_position) {
+      $self->set_column(
+        $position_column => $new_group_last_position
+          ? $self->_next_position_value ( $new_group_last_posval )
+          : $self->_initial_position_value
+      );
+    }
+    else {
+      my $bumped_pos_val = $self->_position_value ($to_position);
+      my @between = map { $self->_position_value ($_) } ($to_position, $new_group_last_position);
+      $self->_shift_siblings (1, @between);   #shift right
+      $self->set_column( $position_column => $bumped_pos_val );
+    }
 
-        $self->_ordered_internal_update;
+    $self->_ordered_internal_update;
 
-        $guard->commit;
+    $guard->commit;
 
-        return 1;
-    }
+    return 1;
 }
 
 =head2 insert
@@ -473,10 +526,10 @@
     my $position_column = $self->position_column;
 
     unless ($self->get_column($position_column)) {
-        my $lsib_pos = $self->_last_sibling_pos;
+        my $lsib_posval = $self->_last_sibling_posval;
         $self->set_column(
-            $position_column => (defined $lsib_pos
-                ? $self->_next_position_value ( $lsib_pos )
+            $position_column => (defined $lsib_posval
+                ? $self->_next_position_value ( $lsib_posval )
                 : $self->_initial_position_value
             )
         );
@@ -501,16 +554,47 @@
     # this is set by _ordered_internal_update()
     return $self->next::method(@_) if $self->{_ORDERED_INTERNAL_UPDATE};
 
-    my $upd = shift;
-    $self->set_inflated_columns($upd) if $upd;
-    my %changes = $self->get_dirty_columns;
-    $self->discard_changes;
-
     my $position_column = $self->position_column;
+    my @ordering_columns = ($self->_grouping_columns, $position_column);
 
+
+    # these steps are necessary to keep the external appearance of
+    # ->update($upd) so that other things overloading update() will
+    # work properly
+    my %original_values = $self->get_columns;
+    my %existing_changes = $self->get_dirty_columns;
+
+    # See if any of the *supplied* changes would affect the ordering
+    # The reason this is so contrived, is that we want to leverage
+    # the datatype aware value comparing, while at the same time
+    # keep the original value intact (it will be updated later by the
+    # corresponding routine)
+
+    my %upd = %{shift || {}};
+    my %changes = %existing_changes;
+
+    for (@ordering_columns) {
+        next unless exists $upd{$_};
+
+        # we do not want to keep propagating this to next::method
+        # as it will be a done deal by the time we get there
+        my $value = delete $upd{$_};
+        $self->set_inflated_columns ({ $_ => $value });
+
+        # see if an update resulted in a dirty column
+        # it is important to preserve the old value, as it
+        # will be needed to carry on a successful move()
+        # operation without re-querying the database
+        if ($self->is_column_changed ($_) && not exists $existing_changes{$_}) {
+            $changes{$_} = $value;
+            $self->set_inflated_columns ({ $_ => $original_values{$_} });
+            delete $self->{_dirty_columns}{$_};
+        }
+    }
+
     # if nothing group/position related changed - short circuit
-    if (not grep { exists $changes{$_} } ($self->_grouping_columns, $position_column) ) {
-        return $self->next::method( \%changes, @_ );
+    if (not grep { exists $changes{$_} } ( @ordering_columns ) ) {
+        return $self->next::method( \%upd, @_ );
     }
 
     {
@@ -522,37 +606,37 @@
             # create new_group by taking the current group and inserting changes
             my $new_group = {$self->_grouping_clause};
             foreach my $col (keys %$new_group) {
-                if (exists $changes{$col}) {
-                    $new_group->{$col} = delete $changes{$col}; # don't want to pass this on to next::method
-                }
+                $new_group->{$col} = $changes{$col} if exists $changes{$col};
             }
 
             $self->move_to_group(
                 $new_group,
                 (exists $changes{$position_column}
-                    # The FIXME bit contradicts the documentation: when changing groups without supplying explicit
-                    # positions in move_to_group(), we push the item to the end of the group.
-                    # However when I was rewriting this, the position from the old group was clearly passed to the new one
+                    # The FIXME bit contradicts the documentation: POD states that
+                    # when changing groups without supplying explicit positions in
+                    # move_to_group(), we push the item to the end of the group.
+                    # However when I was rewriting this, the position from the old
+                    # group was clearly passed to the new one
                     # Probably needs to go away (by ribasushi)
-                    ? delete $changes{$position_column}     # means there was a position change supplied with the update too
-                    : $self->_position                      # FIXME!
+                    ? $changes{$position_column}    # means there was a position change supplied with the update too
+                    : $self->_position              # FIXME! (replace with undef)
                 ),
             );
         }
         elsif (exists $changes{$position_column}) {
-            $self->move_to(delete $changes{$position_column});
+            $self->move_to($changes{$position_column});
         }
 
         my @res;
         my $want = wantarray();
         if (not defined $want) {
-            $self->next::method( \%changes, @_ );
+            $self->next::method( \%upd, @_ );
         }
         elsif ($want) {
-            @res = $self->next::method( \%changes, @_ );
+            @res = $self->next::method( \%upd, @_ );
         }
         else {
-            $res[0] = $self->next::method( \%changes, @_ );
+            $res[0] = $self->next::method( \%upd, @_ );
         }
 
         $guard->commit;
@@ -598,22 +682,25 @@
 if you are working with preexisting non-normalised position data,
 or if you need to work with materialized path columns.
 
-=head2 _position
+=head2 _position_from_value
 
-  my $num_pos = $item->_position;
+  my $num_pos = $item->_position_from_value ( $pos_value )
 
-Returns the B<absolute numeric position> of the current object, with the
-first object being at position 1, its sibling at position 2 and so on.
-By default simply returns the value of L</position_column>.
+Returns the B<absolute numeric position> of an object with a B<position
+value> set to C<$pos_value>. By default simply returns C<$pos_value>.
 
 =cut
-sub _position {
-    my $self = shift;
+sub _position_from_value {
+    my ($self, $val) = @_;
 
+    return 0 unless defined $val;
+
 #    #the right way to do this
-#    return $self->previous_siblings->count + 1;
+#    return $self -> _group_rs
+#                 -> search({ $self->position_column => { '<=', $val } })
+#                 -> count
 
-    return $self->get_column ($self->position_column);
+    return $val;
 }
 
 =head2 _position_value
@@ -759,11 +846,24 @@
     );
 }
 
+=head2 _position
+
+  my $num_pos = $item->_position;
+
+Returns the B<absolute numeric position> of the current object, with the
+first object being at position 1, its sibling at position 2 and so on.
+
+=cut
+sub _position {
+    my $self = shift;
+    return $self->_position_from_value ($self->get_column ($self->position_column) );
+}
+
 =head2 _grouping_clause
 
 This method returns one or more name=>value pairs for limiting a search
-by the grouping column(s).  If the grouping column is not
-defined then this will return an empty list.
+by the grouping column(s).  If the grouping column is not defined then
+this will return an empty list.
 
 =cut
 sub _grouping_clause {
@@ -821,7 +921,7 @@
 triggering any of the positioning integrity code).
 
 Some day you might get confronted by datasets that have ambiguous
-positioning data (i.e. duplicate position values within the same group,
+positioning data (e.g. duplicate position values within the same group,
 in a table without unique constraints). When manually fixing such data
 keep in mind that you can not invoke L<DBIx::Class::Row/update> like
 you normally would, as it will get confused by the wrong data before
@@ -856,14 +956,14 @@
 
 =head2 Multiple Moves
 
-Be careful when issueing move_* methods to multiple objects.  If 
+Be careful when issuing move_* methods to multiple objects.  If 
 you've pre-loaded the objects then when you move one of the objects 
 the position of the other object will not reflect their new value 
 until you reload them from the database - see
 L<DBIx::Class::Row/discard_changes>.
 
 There are times when you will want to move objects as groups, such 
-as changeing the parent of several objects at once - this directly 
+as changing the parent of several objects at once - this directly 
 conflicts with this problem.  One solution is for us to write a 
 ResultSet class that supports a parent() method, for example.  Another 
 solution is to somehow automagically modify the objects that exist 
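
As a side note for reviewers: after this refactor, ordering changes can be
funneled through a plain update() call as well. A sketch, assuming a result
class with position_column 'position' and grouping_column 'group_id' (both
names hypothetical):

  # move within the current group
  $item->update ({ position => 3 });

  # move to another group - equivalent to move_to_group()
  $item->update ({ group_id => 2, position => 1 });

  # unrelated columns in the same call are passed through to the
  # underlying DBIx::Class::Row update() unchanged
  $item->update ({ name => 'renamed', position => 5 });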

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/PK/Auto.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/PK/Auto.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/PK/Auto.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -11,7 +11,7 @@
 
 =head1 SYNOPSIS
 
-__PACKAGE__->load_components(qw/Core/);
+use base 'DBIx::Class::Core';
 __PACKAGE__->set_primary_key('id');
 
 =head1 DESCRIPTION
@@ -19,8 +19,6 @@
 This class overrides the insert method to get automatically incremented primary
 keys.
 
-  __PACKAGE__->load_components(qw/Core/);
-
 PK::Auto is now part of Core.
 
 See L<DBIx::Class::Manual::Component> for details of component interactions.

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/PK.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/PK.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/PK.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -20,45 +20,6 @@
 
 =cut
 
-sub _ident_values {
-  my ($self) = @_;
-  return (map { $self->{_column_data}{$_} } $self->primary_columns);
-}
-
-=head2 discard_changes ($attrs)
-
-Re-selects the row from the database, losing any changes that had
-been made.
-
-This method can also be used to refresh from storage, retrieving any
-changes made since the row was last read from storage.
-
-$attrs is expected to be a hashref of attributes suitable for passing as the
-second argument to $resultset->search($cond, $attrs);
-
-=cut
-
-sub discard_changes {
-  my ($self, $attrs) = @_;
-  delete $self->{_dirty_columns};
-  return unless $self->in_storage; # Don't reload if we aren't real!
-  
-  if( my $current_storage = $self->get_from_storage($attrs)) {
-  	
-    # Set $self to the current.
-  	%$self = %$current_storage;
-  	
-    # Avoid a possible infinite loop with
-    # sub DESTROY { $_[0]->discard_changes }
-    bless $current_storage, 'Do::Not::Exist';
-    
-    return $self;  	
-  } else {
-    $self->in_storage(0);
-    return $self;  	
-  }
-}
-
 =head2 id
 
 Returns the primary key(s) for a row. Can't be called as
@@ -74,12 +35,28 @@
   return (wantarray ? @pk : $pk[0]);
 }
 
+sub _ident_values {
+  my ($self) = @_;
+  return (map { $self->{_column_data}{$_} } $self->primary_columns);
+}
+
 =head2 ID
 
 Returns a unique id string identifying a row object by primary key.
 Used by L<DBIx::Class::CDBICompat::LiveObjectIndex> and
 L<DBIx::Class::ObjectCache>.
 
+=over
+
+=item WARNING
+
+The default C<_create_ID> method used by this function orders the returned
+values by the alphabetical order of the primary column names, B<unlike>
+the L</id> method, which follows the same order in which columns were fed
+to L<DBIx::Class::ResultSource/set_primary_key>.
+
+=back
+
 =cut
 
 sub ID {
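
To illustrate the ordering warning added above - a sketch with a
hypothetical composite key:

  # declaration order is (year, artist)
  __PACKAGE__->set_primary_key (qw/year artist/);

  my @pk = $row->id;  # values returned in (year, artist) order
  my $ID = $row->ID;  # the default _create_ID sorts the column names,
                      # so the string is assembled in (artist, year) order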

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/Accessor.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/Accessor.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/Accessor.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -6,6 +6,11 @@
 use Sub::Name ();
 use Class::Inspector ();
 
+our %_pod_inherit_config = 
+  (
+   class_map => { 'DBIx::Class::Relationship::Accessor' => 'DBIx::Class::Relationship' }
+  );
+
 sub register_relationship {
   my ($class, $rel, $info) = @_;
   if (my $acc_type = $info->{attrs}{accessor}) {

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/Base.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/Base.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/Base.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -30,6 +30,8 @@
 
   __PACKAGE__->add_relationship('relname', 'Foreign::Class', $cond, $attrs);
 
+=head3 condition
+
 The condition needs to be an L<SQL::Abstract>-style representation of the
 join between the tables. When resolving the condition for use in a C<JOIN>,
 keys using the pseudo-table C<foreign> are resolved to mean "the Table on the
@@ -67,10 +69,19 @@
 To add an C<OR>ed condition, use an arrayref of hashrefs. See the
 L<SQL::Abstract> documentation for more details.
 
-In addition to the
-L<standard ResultSet attributes|DBIx::Class::ResultSet/ATTRIBUTES>,
-the following attributes are also valid:
+=head3 attributes
 
+The L<standard ResultSet attributes|DBIx::Class::ResultSet/ATTRIBUTES> may
+be used as relationship attributes. In particular, the 'where' attribute is
+useful for filtering relationships:
+
+     __PACKAGE__->has_many( 'valid_users', 'MyApp::Schema::User',
+        { 'foreign.user_id' => 'self.user_id' },
+        { where => { valid => 1 } }
+    );
+
+The following attributes are also valid:
+
 =over 4
 
 =item join_type
@@ -83,18 +94,18 @@
 
 An arrayref containing a list of accessors in the foreign class to create in
 the main class. If, for example, you do the following:
-  
+
   MyDB::Schema::CD->might_have(liner_notes => 'MyDB::Schema::LinerNotes',
     undef, {
       proxy => [ qw/notes/ ],
     });
-  
+
 Then, assuming MyDB::Schema::LinerNotes has an accessor named notes, you can do:
 
   my $cd = MyDB::Schema::CD->find(1);
   $cd->notes('Notes go here'); # set notes -- LinerNotes object is
                                # created if it doesn't exist
-  
+
 =item accessor
 
 Specifies the type of accessor that should be created for the relationship.
@@ -179,7 +190,7 @@
   my $rel_info = $self->relationship_info($rel);
   $self->throw_exception( "No such relationship ${rel}" )
     unless $rel_info;
-  
+
   return $self->{related_resultsets}{$rel} ||= do {
     my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
     $attrs = { %{$rel_info->{attrs} || {}}, %$attrs };
@@ -195,7 +206,7 @@
     if ($cond eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION) {
       my $reverse = $source->reverse_relationship_info($rel);
       foreach my $rev_rel (keys %$reverse) {
-        if ($reverse->{$rev_rel}{attrs}{accessor} eq 'multi') {
+        if ($reverse->{$rev_rel}{attrs}{accessor} && $reverse->{$rev_rel}{attrs}{accessor} eq 'multi') {
           $attrs->{related_objects}{$rev_rel} = [ $self ];
           Scalar::Util::weaken($attrs->{related_object}{$rev_rel}[0]);
         } else {
@@ -249,7 +260,7 @@
   ( $objects_rs ) = $rs->search_related_rs('relname', $cond, $attrs);
 
 This method works exactly the same as search_related, except that 
-it guarantees a restultset, even in list context.
+it guarantees a resultset, even in list context.
 
 =cut
 
@@ -381,7 +392,7 @@
 call set_from_related on the book.
 
 This is called internally when you pass existing objects as values to
-L<DBIx::Class::ResultSet/create>, or pass an object to a belongs_to acessor.
+L<DBIx::Class::ResultSet/create>, or pass an object to a belongs_to accessor.
 
 The columns are only set in the local copy of the object, call L</update> to
 set them in the storage.
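
As an aside, the arrayref-of-hashrefs OR condition mentioned in the POD
above could look like this (a sketch; the source and column names are
hypothetical, not part of the patch):

  # JOIN ... ON cd.artist = artist.id OR cd.producer = artist.id
  My::Schema::Artist->add_relationship ('credited_cds', 'My::Schema::CD',
    [
      { 'foreign.artist'   => 'self.id' },
      { 'foreign.producer' => 'self.id' },
    ],
    { accessor => 'multi', join_type => 'LEFT' },
  );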

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/BelongsTo.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/BelongsTo.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/BelongsTo.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -7,6 +7,11 @@
 use strict;
 use warnings;
 
+our %_pod_inherit_config = 
+  (
+   class_map => { 'DBIx::Class::Relationship::BelongsTo' => 'DBIx::Class::Relationship' }
+  );
+
 sub belongs_to {
   my ($class, $rel, $f_class, $cond, $attrs) = @_;
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/CascadeActions.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/CascadeActions.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/CascadeActions.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -4,6 +4,11 @@
 use strict;
 use warnings;
 
+our %_pod_inherit_config = 
+  (
+   class_map => { 'DBIx::Class::Relationship::CascadeActions' => 'DBIx::Class::Relationship' }
+  );
+
 sub delete {
   my ($self, @rest) = @_;
   return $self->next::method(@rest) unless ref $self;
@@ -34,8 +39,11 @@
   my @cascade = grep { $rels{$_}{attrs}{cascade_update} } keys %rels;
   foreach my $rel (@cascade) {
     next if (
+      $rels{$rel}{attrs}{accessor}
+        &&
       $rels{$rel}{attrs}{accessor} eq 'single'
-      && !exists($self->{_relationship_data}{$rel})
+        &&
+      !exists($self->{_relationship_data}{$rel})
     );
     $_->update for grep defined, $self->$rel;
   }

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/HasMany.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/HasMany.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/HasMany.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -4,6 +4,11 @@
 use strict;
 use warnings;
 
+our %_pod_inherit_config = 
+  (
+   class_map => { 'DBIx::Class::Relationship::HasMany' => 'DBIx::Class::Relationship' }
+  );
+
 sub has_many {
   my ($class, $rel, $f_class, $cond, $attrs) = @_;
 
@@ -35,7 +40,7 @@
     $class->throw_exception(
       "No such column ${f_key} on foreign class ${f_class} ($guess)"
     ) if $f_class_loaded && !$f_class->has_column($f_key);
-      
+
     $cond = { "foreign.${f_key}" => "self.${pri}" };
   }
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/HasOne.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/HasOne.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/HasOne.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,7 +3,13 @@
 
 use strict;
 use warnings;
+use Carp::Clan qw/^DBIx::Class/;
 
+our %_pod_inherit_config = 
+  (
+   class_map => { 'DBIx::Class::Relationship::HasOne' => 'DBIx::Class::Relationship' }
+  );
+
 sub might_have {
   shift->_has_one('LEFT' => @_);
 }
@@ -16,20 +22,16 @@
   my ($class, $join_type, $rel, $f_class, $cond, $attrs) = @_;
   unless (ref $cond) {
     $class->ensure_class_loaded($f_class);
-    my ($pri, $too_many) = $class->primary_columns;
 
+    my $pri = $class->_get_primary_key;
+  
     $class->throw_exception(
-      "might_have/has_one can only infer join for a single primary key; ".
-      "${class} has more"
-    ) if $too_many;
-
-    $class->throw_exception(
       "might_have/has_one needs a primary key  to infer a join; ".
       "${class} has none"
     ) if !defined $pri && (!defined $cond || !length $cond);
 
     my $f_class_loaded = eval { $f_class->columns };
-    my ($f_key,$guess);
+    my ($f_key,$too_many,$guess);
     if (defined $cond && length $cond) {
       $f_key = $cond;
       $guess = "caller specified foreign key '$f_key'";
@@ -37,11 +39,7 @@
       $f_key = $rel;
       $guess = "using given relationship '$rel' for foreign key";
     } else {
-      ($f_key, $too_many) = $f_class->primary_columns;
-      $class->throw_exception(
-        "might_have/has_one can only infer join for a single primary key; ".
-        "${f_class} has more"
-      ) if $too_many;
+      $f_key = $class->_get_primary_key($f_class);
       $guess = "using primary key of foreign class for foreign key";
     }
     $class->throw_exception(
@@ -49,6 +47,7 @@
     ) if $f_class_loaded && !$f_class->has_column($f_key);
     $cond = { "foreign.${f_key}" => "self.${pri}" };
   }
+  $class->_validate_cond($cond);
   $class->add_relationship($rel, $f_class,
    $cond,
    { accessor => 'single',
@@ -58,4 +57,34 @@
   1;
 }
 
+sub _get_primary_key {
+  my ( $class, $target_class ) = @_;
+  $target_class ||= $class;
+  my ($pri, $too_many) = $target_class->primary_columns;
+  $class->throw_exception(
+    "might_have/has_one can only infer join for a single primary key; ".
+    "${class} has more"
+  ) if $too_many;
+  return $pri;
+}
+
+sub _validate_cond {
+  my ($class, $cond )  = @_;
+
+  return if $ENV{DBIC_DONT_VALIDATE_RELS};
+  return unless 'HASH' eq ref $cond;
+  foreach my $foreign_id ( keys %$cond ) {
+    my $self_id = $cond->{$foreign_id};
+
+    # we can ignore a bad $self_id because add_relationship handles this
+    # warning
+    return unless $self_id =~ /^self\.(.*)$/;
+    my $key = $1;
+    my $column_info = $class->column_info($key);
+    if ( $column_info->{is_nullable} ) {
+      carp(qq'"might_have/has_one" must not be on columns with is_nullable set to true ($class/$key). This might indicate an incorrect use of those relationship helpers instead of belongs_to.');
+    }
+  }
+}
+
 1;
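
A sketch of what the new check catches (the class and column names are
hypothetical):

  # spouse_id is declared with is_nullable => 1, so this now carps,
  # suggesting the relationship should most likely be a belongs_to
  MyApp::Schema::Person->might_have (spouse => 'MyApp::Schema::Person',
    { 'foreign.id' => 'self.spouse_id' },
  );

  # the check can be silenced (not recommended) by setting the
  # environment variable before the schema classes are loaded:
  BEGIN { $ENV{DBIC_DONT_VALIDATE_RELS} = 1 }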

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/ManyToMany.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/ManyToMany.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/ManyToMany.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -7,6 +7,11 @@
 use Carp::Clan qw/^DBIx::Class/;
 use Sub::Name ();
 
+our %_pod_inherit_config = 
+  (
+   class_map => { 'DBIx::Class::Relationship::ManyToMany' => 'DBIx::Class::Relationship' }
+  );
+
 sub many_to_many {
   my ($class, $meth, $rel, $f_rel, $rel_attrs) = @_;
 
@@ -59,15 +64,15 @@
       my $rs = $self->search_related($rel)->search_related(
         $f_rel, @_ > 0 ? @_ : undef, { %{$rel_attrs||{}}, %$attrs }
       );
-	  return $rs;
+      return $rs;
     };
 
     my $meth_name = join '::', $class, $meth;
     *$meth_name = Sub::Name::subname $meth_name, sub {
-		my $self = shift;
-		my $rs = $self->$rs_meth( @_ );
-  		return (wantarray ? $rs->all : $rs);
-	};
+      my $self = shift;
+      my $rs = $self->$rs_meth( @_ );
+      return (wantarray ? $rs->all : $rs);
+    };
 
     my $add_meth_name = join '::', $class, $add_meth;
     *$add_meth_name = Sub::Name::subname $add_meth_name, sub {
@@ -97,7 +102,7 @@
       my $link = $self->search_related($rel)->new_result($link_vals);
       $link->set_from_related($f_rel, $obj);
       $link->insert();
-	  return $obj;
+      return $obj;
     };
 
     my $set_meth_name = join '::', $class, $set_meth;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/ProxyMethods.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/ProxyMethods.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship/ProxyMethods.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -6,6 +6,11 @@
 use Sub::Name ();
 use base qw/DBIx::Class/;
 
+our %_pod_inherit_config = 
+  (
+   class_map => { 'DBIx::Class::Relationship::ProxyMethods' => 'DBIx::Class::Relationship' }
+  );
+
 sub register_relationship {
   my ($class, $rel, $info) = @_;
   if (my $proxy_list = $info->{attrs}{proxy}) {

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Relationship.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -106,12 +106,12 @@
 All helper methods are called similar to the following template:
 
   __PACKAGE__->$method_name('relname', 'Foreign::Class', \%cond | \@cond, \%attrs);
-  
+
 Both C<$cond> and C<$attrs> are optional. Pass C<undef> for C<$cond> if
 you want to use the default value for it, but still want to set C<\%attrs>.
 
 See L<DBIx::Class::Relationship::Base> for documentation on the
-attrubutes that are allowed in the C<\%attrs> argument.
+attributes that are allowed in the C<\%attrs> argument.
 
 
 =head2 belongs_to
@@ -232,13 +232,13 @@
 
 =back
 
-Creates a one-to-many relationship, where the corresponding elements
-of the foreign class store the calling class's primary key in one (or
-more) of the foreign class columns. This relationship defaults to using
-the end of this classes namespace as the foreign key in C<$related_class>
-to resolve the join, unless C<$their_fk_column> specifies the foreign
-key column in C<$related_class> or C<cond> specifies a reference to a
-join condition hash.
+Creates a one-to-many relationship where the foreign class refers to
+this class's primary key. This relationship refers to zero or more
+records in the foreign table (e.g. a C<LEFT JOIN>). This relationship
+defaults to using the end of this class's namespace as the foreign key
+in C<$related_class> to resolve the join, unless C<$their_fk_column>
+specifies the foreign key column in C<$related_class> or C<cond>
+specifies a reference to a join condition hash.
 
 =over
 
@@ -297,7 +297,7 @@
     'My::DBIC::Schema::Book', 
     { 'foreign.author_id' => 'self.id' },
   );
-  
+
   # OR (similar result, assuming related_class is storing our PK, in "author")
   # (the "author" is guessed at from "Author" in the class namespace)
   My::DBIC::Schema::Author->has_many(
@@ -441,6 +441,17 @@
 for a L<list of standard resultset attributes|DBIx::Class::ResultSet/ATTRIBUTES>
 which can be assigned to relationships as well.
 
+Note that if you supply a condition on which to join, and the column in the
+current table allows nulls (i.e., has the C<is_nullable> attribute set to a
+true value), then C<might_have> will warn about this because it's naughty and
+you shouldn't do that.
+
+ "might_have/has_one" must not be on columns with is_nullable set to true (MySchema::SomeClass/key)
+
+If you must be naughty, you can suppress the warning by setting the
+C<DBIC_DONT_VALIDATE_RELS> environment variable to a true value.  Otherwise,
+you probably just want to use L<DBIx::Class::Relationship/belongs_to>.
+
 =head2 has_one
 
 =over 4
@@ -528,6 +539,11 @@
 for a L<list of standard resultset attributes|DBIx::Class::ResultSet/ATTRIBUTES>
 which can be assigned to relationships as well.
 
+Note that if you supply a condition on which to join, and the column in the
+current table allows nulls (i.e., has the C<is_nullable> attribute set to a
+true value), then the same warnings apply as with
+L<DBIx::Class::Relationship/might_have>.
+
 =head2 many_to_many
 
 =over 4

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSet.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSet.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSet.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -7,6 +7,7 @@
         'bool'   => "_bool",
         fallback => 1;
 use Carp::Clan qw/^DBIx::Class/;
+use DBIx::Class::Exception;
 use Data::Page;
 use Storable;
 use DBIx::Class::ResultSetColumn;
@@ -140,7 +141,7 @@
 =head1 OVERLOADING
 
 If a resultset is used in a numeric context it returns the L</count>.
-However, if it is used in a booleand context it is always true.  So if
+However, if it is used in a boolean context it is always true.  So if
 you want to check if a resultset has any results use C<if $rs != 0>.
 C<if $rs> will always be true.
 
@@ -290,10 +291,15 @@
     $rows = $self->get_cache;
   }
 
+  # reset the selector list
+  if (List::Util::first { exists $attrs->{$_} } qw{columns select as}) {
+     delete @{$our_attrs}{qw{select as columns +select +as +columns include_columns}};
+  }
+
   my $new_attrs = { %{$our_attrs}, %{$attrs} };
 
   # merge new attrs into inherited
-  foreach my $key (qw/join prefetch +select +as bind/) {
+  foreach my $key (qw/join prefetch +select +as +columns include_columns bind/) {
     next unless exists $attrs->{$key};
     $new_attrs->{$key} = $self->_merge_attr($our_attrs->{$key}, $attrs->{$key});
   }
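
A sketch of the new selector-reset semantics introduced here (the
resultset source and column names are hypothetical):

  my $rs = $schema->resultset('CD')->search ({}, { columns => ['title'] });

  # a fresh 'columns' (or 'select'/'as') now starts from a clean slate
  my $only_year = $rs->search ({}, { columns => ['year'] });

  # the additive variants still merge with the inherited list
  my $both = $rs->search ({}, { '+columns' => ['year'] });  # title AND year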
@@ -356,9 +362,9 @@
   }
 
   my $rs = (ref $self)->new($self->result_source, $new_attrs);
-  if ($rows) {
-    $rs->set_cache($rows);
-  }
+
+  $rs->set_cache($rows) if ($rows);
+
   return $rs;
 }
 
@@ -518,7 +524,7 @@
     # in ::Relationship::Base::search_related (the row method), and furthermore
     # the relationship is of the 'single' type. This means that the condition
     # provided by the relationship (already attached to $self) is sufficient,
-    # as there can be only one row in the databse that would satisfy the 
+    # as there can be only one row in the database that would satisfy the
     # relationship
   }
   else {
@@ -529,7 +535,7 @@
   }
 
   # Run the query
-  my $rs = $self->search ($query, $attrs);
+  my $rs = $self->search ($query, {result_class => $self->result_class, %$attrs});
   if (keys %{$rs->_resolved_attrs->{collapse}}) {
     my $row = $rs->next;
     carp "Query returned more than one row" if $rs->next;
@@ -570,12 +576,16 @@
   my $where = $self->_collapse_cond($self->{attrs}{where} || {});
   my $num_where = scalar keys %$where;
 
-  my @unique_queries;
+  my (@unique_queries, %seen_column_combinations);
   foreach my $name (@constraint_names) {
-    my @unique_cols = $self->result_source->unique_constraint_columns($name);
-    my $unique_query = $self->_build_unique_query($query, \@unique_cols);
+    my @constraint_cols = $self->result_source->unique_constraint_columns($name);
 
-    my $num_cols = scalar @unique_cols;
+    my $constraint_sig = join "\x00", sort @constraint_cols;
+    next if $seen_column_combinations{$constraint_sig}++;
+
+    my $unique_query = $self->_build_unique_query($query, \@constraint_cols);
+
+    my $num_cols = scalar @constraint_cols;
     my $num_query = scalar keys %$unique_query;
 
     my $total = $num_query + $num_where;
@@ -629,7 +639,7 @@
 =head2 search_related_rs
 
 This method works exactly the same as search_related, except that
-it guarantees a restultset, even in list context.
+it guarantees a resultset, even in list context.
 
 =cut
 
@@ -687,7 +697,7 @@
 
 =item B<Note>
 
-As of 0.08100, this method enforces the assumption that the preceeding
+As of 0.08100, this method enforces the assumption that the preceding
 query returns only one row. If more than one row is returned, you will receive
 a warning:
 
@@ -957,7 +967,9 @@
 
 sub _construct_object {
   my ($self, @row) = @_;
-  my $info = $self->_collapse_result($self->{_attrs}{as}, \@row);
+
+  my $info = $self->_collapse_result($self->{_attrs}{as}, \@row)
+    or return ();
   my @new = $self->result_class->inflate_result($self->result_source, @$info);
   @new = $self->{_attrs}{record_filter}->(@new)
     if exists $self->{_attrs}{record_filter};
@@ -1220,7 +1232,7 @@
 
   my $tmp_attrs = { %$attrs };
 
-  # take off any limits, record_filter is cdbi, and no point of ordering a count 
+  # take off any limits, record_filter is cdbi, and no point of ordering a count
   delete $tmp_attrs->{$_} for (qw/select as rows offset order_by record_filter/);
 
   # overwrite the selector (supplied by the storage)
@@ -1243,19 +1255,27 @@
 
   my $sub_attrs = { %$attrs };
 
-  # these can not go in the subquery, and there is no point of ordering it
-  delete $sub_attrs->{$_} for qw/collapse select as order_by/;
+  # extra selectors do not go in the subquery and there is no point of ordering it
+  delete $sub_attrs->{$_} for qw/collapse select _prefetch_select as order_by/;
 
-  # if we prefetch, we group_by primary keys only as this is what we would get out of the rs via ->next/->all
-  # clobber old group_by regardless
-  if ( keys %{$attrs->{collapse}} ) {
+  # if we multi-prefetch we group_by primary keys only as this is what we would
+  # get out of the rs via ->next/->all. We *DO WANT* to clobber old group_by regardless
+  if ( keys %{$attrs->{collapse}}  ) {
     $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->primary_columns) ]
   }
 
   $sub_attrs->{select} = $rsrc->storage->_subq_count_select ($rsrc, $sub_attrs);
 
+  # this is so that the query can be simplified e.g.
+  # * ordering can be thrown away in things like Top limit
+  $sub_attrs->{-for_count_only} = 1;
+
+  my $sub_rs = $rsrc->resultset_class->new ($rsrc, $sub_attrs);
+
   $attrs->{from} = [{
-    count_subq => $rsrc->resultset_class->new ($rsrc, $sub_attrs )->as_query
+    -alias => 'count_subq',
+    -source_handle => $rsrc->handle,
+    count_subq => $sub_rs->as_query,
   }];
 
   # the subquery replaces this
@@ -1264,7 +1284,6 @@
   return $self->_count_rs ($attrs);
 }
 
-
 sub _bool {
   return 1;
 }
@@ -1311,13 +1330,12 @@
 
   my @obj;
 
-  # TODO: don't call resolve here
   if (keys %{$self->_resolved_attrs->{collapse}}) {
-#  if ($self->{attrs}{prefetch}) {
-      # Using $self->cursor->all is really just an optimisation.
-      # If we're collapsing has_many prefetches it probably makes
-      # very little difference, and this is cleaner than hacking
-      # _construct_object to survive the approach
+    # Using $self->cursor->all is really just an optimisation.
+    # If we're collapsing has_many prefetches it probably makes
+    # very little difference, and this is cleaner than hacking
+    # _construct_object to survive the approach
+    $self->cursor->reset;
     my @row = $self->cursor->next;
     while (@row) {
       push(@obj, $self->_construct_object(@row));
@@ -1330,6 +1348,7 @@
   }
 
   $self->set_cache(\@obj) if $self->{attrs}{cache};
+
   return @obj;
 }
 
@@ -1344,6 +1363,8 @@
 =back
 
 Resets the resultset's cursor, so you can iterate through the elements again.
+Implicitly resets the storage cursor, so a subsequent L</next> will trigger
+another query.
 
 =cut
 
@@ -1386,8 +1407,12 @@
 
   my $rsrc = $self->result_source;
 
+  # if a condition exists we need to strip all table qualifiers
+  # if this is not possible we'll force a subquery below
+  my $cond = $rsrc->schema->storage->_strip_cond_qualifiers ($self->{cond});
+
   my $needs_group_by_subq = $self->_has_resolved_attr (qw/collapse group_by -join/);
-  my $needs_subq = $self->_has_resolved_attr (qw/row offset/);
+  my $needs_subq = $needs_group_by_subq || (not defined $cond) || $self->_has_resolved_attr(qw/row offset/);
 
   if ($needs_group_by_subq or $needs_subq) {
 
@@ -1405,7 +1430,8 @@
       if (my $g = $attrs->{group_by}) {
         my @current_group_by = map
           { $_ =~ /\./ ? $_ : "$attrs->{alias}.$_" }
-          (ref $g eq 'ARRAY' ? @$g : $g );
+          @$g
+        ;
 
         if (
           join ("\x00", sort @current_group_by)
@@ -1434,70 +1460,11 @@
     return $rsrc->storage->$op(
       $rsrc,
       $op eq 'update' ? $values : (),
-      $self->_cond_for_update_delete,
+      $cond,
     );
   }
 }
 
-
-# _cond_for_update_delete
-#
-# update/delete require the condition to be modified to handle
-# the differing SQL syntax available.  This transforms the $self->{cond}
-# appropriately, returning the new condition.
-
-sub _cond_for_update_delete {
-  my ($self, $full_cond) = @_;
-  my $cond = {};
-
-  $full_cond ||= $self->{cond};
-  # No-op. No condition, we're updating/deleting everything
-  return $cond unless ref $full_cond;
-
-  if (ref $full_cond eq 'ARRAY') {
-    $cond = [
-      map {
-        my %hash;
-        foreach my $key (keys %{$_}) {
-          $key =~ /([^.]+)$/;
-          $hash{$1} = $_->{$key};
-        }
-        \%hash;
-      } @{$full_cond}
-    ];
-  }
-  elsif (ref $full_cond eq 'HASH') {
-    if ((keys %{$full_cond})[0] eq '-and') {
-      $cond->{-and} = [];
-      my @cond = @{$full_cond->{-and}};
-       for (my $i = 0; $i < @cond; $i++) {
-        my $entry = $cond[$i];
-        my $hash;
-        if (ref $entry eq 'HASH') {
-          $hash = $self->_cond_for_update_delete($entry);
-        }
-        else {
-          $entry =~ /([^.]+)$/;
-          $hash->{$1} = $cond[++$i];
-        }
-        push @{$cond->{-and}}, $hash;
-      }
-    }
-    else {
-      foreach my $key (keys %{$full_cond}) {
-        $key =~ /([^.]+)$/;
-        $cond->{$1} = $full_cond->{$key};
-      }
-    }
-  }
-  else {
-    $self->throw_exception("Can't update/delete on resultset with condition unless hash or array");
-  }
-
-  return $cond;
-}
-
-
 =head2 update
 
 =over 4
@@ -1630,7 +1597,7 @@
       ],
      },
      { artistid => 5, name => 'Angsty-Whiny Girl', cds => [
-        { title => 'My parents sold me to a record company' ,year => 2005 },
+        { title => 'My parents sold me to a record company', year => 2005 },
         { title => 'Why Am I So Ugly?', year => 2006 },
         { title => 'I Got Surgery and am now Popular', year => 2007 }
       ],
@@ -1658,7 +1625,7 @@
     [qw/artistid name/],
     [100, 'A Formally Unknown Singer'],
     [101, 'A singer that jumped the shark two albums ago'],
-    [102, 'An actually cool singer.'],
+    [102, 'An actually cool singer'],
   ]);
 
 Please note an important effect on your data when choosing between void and
@@ -1672,11 +1639,11 @@
 =cut
 
 sub populate {
-  my $self = shift @_;
-  my $data = ref $_[0][0] eq 'HASH'
-    ? $_[0] : ref $_[0][0] eq 'ARRAY' ? $self->_normalize_populate_args($_[0]) :
-    $self->throw_exception('Populate expects an arrayref of hashes or arrayref of arrayrefs');
+  my $self = shift;
 
+  # cruft placed in a standalone method
+  my $data = $self->_normalize_populate_args(@_);
+
   if(defined wantarray) {
     my @created;
     foreach my $item (@$data) {
@@ -1684,10 +1651,19 @@
     }
     return wantarray ? @created : \@created;
   } else {
-    my ($first, @rest) = @$data;
+    my $first = $data->[0];
 
-    my @names = grep {!ref $first->{$_}} keys %$first;
-    my @rels = grep { $self->result_source->has_relationship($_) } keys %$first;
+    # if a column is a registered relationship, and is a non-blessed hash/array, consider
+    # it relationship data
+    my (@rels, @columns);
+    for (keys %$first) {
+      my $ref = ref $first->{$_};
+      $self->result_source->has_relationship($_) && ($ref eq 'ARRAY' or $ref eq 'HASH')
+        ? push @rels, $_
+        : push @columns, $_
+      ;
+    }
+
     my @pks = $self->result_source->primary_columns;
 
     ## do the belongs_to relationships
@@ -1716,17 +1692,21 @@
         delete $data->[$index]->{$rel};
         $data->[$index] = {%{$data->[$index]}, %$related};
 
-        push @names, keys %$related if $index == 0;
+        push @columns, keys %$related if $index == 0;
       }
     }
 
+    ## inherit the data locked in the conditions of the resultset
+    my ($rs_data) = $self->_merge_cond_with_data({});
+    delete @{$rs_data}{@columns};
+    my @inherit_cols = keys %$rs_data;
+    my @inherit_data = values %$rs_data;
+
     ## do bulk insert on current row
-    my @values = map { [ @$_{@names} ] } @$data;
-
     $self->result_source->storage->insert_bulk(
       $self->result_source,
-      \@names,
-      \@values,
+      [@columns, @inherit_cols],
+      [ map { [ @$_{@columns}, @inherit_data ] } @$data ],
     );
 
     ## do the has_many relationships
@@ -1735,7 +1715,7 @@
       foreach my $rel (@rels) {
         next unless $item->{$rel} && ref $item->{$rel} eq "ARRAY";
 
-        my $parent = $self->find(map {{$_=>$item->{$_}} } @pks)
+        my $parent = $self->find({map { $_ => $item->{$_} } @pks})
      || $self->throw_exception('Cannot find the relating object.');
 
         my $child = $parent->$rel;
@@ -1755,26 +1735,27 @@
   }
 }
 
-=head2 _normalize_populate_args ($args)
 
-Private method used by L</populate> to normalize its incoming arguments.  Factored
-out in case you want to subclass and accept new argument structures to the
-L</populate> method.
+# populate() arguments went through several incarnations
+# What we ultimately support is AoH
+sub _normalize_populate_args {
+  my ($self, $arg) = @_;
 
-=cut
-
-sub _normalize_populate_args {
-  my ($self, $data) = @_;
-  my @names = @{shift(@$data)};
-  my @results_to_create;
-  foreach my $datum (@$data) {
-    my %result_to_create;
-    foreach my $index (0..$#names) {
-      $result_to_create{$names[$index]} = $$datum[$index];
+  if (ref $arg eq 'ARRAY') {
+    if (ref $arg->[0] eq 'HASH') {
+      return $arg;
     }
-    push @results_to_create, \%result_to_create;
+    elsif (ref $arg->[0] eq 'ARRAY') {
+      my @ret;
+      my @colnames = @{$arg->[0]};
+      foreach my $values (@{$arg}[1 .. $#$arg]) {
+        push @ret, { map { $colnames[$_] => $values->[$_] } (0 .. $#colnames) };
+      }
+      return \@ret;
+    }
   }
-  return \@results_to_create;
+
+  $self->throw_exception('Populate expects an arrayref of hashrefs or arrayref of arrayrefs');
 }
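
The two argument shapes the normalization above accepts, as a quick
sketch (source and column names hypothetical):

  # arrayref of hashrefs - the canonical form, returned as-is
  $rs->populate ([
    { artistid => 1, name => 'A' },
    { artistid => 2, name => 'B' },
  ]);

  # arrayref of arrayrefs - the first row names the columns and the
  # whole structure is converted to the hashref form internally
  $rs->populate ([
    [ qw/artistid name/ ],
    [ 1, 'A' ],
    [ 2, 'B' ],
  ]);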
 
 =head2 pager
@@ -1863,46 +1844,66 @@
   $self->throw_exception( "new_result needs a hash" )
     unless (ref $values eq 'HASH');
 
-  my %new;
+  my ($merged_cond, $cols_from_relations) = $self->_merge_cond_with_data($values);
+
+  my %new = (
+    %$merged_cond,
+    @$cols_from_relations
+      ? (-cols_from_relations => $cols_from_relations)
+      : (),
+    -source_handle => $self->_source_handle,
+    -result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED
+  );
+
+  return $self->result_class->new(\%new);
+}
+
+# _merge_cond_with_data
+#
+# Takes a simple hash of K/V data and returns its copy merged with the
+# condition already present on the resultset. Additionally returns an
+# arrayref of value/condition names, which were inferred from related
+# objects (this is needed for in-memory related objects)
+sub _merge_cond_with_data {
+  my ($self, $data) = @_;
+
+  my (%new_data, @cols_from_relations);
+
   my $alias = $self->{attrs}{alias};
 
-  if (
-    defined $self->{cond}
-    && $self->{cond} eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION
-  ) {
-    %new = %{ $self->{attrs}{related_objects} || {} };  # nothing might have been inserted yet
-    $new{-from_resultset} = [ keys %new ] if keys %new;
-  } else {
+  if (! defined $self->{cond}) {
+    # just massage $data below
+  }
+  elsif ($self->{cond} eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION) {
+    %new_data = %{ $self->{attrs}{related_objects} || {} };  # nothing might have been inserted yet
+    @cols_from_relations = keys %new_data;
+  }
+  elsif (ref $self->{cond} ne 'HASH') {
     $self->throw_exception(
-      "Can't abstract implicit construct, condition not a hash"
-    ) if ($self->{cond} && !(ref $self->{cond} eq 'HASH'));
-
-    my $collapsed_cond = (
-      $self->{cond}
-        ? $self->_collapse_cond($self->{cond})
-        : {}
+      "Can't abstract implicit construct, resultset condition not a hash"
     );
-
+  }
+  else {
     # precendence must be given to passed values over values inherited from
     # the cond, so the order here is important.
-    my %implied =  %{$self->_remove_alias($collapsed_cond, $alias)};
-    while( my($col,$value) = each %implied ){
-      if(ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '='){
-        $new{$col} = $value->{'='};
+    my $collapsed_cond = $self->_collapse_cond($self->{cond});
+    my %implied = %{$self->_remove_alias($collapsed_cond, $alias)};
+
+    while ( my($col, $value) = each %implied ) {
+      if (ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '=') {
+        $new_data{$col} = $value->{'='};
         next;
       }
-      $new{$col} = $value if $self->_is_deterministic_value($value);
+      $new_data{$col} = $value if $self->_is_deterministic_value($value);
     }
   }
 
-  %new = (
-    %new,
-    %{ $self->_remove_alias($values, $alias) },
-    -source_handle => $self->_source_handle,
-    -result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED
+  %new_data = (
+    %new_data,
+    %{ $self->_remove_alias($data, $alias) },
   );
 
-  return $self->result_class->new(\%new);
+  return (\%new_data, \@cols_from_relations);
 }
 
 # _is_deterministic_value
@@ -1925,16 +1926,25 @@
 # of the attributes supplied
 #
 # used to determine if a subquery is necessary
+#
+# supports some virtual attributes:
+#   -join
+#     This will scan for any joins being present on the resultset.
+#     It is not a mere key-search but a deep inspection of {from}
+#
 
 sub _has_resolved_attr {
   my ($self, @attr_names) = @_;
 
   my $attrs = $self->_resolved_attrs;
 
-  my $join_check_req;
+  my %extra_checks;
 
   for my $n (@attr_names) {
-    ++$join_check_req if $n eq '-join';
+    if (grep { $n eq $_ } (qw/-join/) ) {
+      $extra_checks{$n}++;
+      next;
+    }
 
     my $attr =  $attrs->{$n};
 
@@ -1953,7 +1963,7 @@
 
   # a resolved join is expressed as a multi-level from
   return 1 if (
-    $join_check_req
+    $extra_checks{-join}
       and
     ref $attrs->{from} eq 'ARRAY'
       and
@@ -2018,7 +2028,7 @@
   return \%unaliased;
 }
 
-=head2 as_query (EXPERIMENTAL)
+=head2 as_query
 
 =over 4
 
@@ -2032,8 +2042,6 @@
 
 This is generally used as the RHS for a subquery.
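+
+For example (a sketch; the resultset and column names are illustrative):
+
+  my $inner = $schema->resultset('Artist')->search({ rank => 1 });
+
+  my $cds = $schema->resultset('CD')->search({
+    artist => { -in => $inner->get_column('artistid')->as_query },
+  });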
 
-B<NOTE>: This feature is still experimental.
-
 =cut
 
 sub as_query {
@@ -2078,13 +2086,14 @@
 a unique constraint that is not the primary key, or looking for
 related rows.
 
-If you want objects to be saved immediately, use L</find_or_create> instead.
+If you want objects to be saved immediately, use L</find_or_create>
+instead.
 
-B<Note>: C<find_or_new> is probably not what you want when creating a
-new row in a table that uses primary keys supplied by the
-database. Passing in a primary key column with a value of I<undef>
-will cause L</find> to attempt to search for a row with a value of
-I<NULL>.
+B<Note>: Take care when using C<find_or_new> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<find_or_new>, even when set to C<undef>.
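+
+For example (a sketch, assuming an auto-increment C<artistid> primary key):
+
+  # wrong - makes find() search for a row with artistid IS NULL
+  $rs->find_or_new({ artistid => undef, name => 'Shiny' });
+
+  # right - let the database supply the value
+  $rs->find_or_new({ name => 'Shiny' });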
 
 =cut
 
@@ -2119,13 +2128,16 @@
 can also be passed an object representing the foreign row, and the
 value will be set to its primary key.
 
-To create related objects, pass a hashref for the value if the related
-item is a foreign key relationship (L<DBIx::Class::Relationship/belongs_to>),
-and use the name of the relationship as the key. (NOT the name of the field,
-necessarily). For C<has_many> and C<has_one> relationships, pass an arrayref
-of hashrefs containing the data for each of the rows to create in the foreign
-tables, again using the relationship name as the key.
+To create related objects, pass a hashref of related-object column values
+B<keyed on the relationship name>. If the relationship is of type C<multi>
+(L<DBIx::Class::Relationship/has_many>) - pass an arrayref of hashrefs.
+The process will correctly identify columns holding foreign keys, and will
+transparently populate them from the keys of the corresponding relation.
+This can be applied recursively, and will work correctly for a structure
+with an arbitrary depth and width, as long as the relationships actually
+exist and the correct column data has been supplied.
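+
+An illustrative sketch (the relationship and column names are hypothetical):
+
+  $schema->resultset('Artist')->create({
+    name => 'The Shins',
+    cds => [ {                        # has_many => arrayref of hashrefs
+      title => 'Wincing The Night Away',
+      tracks => [                     # nested has_many, created recursively
+        { title => 'Sleeping Lessons' },
+      ],
+    } ],
+  });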
 
+
 Instead of hashrefs of plain related data (key/value pairs), you may
 also pass new or inserted objects. New objects (not inserted yet, see
 L</new>), will be inserted into their appropriate tables.
@@ -2161,6 +2173,19 @@
     }
   });
 
+=over
+
+=item WARNING
+
+When subclassing ResultSet never attempt to override this method. Since
+it is a simple shortcut for C<< $self->new_result($attrs)->insert >>, a
+lot of the internals simply never call it, so your override will be
+bypassed more often than not. Override either L<new|DBIx::Class::Row/new>
+or L<insert|DBIx::Class::Row/insert> depending on how early in the
+L</create> process you need to intervene.
+
+=back
+
 =cut
 
 sub create {
@@ -2210,11 +2235,11 @@
 the find has completed and before the create has started. To avoid
 this problem, use find_or_create() inside a transaction.
 
-B<Note>: C<find_or_create> is probably not what you want when creating
-a new row in a table that uses primary keys supplied by the
-database. Passing in a primary key column with a value of I<undef>
-will cause L</find> to attempt to search for a row with a value of
-I<NULL>.
+B<Note>: Take care when using C<find_or_create> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<find_or_create>, even when set to C<undef>.
 
 See also L</find> and L</update_or_create>. For information on how to declare
 unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
@@ -2277,11 +2302,11 @@
 See also L</find> and L</find_or_create>. For information on how to declare
 unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
 
-B<Note>: C<update_or_create> is probably not what you want when
-looking for a row in a table that uses primary keys supplied by the
-database, unless you actually have a key value. Passing in a primary
-key column with a value of I<undef> will cause L</find> to attempt to
-search for a row with a value of I<NULL>.
+B<Note>: Take care when using C<update_or_create> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<update_or_create>, even when set to C<undef>.
 
 =cut
 
@@ -2338,8 +2363,14 @@
       $cd->insert;
   }
 
-See also L</find>, L</find_or_create> and L<find_or_new>.
+B<Note>: Take care when using C<update_or_new> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<update_or_new>, even when set to C<undef>.
 
+See also L</find>, L</find_or_create> and L</find_or_new>.
+
 =cut
 
 sub update_or_new {
@@ -2422,6 +2453,40 @@
   shift->set_cache(undef);
 }
 
+=head2 is_paged
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: true, if the resultset has been paginated
+
+=back
+
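+A short usage sketch:
+
+  my $paged_rs = $rs->search({}, { rows => 10, page => 2 });
+  print "paginated\n" if $paged_rs->is_paged;
+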
+=cut
+
+sub is_paged {
+  my ($self) = @_;
+  return !!$self->{attrs}{page};
+}
+
+=head2 is_ordered
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: true, if the resultset has been ordered with C<order_by>.
+
+=back
+
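+A short usage sketch:
+
+  my $ordered_rs = $rs->search({}, { order_by => 'me.title' });
+  print "ordered\n" if $ordered_rs->is_ordered;
+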
+=cut
+
+sub is_ordered {
+  my ($self) = @_;
+  return scalar $self->result_source->storage->_parse_order_by($self->{attrs}{order_by});
+}
+
 =head2 related_resultset
 
 =over 4
@@ -2443,21 +2508,30 @@
 
   $self->{related_resultsets} ||= {};
   return $self->{related_resultsets}{$rel} ||= do {
-    my $rel_info = $self->result_source->relationship_info($rel);
+    my $rsrc = $self->result_source;
+    my $rel_info = $rsrc->relationship_info($rel);
 
     $self->throw_exception(
-      "search_related: result source '" . $self->result_source->source_name .
+      "search_related: result source '" . $rsrc->source_name .
         "' has no such relationship $rel")
       unless $rel_info;
 
-    my ($from,$seen) = $self->_resolve_from($rel);
+    my $attrs = $self->_chain_relationship($rel);
 
-    my $join_count = $seen->{$rel};
-    my $alias = ($join_count > 1 ? join('_', $rel, $join_count) : $rel);
+    my $join_count = $attrs->{seen_join}{$rel};
 
+    my $alias = $self->result_source->storage
+        ->relname_to_table_alias($rel, $join_count);
+
+    # since this is search_related, and we already slid the select window inwards
+    # (the select/as attrs were deleted in the beginning), we need to flip all
+    # left joins to inner, so we get the expected results
+    # read the comment on top of the actual function to see what this does
+    $attrs->{from} = $rsrc->schema->storage->_straight_join_to_node ($attrs->{from}, $alias);
+
+
     #XXX - temp fix for result_class bug. There likely is a more elegant fix -groditi
-    my %attrs = %{$self->{attrs}||{}};
-    delete @attrs{qw(result_class alias)};
+    delete @{$attrs}{qw(result_class alias)};
 
     my $new_cache;
 
@@ -2468,7 +2542,7 @@
       }
     }
 
-    my $rel_source = $self->result_source->related_source($rel);
+    my $rel_source = $rsrc->related_source($rel);
 
     my $new = do {
 
@@ -2478,20 +2552,14 @@
       # to work sanely (e.g. RestrictWithObject wants to be able to add
       # extra query restrictions, and these may need to be $alias.)
 
-      my $attrs = $rel_source->resultset_attributes;
-      local $attrs->{alias} = $alias;
+      my $rel_attrs = $rel_source->resultset_attributes;
+      local $rel_attrs->{alias} = $alias;
 
       $rel_source->resultset
                  ->search_rs(
                      undef, {
-                       %attrs,
-                       join => undef,
-                       prefetch => undef,
-                       select => undef,
-                       as => undef,
-                       where => $self->{cond},
-                       seen_join => $seen,
-                       from => $from,
+                       %$attrs,
+                       where => $attrs->{where},
                    });
     };
     $new->set_cache($new_cache) if $new_cache;
@@ -2542,42 +2610,173 @@
   return ($self->{attrs} || {})->{alias} || 'me';
 }
 
+=head2 as_subselect_rs
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $resultset
+
+=back
+
+Act as a barrier to SQL symbols.  The resultset provided will be made into a
+"virtual view" by including it as a subquery within the from clause.  From this
+point on, any joined tables are inaccessible to ->search on the resultset (as if
+it were simply where-filtered without joins).  For example:
+
+ my $rs = $schema->resultset('Bar')->search({'x.name' => 'abc'},{ join => 'x' });
+
+ # 'x' now pollutes the query namespace
+
+ # So the following works as expected
+ my $ok_rs = $rs->search({'x.other' => 1});
+
+ # But this doesn't: instead of finding a 'Bar' related to two x rows (abc and
+ # def) we look for one row with contradictory terms and join in another table
+ # (aliased 'x_2') which we never use
+ my $broken_rs = $rs->search({'x.name' => 'def'});
+
+ my $rs2 = $rs->as_subselect_rs;
+
+ # doesn't work - 'x' is no longer accessible in $rs2, having been sealed away
+ my $not_joined_rs = $rs2->search({'x.other' => 1});
+
+ # works as expected: finds a 'Bar' row related to two x rows (abc and def)
+ my $correctly_joined_rs = $rs2->search({'x.name' => 'def'});
+
+Another example of when one might use this would be to select a subset of
+columns in a group by clause:
+
+ my $rs = $schema->resultset('Bar')->search(undef, {
+   group_by => [qw{ id foo_id baz_id }],
+ })->as_subselect_rs->search(undef, {
+   columns => [qw{ id foo_id }]
+ });
+
+In the above example, C<columns> would normally have to match the C<group_by>
+exactly, but because the group by has been isolated in a subselect, the above
+works.
+
+=cut
+
+sub as_subselect_rs {
+   my $self = shift;
+
+   return $self->result_source->resultset->search( undef, {
+      alias => $self->current_source_alias,
+      from => [{
+            $self->current_source_alias => $self->as_query,
+            -alias         => $self->current_source_alias,
+            -source_handle => $self->result_source->handle,
+         }]
+   });
+}
+
 # This code is called by search_related, and makes sure there
 # is clear separation between the joins before, during, and
 # after the relationship. This information is needed later
 # in order to properly resolve prefetch aliases (any alias
 # with a relation_chain_depth less than the depth of the
 # current prefetch is not considered)
-sub _resolve_from {
+#
+# The increments happen twice per join. An even number means a
+# relationship specified via a search_related, whereas an odd
+# number indicates a join/prefetch added via attributes
+#
+# Also this code will wrap the current resultset (the one we
+# chain to) in a subselect IFF it contains limiting attributes
+sub _chain_relationship {
   my ($self, $rel) = @_;
   my $source = $self->result_source;
-  my $attrs = $self->{attrs};
+  my $attrs = { %{$self->{attrs}||{}} };
 
-  my $from = [ @{
-      $attrs->{from}
-        ||
-      [{
-        -source_handle => $source->handle,
-        -alias => $attrs->{alias},
-        $attrs->{alias} => $source->from,
-      }]
-  }];
-
-  my $seen = { %{$attrs->{seen_join} || {} } };
-
   # we need to take the prefetch the attrs into account before we
   # ->_resolve_join as otherwise they get lost - captainL
-  my $merged = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} );
+  my $join = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} );
 
-  push @$from, $source->_resolve_join($merged, $attrs->{alias}, $seen) if ($merged);
+  delete @{$attrs}{qw/join prefetch collapse distinct select as columns +select +as +columns/};
 
-  ++$seen->{-relation_chain_depth};
+  my $seen = { %{ (delete $attrs->{seen_join}) || {} } };
 
-  push @$from, $source->_resolve_join($rel, $attrs->{alias}, $seen);
+  my $from;
+  my @force_subq_attrs = qw/offset rows group_by having/;
 
-  ++$seen->{-relation_chain_depth};
+  if (
+    ($attrs->{from} && ref $attrs->{from} ne 'ARRAY')
+      ||
+    $self->_has_resolved_attr (@force_subq_attrs)
+  ) {
+    # Nuke the prefetch (if any) before the new $rs attrs
+    # are resolved (prefetch is useless - we are wrapping
+    # a subquery anyway).
+    my $rs_copy = $self->search;
+    $rs_copy->{attrs}{join} = $self->_merge_attr (
+      $rs_copy->{attrs}{join},
+      delete $rs_copy->{attrs}{prefetch},
+    );
 
-  return ($from,$seen);
+    $from = [{
+      -source_handle => $source->handle,
+      -alias => $attrs->{alias},
+      $attrs->{alias} => $rs_copy->as_query,
+    }];
+    delete @{$attrs}{@force_subq_attrs, 'where'};
+    $seen->{-relation_chain_depth} = 0;
+  }
+  elsif ($attrs->{from}) {  #shallow copy suffices
+    $from = [ @{$attrs->{from}} ];
+  }
+  else {
+    $from = [{
+      -source_handle => $source->handle,
+      -alias => $attrs->{alias},
+      $attrs->{alias} => $source->from,
+    }];
+  }
+
+  my $jpath = ($seen->{-relation_chain_depth})
+    ? $from->[-1][0]{-join_path}
+    : [];
+
+  my @requested_joins = $source->_resolve_join(
+    $join,
+    $attrs->{alias},
+    $seen,
+    $jpath,
+  );
+
+  push @$from, @requested_joins;
+
+  $seen->{-relation_chain_depth}++;
+
+  # if $self already had a join/prefetch specified on it, the requested
+  # $rel might very well be already included. What we do in this case
+  # is effectively a no-op (except that we bump up the chain_depth on
+  # the join in question so we could tell it *is* the search_related)
+  my $already_joined;
+
+  # we consider the last one thus reverse
+  for my $j (reverse @requested_joins) {
+    my ($last_j) = keys %{$j->[0]{-join_path}[-1]};
+    if ($rel eq $last_j) {
+      $j->[0]{-relation_chain_depth}++;
+      $already_joined++;
+      last;
+    }
+  }
+
+  unless ($already_joined) {
+    push @$from, $source->_resolve_join(
+      $rel,
+      $attrs->{alias},
+      $seen,
+      $jpath,
+    );
+  }
+
+  $seen->{-relation_chain_depth}++;
+
+  return {%$attrs, from => $from, seen_join => $seen};
 }
 
 # too many times we have to do $attrs = { %{$self->_resolved_attrs} }
@@ -2599,31 +2798,47 @@
 
   # build columns (as long as select isn't set) into a set of as/select hashes
   unless ( $attrs->{select} ) {
-      @colbits = map {
-          ( ref($_) eq 'HASH' )
-              ? $_
-              : {
-                  (
-                    /^\Q${alias}.\E(.+)$/
-                      ? "$1"
-                      : "$_"
-                  )
-                =>
-                  (
-                    /\./
-                      ? "$_"
-                      : "${alias}.$_"
-                  )
-            }
-      } ( ref($attrs->{columns}) eq 'ARRAY' ) ? @{ delete $attrs->{columns}} : (delete $attrs->{columns} || $source->columns );
+
+    my @cols;
+    if ( ref $attrs->{columns} eq 'ARRAY' ) {
+      @cols = @{ delete $attrs->{columns}}
+    } elsif ( defined $attrs->{columns} ) {
+      @cols = delete $attrs->{columns}
+    } else {
+      @cols = $source->columns
+    }
+
+    for (@cols) {
+      if ( ref $_ eq 'HASH' ) {
+        push @colbits, $_
+      } else {
+        my $key = /^\Q${alias}.\E(.+)$/
+          ? "$1"
+          : "$_";
+        my $value = /\./
+          ? "$_"
+          : "${alias}.$_";
+        push @colbits, { $key => $value };
+      }
+    }
   }
+
   # add the additional columns on
-  foreach ( 'include_columns', '+columns' ) {
-      push @colbits, map {
-          ( ref($_) eq 'HASH' )
-            ? $_
-            : { ( split( /\./, $_ ) )[-1] => ( /\./ ? $_ : "${alias}.$_" ) }
-      } ( ref($attrs->{$_}) eq 'ARRAY' ) ? @{ delete $attrs->{$_} } : delete $attrs->{$_} if ( $attrs->{$_} );
+  foreach (qw{include_columns +columns}) {
+    if ( $attrs->{$_} ) {
+      my @list = ( ref($attrs->{$_}) eq 'ARRAY' )
+        ? @{ delete $attrs->{$_} }
+        : delete $attrs->{$_};
+      for (@list) {
+        if ( ref($_) eq 'HASH' ) {
+          push @colbits, $_
+        } else {
+          my $key = ( split /\./, $_ )[-1];
+          my $value = ( /\./ ? $_ : "$alias.$_" );
+          push @colbits, { $key => $value };
+        }
+      }
+    }
   }
 
   # start with initial select items
@@ -2632,15 +2847,22 @@
         ( ref $attrs->{select} eq 'ARRAY' )
       ? [ @{ $attrs->{select} } ]
       : [ $attrs->{select} ];
-    $attrs->{as} = (
-      $attrs->{as}
-      ? (
-        ref $attrs->{as} eq 'ARRAY'
-        ? [ @{ $attrs->{as} } ]
-        : [ $attrs->{as} ]
+
+    if ( $attrs->{as} ) {
+      $attrs->{as} =
+        (
+          ref $attrs->{as} eq 'ARRAY'
+            ? [ @{ $attrs->{as} } ]
+            : [ $attrs->{as} ]
         )
-      : [ map { m/^\Q${alias}.\E(.+)$/ ? $1 : $_ } @{ $attrs->{select} } ]
-    );
+    } else {
+      $attrs->{as} = [ map {
+         m/^\Q${alias}.\E(.+)$/
+           ? $1
+           : $_
+         } @{ $attrs->{select} }
+      ]
+    }
   }
   else {
 
@@ -2650,31 +2872,28 @@
   }
 
   # now add colbits to select/as
-  push( @{ $attrs->{select} }, map { values( %{$_} ) } @colbits );
-  push( @{ $attrs->{as} },     map { keys( %{$_} ) } @colbits );
+  push @{ $attrs->{select} }, map values %{$_}, @colbits;
+  push @{ $attrs->{as}     }, map keys   %{$_}, @colbits;
 
-  my $adds;
-  if ( $adds = delete $attrs->{'+select'} ) {
+  if ( my $adds = delete $attrs->{'+select'} ) {
     $adds = [$adds] unless ref $adds eq 'ARRAY';
-    push(
-      @{ $attrs->{select} },
-      map { /\./ || ref $_ ? $_ : "${alias}.$_" } @$adds
-    );
+    push @{ $attrs->{select} },
+      map { /\./ || ref $_ ? $_ : "$alias.$_" } @$adds;
   }
-  if ( $adds = delete $attrs->{'+as'} ) {
+  if ( my $adds = delete $attrs->{'+as'} ) {
     $adds = [$adds] unless ref $adds eq 'ARRAY';
-    push( @{ $attrs->{as} }, @$adds );
+    push @{ $attrs->{as} }, @$adds;
   }
 
-  $attrs->{from} ||= [ {
+  $attrs->{from} ||= [{
     -source_handle => $source->handle,
     -alias => $self->{attrs}{alias},
     $self->{attrs}{alias} => $source->from,
-  } ];
+  }];
 
   if ( $attrs->{join} || $attrs->{prefetch} ) {
 
-    $self->throw_exception ('join/prefetch can not be used with a literal scalarref {from}')
+    $self->throw_exception ('join/prefetch can not be used with a custom {from}')
       if ref $attrs->{from} ne 'ARRAY';
 
     my $join = delete $attrs->{join} || {};
@@ -2687,83 +2906,109 @@
       [
         @{ $attrs->{from} },
         $source->_resolve_join(
-          $join, $alias, { %{ $attrs->{seen_join} || {} } }
+          $join,
+          $alias,
+          { %{ $attrs->{seen_join} || {} } },
+          ( $attrs->{seen_join} && keys %{$attrs->{seen_join}})
+            ? $attrs->{from}[-1][0]{-join_path}
+            : []
+          ,
         )
       ];
   }
 
-  if ( $attrs->{order_by} ) {
+  if ( defined $attrs->{order_by} ) {
     $attrs->{order_by} = (
       ref( $attrs->{order_by} ) eq 'ARRAY'
       ? [ @{ $attrs->{order_by} } ]
-      : [ $attrs->{order_by} ]
+      : [ $attrs->{order_by} || () ]
     );
   }
-  else {
-    $attrs->{order_by} = [];
+
+  if ($attrs->{group_by} and ref $attrs->{group_by} ne 'ARRAY') {
+    $attrs->{group_by} = [ $attrs->{group_by} ];
   }
 
-  # If the order_by is otherwise empty - we will use this for TOP limit
-  # emulation and the like.
-  # Although this is needed only if the order_by is not defined, it is
-  # actually cheaper to just populate this rather than properly examining
-  # order_by (stuf like [ {} ] and the like)
-  $attrs->{_virtual_order_by} = [ $self->result_source->primary_columns ];
+  # generate the distinct induced group_by early, as prefetch will be carried via a
+  # subquery (since a group_by is present)
+  if (delete $attrs->{distinct}) {
+    if ($attrs->{group_by}) {
+      carp ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
+    }
+    else {
+      $attrs->{group_by} = [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ];
 
+      # add any order_by parts that are not already present in the group_by
+      # we need to be careful not to add any named functions/aggregates
+      # i.e. select => [ ... { count => 'foo', -as => 'foocount' } ... ]
+      my %already_grouped = map { $_ => 1 } (@{$attrs->{group_by}});
 
+      my $storage = $self->result_source->schema->storage;
+
+      my $rs_column_list = $storage->_resolve_column_info ($attrs->{from});
+
+      for my $chunk ($storage->_parse_order_by($attrs->{order_by})) {
+        if ($rs_column_list->{$chunk} && not $already_grouped{$chunk}++) {
+          push @{$attrs->{group_by}}, $chunk;
+        }
+      }
+    }
+  }
+
   $attrs->{collapse} ||= {};
   if ( my $prefetch = delete $attrs->{prefetch} ) {
     $prefetch = $self->_merge_attr( {}, $prefetch );
 
     my $prefetch_ordering = [];
 
-    my $join_map = $self->_joinpath_aliases ($attrs->{from}, $attrs->{seen_join});
+    # this is a separate structure (we don't look in {from} directly)
+    # as the resolver needs to shift things off the lists to work
+    # properly (identical-prefetches on different branches)
+    my $join_map = {};
+    if (ref $attrs->{from} eq 'ARRAY') {
 
+      my $start_depth = $attrs->{seen_join}{-relation_chain_depth} || 0;
+
+      for my $j ( @{$attrs->{from}}[1 .. $#{$attrs->{from}} ] ) {
+        next unless $j->[0]{-alias};
+        next unless $j->[0]{-join_path};
+        next if ($j->[0]{-relation_chain_depth} || 0) < $start_depth;
+
+        my @jpath = map { keys %$_ } @{$j->[0]{-join_path}};
+
+        my $p = $join_map;
+        $p = $p->{$_} ||= {} for @jpath[ ($start_depth/2) .. $#jpath]; #only even depths are actual jpath boundaries
+        push @{$p->{-join_aliases} }, $j->[0]{-alias};
+      }
+    }
+
     my @prefetch =
       $source->_resolve_prefetch( $prefetch, $alias, $join_map, $prefetch_ordering, $attrs->{collapse} );
 
-    push( @{ $attrs->{select} }, map { $_->[0] } @prefetch );
-    push( @{ $attrs->{as} },     map { $_->[1] } @prefetch );
+    # we need to somehow mark which columns came from prefetch
+    $attrs->{_prefetch_select} = [ map { $_->[0] } @prefetch ];
 
-    push( @{ $attrs->{order_by} }, @$prefetch_ordering );
+    push @{ $attrs->{select} }, @{$attrs->{_prefetch_select}};
+    push @{ $attrs->{as} }, (map { $_->[1] } @prefetch);
+
+    push( @{$attrs->{order_by}}, @$prefetch_ordering );
     $attrs->{_collapse_order_by} = \@$prefetch_ordering;
   }
 
-
-  if (delete $attrs->{distinct}) {
-    $attrs->{group_by} ||= [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ];
-  }
-
   # if both page and offset are specified, produce a combined offset
   # even though it doesn't make much sense, this is what pre 081xx has
   # been doing
   if (my $page = delete $attrs->{page}) {
-    $attrs->{offset} = ($attrs->{rows} * ($page - 1)) +
-      ($attrs->{offset} || 0);
+    $attrs->{offset} =
+      ($attrs->{rows} * ($page - 1))
+            +
+      ($attrs->{offset} || 0)
+    ;
   }
 
   return $self->{_attrs} = $attrs;
 }
 
-sub _joinpath_aliases {
-  my ($self, $fromspec, $seen) = @_;
-
-  my $paths = {};
-  return $paths unless ref $fromspec eq 'ARRAY';
-
-  for my $j (@$fromspec) {
-
-    next if ref $j ne 'ARRAY';
-    next if $j->[0]{-relation_chain_depth} < ( $seen->{-relation_chain_depth} || 0);
-
-    my $p = $paths;
-    $p = $p->{$_} ||= {} for @{$j->[0]{-join_path}};
-    push @{$p->{-join_aliases} }, $j->[0]{-alias};
-  }
-
-  return $paths;
-}
-
 sub _rollout_attr {
   my ($self, $attr) = @_;
 
@@ -2806,6 +3051,13 @@
 sub _calculate_score {
   my ($self, $a, $b) = @_;
 
+  if (defined $a xor defined $b) {
+    return 0;
+  }
+  elsif (not defined $a) {
+    return 1;
+  }
+
   if (ref $b eq 'HASH') {
     my ($b_key) = keys %{$b};
     if (ref $a eq 'HASH') {
@@ -2887,12 +3139,13 @@
 
 sub throw_exception {
   my $self=shift;
+
   if (ref $self && $self->_source_handle->schema) {
     $self->_source_handle->schema->throw_exception(@_)
-  } else {
-    croak(@_);
   }
-
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 # XXX: FIXME: Attributes docs need clearing up
@@ -2914,11 +3167,16 @@
 
 =back
 
-Which column(s) to order the results by. If a single column name, or
-an arrayref of names is supplied, the argument is passed through
-directly to SQL. The hashref syntax allows for connection-agnostic
-specification of ordering direction:
+Which column(s) to order the results by.
 
+[The full list of suitable values is documented in
+L<SQL::Abstract/"ORDER BY CLAUSES">; the following is a summary of
+common options.]
+
+If a single column name, or an arrayref of names is supplied, the
+argument is passed through directly to SQL. The hashref syntax allows
+for connection-agnostic specification of ordering direction:
+
  For descending order:
 
   order_by => { -desc => [qw/col1 col2 col3/] }
@@ -3004,6 +3262,9 @@
 attribute, the column names returned are storage-dependent. E.g. MySQL would
 return a column named C<count(employeeid)> in the above example.
 
+B<NOTE:> You will almost always need a corresponding 'as' entry when you use
+'select'.
+
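+For example, a sketch pairing C<select> with C<as> (hypothetical column names):
+
+  select => [ 'name', { count => 'cdid' } ],
+  as     => [ 'name', 'cd_count' ],
+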
 =head2 +select
 
 =over 4
@@ -3068,7 +3329,7 @@
 will fail miserably.
 
 To get around this limitation, you can supply literal SQL to your
-C<select> attibute that contains the C<AS alias> text, eg:
+C<select> attribute that contains the C<AS alias> text, e.g.
 
   select => [\'myfield AS alias']
 
@@ -3179,7 +3440,7 @@
 C<prefetch> can be used with the following relationship types: C<belongs_to>,
 C<has_one> (or if you're using C<add_relationship>, any relationship declared
 with an accessor type of 'single' or 'filter'). A more complex example that
-prefetches an artists cds, the tracks on those cds, and the tags associted
+prefetches an artists cds, the tracks on those cds, and the tags associated
 with that artist is given below (assuming many-to-many from artists to tags):
 
  my $rs = $schema->resultset('Artist')->search(
@@ -3196,6 +3457,42 @@
 B<NOTE:> If you specify a C<prefetch> attribute, the C<join> and C<select>
 attributes will be ignored.
 
+B<CAVEATs>: Prefetch does a lot of deep magic. As such, it may not behave
+exactly as you might expect.
+
+=over 4
+
+=item *
+
+Prefetch uses the L</cache> to populate the prefetched relationships. This
+may or may not be what you want.
+
+=item *
+
+If you specify a condition on a prefetched relationship, ONLY those
+rows that match the prefetched condition will be fetched into that relationship.
+This means that adding prefetch to a search() B<may alter> what is returned by
+traversing a relationship. So, if you have C<< Artist->has_many(CDs) >> and you do
+
+  my $artist_rs = $schema->resultset('Artist')->search({
+      'cds.year' => 2008,
+  }, {
+      join => 'cds',
+  });
+
+  my $count = $artist_rs->first->cds->count;
+
+  my $artist_rs_prefetch = $artist_rs->search( {}, { prefetch => 'cds' } );
+
+  my $prefetch_count = $artist_rs_prefetch->first->cds->count;
+
+  cmp_ok( $count, '==', $prefetch_count, "Counts should be the same" );
+
+that cmp_ok() may or may not pass depending on the datasets involved. This
+behavior may or may not survive the 0.09 transition.
+
+=back
+
 =head2 page
 
 =over 4
@@ -3222,7 +3519,7 @@
 
 =back
 
-Specifes the maximum number of rows for direct retrieval or the number of
+Specifies the maximum number of rows for direct retrieval or the number of
 rows per page if the page attribute or method is used.
 
 =head2 offset
@@ -3270,7 +3567,8 @@
 
 =back
 
-Set to 1 to group by all columns.
+Set to 1 to group by all columns. If the resultset already has a group_by
+attribute, this setting is ignored and an appropriate warning is issued.
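+
+A sketch of the effect (assuming the default column list is being selected):
+
+  # these two are roughly equivalent
+  $rs->search({}, { distinct => 1 });
+  $rs->search({}, { group_by => [ $rs->result_source->columns ] });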
 
 =head2 where
 
@@ -3281,8 +3579,8 @@
   # only return rows WHERE deleted IS NULL for all searches
   __PACKAGE__->resultset_attributes({ where => { deleted => undef } }); )
 
-Can be overridden by passing C<{ where => undef }> as an attribute
-to a resulset.
+Can be overridden by passing C<< { where => undef } >> as an attribute
+to a resultset.
 
 =back
 
@@ -3304,177 +3602,6 @@
 For more examples of using these attributes, see
 L<DBIx::Class::Manual::Cookbook>.
 
-=head2 from
-
-=over 4
-
-=item Value: \@from_clause
-
-=back
-
-The C<from> attribute gives you manual control over the C<FROM> clause of SQL
-statements generated by L<DBIx::Class>, allowing you to express custom C<JOIN>
-clauses.
-
-NOTE: Use this on your own risk.  This allows you to shoot off your foot!
-
-C<join> will usually do what you need and it is strongly recommended that you
-avoid using C<from> unless you cannot achieve the desired result using C<join>.
-And we really do mean "cannot", not just tried and failed. Attempting to use
-this because you're having problems with C<join> is like trying to use x86
-ASM because you've got a syntax error in your C. Trust us on this.
-
-Now, if you're still really, really sure you need to use this (and if you're
-not 100% sure, ask the mailing list first), here's an explanation of how this
-works.
-
-The syntax is as follows -
-
-  [
-    { <alias1> => <table1> },
-    [
-      { <alias2> => <table2>, -join_type => 'inner|left|right' },
-      [], # nested JOIN (optional)
-      { <table1.column1> => <table2.column2>, ... (more conditions) },
-    ],
-    # More of the above [ ] may follow for additional joins
-  ]
-
-  <table1> <alias1>
-  JOIN
-    <table2> <alias2>
-    [JOIN ...]
-  ON <table1.column1> = <table2.column2>
-  <more joins may follow>
-
-An easy way to follow the examples below is to remember the following:
-
-    Anything inside "[]" is a JOIN
-    Anything inside "{}" is a condition for the enclosing JOIN
-
-The following examples utilize a "person" table in a family tree application.
-In order to express parent->child relationships, this table is self-joined:
-
-    # Person->belongs_to('father' => 'Person');
-    # Person->belongs_to('mother' => 'Person');
-
-C<from> can be used to nest joins. Here we return all children with a father,
-then search against all mothers of those children:
-
-  $rs = $schema->resultset('Person')->search(
-      undef,
-      {
-          alias => 'mother', # alias columns in accordance with "from"
-          from => [
-              { mother => 'person' },
-              [
-                  [
-                      { child => 'person' },
-                      [
-                          { father => 'person' },
-                          { 'father.person_id' => 'child.father_id' }
-                      ]
-                  ],
-                  { 'mother.person_id' => 'child.mother_id' }
-              ],
-          ]
-      },
-  );
-
-  # Equivalent SQL:
-  # SELECT mother.* FROM person mother
-  # JOIN (
-  #   person child
-  #   JOIN person father
-  #   ON ( father.person_id = child.father_id )
-  # )
-  # ON ( mother.person_id = child.mother_id )
-
-The type of any join can be controlled manually. To search against only people
-with a father in the person table, we could explicitly use C<INNER JOIN>:
-
-    $rs = $schema->resultset('Person')->search(
-        undef,
-        {
-            alias => 'child', # alias columns in accordance with "from"
-            from => [
-                { child => 'person' },
-                [
-                    { father => 'person', -join_type => 'inner' },
-                    { 'father.id' => 'child.father_id' }
-                ],
-            ]
-        },
-    );
-
-    # Equivalent SQL:
-    # SELECT child.* FROM person child
-    # INNER JOIN person father ON child.father_id = father.id
-
-You can select from a subquery by passing a resultset to from as follows.
-
-    $schema->resultset('Artist')->search( 
-        undef, 
-        {   alias => 'artist2',
-            from  => [ { artist2 => $artist_rs->as_query } ],
-        } );
-
-    # and you'll get sql like this..
-    # SELECT artist2.artistid, artist2.name, artist2.rank, artist2.charfield FROM 
-    #   ( SELECT me.artistid, me.name, me.rank, me.charfield FROM artists me ) artist2
-
-If you need to express really complex joins, you
-can supply literal SQL to C<from> via a scalar reference. In this case
-the contents of the scalar will replace the table name associated with the
-resultsource.
-
-WARNING: This technique might very well not work as expected on chained
-searches - you have been warned.
-
-    # Assuming the Event resultsource is defined as:
-
-        MySchema::Event->add_columns (
-            sequence => {
-                data_type => 'INT',
-                is_auto_increment => 1,
-            },
-            location => {
-                data_type => 'INT',
-            },
-            type => {
-                data_type => 'INT',
-            },
-        );
-        MySchema::Event->set_primary_key ('sequence');
-
-    # This will get back the latest event for every location. The column
-    # selector is still provided by DBIC, all we do is add a JOIN/WHERE
-    # combo to limit the resultset
-
-    $rs = $schema->resultset('Event');
-    $table = $rs->result_source->name;
-    $latest = $rs->search (
-        undef,
-        { from => \ "
-            (SELECT e1.* FROM $table e1
-                JOIN $table e2
-                    ON e1.location = e2.location
-                    AND e1.sequence < e2.sequence
-                WHERE e2.sequence is NULL
-            ) me",
-        },
-    );
-
-    # Equivalent SQL (with the DBIC chunks added):
-
-    SELECT me.sequence, me.location, me.type FROM
-       (SELECT e1.* FROM events e1
-           JOIN events e2
-               ON e1.location = e2.location
-               AND e1.sequence < e2.sequence
-           WHERE e2.sequence is NULL
-       ) me;
-
 =head2 for
 
 =over 4

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSetColumn.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSetColumn.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSetColumn.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,7 +1,12 @@
 package DBIx::Class::ResultSetColumn;
+
 use strict;
 use warnings;
+
 use base 'DBIx::Class';
+
+use Carp::Clan qw/^DBIx::Class/;
+use DBIx::Class::Exception;
 use List::Util;
 
 =head1 NAME
@@ -37,34 +42,87 @@
   my ($class, $rs, $column) = @_;
   $class = ref $class if ref $class;
 
-  $rs->throw_exception("column must be supplied") unless $column;
+  $rs->throw_exception('column must be supplied') unless $column;
 
-  my $new_parent_rs = $rs->search_rs; # we don't want to mess up the original, so clone it
+  my $orig_attrs = $rs->_resolved_attrs;
 
-  # prefetch causes additional columns to be fetched, but we can not just make a new
-  # rs via the _resolved_attrs trick - we need to retain the separation between
-  # +select/+as and select/as. At the same time we want to preserve any joins that the
-  # prefetch would otherwise generate.
-  my $init_attrs = $new_parent_rs->{attrs} ||= {};
-  delete $init_attrs->{collapse};
-  $init_attrs->{join} = $rs->_merge_attr( delete $init_attrs->{join}, delete $init_attrs->{prefetch} );
-
   # If $column can be found in the 'as' list of the parent resultset, use the
   # corresponding element of its 'select' list (to keep any custom column
   # definition set up with 'select' or '+select' attrs), otherwise use $column
   # (to create a new column definition on-the-fly).
-  my $attrs = $new_parent_rs->_resolved_attrs;
-
-  my $as_list = $attrs->{as} || [];
-  my $select_list = $attrs->{select} || [];
+  my $as_list = $orig_attrs->{as} || [];
+  my $select_list = $orig_attrs->{select} || [];
   my $as_index = List::Util::first { ($as_list->[$_] || "") eq $column } 0..$#$as_list;
   my $select = defined $as_index ? $select_list->[$as_index] : $column;
 
+  my $new_parent_rs;
+  # analyze the order_by, and see if it is done over a function/nonexistentcolumn
+  # if this is the case we will need to wrap a subquery since the result of RSC
+  # *must* be a single column select
+  my %collist = map { $_ => 1 } ($rs->result_source->columns, $column);
+  if (
+    scalar grep
+      { ! $collist{$_} }
+      ( $rs->result_source->schema->storage->_parse_order_by ($orig_attrs->{order_by} ) ) 
+  ) {
+    my $alias = $rs->current_source_alias;
+    # nuke the prefetch before collapsing to sql
+    my $subq_rs = $rs->search;
+    $subq_rs->{attrs}{join} = $subq_rs->_merge_attr( $subq_rs->{attrs}{join}, delete $subq_rs->{attrs}{prefetch} );
+
+    $new_parent_rs = $rs->result_source->resultset->search ( {}, {
+      alias => $alias,
+      from => [{
+        $alias => $subq_rs->as_query,
+        -alias => $alias,
+        -source_handle => $rs->result_source->handle,
+      }]
+    });
+  }
+
+  $new_parent_rs ||= $rs->search_rs;
+  my $new_attrs = $new_parent_rs->{attrs} ||= {};
+
+  # prefetch causes additional columns to be fetched, but we can not just make a new
+  # rs via the _resolved_attrs trick - we need to retain the separation between
+  # +select/+as and select/as. At the same time we want to preserve any joins that the
+  # prefetch would otherwise generate.
+  $new_attrs->{join} = $rs->_merge_attr( $new_attrs->{join}, delete $new_attrs->{prefetch} );
+
+  # {collapse} would mean a has_many join was injected, which in turn means
+  # we need to group *IF WE CAN* (only if the column in question is unique)
+  if (!$new_attrs->{group_by} && keys %{$orig_attrs->{collapse}}) {
+
+    # scan for a constraint that would contain our column only - that'd be proof
+    # enough it is unique
+    my $constraints = { $rs->result_source->unique_constraints };
+    for my $constraint_columns ( values %$constraints ) {
+
+      next unless @$constraint_columns == 1;
+
+      my $col = $constraint_columns->[0];
+      my $fqcol = join ('.', $new_attrs->{alias}, $col);
+
+      if ($col eq $select or $fqcol eq $select) {
+        $new_attrs->{group_by} = [ $select ];
+        delete $new_attrs->{distinct}; # it is ignored when group_by is present
+        last;
+      }
+    }
+
+    if (!$new_attrs->{group_by}) {
+      carp (
+          "Attempting to retrieve non-unique column '$column' on a resultset containing "
+        . 'one-to-many joins will return duplicate results.'
+      );
+    }
+  }
+
   my $new = bless { _select => $select, _as => $column, _parent_resultset => $new_parent_rs }, $class;
   return $new;
 }
 
-=head2 as_query (EXPERIMENTAL)
+=head2 as_query
 
 =over 4
 
@@ -78,8 +136,6 @@
 
 This is generally used as the RHS for a subquery.
 
-B<NOTE>: This feature is still experimental.
-
 =cut
 
 sub as_query { return shift->_resultset->as_query(@_) }
@@ -104,7 +160,10 @@
 
 sub next {
   my $self = shift;
+
+  # using cursor so we don't inflate anything
   my ($row) = $self->_resultset->cursor->next;
+
   return $row;
 }
 
@@ -128,6 +187,8 @@
 
 sub all {
   my $self = shift;
+
+  # using cursor so we don't inflate anything
   return map { $_->[0] } $self->_resultset->cursor->all;
 }
 
@@ -173,10 +234,41 @@
 
 sub first {
   my $self = shift;
-  my ($row) = $self->_resultset->cursor->reset->next;
+
+  # using cursor so we don't inflate anything
+  $self->_resultset->cursor->reset;
+  my ($row) = $self->_resultset->cursor->next;
+
   return $row;
 }
 
+=head2 single
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $value
+
+=back
+
+Much like L<DBIx::Class::ResultSet/single>, fetches one and only one column
+value using the cursor directly. If additional rows are present, a warning
+is issued before the cursor is discarded.
+
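+A usage sketch:
+
+  my $last_year = $schema->resultset('CD')
+    ->search({}, { order_by => { -desc => 'year' }, rows => 1 })
+    ->get_column('year')
+    ->single;
+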
+=cut
+
+sub single {
+  my $self = shift;
+
+  my $attrs = $self->_resultset->_resolved_attrs;
+  my ($row) = $self->_resultset->result_source->storage->select_single(
+    $attrs->{from}, $attrs->{select}, $attrs->{where}, $attrs
+  );
+
+  return $row;
+}
+
 =head2 min
 
 =over 4
@@ -317,7 +409,7 @@
 sub func {
   my ($self,$function) = @_;
   my $cursor = $self->func_rs($function)->cursor;
-  
+
   if( wantarray ) {
     return map { $_->[ 0 ] } $cursor->all;
   }
@@ -352,16 +444,18 @@
 =head2 throw_exception
 
 See L<DBIx::Class::Schema/throw_exception> for details.
-  
+
 =cut 
-    
+
 sub throw_exception {
   my $self=shift;
+
   if (ref $self && $self->{_parent_resultset}) {
-    $self->{_parent_resultset}->throw_exception(@_)
-  } else {
-    croak(@_);
+    $self->{_parent_resultset}->throw_exception(@_);
   }
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 # _resultset
@@ -374,7 +468,7 @@
 #
 # Returns the underlying resultset. Creates it from the parent resultset if
 # necessary.
-# 
+#
 sub _resultset {
   my $self = shift;
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSource/Table.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSource/Table.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSource/Table.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -16,7 +16,7 @@
 
 =head1 DESCRIPTION
 
-Table object that inherits from L<DBIx::Class::ResultSource>
+Table object that inherits from L<DBIx::Class::ResultSource>.
 
 =head1 METHODS
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSource/View.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSource/View.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSource/View.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -17,28 +17,40 @@
 
 =head1 SYNOPSIS
 
-  package MyDB::Schema::Year2000CDs;
+  package MyDB::Schema::Result::Year2000CDs;
 
-  use DBIx::Class::ResultSource::View;
+  use base qw/DBIx::Class::Core/;
 
-  __PACKAGE__->load_components('Core');
   __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
 
   __PACKAGE__->table('year2000cds');
   __PACKAGE__->result_source_instance->is_virtual(1);
   __PACKAGE__->result_source_instance->view_definition(
       "SELECT cdid, artist, title FROM cd WHERE year ='2000'"
-      );
+  );
+  __PACKAGE__->add_columns(
+    'cdid' => {
+      data_type => 'integer',
+      is_auto_increment => 1,
+    },
+    'artist' => {
+      data_type => 'integer',
+    },
+    'title' => {
+      data_type => 'varchar',
+      size      => 100,
+    },
+  );
 
 =head1 DESCRIPTION
 
 View object that inherits from L<DBIx::Class::ResultSource>
 
-This class extends ResultSource to add basic view support. 
+This class extends ResultSource to add basic view support.
 
-A view has a L</view_definition>, which contains an SQL query. The
-query cannot have parameters. It may contain JOINs, sub selects and
-any other SQL your database supports.
+A view has a L</view_definition>, which contains a SQL query. The query can
+only have parameters if L</is_virtual> is set to true. It may contain JOINs,
+sub selects and any other SQL your database supports.
 
 View definition SQL is deployed to your database on
 L<DBIx::Class::Schema/deploy> unless you set L</is_virtual> to true.
@@ -50,6 +62,37 @@
 exist in your database as a real view. The L</view_definition> in this
 case replaces the view name in a FROM clause in a subselect.
 
+=head1 EXAMPLES
+
+Having created the MyDB::Schema::Result::Year2000CDs result class as shown
+in the SYNOPSIS above, you can then:
+
+  $cds_2000 = $schema->resultset('Year2000CDs')
+                     ->search()
+                     ->all();
+  $count    = $schema->resultset('Year2000CDs')
+                     ->search()
+                     ->count();
+
+If you modify the schema to include a placeholder
+
+  __PACKAGE__->result_source_instance->view_definition(
+      "SELECT cdid, artist, title FROM cd WHERE year = ?"
+  );
+
+and ensure that is_virtual is set to true:
+
+  __PACKAGE__->result_source_instance->is_virtual(1);
+
+You could now say:
+
+  $cds_2001 = $schema->resultset('Year2000CDs')
+                     ->search({}, { bind => [2001] })
+                     ->all();
+  $count    = $schema->resultset('Year2000CDs')
+                     ->search({}, { bind => [2001] })
+                     ->count();
+
 =head1 SQL EXAMPLES
 
 =over

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSource.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSource.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSource.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -5,8 +5,9 @@
 
 use DBIx::Class::ResultSet;
 use DBIx::Class::ResultSourceHandle;
+
+use DBIx::Class::Exception;
 use Carp::Clan qw/^DBIx::Class/;
-use Storable;
 
 use base qw/DBIx::Class/;
 
@@ -24,13 +25,74 @@
 
 =head1 SYNOPSIS
 
+  # Create a table based result source, in a result class.
+
+  package MyDB::Schema::Result::Artist;
+  use base qw/DBIx::Class::Core/;
+
+  __PACKAGE__->table('artist');
+  __PACKAGE__->add_columns(qw/ artistid name /);
+  __PACKAGE__->set_primary_key('artistid');
+  __PACKAGE__->has_many(cds => 'MyDB::Schema::Result::CD');
+
+  1;
+
+  # Create a query (view) based result source, in a result class
+  package MyDB::Schema::Result::Year2000CDs;
+  use base qw/DBIx::Class::Core/;
+
+  __PACKAGE__->load_components('InflateColumn::DateTime');
+  __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
+
+  __PACKAGE__->table('year2000cds');
+  __PACKAGE__->result_source_instance->is_virtual(1);
+  __PACKAGE__->result_source_instance->view_definition(
+      "SELECT cdid, artist, title FROM cd WHERE year ='2000'"
+      );
+
+
 =head1 DESCRIPTION
 
-A ResultSource is a component of a schema from which results can be directly
-retrieved, most usually a table (see L<DBIx::Class::ResultSource::Table>)
+A ResultSource is an object that represents a source of data for querying.
 
-Basic view support also exists, see L<<DBIx::Class::ResultSource::View>.
+This class is a base class for various specialised types of result
+sources, for example L<DBIx::Class::ResultSource::Table>. Table is the
+default result source type, so one is created for you when defining a
+result class as described in the synopsis above.
 
+More specifically, the L<DBIx::Class::Core> base class pulls in the
+L<DBIx::Class::ResultSourceProxy::Table> component, which defines
+the L<table|DBIx::Class::ResultSourceProxy::Table/table> method.
+When called, C<table> creates and stores an instance of
+L<DBIx::Class::ResultSource::Table>. Luckily, to use tables as result
+sources, you don't need to remember any of this.
+
+Result sources representing select queries, or views, can also be
+created, see L<DBIx::Class::ResultSource::View> for full details.
+
+=head2 Finding result source objects
+
+As mentioned above, a result source instance is created and stored for
+you when you define a L<Result Class|DBIx::Class::Manual::Glossary/Result Class>.
+
+You can retrieve the result source at runtime in the following ways:
+
+=over
+
+=item From a Schema object:
+
+   $schema->source($source_name);
+
+=item From a Row object:
+
+   $row->result_source;
+
+=item From a ResultSet object:
+
+   $rs->result_source;
+
+=back
+
 =head1 METHODS
 
 =pod
@@ -69,9 +131,9 @@
 
   $source->add_columns('col1' => \%col1_info, 'col2' => \%col2_info, ...);
 
-Adds columns to the result source. If supplied key => hashref pairs, uses
-the hashref as the column_info for that column. Repeated calls of this
-method will add more columns, not replace them.
+Adds columns to the result source. If supplied colname => hashref
+pairs, uses the hashref as the L</column_info> for that column. Repeated
+calls of this method will add more columns, not replace them.
 
 The column names given will be created as accessor methods on your
 L<DBIx::Class::Row> objects. You can change the name of the accessor
@@ -84,40 +146,62 @@
 
 =item accessor
 
+   { accessor => '_name' }
+
+   # example use, replace standard accessor with one of your own:
+   sub name {
+       my ($self, $value) = @_;
+
+       die "Name cannot contain digits!" if($value =~ /\d/);
+       $self->_name($value);
+
+       return $self->_name();
+   }
+
 Use this to set the name of the accessor method for this column. If unset,
 the name of the column will be used.
 
 =item data_type
 
-This contains the column type. It is automatically filled by the
-L<SQL::Translator::Producer::DBIx::Class::File> producer, and the
-L<DBIx::Class::Schema::Loader> module. If you do not enter a
-data_type, DBIx::Class will attempt to retrieve it from the
-database for you, using L<DBI>'s column_info method. The values of this
-key are typically upper-cased.
+   { data_type => 'integer' }
 
+This contains the column type. It is automatically filled if you use the
+L<SQL::Translator::Producer::DBIx::Class::File> producer, or the
+L<DBIx::Class::Schema::Loader> module. 
+
 Currently there is no standard set of values for the data_type. Use
 whatever your database supports.
 
 =item size
 
+   { size => 20 }
+
 The length of your column, if it is a column type that can have a size
-restriction. This is currently only used by L<DBIx::Class::Schema/deploy>.
+restriction. This is currently only used to create tables from your
+schema, see L<DBIx::Class::Schema/deploy>.
 
 =item is_nullable
 
-Set this to a true value for a columns that is allowed to contain
-NULL values. This is currently only used by L<DBIx::Class::Schema/deploy>.
+   { is_nullable => 1 }
 
+Set this to a true value for a column that is allowed to contain NULL
+values, default is false. This is currently only used to create tables
+from your schema, see L<DBIx::Class::Schema/deploy>.
+
 =item is_auto_increment
 
+   { is_auto_increment => 1 }
+
 Set this to a true value for a column whose value is somehow
-automatically set. This is used to determine which columns to empty
-when cloning objects using L<DBIx::Class::Row/copy>. It is also used by
+automatically set, defaults to false. This is used to determine which
+columns to empty when cloning objects using
+L<DBIx::Class::Row/copy>. It is also used by
 L<DBIx::Class::Schema/deploy>.
 
 =item is_numeric
 
+   { is_numeric => 1 }
+
 Set this to a true or false value (not C<undef>) to explicitly specify
 if this column contains numeric data. This controls how set_column
 decides whether to consider a column dirty after an update: if
@@ -130,22 +214,29 @@
 
 =item is_foreign_key
 
+   { is_foreign_key => 1 }
+
 Set this to a true value for a column that contains a key from a
-foreign table. This is currently only used by
-L<DBIx::Class::Schema/deploy>.
+foreign table, defaults to false. This is currently only used to
+create tables from your schema, see L<DBIx::Class::Schema/deploy>.
 
 =item default_value
 
-Set this to the default value which will be inserted into a column
-by the database. Can contain either a value or a function (use a
+   { default_value => \'now()' }
+
+Set this to the default value which will be inserted into a column by
+the database. Can contain either a value or a function (use a
 reference to a scalar e.g. C<\'now()'> if you want a function). This
-is currently only used by L<DBIx::Class::Schema/deploy>.
+is currently only used to create tables from your schema, see
+L<DBIx::Class::Schema/deploy>.
 
 See the note on L<DBIx::Class::Row/new> for more information about possible
 issues related to db-side default values.
 
 =item sequence
 
+   { sequence => 'my_table_seq' }
+
 Set this on a primary key column to the name of the sequence used to
 generate a new key value. If not specified, L<DBIx::Class::PK::Auto>
 will attempt to retrieve the name of the sequence from the database
@@ -153,10 +244,16 @@
 
 =item auto_nextval
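+
+   { auto_nextval => 1 }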
 
-Set this to a true value for a column whose value is retrieved
-automatically from an oracle sequence. If you do not use an Oracle
-trigger to get the nextval, you have to set sequence as well.
+Set this to a true value for a column whose value is retrieved automatically
+from a sequence or function (if supported by your Storage driver). For a
+sequence, if you do not use a trigger to get the nextval, you have to set the
+L</sequence> value as well.
 
+Also set this for MSSQL columns with the 'uniqueidentifier'
+L<DBIx::Class::ResultSource/data_type> whose values you want to automatically
+generate using C<NEWID()>, unless they are a primary key in which case this will
+be done anyway.
+
 =item extra
 
 This is used by L<DBIx::Class::Schema/deploy> and L<SQL::Translator>
@@ -171,13 +268,13 @@
 
 =over
 
-=item Arguments: $colname, [ \%columninfo ]
+=item Arguments: $colname, \%columninfo?
 
 =item Return value: 1/0 (true/false)
 
 =back
 
-  $source->add_column('col' => \%info?);
+  $source->add_column('col' => \%info);
 
 Add a single column and optional column info. Uses the same column
 info keys as L</add_columns>.
@@ -237,8 +334,8 @@
   my $info = $source->column_info($col);
 
 Returns the column metadata hashref for a column, as originally passed
-to L</add_columns>. See the description of L</add_columns> for information
-on the contents of the hashref.
+to L</add_columns>. See L</add_columns> above for information on the
+contents of the hashref.
 
 =cut
 
@@ -292,7 +389,7 @@
   my $self = shift;
   $self->throw_exception(
     "columns() is a read-only accessor, did you mean add_columns()?"
-  ) if (@_ > 1);
+  ) if @_;
   return @{$self->{_ordered_columns}||[]};
 }
 
@@ -362,14 +459,16 @@
 
 =back
 
-Defines one or more columns as primary key for this source. Should be
+Defines one or more columns as primary key for this source. Must be
 called after L</add_columns>.
 
 Additionally, defines a L<unique constraint|add_unique_constraint>
 named C<primary>.
 
 The primary key columns are used by L<DBIx::Class::PK::Auto> to
-retrieve automatically created values from the database.
+retrieve automatically created values from the database. They are also
+used as default joining columns when specifying relationships, see
+L<DBIx::Class::Relationship>.
 
 =cut
 
@@ -408,7 +507,7 @@
 
 =over 4
 
-=item Arguments: [ $name ], \@colnames
+=item Arguments: $name?, \@colnames
 
 =item Return value: undefined
 
@@ -426,11 +525,13 @@
 
   __PACKAGE__->add_unique_constraint([ qw/column1 column2/ ]);
 
-This will result in a unique constraint named C<table_column1_column2>, where
-C<table> is replaced with the table name.
+This will result in a unique constraint named
+C<table_column1_column2>, where C<table> is replaced with the table
+name.
 
-Unique constraints are used, for example, when you call
-L<DBIx::Class::ResultSet/find>. Only columns in the constraint are searched.
+Unique constraints are used, for example, when you pass the constraint
+name as the C<key> attribute to L<DBIx::Class::ResultSet/find>. Then
+only columns in the constraint are searched.
 
 Throws an error if any of the given column names do not yet exist on
 the result source.
@@ -484,7 +585,10 @@
 sub name_unique_constraint {
   my ($self, $cols) = @_;
 
-  return join '_', $self->name, @$cols;
+  my $name = $self->name;
+  $name = $$name if (ref $name eq 'SCALAR');
+
+  return join '_', $name, @$cols;
 }
 
 =head2 unique_constraints
@@ -499,7 +603,8 @@
 
   $source->unique_constraints();
 
-Read-only accessor which returns a hash of unique constraints on this source.
+Read-only accessor which returns a hash of unique constraints on this
+source.
 
 The hash is keyed by constraint name, and contains an arrayref of
 column names as values.
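+
+For example (constraint names shown are illustrative):
+
+  my %constraints = $source->unique_constraints;
+  # e.g. ( primary => [ 'id' ], table_column1_column2 => [ 'column1', 'column2' ] )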
@@ -659,12 +764,16 @@
 
 =back
 
-  package My::ResultSetClass;
+  package My::Schema::ResultSet::Artist;
   use base 'DBIx::Class::ResultSet';
   ...
 
-  $source->resultset_class('My::ResultSet::Class');
+  # In the result class
+  __PACKAGE__->resultset_class('My::Schema::ResultSet::Artist');
 
+  # Or in code
+  $source->resultset_class('My::Schema::ResultSet::Artist');
+
 Set the class of the resultset. This is useful if you want to create your
 own resultset methods. Create your own class derived from
 L<DBIx::Class::ResultSet>, and set it here. If called with no arguments,
@@ -681,6 +790,10 @@
 
 =back
 
+  # In the result class
+  __PACKAGE__->resultset_attributes({ order_by => [ 'id' ] });
+
+  # Or in code
   $source->resultset_attributes({ order_by => [ 'id' ] });
 
 Store a collection of resultset attributes that will be set on every
@@ -893,7 +1006,7 @@
   }
   return unless $f_source; # Can't test rel without f_source
 
-  eval { $self->_resolve_join($rel, 'me') };
+  eval { $self->_resolve_join($rel, 'me', {}, []) };
 
   if ($@) { # If the resolve failed, back out and re-throw the error
     delete $rels{$rel}; #
@@ -981,7 +1094,7 @@
 L<DBIx::Class::Relationship>.
 
 The returned hashref is keyed by the name of the opposing
-relationship, and contains it's data in the same manner as
+relationship, and contains its data in the same manner as
 L</relationship_info>.
 
 =cut
@@ -1075,66 +1188,79 @@
   return $found;
 }
 
-sub resolve_join {
-  carp 'resolve_join is a private method, stop calling it';
-  my $self = shift;
-  $self->_resolve_join (@_);
-}
-
 # Returns the {from} structure used to express JOIN conditions
 sub _resolve_join {
-  my ($self, $join, $alias, $seen, $jpath, $force_left) = @_;
+  my ($self, $join, $alias, $seen, $jpath, $parent_force_left) = @_;
 
   # we need a supplied one, because we do in-place modifications, no returns
   $self->throw_exception ('You must supply a seen hashref as the 3rd argument to _resolve_join')
-    unless $seen;
+    unless ref $seen eq 'HASH';
 
-  # This isn't quite right, we should actually dive into $seen and reconstruct
-  # the entire path (the reference entry point would be the join conditional
-  # with depth == current_depth - 1. At this point however nothing depends on
-  # having the entire path, transcending related_resultset, so just leave it
-  # as is, hairy enough already.
-  $jpath ||= [];
+  $self->throw_exception ('You must supply a joinpath arrayref as the 4th argument to _resolve_join')
+    unless ref $jpath eq 'ARRAY';
 
-  if (ref $join eq 'ARRAY') {
+  $jpath = [@$jpath]; # copy
+
+  if (not defined $join) {
+    return ();
+  }
+  elsif (ref $join eq 'ARRAY') {
     return
       map {
-        $self->_resolve_join($_, $alias, $seen, [@$jpath], $force_left);
+        $self->_resolve_join($_, $alias, $seen, $jpath, $parent_force_left);
       } @$join;
-  } elsif (ref $join eq 'HASH') {
-    return
-      map {
-        my $as = ($seen->{$_} ? join ('_', $_, $seen->{$_} + 1) : $_);  # the actual seen value will be incremented below
-        local $force_left->{force} = $force_left->{force};
-        (
-          $self->_resolve_join($_, $alias, $seen, [@$jpath], $force_left),
-          $self->related_source($_)->_resolve_join(
-            $join->{$_}, $as, $seen, [@$jpath, $_], $force_left
-          )
-        );
-      } keys %$join;
-  } elsif (ref $join) {
-    $self->throw_exception("No idea how to resolve join reftype ".ref $join);
-  } else {
+  }
+  elsif (ref $join eq 'HASH') {
 
-    my $count = ++$seen->{$join};
-    my $as = ($count > 1 ? "${join}_${count}" : $join);
+    my @ret;
+    for my $rel (keys %$join) {
 
-    my $rel_info = $self->relationship_info($join);
-    $self->throw_exception("No such relationship ${join}") unless $rel_info;
-    my $type;
-    if ($force_left) {
-      $type = 'left';
-    } else {
-      $type = $rel_info->{attrs}{join_type} || '';
-      $force_left = 1 if lc($type) eq 'left';
+      my $rel_info = $self->relationship_info($rel)
+        or $self->throw_exception("No such relationship ${rel}");
+
+      my $force_left = $parent_force_left;
+      $force_left ||= lc($rel_info->{attrs}{join_type}||'') eq 'left';
+
+      # the actual seen value will be incremented by the recursion
+      my $as = $self->storage->relname_to_table_alias(
+        $rel, ($seen->{$rel} && $seen->{$rel} + 1)
+      );
+
+      push @ret, (
+        $self->_resolve_join($rel, $alias, $seen, [@$jpath], $force_left),
+        $self->related_source($rel)->_resolve_join(
+          $join->{$rel}, $as, $seen, [@$jpath, { $rel => $as }], $force_left
+        )
+      );
     }
+    return @ret;
 
+  }
+  elsif (ref $join) {
+    $self->throw_exception("No idea how to resolve join reftype ".ref $join);
+  }
+  else {
+    my $count = ++$seen->{$join};
+    my $as = $self->storage->relname_to_table_alias(
+      $join, ($count > 1 && $count)
+    );
+
+    my $rel_info = $self->relationship_info($join)
+      or $self->throw_exception("No such relationship ${join}");
+
     my $rel_src = $self->related_source($join);
     return [ { $as => $rel_src->from,
                -source_handle => $rel_src->handle,
-               -join_type => $type,
-               -join_path => [@$jpath, $join],
+               -join_type => $parent_force_left
+                  ? 'left'
+                  : $rel_info->{attrs}{join_type}
+                ,
+               -join_path => [@$jpath, { $join => $as } ],
+               -is_single => (
+                  $rel_info->{attrs}{accessor}
+                    &&
+                  List::Util::first { $rel_info->{attrs}{accessor} eq $_ } (qw/single filter/)
+                ),
                -alias => $as,
                -relation_chain_depth => $seen->{-relation_chain_depth} || 0,
              },
@@ -1153,18 +1279,22 @@
 # hashref of columns of the related object.
 sub _pk_depends_on {
   my ($self, $relname, $rel_data) = @_;
-  my $cond = $self->relationship_info($relname)->{cond};
 
+  my $relinfo = $self->relationship_info($relname);
+
+  # don't assume things if the relationship direction is specified
+  return $relinfo->{attrs}{is_foreign_key_constraint}
+    if exists ($relinfo->{attrs}{is_foreign_key_constraint});
+
+  my $cond = $relinfo->{cond};
   return 0 unless ref($cond) eq 'HASH';
 
   # map { foreign.foo => 'self.bar' } to { bar => 'foo' }
-
   my $keyhash = { map { my $x = $_; $x =~ s/.*\.//; $x; } reverse %$cond };
 
   # assume anything that references our PK probably is dependent on us
   # rather than vice versa, unless the far side is (a) defined or (b)
   # auto-increment
-
   my $rel_source = $self->related_source($relname);
 
   foreach my $p ($self->primary_columns) {
@@ -1193,7 +1323,6 @@
 
 sub _resolve_condition {
   my ($self, $cond, $as, $for) = @_;
-  #warn %$cond;
   if (ref $cond eq 'HASH') {
     my %ret;
     foreach my $k (keys %{$cond}) {
@@ -1207,10 +1336,14 @@
         #warn "$self $k $for $v";
         unless ($for->has_column_loaded($v)) {
           if ($for->in_storage) {
-            $self->throw_exception(
-              "Column ${v} not loaded or not passed to new() prior to insert()"
-                ." on ${for} trying to resolve relationship (maybe you forgot "
-                  ."to call ->discard_changes to get defaults from the db)"
+            $self->throw_exception(sprintf
+              "Unable to resolve relationship '%s' from object %s: column '%s' not "
+            . 'loaded from storage (or not passed to new() prior to insert()). You '
+            . 'probably need to call ->discard_changes to get the server-side defaults '
+            . 'from the database.',
+              $as,
+              $for,
+              $v,
             );
           }
           return $UNRESOLVABLE_CONDITION;
@@ -1234,93 +1367,24 @@
   } elsif (ref $cond eq 'ARRAY') {
     return [ map { $self->_resolve_condition($_, $as, $for) } @$cond ];
   } else {
-   die("Can't handle this yet :(");
+   die("Can't handle condition $cond yet :(");
   }
 }
 
-# Legacy code, needs to go entirely away (fully replaced by _resolve_prefetch)
-sub resolve_prefetch {
-  carp 'resolve_prefetch is a private method, stop calling it';
 
-  my ($self, $pre, $alias, $seen, $order, $collapse) = @_;
-  $seen ||= {};
-  if( ref $pre eq 'ARRAY' ) {
-    return
-      map { $self->resolve_prefetch( $_, $alias, $seen, $order, $collapse ) }
-        @$pre;
-  }
-  elsif( ref $pre eq 'HASH' ) {
-    my @ret =
-    map {
-      $self->resolve_prefetch($_, $alias, $seen, $order, $collapse),
-      $self->related_source($_)->resolve_prefetch(
-               $pre->{$_}, "${alias}.$_", $seen, $order, $collapse)
-    } keys %$pre;
-    return @ret;
-  }
-  elsif( ref $pre ) {
-    $self->throw_exception(
-      "don't know how to resolve prefetch reftype ".ref($pre));
-  }
-  else {
-    my $count = ++$seen->{$pre};
-    my $as = ($count > 1 ? "${pre}_${count}" : $pre);
-    my $rel_info = $self->relationship_info( $pre );
-    $self->throw_exception( $self->name . " has no such relationship '$pre'" )
-      unless $rel_info;
-    my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
-    my $rel_source = $self->related_source($pre);
-
-    if (exists $rel_info->{attrs}{accessor}
-         && $rel_info->{attrs}{accessor} eq 'multi') {
-      $self->throw_exception(
-        "Can't prefetch has_many ${pre} (join cond too complex)")
-        unless ref($rel_info->{cond}) eq 'HASH';
-      my $dots = @{[$as_prefix =~ m/\./g]} + 1; # +1 to match the ".${as_prefix}"
-      if (my ($fail) = grep { @{[$_ =~ m/\./g]} == $dots }
-                         keys %{$collapse}) {
-        my ($last) = ($fail =~ /([^\.]+)$/);
-        carp (
-          "Prefetching multiple has_many rels ${last} and ${pre} "
-          .(length($as_prefix)
-            ? "at the same level (${as_prefix}) "
-            : "at top level "
-          )
-          . 'will explode the number of row objects retrievable via ->next or ->all. '
-          . 'Use at your own risk.'
-        );
-      }
-      #my @col = map { (/^self\.(.+)$/ ? ("${as_prefix}.$1") : ()); }
-      #              values %{$rel_info->{cond}};
-      $collapse->{".${as_prefix}${pre}"} = [ $rel_source->primary_columns ];
-        # action at a distance. prepending the '.' allows simpler code
-        # in ResultSet->_collapse_result
-      my @key = map { (/^foreign\.(.+)$/ ? ($1) : ()); }
-                    keys %{$rel_info->{cond}};
-      my @ord = (ref($rel_info->{attrs}{order_by}) eq 'ARRAY'
-                   ? @{$rel_info->{attrs}{order_by}}
-                   : (defined $rel_info->{attrs}{order_by}
-                       ? ($rel_info->{attrs}{order_by})
-                       : ()));
-      push(@$order, map { "${as}.$_" } (@key, @ord));
-    }
-
-    return map { [ "${as}.$_", "${as_prefix}${pre}.$_", ] }
-      $rel_source->columns;
-  }
-}
-
 # Accepts one or more relationships for the current source and returns an
 # array of column names for each of those relationships. Column names are
 # prefixed relative to the current source, in accordance with where they appear
-# in the supplied relationships. Needs an alias_map generated by
-# $rs->_joinpath_aliases
+# in the supplied relationships.
 
 sub _resolve_prefetch {
   my ($self, $pre, $alias, $alias_map, $order, $collapse, $pref_path) = @_;
   $pref_path ||= [];
 
-  if( ref $pre eq 'ARRAY' ) {
+  if (not defined $pre) {
+    return ();
+  }
+  elsif( ref $pre eq 'ARRAY' ) {
     return
       map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ) }
         @$pre;
@@ -1339,15 +1403,14 @@
       "don't know how to resolve prefetch reftype ".ref($pre));
   }
   else {
-
     my $p = $alias_map;
     $p = $p->{$_} for (@$pref_path, $pre);
 
     $self->throw_exception (
-      "Unable to resolve prefetch $pre - join alias map does not contain an entry for path "
+      "Unable to resolve prefetch '$pre' - join alias map does not contain an entry for path: "
       . join (' -> ', @$pref_path, $pre)
     ) if (ref $p->{-join_aliases} ne 'ARRAY' or not @{$p->{-join_aliases}} );
-    
+
     my $as = shift @{$p->{-join_aliases}};
 
     my $rel_info = $self->relationship_info( $pre );
@@ -1356,8 +1419,7 @@
     my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
     my $rel_source = $self->related_source($pre);
 
-    if (exists $rel_info->{attrs}{accessor}
-         && $rel_info->{attrs}{accessor} eq 'multi') {
+    if ($rel_info->{attrs}{accessor} && $rel_info->{attrs}{accessor} eq 'multi') {
       $self->throw_exception(
         "Can't prefetch has_many ${pre} (join cond too complex)")
         unless ref($rel_info->{cond}) eq 'HASH';
@@ -1384,7 +1446,8 @@
                     keys %{$rel_info->{cond}};
       my @ord = (ref($rel_info->{attrs}{order_by}) eq 'ARRAY'
                    ? @{$rel_info->{attrs}{order_by}}
-                   : (defined $rel_info->{attrs}{order_by}
+
+                   : (defined $rel_info->{attrs}{order_by}
                        ? ($rel_info->{attrs}{order_by})
                        : ()));
       push(@$order, map { "${as}.$_" } (@key, @ord));
@@ -1447,7 +1510,7 @@
 =cut
 
 sub handle {
-    return new DBIx::Class::ResultSourceHandle({
+    return DBIx::Class::ResultSourceHandle->new({
         schema         => $_[0]->schema,
         source_moniker => $_[0]->source_name
     });
@@ -1461,11 +1524,13 @@
 
 sub throw_exception {
   my $self = shift;
+
   if (defined $self->schema) {
     $self->schema->throw_exception(@_);
-  } else {
-    croak(@_);
   }
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 =head2 source_info
@@ -1500,7 +1565,7 @@
   __PACKAGE__->column_info_from_storage(1);
 
 Enables the on-demand automatic loading of the above column
-metadata from storage as neccesary.  This is *deprecated*, and
+metadata from storage as necessary.  This is *deprecated*, and
 should not be used.  It will be removed before 1.0.
 
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSourceHandle.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSourceHandle.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSourceHandle.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -77,26 +77,27 @@
     my ($self, $cloning) = @_;
 
     my $to_serialize = { %$self };
-    
-    my $class = $self->schema->class($self->source_moniker);
-    $to_serialize->{schema} = $class;
+
+    delete $to_serialize->{schema};
+    $to_serialize->{_frozen_from_class} = $self->schema->class($self->source_moniker);
+
     return (Storable::freeze($to_serialize));
 }
 
 =head2 STORABLE_thaw
 
 Thaws frozen handle. Resets the internal schema reference to the package
 variable C<$thaw_schema>. The recommended way of setting this is to use
+variable C<$thaw_schema>. The recommended way of setting this is to use 
 C<< $schema->thaw($ice) >> which handles this for you.
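+
+For example, a sketch of a freeze/thaw round trip (C<$row> being any
+object obtained from this schema):
+
+  my $ice    = $schema->freeze($row);
+  my $thawed = $schema->thaw($ice);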
 
 =cut
 
 
 sub STORABLE_thaw {
-    my ($self, $cloning,$ice) = @_;
+    my ($self, $cloning, $ice) = @_;
     %$self = %{ Storable::thaw($ice) };
 
-    my $class = delete $self->{schema};
+    my $class = delete $self->{_frozen_from_class};
     if( $thaw_schema ) {
         $self->{schema} = $thaw_schema;
     }
@@ -105,7 +106,8 @@
         $self->{schema} = $rs->schema if $rs;
     }
 
-    carp "Unable to restore schema" unless $self->{schema};
+    carp "Unable to restore schema. Look at the 'freeze' and 'thaw' methods in DBIx::Class::Schema."
+        unless $self->{schema};
 }
 
 =head1 AUTHOR

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSourceProxy/Table.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSourceProxy/Table.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSourceProxy/Table.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -6,6 +6,7 @@
 use base qw/DBIx::Class::ResultSourceProxy/;
 
 use DBIx::Class::ResultSource::Table;
+use Scalar::Util ();
 
 __PACKAGE__->mk_classdata(table_class => 'DBIx::Class::ResultSource::Table');
 
@@ -22,8 +23,11 @@
     my $class_has_table_instance = ($table and $table->result_class eq $class);
     return $table if $class_has_table_instance;
 
+    my $table_class = $class->table_class;
+    $class->ensure_class_loaded($table_class);
+
     if( $table ) {
-        $table = $class->table_class->new({
+        $table = $table_class->new({
             %$table,
             result_class => $class,
             source_name => undef,
@@ -31,7 +35,7 @@
         });
     }
     else {
-        $table = $class->table_class->new({
+        $table = $table_class->new({
             name            => undef,
             result_class    => $class,
             source_name     => undef,
@@ -67,7 +71,7 @@
 =head2 table
 
   __PACKAGE__->table('tbl_name');
-  
+
 Gets or sets the table name.
 
 =cut
@@ -75,8 +79,13 @@
 sub table {
   my ($class, $table) = @_;
   return $class->result_source_instance->name unless $table;
-  unless (ref $table) {
-    $table = $class->table_class->new({
+
+  unless (Scalar::Util::blessed($table) && $table->isa($class->table_class)) {
+
+    my $table_class = $class->table_class;
+    $class->ensure_class_loaded($table_class);
+
+    $table = $table_class->new({
         $class->can('result_source_instance') ?
           %{$class->result_source_instance||{}} : (),
         name => $table,

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSourceProxy.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSourceProxy.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/ResultSourceProxy.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -41,7 +41,9 @@
   }
 }
 
-*add_column = \&add_columns;
+sub add_column {
+  shift->add_columns(@_);
+}
 
 sub has_column {
   shift->result_source_instance->has_column(@_);

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Row.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Row.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Row.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -4,9 +4,9 @@
 use warnings;
 
 use base qw/DBIx::Class/;
-use Carp::Clan qw/^DBIx::Class/;
+
+use DBIx::Class::Exception;
 use Scalar::Util ();
-use Scope::Guard;
 
 ###
 ### Internal method
@@ -155,43 +155,40 @@
     $new->result_source($source);
   }
 
-  if (my $related = delete $attrs->{-from_resultset}) {
+  if (my $related = delete $attrs->{-cols_from_relations}) {
     @{$new->{_ignore_at_insert}={}}{@$related} = ();
   }
 
   if ($attrs) {
     $new->throw_exception("attrs must be a hashref")
       unless ref($attrs) eq 'HASH';
-    
+
     my ($related,$inflated);
-    ## Pretend all the rels are actual objects, unset below if not, for insert() to fix
-    $new->{_rel_in_storage} = 1;
 
     foreach my $key (keys %$attrs) {
       if (ref $attrs->{$key}) {
         ## Can we extract this lot to use with update(_or .. ) ?
-        confess "Can't do multi-create without result source" unless $source;
+        $new->throw_exception("Can't do multi-create without result source")
+          unless $source;
         my $info = $source->relationship_info($key);
-        if ($info && $info->{attrs}{accessor}
-          && $info->{attrs}{accessor} eq 'single')
-        {
+        my $acc_type = $info->{attrs}{accessor} || '';
+        if ($acc_type eq 'single') {
           my $rel_obj = delete $attrs->{$key};
           if(!Scalar::Util::blessed($rel_obj)) {
             $rel_obj = $new->__new_related_find_or_new_helper($key, $rel_obj);
           }
 
           if ($rel_obj->in_storage) {
+            $new->{_rel_in_storage}{$key} = 1;
             $new->set_from_related($key, $rel_obj);
           } else {
-            $new->{_rel_in_storage} = 0;
             MULTICREATE_DEBUG and warn "MC $new uninserted $key $rel_obj\n";
           }
 
           $related->{$key} = $rel_obj;
           next;
-        } elsif ($info && $info->{attrs}{accessor}
-            && $info->{attrs}{accessor} eq 'multi'
-            && ref $attrs->{$key} eq 'ARRAY') {
+        }
+        elsif ($acc_type eq 'multi' && ref $attrs->{$key} eq 'ARRAY' ) {
           my $others = delete $attrs->{$key};
           my $total = @$others;
           my @objects;
@@ -202,27 +199,26 @@
             }
 
             if ($rel_obj->in_storage) {
-              $new->set_from_related($key, $rel_obj);
+              $rel_obj->throw_exception ('A multi relationship can not be pre-existing when doing multicreate. Something went wrong');
             } else {
-              $new->{_rel_in_storage} = 0;
               MULTICREATE_DEBUG and
                 warn "MC $new uninserted $key $rel_obj (${\($idx+1)} of $total)\n";
             }
-            $new->set_from_related($key, $rel_obj) if $rel_obj->in_storage;
             push(@objects, $rel_obj);
           }
           $related->{$key} = \@objects;
           next;
-        } elsif ($info && $info->{attrs}{accessor}
-          && $info->{attrs}{accessor} eq 'filter')
-        {
+        }
+        elsif ($acc_type eq 'filter') {
           ## 'filter' should disappear and get merged in with 'single' above!
           my $rel_obj = delete $attrs->{$key};
           if(!Scalar::Util::blessed($rel_obj)) {
             $rel_obj = $new->__new_related_find_or_new_helper($key, $rel_obj);
           }
-          unless ($rel_obj->in_storage) {
-            $new->{_rel_in_storage} = 0;
+          if ($rel_obj->in_storage) {
+            $new->{_rel_in_storage}{$key} = 1;
+          }
+          else {
             MULTICREATE_DEBUG and warn "MC $new uninserted $key $rel_obj";
           }
           $inflated->{$key} = $rel_obj;
@@ -235,7 +231,7 @@
       }
       $new->throw_exception("No such column $key on $class")
         unless $class->has_column($key);
-      $new->store_column($key => $attrs->{$key});          
+      $new->store_column($key => $attrs->{$key});
     }
 
     $new->{_relationship_data} = $related if $related;
@@ -283,31 +279,25 @@
   my $rollback_guard;
 
   # Check if we stored uninserted relobjs here in new()
-  my %related_stuff = (%{$self->{_relationship_data} || {}}, 
+  my %related_stuff = (%{$self->{_relationship_data} || {}},
                        %{$self->{_inflated_column} || {}});
 
-  if(!$self->{_rel_in_storage}) {
+  # insert what needs to be inserted before us
+  my %pre_insert;
+  for my $relname (keys %related_stuff) {
+    my $rel_obj = $related_stuff{$relname};
 
-    # The guard will save us if we blow out of this scope via die
-    $rollback_guard = $source->storage->txn_scope_guard;
+    if (! $self->{_rel_in_storage}{$relname}) {
+      next unless (Scalar::Util::blessed($rel_obj)
+                    && $rel_obj->isa('DBIx::Class::Row'));
 
-    ## Should all be in relationship_data, but we need to get rid of the
-    ## 'filter' reltype..
-    ## These are the FK rels, need their IDs for the insert.
+      next unless $source->_pk_depends_on(
+                    $relname, { $rel_obj->get_columns }
+                  );
 
-    my @pri = $self->primary_columns;
+      # The guard will save us if we blow out of this scope via die
+      $rollback_guard ||= $source->storage->txn_scope_guard;
 
-    REL: foreach my $relname (keys %related_stuff) {
-
-      my $rel_obj = $related_stuff{$relname};
-
-      next REL unless (Scalar::Util::blessed($rel_obj)
-                       && $rel_obj->isa('DBIx::Class::Row'));
-
-      next REL unless $source->_pk_depends_on(
-                        $relname, { $rel_obj->get_columns }
-                      );
-
       MULTICREATE_DEBUG and warn "MC $self pre-reconstructing $relname $rel_obj\n";
 
       my $them = { %{$rel_obj->{_relationship_data} || {} }, $rel_obj->get_inflated_columns };
@@ -315,12 +305,21 @@
                     ->related_source($relname)
                     ->resultset
                     ->find_or_create($them);
+
       %{$rel_obj} = %{$re};
-      $self->set_from_related($relname, $rel_obj);
-      delete $related_stuff{$relname};
+      $self->{_rel_in_storage}{$relname} = 1;
     }
+
+    $self->set_from_related($relname, $rel_obj);
+    delete $related_stuff{$relname};
   }
 
+  # start a transaction here if not started yet and there is more stuff
+  # to insert after us
+  if (keys %related_stuff) {
+    $rollback_guard ||= $source->storage->txn_scope_guard
+  }
+
   MULTICREATE_DEBUG and do {
     no warnings 'uninitialized';
     warn "MC $self inserting (".join(', ', $self->get_columns).")\n";
@@ -332,13 +331,12 @@
 
   ## PK::Auto
   my @auto_pri = grep {
-                   !defined $self->get_column($_) || 
-                   ref($self->get_column($_)) eq 'SCALAR'
+                  (not defined $self->get_column($_))
+                    ||
+                  (ref($self->get_column($_)) eq 'SCALAR')
                  } $self->primary_columns;
 
   if (@auto_pri) {
-    #$self->throw_exception( "More than one possible key found for auto-inc on ".ref $self )
-    #  if defined $too_many;
     MULTICREATE_DEBUG and warn "MC $self fetching missing PKs ".join(', ', @auto_pri)."\n";
     my $storage = $self->result_source->storage;
     $self->throw_exception( "Missing primary key but Storage doesn't support last_insert_id" )
@@ -353,47 +351,47 @@
   $self->{_dirty_columns} = {};
   $self->{related_resultsets} = {};
 
-  if(!$self->{_rel_in_storage}) {
-    ## Now do the relationships that need our ID (has_many etc.)
-    foreach my $relname (keys %related_stuff) {
-      my $rel_obj = $related_stuff{$relname};
-      my @cands;
-      if (Scalar::Util::blessed($rel_obj)
-          && $rel_obj->isa('DBIx::Class::Row')) {
-        @cands = ($rel_obj);
-      } elsif (ref $rel_obj eq 'ARRAY') {
-        @cands = @$rel_obj;
-      }
-      if (@cands) {
-        my $reverse = $source->reverse_relationship_info($relname);
-        foreach my $obj (@cands) {
-          $obj->set_from_related($_, $self) for keys %$reverse;
-          my $them = { %{$obj->{_relationship_data} || {} }, $obj->get_inflated_columns };
-          if ($self->__their_pk_needs_us($relname, $them)) {
-            if (exists $self->{_ignore_at_insert}{$relname}) {
-              MULTICREATE_DEBUG and warn "MC $self skipping post-insert on $relname";
-            } else {
-              MULTICREATE_DEBUG and warn "MC $self re-creating $relname $obj";
-              my $re = $self->result_source
-                            ->related_source($relname)
-                            ->resultset
-                            ->find_or_create($them);
-              %{$obj} = %{$re};
-              MULTICREATE_DEBUG and warn "MC $self new $relname $obj";
-            }
+  foreach my $relname (keys %related_stuff) {
+    next unless $source->has_relationship ($relname);
+
+    my @cands = ref $related_stuff{$relname} eq 'ARRAY'
+      ? @{$related_stuff{$relname}}
+      : $related_stuff{$relname}
+    ;
+
+    if (@cands
+          && Scalar::Util::blessed($cands[0])
+            && $cands[0]->isa('DBIx::Class::Row')
+    ) {
+      my $reverse = $source->reverse_relationship_info($relname);
+      foreach my $obj (@cands) {
+        $obj->set_from_related($_, $self) for keys %$reverse;
+        my $them = { %{$obj->{_relationship_data} || {} }, $obj->get_inflated_columns };
+        if ($self->__their_pk_needs_us($relname, $them)) {
+          if (exists $self->{_ignore_at_insert}{$relname}) {
+            MULTICREATE_DEBUG and warn "MC $self skipping post-insert on $relname";
           } else {
-            MULTICREATE_DEBUG and warn "MC $self post-inserting $obj";
-            $obj->insert();
+            MULTICREATE_DEBUG and warn "MC $self re-creating $relname $obj";
+            my $re = $self->result_source
+                          ->related_source($relname)
+                          ->resultset
+                          ->create($them);
+            %{$obj} = %{$re};
+            MULTICREATE_DEBUG and warn "MC $self new $relname $obj";
           }
+        } else {
+          MULTICREATE_DEBUG and warn "MC $self post-inserting $obj";
+          $obj->insert();
         }
       }
     }
-    delete $self->{_ignore_at_insert};
-    $rollback_guard->commit;
   }
 
   $self->in_storage(1);
-  undef $self->{_orig_ident};
+  delete $self->{_orig_ident};
+  delete $self->{_ignore_at_insert};
+  $rollback_guard->commit if $rollback_guard;
+
   return $self;
 }
 
@@ -413,7 +411,7 @@
 Indicates whether the object exists as a row in the database or
 not. This is set to true when L<DBIx::Class::ResultSet/find>,
 L<DBIx::Class::ResultSet/create> or L</insert>
-are used. 
+are used.
 
 Creating a row object using L<DBIx::Class::ResultSet/new>, or calling
 L</delete> on one, sets it to false.
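+
+For example (a sketch):
+
+  my $row = $rs->new_result({ name => 'foo' }); # in_storage is false
+  $row->insert;                                 # in_storage is now true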
@@ -423,7 +421,7 @@
 sub in_storage {
   my ($self, $val) = @_;
   $self->{_in_storage} = $val if @_ > 1;
-  return $self->{_in_storage};
+  return $self->{_in_storage} ? 1 : 0;
 }
 
 =head2 update
@@ -452,7 +450,7 @@
 to C<update>, e.g. ( { %{ $href } } )
 
 If the values passed or any of the column values set on the object
-contain scalar references, eg:
+contain scalar references, e.g.:
 
   $row->last_modified(\'NOW()');
   # OR
@@ -519,14 +517,16 @@
 
 The object is still perfectly usable, but L</in_storage> will
 now return 0 and the object must be reinserted using L</insert>
-before it can be used to L</update> the row again. 
+before it can be used to L</update> the row again.
 
 If you delete an object in a class with a C<has_many> relationship, an
 attempt is made to delete all the related objects as well. To turn
 this behaviour off, pass C<< cascade_delete => 0 >> in the C<$attr>
 hashref of the relationship, see L<DBIx::Class::Relationship>. Any
 database-level cascade or restrict will take precedence over a
-DBIx-Class-based cascading delete. 
+DBIx-Class-based cascading delete, since DBIx-Class B<deletes the
+main row first> and only then attempts to delete any remaining related
+rows.
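+
+For example, a sketch with hypothetical result classes:
+
+  __PACKAGE__->has_many(
+    cds => 'My::Schema::Result::CD', 'artist_id',
+    { cascade_delete => 0 }, # let the database decide instead
+  );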
 
 If you delete an object within a txn_do() (see L<DBIx::Class::Storage/txn_do>)
 and the transaction subsequently fails, the row object will remain marked as
@@ -600,7 +600,7 @@
   return $self->{_column_data}{$column} if exists $self->{_column_data}{$column};
   if (exists $self->{_inflated_column}{$column}) {
     return $self->store_column($column,
-      $self->_deflated_column($column, $self->{_inflated_column}{$column}));   
+      $self->_deflated_column($column, $self->{_inflated_column}{$column}));
   }
   $self->throw_exception( "No such column '${column}'" ) unless $self->has_column($column);
   return undef;
@@ -702,7 +702,7 @@
 Throws an exception if the column does not exist.
 
 Marks a column as having been changed regardless of whether it has
-really changed.  
+really changed.
 
 =cut
 sub make_column_dirty {
@@ -711,7 +711,7 @@
   $self->throw_exception( "No such column '${column}'" )
     unless exists $self->{_column_data}{$column} || $self->has_column($column);
 
-  # the entire clean/dirty code relieas on exists, not on true/false
+  # the entire clean/dirty code relies on exists, not on true/false
   return 1 if exists $self->{_dirty_columns}{$column};
 
   $self->{_dirty_columns}{$column} = 1;
@@ -750,12 +750,43 @@
 
 sub get_inflated_columns {
   my $self = shift;
-  return map {
-    my $accessor = $self->column_info($_)->{'accessor'} || $_;
-    ($_ => $self->$accessor);
-  } grep $self->has_column_loaded($_), $self->columns;
+
+  my %loaded_colinfo = (map
+    { $_ => $self->column_info($_) }
+    (grep { $self->has_column_loaded($_) } $self->columns)
+  );
+
+  my %inflated;
+  for my $col (keys %loaded_colinfo) {
+    if (exists $loaded_colinfo{$col}{accessor}) {
+      my $acc = $loaded_colinfo{$col}{accessor};
+      $inflated{$col} = $self->$acc if defined $acc;
+    }
+    else {
+      $inflated{$col} = $self->$col;
+    }
+  }
+
+  # return all loaded columns with the inflations overlayed on top
+  return ($self->get_columns, %inflated);
 }
 
+sub _is_column_numeric {
+   my ($self, $column) = @_;
+    my $colinfo = $self->column_info ($column);
+
+    # cache for speed (the object may *not* have a resultsource instance)
+    if (not defined $colinfo->{is_numeric} && $self->_source_handle) {
+      $colinfo->{is_numeric} =
+        $self->result_source->schema->storage->is_datatype_numeric ($colinfo->{data_type})
+          ? 1
+          : 0
+        ;
+    }
+
+    return $colinfo->{is_numeric};
+}
+
 =head2 set_column
 
   $row->set_column($col => $val);
@@ -784,12 +815,15 @@
   $self->{_orig_ident} ||= $self->ident_condition;
   my $old_value = $self->get_column($column);
 
-  $self->store_column($column, $new_value);
+  $new_value = $self->store_column($column, $new_value);
 
   my $dirty;
-  if (defined $old_value xor defined $new_value) {
+  if (!$self->in_storage) { # no point tracking dirtyness on uninserted data
     $dirty = 1;
   }
+  elsif (defined $old_value xor defined $new_value) {
+    $dirty = 1;
+  }
   elsif (not defined $old_value) {  # both undef
     $dirty = 0;
   }
@@ -797,18 +831,7 @@
     $dirty = 0;
   }
   else {  # do a numeric comparison if datatype allows it
-    my $colinfo = $self->column_info ($column);
-
-    # cache for speed
-    if (not defined $colinfo->{is_numeric}) {
-      $colinfo->{is_numeric} =
-        $self->result_source->schema->storage->is_datatype_numeric ($colinfo->{data_type})
-          ? 1
-          : 0
-        ;
-    }
-
-    if ($colinfo->{is_numeric}) {
+    if ($self->_is_column_numeric($column)) {
       $dirty = $old_value != $new_value;
     }
     else {
@@ -829,7 +852,7 @@
 
   $row->set_columns({ $col => $val, ... });
 
-=over 
+=over
 
 =item Arguments: \%columndata
 
@@ -864,7 +887,7 @@
 =back
 
 Sets more than one column value at once. Any inflated values are
-deflated and the raw values stored. 
+deflated and the raw values stored.
 
 Any related values passed as Row objects, using the relation name as a
 key, are reduced to the appropriate foreign key values and stored. If
@@ -875,7 +898,7 @@
 L<DBIx::Class::Relationship/has_many> key, and create the related
 objects if necessary.
 
-Be aware that the input hashref might be edited in place, so dont rely
+Be aware that the input hashref might be edited in place, so don't rely
 on it being the same after a call to C<set_inflated_columns>. If you
 need to preserve the hashref, it is sufficient to pass a shallow copy
 to C<set_inflated_columns>, e.g. ( { %{ $href } } )
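+
+For example, a sketch mixing a plain column with a related object:
+
+  $row->set_inflated_columns({
+    name   => 'New Name',
+    artist => $artist_row, # reduced to the appropriate foreign key values
+  });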
@@ -889,26 +912,23 @@
   foreach my $key (keys %$upd) {
     if (ref $upd->{$key}) {
       my $info = $self->relationship_info($key);
-      if ($info && $info->{attrs}{accessor}
-        && $info->{attrs}{accessor} eq 'single')
-      {
+      my $acc_type = $info->{attrs}{accessor} || '';
+      if ($acc_type eq 'single') {
         my $rel = delete $upd->{$key};
         $self->set_from_related($key => $rel);
         $self->{_relationship_data}{$key} = $rel;
-      } elsif ($info && $info->{attrs}{accessor}
-        && $info->{attrs}{accessor} eq 'multi') {
-          $self->throw_exception(
-            "Recursive update is not supported over relationships of type multi ($key)"
-          );
       }
-      elsif ($self->has_column($key)
-        && exists $self->column_info($key)->{_inflate_info})
-      {
+      elsif ($acc_type eq 'multi') {
+        $self->throw_exception(
+          "Recursive update is not supported over relationships of type '$acc_type' ($key)"
+        );
+      }
+      elsif ($self->has_column($key) && exists $self->column_info($key)->{_inflate_info}) {
         $self->set_inflated_column($key, delete $upd->{$key});
       }
     }
   }
-  $self->set_columns($upd);    
+  $self->set_columns($upd);
 }
 
 =head2 copy
@@ -932,7 +952,7 @@
 the new object.
 
 Relationships will be followed by the copy procedure B<only> if the
-relationship specifes a true value for its
+relationship specifies a true value for its
 L<cascade_copy|DBIx::Class::Relationship::Base> attribute. C<cascade_copy>
 is set by default on C<has_many> relationships and unset on all others.
 
@@ -954,8 +974,8 @@
   $new->set_inflated_columns($changes);
   $new->insert;
 
-  # Its possible we'll have 2 relations to the same Source. We need to make 
-  # sure we don't try to insert the same row twice esle we'll violate unique
+  # It's possible we'll have 2 relations to the same Source. We need to make
+  # sure we don't try to insert the same row twice, else we'll violate unique
   # constraints
   my $rels_copied = {};
 
@@ -963,7 +983,7 @@
     my $rel_info = $self->result_source->relationship_info($rel);
 
     next unless $rel_info->{attrs}{cascade_copy};
-  
+
     my $resolved = $self->result_source->_resolve_condition(
       $rel_info->{cond}, $rel, $new
     );
@@ -975,7 +995,7 @@
       $copied->{$id_str} = 1;
       my $rel_copy = $related->copy($resolved);
     }
- 
+
   }
   return $new;
 }
@@ -1042,56 +1062,68 @@
   my ($source_handle) = $source;
 
   if ($source->isa('DBIx::Class::ResultSourceHandle')) {
-      $source = $source_handle->resolve
-  } else {
-      $source_handle = $source->handle
+    $source = $source_handle->resolve
+  }
+  else {
+    $source_handle = $source->handle
   }
 
   my $new = {
     _source_handle => $source_handle,
     _column_data => $me,
-    _in_storage => 1
   };
   bless $new, (ref $class || $class);
 
-  my $schema;
   foreach my $pre (keys %{$prefetch||{}}) {
-    my $pre_val = $prefetch->{$pre};
-    my $pre_source = $source->related_source($pre);
-    $class->throw_exception("Can't prefetch non-existent relationship ${pre}")
-      unless $pre_source;
-    if (ref($pre_val->[0]) eq 'ARRAY') { # multi
-      my @pre_objects;
-      foreach my $pre_rec (@$pre_val) {
-        unless ($pre_source->primary_columns == grep { exists $pre_rec->[0]{$_}
-           and defined $pre_rec->[0]{$_} } $pre_source->primary_columns) {
-          next;
+
+    my $pre_source = $source->related_source($pre)
+      or $class->throw_exception("Can't prefetch non-existent relationship ${pre}");
+
+    my $accessor = $source->relationship_info($pre)->{attrs}{accessor}
+      or $class->throw_exception("No accessor for prefetched $pre");
+
+    my @pre_vals;
+    if (ref $prefetch->{$pre}[0] eq 'ARRAY') {
+      @pre_vals = @{$prefetch->{$pre}};
+    }
+    elsif ($accessor eq 'multi') {
+      $class->throw_exception("Implicit prefetch (via select/columns) not supported with accessor 'multi'");
+    }
+    else {
+      @pre_vals = $prefetch->{$pre};
+    }
+
+    my @pre_objects;
+    for my $me_pref (@pre_vals) {
+
+        # FIXME - this should not be necessary
+        # the collapser currently *could* return bogus elements with all
+        # columns set to undef
+        my $has_def;
+        for (values %{$me_pref->[0]}) {
+          if (defined $_) {
+            $has_def++;
+            last;
+          }
         }
-        push(@pre_objects, $pre_source->result_class->inflate_result(
-                             $pre_source, @{$pre_rec}));
-      }
-      $new->related_resultset($pre)->set_cache(\@pre_objects);
-    } elsif (defined $pre_val->[0]) {
-      my $fetched;
-      unless ($pre_source->primary_columns == grep { exists $pre_val->[0]{$_}
-         and !defined $pre_val->[0]{$_} } $pre_source->primary_columns)
-      {
-        $fetched = $pre_source->result_class->inflate_result(
-                      $pre_source, @{$pre_val});
-      }
-      my $accessor = $source->relationship_info($pre)->{attrs}{accessor};
-      $class->throw_exception("No accessor for prefetched $pre")
-       unless defined $accessor;
-      if ($accessor eq 'single') {
-        $new->{_relationship_data}{$pre} = $fetched;
-      } elsif ($accessor eq 'filter') {
-        $new->{_inflated_column}{$pre} = $fetched;
-      } else {
-       $class->throw_exception("Prefetch not supported with accessor '$accessor'");
-      }
-      $new->related_resultset($pre)->set_cache([ $fetched ]);
+        next unless $has_def;
+
+        push @pre_objects, $pre_source->result_class->inflate_result(
+          $pre_source, @$me_pref
+        );
     }
+
+    if ($accessor eq 'single') {
+      $new->{_relationship_data}{$pre} = $pre_objects[0];
+    }
+    elsif ($accessor eq 'filter') {
+      $new->{_inflated_column}{$pre} = $pre_objects[0];
+    }
+
+    $new->related_resultset($pre)->set_cache(\@pre_objects);
   }
+
+  $new->in_storage (1);
   return $new;
 }
 
@@ -1260,14 +1292,53 @@
     my $self = shift @_;
     my $attrs = shift @_;
     my $resultset = $self->result_source->resultset;
-    
+
     if(defined $attrs) {
-    	$resultset = $resultset->search(undef, $attrs);
+      $resultset = $resultset->search(undef, $attrs);
     }
-    
+
     return $resultset->find($self->{_orig_ident} || $self->ident_condition);
 }
 
+=head2 discard_changes ($attrs)
+
+Re-selects the row from the database, losing any changes that had
+been made.
+
+This method can also be used to refresh from storage, retrieving any
+changes made since the row was last read from storage.
+
+C<$attrs> is expected to be a hashref of attributes suitable for passing as
+the second argument to C<< $resultset->search($cond, $attrs) >>.
+
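+For example, a sketch restricting the refresh to two (hypothetical) columns:
+
+  $row->discard_changes({ columns => [qw/ name rank /] });
+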
+=cut
+
+sub discard_changes {
+  my ($self, $attrs) = @_;
+  delete $self->{_dirty_columns};
+  return unless $self->in_storage; # Don't reload if we aren't real!
+
+  # add a replication default to read from the master only
+  $attrs = { force_pool => 'master', %{$attrs||{}} };
+
+  if( my $current_storage = $self->get_from_storage($attrs)) {
+
+    # Set $self to the current.
+    %$self = %$current_storage;
+
+    # Avoid a possible infinite loop with
+    # sub DESTROY { $_[0]->discard_changes }
+    bless $current_storage, 'Do::Not::Exist';
+
+    return $self;
+  }
+  else {
+    $self->in_storage(0);
+    return $self;
+  }
+}
+
+
 =head2 throw_exception
 
 See L<DBIx::Class::Schema/throw_exception>.
@@ -1276,11 +1347,13 @@
 
 sub throw_exception {
   my $self=shift;
+
   if (ref $self && ref $self->result_source && $self->result_source->schema) {
-    $self->result_source->schema->throw_exception(@_);
-  } else {
-    croak(@_);
+    $self->result_source->schema->throw_exception(@_)
   }
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 =head2 id
@@ -1317,6 +1390,13 @@
 changes made since the row was last read from storage. Actually
 implemented in L<DBIx::Class::PK>
 
+Note: If you are using L<DBIx::Class::Storage::DBI::Replicated> as your
+storage, please keep in mind that if you L</discard_changes> on a row that you
+just updated or created, you should wrap the entire bit inside a transaction.
+Otherwise you run the risk that you insert or update to the master database
+but read from a replicant database that has not yet been updated from the
+master, which will produce unexpected results.
+
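+For example, a sketch of the safe pattern:
+
+  $schema->txn_do(sub {
+    $row->update({ rank => 2 }); # write goes to the master
+    $row->discard_changes;       # read stays on the master inside the txn
+  });
+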
 =cut
 
 1;

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks/MSSQL.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks/MSSQL.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks/MSSQL.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,14 @@
+package # Hide from PAUSE
+  DBIx::Class::SQLAHacks::MSSQL;
+
+use base qw( DBIx::Class::SQLAHacks );
+use Carp::Clan qw/^DBIx::Class|^SQL::Abstract/;
+
+#
+# MSSQL does not support ... OVER() ... RNO limits
+#
+sub _rno_default_order {
+  return \ '(SELECT(1))';
+}
+
+1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks/MySQL.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks/MySQL.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks/MySQL.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -12,7 +12,7 @@
   my $self = shift;
 
   my $table = $_[0];
-  $table = $self->_quote($table) unless ref($table);
+  $table = $self->_quote($table);
 
   if (! $_[1] or (ref $_[1] eq 'HASH' and !keys %{$_[1]} ) ) {
     return "INSERT INTO ${table} () VALUES ()"

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks/OracleJoins.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks/OracleJoins.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks/OracleJoins.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -96,8 +96,7 @@
 
 This module was originally written to support Oracle < 9i where ANSI joins
 weren't supported at all, but became the module for Oracle >= 8 because
-Oracle's optimising of ANSI joins is horrible.  (See:
-http://scsys.co.uk:8001/7495)
+Oracle's optimising of ANSI joins is horrible.
 
 =head1 SYNOPSIS
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/SQLAHacks.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,10 +1,15 @@
 package # Hide from PAUSE
   DBIx::Class::SQLAHacks;
 
+# This module is a subclass of SQL::Abstract::Limit and includes a number
+# of DBIC-specific workarounds, not yet suitable for inclusion into the
+# SQLA core
+
 use base qw/SQL::Abstract::Limit/;
 use strict;
 use warnings;
 use Carp::Clan qw/^DBIx::Class|^SQL::Abstract/;
+use Sub::Name();
 
 BEGIN {
   # reinstall the carp()/croak() functions imported into SQL::Abstract
@@ -12,21 +17,23 @@
   no warnings qw/redefine/;
   no strict qw/refs/;
   for my $f (qw/carp croak/) {
+
     my $orig = \&{"SQL::Abstract::$f"};
-    *{"SQL::Abstract::$f"} = sub {
-
-      local $Carp::CarpLevel = 1;   # even though Carp::Clan ignores this, $orig will not
-
-      if (Carp::longmess() =~ /DBIx::Class::SQLAHacks::[\w]+\(\) called/) {
-        __PACKAGE__->can($f)->(@_);
-      }
-      else {
-        $orig->(@_);
-      }
-    }
+    *{"SQL::Abstract::$f"} = Sub::Name::subname "SQL::Abstract::$f" =>
+      sub {
+        if (Carp::longmess() =~ /DBIx::Class::SQLAHacks::[\w]+ .+? called \s at/x) {
+          __PACKAGE__->can($f)->(@_);
+        }
+        else {
+          goto $orig;
+        }
+      };
   }
 }
 
+
+# Tries to determine limit dialect.
+#
 sub new {
   my $self = shift->SUPER::new(@_);
 
@@ -40,117 +47,254 @@
 }
 
 
-# Some databases (sqlite) do not handle multiple parenthesis
-# around in/between arguments. A tentative x IN ( ( 1, 2 ,3) )
-# is interpreted as x IN 1 or something similar.
-#
-# Since we currently do not have access to the SQLA AST, resort
-# to barbaric mutilation of any SQL supplied in literal form
+# ANSI standard Limit/Offset implementation. DB2 and MSSQL use this
+sub _RowNumberOver {
+  my ($self, $sql, $order, $rows, $offset ) = @_;
 
-sub _strip_outer_paren {
-  my ($self, $arg) = @_;
+  # get the select to make the final amount of columns equal the original one
+  my ($select) = $sql =~ /^ \s* SELECT \s+ (.+?) \s+ FROM/ix
+    or croak "Unrecognizable SELECT: $sql";
 
-  return $self->_SWITCH_refkind ($arg, {
-    ARRAYREFREF => sub {
-      $$arg->[0] = __strip_outer_paren ($$arg->[0]);
-      return $arg;
-    },
-    SCALARREF => sub {
-      return \__strip_outer_paren( $$arg );
-    },
-    FALLBACK => sub {
-      return $arg
-    },
-  });
-}
+  # get the order_by only (or make up an order if none exists)
+  my $order_by = $self->_order_by(
+    (delete $order->{order_by}) || $self->_rno_default_order
+  );
 
-sub __strip_outer_paren {
-  my $sql = shift;
+  # whatever is left of the order_by
+  my $group_having = $self->_order_by($order);
 
-  if ($sql and not ref $sql) {
-    while ($sql =~ /^ \s* \( (.*) \) \s* $/x ) {
-      $sql = $1;
-    }
-  }
+  my $qalias = $self->_quote ($self->{_dbic_rs_attrs}{alias});
 
+  $sql = sprintf (<<EOS, $offset + 1, $offset + $rows, );
+
+SELECT $select FROM (
+  SELECT $qalias.*, ROW_NUMBER() OVER($order_by ) AS rno__row__index FROM (
+    ${sql}${group_having}
+  ) $qalias
+) $qalias WHERE rno__row__index BETWEEN %d AND %d
+
+EOS
+
+  $sql =~ s/\s*\n\s*/ /g;   # easier to read in the debugger
   return $sql;
 }
 
-sub _where_field_IN {
-  my ($self, $lhs, $op, $rhs) = @_;
-  $rhs = $self->_strip_outer_paren ($rhs);
-  return $self->SUPER::_where_field_IN ($lhs, $op, $rhs);
+# some databases are happy with OVER (), some need OVER (ORDER BY (SELECT (1)) )
+sub _rno_default_order {
+  return undef;
 }
 
-sub _where_field_BETWEEN {
-  my ($self, $lhs, $op, $rhs) = @_;
-  $rhs = $self->_strip_outer_paren ($rhs);
-  return $self->SUPER::_where_field_BETWEEN ($lhs, $op, $rhs);
+# Informix specific limit, almost like LIMIT/OFFSET
+sub _SkipFirst {
+  my ($self, $sql, $order, $rows, $offset) = @_;
+
+  $sql =~ s/^ \s* SELECT \s+ //ix
+    or croak "Unrecognizable SELECT: $sql";
+
+  return sprintf ('SELECT %s%s%s%s',
+    $offset
+      ? sprintf ('SKIP %d ', $offset)
+      : ''
+    ,
+    sprintf ('FIRST %d ', $rows),
+    $sql,
+    $self->_order_by ($order),
+  );
 }
 
-# Slow but ANSI standard Limit/Offset support. DB2 uses this
-sub _RowNumberOver {
-  my ($self, $sql, $order, $rows, $offset ) = @_;
+# Crappy Top based Limit/Offset support. Legacy from MSSQL.
+sub _Top {
+  my ( $self, $sql, $order, $rows, $offset ) = @_;
 
-  $offset += 1;
-  my $last = $rows + $offset - 1;
-  my ( $order_by ) = $self->_order_by( $order );
+  # mangle the input sql so it can be properly aliased in the outer queries
+  $sql =~ s/^ \s* SELECT \s+ (.+?) \s+ (?=FROM)//ix
+    or croak "Unrecognizable SELECT: $sql";
+  my $sql_select = $1;
+  my @sql_select = split (/\s*,\s*/, $sql_select);
 
-  $sql = <<"SQL";
-SELECT * FROM
-(
-   SELECT Q1.*, ROW_NUMBER() OVER( ) AS ROW_NUM FROM (
-      $sql
-      $order_by
-   ) Q1
-) Q2
-WHERE ROW_NUM BETWEEN $offset AND $last
+  # we can't support subqueries (in fact MSSQL can't) - croak
+  if (@sql_select != @{$self->{_dbic_rs_attrs}{select}}) {
+    croak (sprintf (
+      'SQL SELECT did not parse cleanly - retrieved %d comma-separated elements, while '
+    . 'the resultset select attribute contains %d elements: %s',
+      scalar @sql_select,
+      scalar @{$self->{_dbic_rs_attrs}{select}},
+      $sql_select,
+    ));
+  }
 
-SQL
+  my $name_sep = $self->name_sep || '.';
+  my $esc_name_sep = "\Q$name_sep\E";
+  my $col_re = qr/ ^ (?: (.+) $esc_name_sep )? ([^$esc_name_sep]+) $ /x;
 
-  return $sql;
-}
+  my $rs_alias = $self->{_dbic_rs_attrs}{alias};
+  my $quoted_rs_alias = $self->_quote ($rs_alias);
 
-# Crappy Top based Limit/Offset support. MSSQL uses this currently,
-# but may have to switch to RowNumberOver one day
-sub _Top {
-  my ( $self, $sql, $order, $rows, $offset ) = @_;
+  # construct the new select lists, rename(alias) some columns if necessary
+  my (@outer_select, @inner_select, %seen_names, %col_aliases, %outer_col_aliases);
 
+  for (@{$self->{_dbic_rs_attrs}{select}}) {
+    next if ref $_;
+    my ($table, $orig_colname) = ( $_ =~ $col_re );
+    next unless $table;
+    $seen_names{$orig_colname}++;
+  }
+
+  for my $i (0 .. $#sql_select) {
+
+    my $colsel_arg = $self->{_dbic_rs_attrs}{select}[$i];
+    my $colsel_sql = $sql_select[$i];
+
+    # this may or may not work (in case of a scalarref or something)
+    my ($table, $orig_colname) = ( $colsel_arg =~ $col_re );
+
+    my $quoted_alias;
+    # do not attempt to understand non-scalar selects - alias numerically
+    if (ref $colsel_arg) {
+      $quoted_alias = $self->_quote ('column_' . (@inner_select + 1) );
+    }
+    # column name seen more than once - alias it
+    elsif ($orig_colname &&
+          ($seen_names{$orig_colname} && $seen_names{$orig_colname} > 1) ) {
+      $quoted_alias = $self->_quote ("${table}__${orig_colname}");
+    }
+
+    # we did rename - make a record and adjust
+    if ($quoted_alias) {
+      # alias inner
+      push @inner_select, "$colsel_sql AS $quoted_alias";
+
+      # push alias to outer
+      push @outer_select, $quoted_alias;
+
+      # Any aliasing accumulated here will be considered
+      # both for inner and outer adjustments of ORDER BY
+      $self->__record_alias (
+        \%col_aliases,
+        $quoted_alias,
+        $colsel_arg,
+        $table ? $orig_colname : undef,
+      );
+    }
+
+    # otherwise just leave things intact inside, and use the abbreviated one outside
+    # (as we do not have table names anymore)
+    else {
+      push @inner_select, $colsel_sql;
+
+      my $outer_quoted = $self->_quote ($orig_colname);  # it was not a duplicate so should just work
+      push @outer_select, $outer_quoted;
+      $self->__record_alias (
+        \%outer_col_aliases,
+        $outer_quoted,
+        $colsel_arg,
+        $table ? $orig_colname : undef,
+      );
+    }
+  }
+
+  my $outer_select = join (', ', @outer_select );
+  my $inner_select = join (', ', @inner_select );
+
+  %outer_col_aliases = (%outer_col_aliases, %col_aliases);
+
+  # deal with order
   croak '$order supplied to SQLAHacks limit emulators must be a hash'
     if (ref $order ne 'HASH');
 
   $order = { %$order }; #copy
 
-  my $last = $rows + $offset;
+  my $req_order = $order->{order_by};
 
-  my $req_order = $self->_order_by ($order->{order_by});
+  # examine normalized version, collapses nesting
+  my $limit_order;
+  if (scalar $self->_order_by_chunks ($req_order)) {
+    $limit_order = $req_order;
+  }
+  else {
+    $limit_order = [ map
+      { join ('', $rs_alias, $name_sep, $_ ) }
+      ( $self->{_dbic_rs_attrs}{_source_handle}->resolve->primary_columns )
+    ];
+  }
 
-  my $limit_order = $req_order ? $order->{order_by} : $order->{_virtual_order_by};
+  my ( $order_by_inner, $order_by_outer ) = $self->_order_directions($limit_order);
+  my $order_by_requested = $self->_order_by ($req_order);
 
-  delete $order->{$_} for qw/order_by _virtual_order_by/;
+  # generate the rest
+  delete $order->{order_by};
   my $grpby_having = $self->_order_by ($order);
 
-  my ( $order_by_inner, $order_by_outer ) = $self->_order_directions($limit_order);
+  # short circuit for counts - the ordering complexity is needless
+  if ($self->{_dbic_rs_attrs}{-for_count_only}) {
+    return "SELECT TOP $rows $inner_select $sql $grpby_having $order_by_outer";
+  }
 
-  $sql =~ s/^\s*(SELECT|select)//;
+  # we can't really adjust the order_by columns, as introspection is lacking
+  # resort to simple substitution
+  for my $col (keys %outer_col_aliases) {
+    for ($order_by_requested, $order_by_outer) {
+      $_ =~ s/\s+$col\s+/ $outer_col_aliases{$col} /g;
+    }
+  }
+  for my $col (keys %col_aliases) {
+    $order_by_inner =~ s/\s+$col\s+/ $col_aliases{$col} /g;
+  }
 
-  $sql = <<"SQL";
-  SELECT * FROM
-  (
-    SELECT TOP $rows * FROM
+
+  my $inner_lim = $rows + $offset;
+
+  $sql = "SELECT TOP $inner_lim $inner_select $sql $grpby_having $order_by_inner";
+
+  if ($offset) {
+    $sql = <<"SQL";
+
+    SELECT TOP $rows $outer_select FROM
     (
-        SELECT TOP $last $sql $grpby_having $order_by_inner
-    ) AS foo
+      $sql
+    ) $quoted_rs_alias
     $order_by_outer
-  ) AS bar
-  $req_order
+SQL
 
+  }
+
+  if ($order_by_requested) {
+    $sql = <<"SQL";
+
+    SELECT $outer_select FROM
+      ( $sql ) $quoted_rs_alias
+    $order_by_requested
 SQL
-    return $sql;
+
+  }
+
+  $sql =~ s/\s*\n\s*/ /g; # parsing out multiline statements is harder than a single line
+  return $sql;
 }
 
+# action at a distance to shorten Top code above
+sub __record_alias {
+  my ($self, $register, $alias, $fqcol, $col) = @_;
 
+  # record qualified name
+  $register->{$fqcol} = $alias;
+  $register->{$self->_quote($fqcol)} = $alias;
 
+  return unless $col;
+
+  # record unqualified name, undef (no adjustment) if a duplicate is found
+  if (exists $register->{$col}) {
+    $register->{$col} = undef;
+  }
+  else {
+    $register->{$col} = $alias;
+  }
+
+  $register->{$self->_quote($col)} = $register->{$col};
+}
+
+
+
 # While we're at it, this should make LIMIT queries more efficient,
 #  without digging into things too deeply
 sub _find_syntax {
@@ -158,17 +302,21 @@
   return $self->{_cached_syntax} ||= $self->SUPER::_find_syntax($syntax);
 }
 
+my $for_syntax = {
+  update => 'FOR UPDATE',
+  shared => 'FOR SHARE',
+};
+# Quotes table names, handles "limit" dialects (e.g. where rownum between x and
+# y), supports SELECT ... FOR UPDATE and SELECT ... FOR SHARE.
 sub select {
   my ($self, $table, $fields, $where, $order, @rest) = @_;
 
   $self->{"${_}_bind"} = [] for (qw/having from order/);
 
-  if (ref $table eq 'SCALAR') {
-    $table = $$table;
-  }
-  elsif (not ref $table) {
+  if (not ref($table) or ref($table) eq 'SCALAR') {
     $table = $self->_quote($table);
   }
+
   local $self->{rownum_hack_count} = 1
     if (defined $rest[0] && $self->{limit_dialect} eq 'RowNum');
   @rest = (-1) unless defined $rest[0];
@@ -177,22 +325,18 @@
   my ($sql, @where_bind) = $self->SUPER::select(
     $table, $self->_recurse_fields($fields), $where, $order, @rest
   );
-  $sql .= 
-    $self->{for} ?
-    (
-      $self->{for} eq 'update' ? ' FOR UPDATE' :
-      $self->{for} eq 'shared' ? ' FOR SHARE'  :
-      ''
-    ) :
-    ''
-  ;
+  if (my $for = delete $self->{_dbic_rs_attrs}{for}) {
+    $sql .= " $for_syntax->{$for}" if $for_syntax->{$for};
+  }
+
   return wantarray ? ($sql, @{$self->{from_bind}}, @where_bind, @{$self->{having_bind}}, @{$self->{order_bind}} ) : $sql;
 }
 
+# Quotes table names, and handles default inserts
 sub insert {
   my $self = shift;
   my $table = shift;
-  $table = $self->_quote($table) unless ref($table);
+  $table = $self->_quote($table);
 
   # SQLA will emit INSERT INTO $table ( ) VALUES ( )
   # which is sadly understood only by MySQL. Change default behavior here,
@@ -204,17 +348,19 @@
   $self->SUPER::insert($table, @_);
 }
 
+# Just quotes table names.
 sub update {
   my $self = shift;
   my $table = shift;
-  $table = $self->_quote($table) unless ref($table);
+  $table = $self->_quote($table);
   $self->SUPER::update($table, @_);
 }
 
+# Just quotes table names.
 sub delete {
   my $self = shift;
   my $table = shift;
-  $table = $self->_quote($table) unless ref($table);
+  $table = $self->_quote($table);
   $self->SUPER::delete($table, @_);
 }
 
@@ -240,28 +386,37 @@
           ? ' AS col'.$self->{rownum_hack_count}++
           : '')
       } @$fields);
-  } elsif ($ref eq 'HASH') {
-    foreach my $func (keys %$fields) {
-      if ($func eq 'distinct') {
-        my $_fields = $fields->{$func};
-        if (ref $_fields eq 'ARRAY' && @{$_fields} > 1) {
-          croak (
-            'The select => { distinct => ... } syntax is not supported for multiple columns.'
-           .' Instead please use { group_by => [ qw/' . (join ' ', @$_fields) . '/ ] }'
-           .' or { select => [ qw/' . (join ' ', @$_fields) . '/ ], distinct => 1 }'
-          );
-        }
-        else {
-          $_fields = @{$_fields}[0] if ref $_fields eq 'ARRAY';
-          carp (
-            'The select => { distinct => ... } syntax will be deprecated in DBIC version 0.09,'
-           ." please use { group_by => '${_fields}' } or { select => '${_fields}', distinct => 1 }"
-          );
-        }
-      }
-      return $self->_sqlcase($func)
-        .'( '.$self->_recurse_fields($fields->{$func}).' )';
+  }
+  elsif ($ref eq 'HASH') {
+    my %hash = %$fields;
+
+    my $as = delete $hash{-as};   # if supplied
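+    # (e.g. a select of { count => 'me.id', -as => 'total' } - illustrative)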
+
+    my ($func, $args) = each %hash;
+    delete $hash{$func};
+
+    if (lc ($func) eq 'distinct' && ref $args eq 'ARRAY' && @$args > 1) {
+      croak (
+        'The select => { distinct => ... } syntax is not supported for multiple columns.'
+       .' Instead please use { group_by => [ qw/' . (join ' ', @$args) . '/ ] }'
+       .' or { select => [ qw/' . (join ' ', @$args) . '/ ], distinct => 1 }'
+      );
     }
+
+    my $select = sprintf ('%s( %s )%s',
+      $self->_sqlcase($func),
+      $self->_recurse_fields($args),
+      $as
+        ? sprintf (' %s %s', $self->_sqlcase('as'), $self->_quote ($as) )
+        : ''
+    );
+
+    # there should be nothing left
+    if (keys %hash) {
+      croak "Malformed select argument - too many keys in hash: " . join (',', keys %$fields );
+    }
+
+    return $select;
   }
   # Is the second check absolutely necessary?
   elsif ( $ref eq 'REF' and ref($$fields) eq 'ARRAY' ) {
@@ -279,9 +434,8 @@
 
     my $ret = '';
 
-    if (defined $arg->{group_by}) {
-      $ret = $self->_sqlcase(' group by ')
-        .$self->_recurse_fields($arg->{group_by}, { no_rownum_hack => 1 });
+    if (my $g = $self->_recurse_fields($arg->{group_by}, { no_rownum_hack => 1 }) ) {
+      $ret = $self->_sqlcase(' group by ') . $g;
     }
 
     if (defined $arg->{having}) {
@@ -338,16 +492,22 @@
   foreach my $j (@join) {
     my ($to, $on) = @$j;
 
+
     # check whether a join type exists
-    my $join_clause = '';
     my $to_jt = ref($to) eq 'ARRAY' ? $to->[0] : $to;
-    if (ref($to_jt) eq 'HASH' and exists($to_jt->{-join_type})) {
-      $join_clause = ' '.uc($to_jt->{-join_type}).' JOIN ';
-    } else {
-      $join_clause = ' JOIN ';
+    my $join_type;
+    if (ref($to_jt) eq 'HASH' and defined($to_jt->{-join_type})) {
+      $join_type = $to_jt->{-join_type};
+      $join_type =~ s/^\s+ | \s+$//xg;
     }
-    push(@sqlf, $join_clause);
 
+    $join_type = $self->{_default_jointype} if not defined $join_type;
+
+    my $join_clause = sprintf ('%s JOIN ',
+      $join_type ?  ' ' . uc($join_type) : ''
+    );
+    push @sqlf, $join_clause;
+
     if (ref $to eq 'ARRAY') {
       push(@sqlf, '(', $self->_recurse_from(@$to), ')');
     } else {
@@ -410,6 +570,7 @@
 sub _quote {
   my ($self, $label) = @_;
   return '' unless defined $label;
+  return $$label if ref($label) eq 'SCALAR';
   return "*" if $label eq '*';
   return $label unless $self->{quote_char};
   if(ref $self->{quote_char} eq "ARRAY"){
@@ -429,12 +590,15 @@
     return $self->{limit_dialect};
 }
 
+# Set to an array-ref to specify separate left and right quotes for table names.
+# A single scalar is equivalent to [ $char, $char ].
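+# e.g. quote_char => q{`} for MySQL, or quote_char => [ '[', ']' ] for MSSQL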
 sub quote_char {
     my $self = shift;
     $self->{quote_char} = shift if @_;
     return $self->{quote_char};
 }
 
+# Character separating quoted table names.
 sub name_sep {
     my $self = shift;
     $self->{name_sep} = shift if @_;
@@ -442,50 +606,3 @@
 }
 
 1;
-
-__END__
-
-=pod
-
-=head1 NAME
-
-DBIx::Class::SQLAHacks - This module is a subclass of SQL::Abstract::Limit
-and includes a number of DBIC-specific workarounds, not yet suitable for
-inclusion into SQLA proper.
-
-=head1 METHODS
-
-=head2 new
-
-Tries to determine limit dialect.
-
-=head2 select
-
-Quotes table names, handles "limit" dialects (e.g. where rownum between x and
-y), supports SELECT ... FOR UPDATE and SELECT ... FOR SHARE.
-
-=head2 insert update delete
-
-Just quotes table names.
-
-=head2 limit_dialect
-
-Specifies the dialect of used for implementing an SQL "limit" clause for
-restricting the number of query results returned.  Valid values are: RowNum.
-
-See L<DBIx::Class::Storage::DBI/connect_info> for details.
-
-=head2 name_sep
-
-Character separating quoted table names.
-
-See L<DBIx::Class::Storage::DBI/connect_info> for details.
-
-=head2 quote_char
-
-Set to an array-ref to specify separate left and right quotes for table names.
-
-See L<DBIx::Class::Storage::DBI/connect_info> for details.
-
-=cut
-

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Schema/Versioned.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Schema/Versioned.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Schema/Versioned.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,10 +1,9 @@
 package # Hide from PAUSE
   DBIx::Class::Version::Table;
-use base 'DBIx::Class';
+use base 'DBIx::Class::Core';
 use strict;
 use warnings;
 
-__PACKAGE__->load_components(qw/ Core/);
 __PACKAGE__->table('dbix_class_schema_versions');
 
 __PACKAGE__->add_columns
@@ -31,8 +30,7 @@
 
 package # Hide from PAUSE
   DBIx::Class::Version::TableCompat;
-use base 'DBIx::Class';
-__PACKAGE__->load_components(qw/ Core/);
+use base 'DBIx::Class::Core';
 __PACKAGE__->table('SchemaVersions');
 
 __PACKAGE__->add_columns
@@ -116,7 +114,7 @@
   use Getopt::Long;
   use MyApp::Schema;
 
-  my ( $preversion, $help ); 
+  my ( $preversion, $help );
   GetOptions(
     'p|preversion:s'  => \$preversion,
   ) or die pod2usage;
@@ -152,13 +150,13 @@
 and we can safely deploy the DDL to it. However things are not always so simple.
 
 If you want to initialise a pre-existing database where the DDL is not the same
-as the DDL for your current schema version then you will need a diff which 
+as the DDL for your current schema version then you will need a diff which
 converts the database's DDL to the current DDL. The best way to do this is
 to get a dump of the database schema (without data) and save that in your
 SQL directory as version 0.000 (the filename must be as with
-L<DBIx::Class::Schema/ddl_filename>) then create a diff using your create DDL 
+L<DBIx::Class::Schema/ddl_filename>) then create a diff using your create DDL
 script given above from version 0.000 to the current version. Then hand check
-and if necessary edit the resulting diff to ensure that it will apply. Once you have 
+and if necessary edit the resulting diff to ensure that it will apply. Once you have
 done all that you can do this:
 
   if (!$schema->get_db_version()) {
@@ -170,7 +168,7 @@
   $schema->upgrade();
 
 In the case of an unversioned database the above code will create the
-dbix_class_schema_versions table and write version 0.000 to it, then 
+dbix_class_schema_versions table and write version 0.000 to it;
 upgrade will then apply the diff we talked about creating in the previous paragraph
 and then you're good to go.
 
@@ -180,10 +178,10 @@
 
 use strict;
 use warnings;
-use base 'DBIx::Class';
+use base 'DBIx::Class::Schema';
 
 use Carp::Clan qw/^DBIx::Class/;
-use POSIX 'strftime';
+use Time::HiRes qw/gettimeofday/;
 
 __PACKAGE__->mk_classdata('_filedata');
 __PACKAGE__->mk_classdata('upgrade_directory');
@@ -260,58 +258,155 @@
 
 =back
 
-Virtual method that should be overriden to create an upgrade file. 
-This is useful in the case of upgrading across multiple versions 
+Virtual method that should be overridden to create an upgrade file.
+This is useful in the case of upgrading across multiple versions
 to concatenate several files to create one upgrade file.
 
 You'll probably want the db_version retrieved via $self->get_db_version
-and the schema_version which is retrieved via $self->schema_version 
+and the schema_version which is retrieved via $self->schema_version
 
 =cut
 
 sub create_upgrade_path {
-	## override this method
+  ## override this method
 }
 
+=head2 ordered_schema_versions
+
+=over 4
+
+=item Returns: a list of version numbers, ordered from lowest to highest
+
+=back
+
+Virtual method that should be overridden to return an ordered list
+of schema versions. This is then used to produce a set of steps to
+upgrade through to achieve the required schema version.
+
+You may want the db_version retrieved via $self->get_db_version
+and the schema_version which is retrieved via $self->schema_version
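+
+A minimal override (the version numbers are purely illustrative) might
+return a hardcoded list:
+
+  sub ordered_schema_versions {
+    return ('1.000', '2.000', '3.000');
+  }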
+
+=cut
+
+sub ordered_schema_versions {
+  ## override this method
+}
+
 =head2 upgrade
 
-Call this to attempt to upgrade your database from the version it is at to the version
-this DBIC schema is at. If they are the same it does nothing.
+Call this to attempt to upgrade your database from the version it
+is at to the version this DBIC schema is at. If they are the same
+it does nothing.
 
-It requires an SQL diff file to exist in you I<upgrade_directory>, normally you will
-have created this using L<DBIx::Class::Schema/create_ddl_dir>.
+It will call L</ordered_schema_versions> to retrieve an ordered
+list of schema versions (if ordered_schema_versions returns nothing
+then it is assumed you can do the upgrade as a single step). It
+then iterates through the list of versions between the current db
+version and the schema version applying one update at a time until
+all relevant updates are applied.
 
-If successful the dbix_class_schema_versions table is updated with the current
-DBIC schema version.
+The individual update steps are performed by using
+L</upgrade_single_step>, which will apply the update and also
+update the dbix_class_schema_versions table.
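+
+A typical maintenance-script invocation (assuming an already connected
+$schema) is simply:
+
+  $schema->upgrade()
+    if $schema->get_db_version ne $schema->schema_version;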
 
 =cut
 
-sub upgrade
+sub upgrade {
+    my ($self) = @_;
+    my $db_version = $self->get_db_version();
+
+    # db unversioned
+    unless ($db_version) {
+        carp 'Upgrade not possible as database is unversioned. Please call install first.';
+        return;
+    }
+
+    # db and schema at same version. do nothing
+    if ( $db_version eq $self->schema_version ) {
+        carp "Upgrade not necessary\n";
+        return;
+    }
+
+    my @version_list = $self->ordered_schema_versions;
+
+    # if nothing returned then we preload with min/max
+    @version_list = ( $db_version, $self->schema_version )
+      unless ( scalar(@version_list) );
+
+    # catch the case of someone returning an arrayref
+    @version_list = @{ $version_list[0] }
+      if ( ref( $version_list[0] ) eq 'ARRAY' );
+
+    # remove all versions in list above the required version
+    while ( scalar(@version_list)
+        && ( $version_list[-1] ne $self->schema_version ) )
+    {
+        pop @version_list;
+    }
+
+    # remove all versions in list below the current version
+    while ( scalar(@version_list) && ( $version_list[0] ne $db_version ) ) {
+        shift @version_list;
+    }
+
+    # check we have an appropriate list of versions
+    if ( scalar(@version_list) < 2 ) {
+        die;
+    }
+
+    # do sets of upgrade
+    while ( scalar(@version_list) >= 2 ) {
+        $self->upgrade_single_step( $version_list[0], $version_list[1] );
+        shift @version_list;
+    }
+}
+
+=head2 upgrade_single_step
+
+=over 4
+
+=item Arguments: db_version - the version currently within the db
+
+=item Arguments: target_version - the version to upgrade to
+
+=back
+
+Call this to attempt to upgrade your database from the
+I<db_version> to the I<target_version>. If they are the same it
+does nothing.
+
+It requires an SQL diff file to exist in your I<upgrade_directory>,
+normally you will have created this using L<DBIx::Class::Schema/create_ddl_dir>.
+
+If successful the dbix_class_schema_versions table is updated with
+the I<target_version>.
+
+This method may be called repeatedly by the upgrade method to
+upgrade through a series of updates.
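+
+For example, to apply just the 1.000 -> 2.000 diff (the versions are
+illustrative):
+
+  $schema->upgrade_single_step('1.000', '2.000');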
+
+=cut
+
+sub upgrade_single_step
 {
-  my ($self) = @_;
-  my $db_version = $self->get_db_version();
+  my ($self,
+      $db_version,
+      $target_version) = @_;
 
-  # db unversioned
-  unless ($db_version) {
-    carp 'Upgrade not possible as database is unversioned. Please call install first.';
-    return;
-  }
-
   # db and schema at same version. do nothing
-  if ($db_version eq $self->schema_version) {
+  if ($db_version eq $target_version) {
     carp "Upgrade not necessary\n";
     return;
   }
 
   # strangely the first time this is called can
-  # differ to subsequent times. so we call it 
+  # differ to subsequent times. so we call it
   # here to be sure.
   # XXX - just fix it
   $self->storage->sqlt_type;
-  
+
   my $upgrade_file = $self->ddl_filename(
                                          $self->storage->sqlt_type,
-                                         $self->schema_version,
+                                         $target_version,
                                          $self->upgrade_directory,
                                          $db_version,
                                         );
@@ -323,7 +418,7 @@
     return;
   }
 
-  carp "\nDB version ($db_version) is lower than the schema version (".$self->schema_version."). Attempting upgrade.\n";
+  carp "DB version ($db_version) is lower than the schema version (".$self->schema_version."). Attempting upgrade.\n";
 
   # backup if necessary then apply upgrade
   $self->_filedata($self->_read_sql_file($upgrade_file));
@@ -331,7 +426,7 @@
   $self->txn_do(sub { $self->do_upgrade() });
 
   # set row in dbix_class_schema_versions table
-  $self->_set_db_version;
+  $self->_set_db_version({version => $target_version});
 }
 
 =head2 do_upgrade
@@ -340,7 +435,7 @@
 allows you to run your upgrade any way you please, you can call C<run_upgrade>
 any number of times to run the actual SQL commands, and in between you can
 sandwich your data upgrading. For example, first run all the B<CREATE>
-commands, then migrate your data from old to new tables/formats, then 
+commands, then migrate your data from old to new tables/formats, then
 issue the DROP commands when you are finished. Will run the whole file as it is by default.
 
 =cut
@@ -349,7 +444,7 @@
 {
   my ($self) = @_;
 
-  # just run all the commands (including inserts) in order                                                        
+  # just run all the commands (including inserts) in order
   $self->run_upgrade(qr/.*?/);
 }
 
@@ -374,7 +469,7 @@
     $self->_filedata([ grep { $_ !~ /$stm/i } @{$self->_filedata} ]);
 
     for (@statements)
-    {      
+    {
         $self->storage->debugobj->query_start($_) if $self->storage->debug;
         $self->apply_statement($_);
         $self->storage->debugobj->query_end($_) if $self->storage->debug;
@@ -393,7 +488,7 @@
 sub apply_statement {
     my ($self, $statement) = @_;
 
-    $self->storage->dbh->do($_) or carp "SQL was:\n $_";
+    $self->storage->dbh->do($_) or carp "SQL was: $_";
 }
 
 =head2 get_db_version
@@ -408,12 +503,12 @@
     my ($self, $rs) = @_;
 
     my $vtable = $self->{vschema}->resultset('Table');
-    my $version = 0;
-    eval {
-      my $stamp = $vtable->get_column('installed')->max;
-      $version = $vtable->search({ installed => $stamp })->first->version;
+    my $version = eval {
+      $vtable->search({}, { order_by => { -desc => 'installed' }, rows => 1 } )
+              ->get_column ('version')
+               ->next;
     };
-    return $version;
+    return $version || 0;
 }
 
 =head2 schema_version
@@ -427,7 +522,7 @@
 This is an overwritable method which is called just before the upgrade, to
 allow you to make a backup of the database. Per default this method attempts
 to call C<< $self->storage->backup >>, to run the standard backup on each
-database type. 
+database type.
 
 This method should return the name of the backup file, if appropriate.
 
@@ -449,7 +544,7 @@
 compatibility between the old versions table (SchemaVersions) and the new one
 (dbix_class_schema_versions).
 
-To avoid the checks on connect, set the env var DBIC_NO_VERSION_CHECK or alternatively you can set the ignore_version attr in the forth argument like so:
+To avoid the checks on connect, set the environment variable DBIC_NO_VERSION_CHECK, or alternatively set the ignore_version attribute in the fourth argument like so:
 
   my $schema = MyApp::Schema->connect(
     $dsn,
@@ -472,9 +567,13 @@
   my ($self, $args) = @_;
 
   $args = {} unless $args;
+
   $self->{vschema} = DBIx::Class::Version->connect(@{$self->storage->connect_info()});
   my $vtable = $self->{vschema}->resultset('Table');
 
+  # useful when connecting from scripts etc
+  return if ($args->{ignore_version} || ($ENV{DBIC_NO_VERSION_CHECK} && !exists $args->{ignore_version}));
+
   # check for legacy versions table and move to new if exists
   my $vschema_compat = DBIx::Class::VersionCompat->connect(@{$self->storage->connect_info()});
   unless ($self->_source_exists($vtable)) {
@@ -486,8 +585,6 @@
     }
   }
 
-  # useful when connecting from scripts etc
-  return if ($args->{ignore_version} || ($ENV{DBIC_NO_VERSION_CHECK} && !exists $args->{ignore_version}));
   my $pversion = $self->get_db_version();
 
   if($pversion eq $self->schema_version)
@@ -502,7 +599,7 @@
         return 1;
     }
 
-  carp "Versions out of sync. This is " . $self->schema_version . 
+  carp "Versions out of sync. This is " . $self->schema_version .
     ", your database contains version $pversion, please call upgrade on your Schema.\n";
 }
 
@@ -520,13 +617,12 @@
     return;
   }
 
-  eval 'require SQL::Translator "0.09003"';
-  if ($@) {
-    $self->throw_exception("SQL::Translator 0.09003 required");
+  unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
+    $self->throw_exception("Unable to proceed without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
   }
 
-  my $db_tr = SQL::Translator->new({ 
-                                    add_drop_table => 1, 
+  my $db_tr = SQL::Translator->new({
+                                    add_drop_table => 1,
                                     parser => 'DBI',
                                     parser_args => { dbh => $self->storage->dbh }
                                    });
@@ -546,7 +642,7 @@
     $tr->parser->($tr, $$data);
   }
 
-  my $diff = SQL::Translator::Diff::schema_diff($db_tr->schema, $db, 
+  my $diff = SQL::Translator::Diff::schema_diff($db_tr->schema, $db,
                                                 $dbic_tr->schema, $db,
                                                 { ignore_constraint_names => 1, ignore_index_names => 1, caseopt => 1 });
 
@@ -576,24 +672,50 @@
 
   my $version = $params->{version} ? $params->{version} : $self->schema_version;
   my $vtable = $self->{vschema}->resultset('Table');
-  $vtable->create({ version => $version,
-                      installed => strftime("%Y-%m-%d %H:%M:%S", gmtime())
-                      });
 
+  ##############################################################################
+  #                             !!! NOTE !!!
+  ##############################################################################
+  #
+  # The travesty below replaces the old nice timestamp format of %Y-%m-%d %H:%M:%S
+  # This is necessary since there are legitimate cases when upgrades can happen
+  # back to back within the same second. This breaks things since we rely on the
+  # ability to sort by the 'installed' value. The logical choice of an autoinc
+  # is not possible, as it will break multiple legacy installations. Also it is 
+  # not possible to format the string sanely, as the column is a varchar(20).
+  # The 'v' character is added to the front of the string, so that any version
+  # formatted by this new function will sort _after_ any existing 200... strings.
+  my @tm = gettimeofday();
+  my @dt = gmtime ($tm[0]);
+  my $o = $vtable->create({ 
+    version => $version,
+    installed => sprintf("v%04d%02d%02d_%02d%02d%02d.%03.0f",
+      $dt[5] + 1900,
+      $dt[4] + 1,
+      $dt[3],
+      $dt[2],
+      $dt[1],
+      $dt[0],
+      $tm[1] / 1000, # microseconds to milliseconds, rounded to an int by the %03.0f above
+    ),
+  });
 }
 
 sub _read_sql_file {
   my $self = shift;
   my $file = shift || return;
 
-  my $fh;
-  open $fh, "<$file" or carp("Can't open upgrade file, $file ($!)");
-  my @data = split(/\n/, join('', <$fh>));
-  @data = grep(!/^--/, @data);
-  @data = split(/;/, join('', @data));
-  close($fh);
-  @data = grep { $_ && $_ !~ /^-- / } @data;
-  @data = grep { $_ !~ /^(BEGIN|BEGIN TRANSACTION|COMMIT)/m } @data;
+  open my $fh, '<', $file or carp("Can't open upgrade file, $file ($!)");
+  my @data = split /\n/, join '', <$fh>;
+  close $fh;
+
+  @data = grep {
+     $_ &&
+     !/^--/ &&
+     !/^(BEGIN|BEGIN TRANSACTION|COMMIT)/m
+  } split /;/,
+     join '', @data;
+
   return \@data;
 }
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Schema.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Schema.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Schema.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -5,11 +5,10 @@
 
 use DBIx::Class::Exception;
 use Carp::Clan qw/^DBIx::Class/;
-use Scalar::Util qw/weaken/;
+use Scalar::Util ();
 use File::Spec;
-use MRO::Compat;
 use Sub::Name ();
-require Module::Find;
+use Module::Find();
 
 use base qw/DBIx::Class/;
 
@@ -34,8 +33,9 @@
   __PACKAGE__->load_namespaces();
 
   package Library::Schema::Result::CD;
-  use base qw/DBIx::Class/;
-  __PACKAGE__->load_components(qw/Core/); # for example
+  use base qw/DBIx::Class::Core/;
+
+  __PACKAGE__->load_components(qw/InflateColumn::DateTime/); # for example
   __PACKAGE__->table('cd');
 
   # Elsewhere in your code:
@@ -43,7 +43,7 @@
     $dsn,
     $user,
     $password,
-    { AutoCommit => 0 },
+    { AutoCommit => 1 },
   );
 
   my $schema2 = Library::Schema->connect($coderef_returning_dbh);
@@ -82,7 +82,7 @@
 
 With no arguments, this method uses L<Module::Find> to load all your
 Result classes from a sub-namespace F<Result> under your Schema class'
-namespace. Eg. With a Schema of I<MyDB::Schema> all files in
+namespace, i.e. with a Schema of I<MyDB::Schema> all files in
 I<MyDB::Schema::Result> are assumed to be Result classes.
 
 It also finds all ResultSet classes in the namespace F<ResultSet> and
@@ -407,12 +407,10 @@
 
 Set the storage class that will be instantiated when L</connect> is called.
 If the classname starts with C<::>, the prefix C<DBIx::Class::Storage> is
-assumed by L</connect>.  
+assumed by L</connect>.
 
 You want to use this to set subclasses of L<DBIx::Class::Storage::DBI>
-in cases where the appropriate subclass is not autodetected, such as
-when dealing with MSSQL via L<DBD::Sybase>, in which case you'd set it
-to C<::DBI::Sybase::MSSQL>.
+in cases where the appropriate subclass is not autodetected.
 
 If your storage type requires instantiation arguments, those are
 defined as a second argument in the form of a hashref and the entire
@@ -512,7 +510,7 @@
 general.
 
 Note that C<connect_info> expects an arrayref of arguments, but
-C<connect> does not. C<connect> wraps it's arguments in an arrayref
+C<connect> does not. C<connect> wraps its arguments in an arrayref
 before passing them to C<connect_info>.
 
 =head3 Overloading
@@ -544,6 +542,8 @@
 
 sub resultset {
   my ($self, $moniker) = @_;
+  $self->throw_exception('resultset() expects a source name')
+    unless defined $moniker;
   return $self->source($moniker)->resultset;
 }
 
@@ -630,13 +630,13 @@
 This interface is preferred over using the individual methods L</txn_begin>,
 L</txn_commit>, and L</txn_rollback> below.
 
-WARNING: If you are connected with C<AutoCommit => 0> the transaction is
+WARNING: If you are connected with C<< AutoCommit => 0 >> the transaction is
 considered nested, and you will still need to call L</txn_commit> to write your
-changes when appropriate. You will also want to connect with C<auto_savepoint =>
-1> to get partial rollback to work, if the storage driver for your database
+changes when appropriate. You will also want to connect with C<< auto_savepoint =>
+1 >> to get partial rollback to work, if the storage driver for your database
 supports it.
 
-Connecting with C<AutoCommit => 1> is recommended.
+Connecting with C<< AutoCommit => 1 >> is recommended.
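+
+For example (the class name, DSN and credentials are placeholders):
+
+  my $schema = My::Schema->connect($dsn, $user, $pass, {
+    AutoCommit     => 1,
+    auto_savepoint => 1,  # enables partial rollback where supported
+  });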
 
 =cut
 
@@ -748,7 +748,7 @@
 L<DBIx::Class::ResultSet/create>, and a arrayref of the resulting row
 objects is returned.
 
-i.e.,
+e.g.
 
   $schema->populate('Artist', [
     [ qw/artistid name/ ],
@@ -756,7 +756,7 @@
     [ 2, 'Indie Band' ],
     ...
   ]);
-  
+
 Since wantarray context is basically the same as looping over $rs->create(...) 
 you won't see any performance benefits and in this case the method is more for
 convenience. Void context sends the column information directly to storage
@@ -807,13 +807,13 @@
 sub connection {
   my ($self, @info) = @_;
   return $self if !@info && $self->storage;
-  
+
   my ($storage_class, $args) = ref $self->storage_type ? 
     ($self->_normalize_storage_type($self->storage_type),{}) : ($self->storage_type, {});
-    
+
   $storage_class = 'DBIx::Class::Storage'.$storage_class
     if $storage_class =~ m/^::/;
-  eval "require ${storage_class};";
+  eval { $self->ensure_class_loaded ($storage_class) };
   $self->throw_exception(
     "No arguments to load_classes and couldn't load ${storage_class} ($@)"
   ) if $@;
@@ -851,7 +851,7 @@
 
 It also attaches a corresponding L<DBIx::Class::ResultSource> object to the
 new $schema object. If C<$additional_base_class> is given, the new composed
-classes will inherit from first the corresponding classe from the current
+classes will inherit first from the corresponding class in the current
 schema then the base class.
 
 For example, for a schema with My::Schema::CD and My::Schema::Artist classes,
@@ -909,7 +909,7 @@
     no strict 'refs';
     no warnings 'redefine';
     foreach my $meth (qw/class source resultset/) {
-      *{"${target}::${meth}"} =
+      *{"${target}::${meth}"} = Sub::Name::subname "${target}::${meth}" =>
         sub { shift->schema->$meth(@_) };
     }
   }
@@ -1083,7 +1083,7 @@
   $self->storage->deployment_statements($self, @_);
 }
 
-=head2 create_ddl_dir (EXPERIMENTAL)
+=head2 create_ddl_dir
 
 =over 4
 
@@ -1147,7 +1147,7 @@
   $filename =~ s/::/-/g;
   $filename = File::Spec->catfile($dir, "$filename-$version-$type.sql");
   $filename =~ s/$version/$preversion-$version/ if($preversion);
-  
+
   return $filename;
 }
 
@@ -1155,7 +1155,7 @@
 
 Provided as the recommended way of thawing schema objects. You can call 
 C<Storable::thaw> directly if you wish, but the thawed objects will not have a
-reference to any schema, so are rather useless
+reference to any schema, so are rather useless.
 
 =cut
 
@@ -1167,8 +1167,8 @@
 
 =head2 freeze
 
-This doesn't actualy do anything more than call L<Storable/freeze>, it is just
-provided here for symetry.
+This doesn't actually do anything more than call L<Storable/freeze>, it is just
+provided here for symmetry.
 
 =cut
 
@@ -1178,9 +1178,18 @@
 
 =head2 dclone
 
-Recommeneded way of dcloning objects. This is needed to properly maintain
-references to the schema object (which itself is B<not> cloned.)
+=over 4
 
+=item Arguments: $object
+
+=item Return Value: dcloned $object
+
+=back
+
+Recommended way of dcloning L<DBIx::Class::Row> and L<DBIx::Class::ResultSet>
+objects so their references to the schema object
+(which itself is B<not> cloned) are properly maintained.
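+
+For example:
+
+  my $rs_copy = $schema->dclone($rs);  # instead of Storable::dclone($rs)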
+
 =cut
 
 sub dclone {
@@ -1260,6 +1269,24 @@
   $self->_register_source(@_);
 }
 
+=head2 unregister_source
+
+=over 4
+
+=item Arguments: $moniker
+
+=back
+
+Removes the L<DBIx::Class::ResultSource> from the schema for the given moniker.
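+
+For example (the moniker is illustrative):
+
+  $schema->unregister_source('Artist');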
+
+=cut
+
+sub unregister_source {
+  my $self = shift;
+
+  $self->_unregister_source(@_);
+}
+
 =head2 register_extra_source
 
 =over 4
@@ -1286,7 +1313,7 @@
 
   $source = $source->new({ %$source, source_name => $moniker });
   $source->schema($self);
-  weaken($source->{schema}) if ref($self);
+  Scalar::Util::weaken($source->{schema}) if ref($self);
 
   my $rs_class = $source->result_class;
 
@@ -1373,7 +1400,7 @@
     $self->throw_exception
       ("No arguments to load_classes and couldn't load ${base} ($@)")
         if $@;
-  
+
     if ($self eq $target) {
       # Pathological case, largely caused by the docs on early C::M::DBIC::Plain
       foreach my $moniker ($self->sources) {
@@ -1386,14 +1413,14 @@
       $self->connection(@info);
       return $self;
     }
-  
+
     my $schema = $self->compose_namespace($target, $base);
     {
       no strict 'refs';
       my $name = join '::', $target, 'schema';
       *$name = Sub::Name::subname $name, sub { $schema };
     }
-  
+
     $schema->connection(@info);
     foreach my $moniker ($schema->sources) {
       my $source = $schema->source($moniker);

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Serialize/Storable.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Serialize/Storable.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Serialize/Storable.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -7,10 +7,13 @@
     my ($self, $cloning) = @_;
     my $to_serialize = { %$self };
 
+    # The source is either derived from _source_handle or is
+    # reattached in the thaw handler below
     delete $to_serialize->{result_source};
-    delete $to_serialize->{related_resultsets};
-    delete $to_serialize->{_inflated_column};
 
+    # Dynamic values, easy to recalculate
+    delete $to_serialize->{$_} for qw/related_resultsets _inflated_column/;
+
     return (Storable::freeze($to_serialize));
 }
 
@@ -18,8 +21,10 @@
     my ($self, $cloning, $serialized) = @_;
 
     %$self = %{ Storable::thaw($serialized) };
+
+    # if the handle went missing somehow, reattach
     $self->result_source($self->result_source_instance)
-      if $self->can('result_source_instance');
+      if !$self->_source_handle && $self->can('result_source_instance');
 }
 
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/StartupCheck.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/StartupCheck.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/StartupCheck.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -7,7 +7,7 @@
 =head1 SYNOPSIS
 
   use DBIx::Class::StartupCheck;
-  
+
 =head1 DESCRIPTION
 
 This module used to check for, and if necessary issue a warning for, a
@@ -17,7 +17,7 @@
 triggers, incorrectly flagging those versions of perl to be buggy. A
 more comprehensive check has been moved into the test suite in
 C<t/99rh_perl_perf_bug.t> and further information about the bug has been
-put in L<DBIx::Class::Manual::Troubleshooting>
+put in L<DBIx::Class::Manual::Troubleshooting>.
 
 Other checks may be added from time to time.
 

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,144 @@
+package DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server;
+
+use strict;
+use warnings;
+
+use base qw/
+  DBIx::Class::Storage::DBI::ADO
+  DBIx::Class::Storage::DBI::MSSQL
+/;
+use mro 'c3';
+
+sub _rebless {
+  my $self = shift;
+  $self->_identity_method('@@identity');
+}
+
+sub source_bind_attributes {
+  my $self = shift;
+  my ($source) = @_;
+
+  my $bind_attributes = $self->next::method(@_);
+
+  foreach my $column ($source->columns) {
+    $bind_attributes->{$column}{ado_size} ||= 8000; # max VARCHAR
+  }
+
+  return $bind_attributes;
+}
+
+sub bind_attribute_by_data_type {
+  my ($self, $data_type) = @_;
+
+  ($data_type = lc($data_type)) =~ s/\s+.*//;
+
+  my $max_size =
+    $self->_mssql_max_data_type_representation_size_in_bytes->{$data_type};
+
+  my $res = {};
+  $res->{ado_size} = $max_size if $max_size;
+
+  return $res;
+}
+
+# approximate
+# XXX needs to support varchar(max) and varbinary(max)
+sub _mssql_max_data_type_representation_size_in_bytes {
+  my $self = shift;
+
+  my $blob_max = $self->_get_dbh->{LongReadLen} || 32768;
+
+  return +{
+# MSSQL types
+    char => 8000,
+    varchar => 8000,
+    binary => 8000,
+    varbinary => 8000,
+    nchar => 8000,
+    nvarchar => 8000,
+    numeric => 100,
+    smallint => 100,
+    tinyint => 100,
+    smallmoney => 100,
+    bigint => 100,
+    bit => 100,
+    decimal => 100,
+    integer => 100,
+    int => 100,
+    money => 100,
+    float => 100,
+    real => 100,
+    uniqueidentifier => 100,
+    ntext => $blob_max,
+    text => $blob_max,
+    image => $blob_max,
+    date => 100,
+    datetime => 100,
+    datetime2 => 100,
+    datetimeoffset => 100,
+    smalldatetime => 100,
+    time => 100,
+    timestamp => 100,
+    cursor => 100,
+    hierarchyid => 100,
+    sql_variant => 100,
+    table => 100,
+    xml => $blob_max, # ???
+
+# some non-MSSQL types
+    serial => 100,
+    bigserial => 100,
+    varchar2 => 8000,
+    blob => $blob_max,
+    clob => $blob_max,
+  }
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server - Support for Microsoft
+SQL Server via DBD::ADO
+
+=head1 SYNOPSIS
+
+This subclass supports MSSQL server connections via L<DBD::ADO>.
+
+=head1 DESCRIPTION
+
+The MSSQL specific functionality is provided by
+L<DBIx::Class::Storage::DBI::MSSQL>.
+
+=head2 CAVEATS
+
+=head3 identities
+
+C<_identity_method> is set to C<@@identity>, as C<SCOPE_IDENTITY()> doesn't work
+with L<DBD::ADO>. See L<DBIx::Class::Storage::DBI::MSSQL/IMPLEMENTATION NOTES>
+for caveats regarding this.
+
+=head3 truncation bug
+
+There is a bug with MSSQL ADO providers where data gets truncated based on the
+size of the bind sizes in the first prepare call:
+
+L<https://rt.cpan.org/Ticket/Display.html?id=52048>
+
+The C<ado_size> workaround is used (see L<DBD::ADO/"ADO Providers">) with the
+approximate maximum size of the data_type of the bound column, or 8000 (maximum
+VARCHAR size) if the data_type is not available.
+
+This code is incomplete and may be buggy. Particularly, C<VARCHAR(MAX)> is not
+supported yet. The data_type list for other DBs is also incomplete. Please
+report problems (and send patches).
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ADO.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ADO.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ADO.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,43 @@
+package # hide from PAUSE
+    DBIx::Class::Storage::DBI::ADO;
+
+use base 'DBIx::Class::Storage::DBI';
+
+sub _rebless {
+  my $self = shift;
+
+# check for MSSQL
+# XXX This should be using an OpenSchema method of some sort, but I don't know
+# how.
+# Current version is stolen from Sybase.pm
+  my $dbtype = eval {
+    @{$self->_get_dbh
+      ->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})
+    }[2]
+  };
+
+  unless ($@) {
+    $dbtype =~ s/\W/_/gi;
+    my $subclass = "DBIx::Class::Storage::DBI::ADO::${dbtype}";
+    if ($self->load_optional_class($subclass) && !$self->isa($subclass)) {
+      bless $self, $subclass;
+      $self->_rebless;
+    }
+  }
+}
+
+# Here I was just experimenting with ADO cursor types, left in as a comment in
+# case you want to as well. See the DBD::ADO docs.
+#sub _dbh_sth {
+#  my ($self, $dbh, $sql) = @_;
+#
+#  my $sth = $self->disable_sth_caching
+#    ? $dbh->prepare($sql, { CursorType => 'adOpenStatic' })
+#    : $dbh->prepare_cached($sql, { CursorType => 'adOpenStatic' }, 3);
+#
+#  $self->throw_exception($dbh->errstr) if !$sth;
+#
+#  $sth;
+#}
+
+1;

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/AmbiguousGlob.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/AmbiguousGlob.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/AmbiguousGlob.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,47 @@
+package DBIx::Class::Storage::DBI::AmbiguousGlob;
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class::Storage::DBI';
+use mro 'c3';
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::AmbiguousGlob - Storage component for RDBMS choking on count(*)
+
+=head1 DESCRIPTION
+
+Some servers choke on things like:
+
+  COUNT(*) FROM (SELECT tab1.col, tab2.col FROM tab1 JOIN tab2 ... )
+
+claiming that col is a duplicate column (it loses the table specifiers by
+the time it gets to the *). Thus for any subquery count we select only the
+primary keys of the main table in the inner query. This hopefully still
+hits the indexes and keeps the server happy.
+
+At this point the only overridden method is C<_subq_count_select()>
+
+=cut
+
+sub _subq_count_select {
+  my ($self, $source, $rs_attrs) = @_;
+
+  return $rs_attrs->{group_by} if $rs_attrs->{group_by};
+
+  my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns);
+  return @pcols ? \@pcols : [ 1 ];
+}
+
+=head1 AUTHORS
+
+See L<DBIx::Class/CONTRIBUTORS>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;


Property changes on: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/AmbiguousGlob.pm
___________________________________________________________________
Name: svn:eol-style
   + native

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/AutoCast.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/AutoCast.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/AutoCast.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,98 @@
+package DBIx::Class::Storage::DBI::AutoCast;
+
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+use mro 'c3';
+
+__PACKAGE__->mk_group_accessors('simple' => 'auto_cast' );
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::AutoCast - Storage component for RDBMS requiring explicit placeholder typing
+
+=head1 SYNOPSIS
+
+  $schema->storage->auto_cast(1);
+
+=head1 DESCRIPTION
+
+In some combinations of RDBMS and DBD drivers (e.g. FreeTDS and Sybase)
+statements with values bound to columns or conditions that are not strings will
+throw implicit type conversion errors.
+
+As long as a column L<data_type|DBIx::Class::ResultSource/add_columns> is
+defined and resolves to a base RDBMS native type via L</_native_data_type> as
+defined in your Storage driver, the placeholder for this column will be
+converted to:
+
+  CAST(? as $mapped_type)
+
+This option can also be enabled in L<DBIx::Class::Storage::DBI/connect_info> as:
+
+  on_connect_call => ['set_auto_cast']
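+
+A full connection example (the class name, DSN and credentials are
+placeholders):
+
+  my $schema = My::Schema->connect($dsn, $user, $pass, {
+    on_connect_call => ['set_auto_cast'],
+  });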
+
+=cut
+
+sub _prep_for_execute {
+  my $self = shift;
+  my ($op, $extra_bind, $ident, $args) = @_;
+
+  my ($sql, $bind) = $self->next::method (@_);
+
+# If we're using ::NoBindVars, there are no binds by this point so this code
+# gets skipped.
+  if ($self->auto_cast && @$bind) {
+    my $new_sql;
+    my @sql_part = split /\?/, $sql;
+    my $col_info = $self->_resolve_column_info($ident,[ map $_->[0], @$bind ]);
+
+    foreach my $bound (@$bind) {
+      my $col = $bound->[0];
+      my $type = $self->_native_data_type($col_info->{$col}{data_type});
+
+      foreach my $data (@{$bound}[1..$#$bound]) {
+        $new_sql .= shift(@sql_part) .
+          ($type ? "CAST(? AS $type)" : '?');
+      }
+    }
+    $new_sql .= join '', @sql_part;
+    $sql = $new_sql;
+  }
+
+  return ($sql, $bind);
+}
+
+=head2 connect_call_set_auto_cast
+
+Executes:
+
+  $schema->storage->auto_cast(1);
+
+on connection.
+
+Used as:
+
+    on_connect_call => ['set_auto_cast']
+
+in L<DBIx::Class::Storage::DBI/connect_info>.
+
+=cut
+
+sub connect_call_set_auto_cast {
+  my $self = shift;
+  $self->auto_cast(1);
+}
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Cursor.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Cursor.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Cursor.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,10 +1,14 @@
 package DBIx::Class::Storage::DBI::Cursor;
 
-use base qw/DBIx::Class::Cursor/;
-
 use strict;
 use warnings;
 
+use base qw/DBIx::Class::Cursor/;
+
+__PACKAGE__->mk_group_accessors('simple' =>
+    qw/sth/
+);
+
 =head1 NAME
 
 DBIx::Class::Storage::DBI::Cursor - Object representing a query cursor on a
@@ -68,25 +72,29 @@
   my ($storage, $dbh, $self) = @_;
 
   $self->_check_dbh_gen;
-  if ($self->{attrs}{rows} && $self->{pos} >= $self->{attrs}{rows}) {
-    $self->{sth}->finish if $self->{sth}->{Active};
-    delete $self->{sth};
+  if (
+    $self->{attrs}{software_limit}
+      && $self->{attrs}{rows}
+        && $self->{pos} >= $self->{attrs}{rows}
+  ) {
+    $self->sth->finish if $self->sth->{Active};
+    $self->sth(undef);
     $self->{done} = 1;
   }
   return if $self->{done};
-  unless ($self->{sth}) {
-    $self->{sth} = ($storage->_select(@{$self->{args}}))[1];
+  unless ($self->sth) {
+    $self->sth(($storage->_select(@{$self->{args}}))[1]);
     if ($self->{attrs}{software_limit}) {
       if (my $offset = $self->{attrs}{offset}) {
-        $self->{sth}->fetch for 1 .. $offset;
+        $self->sth->fetch for 1 .. $offset;
       }
     }
   }
-  my @row = $self->{sth}->fetchrow_array;
+  my @row = $self->sth->fetchrow_array;
   if (@row) {
     $self->{pos}++;
   } else {
-    delete $self->{sth};
+    $self->sth(undef);
     $self->{done} = 1;
   }
   return @row;
@@ -116,8 +124,8 @@
   my ($storage, $dbh, $self) = @_;
 
   $self->_check_dbh_gen;
-  $self->{sth}->finish if $self->{sth}->{Active};
-  delete $self->{sth};
+  $self->sth->finish if $self->sth && $self->sth->{Active};
+  $self->sth(undef);
   my ($rv, $sth) = $storage->_select(@{$self->{args}});
   return @{$sth->fetchall_arrayref};
 }
@@ -128,6 +136,7 @@
         && ($self->{attrs}{offset} || $self->{attrs}{rows})) {
     return $self->next::method;
   }
+
   $self->{storage}->dbh_do($self->can('_dbh_all'), $self);
 }
 
@@ -141,17 +150,17 @@
   my ($self) = @_;
 
   # No need to care about failures here
-  eval { $self->{sth}->finish if $self->{sth} && $self->{sth}->{Active} };
+  eval { $self->sth->finish if $self->sth && $self->sth->{Active} };
   $self->_soft_reset;
+  return undef;
 }
 
 sub _soft_reset {
   my ($self) = @_;
 
-  delete $self->{sth};
+  $self->sth(undef);
   delete $self->{done};
   $self->{pos} = 0;
-  return $self;
 }
 
 sub _check_dbh_gen {
@@ -168,7 +177,7 @@
 
   # None of the reasons this would die matter if we're in DESTROY anyways
   local $@;
-  eval { $self->{sth}->finish if $self->{sth} && $self->{sth}->{Active} };
+  eval { $self->sth->finish if $self->sth && $self->sth->{Active} };
 }
 
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/DB2.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/DB2.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/DB2.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -4,9 +4,8 @@
 use warnings;
 
 use base qw/DBIx::Class::Storage::DBI/;
+use mro 'c3';
 
-# __PACKAGE__->load_components(qw/PK::Auto/);
-
 sub _dbh_last_insert_id {
     my ($self, $dbh, $source, $col) = @_;
 
@@ -22,11 +21,11 @@
 
 sub _sql_maker_opts {
     my ( $self, $opts ) = @_;
-    
+
     if ( $opts ) {
         $self->{_sql_maker_opts} = { %$opts };
     }
-                    
+
     return { limit_dialect => 'RowNumberOver', %{$self->{_sql_maker_opts}||{}} };
 }
 
@@ -39,7 +38,7 @@
 =head1 SYNOPSIS
 
   # In your table classes
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  use base 'DBIx::Class::Core';
   __PACKAGE__->set_primary_key('id');
 
 =head1 DESCRIPTION

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Informix.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Informix.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Informix.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,57 @@
+package DBIx::Class::Storage::DBI::Informix;
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+
+use mro 'c3';
+
+__PACKAGE__->mk_group_accessors('simple' => '__last_insert_id');
+
+sub _execute {
+  my $self = shift;
+  my ($op) = @_;
+  my ($rv, $sth, @rest) = $self->next::method(@_);
+  if ($op eq 'insert') {
+    $self->__last_insert_id($sth->{ix_sqlerrd}[1]);
+  }
+  return (wantarray ? ($rv, $sth, @rest) : $rv);
+}
+
+sub last_insert_id {
+  shift->__last_insert_id;
+}
+
+sub _sql_maker_opts {
+  my ( $self, $opts ) = @_;
+
+  if ( $opts ) {
+    $self->{_sql_maker_opts} = { %$opts };
+  }
+
+  return { limit_dialect => 'SkipFirst', %{$self->{_sql_maker_opts}||{}} };
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Informix - Base Storage Class for INFORMIX Support
+
+=head1 SYNOPSIS
+
+=head1 DESCRIPTION
+
+This class implements storage-specific support for Informix
+
+=head1 AUTHORS
+
+See L<DBIx::Class/CONTRIBUTORS>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/MSSQL.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/MSSQL.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/MSSQL.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,14 +3,225 @@
 use strict;
 use warnings;
 
-use base qw/DBIx::Class::Storage::DBI/;
+use base qw/DBIx::Class::Storage::DBI::AmbiguousGlob DBIx::Class::Storage::DBI/;
+use mro 'c3';
 
-sub _dbh_last_insert_id {
-  my ($self, $dbh, $source, $col) = @_;
-  my ($id) = $dbh->selectrow_array('SELECT SCOPE_IDENTITY()');
-  return $id;
+use List::Util();
+
+__PACKAGE__->mk_group_accessors(simple => qw/
+  _identity _identity_method
+/);
+
+__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks::MSSQL');
+
+sub _set_identity_insert {
+  my ($self, $table) = @_;
+
+  my $sql = sprintf (
+    'SET IDENTITY_INSERT %s ON',
+    $self->sql_maker->_quote ($table),
+  );
+
+  my $dbh = $self->_get_dbh;
+  eval { $dbh->do ($sql) };
+  if ($@) {
+    $self->throw_exception (sprintf "Error executing '%s': %s",
+      $sql,
+      $dbh->errstr,
+    );
+  }
 }
 
+sub _unset_identity_insert {
+  my ($self, $table) = @_;
+
+  my $sql = sprintf (
+    'SET IDENTITY_INSERT %s OFF',
+    $self->sql_maker->_quote ($table),
+  );
+
+  my $dbh = $self->_get_dbh;
+  $dbh->do ($sql);
+}
+
+sub insert_bulk {
+  my $self = shift;
+  my ($source, $cols, $data) = @_;
+
+  my $is_identity_insert = (List::Util::first
+      { $source->column_info ($_)->{is_auto_increment} }
+      (@{$cols})
+  )
+     ? 1
+     : 0;
+
+  if ($is_identity_insert) {
+     $self->_set_identity_insert ($source->name);
+  }
+
+  $self->next::method(@_);
+
+  if ($is_identity_insert) {
+     $self->_unset_identity_insert ($source->name);
+  }
+}
+
+# support MSSQL GUID column types
+
+sub insert {
+  my $self = shift;
+  my ($source, $to_insert) = @_;
+
+  my $supplied_col_info = $self->_resolve_column_info($source, [keys %$to_insert] );
+
+  my %guid_cols;
+  my @pk_cols = $source->primary_columns;
+  my %pk_cols;
+  @pk_cols{@pk_cols} = ();
+
+  my @pk_guids = grep {
+    $source->column_info($_)->{data_type}
+    &&
+    $source->column_info($_)->{data_type} =~ /^uniqueidentifier/i
+  } @pk_cols;
+
+  my @auto_guids = grep {
+    $source->column_info($_)->{data_type}
+    &&
+    $source->column_info($_)->{data_type} =~ /^uniqueidentifier/i
+    &&
+    $source->column_info($_)->{auto_nextval}
+  } grep { not exists $pk_cols{$_} } $source->columns;
+
+  my @get_guids_for =
+    grep { not exists $to_insert->{$_} } (@pk_guids, @auto_guids);
+
+  my $updated_cols = {};
+
+  for my $guid_col (@get_guids_for) {
+    my ($new_guid) = $self->_get_dbh->selectrow_array('SELECT NEWID()');
+    $updated_cols->{$guid_col} = $to_insert->{$guid_col} = $new_guid;
+  }
+
+  my $is_identity_insert = (List::Util::first { $_->{is_auto_increment} } (values %$supplied_col_info) )
+     ? 1
+     : 0;
+
+  if ($is_identity_insert) {
+     $self->_set_identity_insert ($source->name);
+  }
+
+  $updated_cols = { %$updated_cols, %{ $self->next::method(@_) } };
+
+  if ($is_identity_insert) {
+     $self->_unset_identity_insert ($source->name);
+  }
+
+
+  return $updated_cols;
+}
+
+sub _prep_for_execute {
+  my $self = shift;
+  my ($op, $extra_bind, $ident, $args) = @_;
+
+# cast MONEY values properly
+  if ($op eq 'insert' || $op eq 'update') {
+    my $fields = $args->[0];
+
+    for my $col (keys %$fields) {
+      # $ident is a result source object with INSERT/UPDATE ops
+      if ($ident->column_info ($col)->{data_type}
+         &&
+         $ident->column_info ($col)->{data_type} =~ /^money\z/i) {
+        my $val = $fields->{$col};
+        $fields->{$col} = \['CAST(? AS MONEY)', [ $col => $val ]];
+      }
+    }
+  }
+
+  my ($sql, $bind) = $self->next::method (@_);
+
+  if ($op eq 'insert') {
+    $sql .= ';SELECT SCOPE_IDENTITY()';
+
+  }
+
+  return ($sql, $bind);
+}
+
+sub _execute {
+  my $self = shift;
+  my ($op) = @_;
+
+  my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);
+
+  if ($op eq 'insert') {
+
+    # this should bring back the result of SELECT SCOPE_IDENTITY() we tacked
+    # on in _prep_for_execute above
+    my ($identity) = eval { $sth->fetchrow_array };
+
+    # SCOPE_IDENTITY failed, but we can do something else
+    if ( (! $identity) && $self->_identity_method) {
+      ($identity) = $self->_dbh->selectrow_array(
+        'select ' . $self->_identity_method
+      );
+    }
+
+    $self->_identity($identity);
+    $sth->finish;
+  }
+
+  return wantarray ? ($rv, $sth, @bind) : $rv;
+}
+
+sub last_insert_id { shift->_identity }
+
+#
+# MSSQL is retarded wrt ordered subselects. One needs to add a TOP
+# to *all* subqueries, but one also can't use TOP 100 PERCENT
+# http://sqladvice.com/forums/permalink/18496/22931/ShowThread.aspx#22931
+#
+sub _select_args_to_query {
+  my $self = shift;
+
+  my ($sql, $prep_bind, @rest) = $self->next::method (@_);
+
+  # see if this is an ordered subquery
+  my $attrs = $_[3];
+  if ( scalar $self->_parse_order_by ($attrs->{order_by}) ) {
+    $self->throw_exception(
+      'An ordered subselect encountered - this is not safe! Please see "Ordered Subselects" in DBIx::Class::Storage::DBI::MSSQL'
+    ) unless $attrs->{unsafe_subselect_ok};
+    my $max = 2 ** 32;
+    $sql =~ s/^ \s* SELECT \s/SELECT TOP $max /xi;
+  }
+
+  return wantarray
+    ? ($sql, $prep_bind, @rest)
+    : \[ "($sql)", @$prep_bind ]
+  ;
+}
+
+
+# savepoint syntax is the same as in Sybase ASE
+
+sub _svp_begin {
+  my ($self, $name) = @_;
+
+  $self->_get_dbh->do("SAVE TRANSACTION $name");
+}
+
+# A new SAVE TRANSACTION with the same name releases the previous one.
+sub _svp_release { 1 }
+
+sub _svp_rollback {
+  my ($self, $name) = @_;
+
+  $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
+}
+
 sub build_datetime_parser {
   my $self = shift;
   my $type = "DateTime::Format::Strptime";
@@ -21,51 +232,134 @@
 
 sub sqlt_type { 'SQLServer' }
 
-sub _sql_maker_opts {
-    my ( $self, $opts ) = @_;
+sub _get_mssql_version {
+  my $self = shift;
 
-    if ( $opts ) {
-        $self->{_sql_maker_opts} = { %$opts };
+  my $data = $self->_get_dbh->selectrow_hashref('xp_msver ProductVersion');
+
+  if ($data->{Character_Value} =~ /^(\d+)\./) {
+    return $1;
+  } else {
+    $self->throw_exception(q{Your ProductVersion's Character_Value is missing or malformed!});
+  }
+}
+
+sub sql_maker {
+  my $self = shift;
+
+  unless ($self->_sql_maker) {
+    unless ($self->{_sql_maker_opts}{limit_dialect}) {
+      my $version = eval { $self->_get_mssql_version; } || 0;
+
+      $self->{_sql_maker_opts} = {
+        limit_dialect => ($version >= 9 ? 'RowNumberOver' : 'Top'),
+        %{$self->{_sql_maker_opts}||{}}
+      };
     }
 
-    return { limit_dialect => 'Top', %{$self->{_sql_maker_opts}||{}} };
+    my $maker = $self->next::method (@_);
+  }
+
+  return $self->_sql_maker;
 }
 
 1;
 
 =head1 NAME
 
-DBIx::Class::Storage::DBI::MSSQL - Storage::DBI subclass for MSSQL
+DBIx::Class::Storage::DBI::MSSQL - Base Class for Microsoft SQL Server support
+in DBIx::Class
 
 =head1 SYNOPSIS
 
-This subclass supports MSSQL, and can in theory be used directly
-via the C<storage_type> mechanism:
+This is the base class for Microsoft SQL Server support, used by
+L<DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server> and
+L<DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server>.
 
-  $schema->storage_type('::DBI::MSSQL');
-  $schema->connect_info('dbi:....', ...);
+=head1 IMPLEMENTATION NOTES
 
-However, as there is no L<DBD::MSSQL>, you will probably want to use
-one of the other DBD-specific MSSQL classes, such as
-L<DBIx::Class::Storage::DBI::Sybase::MSSQL>.  These classes will
-merge this class with a DBD-specific class to obtain fully
-correct behavior for your scenario.
+=head2 IDENTITY information
 
-=head1 METHODS
+Microsoft SQL Server supports three methods of retrieving the IDENTITY
+value for an inserted row: IDENT_CURRENT, @@IDENTITY, and SCOPE_IDENTITY().
+SCOPE_IDENTITY is used here because it is the safest.  However, it must
+be called in the same execute statement, not just the same connection.
 
-=head2 last_insert_id
+So, this implementation appends a SELECT SCOPE_IDENTITY() statement
+onto each INSERT to accommodate that requirement.
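+
+In other words, a generated insert arrives at the server as a single
+batch, roughly of the form (a sketch, column and value lists elided):
+
+  INSERT INTO some_table (...) VALUES (...);SELECT SCOPE_IDENTITY()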
 
-=head2 sqlt_type
+C<SELECT @@IDENTITY> can also be used by issuing:
 
-=head2 build_datetime_parser
+  $self->_identity_method('@@identity');
 
-The resulting parser handles the MSSQL C<DATETIME> type, but is almost
-certainly not sufficient for the other MSSQL 2008 date/time types.
+in which case it will be used only when SCOPE_IDENTITY() fails.
 
-=head1 AUTHORS
+This is more dangerous, as inserting into a table with an on insert trigger that
+inserts into another table with an identity will give erroneous results on
+recent versions of SQL Server.
 
-Brian Cassidy <bricas at cpan.org>
+=head2 identity insert
 
+Be aware that we have tried to make things as simple as possible for our users.
+For MSSQL that means that when a user tries to create a row, while supplying an
+explicit value for an autoincrementing column, we will try to issue the
+appropriate database call to make this possible, namely C<SET IDENTITY_INSERT
+$table_name ON>. Unfortunately this operation in MSSQL requires the
+C<db_ddladmin> privilege, which is normally not included in the standard
+write permissions.
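+
+For example, a create() supplying an explicit value for an autoincrementing
+column triggers the IDENTITY_INSERT dance transparently (hypothetical
+result class):
+
+  # artistid is the autoincrementing PK
+  $schema->resultset('Artist')->create({ artistid => 42, name => 'Foo' });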
+
+=head2 Ordered Subselects
+
+If you attempted the following query (among many others) in Microsoft SQL
+Server
+
+ $rs->search ({}, {
+  prefetch => 'relation',
+  rows => 2,
+  offset => 3,
+ });
+
+You may be surprised to receive an exception. The reason for this is a quirk
+in the MSSQL engine itself, one that sadly has no sensible workaround due
+to the way DBIC is built. DBIC can do truly wonderful things with the aid of
+subselects, and does so automatically when necessary. The list of situations
+when a subselect is necessary is long and still changes often, so it cannot
+be exhaustively enumerated here. The general rule of thumb is a joined
+L<has_many|DBIx::Class::Relationship/has_many> relationship with limit/group
+applied to the left part of the join.
+
+In its "pursuit of standards" Microsft SQL Server goes to great lengths to
+forbid the use of ordered subselects. This breaks a very useful group of
+searches like "Give me things number 4 to 6 (ordered by name), and prefetch
+all their relations, no matter how many". While there is a hack which fools
+the syntax checker, the optimizer may B<still elect to break the subselect>.
+Testing has determined that while such breakage does occur (the test suite
+contains an explicit test which demonstrates the problem), it is relatively
+rare. The benefits of ordered subselects are, on the other hand, too great to be
+outright disabled for MSSQL.
+
+Thus the compromise between usability and perfection is the MSSQL-specific
+L<resultset attribute|DBIx::Class::ResultSet/ATTRIBUTES> C<unsafe_subselect_ok>.
+It is deliberately not possible to set this on the Storage level, as the user
+should inspect (and preferably regression-test) the return of every such
+ResultSet individually. The example above would work if written like:
+
+ $rs->search ({}, {
+  unsafe_subselect_ok => 1,
+  prefetch => 'relation',
+  rows => 2,
+  offset => 3,
+ });
+
+If it is possible to rewrite the search() in a way that will avoid the need
+for this flag - you are urged to do so. If DBIC internals insist that an
+ordered subselect is necessary for an operation, and you believe there is a
+different/better way to get the same result - please file a bug report.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
 =head1 LICENSE
 
 You may distribute this code under the same terms as Perl itself.

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/MultiColumnIn.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/MultiColumnIn.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/MultiColumnIn.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -4,6 +4,7 @@
 use warnings;
 
 use base 'DBIx::Class::Storage::DBI';
+use mro 'c3';
 
 =head1 NAME 
 
@@ -16,7 +17,7 @@
 The storage class for any such RDBMS should inherit from this class, in order
 to dramatically speed up update/delete operations on joined multipk resultsets.
 
-At this point the only overriden method is C<_multipk_update_delete()>
+At this point the only overridden method is C<_multipk_update_delete()>
 
 =cut
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/NoBindVars.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/NoBindVars.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -4,6 +4,7 @@
 use warnings;
 
 use base 'DBIx::Class::Storage::DBI';
+use mro 'c3';
 
 =head1 NAME 
 
@@ -39,24 +40,32 @@
 sub _prep_for_execute {
   my $self = shift;
 
-  my ($op, $extra_bind, $ident) = @_;
-
   my ($sql, $bind) = $self->next::method(@_);
 
-  # stringify args, quote via $dbh, and manually insert
+  # stringify bind args, quote via $dbh, and manually insert
+  #my ($op, $extra_bind, $ident, $args) = @_;
+  my $ident = $_[2];
 
   my @sql_part = split /\?/, $sql;
   my $new_sql;
 
+  my $col_info = $self->_resolve_column_info($ident, [ map $_->[0], @$bind ]);
+
   foreach my $bound (@$bind) {
     my $col = shift @$bound;
-    my $datatype = 'FIXME!!!';
+
+    my $datatype = $col_info->{$col}{data_type};
+
     foreach my $data (@$bound) {
-        if(ref $data) {
-            $data = ''.$data;
-        }
-        $data = $self->_dbh->quote($data);
-        $new_sql .= shift(@sql_part) . $data;
+      $data = ''.$data if ref $data;
+
+      $data = $self->_prep_interpolated_value($datatype, $data)
+        if $datatype;
+
+      $data = $self->_dbh->quote($data)
+        unless $self->interpolate_unquoted($datatype, $data);
+
+      $new_sql .= shift(@sql_part) . $data;
     }
   }
   $new_sql .= join '', @sql_part;
@@ -64,12 +73,44 @@
   return ($new_sql, []);
 }
 
+=head2 interpolate_unquoted
+
+This method is called by L</_prep_for_execute> for every column in
+order to determine if its value should be quoted or not. The arguments
+are the current column data type and the actual bind value. The return
+value is interpreted as: true - do not quote, false - do quote. You should
+override this in your Storage::DBI::<database> subclass, if your RDBMS
+does not like quotes around certain datatypes (e.g. Sybase and integer
+columns). The default method always returns false (do quote).
+
+ WARNING!!!
+
+ Always validate that the bind-value is valid for the current datatype.
+ Otherwise you may very well open the door to SQL injection attacks.
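+
+A minimal sketch of such an override, assuming an RDBMS that wants bare
+integers (this is an illustration, not part of this class):
+
+  sub interpolate_unquoted {
+    my ($self, $datatype, $value) = @_;
+
+    # interpolate integers unquoted, but only after validating that the
+    # value really is an integer (see the warning above)
+    return 1 if ($datatype || '') =~ /int/i and $value =~ /\A -? \d+ \z/x;
+
+    return 0;  # everything else gets quoted as usual
+  }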
+
+=cut
+
+sub interpolate_unquoted {
+  #my ($self, $datatype, $value) = @_;
+  return 0;
+}
+
+=head2 _prep_interpolated_value
+
+Given a datatype and the value to be inserted directly into a SQL query, returns
+the necessary string to represent that value (by e.g. adding a '$' sign)
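+
+For example, a subclass for an RDBMS that expects money literals in
+C<$123.45> form might do something like (a sketch, not part of this class):
+
+  sub _prep_interpolated_value {
+    my ($self, $datatype, $value) = @_;
+
+    # money values interpolated into SQL need a leading dollar sign
+    return '$' . $value if ($datatype || '') =~ /money/i;
+
+    return $value;
+  }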
+
+=cut
+
+sub _prep_interpolated_value {
+  #my ($self, $datatype, $value) = @_;
+  return $_[2];
+}
+
 =head1 AUTHORS
 
-Brandon Black <blblack at gmail.com>
+See L<DBIx::Class/CONTRIBUTORS>
 
-Trym Skaar <trym at tryms.no>
-
 =head1 LICENSE
 
 You may distribute this code under the same terms as Perl itself.

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -2,9 +2,11 @@
 use strict;
 use warnings;
 
-use DBI;
 use base qw/DBIx::Class::Storage::DBI/;
+use mro 'c3';
 
+use DBI;
+
 my $ERR_MSG_START = __PACKAGE__ . ' failed: ';
 
 sub insert {
@@ -38,11 +40,11 @@
 
 sub bind_attribute_by_data_type {
     my $self = shift;
-    
+
     my ( $data_type ) = @_;
-    
+
     return { TYPE => $data_type } if $data_type == DBI::SQL_LONGVARCHAR;
-    
+
     return;
 }
 
@@ -77,7 +79,7 @@
 
 =head1 IMPLEMENTATION NOTES
 
-MS Access supports the @@IDENTITY function for retriving the id of the latest inserted row.
+MS Access supports the @@IDENTITY function for retrieving the id of the latest inserted row.
 @@IDENTITY is global to the connection, so to support the possibility of getting the last inserted
 id for different tables, the insert() function stores the inserted id on a per table basis.
 last_insert_id() then just returns the stored value.

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,6 +3,7 @@
 use warnings;
 
 use base qw/DBIx::Class::Storage::DBI::ODBC/;
+use mro 'c3';
 
 sub _dbh_last_insert_id {
     my ($self, $dbh, $source, $col) = @_;
@@ -22,7 +23,7 @@
 
 sub _sql_maker_opts {
     my ($self) = @_;
-    
+
     $self->dbh_do(sub {
         my ($self, $dbh) = @_;
 
@@ -42,8 +43,8 @@
 
 =head1 SYNOPSIS
 
-  # In your table classes
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  # In your result (table) classes
+  use base 'DBIx::Class::Core';
   __PACKAGE__->set_primary_key('id');
 
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,64 +3,195 @@
 use warnings;
 
 use base qw/DBIx::Class::Storage::DBI::MSSQL/;
+use mro 'c3';
 
-sub _prep_for_execute {
-    my $self = shift;
-    my ($op, $extra_bind, $ident, $args) = @_;
+use List::Util();
+use Scalar::Util ();
 
-    my ($sql, $bind) = $self->next::method (@_);
-    $sql .= ';SELECT SCOPE_IDENTITY()' if $op eq 'insert';
+__PACKAGE__->mk_group_accessors(simple => qw/
+  _using_dynamic_cursors
+/);
 
-    return ($sql, $bind);
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server - Support specific
+to Microsoft SQL Server over ODBC
+
+=head1 DESCRIPTION
+
+This class implements support specific to Microsoft SQL Server over ODBC.  It is
+loaded automatically by DBIx::Class::Storage::DBI::ODBC when it detects a
+MSSQL back-end.
+
+Most of the functionality is provided from the superclass
+L<DBIx::Class::Storage::DBI::MSSQL>.
+
+=head1 MULTIPLE ACTIVE STATEMENTS
+
+The following options are alternative ways to enable support for concurrently
+executing statements. Each has its own advantages and drawbacks.
+
+=head2 connect_call_use_dynamic_cursors
+
+Use as:
+
+  on_connect_call => 'use_dynamic_cursors'
+
+in your L<DBIx::Class::Storage::DBI/connect_info> as one way to enable multiple
+concurrent statements.
+
+Will add C<< odbc_cursortype => 2 >> to your DBI connection attributes. See
+L<DBD::ODBC/odbc_cursortype> for more information.
+
+Alternatively, you can add it yourself and dynamic cursor support will be
+automatically enabled.
+
+If you're using FreeTDS, C<tds_version> must be set to at least C<8.0>.
+
+This will not work with a CODE ref connect_info.
+
+B<WARNING:> this will break C<SCOPE_IDENTITY()>, and C<SELECT @@IDENTITY> will
+be used instead, which on SQL Server 2005 and later will return erroneous
+results on tables which have an on insert trigger that inserts into another
+table with an C<IDENTITY> column.
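+
+A complete connect might thus look like (a sketch, DSN and credentials are
+of course made up):
+
+  my $schema = MyApp::Schema->connect(
+    'dbi:ODBC:server=mssql.example.com;database=mydb',
+    $user,
+    $pass,
+    { on_connect_call => 'use_dynamic_cursors' },
+  );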
+
+=cut
+
+sub connect_call_use_dynamic_cursors {
+  my $self = shift;
+
+  if (ref($self->_dbi_connect_info->[0]) eq 'CODE') {
+    $self->throw_exception ('Cannot set DBI attributes on a CODE ref connect_info');
+  }
+
+  my $dbi_attrs = $self->_dbi_connect_info->[-1];
+
+  unless (ref($dbi_attrs) && Scalar::Util::reftype($dbi_attrs) eq 'HASH') {
+    $dbi_attrs = {};
+    push @{ $self->_dbi_connect_info }, $dbi_attrs;
+  }
+
+  if (not exists $dbi_attrs->{odbc_cursortype}) {
+    # turn on support for multiple concurrent statements, unless overridden
+    $dbi_attrs->{odbc_cursortype} = 2;
+    $self->disconnect; # resetting dbi attrs, so have to reconnect
+    $self->ensure_connected;
+    $self->_set_dynamic_cursors;
+  }
 }
 
-sub _execute {
-    my $self = shift;
-    my ($op) = @_;
+sub _set_dynamic_cursors {
+  my $self = shift;
+  my $dbh  = $self->_get_dbh;
 
-    my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);
-    if ($op eq 'insert') {
-      $self->{_scope_identity} = $sth->fetchrow_array;
-      $sth->finish;
-    }
+  eval {
+    local $dbh->{RaiseError} = 1;
+    local $dbh->{PrintError} = 0;
+    $dbh->do('SELECT @@IDENTITY');
+  };
+  if ($@) {
+    $self->throw_exception (<<'EOF');
 
-    return wantarray ? ($rv, $sth, @bind) : $rv;
+Your drivers do not seem to support dynamic cursors (odbc_cursortype => 2).
+If you're using FreeTDS, make sure to set tds_version to 8.0 or greater.
+EOF
+  }
+
+  $self->_using_dynamic_cursors(1);
+  $self->_identity_method('@@identity');
 }
 
-sub last_insert_id { shift->{_scope_identity} }
+sub _init {
+  my $self = shift;
 
-1;
+  no warnings qw/uninitialized/;
 
-__END__
+  if (
+    ref($self->_dbi_connect_info->[0]) ne 'CODE'
+      &&
+    ref ($self->_dbi_connect_info->[-1]) eq 'HASH'
+      &&
+    $self->_dbi_connect_info->[-1]{odbc_cursortype} == 2
+  ) {
+    $self->_set_dynamic_cursors;
+    return;
+  }
 
-=head1 NAME
+  $self->_using_dynamic_cursors(0);
+}
 
-DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server - Support specific
-to Microsoft SQL Server over ODBC
+=head2 connect_call_use_server_cursors
 
-=head1 DESCRIPTION
+Use as:
 
-This class implements support specific to Microsoft SQL Server over ODBC,
-including auto-increment primary keys and SQL::Abstract::Limit dialect.  It
-is loaded automatically by by DBIx::Class::Storage::DBI::ODBC when it
-detects a MSSQL back-end.
+  on_connect_call => 'use_server_cursors'
 
-=head1 IMPLEMENTATION NOTES
+May allow multiple active select statements. See
+L<DBD::ODBC/odbc_SQL_ROWSET_SIZE> for more information.
 
-Microsoft SQL Server supports three methods of retrieving the IDENTITY
-value for inserted row: IDENT_CURRENT, @@IDENTITY, and SCOPE_IDENTITY().
-SCOPE_IDENTITY is used here because it is the safest.  However, it must
-be called is the same execute statement, not just the same connection.
+Takes an optional parameter for the value to set the attribute to, default is
+C<2>.
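+
+To pass a value other than the default, use the arrayref form of
+L<on_connect_call|DBIx::Class::Storage::DBI/connect_info>, e.g. (a sketch):
+
+  on_connect_call => [ [ use_server_cursors => 3 ] ]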
 
-So, this implementation appends a SELECT SCOPE_IDENTITY() statement
-onto each INSERT to accommodate that requirement.
+B<WARNING>: this does not work on all versions of SQL Server, and may lock up
+your database!
 
-=head1 AUTHORS
+=cut
 
-Marc Mims C<< <marc at questright.com> >>
+sub connect_call_use_server_cursors {
+  my $self            = shift;
+  my $sql_rowset_size = shift || 2;
 
+  $self->_get_dbh->{odbc_SQL_ROWSET_SIZE} = $sql_rowset_size;
+}
+
+=head2 connect_call_use_MARS
+
+Use as:
+
+  on_connect_call => 'use_MARS'
+
+Use to enable a feature of SQL Server 2005 and later, "Multiple Active Result
+Sets". See L<DBD::ODBC::FAQ/Does DBD::ODBC support Multiple Active Statements?>
+for more information.
+
+B<WARNING>: This has implications for the way transactions are handled.
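+
+Under the hood this simply appends to your DSN, e.g. (a sketch):
+
+  dbi:ODBC:server=mssql.example.com;database=mydb;MARS_Connection=Yes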
+
+=cut
+
+sub connect_call_use_MARS {
+  my $self = shift;
+
+  my $dsn = $self->_dbi_connect_info->[0];
+
+  if (ref($dsn) eq 'CODE') {
+    $self->throw_exception('cannot change the DBI DSN on a CODE ref connect_info');
+  }
+
+  if ($dsn !~ /MARS_Connection=/) {
+    $self->_dbi_connect_info->[0] = "$dsn;MARS_Connection=Yes";
+    my $was_connected = defined $self->_dbh;
+    $self->disconnect;
+    $self->ensure_connected if $was_connected;
+  }
+}
+
+sub _get_mssql_version {
+  my $self = shift;
+
+  my ($version) = $self->_get_dbh->get_info(18) =~ /^(\d+)/;
+
+  return $version;
+}
+
+1;
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
 =head1 LICENSE
 
 You may distribute this code under the same terms as Perl itself.
 
 =cut
+# vim: sw=2 sts=2

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/SQL_Anywhere.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/SQL_Anywhere.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC/SQL_Anywhere.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,28 @@
+package DBIx::Class::Storage::DBI::ODBC::SQL_Anywhere;
+
+use strict;
+use warnings;
+use base qw/DBIx::Class::Storage::DBI::SQLAnywhere/;
+use mro 'c3';
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ODBC::SQL_Anywhere - Driver for using Sybase SQL
+Anywhere through ODBC
+
+=head1 SYNOPSIS
+
+All functionality is provided by L<DBIx::Class::Storage::DBI::SQLAnywhere>; see
+that module for details.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/ODBC.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,47 +3,35 @@
 use warnings;
 
 use base qw/DBIx::Class::Storage::DBI/;
+use mro 'c3';
 
 sub _rebless {
     my ($self) = @_;
 
-    my $dbtype = eval { $self->dbh->get_info(17) };
+    my $dbtype = eval { $self->_get_dbh->get_info(17) };
+
     unless ( $@ ) {
         # Translate the backend name into a perl identifier
         $dbtype =~ s/\W/_/gi;
-        my $class = "DBIx::Class::Storage::DBI::ODBC::${dbtype}";
-        eval "require $class";
-        bless $self, $class unless $@;
+        my $subclass = "DBIx::Class::Storage::DBI::ODBC::${dbtype}";
+        if ($self->load_optional_class($subclass) && !$self->isa($subclass)) {
+            bless $self, $subclass;
+            $self->_rebless;
+        }
     }
 }
 
-sub _dbh_last_insert_id {
-    my ($self, $dbh, $source, $col) = @_;
-
-    # punt: if there is no derived class for the specific backend, attempt
-    # to use the DBI->last_insert_id, which may not be sufficient (see the
-    # discussion of last_insert_id in perldoc DBI)
-    return $dbh->last_insert_id(undef, undef, $source->from, $col);
-}
-
 1;
 
 =head1 NAME
 
 DBIx::Class::Storage::DBI::ODBC - Base class for ODBC drivers
 
-=head1 SYNOPSIS
-
-  # In your table classes
-  __PACKAGE__->load_components(qw/Core/);
-
-
 =head1 DESCRIPTION
 
 This class simply provides a mechanism for discovering and loading a sub-class
 for a specific ODBC backend.  It should be transparent to the user.
 
-
 =head1 AUTHORS
 
 Marc Mims C<< <marc at questright.com> >>

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -5,30 +5,29 @@
 
 =head1 NAME
 
-DBIx::Class::Storage::DBI::Oracle::Generic - Automatic primary key class for Oracle
+DBIx::Class::Storage::DBI::Oracle::Generic - Oracle Support for DBIx::Class
 
 =head1 SYNOPSIS
 
-  # In your table classes
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  # In your result (table) classes
+  use base 'DBIx::Class::Core';
   __PACKAGE__->add_columns({ id => { sequence => 'mysequence', auto_nextval => 1 } });
   __PACKAGE__->set_primary_key('id');
   __PACKAGE__->sequence('mysequence');
 
 =head1 DESCRIPTION
 
-This class implements autoincrements for Oracle.
+This class implements base Oracle support. The subclass
+L<DBIx::Class::Storage::DBI::Oracle::WhereJoins> is for C<(+)> joins in Oracle
+versions before 9.
 
 =head1 METHODS
 
 =cut
 
 use base qw/DBIx::Class::Storage::DBI/;
-use Carp::Clan qw/^DBIx::Class/;
+use mro 'c3';
 
-# For ORA_BLOB => 113, ORA_CLOB => 112
-use DBD::Oracle qw( :ora_types );
-
 sub _dbh_last_insert_id {
   my ($self, $dbh, $source, @columns) = @_;
   my @ids = ();
@@ -52,12 +51,20 @@
   };
 
   # trigger_body is a LONG
-  $dbh->{LongReadLen} = 64 * 1024 if ($dbh->{LongReadLen} < 64 * 1024);
+  local $dbh->{LongReadLen} = 64 * 1024 if ($dbh->{LongReadLen} < 64 * 1024);
 
   my $sth;
 
+  my $source_name;
+  if ( ref $source->name ne 'SCALAR' ) {
+      $source_name = $source->name;
+  }
+  else {
+      $source_name = ${$source->name};
+  }
+
   # check for fully-qualified name (eg. SCHEMA.TABLENAME)
-  if ( my ( $schema, $table ) = $source->name =~ /(\w+)\.(\w+)/ ) {
+  if ( my ( $schema, $table ) = $source_name =~ /(\w+)\.(\w+)/ ) {
     $sql = q{
       SELECT trigger_body FROM ALL_TRIGGERS t
       WHERE t.owner = ? AND t.table_name = ?
@@ -69,7 +76,7 @@
   }
   else {
     $sth = $dbh->prepare($sql);
-    $sth->execute( uc( $source->name ) );
+    $sth->execute( uc( $source_name ) );
   }
   while (my ($insert_trigger) = $sth->fetchrow_array) {
     return uc($1) if $insert_trigger =~ m!(\w+)\.nextval!i; # col name goes here???
@@ -79,40 +86,22 @@
 
 sub _sequence_fetch {
   my ( $self, $type, $seq ) = @_;
-  my ($id) = $self->dbh->selectrow_array("SELECT ${seq}.${type} FROM DUAL");
+  my ($id) = $self->_get_dbh->selectrow_array("SELECT ${seq}.${type} FROM DUAL");
   return $id;
 }
 
-=head2 connected
-
-Returns true if we have an open (and working) database connection, false if it is not (yet)
-open (or does not work). (Executes a simple SELECT to make sure it works.)
-
-The reason this is needed is that L<DBD::Oracle>'s ping() does not do a real
-OCIPing but just gets the server version, which doesn't help if someone killed
-your session.
-
-=cut
-
-sub connected {
+sub _ping {
   my $self = shift;
 
-  if (not $self->next::method(@_)) {
-    return 0;
-  }
-  else {
-    my $dbh = $self->_dbh;
+  my $dbh = $self->_dbh or return 0;
 
-    local $dbh->{RaiseError} = 1;
+  local $dbh->{RaiseError} = 1;
 
-    eval {
-      my $ping_sth = $dbh->prepare_cached("select 1 from dual");
-      $ping_sth->execute;
-      $ping_sth->finish;
-    };
+  eval {
+    $dbh->do("select 1 from dual");
+  };
 
-    return $@ ? 0 : 1;
-  }
+  return $@ ? 0 : 1;
 }
 
 sub _dbh_execute {
@@ -157,7 +146,7 @@
 
 sub get_autoinc_seq {
   my ($self, $source, $col) = @_;
-    
+
   $self->dbh_do('_dbh_get_autoinc_seq', $source, $col);
 }
 
@@ -183,10 +172,52 @@
 
 sub datetime_parser_type { return "DateTime::Format::Oracle"; }
 
-sub _svp_begin {
-    my ($self, $name) = @_;
- 
-    $self->dbh->do("SAVEPOINT $name");
+=head2 connect_call_datetime_setup
+
+Used as:
+
+    on_connect_call => 'datetime_setup'
+
+In L<DBIx::Class::Storage::DBI/connect_info> to set the session NLS date and
+timestamp formats for use with L<DBIx::Class::InflateColumn::DateTime> and the
+necessary environment variables for L<DateTime::Format::Oracle>, which it uses
+internally.
+
+Maximum allowable precision is used, unless the environment variables have
+already been set.
+
+These are the defaults used:
+
+  $ENV{NLS_DATE_FORMAT}         ||= 'YYYY-MM-DD HH24:MI:SS';
+  $ENV{NLS_TIMESTAMP_FORMAT}    ||= 'YYYY-MM-DD HH24:MI:SS.FF';
+  $ENV{NLS_TIMESTAMP_TZ_FORMAT} ||= 'YYYY-MM-DD HH24:MI:SS.FF TZHTZM';
+
+To get more than second precision with L<DBIx::Class::InflateColumn::DateTime>
+for your timestamps, use something like this:
+
+  use Time::HiRes 'time';
+  my $ts = DateTime->from_epoch(epoch => time);
+
+=cut
+
+sub connect_call_datetime_setup {
+  my $self = shift;
+
+  my $date_format = $ENV{NLS_DATE_FORMAT} ||= 'YYYY-MM-DD HH24:MI:SS';
+  my $timestamp_format = $ENV{NLS_TIMESTAMP_FORMAT} ||=
+    'YYYY-MM-DD HH24:MI:SS.FF';
+  my $timestamp_tz_format = $ENV{NLS_TIMESTAMP_TZ_FORMAT} ||=
+    'YYYY-MM-DD HH24:MI:SS.FF TZHTZM';
+
+  $self->_do_query(
+    "alter session set nls_date_format = '$date_format'"
+  );
+  $self->_do_query(
+    "alter session set nls_timestamp_format = '$timestamp_format'"
+  );
+  $self->_do_query(
+    "alter session set nls_timestamp_tz_format='$timestamp_tz_format'"
+  );
 }
 
 =head2 source_bind_attributes
@@ -206,47 +237,99 @@
 
 =cut
 
-sub source_bind_attributes 
+sub source_bind_attributes
 {
-	my $self = shift;
-	my($source) = @_;
+  require DBD::Oracle;
+  my $self = shift;
+  my($source) = @_;
 
-	my %bind_attributes;
+  my %bind_attributes;
 
-	foreach my $column ($source->columns) {
-		my $data_type = $source->column_info($column)->{data_type} || '';
-		next unless $data_type;
+  foreach my $column ($source->columns) {
+    my $data_type = $source->column_info($column)->{data_type} || '';
+    next unless $data_type;
 
-		my %column_bind_attrs = $self->bind_attribute_by_data_type($data_type);
+    my %column_bind_attrs = $self->bind_attribute_by_data_type($data_type);
 
-		if ($data_type =~ /^[BC]LOB$/i) {
-			$column_bind_attrs{'ora_type'}
-				= uc($data_type) eq 'CLOB' ? ORA_CLOB : ORA_BLOB;
-			$column_bind_attrs{'ora_field'} = $column;
-		}
+    if ($data_type =~ /^[BC]LOB$/i) {
+      if ($DBD::Oracle::VERSION eq '1.23') {
+        $self->throw_exception(
+"BLOB/CLOB support in DBD::Oracle == 1.23 is broken, use an earlier or later ".
+"version.\n\nSee: https://rt.cpan.org/Public/Bug/Display.html?id=46016\n"
+        );
+      }
 
-		$bind_attributes{$column} = \%column_bind_attrs;
-	}
+      $column_bind_attrs{'ora_type'} = uc($data_type) eq 'CLOB'
+        ? DBD::Oracle::ORA_CLOB()
+        : DBD::Oracle::ORA_BLOB()
+      ;
+      $column_bind_attrs{'ora_field'} = $column;
+    }
 
-	return \%bind_attributes;
+    $bind_attributes{$column} = \%column_bind_attrs;
+  }
+
+  return \%bind_attributes;
 }
 
+sub _svp_begin {
+  my ($self, $name) = @_;
+  $self->_get_dbh->do("SAVEPOINT $name");
+}
+
 # Oracle automatically releases a savepoint when you start another one with the
 # same name.
 sub _svp_release { 1 }
 
 sub _svp_rollback {
-    my ($self, $name) = @_;
+  my ($self, $name) = @_;
+  $self->_get_dbh->do("ROLLBACK TO SAVEPOINT $name")
+}
 
-    $self->dbh->do("ROLLBACK TO SAVEPOINT $name")
+=head2 relname_to_table_alias
+
+L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in
+queries.
+
+Unfortunately, Oracle doesn't support identifiers over 30 chars in length, so
+the L<DBIx::Class::Relationship> name is shortened and appended with half of an
+MD5 hash.
+
+See L<DBIx::Class::Storage/"relname_to_table_alias">.
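+
+For example, a relationship named C<very_long_relationship_name_indeed>
+comes out roughly as (a sketch - the actual hash suffix will differ):
+
+  vry_lng_rltnshp_nm_XXXXXXXXXXX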
+
+=cut
+
+sub relname_to_table_alias {
+  my $self = shift;
+  my ($relname, $join_count) = @_;
+
+  my $alias = $self->next::method(@_);
+
+  return $alias if length($alias) <= 30;
+
+  # get a base64 md5 of the alias with join_count
+  require Digest::MD5;
+  my $ctx = Digest::MD5->new;
+  $ctx->add($alias);
+  my $md5 = $ctx->b64digest;
+
+  # remove alignment mark just in case
+  $md5 =~ s/=*\z//;
+
+  # truncate and prepend to truncated relname without vowels
+  (my $devoweled = $relname) =~ s/[aeiou]//g;
+  my $shortened = substr($devoweled, 0, 18);
+
+  my $new_alias =
+    $shortened . '_' . substr($md5, 0, 30 - length($shortened) - 1);
+
+  return $new_alias;
 }
 
-=head1 AUTHORS
+=head1 AUTHOR
 
-Andy Grundman <andy at hybridized.org>
+See L<DBIx::Class/CONTRIBUTORS>.
 
-Scott Connelly <scottsweep at yahoo.com>
-
 =head1 LICENSE
 
 You may distribute this code under the same terms as Perl itself.

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Oracle/WhereJoins.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Oracle/WhereJoins.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Oracle/WhereJoins.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,10 +1,11 @@
 package DBIx::Class::Storage::DBI::Oracle::WhereJoins;
 
-use base qw( DBIx::Class::Storage::DBI::Oracle::Generic );
-
 use strict;
 use warnings;
 
+use base qw( DBIx::Class::Storage::DBI::Oracle::Generic );
+use mro 'c3';
+
 __PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks::OracleJoins');
 
 1;
@@ -22,8 +23,7 @@
 
 This module was originally written to support Oracle < 9i where ANSI joins
 weren't supported at all, but became the module for Oracle >= 8 because
-Oracle's optimising of ANSI joins is horrible.  (See:
-http://scsys.co.uk:8001/7495)
+Oracle's optimising of ANSI joins is horrible.
 
 =head1 SYNOPSIS
 
@@ -43,7 +43,7 @@
 It should properly support left joins, and right joins.  Full outer joins are
 not possible due to the fact that Oracle requires the entire query be written
 to union the results of a left and right join, and by the time this module is
-called to create the where query and table definition part of the sql query,
+called to create the where query and table definition part of the SQL query,
 it's already too late.
 
 =head1 METHODS

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Oracle.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Oracle.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Oracle.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -4,11 +4,12 @@
 use warnings;
 
 use base qw/DBIx::Class::Storage::DBI/;
+use mro 'c3';
 
 sub _rebless {
     my ($self) = @_;
 
-    my $version = eval { $self->dbh->get_info(18); };
+    my $version = eval { $self->_get_dbh->get_info(18); };
 
     if ( !$@ ) {
         my ($major, $minor, $patchlevel) = split(/\./, $version);
@@ -18,10 +19,8 @@
           ? 'DBIx::Class::Storage::DBI::Oracle::WhereJoins'
           : 'DBIx::Class::Storage::DBI::Oracle::Generic';
 
-        # Load and rebless
-        eval "require $class";
-
-        bless $self, $class unless $@;
+        $self->ensure_class_loaded ($class);
+        bless $self, $class;
     }
 }
 
@@ -31,11 +30,6 @@
 
 DBIx::Class::Storage::DBI::Oracle - Base class for Oracle driver
 
-=head1 SYNOPSIS
-
-  # In your table classes
-  __PACKAGE__->load_components(qw/Core/);
-
 =head1 DESCRIPTION
 
 This class simply provides a mechanism for discovering and loading a sub-class

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Pg.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Pg.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Pg.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,62 +3,136 @@
 use strict;
 use warnings;
 
-use DBD::Pg qw(:pg_types);
-
 use base qw/DBIx::Class::Storage::DBI::MultiColumnIn/;
+use mro 'c3';
 
-# __PACKAGE__->load_components(qw/PK::Auto/);
+use DBD::Pg qw(:pg_types);
 
-# Warn about problematic versions of DBD::Pg
-warn "DBD::Pg 1.49 is strongly recommended"
-  if ($DBD::Pg::VERSION < 1.49);
+# Ask for a DBD::Pg with array support
+warn "DBD::Pg 2.9.2 or greater is strongly recommended\n"
+  if ($DBD::Pg::VERSION < 2.009002);  # pg uses (used?) version::qv()
 
 sub with_deferred_fk_checks {
   my ($self, $sub) = @_;
 
-  $self->dbh->do('SET CONSTRAINTS ALL DEFERRED');
+  $self->_get_dbh->do('SET CONSTRAINTS ALL DEFERRED');
   $sub->();
 }
 
+sub last_insert_id {
+  my ($self,$source, at cols) = @_;
+
+  my @values;
+
+  for my $col (@cols) {
+    my $seq = ( $source->column_info($col)->{sequence} ||= $self->dbh_do('_dbh_get_autoinc_seq', $source, $col) )
+      or $self->throw_exception( sprintf(
+        'could not determine sequence for column %s.%s, please consider adding a schema-qualified sequence to its column info',
+          $source->name,
+          $col,
+      ));
+
+    push @values, $self->_dbh_last_insert_id ($self->_dbh, $seq);
+  }
+
+  return @values;
+}
+
+# there seems to be absolutely no reason to have this as a separate method,
+# but leaving intact in case someone is already overriding it
 sub _dbh_last_insert_id {
   my ($self, $dbh, $seq) = @_;
   $dbh->last_insert_id(undef, undef, undef, undef, {sequence => $seq});
 }
 
-sub last_insert_id {
-  my ($self,$source,$col) = @_;
-  my $seq = ($source->column_info($col)->{sequence} ||= $self->get_autoinc_seq($source,$col));
-  $self->throw_exception("could not fetch primary key for " . $source->name . ", could not "
-    . "get autoinc sequence for $col (check that table and column specifications are correct "
-    . "and in the correct case)") unless defined $seq;
-  $self->dbh_do('_dbh_last_insert_id', $seq);
-}
 
 sub _dbh_get_autoinc_seq {
-  my ($self, $dbh, $schema, $table, @pri) = @_;
+  my ($self, $dbh, $source, $col) = @_;
 
-  while (my $col = shift @pri) {
-    my $info = $dbh->column_info(undef,$schema,$table,$col)->fetchrow_hashref;
-    if(defined $info->{COLUMN_DEF} and
-       $info->{COLUMN_DEF} =~ /^nextval\(+'([^']+)'::(?:text|regclass)\)/) {
-      my $seq = $1;
-      # may need to strip quotes -- see if this works
-      return $seq =~ /\./ ? $seq : $info->{TABLE_SCHEM} . "." . $seq;
-    }
+  my $schema;
+  my $table = $source->name;
+
+  # deref table name if it needs it
+  $table = $$table
+      if ref $table eq 'SCALAR';
+
+  # parse out schema name if present
+  if( $table =~ /^(.+)\.(.+)$/ ) {
+    ( $schema, $table ) = ( $1, $2 );
   }
-  return;
+
+  # get the column default using a Postgres-specific pg_catalog query
+  my $seq_expr = $self->_dbh_get_column_default( $dbh, $schema, $table, $col );
+
+  # if no default value is set on the column, or if we can't parse the
+  # default value as a sequence, throw.
+  unless ( defined $seq_expr and $seq_expr =~ /^nextval\(+'([^']+)'::(?:text|regclass)\)/i ) {
+    $seq_expr = '' unless defined $seq_expr;
+    $schema = "$schema." if defined $schema && length $schema;
+    $self->throw_exception( sprintf (
+      'no sequence found for %s%s.%s, check the RDBMS table definition or explicitly set the '.
+      "'sequence' for this column in %s",
+        $schema || '',
+        $table,
+        $col,
+        $source->source_name,
+    ));
+  }
+
+  return $1;
 }
 
-sub get_autoinc_seq {
-  my ($self,$source,$col) = @_;
-    
-  my @pri = $source->primary_columns;
-  my ($schema,$table) = $source->name =~ /^(.+)\.(.+)$/ ? ($1,$2)
-    : (undef,$source->name);
+# custom method for fetching column default, since column_info has a
+# bug with older versions of DBD::Pg
+sub _dbh_get_column_default {
+  my ( $self, $dbh, $schema, $table, $col ) = @_;
 
-  $self->dbh_do('_dbh_get_autoinc_seq', $schema, $table, @pri);
+  # Build and execute a query into the pg_catalog to find the Pg
+  # expression for the default value for this column in this table.
+  # If the table name is schema-qualified, query using that specific
+  # schema name.
+
+  # Otherwise, find the table in the standard Postgres way, using the
+  # search path.  This is done with the pg_catalog.pg_table_is_visible
+  # function, which returns true if a given table is 'visible',
+  # meaning the first table of that name to be found in the search
+  # path.
+
+  # I *think* we can be assured that this query will always find the
+  # correct column according to standard Postgres semantics.
+  #
+  # -- rbuels
+
+  my $sqlmaker = $self->sql_maker;
+  local $sqlmaker->{bindtype} = 'normal';
+
+  my ($where, @bind) = $sqlmaker->where ({
+    'a.attnum' => {'>', 0},
+    'c.relname' => $table,
+    'a.attname' => $col,
+    -not_bool => 'a.attisdropped',
+    (defined $schema && length $schema)
+      ? ( 'n.nspname' => $schema )
+      : ( -bool => \'pg_catalog.pg_table_is_visible(c.oid)' )
+  });
+
+  my ($seq_expr) = $dbh->selectrow_array(<<EOS,undef,@bind);
+
+SELECT
+  (SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)
+   FROM pg_catalog.pg_attrdef d
+   WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef)
+FROM pg_catalog.pg_class c
+     LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+     JOIN pg_catalog.pg_attribute a ON a.attrelid = c.oid
+$where
+
+EOS
+
+  return $seq_expr;
 }
 
+
 sub sqlt_type {
   return 'PostgreSQL';
 }
@@ -72,7 +146,7 @@
     bytea => { pg_type => DBD::Pg::PG_BYTEA },
     blob  => { pg_type => DBD::Pg::PG_BYTEA },
   };
- 
+
   if( defined $bind_attributes->{$data_type} ) {
     return $bind_attributes->{$data_type};
   }
@@ -83,38 +157,40 @@
 
 sub _sequence_fetch {
   my ( $self, $type, $seq ) = @_;
-  my ($id) = $self->dbh->selectrow_array("SELECT nextval('${seq}')");
+  my ($id) = $self->_get_dbh->selectrow_array("SELECT nextval('${seq}')");
   return $id;
 }
 
 sub _svp_begin {
     my ($self, $name) = @_;
 
-    $self->dbh->pg_savepoint($name);
+    $self->_get_dbh->pg_savepoint($name);
 }
 
 sub _svp_release {
     my ($self, $name) = @_;
 
-    $self->dbh->pg_release($name);
+    $self->_get_dbh->pg_release($name);
 }
 
 sub _svp_rollback {
     my ($self, $name) = @_;
 
-    $self->dbh->pg_rollback_to($name);
+    $self->_get_dbh->pg_rollback_to($name);
 }
 
 1;
 
+__END__
+
 =head1 NAME
 
 DBIx::Class::Storage::DBI::Pg - Automatic primary key class for PostgreSQL
 
 =head1 SYNOPSIS
 
-  # In your table classes
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  # In your result (table) classes
+  use base 'DBIx::Class::Core';
   __PACKAGE__->set_primary_key('id');
   __PACKAGE__->sequence('mysequence');
 
@@ -122,9 +198,30 @@
 
 This class implements autoincrements for PostgreSQL.
 
+=head1 POSTGRESQL SCHEMA SUPPORT
+
+This driver supports multiple PostgreSQL schemas, with one caveat: for
+performance reasons, data about the search path, sequence names, and
+so forth is queried as needed and CACHED for subsequent uses.
+
+For this reason, once your schema is instantiated, you should not
+change the PostgreSQL schema search path for that schema's database
+connection. If you do, Bad Things may happen.
+
+You should do any necessary manipulation of the search path BEFORE
+instantiating your schema object, or as part of the on_connect_do
+option to connect(), for example:
+
+   my $schema = My::Schema->connect
+                  ( $dsn,$user,$pass,
+                    { on_connect_do =>
+                        [ 'SET search_path TO myschema, foo, public' ],
+                    },
+                  );
+
 =head1 AUTHORS
 
-Marcus Ramberg <m.ramberg at cpan.org>
+See L<DBIx::Class/CONTRIBUTORS>
 
 =head1 LICENSE
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -12,14 +12,14 @@
 
 This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.  You
 shouldn't need to create instances of this class.
-    
+
 =head1 DESCRIPTION
 
 Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
 database's (L<DBIx::Class::Storage::DBI::Replicated::Replicant>), defines a
 method by which query load can be spread out across each replicant in the pool.
 
-This Balancer just get's whatever is the first replicant in the pool
+This Balancer just gets whichever is the first replicant in the pool.
 
 =head1 ATTRIBUTES
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -13,7 +13,7 @@
 
 This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.  You
 shouldn't need to create instances of this class.
-    
+
 =head1 DESCRIPTION
 
 Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,7 +3,8 @@
 use Moose::Role;
 requires 'next_storage';
 use MooseX::Types::Moose qw/Int/;
-
+use DBIx::Class::Storage::DBI::Replicated::Pool;
+use DBIx::Class::Storage::DBI::Replicated::Types qw/DBICStorageDBI/;
 use namespace::clean -except => 'meta';
 
 =head1 NAME
@@ -13,7 +14,7 @@
 =head1 SYNOPSIS
 
 This role is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
-    
+
 =head1 DESCRIPTION
 
 Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
@@ -48,7 +49,7 @@
 
 has 'master' => (
   is=>'ro',
-  isa=>'DBIx::Class::Storage::DBI',
+  isa=>DBICStorageDBI,
   required=>1,
 );
 
@@ -74,13 +75,13 @@
 
 This attribute returns the next slave to handle a read request.  Your L</pool>
 attribute has methods to help you shuffle through all the available replicants
-via it's balancer object.
+via its balancer object.
 
 =cut
 
 has 'current_replicant' => (
   is=> 'rw',
-  isa=>'DBIx::Class::Storage::DBI',
+  isa=>DBICStorageDBI,
   lazy_build=>1,
   handles=>[qw/
     select
@@ -109,7 +110,7 @@
 This method should be defined in the class which consumes this role.
 
 Given a pool object, return the next replicant that will serve queries.  The
-default behavior is to grap the first replicant it finds but you can write 
+default behavior is to grab the first replicant it finds but you can write 
 your own subclasses of L<DBIx::Class::Storage::DBI::Replicated::Balancer> to 
 support other balance systems.
 
@@ -169,10 +170,12 @@
 
 around 'select' => sub {
   my ($select, $self, @args) = @_;
-  
+
   if (my $forced_pool = $args[-1]->{force_pool}) {
     delete $args[-1]->{force_pool};
     return $self->_get_forced_pool($forced_pool)->select(@args); 
+  } elsif($self->master->{transaction_depth}) {
+    return $self->master->select(@args);
   } else {
     $self->increment_storage;
     return $self->$select(@args);
@@ -189,10 +192,12 @@
 
 around 'select_single' => sub {
   my ($select_single, $self, @args) = @_;
-  
+
   if (my $forced_pool = $args[-1]->{force_pool}) {
     delete $args[-1]->{force_pool};
     return $self->_get_forced_pool($forced_pool)->select_single(@args); 
+  } elsif($self->master->{transaction_depth}) {
+    return $self->master->select_single(@args);
   } else {
     $self->increment_storage;
     return $self->$select_single(@args);
@@ -224,7 +229,7 @@
     return $forced_pool;
   } elsif($forced_pool eq 'master') {
     return $self->master;
-  } elsif(my $replicant = $self->pool->replicants($forced_pool)) {
+  } elsif(my $replicant = $self->pool->replicants->{$forced_pool}) {
     return $replicant;
   } else {
     $self->master->throw_exception("$forced_pool is not a named replicant.");
@@ -233,7 +238,7 @@
 
 =head1 AUTHOR
 
-John Napiorkowski <john.napiorkowski at takkle.com>
+John Napiorkowski <jjnapiork at cpan.org>
 
 =head1 LICENSE
 

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,184 @@
+package DBIx::Class::Storage::DBI::Replicated::Introduction;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Introduction - Minimum Need to Know
+
+=head1 SYNOPSIS
+
+This is an introductory document for L<DBIx::Class::Storage::Replication>.
+
+This document is not an overview of what replication is or why you should be
+using it.  It is not a document explaining how to setup MySQL native replication
+either.  Copious external resources are available for both.  This document
+presumes you have the basics down.
+  
+=head1 DESCRIPTION
+
+L<DBIx::Class> supports a framework for using database replication.  This system
+is integrated completely, which means that once it's set up you should be able
+to simply start using a replication cluster without additional work or
+changes to your code.  Some caveats apply, primarily related to the proper use
+of transactions (you are wrapping all your database modifying statements inside
+a transaction, right ;) ); however, in our experience properly written DBIC will
+work transparently with Replicated storage.
+
+Currently we have support for MySQL native replication, which is relatively
+easy to install and configure.  We also currently support single master to one
+or more replicants (also called 'slaves' in some documentation).  However the
+framework is not specifically tied to the MySQL framework and supporting other
+replication systems or topologies should be possible.  Please bring your
+patches and ideas to the #dbix-class IRC channel or the mailing list.
+
+For an easy way to start playing with MySQL native replication, see:
+L<MySQL::Sandbox>.
+
+If you are using this with a L<Catalyst> based application, you may also want
+to see more recent updates to L<Catalyst::Model::DBIC::Schema>, which has 
+support for replication configuration options as well.
+
+=head1 REPLICATED STORAGE
+
+By default, when you start L<DBIx::Class>, your Schema (L<DBIx::Class::Schema>)
+is assigned a storage_type, which when fully connected will reflect your
+underlying storage engine as defined by your chosen database driver.  For
+example, if you connect to a MySQL database, your storage_type will be
+L<DBIx::Class::Storage::DBI::mysql>.  Your storage type class will contain
+database specific code to help smooth over the differences between databases
+and let L<DBIx::Class> do its thing.
+
+If you want to use replication, you will override this setting so that the
+replicated storage engine will 'wrap' your underlying storages and present 
+a unified interface to the end programmer.  This wrapper storage class will
+delegate method calls to either a master database or one or more replicated
+databases based on whether they are read-only (by default sent to the replicants)
+or write (reserved for the master).  Additionally, the Replicated storage 
+will monitor the health of your replicants and automatically drop them should
+one exceed configurable parameters.  Later, it can automatically restore a
+replicant when its health is restored.
+
+This gives you a very robust system, since you can add or drop replicants
+and DBIC will automatically adjust itself accordingly.
+
+Additionally, if you need high data integrity, such as when you are executing
+a transaction, replicated storage will automatically delegate all database
+traffic to the master storage.  There are several ways to enable this high
+integrity mode, but wrapping your statements inside a transaction is the easy
+and canonical option. 
+
+=head1 PARTS OF REPLICATED STORAGE
+
+A replicated storage contains several parts.  First, there is the replicated
+storage itself (L<DBIx::Class::Storage::DBI::Replicated>).  A replicated storage
+takes a pool of replicants (L<DBIx::Class::Storage::DBI::Replicated::Pool>)
+and a software balancer (L<DBIx::Class::Storage::DBI::Replicated::Balancer>).  The
+balancer does the job of splitting up all the read traffic amongst the
+replicants in the Pool. Currently there are two types of balancers: a Random
+one, which chooses a Replicant in the Pool using a naive randomizer algorithm,
+and a First one, which just uses the first replicant in the Pool (and obviously
+is only of value when you have a single replicant).
+
+=head1 REPLICATED STORAGE CONFIGURATION
+
+All the parts of replication can be altered dynamically at runtime, which makes
+it possible to create a system that automatically scales under load by creating
+more replicants as needed, perhaps using a cloud system such as Amazon EC2.
+However, for common use you can set up your replicated storage to be enabled at
+the time you connect the databases.  The following is a breakdown of how you
+may wish to do this.  Again, if you are using L<Catalyst>, I strongly recommend
+you use (or upgrade to) the latest L<Catalyst::Model::DBIC::Schema>, which makes
+this job even easier.
+
+First, you need to get a C<$schema> object and set the storage_type:
+
+  my $schema = MyApp::Schema->clone;
+  $schema->storage_type([
+    '::DBI::Replicated' => {
+      balancer_type => '::Random',
+      balancer_args => {
+        auto_validate_every => 5,
+        master_read_weight => 1
+      },
+      pool_args => {
+        maximum_lag => 2,
+      },
+    }
+  ]);
+
+Then, you need to connect your L<DBIx::Class::Schema>.
+
+  $schema->connection($dsn, $user, $pass);
+
+Let's break down the settings.  The method L<DBIx::Class::Schema/storage_type>
+takes one mandatory parameter, a scalar value, and an optional second value which
+is a Hash Reference of configuration options for that storage.  In this case,
+we are setting the Replicated storage type using '::DBI::Replicated' as the
+first value.  You will only use a different value if you are subclassing the
+replicated storage, so for now just copy that first parameter.
+
+The second parameter contains a hash reference of options that get passed to the
+replicated storage.  L<DBIx::Class::Storage::DBI::Replicated/balancer_type> is
+the type of software load balancer you will use to split up traffic among all
+your replicants.  Right now we have two options, "::Random" and "::First". You
+can review documentation for both at:
+
+L<DBIx::Class::Storage::DBI::Replicated::Balancer::First>,
+L<DBIx::Class::Storage::DBI::Replicated::Balancer::Random>.
+
+In this case we will have three replicants, so the ::Random option is the only
+one that makes sense.
+
+'balancer_args' get passed to the balancer when it's instantiated.  All
+balancers have the 'auto_validate_every' option.  This is the number of seconds
+we allow to pass between validation checks on a load balanced replicant. So
+the higher the number, the greater the possibility that your reads from the replicant
+may be inconsistent with what's on the master.  Setting this number too low
+will result in increased database loads, so choose a number with care.  Our
+experience is that setting the number around 5 seconds results in a good
+performance / integrity balance.
+
+'master_read_weight' is an option associated with the ::Random balancer.  It
+allows you to let the master be read from.  I usually leave this off (default
+is off).
+
+The 'pool_args' are configuration options associated with the replicant pool.
+This object (L<DBIx::Class::Storage::DBI::Replicated::Pool>) manages all the
+declared replicants.  'maximum_lag' is the number of seconds a replicant is
+allowed to lag behind the master before being temporarily removed from the pool.
+Keep in mind that the Balancer option 'auto_validate_every' determines how often
+a replicant is tested against this condition, so the true possible lag can be
+higher than the number you set.  The default is zero.
+
+No matter how low you set the maximum_lag or the auto_validate_every settings,
+there is always the chance that your replicants will lag a bit behind the
+master with the replication system built into MySQL.  You can ensure
+reliable reads by using a transaction, which will force both read and write
+activity to the master; however, this will increase the load on your master
+database.
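+
+For example, everything inside the transaction below - reads included - is
+routed to the master (hypothetical resultset name):
+
+  $schema->txn_do(sub {
+    my $user = $schema->resultset('User')->find($user_id); # read from master
+    $user->update({ visits => $user->visits + 1 });        # write to master
+  });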
+
+After you've configured the replicated storage, you need to add the connection
+information for the replicants:
+
+  $schema->storage->connect_replicants(
+    [$dsn1, $user, $pass, \%opts],
+    [$dsn2, $user, $pass, \%opts],
+    [$dsn3, $user, $pass, \%opts],
+  );
+
+These replicants should be configured as slaves to the master using the
+instructions for MySQL native replication, or if you are just learning, you
+will find L<MySQL::Sandbox> an easy way to set up a replication cluster.
+
+And now your $schema object is properly configured!  Enjoy!
+
+=head1 AUTHOR
+
+John Napiorkowski <jjnapiork at cpan.org>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,12 +1,13 @@
 package DBIx::Class::Storage::DBI::Replicated::Pool;
 
 use Moose;
-use MooseX::AttributeHelpers;
 use DBIx::Class::Storage::DBI::Replicated::Replicant;
 use List::Util 'sum';
 use Scalar::Util 'reftype';
+use DBI ();
 use Carp::Clan qw/^DBIx::Class/;
 use MooseX::Types::Moose qw/Num Int ClassName HashRef/;
+use DBIx::Class::Storage::DBI::Replicated::Types 'DBICStorageDBI';
 
 use namespace::clean -except => 'meta';
 
@@ -18,11 +19,11 @@
 
 This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.  You
 shouldn't need to create instances of this class.
-  
+
 =head1 DESCRIPTION
 
 In a replicated storage type, there is at least one replicant to handle the
-read only traffic.  The Pool class manages this replicant, or list of 
+read-only traffic.  The Pool class manages this replicant, or list of 
 replicants, and gives some methods for querying information about their status.
 
 =head1 ATTRIBUTES
@@ -34,7 +35,7 @@
 This is a number which defines the maximum allowed lag returned by the
 L<DBIx::Class::Storage::DBI/lag_behind_master> method.  The default is 0.  In
 general, this should return a larger number when the replicant is lagging
-behind it's master, however the implementation of this is database specific, so
+behind its master, however the implementation of this is database specific, so
 don't count on this number having a fixed meaning.  For example, MySQL will
 return a number of seconds that the replicating database is lagging.
 
@@ -51,8 +52,8 @@
 =head2 last_validated
 
 This is an integer representing a time since the last time the replicants were
-validated. It's nothing fancy, just an integer provided via the perl time 
-builtin.
+validated. It's nothing fancy, just an integer provided via the perl L<time|perlfunc/time>
+built-in.
 
 =cut
 
@@ -86,14 +87,14 @@
 =head2 replicants
 
 A hashref of replicant, with the key being the dsn and the value returning the
-actual replicant storage.  For example if the $dsn element is something like:
+actual replicant storage.  For example, if the $dsn element is something like:
 
   "dbi:SQLite:dbname=dbfile"
-  
+
 You could access the specific replicant via:
 
   $schema->storage->replicants->{'dbname=dbfile'}
-  
+
 This attributes also supports the following helper methods:
 
 =over 4
@@ -116,7 +117,7 @@
 
 =item delete_replicant ($key)
 
-removes the replicant under $key from the pool
+Removes the replicant under $key from the pool
 
 =back
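+
+For example, a minimal sketch using a few of these helpers, where C<$pool> is
+the pool instance (i.e. C<< $schema->storage->pool >>):
+
+  if ($pool->has_replicants) {
+    printf "pool holds %d replicant(s)\n", $pool->num_replicants;
+    $pool->delete_replicant('dbname=dbfile');
+  }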
 
@@ -124,18 +125,42 @@
 
 has 'replicants' => (
   is=>'rw',
-  metaclass => 'Collection::Hash',
-  isa=>HashRef['DBIx::Class::Storage::DBI'],
+  traits => ['Hash'],
+  isa=>HashRef['Object'],
   default=>sub {{}},
-  provides  => {
-    'set' => 'set_replicant',
-    'get' => 'get_replicant',            
-    'empty' => 'has_replicants',
-    'count' => 'num_replicants',
-    'delete' => 'delete_replicant',
+  handles  => {
+    'set_replicant' => 'set',
+    'get_replicant' => 'get',
+    'has_replicants' => 'is_empty',
+    'num_replicants' => 'count',
+    'delete_replicant' => 'delete',
+    'all_replicant_storages' => 'values',
   },
 );
 
+around has_replicants => sub {
+    my ($orig, $self) = @_;
+    return !$self->$orig;
+};
+
+has next_unknown_replicant_id => (
+  is => 'rw',
+  traits => ['Counter'],
+  isa => Int,
+  default => 1,
+  handles => {
+    'inc_unknown_replicant_id' => 'inc',
+  },
+);
+
+=head2 master
+
+Reference to the master Storage.
+
+=cut
+
+has master => (is => 'rw', isa => DBICStorageDBI, weak_ref => 1);
+
 =head1 METHODS
 
 This class defines the following methods.
@@ -151,25 +176,54 @@
 sub connect_replicants {
   my $self = shift @_;
   my $schema = shift @_;
-  
+
   my @newly_created = ();
   foreach my $connect_info (@_) {
     $connect_info = [ $connect_info ]
       if reftype $connect_info ne 'ARRAY';
 
-    croak "coderef replicant connect_info not supported"
-      if ref $connect_info->[0] && reftype $connect_info->[0] eq 'CODE';
+    my $connect_coderef =
+      (reftype($connect_info->[0])||'') eq 'CODE' ? $connect_info->[0]
+        : (reftype($connect_info->[0])||'') eq 'HASH' &&
+          $connect_info->[0]->{dbh_maker};
 
-    my $replicant = $self->connect_replicant($schema, $connect_info);
+    my $dsn;
+    my $replicant = do {
+# yes this is evil, but it usually only happens once (for coderefs)
+# this will fail if the coderef does not actually DBI::connect
+      no warnings 'redefine';
+      my $connect = \&DBI::connect;
+      local *DBI::connect = sub {
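+        # called as DBI->connect($dsn, ...), so $_[0] is the class
+        # and $_[1] is the DSN we want to capture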
+        $dsn = $_[1];
+        goto $connect;
+      };
+      $self->connect_replicant($schema, $connect_info);
+    };
 
-    my $key = $connect_info->[0];
-    $key = $key->{dsn} if ref $key && reftype $key eq 'HASH';
-    ($key) = ($key =~ m/^dbi\:.+\:(.+)$/);
+    my $key;
 
-    $self->set_replicant( $key => $replicant);  
+    if (!$dsn) {
+      if (!$connect_coderef) {
+        $dsn = $connect_info->[0];
+        $dsn = $dsn->{dsn} if (reftype($dsn)||'') eq 'HASH';
+      }
+      else {
+        # all attempts to get the DSN failed
+        $key = "UNKNOWN_" . $self->next_unknown_replicant_id;
+        $self->inc_unknown_replicant_id;
+      }
+    }
+    if ($dsn) {
+      $replicant->dsn($dsn);
+      ($key) = ($dsn =~ m/^dbi\:.+\:(.+)$/i);
+    }
+
+    $replicant->id($key);
+    $self->set_replicant($key => $replicant);  
+
     push @newly_created, $replicant;
   }
-  
+
   return @newly_created;
 }
 
@@ -198,7 +252,13 @@
     $replicant->_determine_driver
   });
 
-  DBIx::Class::Storage::DBI::Replicated::Replicant->meta->apply($replicant);  
+  Moose::Meta::Class->initialize(ref $replicant);
+
+  DBIx::Class::Storage::DBI::Replicated::Replicant->meta->apply($replicant);
+
+  # link back to master
+  $replicant->master($self->master);
+
   return $replicant;
 }
 
@@ -208,7 +268,7 @@
 connect.  For the master database this is desirable, but since replicants are
 allowed to fail, this behavior is not desirable.  This method wraps the call
 to ensure_connected in an eval in order to catch any generated errors.  That
-way a slave can go completely offline (ie, the box itself can die) without
+way a slave can go completely offline (e.g. the box itself can die) without
 bringing down your entire pool of databases.
 
 =cut
@@ -235,16 +295,15 @@
 
   eval {
     $code->()
-  }; 
+  };
   if ($@) {
-    $replicant
-      ->debugobj
-      ->print(
-        sprintf( "Exception trying to $name for replicant %s, error is %s",
-          $replicant->_dbi_connect_info->[0], $@)
-        );
-  	return;
+    $replicant->debugobj->print(sprintf(
+      "Exception trying to $name for replicant %s, error is %s",
+      $replicant->_dbi_connect_info->[0], $@)
+    );
+    return undef;
   }
+
   return 1;
 }
 
@@ -306,7 +365,7 @@
 inactive, and thus removed from the replication pool.
 
 This tests L<all_replicants>, since a replicant that has been previous marked
-as inactive can be reactived should it start to pass the validation tests again.
+as inactive can be reactivated should it start to pass the validation tests again.
 
 See L<DBIx::Class::Storage::DBI> for more about checking if a replicating
 connection is not following a master or is lagging.

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,7 +3,8 @@
 use Moose::Role;
 requires qw/_query_start/;
 with 'DBIx::Class::Storage::DBI::Replicated::WithDSN';
-use MooseX::Types::Moose 'Bool';
+use MooseX::Types::Moose qw/Bool Str/;
+use DBIx::Class::Storage::DBI::Replicated::Types 'DBICStorageDBI';
 
 use namespace::clean -except => 'meta';
 
@@ -14,7 +15,7 @@
 =head1 SYNOPSIS
 
 This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
-    
+
 =head1 DESCRIPTION
 
 Replicants are DBI Storages that follow a master DBI Storage.  Typically this
@@ -32,14 +33,14 @@
 =head2 active
 
 This is a boolean which allows you to programmatically activate or deactivate a
-replicant from the pool.  This way to you do stuff like disallow a replicant
-when it get's too far behind the master, if it stops replicating, etc.
+replicant from the pool.  This way you can do stuff like disallow a replicant
+when it gets too far behind the master, if it stops replicating, etc.
 
 This attribute DOES NOT reflect a replicant's internal status, i.e. if it is
 properly replicating from a master and has not fallen too many seconds behind a
 reliability threshold.  For that, use L</is_replicating>  and L</lag_behind_master>.
 Since the implementation of those functions database specific (and not all DBIC
-supported DB's support replication) you should refer your database specific
+supported DBs support replication) you should refer to your database-specific
 storage driver for more information.
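+
+For example, to deactivate a replicant by hand (the pool key shown is
+illustrative):
+
+  $schema->storage->replicants->{'dbname=dbfile'}->active(0);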
 
 =cut
@@ -52,6 +53,17 @@
   default=>1,
 );
 
+has dsn => (is => 'rw', isa => Str);
+has id  => (is => 'rw', isa => Str);
+
+=head2 master
+
+Reference to the master Storage.
+
+=cut
+
+has master => (is => 'rw', isa => DBICStorageDBI, weak_ref => 1);
+
 =head1 METHODS
 
 This class defines the following methods.
@@ -63,7 +75,9 @@
 =cut
 
 sub debugobj {
-    return shift->schema->storage->debugobj;
+  my $self = shift;
+
+  return $self->master->debugobj;
 }
 
 =head1 ALSO SEE

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Types.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Types.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/Types.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,32 +1,31 @@
 package # hide from PAUSE
   DBIx::Class::Storage::DBI::Replicated::Types;
 
-=head1 NAME
+# DBIx::Class::Storage::DBI::Replicated::Types - Types used internally by
+# L<DBIx::Class::Storage::DBI::Replicated>
 
-DBIx::Class::Storage::DBI::Replicated::Types - Types used internally by
-L<DBIx::Class::Storage::DBI::Replicated>
-
-=cut
-
 use MooseX::Types
-  -declare => [qw/BalancerClassNamePart Weight/];
+  -declare => [qw/BalancerClassNamePart Weight DBICSchema DBICStorageDBI/];
 use MooseX::Types::Moose qw/ClassName Str Num/;
 
 class_type 'DBIx::Class::Storage::DBI';
 class_type 'DBIx::Class::Schema';
 
+subtype DBICSchema, as 'DBIx::Class::Schema';
+subtype DBICStorageDBI, as 'DBIx::Class::Storage::DBI';
+
 subtype BalancerClassNamePart,
   as ClassName;
-    
+
 coerce BalancerClassNamePart,
   from Str,
   via {
     my $type = $_;
     if($type=~m/^::/) {
       $type = 'DBIx::Class::Storage::DBI::Replicated::Balancer'.$type;
-    }  
-    Class::MOP::load_class($type);  
-    $type;  	
+    }
+    Class::MOP::load_class($type);
+    $type;
   };
 
 subtype Weight,
@@ -34,14 +33,12 @@
   where { $_ >= 0 },
   message { 'weight must be a decimal greater than 0' };
 
-=head1 AUTHOR
+# AUTHOR
+#
+#  John Napiorkowski <john.napiorkowski at takkle.com>
+#
+# LICENSE
+#
+#  You may distribute this code under the same terms as Perl itself.
 
-  John Napiorkowski <john.napiorkowski at takkle.com>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut
-
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,6 +1,7 @@
 package DBIx::Class::Storage::DBI::Replicated::WithDSN;
 
 use Moose::Role;
+use Scalar::Util 'reftype';
 requires qw/_query_start/;
 
 use namespace::clean -except => 'meta';
@@ -13,7 +14,7 @@
 =head1 SYNOPSIS
 
 This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
-    
+
 =head1 DESCRIPTION
 
 This role adds C<DSN: > info to storage debugging output.
@@ -30,8 +31,25 @@
 
 around '_query_start' => sub {
   my ($method, $self, $sql, @bind) = @_;
-  my $dsn = $self->_dbi_connect_info->[0];
-  $self->$method("DSN: $dsn SQL: $sql", @bind);
+
+  my $dsn = eval { $self->dsn } || $self->_dbi_connect_info->[0];
+
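+  # split the SQL into its leading verb and the rest; if the match fails, the
+  # trailing 'NOP'/'NO SQL' list elements act as fallback values for $op/$rest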
+  my($op, $rest) = (($sql=~m/^(\w+)(.+)$/),'NOP', 'NO SQL');
+  my $storage_type = $self->can('active') ? 'REPLICANT' : 'MASTER';
+
+  my $query = do {
+    if ((reftype($dsn)||'') ne 'CODE') {
+      "$op [DSN_$storage_type=$dsn]$rest";
+    }
+    elsif (my $id = eval { $self->id }) {
+      "$op [$storage_type=$id]$rest";
+    }
+    else {
+      "$op [$storage_type]$rest";
+    }
+  };
+
+  $self->$method($query, @bind);
 };
 
 =head1 ALSO SEE

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Replicated.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,37 +3,20 @@
 BEGIN {
   use Carp::Clan qw/^DBIx::Class/;
 
-  ## Modules required for Replication support not required for general DBIC
-  ## use, so we explicitly test for these.
-
-  my %replication_required = (
-    Moose => '0.77',
-    MooseX::AttributeHelpers => '0.12',
-    MooseX::Types => '0.10',
-    namespace::clean => '0.11',
-    Hash::Merge => '0.11'
-  );
-
-  my @didnt_load;
-
-  for my $module (keys %replication_required) {
-    eval "use $module $replication_required{$module}";
-    push @didnt_load, "$module $replication_required{$module}" if $@;
-  }
-
-  croak("@{[ join ', ', @didnt_load ]} are missing and are required for Replication")
-    if @didnt_load;
+  use DBIx::Class;
+  croak('The following modules are required for Replication ' . DBIx::Class::Optional::Dependencies->req_missing_for ('replicated') )
+    unless DBIx::Class::Optional::Dependencies->req_ok_for ('replicated');
 }
 
 use Moose;
 use DBIx::Class::Storage::DBI;
 use DBIx::Class::Storage::DBI::Replicated::Pool;
 use DBIx::Class::Storage::DBI::Replicated::Balancer;
-use DBIx::Class::Storage::DBI::Replicated::Types 'BalancerClassNamePart';
+use DBIx::Class::Storage::DBI::Replicated::Types qw/BalancerClassNamePart DBICSchema DBICStorageDBI/;
 use MooseX::Types::Moose qw/ClassName HashRef Object/;
 use Scalar::Util 'reftype';
-use Carp::Clan qw/^DBIx::Class/;
-use Hash::Merge 'merge';
+use Hash::Merge;
+use List::Util qw/min max/;
 
 use namespace::clean -except => 'meta';
 
@@ -44,42 +27,56 @@
 =head1 SYNOPSIS
 
 The Following example shows how to change an existing $schema to a replicated
-storage type, add some replicated (readonly) databases, and perform reporting
+storage type, add some replicated (read-only) databases, and perform reporting
 tasks.
 
-  ## Change storage_type in your schema class
+You should set the 'storage_type' attribute to a replicated type.  You should
+also define your arguments, such as which balancer you want and any arguments
+that the Pool object should get.
+
+  my $schema = Schema::Class->clone;
   $schema->storage_type( ['::DBI::Replicated', {balancer=>'::Random'}] );
-  
-  ## Add some slaves.  Basically this is an array of arrayrefs, where each
-  ## arrayref is database connect information
-  
+  $schema->connection(...);
+
+Next, you need to add in the Replicants.  Basically this is an array of 
+arrayrefs, where each arrayref is database connect information.  Think of these
+arguments as what you'd pass to the 'normal' $schema->connect method.
+
   $schema->storage->connect_replicants(
     [$dsn1, $user, $pass, \%opts],
     [$dsn2, $user, $pass, \%opts],
     [$dsn3, $user, $pass, \%opts],
   );
-  
-  ## Now, just use the $schema as normal
+
+Now, just use the $schema as you normally would.  All reads will automatically
+be delegated to the replicants, while writes go to the master.
+
   $schema->resultset('Source')->search({name=>'etc'});
-  
-  ## You can force a given query to use a particular storage using the search
-  ### attribute 'force_pool'.  For example:
-  
+
+You can force a given query to use a particular storage using the search
+attribute 'force_pool'.  For example:
+
   my $RS = $schema->resultset('Source')->search(undef, {force_pool=>'master'});
-  
-  ## Now $RS will force everything (both reads and writes) to use whatever was
-  ## setup as the master storage.  'master' is hardcoded to always point to the
-  ## Master, but you can also use any Replicant name.  Please see:
-  ## L<DBIx::Class::Storage::Replicated::Pool> and the replicants attribute for
-  ## More. Also see transactions and L</execute_reliably> for alternative ways
-  ## to force read traffic to the master.
-  
+
+Now $RS will force everything (both reads and writes) to use whatever was set
+up as the master storage.  'master' is hardcoded to always point to the Master,
+but you can also use any Replicant name.  Please see:
+L<DBIx::Class::Storage::DBI::Replicated::Pool> and the replicants attribute for more.
+
+Also see transactions and L</execute_reliably> for alternative ways to
+force read traffic to the master.  In general, you should wrap your statements
+in a transaction when you are reading and writing to the same tables at the
+same time, since your replicants will often lag a bit behind the master.
+
+See L<DBIx::Class::Storage::DBI::Replicated::Introduction> for more help and
+walkthroughs.
+
 =head1 DESCRIPTION
 
 Warning: This class is marked BETA.  This has been running a production
 website using MySQL native replication as its backend and we have some decent
 test coverage but the code hasn't yet been stressed by a variety of databases.
-Individual DB's may have quirks we are not aware of.  Please use this in first
+Individual DBs may have quirks we are not aware of.  Please use this in first
 development and pass along your experiences/bug fixes.
 
 This class implements replicated data store for DBI. Currently you can define
@@ -93,29 +90,21 @@
 to all existing storages.  This way our storage class is a drop in replacement
 for L<DBIx::Class::Storage::DBI>.
 
-Read traffic is spread across the replicants (slaves) occuring to a user
+Read traffic is spread across the replicants (slaves) according to a user
 selected algorithm.  The default algorithm is random weighted.
 
 =head1 NOTES
 
-The consistancy betweeen master and replicants is database specific.  The Pool
-gives you a method to validate it's replicants, removing and replacing them
+The consistency between master and replicants is database specific.  The Pool
+gives you a method to validate its replicants, removing and replacing them
 when they fail/pass predefined criteria.  Please make careful use of the ways
 to force a query to run against Master when needed.
 
 =head1 REQUIREMENTS
 
-Replicated Storage has additional requirements not currently part of L<DBIx::Class>
+Replicated Storage has additional requirements not currently part of
+L<DBIx::Class>. See L<DBIx::Class::Optional::Dependencies> for more details.
 
-  Moose => 0.77
-  MooseX::AttributeHelpers => 0.12 
-  MooseX::Types => 0.10
-  namespace::clean => 0.11
-  Hash::Merge => 0.11
-  
-You will need to install these modules manually via CPAN or make them part of the
-Makefile for your distribution.
-
 =head1 ATTRIBUTES
 
 This class defines the following attributes.
@@ -128,7 +117,7 @@
 
 has 'schema' => (
     is=>'rw',
-    isa=>'DBIx::Class::Schema',
+    isa=>DBICSchema,
     weak_ref=>1,
     required=>1,
 );
@@ -152,7 +141,7 @@
 =head2 pool_args
 
 Contains a hashref of initialized information to pass to the Balancer object.
-See L<DBIx::Class::Storage::Replicated::Pool> for available arguments.
+See L<DBIx::Class::Storage::DBI::Replicated::Pool> for available arguments.
 
 =cut
 
@@ -185,7 +174,7 @@
 =head2 balancer_args
 
 Contains a hashref of initialized information to pass to the Balancer object.
-See L<DBIx::Class::Storage::Replicated::Balancer> for available arguments.
+See L<DBIx::Class::Storage::DBI::Replicated::Balancer> for available arguments.
 
 =cut
 
@@ -209,7 +198,7 @@
   isa=>'DBIx::Class::Storage::DBI::Replicated::Pool',
   lazy_build=>1,
   handles=>[qw/
-    connect_replicants    
+    connect_replicants
     replicants
     has_replicants
   /],
@@ -241,7 +230,7 @@
 
 has 'master' => (
   is=> 'ro',
-  isa=>'DBIx::Class::Storage::DBI',
+  isa=>DBICStorageDBI,
   lazy_build=>1,
 );
 
@@ -264,12 +253,17 @@
     select
     select_single
     columns_info_for
-  /],    
+    _dbh_columns_info_for 
+    _select
+  /],
 );
 
 =head2 write_handler
 
-Defines an object that implements the write side of L<BIx::Class::Storage::DBI>.
+Defines an object that implements the write side of L<DBIx::Class::Storage::DBI>,
+as well as methods that neither read nor write but should be called on only one
+storage, methods that return a C<$dbh>, and any methods that don't make sense to
+run on a replicant.
 
 =cut
 
@@ -280,7 +274,10 @@
   handles=>[qw/
     on_connect_do
     on_disconnect_do
+    on_connect_call
+    on_disconnect_call
     connect_info
+    _connect_info
     throw_exception
     sql_maker
     sqlt_type
@@ -288,6 +285,7 @@
     deployment_statements
     datetime_parser
     datetime_parser_type
+    build_datetime_parser
     last_insert_id
     insert
     insert_bulk
@@ -302,11 +300,71 @@
     sth
     deploy
     with_deferred_fk_checks
-    run_file_against_storage
-
+    dbh_do
     reload_row
+    with_deferred_fk_checks
     _prep_for_execute
-
+    backup
+    is_datatype_numeric
+    _count_select
+    _subq_count_select
+    _subq_update_delete
+    svp_rollback
+    svp_begin
+    svp_release
+    relname_to_table_alias
+    _straight_join_to_node
+    _dbh_last_insert_id
+    _fix_bind_params
+    _default_dbi_connect_attributes
+    _dbi_connect_info
+    auto_savepoint
+    _sqlt_version_ok
+    _query_end
+    bind_attribute_by_data_type
+    transaction_depth
+    _dbh
+    _select_args
+    _dbh_execute_array
+    _sql_maker_args
+    _sql_maker
+    _query_start
+    _sqlt_version_error
+    _per_row_update_delete
+    _dbh_begin_work
+    _dbh_execute_inserts_with_no_binds
+    _select_args_to_query
+    _svp_generate_name
+    _multipk_update_delete
+    source_bind_attributes
+    _normalize_connect_info
+    _parse_connect_do
+    _dbh_commit
+    _execute_array
+    _placeholders_supported
+    _verify_pid
+    savepoints
+    _sqlt_minimum_version
+    _sql_maker_opts
+    _conn_pid
+    _typeless_placeholders_supported
+    _conn_tid
+    _dbh_autocommit
+    _native_data_type
+    _get_dbh
+    sql_maker_class
+    _dbh_rollback
+    _adjust_select_args_for_complex_prefetch
+    _resolve_ident_sources
+    _resolve_column_info
+    _prune_unused_joins
+    _strip_cond_qualifiers
+    _parse_order_by
+    _resolve_aliastypes_from_select_args
+    _execute
+    _do_query
+    _dbh_sth
+    _dbh_execute
   /],
 );
 
@@ -315,8 +373,8 @@
 
 =head2 around: connect_info
 
-Preserve master's C<connect_info> options (for merging with replicants.)
-Also set any Replicated related options from connect_info, such as
+Preserves master's C<connect_info> options (for merging with replicants).
+Also sets any Replicated-related options from connect_info, such as
 C<pool_type>, C<pool_args>, C<balancer_type> and C<balancer_args>.
 
 =cut
@@ -326,10 +384,12 @@
 
   my $wantarray = wantarray;
 
+  my $merge = Hash::Merge->new;
+
   my %opts;
   for my $arg (@$info) {
     next unless (reftype($arg)||'') eq 'HASH';
-    %opts = %{ merge($arg, \%opts) };
+    %opts = %{ $merge->merge($arg, \%opts) };
   }
   delete $opts{dsn};
 
@@ -338,11 +398,11 @@
       if $opts{pool_type};
 
     $self->pool_args(
-      merge((delete $opts{pool_args} || {}), $self->pool_args)
+      $merge->merge((delete $opts{pool_args} || {}), $self->pool_args)
     );
 
     $self->pool($self->_build_pool)
-	if $self->pool;
+      if $self->pool;
   }
 
   if (@opts{qw/balancer_type balancer_args/}) {
@@ -350,11 +410,11 @@
       if $opts{balancer_type};
 
     $self->balancer_args(
-      merge((delete $opts{balancer_args} || {}), $self->balancer_args)
+      $merge->merge((delete $opts{balancer_args} || {}), $self->balancer_args)
     );
 
     $self->balancer($self->_build_balancer)
-	if $self->balancer;
+      if $self->balancer;
   }
 
   $self->_master_connect_info_opts(\%opts);
@@ -370,8 +430,12 @@
   my $master = $self->master;
   $master->_determine_driver;
   Moose::Meta::Class->initialize(ref $master);
+
   DBIx::Class::Storage::DBI::Replicated::WithDSN->meta->apply($master);
 
+  # link pool back to master
+  $self->pool->master($master);
+
   $wantarray ? @res : $res;
 };
 
@@ -381,19 +445,19 @@
 
 =head2 BUILDARGS
 
-L<DBIx::Class::Schema> when instantiating it's storage passed itself as the
+L<DBIx::Class::Schema> when instantiating its storage passes itself as the
 first argument.  So we need to massage the arguments a bit so that all the
 bits get put into the correct places.
 
 =cut
 
 sub BUILDARGS {
-  my ($class, $schema, $storage_type_args, @args) = @_;	
-  
+  my ($class, $schema, $storage_type_args, @args) = @_;  
+
   return {
-  	schema=>$schema, 
-  	%$storage_type_args,
-  	@args
+    schema=>$schema,
+    %$storage_type_args,
+    @args
   }
 }
 
@@ -430,7 +494,7 @@
 sub _build_balancer {
   my $self = shift @_;
   $self->create_balancer(
-    pool=>$self->pool, 
+    pool=>$self->pool,
     master=>$self->master,
     %{$self->balancer_args},
   );
@@ -472,32 +536,40 @@
   for my $r (@args) {
     $r = [ $r ] unless reftype $r eq 'ARRAY';
 
-    croak "coderef replicant connect_info not supported"
+    $self->throw_exception('coderef replicant connect_info not supported')
       if ref $r->[0] && reftype $r->[0] eq 'CODE';
 
 # any connect_info options?
     my $i = 0;
     $i++ while $i < @$r && (reftype($r->[$i])||'') ne 'HASH';
 
-# make one if none    
+# make one if none
     $r->[$i] = {} unless $r->[$i];
 
 # merge if two hashes
     my @hashes = @$r[$i .. $#{$r}];
 
-    croak "invalid connect_info options"
+    $self->throw_exception('invalid connect_info options')
       if (grep { reftype($_) eq 'HASH' } @hashes) != @hashes;
 
-    croak "too many hashrefs in connect_info"
+    $self->throw_exception('too many hashrefs in connect_info')
       if @hashes > 2;
 
-    my %opts = %{ merge(reverse @hashes) };
+    my $merge = Hash::Merge->new;
+    my %opts = %{ $merge->merge(reverse @hashes) };
 
 # delete them
     splice @$r, $i+1, ($#{$r} - $i), ();
 
+# make sure master/replicants opts don't clash
+    my %master_opts = %{ $self->_master_connect_info_opts };
+    if (exists $opts{dbh_maker}) {
+        delete @master_opts{qw/dsn user password/};
+    }
+    delete $master_opts{dbh_maker};
+
 # merge with master
-    %opts = %{ merge(\%opts, $self->_master_connect_info_opts) };
+    %opts = %{ $merge->merge(\%opts, \%master_opts) };
 
 # update
     $r->[$i] = \%opts;
@@ -525,7 +597,7 @@
 =head2 execute_reliably ($coderef, ?@args)
 
 Given a coderef, saves the current state of the L</read_handler>, forces it to
-use reliable storage (ie sets it to the master), executes a coderef and then
+use reliable storage (i.e. sets it to the master), executes a coderef and then
 restores the original state.
 
 Example:
@@ -546,24 +618,24 @@
 
 sub execute_reliably {
   my ($self, $coderef, @args) = @_;
-  
+
   unless( ref $coderef eq 'CODE') {
     $self->throw_exception('Second argument must be a coderef');
   }
-  
+
   ##Get copy of master storage
   my $master = $self->master;
-  
+
   ##Get whatever the current read hander is
   my $current = $self->read_handler;
-  
+
   ##Set the read handler to master
   $self->read_handler($master);
-  
+
   ## do whatever the caller needs
   my @result;
   my $want_array = wantarray;
-  
+
   eval {
     if($want_array) {
       @result = $coderef->(@args);
@@ -571,15 +643,15 @@
       ($result[0]) = ($coderef->(@args));
     } else {
       $coderef->(@args);
-    }       
+    }
   };
-  
+
   ##Reset to the original state
-  $self->read_handler($current); 
-  
+  $self->read_handler($current);
+
   ##Exception testing has to come last, otherwise you might leave the 
   ##read_handler set to master.
-  
+
   if($@) {
     $self->throw_exception("coderef returned an error: $@");
   } else {
@@ -591,45 +663,32 @@
 
 Sets the current $schema to be 'reliable', that is all queries, both read and
 write are sent to the master
-  
+
 =cut
 
 sub set_reliable_storage {
   my $self = shift @_;
   my $schema = $self->schema;
   my $write_handler = $self->schema->storage->write_handler;
-  
+
   $schema->storage->read_handler($write_handler);
 }
 
 =head2 set_balanced_storage
 
 Sets the current $schema to be use the </balancer> for all reads, while all
-writea are sent to the master only
-  
+writes are sent to the master only
+
 =cut
 
 sub set_balanced_storage {
   my $self = shift @_;
   my $schema = $self->schema;
-  my $write_handler = $self->schema->storage->balancer;
-  
-  $schema->storage->read_handler($write_handler);
+  my $balanced_handler = $self->schema->storage->balancer;
+
+  $schema->storage->read_handler($balanced_handler);
 }
 
-=head2 around: txn_do ($coderef)
-
-Overload to the txn_do method, which is delegated to whatever the
-L<write_handler> is set to.  We overload this in order to wrap in inside a
-L</execute_reliably> method.
-
-=cut
-
-around 'txn_do' => sub {
-  my($txn_do, $self, $coderef, @args) = @_;
-  $self->execute_reliably(sub {$self->$txn_do($coderef, @args)}); 
-};
-
 =head2 connected
 
 Check that the master and at least one of the replicants is connected.
@@ -722,57 +781,42 @@
   if(@_) {
     foreach my $source ($self->all_storages) {
       $source->debug(@_);
-    }   
+    }
   }
   return $self->master->debug;
 }
 
 =head2 debugobj
 
-set a debug object across all storages
+set a debug object
 
 =cut
 
 sub debugobj {
   my $self = shift @_;
-  if(@_) {
-    foreach my $source ($self->all_storages) {
-      $source->debugobj(@_);
-    } 	
-  }
-  return $self->master->debugobj;
+  return $self->master->debugobj(@_);
 }
 
 =head2 debugfh
 
-set a debugfh object across all storages
+set a debugfh object
 
 =cut
 
 sub debugfh {
   my $self = shift @_;
-  if(@_) {
-    foreach my $source ($self->all_storages) {
-      $source->debugfh(@_);
-    }   
-  }
-  return $self->master->debugfh;
+  return $self->master->debugfh(@_);
 }
 
 =head2 debugcb
 
-set a debug callback across all storages
+set a debug callback
 
 =cut
 
 sub debugcb {
   my $self = shift @_;
-  if(@_) {
-    foreach my $source ($self->all_storages) {
-      $source->debugcb(@_);
-    }   
-  }
-  return $self->master->debugcb;
+  return $self->master->debugcb(@_);
 }
 
 =head2 disconnect
@@ -802,7 +846,166 @@
   }
   $self->master->cursor_class;
 }
+
+=head2 cursor
+
+set cursor class on all storages, or return master's, alias for L</cursor_class>
+above.
+
+=cut
+
+sub cursor {
+  my ($self, $cursor_class) = @_;
+
+  if ($cursor_class) {
+    $_->cursor($cursor_class) for $self->all_storages;
+  }
+  $self->master->cursor;
+}
+
+=head2 unsafe
+
+sets the L<DBIx::Class::Storage::DBI/unsafe> option on all storages or returns
+master's current setting
+
+=cut
+
+sub unsafe {
+  my $self = shift;
+
+  if (@_) {
+    $_->unsafe(@_) for $self->all_storages;
+  }
+
+  return $self->master->unsafe;
+}
+
+=head2 disable_sth_caching
+
+sets the L<DBIx::Class::Storage::DBI/disable_sth_caching> option on all storages
+or returns master's current setting
+
+=cut
+
+sub disable_sth_caching {
+  my $self = shift;
+
+  if (@_) {
+    $_->disable_sth_caching(@_) for $self->all_storages;
+  }
+
+  return $self->master->disable_sth_caching;
+}
+
+=head2 lag_behind_master
+
+returns the highest Replicant L<DBIx::Class::Storage::DBI/lag_behind_master>
+setting
+
+=cut
+
+sub lag_behind_master {
+  my $self = shift;
+
+  return max map $_->lag_behind_master, $self->replicants;
+} 
+
+=head2 is_replicating
+
+returns true if all replicants return true for
+L<DBIx::Class::Storage::DBI/is_replicating>
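+
+For example, a crude health check built on this and L</lag_behind_master>
+(a sketch; the threshold is illustrative):
+
+  warn 'replication appears broken'
+    if !$schema->storage->is_replicating
+    || $schema->storage->lag_behind_master > 10;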
+
+=cut
+
+sub is_replicating {
+  my $self = shift;
+
+  return (grep $_->is_replicating, $self->replicants) == ($self->replicants);
+}
+
+=head2 connect_call_datetime_setup
+
+calls L<DBIx::Class::Storage::DBI/connect_call_datetime_setup> for all storages
+
+=cut
+
+sub connect_call_datetime_setup {
+  my $self = shift;
+  $_->connect_call_datetime_setup for $self->all_storages;
+}
+
+sub _populate_dbh {
+  my $self = shift;
+  $_->_populate_dbh for $self->all_storages;
+}
+
+sub _connect {
+  my $self = shift;
+  $_->_connect for $self->all_storages;
+}
+
+sub _rebless {
+  my $self = shift;
+  $_->_rebless for $self->all_storages;
+}
+
+sub _determine_driver {
+  my $self = shift;
+  $_->_determine_driver for $self->all_storages;
+}
+
+sub _driver_determined {
+  my $self = shift;
   
+  if (@_) {
+    $_->_driver_determined(@_) for $self->all_storages;
+  }
+
+  return $self->master->_driver_determined;
+}
+
+sub _init {
+  my $self = shift;
+  
+  $_->_init for $self->all_storages;
+}
+
+sub _run_connection_actions {
+  my $self = shift;
+  
+  $_->_run_connection_actions for $self->all_storages;
+}
+
+sub _do_connection_actions {
+  my $self = shift;
+  
+  if (@_) {
+    $_->_do_connection_actions(@_) for $self->all_storages;
+  }
+}
+
+sub connect_call_do_sql {
+  my $self = shift;
+  $_->connect_call_do_sql(@_) for $self->all_storages;
+}
+
+sub disconnect_call_do_sql {
+  my $self = shift;
+  $_->disconnect_call_do_sql(@_) for $self->all_storages;
+}
+
+sub _seems_connected {
+  my $self = shift;
+
+  return min map $_->_seems_connected, $self->all_storages;
+}
+
+sub _ping {
+  my $self = shift;
+
+  return min map $_->_ping, $self->all_storages;
+}
+
 =head1 GOTCHAS
 
 Due to the fact that replicants can lag behind a master, you must take care to
@@ -836,7 +1039,7 @@
 
   my $new_schema = $schema->clone;
   $new_schema->set_reliable_storage;
-  
+
   ## $new_schema will use only the Master storage for all reads/writes while
   ## the $schema object will use replicated storage.
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Role/QueryCounter.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Role/QueryCounter.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Role/QueryCounter.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -19,7 +19,7 @@
 
 This package defines the following attributes.
 
-head2 _query_count
+=head2 _query_count
 
 Is the attribute holding the current query count.  It defines a public reader
 called 'query_count' which you can use to access the total number of queries
@@ -42,7 +42,7 @@
 
 =head2 _query_start
 
-override on the method so that we count the queries.
+Overrides the method so that we count the queries.
 
 =cut
 

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/SQLAnywhere.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/SQLAnywhere.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/SQLAnywhere.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,163 @@
+package DBIx::Class::Storage::DBI::SQLAnywhere;
+
+use strict;
+use warnings;
+use base qw/DBIx::Class::Storage::DBI/;
+use mro 'c3';
+use List::Util ();
+
+__PACKAGE__->mk_group_accessors(simple => qw/
+  _identity
+/);
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::SQLAnywhere - Driver for Sybase SQL Anywhere
+
+=head1 DESCRIPTION
+
+This class implements autoincrements for Sybase SQL Anywhere, selects the
+RowNumberOver limit implementation and provides
+L<DBIx::Class::InflateColumn::DateTime> support.
+
+You need the C<DBD::SQLAnywhere> driver that comes with the SQL Anywhere
+distribution, B<NOT> the one on CPAN. It is usually under a path such as:
+
+  /opt/sqlanywhere11/sdk/perl
+
+Recommended L<DBIx::Class::Storage::DBI/connect_info> settings:
+
+  on_connect_call => 'datetime_setup'
+
+=head1 METHODS
+
+=cut
+
+sub last_insert_id { shift->_identity }
+
+sub insert {
+  my $self = shift;
+  my ($source, $to_insert) = @_;
+
+  my $identity_col = List::Util::first {
+      $source->column_info($_)->{is_auto_increment} 
+  } $source->columns;
+
+# user might have an identity PK without is_auto_increment
+  if (not $identity_col) {
+    foreach my $pk_col ($source->primary_columns) {
+      if (not exists $to_insert->{$pk_col}) {
+        $identity_col = $pk_col;
+        last;
+      }
+    }
+  }
+
+  if ($identity_col && (not exists $to_insert->{$identity_col})) {
+    my $dbh = $self->_get_dbh;
+    my $table_name = $source->from;
+    $table_name    = $$table_name if ref $table_name;
+
+    my ($identity) = $dbh->selectrow_array("SELECT GET_IDENTITY('$table_name')");
+
+    $to_insert->{$identity_col} = $identity;
+
+    $self->_identity($identity);
+  }
+
+  return $self->next::method(@_);
+}
+
+# this sub stolen from DB2
+
+sub _sql_maker_opts {
+  my ( $self, $opts ) = @_;
+
+  if ( $opts ) {
+    $self->{_sql_maker_opts} = { %$opts };
+  }
+
+  return { limit_dialect => 'RowNumberOver', %{$self->{_sql_maker_opts}||{}} };
+}
+
+# this sub stolen from MSSQL
+
+sub build_datetime_parser {
+  my $self = shift;
+  my $type = "DateTime::Format::Strptime";
+  eval "use ${type}";
+  $self->throw_exception("Couldn't load ${type}: $@") if $@;
+  return $type->new( pattern => '%Y-%m-%d %H:%M:%S.%6N' );
+}
+
+=head2 connect_call_datetime_setup
+
+Used as:
+
+    on_connect_call => 'datetime_setup'
+
+In L<DBIx::Class::Storage::DBI/connect_info> to set the date and timestamp
+formats (as temporary options for the session) for use with
+L<DBIx::Class::InflateColumn::DateTime>.
+
+The C<TIMESTAMP> data type supports up to 6 digits after the decimal point for
+second precision. The full precision is used.
+
+The C<DATE> data type supposedly stores hours and minutes too, according to the
+documentation, but I could not get that to work. It seems to only store the
+date.
+
+You will need the L<DateTime::Format::Strptime> module for inflation to work.
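+
+A minimal sketch of a connection using this setting (DSN and credentials are
+illustrative):
+
+  my $schema = MyApp::Schema->connect(
+    'dbi:SQLAnywhere:ENG=demo', 'dba', 'sql',
+    { on_connect_call => 'datetime_setup' },
+  );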
+
+=cut
+
+sub connect_call_datetime_setup {
+  my $self = shift;
+
+  $self->_do_query(
+    "set temporary option timestamp_format = 'yyyy-mm-dd hh:mm:ss.ssssss'"
+  );
+  $self->_do_query(
+    "set temporary option date_format      = 'yyyy-mm-dd hh:mm:ss.ssssss'"
+  );
+}
+
+sub _svp_begin {
+    my ($self, $name) = @_;
+
+    $self->_get_dbh->do("SAVEPOINT $name");
+}
+
+# can't release savepoints that have been rolled back
+sub _svp_release { 1 }
+
+sub _svp_rollback {
+    my ($self, $name) = @_;
+
+    $self->_get_dbh->do("ROLLBACK TO SAVEPOINT $name")
+}
+
+1;
+
+=head1 MAXIMUM CURSORS
+
+A L<DBIx::Class> application can use a lot of cursors, due to the usage of
+L<DBI/prepare_cached>.
+
+The default cursor maximum is C<50>, which can be a bit too low. This limit can
+be turned off (or increased) by the DBA by executing:
+
+  set option max_statement_count = 0
+  set option max_cursor_count    = 0
+
+Highly recommended.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/SQLite.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/SQLite.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/SQLite.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -2,17 +2,14 @@
 
 use strict;
 use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+use mro 'c3';
+
 use POSIX 'strftime';
 use File::Copy;
 use File::Spec;
 
-use base qw/DBIx::Class::Storage::DBI/;
-
-sub _dbh_last_insert_id {
-  my ($self, $dbh, $source, $col) = @_;
-  $dbh->func('last_insert_rowid');
-}
-
 sub backup
 {
   my ($self, $dir) = @_;
@@ -45,6 +42,22 @@
   return $backupfile;
 }
 
+sub deployment_statements {
+  my $self = shift;
+  my ($schema, $type, $version, $dir, $sqltargs, @rest) = @_;
+
+  $sqltargs ||= {};
+
+  my $sqlite_version = $self->_get_dbh->{sqlite_version};
+
+  # numify, SQLT does a numeric comparison
+  $sqlite_version =~ s/^(\d+) \. (\d+) (?: \. (\d+))? .*/${1}.${2}/x;
+
+  $sqltargs->{producer_args}{sqlite_version} = $sqlite_version;
+
+  $self->next::method($schema, $type, $version, $dir, $sqltargs, @rest);
+}
+
 sub datetime_parser_type { return "DateTime::Format::SQLite"; } 
 
 1;
@@ -56,7 +69,7 @@
 =head1 SYNOPSIS
 
   # In your table classes
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  use base 'DBIx::Class::Core';
   __PACKAGE__->set_primary_key('id');
 
 =head1 DESCRIPTION

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,102 @@
+package DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars;
+
+use base qw/
+  DBIx::Class::Storage::DBI::NoBindVars
+  DBIx::Class::Storage::DBI::Sybase::ASE
+/;
+use mro 'c3';
+use List::Util ();
+use Scalar::Util ();
+
+sub _init {
+  my $self = shift;
+  $self->disable_sth_caching(1);
+  $self->_identity_method('@@IDENTITY');
+  $self->next::method (@_);
+}
+
+sub _fetch_identity_sql { 'SELECT ' . $_[0]->_identity_method }
+
+my $number = sub { Scalar::Util::looks_like_number($_[0]) };
+
+my $decimal = sub { $_[0] =~ /^ [-+]? \d+ (?:\.\d*)? \z/x };
+
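+# data types whose values may be interpolated unquoted; each key is matched
+# as a case-insensitive regex against the column's declared data_type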
+my %noquote = (
+    int => sub { $_[0] =~ /^ [-+]? \d+ \z/x },
+    bit => sub { $_[0] =~ /^[01]\z/ },
+    money => sub { $_[0] =~ /^\$ \d+ (?:\.\d*)? \z/x },
+    float => $number,
+    real => $number,
+    double => $number,
+    decimal => $decimal,
+    numeric => $decimal,
+);
+
+sub interpolate_unquoted {
+  my $self = shift;
+  my ($type, $value) = @_;
+
+  return $self->next::method(@_) if not defined $value or not defined $type;
+
+  if (my $key = List::Util::first { $type =~ /$_/i } keys %noquote) {
+    return 1 if $noquote{$key}->($value);
+  }
+  elsif ($self->is_datatype_numeric($type) && $number->($value)) {
+    return 1;
+  }
+
+  return $self->next::method(@_);
+}
+
+sub _prep_interpolated_value {
+  my ($self, $type, $value) = @_;
+
+  if ($type =~ /money/i && defined $value) {
+    # prepend a '$' if the value does not already start with one
+    $value =~ s/^ (?! \$) /\$/x;
+  }
+
+  return $value;
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars - Storage::DBI subclass for
+Sybase ASE without placeholder support
+
+=head1 DESCRIPTION
+
+If you're using this driver then your version of Sybase or the libraries you
+use to connect to it do not support placeholders.
+
+You can also enable this driver explicitly using:
+
+  my $schema = SchemaClass->clone;
+  $schema->storage_type('::DBI::Sybase::ASE::NoBindVars');
+  $schema->connect($dsn, $user, $pass, \%opts);
+
+See the discussion in L<< DBD::Sybase/Using ? Placeholders & bind parameters to
+$sth->execute >> for details on the pros and cons of using placeholders.
+
+One advantage of not using placeholders is that C<select @@identity> will work
+for obtaining the last insert id of an C<IDENTITY> column, instead of having to
+do C<select max(col)> in a transaction as the base Sybase driver does.
+
+When using this driver, bind variables will be interpolated (properly quoted of
+course) into the SQL query itself, without using placeholders.
+
+The caching of prepared statements is also explicitly disabled, as the
+interpolation renders it useless.
+
+=head1 AUTHORS
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+# vim:sts=2 sw=2:

Copied: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase.pm)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,1173 @@
+package DBIx::Class::Storage::DBI::Sybase::ASE;
+
+use strict;
+use warnings;
+
+use base qw/
+    DBIx::Class::Storage::DBI::Sybase
+    DBIx::Class::Storage::DBI::AutoCast
+/;
+use mro 'c3';
+use Carp::Clan qw/^DBIx::Class/;
+use Scalar::Util();
+use List::Util();
+use Sub::Name();
+use Data::Dumper::Concise();
+
+__PACKAGE__->mk_group_accessors('simple' =>
+    qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
+       _bulk_storage _is_bulk_storage _began_bulk_work
+       _bulk_disabled_due_to_coderef_connect_info_warned
+       _identity_method/
+);
+
+my @also_proxy_to_extra_storages = qw/
+  connect_call_set_auto_cast auto_cast connect_call_blob_setup
+  connect_call_datetime_setup
+
+  disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
+  auto_savepoint unsafe cursor_class debug debugobj schema
+/;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase::ASE - Sybase ASE SQL Server support for
+DBIx::Class
+
+=head1 SYNOPSIS
+
+This subclass supports L<DBD::Sybase> for real (non-Microsoft) Sybase databases.
+
+=head1 DESCRIPTION
+
+If your version of Sybase does not support placeholders, then your storage will
+be reblessed to L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
+You can also enable that driver explicitly, see the documentation for more
+details.
+
+With this driver there is unfortunately no way to get the C<last_insert_id>
+without doing a C<SELECT MAX(col)>. This is done safely in a transaction
+(locking the table). See L</INSERTS WITH PLACEHOLDERS>.
+
+A recommended L<DBIx::Class::Storage::DBI/connect_info> setting:
+
+  on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]]
+
+=head1 METHODS
+
+=cut
+
+sub _rebless {
+  my $self = shift;
+
+  my $no_bind_vars = __PACKAGE__ . '::NoBindVars';
+
+  if ($self->using_freetds) {
+    carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
+
+You are using FreeTDS with Sybase.
+
+We will do our best to support this configuration, but please consider this
+support experimental.
+
+TEXT/IMAGE columns will definitely not work.
+
+You are encouraged to recompile DBD::Sybase with the Sybase Open Client libraries
+instead.
+
+See perldoc DBIx::Class::Storage::DBI::Sybase::ASE for more details.
+
+To turn off this warning set the DBIC_SYBASE_FREETDS_NOWARN environment
+variable.
+EOF
+
+    if (not $self->_typeless_placeholders_supported) {
+      if ($self->_placeholders_supported) {
+        $self->auto_cast(1);
+      }
+      else {
+        $self->ensure_class_loaded($no_bind_vars);
+        bless $self, $no_bind_vars;
+        $self->_rebless;
+      }
+    }
+  }
+
+  elsif (not $self->_get_dbh->{syb_dynamic_supported}) {
+    # not necessarily FreeTDS, but no placeholders nevertheless
+    $self->ensure_class_loaded($no_bind_vars);
+    bless $self, $no_bind_vars;
+    $self->_rebless;
+  }
+  # this is highly unlikely, but we check just in case
+  elsif (not $self->_typeless_placeholders_supported) {
+    $self->auto_cast(1);
+  }
+}
+
+sub _init {
+  my $self = shift;
+  $self->_set_max_connect(256);
+
+# create storage for insert/(update blob) transactions,
+# unless this is that storage
+  return if $self->_is_extra_storage;
+
+  my $writer_storage = (ref $self)->new;
+
+  $writer_storage->_is_extra_storage(1);
+  $writer_storage->connect_info($self->connect_info);
+  $writer_storage->auto_cast($self->auto_cast);
+
+  $self->_writer_storage($writer_storage);
+
+# create a bulk storage unless connect_info is a coderef
+  return if ref($self->_dbi_connect_info->[0]) eq 'CODE';
+
+  my $bulk_storage = (ref $self)->new;
+
+  $bulk_storage->_is_extra_storage(1);
+  $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
+  $bulk_storage->connect_info($self->connect_info);
+
+# this is why
+  $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
+
+  $self->_bulk_storage($bulk_storage);
+}
+
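+# wrap each listed method so that calls are also forwarded to the writer and
+# bulk storages (when present) before being run on the invoking storage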
+for my $method (@also_proxy_to_extra_storages) {
+  no strict 'refs';
+  no warnings 'redefine';
+
+  my $replaced = __PACKAGE__->can($method);
+
+  *{$method} = Sub::Name::subname $method => sub {
+    my $self = shift;
+    $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
+    $self->_bulk_storage->$replaced(@_)   if $self->_bulk_storage;
+    return $self->$replaced(@_);
+  };
+}
+
+sub disconnect {
+  my $self = shift;
+
+# Even though we call $sth->finish for uses of the bulk API, there's still an
+# "active statement" warning on disconnect, which we throw away here.
+# This is due to the bug described in insert_bulk.
+# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
+  local $SIG{__WARN__} = sub {
+    warn $_[0] unless $_[0] =~ /active statement/i;
+  } if $self->_is_bulk_storage;
+
+# so that next transaction gets a dbh
+  $self->_began_bulk_work(0) if $self->_is_bulk_storage;
+
+  $self->next::method;
+}
+
+# Set up session settings for Sybase databases for the connection.
+#
+# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
+# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
+# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
+# only want when AutoCommit is off.
+#
+# Also SET TEXTSIZE for FreeTDS because LongReadLen doesn't work.
+sub _run_connection_actions {
+  my $self = shift;
+
+  if ($self->_is_bulk_storage) {
+# this should be cleared on every reconnect
+    $self->_began_bulk_work(0);
+    return;
+  }
+
+  if (not $self->using_freetds) {
+    $self->_dbh->{syb_chained_txn} = 1;
+  } else {
+    # based on LongReadLen in connect_info
+    $self->set_textsize;
+
+    if ($self->_dbh_autocommit) {
+      $self->_dbh->do('SET CHAINED OFF');
+    } else {
+      $self->_dbh->do('SET CHAINED ON');
+    }
+  }
+
+  $self->next::method(@_);
+}
+
+=head2 connect_call_blob_setup
+
+Used as:
+
+  on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ]
+
+Does C<< $dbh->{syb_binary_images} = 1; >> to return C<IMAGE> data as raw binary
+instead of as a hex string.
+
+Recommended.
+
+Also sets the C<log_on_update> value for blob write operations. The default is
+C<1>, but C<0> is better if your database is configured for it.
+
+See
+L<DBD::Sybase/Handling_IMAGE/TEXT_data_with_syb_ct_get_data()/syb_ct_send_data()>.
+
+=cut
+
+sub connect_call_blob_setup {
+  my $self = shift;
+  my %args = @_;
+  my $dbh = $self->_dbh;
+  $dbh->{syb_binary_images} = 1;
+
+  $self->_blob_log_on_update($args{log_on_update})
+    if exists $args{log_on_update};
+}
+
+sub _is_lob_type {
+  my $self = shift;
+  my $type = shift;
+  $type && $type =~ /(?:text|image|lob|bytea|binary|memo)/i;
+}
+
+sub _is_lob_column {
+  my ($self, $source, $column) = @_;
+
+  return $self->_is_lob_type($source->column_info($column)->{data_type});
+}
+
+sub _prep_for_execute {
+  my $self = shift;
+  my ($op, $extra_bind, $ident, $args) = @_;
+
+  my ($sql, $bind) = $self->next::method (@_);
+
+  my $table = Scalar::Util::blessed($ident) ? $ident->from : $ident;
+
+  my $bind_info = $self->_resolve_column_info(
+    $ident, [map $_->[0], @{$bind}]
+  );
+  my $bound_identity_col = List::Util::first
+    { $bind_info->{$_}{is_auto_increment} }
+    (keys %$bind_info)
+  ;
+  my $identity_col = Scalar::Util::blessed($ident) &&
+    List::Util::first
+    { $ident->column_info($_)->{is_auto_increment} }
+    $ident->columns
+  ;
+
+  if (($op eq 'insert' && $bound_identity_col) ||
+      ($op eq 'update' && exists $args->[0]{$identity_col})) {
+    $sql = join ("\n",
+      $self->_set_table_identity_sql($op => $table, 'on'),
+      $sql,
+      $self->_set_table_identity_sql($op => $table, 'off'),
+    );
+  }
+
+  if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
+      (not $self->{insert_bulk})) {
+    $sql =
+      "$sql\n" .
+      $self->_fetch_identity_sql($ident, $identity_col);
+  }
+
+  return ($sql, $bind);
+}
+
+sub _set_table_identity_sql {
+  my ($self, $op, $table, $on_off) = @_;
+
+  return sprintf 'SET IDENTITY_%s %s %s',
+    uc($op), $self->sql_maker->_quote($table), uc($on_off);
+}
+
+# Stolen from SQLT, with some modifications. This is a makeshift
+# solution before a sane type-mapping library is available, thus
+# the 'our' for easy overrides.
+our %TYPE_MAPPING  = (
+    number    => 'numeric',
+    money     => 'money',
+    varchar   => 'varchar',
+    varchar2  => 'varchar',
+    timestamp => 'datetime',
+    text      => 'varchar',
+    real      => 'double precision',
+    comment   => 'text',
+    bit       => 'bit',
+    tinyint   => 'smallint',
+    float     => 'double precision',
+    serial    => 'numeric',
+    bigserial => 'numeric',
+    boolean   => 'varchar',
+    long      => 'varchar',
+);
+
+sub _native_data_type {
+  my ($self, $type) = @_;
+
+  $type = lc $type;
+  $type =~ s/\s* identity//x;
+
+  return uc($TYPE_MAPPING{$type} || $type);
+}
+
+sub _fetch_identity_sql {
+  my ($self, $source, $col) = @_;
+
+  return sprintf ("SELECT MAX(%s) FROM %s",
+    map { $self->sql_maker->_quote ($_) } ($col, $source->from)
+  );
+}
+
+sub _execute {
+  my $self = shift;
+  my ($op) = @_;
+
+  my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);
+
+  if ($op eq 'insert') {
+    $self->_identity($sth->fetchrow_array);
+    $sth->finish;
+  }
+
+  return wantarray ? ($rv, $sth, @bind) : $rv;
+}
+
+sub last_insert_id { shift->_identity }
+
+# handles TEXT/IMAGE and transaction for last_insert_id
+sub insert {
+  my $self = shift;
+  my ($source, $to_insert) = @_;
+
+  my $identity_col = (List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns) || '';
+
+  # check for empty insert
+  # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
+  # try to insert explicit 'DEFAULT's instead (except for identity, timestamp
+  # and computed columns)
+  if (not %$to_insert) {
+    for my $col ($source->columns) {
+      next if $col eq $identity_col;
+
+      my $info = $source->column_info($col);
+
+      next if ref $info->{default_value} eq 'SCALAR'
+        || (exists $info->{data_type} && (not defined $info->{data_type}));
+
+      next if $info->{data_type} && $info->{data_type} =~ /^timestamp\z/i;
+
+      $to_insert->{$col} = \'DEFAULT';
+    }
+  }
+
+  my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
+
+  # do we need the horrific SELECT MAX(COL) hack?
+  my $dumb_last_insert_id =
+       $identity_col
+    && (not exists $to_insert->{$identity_col})
+    && ($self->_identity_method||'') ne '@@IDENTITY';
+
+  my $next = $self->next::can;
+
+  # we are already in a transaction, or there are no blobs
+  # and we don't need the PK - just (try to) do it
+  if ($self->{transaction_depth}
+        || (!$blob_cols && !$dumb_last_insert_id)
+  ) {
+    return $self->_insert (
+      $next, $source, $to_insert, $blob_cols, $identity_col
+    );
+  }
+
+  # otherwise use the _writer_storage to do the insert+transaction on another
+  # connection
+  my $guard = $self->_writer_storage->txn_scope_guard;
+
+  my $updated_cols = $self->_writer_storage->_insert (
+    $next, $source, $to_insert, $blob_cols, $identity_col
+  );
+
+  $self->_identity($self->_writer_storage->_identity);
+
+  $guard->commit;
+
+  return $updated_cols;
+}
+
+sub _insert {
+  my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;
+
+  my $updated_cols = $self->$next ($source, $to_insert);
+
+  my $final_row = {
+    ($identity_col ?
+      ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
+    %$to_insert,
+    %$updated_cols,
+  };
+
+  $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;
+
+  return $updated_cols;
+}
+
+sub update {
+  my $self = shift;
+  my ($source, $fields, $where, @rest) = @_;
+
+  my $wantarray = wantarray;
+
+  my $blob_cols = $self->_remove_blob_cols($source, $fields);
+
+  my $table = $source->name;
+
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
+
+  my $is_identity_update = $identity_col && defined $fields->{$identity_col};
+
+  return $self->next::method(@_) unless $blob_cols;
+
+# If there are any blobs in $where, Sybase will return a descriptive error
+# message.
+# XXX blobs can still be used with a LIKE query, and this should be handled.
+
+# update+blob update(s) done atomically on separate connection
+  $self = $self->_writer_storage;
+
+  my $guard = $self->txn_scope_guard;
+
+# First update the blob columns to be updated to '' (taken from $fields, where
+# it is originally put by _remove_blob_cols .)
+  my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
+
+# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
+
+  $self->next::method($source, \%blobs_to_empty, $where, @rest);
+
+# Now update the blobs before the other columns in case the update of other
+# columns makes the search condition invalid.
+  $self->_update_blobs($source, $blob_cols, $where);
+
+  my @res;
+  if (%$fields) {
+    if ($wantarray) {
+      @res    = $self->next::method(@_);
+    }
+    elsif (defined $wantarray) {
+      $res[0] = $self->next::method(@_);
+    }
+    else {
+      $self->next::method(@_);
+    }
+  }
+
+  $guard->commit;
+
+  return $wantarray ? @res : $res[0];
+}
+
+sub insert_bulk {
+  my $self = shift;
+  my ($source, $cols, $data) = @_;
+
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
+
+  my $is_identity_insert = (List::Util::first
+    { $_ eq $identity_col }
+    @{$cols}
+  ) ? 1 : 0;
+
+  my @source_columns = $source->columns;
+
+  my $use_bulk_api =
+    $self->_bulk_storage &&
+    $self->_get_dbh->{syb_has_blk};
+
+  if ((not $use_bulk_api)
+        &&
+      (ref($self->_dbi_connect_info->[0]) eq 'CODE')
+        &&
+      (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
+    carp <<'EOF';
+Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
+regular array inserts.
+EOF
+    $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
+  }
+
+  if (not $use_bulk_api) {
+    my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
+
+# _execute_array uses a txn anyway, but it ends too early in case we need to
+# select max(col) to get the identity for inserting blobs.
+    ($self, my $guard) = $self->{transaction_depth} == 0 ?
+      ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
+      :
+      ($self, undef);
+
+    local $self->{insert_bulk} = 1;
+
+    $self->next::method(@_);
+
+    if ($blob_cols) {
+      if ($is_identity_insert) {
+        $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
+      }
+      else {
+        my @cols_with_identities = (@$cols, $identity_col);
+
+        ## calculate identities
+        # XXX This assumes identities always increase by 1, which may or may not
+        # be true.
+        my ($last_identity) =
+          $self->_dbh->selectrow_array (
+            $self->_fetch_identity_sql($source, $identity_col)
+          );
+        my @identities = (($last_identity - @$data + 1) .. $last_identity);
+
+        my @data_with_identities = map [@$_, shift @identities], @$data;
+
+        $self->_insert_blobs_array (
+          $source, $blob_cols, \@cols_with_identities, \@data_with_identities
+        );
+      }
+    }
+
+    $guard->commit if $guard;
+
+    return;
+  }
+
+# otherwise, use the bulk API
+
+# rearrange @$data so that columns are in database order
+  my %orig_idx;
+  @orig_idx{@$cols} = 0..$#$cols;
+
+  my %new_idx;
+  @new_idx{@source_columns} = 0..$#source_columns;
+
+  my @new_data;
+  for my $datum (@$data) {
+    my $new_datum = [];
+    for my $col (@source_columns) {
+# identity data will be 'undef' if not $is_identity_insert
+# columns with defaults will also be 'undef'
+      $new_datum->[ $new_idx{$col} ] =
+        exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
+    }
+    push @new_data, $new_datum;
+  }
+
+# bcp identity index is 1-based
+  my $identity_idx = exists $new_idx{$identity_col} ?
+    $new_idx{$identity_col} + 1 : 0;
+
+## Set a client-side conversion error handler, straight from DBD::Sybase docs.
+# This ignores any data conversion errors detected by the client side libs, as
+# they are usually harmless.
+  my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
+    Sub::Name::subname insert_bulk => sub {
+      my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
+
+      return 1 if $errno == 36;
+
+      carp
+        "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
+        ($errmsg ? "\n$errmsg" : '') .
+        ($osmsg  ? "\n$osmsg"  : '')  .
+        ($blkmsg ? "\n$blkmsg" : '');
+
+      return 0;
+  });
+
+  eval {
+    my $bulk = $self->_bulk_storage;
+
+    my $guard = $bulk->txn_scope_guard;
+
+## XXX get this to work instead of our own $sth
+## will require SQLA or *Hacks changes for ordered columns
+#    $bulk->next::method($source, \@source_columns, \@new_data, {
+#      syb_bcp_attribs => {
+#        identity_flag   => $is_identity_insert,
+#        identity_column => $identity_idx,
+#      }
+#    });
+    my $sql = 'INSERT INTO ' .
+      $bulk->sql_maker->_quote($source->name) . ' (' .
+# colname list is ignored for BCP, but does no harm
+      (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
+      ' VALUES ('.  (join ', ', ('?') x @source_columns) . ')';
+
+## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
+## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
+## better yet the version above. Should be fixed in DBD::Sybase .
+    my $sth = $bulk->_get_dbh->prepare($sql,
+#      'insert', # op
+      {
+        syb_bcp_attribs => {
+          identity_flag   => $is_identity_insert,
+          identity_column => $identity_idx,
+        }
+      }
+    );
+
+    my @bind = do {
+      my $idx = 0;
+      map [ $_, $idx++ ], @source_columns;
+    };
+
+    $self->_execute_array(
+      $source, $sth, \@bind, \@source_columns, \@new_data, sub {
+        $guard->commit
+      }
+    );
+
+    $bulk->_query_end($sql);
+  };
+
+  my $exception = $@;
+  DBD::Sybase::set_cslib_cb($orig_cslib_cb);
+
+  if ($exception =~ /-Y option/) {
+    carp <<"EOF";
+
+Sybase bulk API operation failed due to character set incompatibility, reverting
+to regular array inserts:
+
+*** Try unsetting the LANG environment variable.
+
+$exception
+EOF
+    $self->_bulk_storage(undef);
+    unshift @_, $self;
+    goto \&insert_bulk;
+  }
+  elsif ($exception) {
+# rollback makes the bulkLogin connection unusable
+    $self->_bulk_storage->disconnect;
+    $self->throw_exception($exception);
+  }
+}
+
+sub _dbh_execute_array {
+  my ($self, $sth, $tuple_status, $cb) = @_;
+
+  my $rv = $self->next::method($sth, $tuple_status);
+  $cb->() if $cb;
+
+  return $rv;
+}
+
+# Make sure blobs are not bound as placeholders, and return any non-empty ones
+# as a hash.
+sub _remove_blob_cols {
+  my ($self, $source, $fields) = @_;
+
+  my %blob_cols;
+
+  for my $col (keys %$fields) {
+    if ($self->_is_lob_column($source, $col)) {
+      my $blob_val = delete $fields->{$col};
+      if (not defined $blob_val) {
+        $fields->{$col} = \'NULL';
+      }
+      else {
+        $fields->{$col} = \"''";
+        $blob_cols{$col} = $blob_val unless $blob_val eq '';
+      }
+    }
+  }
+
+  return %blob_cols ? \%blob_cols : undef;
+}
+
+# same for insert_bulk
+sub _remove_blob_cols_array {
+  my ($self, $source, $cols, $data) = @_;
+
+  my @blob_cols;
+
+  for my $i (0..$#$cols) {
+    my $col = $cols->[$i];
+
+    if ($self->_is_lob_column($source, $col)) {
+      for my $j (0..$#$data) {
+        my $blob_val = delete $data->[$j][$i];
+        if (not defined $blob_val) {
+          $data->[$j][$i] = \'NULL';
+        }
+        else {
+          $data->[$j][$i] = \"''";
+          $blob_cols[$j][$i] = $blob_val
+            unless $blob_val eq '';
+        }
+      }
+    }
+  }
+
+  return @blob_cols ? \@blob_cols : undef;
+}
+
+sub _update_blobs {
+  my ($self, $source, $blob_cols, $where) = @_;
+
+  my (@primary_cols) = $source->primary_columns;
+
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
+    unless @primary_cols;
+
+# check if we're updating a single row by PK
+  my $pk_cols_in_where = 0;
+  for my $col (@primary_cols) {
+    $pk_cols_in_where++ if defined $where->{$col};
+  }
+  my @rows;
+
+  if ($pk_cols_in_where == @primary_cols) {
+    my %row_to_update;
+    @row_to_update{@primary_cols} = @{$where}{@primary_cols};
+    @rows = \%row_to_update;
+  } else {
+    my $cursor = $self->select ($source, \@primary_cols, $where, {});
+    @rows = map {
+      my %row; @row{@primary_cols} = @$_; \%row
+    } $cursor->all;
+  }
+
+  for my $row (@rows) {
+    $self->_insert_blobs($source, $blob_cols, $row);
+  }
+}
+
+sub _insert_blobs {
+  my ($self, $source, $blob_cols, $row) = @_;
+  my $dbh = $self->_get_dbh;
+
+  my $table = $source->name;
+
+  my %row = %$row;
+  my (@primary_cols) = $source->primary_columns;
+
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
+    unless @primary_cols;
+
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
+    if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
+
+  for my $col (keys %$blob_cols) {
+    my $blob = $blob_cols->{$col};
+
+    my %where = map { ($_, $row{$_}) } @primary_cols;
+
+    my $cursor = $self->select ($source, [$col], \%where, {});
+    $cursor->next;
+    my $sth = $cursor->sth;
+
+    if (not $sth) {
+
+      $self->throw_exception(
+          "Could not find row in table '$table' for blob update:\n"
+        . Data::Dumper::Concise::Dumper (\%where)
+      );
+    }
+
+    eval {
+      do {
+        $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
+      } while $sth->fetch;
+
+      $sth->func('ct_prepare_send') or die $sth->errstr;
+
+      my $log_on_update = $self->_blob_log_on_update;
+      $log_on_update    = 1 if not defined $log_on_update;
+
+      $sth->func('CS_SET', 1, {
+        total_txtlen => length($blob),
+        log_on_update => $log_on_update
+      }, 'ct_data_info') or die $sth->errstr;
+
+      $sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr;
+
+      $sth->func('ct_finish_send') or die $sth->errstr;
+    };
+    my $exception = $@;
+    $sth->finish if $sth;
+    if ($exception) {
+      if ($self->using_freetds) {
+        $self->throw_exception (
+          'TEXT/IMAGE operation failed, probably because you are using FreeTDS: '
+          . $exception
+        );
+      } else {
+        $self->throw_exception($exception);
+      }
+    }
+  }
+}
+
+sub _insert_blobs_array {
+  my ($self, $source, $blob_cols, $cols, $data) = @_;
+
+  for my $i (0..$#$data) {
+    my $datum = $data->[$i];
+
+    my %row;
+    @row{ @$cols } = @$datum;
+
+    my %blob_vals;
+    for my $j (0..$#$cols) {
+      if (exists $blob_cols->[$i][$j]) {
+        $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
+      }
+    }
+
+    $self->_insert_blobs ($source, \%blob_vals, \%row);
+  }
+}
+
+=head2 connect_call_datetime_setup
+
+Used as:
+
+  on_connect_call => 'datetime_setup'
+
+In L<DBIx::Class::Storage::DBI/connect_info> to set:
+
+  $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
+  $dbh->do('set dateformat mdy');   # input fmt:  08/13/1979 18:08:55.080
+
+On connection for use with L<DBIx::Class::InflateColumn::DateTime>, using
+L<DateTime::Format::Sybase>, which you will need to install.
+
+This works for both C<DATETIME> and C<SMALLDATETIME> columns, although
+C<SMALLDATETIME> columns only have minute precision.
+
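+For example (a hypothetical connection; C<$dsn>, C<$user> and C<$pass> are
+placeholders):
+
+  my $schema = MySchema->connect($dsn, $user, $pass, {
+    on_connect_call => 'datetime_setup',
+  });
+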
+=cut
+
+{
+  my $old_dbd_warned = 0;
+
+  sub connect_call_datetime_setup {
+    my $self = shift;
+    my $dbh = $self->_get_dbh;
+
+    if ($dbh->can('syb_date_fmt')) {
+      # amazingly, this works with FreeTDS
+      $dbh->syb_date_fmt('ISO_strict');
+    } elsif (not $old_dbd_warned) {
+      carp "Your DBD::Sybase is too old to support ".
+      "DBIx::Class::InflateColumn::DateTime, please upgrade!";
+      $old_dbd_warned = 1;
+    }
+
+    $dbh->do('SET DATEFORMAT mdy');
+
+    1;
+  }
+}
+
+sub datetime_parser_type { "DateTime::Format::Sybase" }
+
+# ->begin_work and such have no effect with FreeTDS but we run them anyway to
+# let the DBD keep any state it needs to.
+#
+# If they ever do start working, the extra statements will do no harm (because
+# Sybase supports nested transactions.)
+
+sub _dbh_begin_work {
+  my $self = shift;
+
+# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
+# TRAN once. However, we need to make sure there's a $dbh.
+  return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
+
+  $self->next::method(@_);
+
+  if ($self->using_freetds) {
+    $self->_get_dbh->do('BEGIN TRAN');
+  }
+
+  $self->_began_bulk_work(1) if $self->_is_bulk_storage;
+}
+
+sub _dbh_commit {
+  my $self = shift;
+  if ($self->using_freetds) {
+    $self->_dbh->do('COMMIT');
+  }
+  return $self->next::method(@_);
+}
+
+sub _dbh_rollback {
+  my $self = shift;
+  if ($self->using_freetds) {
+    $self->_dbh->do('ROLLBACK');
+  }
+  return $self->next::method(@_);
+}
+
+# savepoint support using ASE syntax
+
+sub _svp_begin {
+  my ($self, $name) = @_;
+
+  $self->_get_dbh->do("SAVE TRANSACTION $name");
+}
+
+# A new SAVE TRANSACTION with the same name releases the previous one.
+sub _svp_release { 1 }
+
+sub _svp_rollback {
+  my ($self, $name) = @_;
+
+  $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
+}
+
+1;
+
+=head1 Schema::Loader Support
+
+As of version C<0.05000>, L<DBIx::Class::Schema::Loader> should work well with
+most (if not all) versions of Sybase ASE.
+
+=head1 FreeTDS
+
+This driver supports L<DBD::Sybase> compiled against FreeTDS
+(L<http://www.freetds.org/>) to the best of our ability; however, it is
+recommended that you recompile L<DBD::Sybase> against the Sybase Open Client
+libraries, which are part of the Sybase ASE distribution.
+
+The Open Client FAQ is here:
+L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.
+
+Sybase ASE for Linux (which comes with the Open Client libraries) may be
+downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
+
+To see if you're using FreeTDS, check C<< $schema->storage->using_freetds >> or run:
+
+  perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
+
+Some versions of the libraries involved will not support placeholders, in which
+case the storage will be reblessed to
+L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
+
+In some configurations, placeholders will work but will throw implicit type
+conversion errors for anything that's not expecting a string. In such a case,
+the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
+automatically set, and you may also enable it explicitly on connection with
+L<DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>. The type info
+for the C<CAST>s is taken from the L<DBIx::Class::ResultSource/data_type>
+definitions in your Result classes, and is mapped to a Sybase type (if it isn't
+one already) using a mapping based on L<SQL::Translator>.
+
+In other configurations, placeholders will work just as they do with the Sybase
+Open Client libraries.
+
+Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
+
+=head1 INSERTS WITH PLACEHOLDERS
+
+With placeholders enabled, inserts are done in a transaction so that there are
+no concurrency issues with getting the inserted identity value using
+C<SELECT MAX(col)>, which is the only way to get the C<IDENTITY> value in this
+mode.
+
+In addition, they are done on a separate connection so that it's possible to
+have active cursors when doing an insert.
+
+When using C<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars> transactions
+are disabled, as there are no concurrency issues with C<SELECT @@IDENTITY> as
+it's a session variable.
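+
+For instance, under C<AutoCommit> the following is wrapped for you in a
+transaction on the extra connection (a simplified sketch; the C<Artist> source
+and its columns are illustrative):
+
+  my $artist = $schema->resultset('Artist')->create({ name => 'Foo' });
+  my $id = $artist->id; # populated via SELECT MAX(artistid) inside the txn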
+
+=head1 TRANSACTIONS
+
+Due to limitations of the TDS protocol, L<DBD::Sybase>, or both, you cannot
+begin a transaction while there are active cursors, nor can you use multiple
+active cursors within a transaction. An active cursor is, for example, a
+L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
+C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
+
+For example, this will not work:
+
+  $schema->txn_do(sub {
+    my $rs = $schema->resultset('Book');
+    while (my $row = $rs->next) {
+      $schema->resultset('MetaData')->create({
+        book_id => $row->id,
+        ...
+      });
+    }
+  });
+
+This won't either:
+
+  my $first_row = $large_rs->first;
+  $schema->txn_do(sub { ... });
+
+Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
+are not affected, as they are done on an extra database handle.
+
+Some workarounds:
+
+=over 4
+
+=item * use L<DBIx::Class::Storage::DBI::Replicated>
+
+=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>
+
+=item * load the data from your cursor with L<DBIx::Class::ResultSet/all> (as shown below)
+
+=back
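+
+The last workaround might look like this (a minimal sketch, reusing the
+resultsets from the failing example above):
+
+  $schema->txn_do(sub {
+    # ->all exhausts the cursor up front, so none stays active in the txn
+    for my $row ($schema->resultset('Book')->all) {
+      $schema->resultset('MetaData')->create({
+        book_id => $row->id,
+        ...
+      });
+    }
+  });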
+
+=head1 MAXIMUM CONNECTIONS
+
+The TDS protocol makes separate connections to the server for active statements
+in the background. By default the number of such connections is limited to 25,
+on both the client side and the server side.
+
+This is a bit too low for a complex L<DBIx::Class> application, so on connection
+the client-side setting is raised to C<256> (see L<DBD::Sybase/maxConnect>).
+You can override it to whatever setting you like in the DSN.
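+
+For example (a hypothetical server name, shown only to illustrate the override):
+
+  ->connect('dbi:Sybase:server=mydb;maxConnect=1024', $user, $pass);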
+
+See
+L<http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.ase_15.0.sag1/html/sag1/sag1272.htm>
+for information on changing the setting on the server side.
+
+=head1 DATES
+
+See L</connect_call_datetime_setup> to set up date formats
+for L<DBIx::Class::InflateColumn::DateTime>.
+
+=head1 TEXT/IMAGE COLUMNS
+
+L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
+C<TEXT/IMAGE> columns.
+
+Setting C<< $dbh->{LongReadLen} >> will also not work with FreeTDS; use either:
+
+  $schema->storage->dbh->do("SET TEXTSIZE $bytes");
+
+or
+
+  $schema->storage->set_textsize($bytes);
+
+instead.
+
+However, the C<LongReadLen> you pass in
+L<DBIx::Class::Storage::DBI/connect_info> is used to execute the equivalent
+C<SET TEXTSIZE> command on connection.
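+
+For example (a sketch passing an illustrative one-megabyte limit):
+
+  my $schema = MySchema->connect($dsn, $user, $pass, { LongReadLen => 1024 * 1024 });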
+
+See L</connect_call_blob_setup> for a L<DBIx::Class::Storage::DBI/connect_info>
+setting you need to work with C<IMAGE> columns.
+
+=head1 BULK API
+
+The experimental L<DBD::Sybase> Bulk API support is used for
+L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
+on a separate connection.
+
+To use this feature effectively, use a large number of rows for each
+L<populate|DBIx::Class::ResultSet/populate> call, e.g.:
+
+  while (my $rows = $data_source->get_100_rows()) {
+    $rs->populate($rows);
+  }
+
+B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
+calls in your C<Result> classes B<must> list columns in database order for this
+to work. Also, you may have to unset the C<LANG> environment variable before
+loading your app, if it doesn't match the character set of your database.
+
+When inserting IMAGE columns using this method, you'll need to use
+L</connect_call_blob_setup> as well.
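+
+For example, in L<DBIx::Class::Storage::DBI/connect_info> (a minimal sketch;
+the C<log_on_update> argument is optional):
+
+  on_connect_call => [ [ blob_setup => log_on_update => 0 ] ]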
+
+=head1 COMPUTED COLUMNS
+
+If you have columns such as:
+
+  created_dtm AS getdate()
+
+represent them in your Result classes as:
+
+  created_dtm => {
+    data_type => undef,
+    default_value => \'getdate()',
+    is_nullable => 0,
+  }
+
+The C<data_type> key must exist and be set to C<undef>. Empty inserts will then
+work on tables with such columns.
+
+=head1 TIMESTAMP COLUMNS
+
+C<timestamp> columns in Sybase ASE are not really timestamps, see:
+L<http://dba.fyicenter.com/Interview-Questions/SYBASE/The_timestamp_datatype_in_Sybase_.html>.
+
+They should be defined in your Result classes as:
+
+  ts => {
+    data_type => 'timestamp',
+    is_nullable => 0,
+    inflate_datetime => 0,
+  }
+
+The C<< inflate_datetime => 0 >> is necessary if you use
+L<DBIx::Class::InflateColumn::DateTime> (as most people do) and still want to
+be able to read these values.
+
+The values will come back as hexadecimal.
+
+=head1 TODO
+
+=over
+
+=item *
+
+Transitions to AutoCommit=0 mode (i.e. starting a transaction) by first
+exhausting any active cursors, using eager cursors.
+
+=item *
+
+Real limits and limited counts using stored procedures deployed on startup.
+
+=item *
+
+Adaptive Server Anywhere (ASA) support, with possible SQLA::Limit support.
+
+=item *
+
+Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
+
+=item *
+
+C<insert_bulk> using C<prepare_cached> (see comments).
+
+=back
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+# vim:sts=2 sw=2:

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/MSSQL.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/MSSQL.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/MSSQL.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -15,6 +15,7 @@
 
 
 use base qw/DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server/;
+use mro 'c3';
 
 1;
 

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,52 @@
+package DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars;
+
+use strict;
+use warnings;
+
+use base qw/
+  DBIx::Class::Storage::DBI::NoBindVars
+  DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server
+/;
+use mro 'c3';
+
+sub _init {
+  my $self = shift;
+  $self->disable_sth_caching(1);
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars - Support for Microsoft
+SQL Server via DBD::Sybase without placeholders
+
+=head1 SYNOPSIS
+
+This subclass supports MSSQL server connections via DBD::Sybase when ? style
+placeholders are not available.
+
+=head1 DESCRIPTION
+
+If you are using this driver then your combination of L<DBD::Sybase> and
+libraries (most likely FreeTDS) does not support ? style placeholders.
+
+This storage driver uses L<DBIx::Class::Storage::DBI::NoBindVars> as a base.
+This means that bind variables will be interpolated (properly quoted of course)
+into the SQL query itself, without using bind placeholders.
+
+More importantly this means that caching of prepared statements is explicitly
+disabled, as the interpolation renders it useless.
+
+In all other respects, it is a subclass of
+L<DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server>.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -4,36 +4,82 @@
 use warnings;
 
 use base qw/
-  DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server
   DBIx::Class::Storage::DBI::Sybase
+  DBIx::Class::Storage::DBI::MSSQL
 /;
+use mro 'c3';
 
+sub _rebless {
+  my $self = shift;
+  my $dbh  = $self->_get_dbh;
+
+  return if ref $self ne __PACKAGE__;
+
+  if (not $self->_typeless_placeholders_supported) {
+    require
+      DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars;
+    bless $self,
+      'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars';
+    $self->_rebless;
+  }
+}
+
+sub _run_connection_actions {
+  my $self = shift;
+
+  # LongReadLen doesn't work with MSSQL through DBD::Sybase, and the default is
+  # huge on some versions of SQL server and can cause memory problems, so we
+  # fix it up here (see ::DBI::Sybase.pm)
+  $self->set_textsize;
+
+  $self->next::method(@_);
+}
+
+sub _dbh_begin_work {
+  my $self = shift;
+
+  $self->_get_dbh->do('BEGIN TRAN');
+}
+
+sub _dbh_commit {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot COMMIT on a disconnected handle');
+  $dbh->do('COMMIT');
+}
+
+sub _dbh_rollback {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
+  $dbh->do('ROLLBACK');
+}
+
 1;
 
 =head1 NAME
 
-DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server - Storage::DBI subclass for MSSQL via
-DBD::Sybase
+DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server - Support for Microsoft
+SQL Server via DBD::Sybase
 
 =head1 SYNOPSIS
 
 This subclass supports MSSQL server connections via L<DBD::Sybase>.
 
-=head1 CAVEATS
+=head1 DESCRIPTION
 
-This storage driver uses L<DBIx::Class::Storage::DBI::NoBindVars> as a base.
-This means that bind variables will be interpolated (properly quoted of course)
-into the SQL query itself, without using bind placeholders.
+This driver tries to determine whether your version of L<DBD::Sybase> and
+supporting libraries (usually FreeTDS) support using placeholders, if not the
+storage will be reblessed to
+L<DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars>.
 
-More importantly this means that caching of prepared statements is explicitly
-disabled, as the interpolation renders it useless.
+The MSSQL specific functionality is provided by
+L<DBIx::Class::Storage::DBI::MSSQL>.
 
-=head1 AUTHORS
+=head1 AUTHOR
 
-Brandon L Black <blblack at gmail.com>
+See L<DBIx::Class/CONTRIBUTORS>.
 
-Justin Hunter <justin.d.hunter at gmail.com>
-
 =head1 LICENSE
 
 You may distribute this code under the same terms as Perl itself.

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,54 +3,128 @@
 use strict;
 use warnings;
 
-use base qw/DBIx::Class::Storage::DBI::NoBindVars/;
+use base qw/DBIx::Class::Storage::DBI/;
 
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase - Base class for drivers using
+L<DBD::Sybase>
+
+=head1 DESCRIPTION
+
+This is the base class/dispatcher for storages designed to work with
+L<DBD::Sybase>.
+
+=head1 METHODS
+
+=cut
+
 sub _rebless {
-    my $self = shift;
+  my $self = shift;
 
-    my $dbtype = eval { @{$self->dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2] };
-    unless ( $@ ) {
-        $dbtype =~ s/\W/_/gi;
-        my $subclass = "DBIx::Class::Storage::DBI::Sybase::${dbtype}";
-        if ($self->load_optional_class($subclass) && !$self->isa($subclass)) {
-            bless $self, $subclass;
-            $self->_rebless;
-        }
+  my $dbtype = eval {
+    @{$self->_get_dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2]
+  };
+
+  $self->throw_exception("Unable to estable connection to determine database type: $@")
+    if $@;
+
+  if ($dbtype) {
+    $dbtype =~ s/\W/_/gi;
+
+    # saner class name
+    $dbtype = 'ASE' if $dbtype eq 'SQL_Server';
+
+    my $subclass = __PACKAGE__ . "::$dbtype";
+    if ($self->load_optional_class($subclass)) {
+      bless $self, $subclass;
+      $self->_rebless;
     }
+  }
 }
 
-sub _dbh_last_insert_id {
-    my ($self, $dbh, $source, $col) = @_;
-    return ($dbh->selectrow_array('select @@identity'))[0];
+sub _ping {
+  my $self = shift;
+
+  my $dbh = $self->_dbh or return 0;
+
+  local $dbh->{RaiseError} = 1;
+  local $dbh->{PrintError} = 0;
+
+  if ($dbh->{syb_no_child_con}) {
+# if extra connections are not allowed, then ->ping is reliable
+    my $ping = eval { $dbh->ping };
+    return $@ ? 0 : $ping;
+  }
+
+  eval {
+# XXX if the main connection goes stale, does opening another for this statement
+# really determine anything?
+    $dbh->do('select 1');
+  };
+
+  return $@ ? 0 : 1;
 }
 
-1;
+sub _set_max_connect {
+  my $self = shift;
+  my $val  = shift || 256;
 
-=head1 NAME
+  my $dsn = $self->_dbi_connect_info->[0];
 
-DBIx::Class::Storage::DBI::Sybase - Storage::DBI subclass for Sybase
+  return if ref($dsn) eq 'CODE';
 
-=head1 SYNOPSIS
+  if ($dsn !~ /maxConnect=/) {
+    $self->_dbi_connect_info->[0] = "$dsn;maxConnect=$val";
+    my $connected = defined $self->_dbh;
+    $self->disconnect;
+    $self->ensure_connected if $connected;
+  }
+}
 
-This subclass supports L<DBD::Sybase> for real Sybase databases.  If
-you are using an MSSQL database via L<DBD::Sybase>, see
-L<DBIx::Class::Storage::DBI::Sybase::MSSQL>.
+=head2 using_freetds
 
-=head1 CAVEATS
+Whether or not L<DBD::Sybase> was compiled against FreeTDS. If false, it means
+the Sybase Open Client libraries were used.
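+
+For example (a minimal sketch, assuming an already connected C<$schema>):
+
+  warn "consider rebuilding DBD::Sybase against Open Client libraries\n"
+    if $schema->storage->using_freetds;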
 
-This storage driver uses L<DBIx::Class::Storage::DBI::NoBindVars> as a base.
-This means that bind variables will be interpolated (properly quoted of course)
-into the SQL query itself, without using bind placeholders.
+=cut
 
-More importantly this means that caching of prepared statements is explicitly
-disabled, as the interpolation renders it useless.
+sub using_freetds {
+  my $self = shift;
 
+  return $self->_get_dbh->{syb_oc_version} =~ /freetds/i;
+}
+
+=head2 set_textsize
+
+When using FreeTDS and/or MSSQL, C<< $dbh->{LongReadLen} >> is not available;
+use this method instead. It does:
+
+  $dbh->do("SET TEXTSIZE $bytes");
+
+Takes the number of bytes, or uses the C<LongReadLen> value from your
+L<DBIx::Class/connect_info> if omitted; lastly it falls back to C<32768>, which
+is the L<DBD::Sybase> default.
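+
+For example (an illustrative size):
+
+  $schema->storage->set_textsize(1024 * 1024); # 1 MiB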
+
+=cut
+
+sub set_textsize {
+  my $self = shift;
+  my $text_size = shift ||
+    eval { $self->_dbi_connect_info->[-1]->{LongReadLen} } ||
+    32768; # the DBD::Sybase default
+
+  return unless defined $text_size;
+
+  $self->_dbh->do("SET TEXTSIZE $text_size");
+}
+
+1;
+
 =head1 AUTHORS
 
-Brandon L Black <blblack at gmail.com>
+See L<DBIx::Class/CONTRIBUTORS>.
 
-Justin Hunter <justin.d.hunter at gmail.com>
-
 =head1 LICENSE
 
 You may distribute this code under the same terms as Perl itself.


Property changes on: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/Sybase.pm
___________________________________________________________________
Name: svn:eol-style
   - native

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/mysql.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/mysql.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI/mysql.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,23 +3,51 @@
 use strict;
 use warnings;
 
-use base qw/DBIx::Class::Storage::DBI::MultiColumnIn/;
+use base qw/
+  DBIx::Class::Storage::DBI::MultiColumnIn
+  DBIx::Class::Storage::DBI::AmbiguousGlob
+  DBIx::Class::Storage::DBI
+/;
+use mro 'c3';
 
 __PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks::MySQL');
 
 sub with_deferred_fk_checks {
   my ($self, $sub) = @_;
 
-  $self->dbh->do('SET foreign_key_checks=0');
+  $self->_do_query('SET FOREIGN_KEY_CHECKS = 0');
   $sub->();
-  $self->dbh->do('SET foreign_key_checks=1');
+  $self->_do_query('SET FOREIGN_KEY_CHECKS = 1');
 }
 
+sub connect_call_set_strict_mode {
+  my $self = shift;
+
+  # the @@sql_mode puts back what was previously set on the session handle
+  $self->_do_query(q|SET SQL_MODE = CONCAT('ANSI,TRADITIONAL,ONLY_FULL_GROUP_BY,', @@sql_mode)|);
+  $self->_do_query(q|SET SQL_AUTO_IS_NULL = 0|);
+}
+
 sub _dbh_last_insert_id {
   my ($self, $dbh, $source, $col) = @_;
   $dbh->{mysql_insertid};
 }
 
+# we need to figure out what mysql version we're running
+sub sql_maker {
+  my $self = shift;
+
+  unless ($self->_sql_maker) {
+    my $maker = $self->next::method (@_);
+
+    # mysql 3 does not understand a bare JOIN
+    my $mysql_ver = $self->_get_dbh->get_info(18);
+    $maker->{_default_jointype} = 'INNER' if $mysql_ver =~ /^3/;
+  }
+
+  return $self->_sql_maker;
+}
+
 sub sqlt_type {
   return 'MySQL';
 }
@@ -27,28 +55,28 @@
 sub _svp_begin {
     my ($self, $name) = @_;
 
-    $self->dbh->do("SAVEPOINT $name");
+    $self->_get_dbh->do("SAVEPOINT $name");
 }
 
 sub _svp_release {
     my ($self, $name) = @_;
 
-    $self->dbh->do("RELEASE SAVEPOINT $name");
+    $self->_get_dbh->do("RELEASE SAVEPOINT $name");
 }
 
 sub _svp_rollback {
     my ($self, $name) = @_;
 
-    $self->dbh->do("ROLLBACK TO SAVEPOINT $name")
+    $self->_get_dbh->do("ROLLBACK TO SAVEPOINT $name")
 }
- 
+
 sub is_replicating {
-    my $status = shift->dbh->selectrow_hashref('show slave status');
+    my $status = shift->_get_dbh->selectrow_hashref('show slave status');
     return ($status->{Slave_IO_Running} eq 'Yes') && ($status->{Slave_SQL_Running} eq 'Yes');
 }
 
 sub lag_behind_master {
-    return shift->dbh->selectrow_hashref('show slave status')->{Seconds_Behind_Master};
+    return shift->_get_dbh->selectrow_hashref('show slave status')->{Seconds_Behind_Master};
 }
 
 # MySql can not do subquery update/deletes, only way is slow per-row operations.
@@ -57,38 +85,43 @@
   return shift->_per_row_update_delete (@_);
 }
 
-# MySql chokes on things like:
-# COUNT(*) FROM (SELECT tab1.col, tab2.col FROM tab1 JOIN tab2 ... )
-# claiming that col is a duplicate column (it loses the table specifiers by
-# the time it gets to the *). Thus for any subquery count we select only the
-# primary keys of the main table in the inner query. This hopefully still
-# hits the indexes and keeps mysql happy.
-# (mysql does not care if the SELECT and the GROUP BY match)
-sub _subq_count_select {
-  my ($self, $source, $rs_attrs) = @_;
-  my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns);
-  return @pcols ? \@pcols : [ 1 ];
-}
-
 1;
 
 =head1 NAME
 
-DBIx::Class::Storage::DBI::mysql - Automatic primary key class for MySQL
+DBIx::Class::Storage::DBI::mysql - Storage::DBI class implementing MySQL specifics
 
 =head1 SYNOPSIS
 
-  # In your table classes
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
-  __PACKAGE__->set_primary_key('id');
+Storage::DBI autodetects the underlying MySQL database, and re-blesses the
+C<$storage> object into this class.
 
+  my $schema = MyDb::Schema->connect( $dsn, $user, $pass, { on_connect_call => 'set_strict_mode' } );
+
 =head1 DESCRIPTION
 
-This class implements autoincrements for MySQL.
+This class implements MySQL specific bits of L<DBIx::Class::Storage::DBI>.
 
+It also provides a one-stop on-connect macro C<set_strict_mode> which sets
+session variables such that MySQL behaves more predictably as far as the
+SQL standard is concerned.
+
+=head1 STORAGE OPTIONS
+
+=head2 set_strict_mode
+
+Enables session-wide strict options upon connecting. Equivalent to:
+
+  ->connect ( ... , {
+    on_connect_do => [
+      q|SET SQL_MODE = CONCAT('ANSI,TRADITIONAL,ONLY_FULL_GROUP_BY,', @@sql_mode)|,
+      q|SET SQL_AUTO_IS_NULL = 0|,
+    ]
+  });
+
 =head1 AUTHORS
 
-Matt S. Trout <mst at shadowcatsystems.co.uk>
+See L<DBIx::Class/CONTRIBUTORS>
 
 =head1 LICENSE
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBI.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,10 +1,12 @@
 package DBIx::Class::Storage::DBI;
 # -*- mode: cperl; cperl-indent-level: 2 -*-
 
-use base 'DBIx::Class::Storage';
+use strict;
+use warnings;
 
-use strict;    
-use warnings;
+use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/;
+use mro 'c3';
+
 use Carp::Clan qw/^DBIx::Class/;
 use DBI;
 use DBIx::Class::Storage::DBI::Cursor;
@@ -12,16 +14,19 @@
 use Path::Class::File ();
 use Scalar::Util();
 use List::Util();
+use Data::Dumper::Concise();
+use Sub::Name ();
 
 __PACKAGE__->mk_group_accessors('simple' =>
-    qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts
-       _conn_pid _conn_tid transaction_depth _dbh_autocommit savepoints/
+  qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid
+     _conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/
 );
 
 # the values for these accessors are picked out (and deleted) from
 # the attribute hashref passed to connect_info
 my @storage_options = qw/
-  on_connect_do on_disconnect_do disable_sth_caching unsafe auto_savepoint
+  on_connect_call on_disconnect_call on_connect_do on_disconnect_do
+  disable_sth_caching unsafe auto_savepoint
 /;
 __PACKAGE__->mk_group_accessors('simple' => @storage_options);
 
@@ -33,6 +38,38 @@
 __PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks');
 
 
+# Each of these methods needs _determine_driver called before it
+# in order to function reliably. This is a purely DRY optimization.
+my @rdbms_specific_methods = qw/
+  sqlt_type
+  build_datetime_parser
+  datetime_parser_type
+
+  insert
+  insert_bulk
+  update
+  delete
+  select
+  select_single
+/;
+
+for my $meth (@rdbms_specific_methods) {
+
+  my $orig = __PACKAGE__->can ($meth)
+    or next;
+
+  no strict qw/refs/;
+  no warnings qw/redefine/;
+  *{__PACKAGE__ ."::$meth"} = Sub::Name::subname $meth => sub {
+    if (not $_[0]->_driver_determined) {
+      $_[0]->_determine_driver;
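+      # _determine_driver may have reblessed $_[0], so re-dispatch to pick up
+      # a driver-specific override of this method instead of $orig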
+      goto $_[0]->can($meth);
+    }
+    $orig->(@_);
+  };
+}
+
+
 =head1 NAME
 
 DBIx::Class::Storage::DBI - DBI storage handler
@@ -42,8 +79,15 @@
   my $schema = MySchema->connect('dbi:SQLite:my.db');
 
   $schema->storage->debug(1);
-  $schema->dbh_do("DROP TABLE authors");
 
+  my @stuff = $schema->storage->dbh_do(
+    sub {
+      my ($storage, $dbh, @args) = @_;
+      $dbh->do("DROP TABLE authors");
+    },
+    @column_list
+  );
+
   $schema->resultset('Book')->search({
      written_on => $schema->storage->datetime_parser(DateTime->now)
   });
@@ -90,8 +134,8 @@
 
 =item *
 
-A single code reference which returns a connected 
-L<DBI database handle|DBI/connect> optionally followed by 
+A single code reference which returns a connected
+L<DBI database handle|DBI/connect> optionally followed by
 L<extra attributes|/DBIx::Class specific connection attributes> recognized
 by DBIx::Class:
 
@@ -110,7 +154,13 @@
     %extra_attributes,
   }];
 
-This is particularly useful for L<Catalyst> based applications, allowing the 
+  $connect_info_args = [{
+    dbh_maker => sub { DBI->connect (...) },
+    %dbi_attributes,
+    %extra_attributes,
+  }];
+
+This is particularly useful for L<Catalyst> based applications, allowing the
 following config (L<Config::General> style):
 
   <Model::DB>
@@ -123,13 +173,17 @@
     </connect_info>
   </Model::DB>
 
+The C<dsn>/C<user>/C<password> combination can be substituted by the
+C<dbh_maker> key whose value is a coderef that returns a connected
+L<DBI database handle|DBI/connect>.
+
 =back
 
 Please note that the L<DBI> docs recommend that you always explicitly
 set C<AutoCommit> to either I<0> or I<1>.  L<DBIx::Class> further
 recommends that it be set to I<1>, and that you perform transactions
 via our L<DBIx::Class::Schema/txn_do> method.  L<DBIx::Class> will set it
-to I<1> if you do not do explicitly set it to zero.  This is the default 
+to I<1> if you do not explicitly set it to zero.  This is the default
 for most DBDs. See L</DBIx::Class and AutoCommit> for details.
 
 =head3 DBIx::Class specific connection attributes
@@ -137,7 +191,7 @@
 In addition to the standard L<DBI|DBI/ATTRIBUTES_COMMON_TO_ALL_HANDLES>
 L<connection|DBI/Database_Handle_Attributes> attributes, DBIx::Class recognizes
 the following connection options. These options can be mixed in with your other
-L<DBI> connection attributes, or placed in a seperate hashref
+L<DBI> connection attributes, or placed in a separate hashref
 (C<\%extra_attributes>) as shown above.
 
 Every time C<connect_info> is invoked, any previous settings for
@@ -178,12 +232,97 @@
 Note, this only runs if you explicitly call L</disconnect> on the
 storage object.
 
+=item on_connect_call
+
+A more generalized form of L</on_connect_do> that calls the specified
+C<connect_call_METHOD> methods in your storage driver.
+
+  on_connect_do => 'select 1'
+
+is equivalent to:
+
+  on_connect_call => [ [ do_sql => 'select 1' ] ]
+
+Its values may contain:
+
+=over
+
+=item a scalar
+
+Will call the C<connect_call_METHOD> method.
+
+=item a code reference
+
+Will execute C<< $code->($storage) >>
+
+=item an array reference
+
+Each value can be a method name or code reference.
+
+=item an array of arrays
+
+For each array, the first item is taken to be the C<connect_call_> method name
+or code reference, and the rest are parameters to it.
+
+=back
+
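+These forms can be mixed, e.g. (a hypothetical combination of the variants
+above):
+
+  on_connect_call => [
+    'datetime_setup',                      # scalar - a method name
+    [ do_sql => 'SET TEXTSIZE 1048576' ],  # array - method name plus arguments
+    sub { $_[0]->debug(1) },               # coderef - called with $storage
+  ],
+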
+Some predefined storage methods you may use:
+
+=over
+
+=item do_sql
+
+Executes a SQL string or a code reference that returns a SQL string. This is
+what L</on_connect_do> and L</on_disconnect_do> use.
+
+It can take:
+
+=over
+
+=item a scalar
+
+Will execute the scalar as SQL.
+
+=item an arrayref
+
+Taken to be arguments to L<DBI/do>, the SQL string optionally followed by the
+attributes hashref and bind values.
+
+=item a code reference
+
+Will execute C<< $code->($storage) >> and execute the return array refs as
+above.
+
+=back
+
+=item datetime_setup
+
+Executes any statements necessary to initialize the database session to return
+and accept datetime/timestamp values used with
+L<DBIx::Class::InflateColumn::DateTime>.
+
+Only necessary for some databases, see your specific storage driver for
+implementation details.
+
+=back
+
+=item on_disconnect_call
+
+Takes arguments in the same form as L</on_connect_call> and executes them
+immediately before disconnecting from the database.
+
+Calls the C<disconnect_call_METHOD> methods as opposed to the
+C<connect_call_METHOD> methods called by L</on_connect_call>.
+
+Note, this only runs if you explicitly call L</disconnect> on the
+storage object.
+
 =item disable_sth_caching
 
 If set to a true value, this option will disable the caching of
 statement handles via L<DBI/prepare_cached>.
 
-=item limit_dialect 
+=item limit_dialect
 
 Sets the limit dialect. This is useful for JDBC-bridge among others
 where the remote SQL-dialect cannot be determined by the name of the
@@ -191,7 +330,7 @@
 
 =item quote_char
 
-Specifies what characters to use to quote table and column names. If 
+Specifies what characters to use to quote table and column names. If
 you use this you will want to specify L</name_sep> as well.
 
 C<quote_char> expects either a single character, in which case is it
@@ -203,8 +342,8 @@
 
 =item name_sep
 
-This only needs to be used in conjunction with C<quote_char>, and is used to 
-specify the charecter that seperates elements (schemas, tables, columns) from 
+This only needs to be used in conjunction with C<quote_char>, and is used to
+specify the character that separates elements (schemas, tables, columns) from
 each other. In most cases this is simply a C<.>.
 
 The consequences of not supplying this value is that L<SQL::Abstract>
@@ -250,6 +389,12 @@
   # Connect via subref
   ->connect_info([ sub { DBI->connect(...) } ]);
 
+  # Connect via subref in hashref
+  ->connect_info([{
+    dbh_maker => sub { DBI->connect(...) },
+    on_connect_do => 'alter session ...',
+  }]);
+
   # A bit more complicated
   ->connect_info(
     [
@@ -302,14 +447,51 @@
 =cut
 
 sub connect_info {
+  my ($self, $info) = @_;
+
+  return $self->_connect_info if !$info;
+
+  $self->_connect_info($info); # copy for _connect_info
+
+  $info = $self->_normalize_connect_info($info)
+    if ref $info eq 'ARRAY';
+
+  for my $storage_opt (keys %{ $info->{storage_options} }) {
+    my $value = $info->{storage_options}{$storage_opt};
+
+    $self->$storage_opt($value);
+  }
+
+  # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
+  #  the new set of options
+  $self->_sql_maker(undef);
+  $self->_sql_maker_opts({});
+
+  for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) {
+    my $value = $info->{sql_maker_options}{$sql_maker_opt};
+
+    $self->_sql_maker_opts->{$sql_maker_opt} = $value;
+  }
+
+  my %attrs = (
+    %{ $self->_default_dbi_connect_attributes || {} },
+    %{ $info->{attributes} || {} },
+  );
+
+  my @args = @{ $info->{arguments} };
+
+  $self->_dbi_connect_info([@args,
+    %attrs && !(ref $args[0] eq 'CODE') ? \%attrs : ()]);
+
+  return $self->_connect_info;
+}
+
+sub _normalize_connect_info {
   my ($self, $info_arg) = @_;
+  my %info;
 
-  return $self->_connect_info if !$info_arg;
-
   my @args = @$info_arg;  # take a shallow copy for further mutilation
-  $self->_connect_info([@args]); # copy for _connect_info
 
-
   # combine/pre-parse arguments depending on invocation style
 
   my %attrs;
@@ -320,9 +502,22 @@
   elsif (ref $args[0] eq 'HASH') { # single hashref (i.e. Catalyst config)
     %attrs = %{$args[0]};
     @args = ();
-    for (qw/password user dsn/) {
-      unshift @args, delete $attrs{$_};
+    if (my $code = delete $attrs{dbh_maker}) {
+      @args = $code;
+
+      my @ignored = grep { delete $attrs{$_} } (qw/dsn user password/);
+      if (@ignored) {
+        carp sprintf (
+            'Attribute(s) %s in connect_info were ignored, as they can not be applied '
+          . "to the result of 'dbh_maker'",
+
+          join (', ', map { "'$_'" } (@ignored) ),
+        );
+      }
     }
+    else {
+      @args = delete @attrs{qw/dsn user password/};
+    }
   }
   else {                # otherwise assume dsn/user/password + \%attrs + \%extra_attrs
     %attrs = (
@@ -332,35 +527,66 @@
     @args = @args[0,1,2];
   }
 
-  # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
-  #  the new set of options
-  $self->_sql_maker(undef);
-  $self->_sql_maker_opts({});
+  $info{arguments} = \@args;
 
-  if(keys %attrs) {
-    for my $storage_opt (@storage_options, 'cursor_class') {    # @storage_options is declared at the top of the module
-      if(my $value = delete $attrs{$storage_opt}) {
-        $self->$storage_opt($value);
-      }
-    }
-    for my $sql_maker_opt (qw/limit_dialect quote_char name_sep/) {
-      if(my $opt_val = delete $attrs{$sql_maker_opt}) {
-        $self->_sql_maker_opts->{$sql_maker_opt} = $opt_val;
-      }
-    }
-  }
+  my @storage_opts = grep exists $attrs{$_},
+    @storage_options, 'cursor_class';
 
-  %attrs = () if (ref $args[0] eq 'CODE');  # _connect() never looks past $args[0] in this case
+  @{ $info{storage_options} }{@storage_opts} =
+    delete @attrs{@storage_opts} if @storage_opts;
 
-  $self->_dbi_connect_info([@args, keys %attrs ? \%attrs : ()]);
-  $self->_connect_info;
+  my @sql_maker_opts = grep exists $attrs{$_},
+    qw/limit_dialect quote_char name_sep/;
+
+  @{ $info{sql_maker_options} }{@sql_maker_opts} =
+    delete @attrs{@sql_maker_opts} if @sql_maker_opts;
+
+  $info{attributes} = \%attrs if %attrs;
+
+  return \%info;
 }
 
+sub _default_dbi_connect_attributes {
+  return {
+    AutoCommit => 1,
+    RaiseError => 1,
+    PrintError => 0,
+  };
+}
+
 =head2 on_connect_do
 
 This method is deprecated in favour of setting via L</connect_info>.
 
+=cut
 
+=head2 on_disconnect_do
+
+This method is deprecated in favour of setting via L</connect_info>.
+
+=cut
+
+sub _parse_connect_do {
+  my ($self, $type) = @_;
+
+  my $val = $self->$type;
+  return () if not defined $val;
+
+  my @res;
+
+  if (not ref($val)) {
+    push @res, [ 'do_sql', $val ];
+  } elsif (ref($val) eq 'CODE') {
+    push @res, $val;
+  } elsif (ref($val) eq 'ARRAY') {
+    push @res, map { [ 'do_sql', $_ ] } @$val;
+  } else {
+    $self->throw_exception("Invalid type for $type: ".ref($val));
+  }
+
+  return \@res;
+}
+
 =head2 dbh_do
 
 Arguments: ($subref | $method_name), @extra_coderef_args?
@@ -396,7 +622,7 @@
   my $self = shift;
   my $code = shift;
 
-  my $dbh = $self->_dbh;
+  my $dbh = $self->_get_dbh;
 
   return $self->$code($dbh, @_) if $self->{_in_dbh_do}
       || $self->{transaction_depth};
@@ -407,11 +633,6 @@
   my $want_array = wantarray;
 
   eval {
-    $self->_verify_pid if $dbh;
-    if(!$self->_dbh) {
-        $self->_populate_dbh;
-        $dbh = $self->_dbh;
-    }
 
     if($want_array) {
         @result = $self->$code($dbh, @_);
@@ -424,6 +645,7 @@
     }
   };
 
+  # ->connected might unset $@ - copy
   my $exception = $@;
   if(!$exception) { return $want_array ? @result : $result[0] }
 
@@ -431,6 +653,8 @@
 
   # We were not connected - reconnect and retry, but let any
   #  exception fall right through this time
+  carp "Retrying $code after catching disconnected exception: $exception"
+    if $ENV{DBIC_DBIRETRY_DEBUG};
   $self->_populate_dbh;
   $self->$code($self->_dbh, @_);
 }
@@ -455,8 +679,7 @@
   my $tried = 0;
   while(1) {
     eval {
-      $self->_verify_pid if $self->_dbh;
-      $self->_populate_dbh if !$self->_dbh;
+      $self->_get_dbh;
 
       $self->txn_begin;
       if($want_array) {
@@ -471,10 +694,11 @@
       $self->txn_commit;
     };
 
+    # ->connected might unset $@ - copy
     my $exception = $@;
     if(!$exception) { return $want_array ? @result : $result[0] }
 
-    if($tried++ > 0 || $self->connected) {
+    if($tried++ || $self->connected) {
       eval { $self->txn_rollback };
       my $rollback_exception = $@;
       if($rollback_exception) {
@@ -492,6 +716,8 @@
 
     # We were not connected, and was first try - reconnect and retry
     # via the while loop
+    carp "Retrying $coderef after catching disconnected exception: $exception"
+      if $ENV{DBIC_DBIRETRY_DEBUG};
     $self->_populate_dbh;
   }
 }
@@ -506,11 +732,16 @@
 sub disconnect {
   my ($self) = @_;
 
-  if( $self->connected ) {
-    my $connection_do = $self->on_disconnect_do;
-    $self->_do_connection_actions($connection_do) if ref($connection_do);
+  if( $self->_dbh ) {
+    my @actions;
 
-    $self->_dbh->rollback unless $self->_dbh_autocommit;
+    push @actions, ( $self->on_disconnect_call || () );
+    push @actions, $self->_parse_connect_do ('on_disconnect_do');
+
+    $self->_do_connection_actions(disconnect_call_ => $_) for @actions;
+
+    $self->_dbh_rollback unless $self->_dbh_autocommit;
+
     $self->_dbh->disconnect;
     $self->_dbh(undef);
     $self->{_dbh_gen}++;
@@ -535,29 +766,62 @@
 # Storage subclasses should override this
 sub with_deferred_fk_checks {
   my ($self, $sub) = @_;
-
   $sub->();
 }
 
+=head2 connected
+
+=over
+
+=item Arguments: none
+
+=item Return Value: 1|0
+
+=back
+
+Verifies that the current database handle is active and ready to execute
+an SQL statement (e.g. the connection did not get stale, server is still
+answering, etc.) This method is used internally by L</dbh>.
+
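+For example:
+
+  $schema->storage->ensure_connected
+    unless $schema->storage->connected;
+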
+=cut
+
 sub connected {
-  my ($self) = @_;
+  my $self = shift;
+  return 0 unless $self->_seems_connected;
 
-  if(my $dbh = $self->_dbh) {
-      if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
-          $self->_dbh(undef);
-          $self->{_dbh_gen}++;
-          return;
-      }
-      else {
-          $self->_verify_pid;
-          return 0 if !$self->_dbh;
-      }
-      return ($dbh->FETCH('Active') && $dbh->ping);
+  #be on the safe side
+  local $self->_dbh->{RaiseError} = 1;
+
+  return $self->_ping;
+}
+
+sub _seems_connected {
+  my $self = shift;
+
+  my $dbh = $self->_dbh
+    or return 0;
+
+  if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
+    $self->_dbh(undef);
+    $self->{_dbh_gen}++;
+    return 0;
   }
+  else {
+    $self->_verify_pid;
+    return 0 if !$self->_dbh;
+  }
 
-  return 0;
+  return $dbh->FETCH('Active');
 }
 
+sub _ping {
+  my $self = shift;
+
+  my $dbh = $self->_dbh or return 0;
+
+  return $dbh->ping;
+}
+
 # handle pid changes correctly
 #  NOTE: assumes $self->_dbh is a valid $dbh
 sub _verify_pid {
@@ -582,21 +846,42 @@
 
 =head2 dbh
 
-Returns the dbh - a data base handle of class L<DBI>.
+Returns a C<$dbh> - a database handle of class L<DBI>. The returned handle
+is guaranteed to be healthy by implicitly calling L</connected>, and if
+necessary performing a reconnection before returning. Keep in mind that this
+is very B<expensive> on some database engines. Consider using L</dbh_do>
+instead.
 
 =cut
 
 sub dbh {
   my ($self) = @_;
 
-  $self->ensure_connected;
+  if (not $self->_dbh) {
+    $self->_populate_dbh;
+  } else {
+    $self->ensure_connected;
+  }
   return $self->_dbh;
 }
 
+# this is the internal "get dbh or connect (don't check)" method
+sub _get_dbh {
+  my $self = shift;
+  $self->_verify_pid if $self->_dbh;
+  $self->_populate_dbh unless $self->_dbh;
+  return $self->_dbh;
+}
+
 sub _sql_maker_args {
     my ($self) = @_;
-    
-    return ( bindtype=>'columns', array_datatypes => 1, limit_dialect => $self->dbh, %{$self->_sql_maker_opts} );
+
+    return (
+      bindtype=>'columns',
+      array_datatypes => 1,
+      limit_dialect => $self->_get_dbh,
+      %{$self->_sql_maker_opts}
+    );
 }
 
 sub sql_maker {
@@ -609,11 +894,15 @@
   return $self->_sql_maker;
 }
 
+# nothing to do by default
 sub _rebless {}
+sub _init {}
 
 sub _populate_dbh {
   my ($self) = @_;
+
   my @info = @{$self->_dbi_connect_info || []};
+  $self->_dbh(undef); # in case ->connected failed we might get sent here
   $self->_dbh($self->_connect(@info));
 
   $self->_conn_pid($$);
@@ -625,51 +914,98 @@
   #  there is no transaction in progress by definition
   $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
 
-  my $connection_do = $self->on_connect_do;
-  $self->_do_connection_actions($connection_do) if $connection_do;
+  $self->_run_connection_actions unless $self->{_in_determine_driver};
 }
 
+sub _run_connection_actions {
+  my $self = shift;
+  my @actions;
+
+  push @actions, ( $self->on_connect_call || () );
+  push @actions, $self->_parse_connect_do ('on_connect_do');
+
+  $self->_do_connection_actions(connect_call_ => $_) for @actions;
+}
+
 sub _determine_driver {
   my ($self) = @_;
 
-  if (ref $self eq 'DBIx::Class::Storage::DBI') {
-    my $driver;
+  if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
+    my $started_connected = 0;
+    local $self->{_in_determine_driver} = 1;
 
-    if ($self->_dbh) { # we are connected
-      $driver = $self->_dbh->{Driver}{Name};
-    } else {
-      # try to use dsn to not require being connected, the driver may still
-      # force a connection in _rebless to determine version
-      ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
+    if (ref($self) eq __PACKAGE__) {
+      my $driver;
+      if ($self->_dbh) { # we are connected
+        $driver = $self->_dbh->{Driver}{Name};
+        $started_connected = 1;
+      } else {
+        # if connect_info is a CODEREF, we have no choice but to connect
+        if (ref $self->_dbi_connect_info->[0] &&
+            Scalar::Util::reftype($self->_dbi_connect_info->[0]) eq 'CODE') {
+          $self->_populate_dbh;
+          $driver = $self->_dbh->{Driver}{Name};
+        }
+        else {
+          # try to use dsn to not require being connected, the driver may still
+          # force a connection in _rebless to determine version
+          ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
+        }
+      }
+
+      my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
+      if ($self->load_optional_class($storage_class)) {
+        mro::set_mro($storage_class, 'c3');
+        bless $self, $storage_class;
+        $self->_rebless();
+      }
     }
 
-    if ($self->load_optional_class("DBIx::Class::Storage::DBI::${driver}")) {
-      bless $self, "DBIx::Class::Storage::DBI::${driver}";
-      $self->_rebless();
-    }
+    $self->_driver_determined(1);
+
+    $self->_init; # run driver-specific initializations
+
+    $self->_run_connection_actions
+        if !$started_connected && defined $self->_dbh;
   }
 }
 
 sub _do_connection_actions {
-  my $self = shift;
-  my $connection_do = shift;
+  my $self          = shift;
+  my $method_prefix = shift;
+  my $call          = shift;
 
-  if (!ref $connection_do) {
-    $self->_do_query($connection_do);
+  if (not ref($call)) {
+    my $method = $method_prefix . $call;
+    $self->$method(@_);
+  } elsif (ref($call) eq 'CODE') {
+    $self->$call(@_);
+  } elsif (ref($call) eq 'ARRAY') {
+    if (ref($call->[0]) ne 'ARRAY') {
+      $self->_do_connection_actions($method_prefix, $_) for @$call;
+    } else {
+      $self->_do_connection_actions($method_prefix, @$_) for @$call;
+    }
+  } else {
+    $self->throw_exception (sprintf ("Don't know how to process connection actions of type '%s'", ref($call)) );
   }
-  elsif (ref $connection_do eq 'ARRAY') {
-    $self->_do_query($_) foreach @$connection_do;
-  }
-  elsif (ref $connection_do eq 'CODE') {
-    $connection_do->($self);
-  }
-  else {
-    $self->throw_exception (sprintf ("Don't know how to process conection actions of type '%s'", ref $connection_do) );
-  }
 
   return $self;
 }
 
+sub connect_call_do_sql {
+  my $self = shift;
+  $self->_do_query(@_);
+}
+
+sub disconnect_call_do_sql {
+  my $self = shift;
+  $self->_do_query(@_);
+}
+
+# override in db-specific backend when necessary
+sub connect_call_datetime_setup { 1 }
+
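
For reference, the dispatch above maps onto connect_info roughly as follows
(a sketch - the schema name and SQL are illustrative only):

  my $schema = My::Schema->connect($dsn, $user, $pass, {
    # a plain name resolves to $storage->connect_call_<name>
    on_connect_call => 'datetime_setup',
  });

  # the [ $name => @args ] tuple form passes arguments through,
  # e.g. to connect_call_do_sql above:
  #   on_connect_call => [
  #     [ do_sql => 'SET NAMES utf8' ],
  #   ],
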
 sub _do_query {
   my ($self, $action) = @_;
 
@@ -688,7 +1024,7 @@
     my @bind = map { [ undef, $_ ] } @do_args;
 
     $self->_query_start($sql, @bind);
-    $self->_dbh->do($sql, $attrs, @do_args);
+    $self->_get_dbh->do($sql, $attrs, @do_args);
     $self->_query_end($sql, @bind);
   }
 
@@ -710,7 +1046,7 @@
 
   eval {
     if(ref $info[0] eq 'CODE') {
-       $dbh = &{$info[0]}
+       $dbh = $info[0]->();
     }
     else {
        $dbh = DBI->connect(@info);
@@ -724,6 +1060,8 @@
             $weak_self->throw_exception("DBI Exception: $_[0]");
           }
           else {
+            # the handler may be invoked by something totally out of
+            # the scope of DBIC
             croak ("DBI Exception: $_[0]");
           }
       };
@@ -754,11 +1092,11 @@
 
   $self->throw_exception ("Your Storage implementation doesn't support savepoints")
     unless $self->can('_svp_begin');
-  
+
   push @{ $self->{savepoints} }, $name;
 
   $self->debugobj->svp_begin($name) if $self->debug;
-  
+
   return $self->_svp_begin($name);
 }
 
@@ -818,7 +1156,7 @@
   }
 
   $self->debugobj->svp_rollback($name) if $self->debug;
-  
+
   return $self->_svp_rollback($name);
 }
 
@@ -830,27 +1168,43 @@
 
 sub txn_begin {
   my $self = shift;
-  $self->ensure_connected();
+
+  # this means we have not yet connected and do not know the AC status
+  # (e.g. coderef $dbh)
+  $self->ensure_connected if (! defined $self->_dbh_autocommit);
+
   if($self->{transaction_depth} == 0) {
     $self->debugobj->txn_begin()
       if $self->debug;
-    # this isn't ->_dbh-> because
-    #  we should reconnect on begin_work
-    #  for AutoCommit users
-    $self->dbh->begin_work;
-  } elsif ($self->auto_savepoint) {
+    $self->_dbh_begin_work;
+  }
+  elsif ($self->auto_savepoint) {
     $self->svp_begin;
   }
   $self->{transaction_depth}++;
 }
 
+sub _dbh_begin_work {
+  my $self = shift;
+
+  # if the user is utilizing txn_do - good for him, otherwise we need to
+  # ensure that the $dbh is healthy on BEGIN.
+  # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
+  # will be replaced by a failure of begin_work itself (which will be
+  # then retried on reconnect)
+  if ($self->{_in_dbh_do}) {
+    $self->_dbh->begin_work;
+  } else {
+    $self->dbh_do(sub { $_[1]->begin_work });
+  }
+}
+
 sub txn_commit {
   my $self = shift;
   if ($self->{transaction_depth} == 1) {
-    my $dbh = $self->_dbh;
     $self->debugobj->txn_commit()
       if ($self->debug);
-    $dbh->commit;
+    $self->_dbh_commit;
     $self->{transaction_depth} = 0
       if $self->_dbh_autocommit;
   }
@@ -861,6 +1215,13 @@
   }
 }
 
+sub _dbh_commit {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot COMMIT on a disconnected handle');
+  $dbh->commit;
+}
+
 sub txn_rollback {
   my $self = shift;
   my $dbh = $self->_dbh;
@@ -870,7 +1231,7 @@
         if ($self->debug);
       $self->{transaction_depth} = 0
         if $self->_dbh_autocommit;
-      $dbh->rollback;
+      $self->_dbh_rollback;
     }
     elsif($self->{transaction_depth} > 1) {
       $self->{transaction_depth}--;
@@ -893,6 +1254,13 @@
   }
 }
 
+sub _dbh_rollback {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
+  $dbh->rollback;
+}
+
 # This used to be the top-half of _execute.  It was split out to make it
 #  easier to override in NoBindVars without duping the rest.  It takes up
 #  all of _execute's args, and emits $sql, @bind.
@@ -956,7 +1324,7 @@
 
   my $sth = $self->sth($sql,$op);
 
-  my $placeholder_index = 1; 
+  my $placeholder_index = 1;
 
   foreach my $bound (@$bind) {
     my $attributes = {};
@@ -987,7 +1355,7 @@
 
 sub _execute {
     my $self = shift;
-    $self->dbh_do('_dbh_execute', @_)
+    $self->dbh_do('_dbh_execute', @_);  # retry over disconnects
 }
 
 sub insert {
@@ -998,13 +1366,16 @@
 
   my $updated_cols = {};
 
-  $self->ensure_connected;
   foreach my $col ( $source->columns ) {
     if ( !defined $to_insert->{$col} ) {
       my $col_info = $source->column_info($col);
 
       if ( $col_info->{auto_nextval} ) {
-        $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch( 'nextval', $col_info->{sequence} || $self->_dbh_get_autoinc_seq($self->dbh, $source) );
+        $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch(
+          'nextval',
+          $col_info->{sequence} ||
+            $self->_dbh_get_autoinc_seq($self->_get_dbh, $source)
+        );
       }
     }
   }
@@ -1014,22 +1385,114 @@
   return $updated_cols;
 }
 
-## Still not quite perfect, and EXPERIMENTAL
-## Currently it is assumed that all values passed will be "normal", i.e. not 
+## Currently it is assumed that all values passed will be "normal", i.e. not
 ## scalar refs, or at least, all the same type as the first set, the statement is
 ## only prepped once.
 sub insert_bulk {
   my ($self, $source, $cols, $data) = @_;
+
   my %colvalues;
-  my $table = $source->from;
   @colvalues{@$cols} = (0..$#$cols);
-  my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues);
-  
-  $self->_query_start( $sql, @bind );
+
+  for my $i (0..$#$cols) {
+    my $first_val = $data->[0][$i];
+    next unless ref $first_val eq 'SCALAR';
+
+    $colvalues{ $cols->[$i] } = $first_val;
+  }
+
+  # check for bad data and stringify stringifiable objects
+  my $bad_slice = sub {
+    my ($msg, $col_idx, $slice_idx) = @_;
+    $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
+      $msg,
+      $cols->[$col_idx],
+      do {
+        local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any
+        Data::Dumper::Concise::Dumper({
+          map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
+        }),
+      }
+    );
+  };
+
+  for my $datum_idx (0..$#$data) {
+    my $datum = $data->[$datum_idx];
+
+    for my $col_idx (0..$#$cols) {
+      my $val            = $datum->[$col_idx];
+      my $sqla_bind      = $colvalues{ $cols->[$col_idx] };
+      my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
+
+      if ($is_literal_sql) {
+        if (not ref $val) {
+          $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx);
+        }
+        elsif ((my $reftype = ref $val) ne 'SCALAR') {
+          $bad_slice->("$reftype reference found where literal SQL expected",
+            $col_idx, $datum_idx);
+        }
+        elsif ($$val ne $$sqla_bind){
+          $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
+            $col_idx, $datum_idx);
+        }
+      }
+      elsif (my $reftype = ref $val) {
+        require overload;
+        if (overload::Method($val, '""')) {
+          $datum->[$col_idx] = "".$val;
+        }
+        else {
+          $bad_slice->("$reftype reference found where bind expected",
+            $col_idx, $datum_idx);
+        }
+      }
+    }
+  }
+
+  my ($sql, $bind) = $self->_prep_for_execute (
+    'insert', undef, $source, [\%colvalues]
+  );
+  my @bind = @$bind;
+
+  my $empty_bind = (
+    (not @bind)
+      &&
+    (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols
+  ) ? 1 : 0;
+
+  if ((not @bind) && (not $empty_bind)) {
+    $self->throw_exception(
+      'Cannot insert_bulk without support for placeholders'
+    );
+  }
+
+  # neither _execute_array, nor _execute_inserts_with_no_binds are
+  # atomic (even if _execute _array is a single call). Thus a safety
+  # scope guard
+  my $guard = $self->{transaction_depth} == 0 ? $self->txn_scope_guard : undef;
+
+  $self->_query_start( $sql, ['__BULK__'] );
   my $sth = $self->sth($sql);
+  my $rv = do {
+    if ($empty_bind) {
+      # bind_param_array doesn't work if there are no binds
+      $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
+    }
+    else {
+#      @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+      $self->_execute_array( $source, $sth, \@bind, $cols, $data );
+    }
+  };
 
-#  @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+  $self->_query_end( $sql, ['__BULK__'] );
 
+
+  $guard->commit if $guard;
+
+  return (wantarray ? ($rv, $sth, @bind) : $rv);
+}
+
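To make the consistency check above concrete: a void-context populate()
(which routes through insert_bulk) along these lines passes, because the
literal SQL is identical in every row, while swapping a plain bind value
into the created_on slot of any one row would raise the "bind found where
literal SQL expected" exception (table and columns are hypothetical):

  $schema->resultset('Event')->populate([
    [ qw/name created_on/ ],
    [ 'foo', \'NOW()' ],
    [ 'bar', \'NOW()' ],
  ]);
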
+sub _execute_array {
+  my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
+
   ## This must be an arrayref, else nothing works!
   my $tuple_status = [];
 
@@ -1037,9 +1500,9 @@
   my $bind_attributes = $self->source_bind_attributes($source);
 
   ## Bind the values and execute
-  my $placeholder_index = 1; 
+  my $placeholder_index = 1;
 
-  foreach my $bound (@bind) {
+  foreach my $bound (@$bind) {
 
     my $attributes = {};
     my ($column_name, $data_index) = @$bound;
@@ -1054,56 +1517,82 @@
     $sth->bind_param_array( $placeholder_index, [@data], $attributes );
     $placeholder_index++;
   }
-  my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) };
-  if (my $err = $@) {
+
+  my $rv = eval {
+    $self->_dbh_execute_array($sth, $tuple_status, @extra);
+  };
+  my $err = $@ || $sth->errstr;
+
+# Statement must finish even if there was an exception.
+  eval { $sth->finish };
+  $err = $@ unless $err;
+
+  if ($err) {
     my $i = 0;
     ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
 
-    $self->throw_exception($sth->errstr || "Unexpected populate error: $err")
+    $self->throw_exception("Unexpected populate error: $err")
       if ($i > $#$tuple_status);
 
-    require Data::Dumper;
-    local $Data::Dumper::Terse = 1;
-    local $Data::Dumper::Indent = 1;
-    local $Data::Dumper::Useqq = 1;
-    local $Data::Dumper::Quotekeys = 0;
-
     $self->throw_exception(sprintf "%s for populate slice:\n%s",
-      $tuple_status->[$i][1],
-      Data::Dumper::Dumper(
-        { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) }
-      ),
+      ($tuple_status->[$i][1] || $err),
+      Data::Dumper::Concise::Dumper({
+        map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols)
+      }),
     );
   }
-  $self->throw_exception($sth->errstr) if !$rv;
+  return $rv;
+}
 
-  $self->_query_end( $sql, @bind );
-  return (wantarray ? ($rv, $sth, @bind) : $rv);
+sub _dbh_execute_array {
+    my ($self, $sth, $tuple_status, @extra) = @_;
+
+    return $sth->execute_array({ArrayTupleStatus => $tuple_status});
 }
 
+sub _dbh_execute_inserts_with_no_binds {
+  my ($self, $sth, $count) = @_;
+
+  eval {
+    my $dbh = $self->_get_dbh;
+    local $dbh->{RaiseError} = 1;
+    local $dbh->{PrintError} = 0;
+
+    $sth->execute foreach 1..$count;
+  };
+  my $exception = $@;
+
+# Make sure statement is finished even if there was an exception.
+  eval { $sth->finish };
+  $exception = $@ unless $exception;
+
+  $self->throw_exception($exception) if $exception;
+
+  return $count;
+}
+
 sub update {
-  my $self = shift @_;
-  my $source = shift @_;
-  my $bind_attributes = $self->source_bind_attributes($source);
-  
-  return $self->_execute('update' => [], $source, $bind_attributes, @_);
+  my ($self, $source, @args) = @_;
+
+  my $bind_attrs = $self->source_bind_attributes($source);
+
+  return $self->_execute('update' => [], $source, $bind_attrs, @args);
 }
 
 
 sub delete {
-  my $self = shift @_;
-  my $source = shift @_;
-  
+  my ($self, $source, @args) = @_;
+
   my $bind_attrs = $self->source_bind_attributes($source);
-  
-  return $self->_execute('delete' => [], $source, $bind_attrs, @_);
+
+  return $self->_execute('delete' => [], $source, $bind_attrs, @args);
 }
 
 # We were sent here because the $rs contains a complex search
 # which will require a subquery to select the correct rows
-# (i.e. joined or limited resultsets)
+# (i.e. joined or limited resultsets, or non-introspectable conditions)
 #
-# Genarating a single PK column subquery is trivial and supported
+# Generating a single PK column subquery is trivial and supported
 # by all RDBMS. However if we have a multicolumn PK, things get ugly.
 # Look at _multipk_update_delete()
 sub _subq_update_delete {
@@ -1112,14 +1601,27 @@
 
   my $rsrc = $rs->result_source;
 
-  # we already check this, but double check naively just in case. Should be removed soon
+  # quick check if we got a sane rs on our hands
+  my @pcols = $rsrc->primary_columns;
+  unless (@pcols) {
+    $self->throw_exception (
+      sprintf (
+        "You must declare primary key(s) on source '%s' (via set_primary_key) in order to update or delete complex resultsets",
+        $rsrc->source_name || $rsrc->from
+      )
+    );
+  }
+
   my $sel = $rs->_resolved_attrs->{select};
   $sel = [ $sel ] unless ref $sel eq 'ARRAY';
-  my @pcols = $rsrc->primary_columns;
-  if (@$sel != @pcols) {
+
+  if (
+      join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
+        ne
+      join ("\x00", sort @$sel )
+  ) {
     $self->throw_exception (
-      'Subquery update/delete can not be called on resultsets selecting a'
-     .' number of columns different than the number of primary keys'
+      '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
     );
   }
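
In practice the checks above mean a primary key must be declared before a
complex update/delete is attempted (source and column names are hypothetical):

  __PACKAGE__->set_primary_key('artistid');  # in My::Schema::Result::Artist

  # a joined and limited resultset can then be deleted via the
  # PK subselect generated here:
  $schema->resultset('Artist')
         ->search({ 'cds.year' => 2010 }, { join => 'cds', rows => 5 })
         ->delete;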
 
@@ -1169,11 +1671,12 @@
   my $row_cnt = '0E0';
 
   my $subrs_cur = $rs->cursor;
-  while (my @pks = $subrs_cur->next) {
+  my @all_pk = $subrs_cur->all;
+  for my $pks ( @all_pk) {
 
     my $cond;
     for my $i (0.. $#pcols) {
-      $cond->{$pcols[$i]} = $pks[$i];
+      $cond->{$pcols[$i]} = $pks->[$i];
     }
 
     $self->$op (
@@ -1194,10 +1697,10 @@
   my $self = shift;
 
   # localization is neccessary as
-  # 1) there is no infrastructure to pass this around (easy to do, but will wait)
+  # 1) there is no infrastructure to pass this around before SQLA2
   # 2) _select_args sets it and _prep_for_execute consumes it
   my $sql_maker = $self->sql_maker;
-  local $sql_maker->{for};
+  local $sql_maker->{_dbic_rs_attrs};
 
   return $self->_execute($self->_select_args(@_));
 }
@@ -1206,10 +1709,10 @@
   my $self = shift;
 
   # localization is neccessary as
-  # 1) there is no infrastructure to pass this around (easy to do, but will wait)
+  # 1) there is no infrastructure to pass this around before SQLA2
   # 2) _select_args sets it and _prep_for_execute consumes it
   my $sql_maker = $self->sql_maker;
-  local $sql_maker->{for};
+  local $sql_maker->{_dbic_rs_attrs};
 
   # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $order, $rows, $offset)
   #  = $self->_select_args($ident, $select, $cond, $attrs);
@@ -1229,8 +1732,19 @@
 sub _select_args {
   my ($self, $ident, $select, $where, $attrs) = @_;
 
+  my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
+
   my $sql_maker = $self->sql_maker;
-  my $alias2source = $self->_resolve_ident_sources ($ident);
+  $sql_maker->{_dbic_rs_attrs} = {
+    %$attrs,
+    select => $select,
+    from => $ident,
+    where => $where,
+    $rs_alias && $alias2source->{$rs_alias}
+      ? ( _source_handle => $alias2source->{$rs_alias}->handle )
+      : ()
+    ,
+  };
 
   # calculate bind_attrs before possible $ident mangling
   my $bind_attrs = {};
@@ -1241,31 +1755,112 @@
       my $fqcn = join ('.', $alias, $col);
       $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col};
 
-      # so that unqualified searches can be bound too
-      $bind_attrs->{$col} = $bind_attrs->{$fqcn} if $alias eq 'me';
+      # Unqualified column names are nice, but at the same time can be
+      # rather ambiguous. What we do here is basically go along with
+      # the loop, adding an unqualified column slot to $bind_attrs,
+      # alongside the fully qualified name. As soon as we encounter
+      # another column by that name (which would imply another table)
+      # we unset the unqualified slot and never add any info to it
+      # to avoid erroneous type binding. If this happens the user's
+      # only choice will be to fully qualify the column name
+
+      if (exists $bind_attrs->{$col}) {
+        $bind_attrs->{$col} = {};
+      }
+      else {
+        $bind_attrs->{$col} = $bind_attrs->{$fqcn};
+      }
     }
   }
 
-  my @limit;
-  if ($attrs->{software_limit} ||
-      $sql_maker->_default_limit_syntax eq "GenericSubQ") {
-        $attrs->{software_limit} = 1;
-  } else {
+  # adjust limits
+  if (
+    $attrs->{software_limit}
+      ||
+    $sql_maker->_default_limit_syntax eq "GenericSubQ"
+  ) {
+    $attrs->{software_limit} = 1;
+  }
+  else {
     $self->throw_exception("rows attribute must be positive if present")
       if (defined($attrs->{rows}) && !($attrs->{rows} > 0));
 
     # MySQL actually recommends this approach.  I cringe.
     $attrs->{rows} = 2**48 if not defined $attrs->{rows} and defined $attrs->{offset};
+  }
 
-    if ($attrs->{rows} && keys %{$attrs->{collapse}}) {
-      ($ident, $select, $where, $attrs)
-        = $self->_adjust_select_args_for_limited_prefetch ($ident, $select, $where, $attrs);
-    }
-    else {
-      push @limit, $attrs->{rows}, $attrs->{offset};
-    }
+  my @limit;
+
+  # see if we need to tear the prefetch apart otherwise delegate the limiting to the
+  # storage, unless software limit was requested
+  if (
+    #limited has_many
+    ( $attrs->{rows} && keys %{$attrs->{collapse}} )
+       ||
+    # limited prefetch with RNO subqueries
+    (
+      $attrs->{rows}
+        &&
+      $sql_maker->limit_dialect eq 'RowNumberOver'
+        &&
+      $attrs->{_prefetch_select}
+        &&
+      @{$attrs->{_prefetch_select}}
+    )
+      ||
+    # grouped prefetch
+    ( $attrs->{group_by}
+        &&
+      @{$attrs->{group_by}}
+        &&
+      $attrs->{_prefetch_select}
+        &&
+      @{$attrs->{_prefetch_select}}
+    )
+  ) {
+    ($ident, $select, $where, $attrs)
+      = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
   }
 
+  elsif (
+    ($attrs->{rows} || $attrs->{offset})
+      &&
+    $sql_maker->limit_dialect eq 'RowNumberOver'
+      &&
+    (ref $ident eq 'ARRAY' && @$ident > 1)  # indicates a join
+      &&
+    scalar $self->_parse_order_by ($attrs->{order_by})
+  ) {
+    # the RNO limit dialect above mangles the SQL such that the join gets lost
+    # wrap a subquery here
+
+    push @limit, delete @{$attrs}{qw/rows offset/};
+
+    my $subq = $self->_select_args_to_query (
+      $ident,
+      $select,
+      $where,
+      $attrs,
+    );
+
+    $ident = {
+      -alias => $attrs->{alias},
+      -source_handle => $ident->[0]{-source_handle},
+      $attrs->{alias} => $subq,
+    };
+
+    # all part of the subquery now
+    delete @{$attrs}{qw/order_by group_by having/};
+    $where = undef;
+  }
+
+  elsif (! $attrs->{software_limit} ) {
+    push @limit, $attrs->{rows}, $attrs->{offset};
+  }
+
+  # try to simplify the joinmap further (prune unreferenced type-single joins)
+  $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
+
 ###
   # This would be the point to deflate anything found in $where
   # (and leave $attrs->{bind} intact). Problem is - inflators historically
@@ -1278,193 +1873,12 @@
 
   my $order = { map
     { $attrs->{$_} ? ( $_ => $attrs->{$_} ) : ()  }
-    (qw/order_by group_by having _virtual_order_by/ )
+    (qw/order_by group_by having/ )
   };
 
-
-  $sql_maker->{for} = delete $attrs->{for};
-
   return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit);
 }
 
-sub _adjust_select_args_for_limited_prefetch {
-  my ($self, $from, $select, $where, $attrs) = @_;
-
-  if ($attrs->{group_by} and @{$attrs->{group_by}}) {
-    $self->throw_exception ('Prefetch with limit (rows/offset) is not supported on resultsets with a group_by attribute');
-  }
-
-  $self->throw_exception ('Prefetch with limit (rows/offset) is not supported on resultsets with a custom from attribute')
-    if (ref $from ne 'ARRAY');
-
-
-  # separate attributes
-  my $sub_attrs = { %$attrs };
-  delete $attrs->{$_} for qw/where bind rows offset/;
-  delete $sub_attrs->{$_} for qw/for collapse select order_by/;
-
-  my $alias = $attrs->{alias};
-
-  # create subquery select list
-  my $sub_select = [ grep { $_ =~ /^$alias\./ } @{$attrs->{select}} ];
-
-  # bring over all non-collapse-induced order_by into the inner query (if any)
-  # the outer one will have to keep them all
-  if (my $ord_cnt = @{$attrs->{order_by}} - @{$attrs->{_collapse_order_by}} ) {
-    $sub_attrs->{order_by} = [
-      @{$attrs->{order_by}}[ 0 .. ($#{$attrs->{order_by}} - $ord_cnt - 1) ]
-    ];
-  }
-
-  # mangle {from}
-  $from = [ @$from ];
-  my $select_root = shift @$from;
-  my @outer_from = @$from;
-
-  my %inner_joins;
-  my %join_info = map { $_->[0]{-alias} => $_->[0] } (@$from);
-
-  # in complex search_related chains $alias may *not* be 'me'
-  # so always include it in the inner join, and also shift away
-  # from the outer stack, so that the two datasets actually do
-  # meet
-  if ($select_root->{-alias} ne $alias) {
-    $inner_joins{$alias} = 1;
-
-    while (@outer_from && $outer_from[0][0]{-alias} ne $alias) {
-      shift @outer_from;
-    }
-    if (! @outer_from) {
-      $self->throw_exception ("Unable to find '$alias' in the {from} stack, something is wrong");
-    }
-
-    shift @outer_from; # the new subquery will represent this alias, so get rid of it
-  }
-
-
-  # decide which parts of the join will remain on the inside
-  #
-  # this is not a very viable optimisation, but it was written
-  # before I realised this, so might as well remain. We can throw
-  # away _any_ branches of the join tree that are:
-  # 1) not mentioned in the condition/order
-  # 2) left-join leaves (or left-join leaf chains)
-  # Most of the join ocnditions will not satisfy this, but for real
-  # complex queries some might, and we might make some RDBMS happy.
-  #
-  #
-  # since we do not have introspectable SQLA, we fall back to ugly
-  # scanning of raw SQL for WHERE, and for pieces of ORDER BY
-  # in order to determine what goes into %inner_joins
-  # It may not be very efficient, but it's a reasonable stop-gap
-  {
-    # produce stuff unquoted, so it can be scanned
-    my $sql_maker = $self->sql_maker;
-    local $sql_maker->{quote_char};
-
-    my @order_by = (map
-      { ref $_ ? $_->[0] : $_ }
-      $sql_maker->_order_by_chunks ($sub_attrs->{order_by})
-    );
-
-    my $where_sql = $sql_maker->where ($where);
-
-    # sort needed joins
-    for my $alias (keys %join_info) {
-
-      # any table alias found on a column name in where or order_by
-      # gets included in %inner_joins
-      # Also any parent joins that are needed to reach this particular alias
-      for my $piece ($where_sql, @order_by ) {
-        if ($piece =~ /\b$alias\./) {
-          $inner_joins{$alias} = 1;
-        }
-      }
-    }
-  }
-
-  # scan for non-leaf/non-left joins and mark as needed
-  # also mark all ancestor joins that are needed to reach this particular alias
-  # (e.g.  join => { cds => 'tracks' } - tracks will bring cds too )
-  #
-  # traverse by the size of the -join_path i.e. reverse depth first
-  for my $alias (sort { @{$join_info{$b}{-join_path}} <=> @{$join_info{$a}{-join_path}} } (keys %join_info) ) {
-
-    my $j = $join_info{$alias};
-    $inner_joins{$alias} = 1 if (! $j->{-join_type} || ($j->{-join_type} !~ /^left$/i) );
-
-    if ($inner_joins{$alias}) {
-      $inner_joins{$_} = 1 for (@{$j->{-join_path}});
-    }
-  }
-
-  # construct the inner $from for the subquery
-  my $inner_from = [ $select_root ];
-  for my $j (@$from) {
-    push @$inner_from, $j if $inner_joins{$j->[0]{-alias}};
-  }
-
-  # if a multi-type join was needed in the subquery ("multi" is indicated by
-  # presence in {collapse}) - add a group_by to simulate the collapse in the subq
-
-  for my $alias (keys %inner_joins) {
-
-    # the dot comes from some weirdness in collapse
-    # remove after the rewrite
-    if ($attrs->{collapse}{".$alias"}) {
-      $sub_attrs->{group_by} = $sub_select;
-      last;
-    }
-  }
-
-  # generate the subquery
-  my $subq = $self->_select_args_to_query (
-    $inner_from,
-    $sub_select,
-    $where,
-    $sub_attrs
-  );
-
-  # put it in the new {from}
-  unshift @outer_from, { $alias => $subq };
-
-  # This is totally horrific - the $where ends up in both the inner and outer query
-  # Unfortunately not much can be done until SQLA2 introspection arrives
-  #
-  # OTOH it can be seen as a plus: <ash> (notes that this query would make a DBA cry ;)
-  return (\@outer_from, $select, $where, $attrs);
-}
-
-sub _resolve_ident_sources {
-  my ($self, $ident) = @_;
-
-  my $alias2source = {};
-
-  # the reason this is so contrived is that $ident may be a {from}
-  # structure, specifying multiple tables to join
-  if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
-    # this is compat mode for insert/update/delete which do not deal with aliases
-    $alias2source->{me} = $ident;
-  }
-  elsif (ref $ident eq 'ARRAY') {
-
-    for (@$ident) {
-      my $tabinfo;
-      if (ref $_ eq 'HASH') {
-        $tabinfo = $_;
-      }
-      if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') {
-        $tabinfo = $_->[0];
-      }
-
-      $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-source_handle}->resolve
-        if ($tabinfo->{-source_handle});
-    }
-  }
-
-  return $alias2source;
-}
-
 # Returns a counting SELECT for a simple count
 # query. Abstracted so that a storage could override
 # this to { count => 'firstcol' } or whatever makes
@@ -1489,7 +1903,6 @@
   return @pcols ? \@pcols : [ 1 ];
 }
 
-
 sub source_bind_attributes {
   my ($self, $source) = @_;
 
@@ -1564,7 +1977,7 @@
 
 sub sth {
   my ($self, $sql) = @_;
-  $self->dbh_do('_dbh_sth', $sql);
+  $self->dbh_do('_dbh_sth', $sql);  # retry over disconnects
 }
 
 sub _dbh_columns_info_for {
@@ -1626,7 +2039,7 @@
 
 sub columns_info_for {
   my ($self, $table) = @_;
-  $self->dbh_do('_dbh_columns_info_for', $table);
+  $self->_dbh_columns_info_for ($self->_get_dbh, $table);
 }
 
 =head2 last_insert_id
@@ -1636,32 +2049,91 @@
 =cut
 
 sub _dbh_last_insert_id {
-    # All Storage's need to register their own _dbh_last_insert_id
-    # the old SQLite-based method was highly inappropriate
+    my ($self, $dbh, $source, $col) = @_;
 
-    my $self = shift;
+    my $id = eval { $dbh->last_insert_id (undef, undef, $source->name, $col) };
+
+    return $id if defined $id;
+
     my $class = ref $self;
-    $self->throw_exception (<<EOE);
-
-No _dbh_last_insert_id() method found in $class.
-Since the method of obtaining the autoincrement id of the last insert
-operation varies greatly between different databases, this method must be
-individually implemented for every storage class.
-EOE
+    $self->throw_exception ("No storage specific _dbh_last_insert_id() method implemented in $class, and the generic DBI::last_insert_id() failed");
 }
 
 sub last_insert_id {
   my $self = shift;
-  $self->dbh_do('_dbh_last_insert_id', @_);
+  $self->_dbh_last_insert_id ($self->_dbh, @_);
 }
 
+=head2 _native_data_type
+
+=over 4
+
+=item Arguments: $type_name
+
+=back
+
+This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
+is currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
+L<::Sybase::ASE|DBIx::Class::Storage::DBI::Sybase::ASE>.
+
+The default implementation returns C<undef>; implement it in your Storage
+driver if you need this functionality.
+
+Should map types from other databases to the native RDBMS type, for example
+C<VARCHAR2> to C<VARCHAR>.
+
+Types with modifiers should map to the underlying data type. For example,
+C<INTEGER AUTO_INCREMENT> should become C<INTEGER>.
+
+Composite types should map to the container type, for example
+C<ENUM(foo,bar,baz)> becomes C<ENUM>.
+
+=cut
+
+sub _native_data_type {
+  #my ($self, $data_type) = @_;
+  return undef
+}
+
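A sketch of a driver-side override (the mapping is illustrative, not a
complete type table for any particular RDBMS):

  package My::Storage::Driver;   # hypothetical subclass
  use base 'DBIx::Class::Storage::DBI';

  my %native = (
    'varchar2'               => 'varchar',
    'integer auto_increment' => 'integer',
  );

  sub _native_data_type {
    my ($self, $type) = @_;
    return $native{ lc $type };
  }
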
+# Check if placeholders are supported at all
+sub _placeholders_supported {
+  my $self = shift;
+  my $dbh  = $self->_get_dbh;
+
+  # some drivers provide a $dbh attribute for this (e.g. Sybase's
+  # $dbh->{syb_dynamic_supported}), but it is inaccurate more often than not
+  eval {
+    local $dbh->{PrintError} = 0;
+    local $dbh->{RaiseError} = 1;
+    $dbh->do('select ?', {}, 1);
+  };
+  return $@ ? 0 : 1;
+}
+
+# Check if placeholders bound to non-string types throw exceptions
+#
+sub _typeless_placeholders_supported {
+  my $self = shift;
+  my $dbh  = $self->_get_dbh;
+
+  eval {
+    local $dbh->{PrintError} = 0;
+    local $dbh->{RaiseError} = 1;
+    # this specifically tests a bind that is NOT a string
+    $dbh->do('select 1 where 1 = ?', {}, 1);
+  };
+  return $@ ? 0 : 1;
+}
+
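A driver could consume these probes from its _init hook, for example to fall
back to inline-quoted SQL when the server cannot handle placeholders (the
subclass name is hypothetical):

  sub _init {
    my $self = shift;

    unless ($self->_placeholders_supported) {
      # a real driver would load_optional_class() a NoBindVars
      # variant before reblessing into it
      bless $self, 'My::Storage::Driver::NoBindVars';
      $self->_rebless;
    }
  }
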
 =head2 sqlt_type
 
 Returns the database driver name.
 
 =cut
 
-sub sqlt_type { shift->dbh->{Driver}->{Name} }
+sub sqlt_type {
+  shift->_get_dbh->{Driver}->{Name};
+}
 
 =head2 bind_attribute_by_data_type
 
@@ -1698,7 +2170,7 @@
 }
 
 
-=head2 create_ddl_dir (EXPERIMENTAL)
+=head2 create_ddl_dir
 
 =over 4
 
@@ -1744,17 +2216,15 @@
 
  { add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1 }
 
-merged with the hash passed in. To disable any of those features, pass in a 
+merged with the hash passed in. To disable any of those features, pass in a
 hashref like the following
 
  { ignore_constraint_names => 0, # ... other options }
 
 
-Note that this feature is currently EXPERIMENTAL and may not work correctly 
-across all databases, or fully handle complex relationships.
+WARNING: You are strongly advised to check all SQL files created before
+applying them.
 
-WARNING: Please check all SQL files created, before applying them.
-
 =cut
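
A typical invocation, via the schema proxy method (versions and paths are
illustrative):

  $schema->create_ddl_dir(
    [qw/SQLite PostgreSQL MySQL/],
    '0.2',     # version to generate DDL for
    './sql',   # output directory
    '0.1',     # optional preversion - also produces ALTER diff files
    { ignore_constraint_names => 0 },
  );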
 
 sub create_ddl_dir {
@@ -1771,15 +2241,15 @@
   $version ||= $schema_version;
 
   $sqltargs = {
-    add_drop_table => 1, 
+    add_drop_table => 1,
     ignore_constraint_names => 1,
     ignore_index_names => 1,
     %{$sqltargs || {}}
   };
 
-  $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09003: '}
-      . $self->_check_sqlt_message . q{'})
-          if !$self->_check_sqlt_version;
+  unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
+    $self->throw_exception("Can't create a ddl file without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
+  }
 
   my $sqlt = SQL::Translator->new( $sqltargs );
 
@@ -1811,7 +2281,7 @@
     }
     print $file $output;
     close($file);
-  
+
     next unless ($preversion);
 
     require SQL::Translator::Diff;
@@ -1827,7 +2297,7 @@
       carp("Overwriting existing diff file - $difffile");
       unlink($difffile);
     }
-    
+
     my $source_schema;
     {
       my $t = SQL::Translator->new($sqltargs);
@@ -1846,7 +2316,7 @@
         unless ( $source_schema->name );
     }
 
-    # The "new" style of producers have sane normalization and can support 
+    # The "new" style of producers have sane normalization and can support
     # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't
     # And we have to diff parsed SQL against parsed SQL.
     my $dest_schema = $sqlt_schema;
@@ -1867,12 +2337,12 @@
       $dest_schema->name( $filename )
         unless $dest_schema->name;
     }
-    
+
     my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
                                                   $dest_schema,   $db,
                                                   $sqltargs
                                                  );
-    if(!open $file, ">$difffile") { 
+    if(!open $file, ">$difffile") {
       $self->throw_exception("Can't write to $difffile ($!)");
       next;
     }
@@ -1907,8 +2377,6 @@
 
 sub deployment_statements {
   my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
-  # Need to be connected to get the correct sqlt_type
-  $self->ensure_connected() unless $type;
   $type ||= $self->sqlt_type;
   $version ||= $schema->schema_version || '1.x';
   $dir ||= './';
@@ -1920,15 +2388,11 @@
       return wantarray ? @lines : join(';', @lines);
   }
 
-  $self->throw_exception(q{Can't deploy without SQL::Translator 0.09003: '}
-      . $self->_check_sqlt_message . q{'})
-          if !$self->_check_sqlt_version;
+  unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
+    $self->throw_exception("Can't deploy without a ddl_dir or " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
+  }
 
-  require SQL::Translator::Parser::DBIx::Class;
-  eval qq{use SQL::Translator::Producer::${type}};
-  $self->throw_exception($@) if $@;
-
-  # sources needs to be a parser arg, but for simplicty allow at top level 
+  # sources needs to be a parser arg, but for simplicity allow at top level
   # coming in
   $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
       if exists $sqltargs->{sources};
@@ -1943,9 +2407,8 @@
 sub deploy {
   my ($self, $schema, $type, $sqltargs, $dir) = @_;
   my @statements = $self->deployment_statements(
-  	$schema, $type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 }
+    $schema, $type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 }
   );
-  return $self->_execute_statements(@statements);
 }
 
 =head2 datetime_parser
@@ -1957,7 +2420,6 @@
 sub datetime_parser {
   my $self = shift;
   return $self->{datetime_parser} ||= do {
-    $self->ensure_connected;
     $self->build_datetime_parser(@_);
   };
 }
@@ -1980,27 +2442,11 @@
 sub build_datetime_parser {
   my $self = shift;
   my $type = $self->datetime_parser_type(@_);
-  eval "use ${type}";
-  $self->throw_exception("Couldn't load ${type}: $@") if $@;
+  $self->ensure_class_loaded ($type);
   return $type;
 }
 
-{
-    my $_check_sqlt_version; # private
-    my $_check_sqlt_message; # private
-    sub _check_sqlt_version {
-        return $_check_sqlt_version if defined $_check_sqlt_version;
-        eval 'use SQL::Translator "0.09003"';
-        $_check_sqlt_message = $@ || '';
-        $_check_sqlt_version = !$@;
-    }
 
-    sub _check_sqlt_message {
-        _check_sqlt_version if !defined $_check_sqlt_message;
-        $_check_sqlt_message;
-    }
-}
-
 =head2 is_replicating
 
 A boolean that reports if a particular L<DBIx::Class::Storage::DBI> is set to
@@ -2011,7 +2457,7 @@
 
 sub is_replicating {
     return;
-    
+
 }
 
 =head2 lag_behind_master
@@ -2198,7 +2644,7 @@
   my $comment = qr{--};
   my @lines;
   foreach my $line (@_) {
-	$line=~s/\n|\r|\r\n|\n\r//g; ## Clear any type of eol characters 
+  $line =~ s/[\r\n]+//g; ## Clear any type of EOL characters
     ## Skip if the line is blank, whitespace only or a comment line 
     if(!$line || $line=~m/^\s* $comment/x || $line=~m/^\s*$/) {
       next;
@@ -2292,6 +2738,34 @@
   return @statements;  
 }
 
+=head2 relname_to_table_alias
+
+=over 4
+
+=item Arguments: $relname, $join_count
+
+=back
+
+L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in
+queries.
+
+This hook is to allow specific L<DBIx::Class::Storage> drivers to change the
+way these aliases are named.
+
+The default behavior is C<< "${relname}_${join_count}" if $join_count > 1 >>,
+otherwise C<"$relname">.
+
+=cut
+
+sub relname_to_table_alias {
+  my ($self, $relname, $join_count) = @_;
+
+  my $alias = ($join_count && $join_count > 1 ?
+    join('_', $relname, $join_count) : $relname);
+
+  return $alias;
+}
+
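As a sketch, a storage for an RDBMS with short identifier limits might
override it like so (the subclass and 30-character cap are illustrative):

  package My::Storage::ShortAliases;   # hypothetical subclass
  use base 'DBIx::Class::Storage::DBI';
  use mro 'c3';

  sub relname_to_table_alias {
    my $self = shift;
    my $alias = $self->next::method(@_);

    return substr($alias, 0, 30);
  }
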
 =head1 DESTROY
 
 Make sure we properly clean up the object when it goes out of scope.
@@ -2300,8 +2774,18 @@
 
 sub DESTROY {
   my $self = shift;
-  return if !$self->_dbh;
-  $self->_verify_pid;
+
+  $self->_verify_pid if $self->_dbh;
+
+  # some databases need this to stop spewing warnings
+  if (my $dbh = $self->_dbh) {
+    local $@;
+    eval {
+      %{ $dbh->{CachedKids} } = ();
+      $dbh->disconnect;
+    };
+  }
+
   $self->_dbh(undef);
 }
 
@@ -2313,7 +2797,7 @@
 
 DBIx::Class can do some wonderful magic with handling exceptions,
 disconnections, and transactions when you use C<< AutoCommit => 1 >>
-combined with C<txn_do> for transaction support.
+(the default) combined with C<txn_do> for transaction support.
 
 If you set C<< AutoCommit => 0 >> in your connect info, then you are always
 in an assumed transaction between commits, and you're telling us you'd
@@ -2325,7 +2809,6 @@
 be with raw DBI.
 
 
-
 =head1 AUTHORS
 
 Matt S. Trout <mst at shadowcatsystems.co.uk>

Added: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBIHacks.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBIHacks.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/DBIHacks.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,504 @@
+package   #hide from PAUSE
+  DBIx::Class::Storage::DBIHacks;
+
+#
+# This module contains code that should never have seen the light of day,
+# does not belong in the Storage, or is otherwise unfit for public
+# display. The arrival of SQLA2 should immediately obsolete 90% of this
+#
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class::Storage';
+use mro 'c3';
+
+use Carp::Clan qw/^DBIx::Class/;
+
+#
+# This code will remove non-selecting/non-restricting joins from
+# {from} specs, aiding the RDBMS query optimizer
+#
+sub _prune_unused_joins {
+  my ($self) = shift;
+
+  my ($from, $select, $where, $attrs) = @_;
+
+  if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY') {
+    return $from;   # only standard {from} specs are supported
+  }
+
+  my $aliastypes = $self->_resolve_aliastypes_from_select_args(@_);
+
+  # a grouped set will not be affected by the number of rows. Thus any
+  # {multiplying} joins can go
+  delete $aliastypes->{multiplying} if $attrs->{group_by};
+
+
+  my @newfrom = $from->[0]; # FROM head is always present
+
+  my %need_joins = (map { %{$_||{}} } (values %$aliastypes) );
+  for my $j (@{$from}[1..$#$from]) {
+    push @newfrom, $j if (
+      (! $j->[0]{-alias}) # legacy crap
+        ||
+      $need_joins{$j->[0]{-alias}}
+    );
+  }
+
+  return \@newfrom;
+}
+
+#
+# This is the code producing joined subqueries like:
+# SELECT me.*, other.* FROM ( SELECT me.* FROM ... ) JOIN other ON ... 
+#
+sub _adjust_select_args_for_complex_prefetch {
+  my ($self, $from, $select, $where, $attrs) = @_;
+
+  $self->throw_exception ('Nothing to prefetch... how did we get here?!')
+    if not @{$attrs->{_prefetch_select}};
+
+  $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
+    if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY');
+
+
+  # generate inner/outer attribute lists, remove stuff that doesn't apply
+  my $outer_attrs = { %$attrs };
+  delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
+
+  my $inner_attrs = { %$attrs };
+  delete $inner_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
+
+
+  # bring over all non-collapse-induced order_by into the inner query (if any)
+  # the outer one will have to keep them all
+  delete $inner_attrs->{order_by};
+  if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
+    $inner_attrs->{order_by} = [
+      @{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
+    ];
+  }
+
+  # generate the inner/outer select lists
+  # for inside we consider only stuff *not* brought in by the prefetch
+  # on the outside we substitute any function for its alias
+  my $outer_select = [ @$select ];
+  my $inner_select = [];
+  for my $i (0 .. ( @$outer_select - @{$outer_attrs->{_prefetch_select}} - 1) ) {
+    my $sel = $outer_select->[$i];
+
+    if (ref $sel eq 'HASH' ) {
+      $sel->{-as} ||= $attrs->{as}[$i];
+      $outer_select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "inner_column_$i") );
+    }
+
+    push @$inner_select, $sel;
+  }
+
+  # construct the inner $from for the subquery
+  # we need to prune first, because this will determine if we need a group_by below
+  my $inner_from = $self->_prune_unused_joins ($from, $inner_select, $where, $inner_attrs);
+
+  # if a multi-type join was needed in the subquery - add a group_by to simulate the
+  # collapse in the subq
+  $inner_attrs->{group_by} ||= $inner_select
+    if List::Util::first
+      { ! $_->[0]{-is_single} }
+      (@{$inner_from}[1 .. $#$inner_from])
+  ;
+
+  # generate the subquery
+  my $subq = $self->_select_args_to_query (
+    $inner_from,
+    $inner_select,
+    $where,
+    $inner_attrs,
+  );
+
+  my $subq_joinspec = {
+    -alias => $attrs->{alias},
+    -source_handle => $inner_from->[0]{-source_handle},
+    $attrs->{alias} => $subq,
+  };
+
+  # Generate the outer from - this is relatively easy (really just replace
+  # the join slot with the subquery), with a major caveat - we can not
+  # join anything that is non-selecting (not part of the prefetch), but at
+  # the same time is a multi-type relationship, as it will explode the result.
+  #
+  # There are two possibilities here
+  # - either the join is non-restricting, in which case we simply throw it away
+  # - it is part of the restrictions, in which case we need to collapse the outer
+  #   result by tackling yet another group_by to the outside of the query
+
+  # normalize a copy of $from, so it will be easier to work with further
+  # down (i.e. promote the initial hashref to an AoH)
+  $from = [ @$from ];
+  $from->[0] = [ $from->[0] ];
+
+  # so first generate the outer_from, up to the substitution point
+  my @outer_from;
+  while (my $j = shift @$from) {
+    if ($j->[0]{-alias} eq $attrs->{alias}) { # time to swap
+      push @outer_from, [
+        $subq_joinspec,
+        @{$j}[1 .. $#$j],
+      ];
+      last; # we'll take care of what's left in $from below
+    }
+    else {
+      push @outer_from, $j;
+    }
+  }
+
+  # scan the from spec against different attributes, and see which joins are needed
+  # in what role
+  my $outer_aliastypes =
+    $self->_resolve_aliastypes_from_select_args( $from, $outer_select, $where, $outer_attrs );
+
+  # see what's left - throw away if not selecting/restricting
+  # also throw in a group_by if restricting to guard against
+  # cross-join explosions
+  #
+  while (my $j = shift @$from) {
+    my $alias = $j->[0]{-alias};
+
+    if ($outer_aliastypes->{select}{$alias}) {
+      push @outer_from, $j;
+    }
+    elsif ($outer_aliastypes->{restrict}{$alias}) {
+      push @outer_from, $j;
+      $outer_attrs->{group_by} ||= $outer_select unless $j->[0]{-is_single};
+    }
+  }
+
+  # demote the outer_from head
+  $outer_from[0] = $outer_from[0][0];
+
+  # This is totally horrific - the $where ends up in both the inner and outer query
+  # Unfortunately not much can be done until SQLA2 introspection arrives, and even
+  # then if where conditions apply to the *right* side of the prefetch, you may have
+# to both filter the inner select (e.g. to apply a limit) and then re-filter
+# the outer select to exclude joins you didn't want in the first place
+  #
+  # OTOH it can be seen as a plus: <ash> (notes that this query would make a DBA cry ;)
+  return (\@outer_from, $outer_select, $where, $outer_attrs);
+}
+
+# Due to a lack of SQLA2 we fall back to crude scans of all the
+# select/where/order/group attributes, in order to determine what
+# aliases are needed to fulfill the query. This information is used
+# throughout the code to prune unnecessary JOINs from the queries
+# in an attempt to reduce the execution time.
+# Although the method is pretty horrific, the worst thing that can
+# happen is for it to fail due to an unqualified column, which in
+# turn will result in a vocal exception. Qualifying the column will
+# invariably solve the problem.
+sub _resolve_aliastypes_from_select_args {
+  my ( $self, $from, $select, $where, $attrs ) = @_;
+
+  $self->throw_exception ('Unable to analyze custom {from}')
+    if ref $from ne 'ARRAY';
+
+  # what we will return
+  my $aliases_by_type;
+
+  # see what aliases are there to work with
+  my $alias_list;
+  for (@$from) {
+    my $j = $_;
+    $j = $j->[0] if ref $j eq 'ARRAY';
+    my $al = $j->{-alias}
+      or next;
+
+    $alias_list->{$al} = $j;
+    $aliases_by_type->{multiplying}{$al} = 1
+      unless $j->{-is_single};
+  }
+
+  # set up a botched SQLA
+  my $sql_maker = $self->sql_maker;
+  my $sep = quotemeta ($self->_sql_maker_opts->{name_sep} || '.');
+  local $sql_maker->{quote_char}; # so that we can regex away
+
+
+  my $select_sql = $sql_maker->_recurse_fields ($select);
+  my $where_sql = $sql_maker->where ($where);
+  my $group_by_sql = $sql_maker->_order_by({
+    map { $_ => $attrs->{$_} } qw/group_by having/
+  });
+  my @order_by_chunks = ($self->_parse_order_by ($attrs->{order_by}) );
+
+  # match every alias to the sql chunks above
+  for my $alias (keys %$alias_list) {
+    my $al_re = qr/\b $alias $sep/x;
+
+    for my $piece ($where_sql, $group_by_sql) {
+      $aliases_by_type->{restrict}{$alias} = 1 if ($piece =~ $al_re);
+    }
+
+    for my $piece ($select_sql, @order_by_chunks ) {
+      $aliases_by_type->{select}{$alias} = 1 if ($piece =~ $al_re);
+    }
+  }
+
+  # Add any non-left joins to the restriction list (such joins are indeed restrictions)
+  for my $j (values %$alias_list) {
+    my $alias = $j->{-alias} or next;
+    $aliases_by_type->{restrict}{$alias} = 1 if (
+      (not $j->{-join_type})
+        or
+      ($j->{-join_type} !~ /^left (?: \s+ outer)? $/xi)
+    );
+  }
+
+  # mark all join parents as mentioned
+  # (e.g.  join => { cds => 'tracks' } - tracks will need to bring cds too )
+  for my $type (keys %$aliases_by_type) {
+    for my $alias (keys %{$aliases_by_type->{$type}}) {
+      $aliases_by_type->{$type}{$_} = 1
+        for (map { keys %$_ } @{ $alias_list->{$alias}{-join_path} || [] });
+    }
+  }
+
+  return $aliases_by_type;
+}
+
+sub _resolve_ident_sources {
+  my ($self, $ident) = @_;
+
+  my $alias2source = {};
+  my $rs_alias;
+
+  # the reason this is so contrived is that $ident may be a {from}
+  # structure, specifying multiple tables to join
+  if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
+    # this is compat mode for insert/update/delete which do not deal with aliases
+    $alias2source->{me} = $ident;
+    $rs_alias = 'me';
+  }
+  elsif (ref $ident eq 'ARRAY') {
+
+    for (@$ident) {
+      my $tabinfo;
+      if (ref $_ eq 'HASH') {
+        $tabinfo = $_;
+        $rs_alias = $tabinfo->{-alias};
+      }
+      if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') {
+        $tabinfo = $_->[0];
+      }
+
+      $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-source_handle}->resolve
+        if ($tabinfo->{-source_handle});
+    }
+  }
+
+  return ($alias2source, $rs_alias);
+}
+
+# Takes $ident, \@column_names
+#
+# returns { $column_name => \%column_info, ... }
+# also note: this adds -result_source => $rsrc to the column info
+#
+# If no columns_names are supplied returns info about *all* columns
+# for all sources
+sub _resolve_column_info {
+  my ($self, $ident, $colnames) = @_;
+  my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident);
+
+  my $sep = $self->_sql_maker_opts->{name_sep} || '.';
+  my $qsep = quotemeta $sep;
+
+  my (%return, %seen_cols, @auto_colnames);
+
+  # compile a global list of column names, to be able to properly
+  # disambiguate unqualified column names (if at all possible)
+  for my $alias (keys %$alias2src) {
+    my $rsrc = $alias2src->{$alias};
+    for my $colname ($rsrc->columns) {
+      push @{$seen_cols{$colname}}, $alias;
+      push @auto_colnames, "$alias$sep$colname" unless $colnames;
+    }
+  }
+
+  $colnames ||= [
+    @auto_colnames,
+    grep { @{$seen_cols{$_}} == 1 } (keys %seen_cols),
+  ];
+
+  COLUMN:
+  foreach my $col (@$colnames) {
+    my ($alias, $colname) = $col =~ m/^ (?: ([^$qsep]+) $qsep)? (.+) $/x;
+
+    unless ($alias) {
+      # see if the column was seen exactly once (so we know which rsrc it came from)
+      if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1) {
+        $alias = $seen_cols{$colname}[0];
+      }
+      else {
+        next COLUMN;
+      }
+    }
+
+    my $rsrc = $alias2src->{$alias};
+    $return{$col} = $rsrc && {
+      %{$rsrc->column_info($colname)},
+      -result_source => $rsrc,
+      -source_alias => $alias,
+    };
+  }
+
+  return \%return;
+}
+
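The return shape, sketched for a hypothetical call:

  # $self->_resolve_column_info($from, [ 'me.name', 'year' ]) might yield:
  # {
  #   'me.name' => { data_type => 'varchar', size => 100,
  #                  -result_source => $rsrc, -source_alias => 'me' },
  #   year      => { ..., -source_alias => 'cds' },  # seen in one source only
  # }
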
+# The DBIC relationship chaining implementation is pretty simple - every
+# new related_relationship is pushed onto the {from} stack, and the {select}
+# window simply slides further in. This means that when we count somewhere
+# in the middle, we got to make sure that everything in the join chain is an
+# actual inner join, otherwise the count will come back with unpredictable
+# results (a resultset may be generated with _some_ rows regardless of if
+# the relation which the $rs currently selects has rows or not). E.g.
+# $artist_rs->cds->count - normally generates:
+# SELECT COUNT( * ) FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid
+# which actually returns the number of artists * (number of cds || 1)
+#
+# So what we do here is crawl {from}, determine if the current alias is at
+# the top of the stack, and if not - make sure the chain is inner-joined down
+# to the root.
+#
+sub _straight_join_to_node {
+  my ($self, $from, $alias) = @_;
+
+  # subqueries and other oddness are naturally not supported
+  return $from if (
+    ref $from ne 'ARRAY'
+      ||
+    @$from <= 1
+      ||
+    ref $from->[0] ne 'HASH'
+      ||
+    ! $from->[0]{-alias}
+      ||
+    $from->[0]{-alias} eq $alias  # this last bit means $alias is the head of $from - nothing to do
+  );
+
+  # find the current $alias in the $from structure
+  my $switch_branch;
+  JOINSCAN:
+  for my $j (@{$from}[1 .. $#$from]) {
+    if ($j->[0]{-alias} eq $alias) {
+      $switch_branch = $j->[0]{-join_path};
+      last JOINSCAN;
+    }
+  }
+
+  # something else went quite wrong
+  return $from unless $switch_branch;
+
+  # So it looks like we will have to switch some stuff around.
+  # local() is useless here as we will be leaving the scope
+  # anyway, and deep cloning is just too fucking expensive
+  # So replace the first hashref in the node arrayref manually 
+  my @new_from = ($from->[0]);
+  my $sw_idx = { map { values %$_ => 1 } @$switch_branch };
+
+  for my $j (@{$from}[1 .. $#$from]) {
+    my $jalias = $j->[0]{-alias};
+
+    if ($sw_idx->{$jalias}) {
+      my %attrs = %{$j->[0]};
+      delete $attrs{-join_type};
+      push @new_from, [
+        \%attrs,
+        @{$j}[ 1 .. $#$j ],
+      ];
+    }
+    else {
+      push @new_from, $j;
+    }
+  }
+
+  return \@new_from;
+}
+
+# Most databases do not allow aliasing of tables in UPDATE/DELETE. Thus
+# a condition containing 'me' or other table prefixes will not work
+# at all. What this code tries to do (badly) is introspect the condition
+# and remove all column qualifiers. If it bails out early (returns undef)
+# the calling code should try another approach (e.g. a subquery)
+sub _strip_cond_qualifiers {
+  my ($self, $where) = @_;
+
+  my $cond = {};
+
+  # No-op. No condition, we're updating/deleting everything
+  return $cond unless $where;
+
+  if (ref $where eq 'ARRAY') {
+    $cond = [
+      map {
+        my %hash;
+        foreach my $key (keys %{$_}) {
+          $key =~ /([^.]+)$/;
+          $hash{$1} = $_->{$key};
+        }
+        \%hash;
+      } @$where
+    ];
+  }
+  elsif (ref $where eq 'HASH') {
+    if ( (keys %$where) == 1 && ( (keys %{$where})[0] eq '-and' )) {
+      $cond->{-and} = [];
+      my @cond = @{$where->{-and}};
+       for (my $i = 0; $i < @cond; $i++) {
+        my $entry = $cond[$i];
+        my $hash;
+        my $ref = ref $entry;
+        if ($ref eq 'HASH' or $ref eq 'ARRAY') {
+          $hash = $self->_strip_cond_qualifiers($entry);
+        }
+        elsif (! $ref) {
+          $entry =~ /([^.]+)$/;
+          $hash->{$1} = $cond[++$i];
+        }
+        else {
+          $self->throw_exception ("_strip_cond_qualifiers() is unable to handle a condition reftype $ref");
+        }
+        push @{$cond->{-and}}, $hash;
+      }
+    }
+    else {
+      foreach my $key (keys %$where) {
+        $key =~ /([^.]+)$/;
+        $cond->{$1} = $where->{$key};
+      }
+    }
+  }
+  else {
+    return undef;
+  }
+
+  return $cond;
+}
+
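An illustration of the intended transformation (the condition is
hypothetical):

  # { 'me.name' => 'foo', 'cds.year' => 2010 }
  # comes back as:
  # { name => 'foo', year => 2010 }
  #
  # while a non-introspectable condition (e.g. literal SQL) yields
  # undef, telling the caller to fall back to a subquery
  my $cond = $self->_strip_cond_qualifiers($where);
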
+sub _parse_order_by {
+  my ($self, $order_by) = @_;
+
+  return scalar $self->sql_maker->_order_by_chunks ($order_by)
+    unless wantarray;
+
+  my $sql_maker = $self->sql_maker;
+  local $sql_maker->{quote_char}; #disable quoting
+  my @chunks;
+  for my $chunk (map { ref $_ ? @$_ : $_ } ($sql_maker->_order_by_chunks ($order_by) ) ) {
+    $chunk =~ s/\s+ (?: ASC|DESC ) \s* $//ix;
+    push @chunks, $chunk;
+  }
+
+  return @chunks;
+}
+
+1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/Statistics.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/Statistics.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/Statistics.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -2,7 +2,7 @@
 use strict;
 use warnings;
 
-use base qw/Class::Accessor::Grouped/;
+use base qw/DBIx::Class/;
 use IO::File;
 
 __PACKAGE__->mk_group_accessors(simple => qw/callback debugfh silence/);
@@ -16,7 +16,7 @@
 =head1 DESCRIPTION
 
 This class is called by DBIx::Class::Storage::DBI as a means of collecting
-statistics on it's actions.  Using this class alone merely prints the SQL
+statistics on its actions.  Using this class alone merely prints the SQL
 executed, the fact that it completes and begin/end notification for
 transactions.
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/TxnScopeGuard.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/TxnScopeGuard.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage/TxnScopeGuard.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -2,7 +2,7 @@
 
 use strict;
 use warnings;
-use Carp ();
+use Carp::Clan qw/^DBIx::Class/;
 
 sub new {
   my ($class, $storage) = @_;
@@ -24,21 +24,33 @@
   return if $dismiss;
 
   my $exception = $@;
-  Carp::cluck("A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or an error - bad")
-    unless $exception; 
+
   {
     local $@;
+
+    carp 'A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or error. Rolling back.'
+      unless $exception;
+
     eval { $storage->txn_rollback };
     my $rollback_exception = $@;
-    if($rollback_exception) {
-      my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
 
-      $storage->throw_exception(
-        "Transaction aborted: ${exception}. "
-        . "Rollback failed: ${rollback_exception}"
-      ) unless $rollback_exception =~ /$exception_class/;
+    if ($rollback_exception && $rollback_exception !~ /DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION/) {
+      if ($exception) {
+        $exception = "Transaction aborted: ${exception} "
+          ."Rollback failed: ${rollback_exception}";
+      }
+      else {
+        carp (join ' ',
+          "********************* ROLLBACK FAILED!!! ********************",
+          "\nA rollback operation failed after the guard went out of scope.",
+          'This is potentially a disastrous situation, check your data for',
+          "consistency: $rollback_exception"
+        );
+      }
     }
   }
+
+  $@ = $exception;
 }
 
 1;
@@ -77,7 +89,7 @@
 =head2 commit
 
 Commit the transaction, and stop guarding the scope. If this method is not
-called and this object goes out of scope (i.e. an exception is thrown) then
+called and this object goes out of scope (e.g. an exception is thrown) then
 the transaction is rolled back, via L<DBIx::Class::Storage/txn_rollback>
 
 =cut
@@ -90,7 +102,7 @@
 
 Ash Berlin, 2008.
 
-Insipred by L<Scope::Guard> by chocolateboy.
+Inspired by L<Scope::Guard> by chocolateboy.
 
 This module is free software. It may be used, redistributed and/or modified
 under the same terms as Perl itself.
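
For context, the usual lifecycle of the guard whose destructor is
reworked above looks like this (assuming a connected $schema):

  my $guard = $schema->txn_scope_guard;

  $schema->resultset('Artist')->create({ name => 'Fred' });

  # skipping this commit now triggers the carp above plus a
  # rollback; a failed rollback is either carped about loudly or
  # appended to the exception already in flight
  $guard->commit;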

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/Storage.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -4,9 +4,10 @@
 use warnings;
 
 use base qw/DBIx::Class/;
+use mro 'c3';
 
-use Scalar::Util qw/weaken/;
-use Carp::Clan qw/^DBIx::Class/;
+use DBIx::Class::Exception;
+use Scalar::Util();
 use IO::File;
 use DBIx::Class::Storage::TxnScopeGuard;
 
@@ -82,7 +83,7 @@
 sub set_schema {
   my ($self, $schema) = @_;
   $self->schema($schema);
-  weaken($self->{schema}) if ref $self->{schema};
+  Scalar::Util::weaken($self->{schema}) if ref $self->{schema};
 }
 
 =head2 connected
@@ -119,8 +120,12 @@
 sub throw_exception {
   my $self = shift;
 
-  $self->schema->throw_exception(@_) if $self->schema;
-  croak @_;
+  if ($self->schema) {
+    $self->schema->throw_exception(@_);
+  }
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 =head2 txn_do
@@ -348,7 +353,7 @@
 =head2 debugfh
 
 Set or retrieve the filehandle used for trace/debug output.  This should be
-an IO::Handle compatible ojbect (only the C<print> method is used.  Initially
+an IO::Handle compatible object (only the C<print> method is used).  Initially
 set to be STDERR - although see information on the
 L<DBIC_TRACE> environment variable.
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/UTF8Columns.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/UTF8Columns.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class/UTF8Columns.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,17 +3,6 @@
 use warnings;
 use base qw/DBIx::Class/;
 
-BEGIN {
-
-    # Perl 5.8.0 doesn't have utf8::is_utf8()
-    # Yes, 5.8.0 support for Unicode is suboptimal, but things like RHEL3 ship with it.
-    if ($] <= 5.008000) {
-        require Encode;
-    } else {
-        require utf8;
-    }
-}
-
 __PACKAGE__->mk_classdata( '_utf8_columns' );
 
 =head1 NAME
@@ -23,9 +12,11 @@
 =head1 SYNOPSIS
 
     package Artist;
-    __PACKAGE__->load_components(qw/UTF8Columns Core/);
+    use base 'DBIx::Class::Core';
+
+    __PACKAGE__->load_components(qw/UTF8Columns/);
     __PACKAGE__->utf8_columns(qw/name description/);
-    
+
     # the following then return strings with the utf8 flag
     $artist->name;
     $artist->get_column('description');
@@ -34,6 +25,15 @@
 
 This module allows you to get column data that has the utf8 (Unicode) flag.
 
+=head2 Warning
+
+Note that this module overloads L<DBIx::Class::Row/store_column> in a way
+that may prevent other components overloading the same method from working
+correctly. This component must be the last one before L<DBIx::Class::Row>
+(which is provided by L<DBIx::Class::Core>). DBIx::Class will detect such
+incorrect component order and issue an appropriate warning, advising which
+components need to be loaded differently.
+
 =head1 SEE ALSO
 
 L<Template::Stash::ForceUTF8>, L<DBIx::Class::UUIDColumns>.
@@ -50,7 +50,7 @@
         foreach my $col (@_) {
             $self->throw_exception("column $col doesn't exist")
                 unless $self->has_column($col);
-        }        
+        }
         return $self->_utf8_columns({ map { $_ => 1 } @_ });
     } else {
         return $self->_utf8_columns;
@@ -67,17 +67,11 @@
     my ( $self, $column ) = @_;
     my $value = $self->next::method($column);
 
-    my $cols = $self->_utf8_columns;
-    if ( $cols and defined $value and $cols->{$column} ) {
+    utf8::decode($value) if (
+      defined $value and $self->_is_utf8_column($column) and ! utf8::is_utf8($value)
+    );
 
-        if ($] <= 5.008000) {
-            Encode::_utf8_on($value) unless Encode::is_utf8($value);
-        } else {
-            utf8::decode($value) unless utf8::is_utf8($value);
-        }
-    }
-
-    $value;
+    return $value;
 }
 
 =head2 get_columns
@@ -88,16 +82,13 @@
     my $self = shift;
     my %data = $self->next::method(@_);
 
-    foreach my $col (grep { defined $data{$_} } keys %{ $self->_utf8_columns || {} }) {
-
-        if ($] <= 5.008000) {
-            Encode::_utf8_on($data{$col}) unless Encode::is_utf8($data{$col});
-        } else {
-            utf8::decode($data{$col}) unless utf8::is_utf8($data{$col});
-        }
+    foreach my $col (keys %data) {
+      utf8::decode($data{$col}) if (
+        exists $data{$col} and defined $data{$col} and $self->_is_utf8_column($col) and ! utf8::is_utf8($data{$col})
+      );
     }
 
-    %data;
+    return %data;
 }
 
 =head2 store_column
@@ -107,32 +98,33 @@
 sub store_column {
     my ( $self, $column, $value ) = @_;
 
-    my $cols = $self->_utf8_columns;
-    if ( $cols and defined $value and $cols->{$column} ) {
+    # the dirtiness comparison must happen on the non-encoded value
+    my $copy;
 
-        if ($] <= 5.008000) {
-            Encode::_utf8_off($value) if Encode::is_utf8($value);
-        } else {
-            utf8::encode($value) if utf8::is_utf8($value);
-        }
+    if ( defined $value and $self->_is_utf8_column($column) and utf8::is_utf8($value) ) {
+      $copy = $value;
+      utf8::encode($value);
     }
 
     $self->next::method( $column, $value );
+
+    return $copy || $value;
 }
 
-=head1 AUTHOR
+# override this if you want to force everything to be encoded/decoded
+sub _is_utf8_column {
+  # my ($self, $col) = @_;
+  return ($_[0]->utf8_columns || {})->{$_[1]};
+}
 
-Daisuke Murase <typester at cpan.org>
+=head1 AUTHORS
 
-=head1 COPYRIGHT
+See L<DBIx::Class/CONTRIBUTORS>.
 
-This program is free software; you can redistribute
-it and/or modify it under the same terms as Perl itself.
+=head1 LICENSE
 
-The full text of the license can be found in the
-LICENSE file included with this module.
+You may distribute this code under the same terms as Perl itself.
 
 =cut
 
 1;
-
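
A short sketch of the resulting encode/decode round-trip, assuming the
Artist class from the SYNOPSIS above:

  use utf8;

  # store_column() encodes characters to octets on the way to
  # storage, while the dirtiness check still sees the original
  $artist->name('Motörhead');
  $artist->update;

  # get_column()/get_columns() decode octets on the way back out
  print utf8::is_utf8($artist->name) ? "characters\n" : "octets\n";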

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/DBIx/Class.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,11 +3,15 @@
 use strict;
 use warnings;
 
+use MRO::Compat;
+use mro 'c3';
+
+use DBIx::Class::Optional::Dependencies;
+
 use vars qw($VERSION);
 use base qw/DBIx::Class::Componentised Class::Accessor::Grouped/;
 use DBIx::Class::StartupCheck;
 
-
 sub mk_classdata {
   shift->mk_classaccessor(@_);
 }
@@ -23,9 +27,8 @@
 # Always remember to do all digits for the version even if they're 0
 # i.e. first release of 0.XX *must* be 0.XX000. This avoids fBSD ports
 # brain damage and presumably various other packaging systems too
+$VERSION = '0.08119_1';
 
-$VERSION = '0.08107';
-
 $VERSION = eval $VERSION; # numify for warning-free dev releases
 
 sub MODIFY_CODE_ATTRIBUTES {
@@ -72,13 +75,14 @@
 
   1;
 
-Create a table class to represent artists, who have many CDs, in
+Create a result class to represent artists, who have many CDs, in
 MyDB/Schema/Result/Artist.pm:
 
+See L<DBIx::Class::ResultSource> for docs on defining result classes.
+
   package MyDB::Schema::Result::Artist;
-  use base qw/DBIx::Class/;
+  use base qw/DBIx::Class::Core/;
 
-  __PACKAGE__->load_components(qw/Core/);
   __PACKAGE__->table('artist');
   __PACKAGE__->add_columns(qw/ artistid name /);
   __PACKAGE__->set_primary_key('artistid');
@@ -86,13 +90,13 @@
 
   1;
 
-A table class to represent a CD, which belongs to an artist, in
+A result class to represent a CD, which belongs to an artist, in
 MyDB/Schema/Result/CD.pm:
 
   package MyDB::Schema::Result::CD;
-  use base qw/DBIx::Class/;
+  use base qw/DBIx::Class::Core/;
 
-  __PACKAGE__->load_components(qw/Core/);
+  __PACKAGE__->load_components(qw/InflateColumn::DateTime/);
   __PACKAGE__->table('cd');
   __PACKAGE__->add_columns(qw/ cdid artistid title year /);
   __PACKAGE__->set_primary_key('cdid');
@@ -108,9 +112,17 @@
 
   # Query for all artists and put them in an array,
   # or retrieve them as a result set object.
+  # $schema->resultset returns a DBIx::Class::ResultSet
   my @all_artists = $schema->resultset('Artist')->all;
   my $all_artists_rs = $schema->resultset('Artist');
 
+  # Output all artists' names
+  # $artist here is a DBIx::Class::Row, which has accessors
+  # for all its columns. Rows are also subclasses of your Result class.
+  foreach my $artist (@all_artists) {
+    print $artist->name, "\n";
+  }
+
   # Create a result set to search for artists.
   # This does not query the DB.
   my $johns_rs = $schema->resultset('Artist')->search(
@@ -217,8 +229,12 @@
 
 bluefeet: Aran Deltac <bluefeet at cpan.org>
 
+boghead: Bryan Beeley <cpan at beeley.org>
+
 bricas: Brian Cassidy <bricas at cpan.org>
 
+brunov: Bruno Vecchi <vecchi.b at gmail.com>
+
 caelum: Rafael Kitover <rkitover at cpan.org>
 
 castaway: Jess Robinson
@@ -231,6 +247,8 @@
 
 debolaz: Anders Nor Berle <berle at cpan.org>
 
+dew: Dan Thomas <dan at godders.org>
+
 dkubb: Dan Kubb <dan.kubb-cpan at onautopilot.com>
 
 dnm: Justin Wheeler <jwheeler at datademons.com>
@@ -241,6 +259,8 @@
 
 frew: Arthur Axel "fREW" Schmidt <frioux at gmail.com>
 
+goraxe: Gordon Irving <goraxe at cpan.org>
+
 gphat: Cory G Watson <gphat at cpan.org>
 
 groditi: Guillermo Roditi <groditi at cpan.org>
@@ -255,6 +275,8 @@
 
 jguenther: Justin Guenther <jguenther at cpan.org>
 
+jhannah: Jay Hannah <jay at jays.net>
+
 jnapiorkowski: John Napiorkowski <jjn1056 at yahoo.com>
 
 jon: Jon Schutz <jjschutz at cpan.org>
@@ -281,8 +303,12 @@
 
 norbi: Norbert Buchmuller <norbi at nix.hu>
 
+nuba: Nuba Princigalli <nuba at cpan.org>
+
 Numa: Dan Sully <daniel at cpan.org>
 
+ovid: Curtis "Ovid" Poe <ovid at cpan.org>
+
 oyse: Øystein Torget <oystein.torget at dnv.com>
 
 paulm: Paul Makepeace
@@ -301,14 +327,18 @@
 
 rafl: Florian Ragwitz <rafl at debian.org>
 
+rbuels: Robert Buels <rmb32 at cornell.edu>
+
 rdj: Ryan D Johnson <ryan at innerfence.com>
 
-ribasushi: Peter Rabbitson <rabbit+dbic at rabbit.us>
+ribasushi: Peter Rabbitson <ribasushi at cpan.org>
 
 rjbs: Ricardo Signes <rjbs at cpan.org>
 
 robkinyon: Rob Kinyon <rkinyon at cpan.org>
 
+Roman: Roman Filippov <romanf at cpan.org>
+
 sc_: Just Another Perl Hacker
 
 scotty: Scotty Allen <scotty at scottyallen.com>
@@ -317,6 +347,8 @@
 
 solomon: Jared Johnson <jaredj at nmgi.com>
 
+spb: Stephen Bennett <stephen at freenode.net>
+
 sszabo: Stephan Szabo <sszabo at bigpanda.com>
 
 teejay : Aaron Trevena <teejay at cpan.org>
@@ -325,6 +357,8 @@
 
 Tom Hukins
 
+triode: Pete Gamache <gamache at cpan.org>
+
 typester: Daisuke Murase <typester at cpan.org>
 
 victori: Victor Igumnov <victori at cpan.org>
@@ -337,8 +371,14 @@
 
 zamolxes: Bogdan Lucaciu <bogdan at wiz.ro>
 
+=head1 COPYRIGHT
+
+Copyright (c) 2005 - 2010 the DBIx::Class L</AUTHOR> and L</CONTRIBUTORS>
+as listed above.
+
 =head1 LICENSE
 
-You may distribute this code under the same terms as Perl itself.
+This library is free software and may be distributed under the same terms
+as perl itself.
 
 =cut
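
To underline the SYNOPSIS point that building a resultset issues no
SQL, a small sketch using the classes defined above:

  # no query is run here - each search() merely refines the set
  my $rs = $schema->resultset('CD')
                  ->search({ year  => { '>'   => 2000 } })
                  ->search({ title => { -like => 'B%' } });

  # the single SELECT happens only once data is actually needed
  my @recent_cds = $rs->all;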

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/SQL/Translator/Parser/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/SQL/Translator/Parser/DBIx/Class.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/SQL/Translator/Parser/DBIx/Class.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -15,6 +15,7 @@
 use Exporter;
 use SQL::Translator::Utils qw(debug normalize_name);
 use Carp::Clan qw/^SQL::Translator|^DBIx::Class/;
+use Scalar::Util ();
 
 use base qw(Exporter);
 
@@ -30,6 +31,10 @@
 # We're working with DBIx::Class Schemas, not data streams.
 # -------------------------------------------------------------------
 sub parse {
+    # this is a hack to prevent schema leaks due to a deficient SQLT implementation
+    # DO NOT REMOVE (until SQLT2 is out; all of this will be rewritten anyway)
+    Scalar::Util::weaken ($_[1]);
+
     my ($tr, $data)   = @_;
     my $args          = $tr->parser_args;
     my $dbicschema    = $args->{'DBIx::Class::Schema'} ||  $args->{"DBIx::Schema"} ||$data;
@@ -65,27 +70,28 @@
     }
 
 
-    my(@table_monikers, @view_monikers);
+    my(%table_monikers, %view_monikers);
     for my $moniker (@monikers){
       my $source = $dbicschema->source($moniker);
        if ( $source->isa('DBIx::Class::ResultSource::Table') ) {
-         push(@table_monikers, $moniker);
+         $table_monikers{$moniker}++;
       } elsif( $source->isa('DBIx::Class::ResultSource::View') ){
           next if $source->is_virtual;
-         push(@view_monikers, $moniker);
+         $view_monikers{$moniker}++;
       }
     }
 
     my %tables;
-    foreach my $moniker (sort @table_monikers)
+    foreach my $moniker (sort keys %table_monikers)
     {
         my $source = $dbicschema->source($moniker);
         my $table_name = $source->name;
 
-        # Skip custom query sources
-        next if ref $table_name;
+        # FIXME - this isn't the right way to do it, but sqlt does not
+        # support quoting properly to be signaled about this
+        $table_name = $$table_name if ref $table_name eq 'SCALAR';
 
-        # Its possible to have multiple DBIC sources using the same table
+        # It's possible to have multiple DBIC sources using the same table
         next if $tables{$table_name};
 
         $tables{$table_name}{source} = $source;
@@ -111,9 +117,11 @@
             my $f = $table->add_field(%colinfo)
               || $dbicschema->throw_exception ($table->error);
         }
-        $table->primary_key($source->primary_columns);
 
         my @primary = $source->primary_columns;
+
+        $table->primary_key(@primary) if @primary;
+
         my %unique_constraints = $source->unique_constraints;
         foreach my $uniq (sort keys %unique_constraints) {
             if (!$source->_compare_relationship_keys($unique_constraints{$uniq}, \@primary)) {
@@ -130,24 +138,33 @@
         my %created_FK_rels;
 
         # global add_fk_index set in parser_args
-        my $add_fk_index = (exists $args->{add_fk_index} && ($args->{add_fk_index} == 0)) ? 0 : 1;
+        my $add_fk_index = (exists $args->{add_fk_index} && ! $args->{add_fk_index}) ? 0 : 1;
 
         foreach my $rel (sort @rels)
         {
+
             my $rel_info = $source->relationship_info($rel);
 
             # Ignore any rel cond that isn't a straight hash
             next unless ref $rel_info->{cond} eq 'HASH';
 
-            my $othertable = $source->related_source($rel);
-            my $rel_table = $othertable->name;
+            my $relsource = $source->related_source($rel);
 
+            # related sources might be excluded via a {sources} filter or might be views
+            next unless exists $table_monikers{$relsource->source_name};
+
+            my $rel_table = $relsource->name;
+
+            # FIXME - this isn't the right way to do it, but sqlt does not
+            # support quoting properly to be signaled about this
+            $rel_table = $$rel_table if ref $rel_table eq 'SCALAR';
+
             my $reverse_rels = $source->reverse_relationship_info($rel);
             my ($otherrelname, $otherrelationship) = each %{$reverse_rels};
 
             # Force the order of @cond to match the order of ->add_columns
             my $idx;
-            my %other_columns_idx = map {'foreign.'.$_ => ++$idx } $othertable->columns;            
+            my %other_columns_idx = map {'foreign.'.$_ => ++$idx } $relsource->columns;
             my @cond = sort { $other_columns_idx{$a} cmp $other_columns_idx{$b} } keys(%{$rel_info->{cond}}); 
 
             # Get the key information, mapping off the foreign/self markers
@@ -179,7 +196,7 @@
                     if ($fk_constraint) {
                         $cascade->{$c} = $rel_info->{attrs}{"on_$c"};
                     }
-                    else {
+                    elsif ( $rel_info->{attrs}{"on_$c"} ) {
                         carp "SQLT attribute 'on_$c' was supplied for relationship '$moniker/$rel', which does not appear to be a foreign constraint. "
                             . "If you are sure that SQLT must generate a constraint for this relationship, add 'is_foreign_key_constraint => 1' to the attributes.\n";
                     }
@@ -189,47 +206,53 @@
                 }
             }
 
-            if($rel_table)
-            {
+            if($rel_table) {
                 # Constraints are added only if applicable
                 next unless $fk_constraint;
 
                 # Make sure we don't create the same foreign key constraint twice
-                my $key_test = join("\x00", @keys);
+                my $key_test = join("\x00", sort @keys);
                 next if $created_FK_rels{$rel_table}->{$key_test};
 
                 if (scalar(@keys)) {
-
                   $created_FK_rels{$rel_table}->{$key_test} = 1;
 
                   my $is_deferrable = $rel_info->{attrs}{is_deferrable};
 
-                  # do not consider deferrable constraints and self-references
-                  # for dependency calculations
+                  # calculate dependencies: do not consider deferrable constraints and
+                  # self-references for dependency calculations
                   if (! $is_deferrable and $rel_table ne $table_name) {
                     $tables{$table_name}{foreign_table_deps}{$rel_table}++;
                   }
 
                   $table->add_constraint(
-                                    type             => 'foreign_key',
-                                    name             => join('_', $table_name, 'fk', @keys),
-                                    fields           => \@keys,
-                                    reference_fields => \@refkeys,
-                                    reference_table  => $rel_table,
-                                    on_delete        => uc ($cascade->{delete} || ''),
-                                    on_update        => uc ($cascade->{update} || ''),
-                                    (defined $is_deferrable ? ( deferrable => $is_deferrable ) : ()),
+                    type             => 'foreign_key',
+                    name             => join('_', $table_name, 'fk', @keys),
+                    fields           => \@keys,
+                    reference_fields => \@refkeys,
+                    reference_table  => $rel_table,
+                    on_delete        => uc ($cascade->{delete} || ''),
+                    on_update        => uc ($cascade->{update} || ''),
+                    (defined $is_deferrable ? ( deferrable => $is_deferrable ) : ()),
                   );
 
                   # global parser_args add_fk_index param can be overridden on the rel def
                   my $add_fk_index_rel = (exists $rel_info->{attrs}{add_fk_index}) ? $rel_info->{attrs}{add_fk_index} : $add_fk_index;
 
+                  # Check that we do not create an index identical to the PK index
+                  # (some RDBMS croak on this, and it generally doesn't make much sense)
+                  # NOTE: we do not sort the key columns because the order of
+                  # columns is important for indexes and two indexes with the
+                  # same cols but different order are allowed and sometimes
+                  # needed
+                  next if join("\x00", @keys) eq join("\x00", @primary);
+
                   if ($add_fk_index_rel) {
                       my $index = $table->add_index(
-                                                    name   => join('_', $table_name, 'idx', @keys),
-                                                    fields => \@keys,
-                                                    type   => 'NORMAL',
-                                                    );
+                          name   => join('_', $table_name, 'idx', @keys),
+                          fields => \@keys,
+                          type   => 'NORMAL',
+                      );
                   }
               }
             }
@@ -251,21 +274,42 @@
     ) {
       $schema->add_table ($tables{$table}{object});
       $tables{$table}{source} -> _invoke_sqlt_deploy_hook( $tables{$table}{object} );
+
+      # the hook might have already removed the table
+      if ($schema->get_table($table) && $table =~ /^ \s* \( \s* SELECT \s+/ix) {
+        warn <<'EOW';
+
+Custom SQL through ->name(\'( SELECT ...') is DEPRECATED, for more details see
+"Arbitrary SQL through a custom ResultSource" in DBIx::Class::Manual::Cookbook
+or http://search.cpan.org/dist/DBIx-Class/lib/DBIx/Class/Manual/Cookbook.pod
+
+EOW
+
+        # remove the table as there is no way someone might want to
+        # actually deploy this
+        $schema->drop_table ($table);
+      }
     }
 
-
     my %views;
-    foreach my $moniker (sort @view_monikers)
+    foreach my $moniker (sort keys %view_monikers)
     {
         my $source = $dbicschema->source($moniker);
         my $view_name = $source->name;
 
+        # FIXME - this isn't the right way to do it, but sqlt does not
+        # support quoting properly to be signaled about this
+        $view_name = $$view_name if ref $view_name eq 'SCALAR';
+
         # Skip custom query sources
         next if ref $view_name;
 
         # It's possible to have multiple DBIC sources using the same table
         next if $views{$view_name}++;
 
+        $dbicschema->throw_exception ("view $view_name is missing a view_definition")
+            unless $source->view_definition;
+
         my $view = $schema->add_view (
           name => $view_name,
           fields => [ $source->columns ],
@@ -337,11 +381,18 @@
  ## Standalone
  use MyApp::Schema;
  use SQL::Translator;
- 
+
  my $schema = MyApp::Schema->connect;
  my $trans  = SQL::Translator->new (
       parser      => 'SQL::Translator::Parser::DBIx::Class',
-      parser_args => { package => $schema },
+      parser_args => {
+          package => $schema,
+          add_fk_index => 0,
+          sources => [qw/
+            Artist
+            CD
+          /],
+      },
       producer    => 'SQLite',
      ) or die SQL::Translator->error;
  my $out = $trans->translate() or die $trans->error;
@@ -353,7 +404,7 @@
 C<SQL::Translator::Parser::DBIx::Class> reads a DBIx::Class schema,
 interrogates the columns, and stuffs it all in an $sqlt_schema object.
 
-It's primary use is in deploying database layouts described as a set
+Its primary use is in deploying database layouts described as a set
 of L<DBIx::Class> classes, to a database. To do this, see
 L<DBIx::Class::Schema/deploy>.
 
@@ -363,14 +414,34 @@
 have SQL::Translator installed. To do this see
 L<DBIx::Class::Schema/create_ddl_dir>.
 
+=head1 PARSER OPTIONS
+
+=head2 add_fk_index
+
+Create an index for each foreign key.
+Enabled by default, as having indexed foreign key columns is normally the
+sensible thing to do.
+
+=head2 sources
+
+=over 4
+
+=item Arguments: \@class_names
+
+=back
+
+Limit the number of parsed sources by supplying an explicit list of source names.
+
 =head1 SEE ALSO
 
 L<SQL::Translator>, L<DBIx::Class::Schema>
 
 =head1 AUTHORS
 
-Jess Robinson
+See L<DBIx::Class/CONTRIBUTORS>.
 
-Matt S Trout
+=head1 LICENSE
 
-Ash Berlin
+You may distribute this code under the same terms as Perl itself.
+
+=cut
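
The new parser options can also be fed through the usual deployment
helpers; a sketch with made-up version and directory values:

 $schema->create_ddl_dir(
   ['SQLite'],
   '0.1',      # version to stamp the generated files with
   './sql',
   undef,      # no preversion, so no diff files are produced
   {
     parser_args => {
       add_fk_index => 0,
       sources      => [qw/ Artist CD /],
     },
   },
 );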

Modified: DBIx-Class/0.08/branches/run_file_against_storage/lib/SQL/Translator/Producer/DBIx/Class/File.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/lib/SQL/Translator/Producer/DBIx/Class/File.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/lib/SQL/Translator/Producer/DBIx/Class/File.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -128,7 +128,7 @@
                 $tableextras{$table->name} .= "\n__PACKAGE__->belongs_to('" .
                     $cont->fields->[0]->name . "', '" .
                     "${dbixschema}::" . $cont->reference_table . "');\n";
-                
+
                 my $other = "\n__PACKAGE__->has_many('" .
                     "get_" . $table->name. "', '" .
                     "${dbixschema}::" . $table->name. "', '" .

Modified: DBIx-Class/0.08/branches/run_file_against_storage/maint/gen-schema.pl
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/maint/gen-schema.pl	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/maint/gen-schema.pl	2010-02-16 10:26:12 UTC (rev 8720)
@@ -8,4 +8,10 @@
 use SQL::Translator;
 
 my $schema = DBICTest::Schema->connect;
-print scalar ($schema->storage->deployment_statements($schema, 'SQLite'));
+print scalar ($schema->storage->deployment_statements(
+  $schema,
+  'SQLite',
+  undef,
+  undef,
+  { producer_args => { no_transaction => 1 } }
+));
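
The same $sqltargs hash is honoured by the deployment methods proper,
so an equivalent no-transaction run can be requested directly
(illustrative only):

  $schema->deploy({ producer_args => { no_transaction => 1 } });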

Modified: DBIx-Class/0.08/branches/run_file_against_storage/maint/svn-log.perl
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/maint/svn-log.perl	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/maint/svn-log.perl	2010-02-16 10:26:12 UTC (rev 8720)
@@ -17,8 +17,8 @@
 use XML::Parser;
 
 my %month = qw(
-	Jan 01 Feb 02 Mar 03 Apr 04 May 05 Jun 06
-	Jul 07 Aug 08 Sep 09 Oct 10 Nov 11 Dec 12
+ Jan 01 Feb 02 Mar 03 Apr 04 May 05 Jun 06
+ Jul 07 Aug 08 Sep 09 Oct 10 Nov 11 Dec 12
 );
 
 $Text::Wrap::huge     = "wrap";
@@ -48,28 +48,28 @@
 GetOptions(
   "age=s"      => \$days_back,
   "repo=s"     => \$svn_repo,
-	"help"       => \$send_help,
+  "help"       => \$send_help,
 ) or exit;
 
 # Find the trunk for the current repository if one isn't specified.
 unless (defined $svn_repo) {
-	$svn_repo = `svn info . | grep '^URL: '`;
-	if (length $svn_repo) {
-		chomp $svn_repo;
-		$svn_repo =~ s{^URL\:\s+(.+?)/trunk/?.*$}{$1};
-	}
-	else {
-		$send_help = 1;
-	}
+  $svn_repo = `svn info . | grep '^URL: '`;
+  if (length $svn_repo) {
+    chomp $svn_repo;
+    $svn_repo =~ s{^URL\:\s+(.+?)/trunk/?.*$}{$1};
+  }
+  else {
+    $send_help = 1;
+  }
 }
 
 die(
-	"$0 usage:\n",
-	"  --repo REPOSITORY\n",
-	"  [--age DAYS]\n",
-	"\n",
-	"REPOSITORY must have a trunk subdirectory and a tags directory where\n",
-	"release tags are kept.\n",
+  "$0 usage:\n",
+  "  --repo REPOSITORY\n",
+  "  [--age DAYS]\n",
+  "\n",
+  "REPOSITORY must have a trunk subdirectory and a tags directory where\n",
+  "release tags are kept.\n",
 ) if $send_help;
 
 my $earliest_date = strftime "%F", gmtime(time() - $days_back * 86400);
@@ -81,31 +81,31 @@
 
 open(TAG, "svn -v list $svn_repo/tags|") or die $!;
 while (<TAG>) {
-	# The date is unused, however.
-	next unless (
-		my ($rev, $date, $tag) = m{
-			(\d+).*?(\S\S\S\s+\d\d\s+(?:\d\d\d\d|\d\d:\d\d))\s+(v[0-9_.]+)
-		}x
-	);
+  # The date is unused, however.
+  next unless (
+    my ($rev, $date, $tag) = m{
+      (\d+).*?(\S\S\S\s+\d\d\s+(?:\d\d\d\d|\d\d:\d\d))\s+(v[0-9_.]+)
+    }x
+  );
 
-	my @tag_log = gather_log("$svn_repo/tags/$tag", "--stop-on-copy");
-	die "Tag $tag has changes after tagging!\n" if @tag_log > 1;
+  my @tag_log = gather_log("$svn_repo/tags/$tag", "--stop-on-copy");
+  die "Tag $tag has changes after tagging!\n" if @tag_log > 1;
 
-	my $timestamp = $tag_log[0][LOG_DATE];
-	$tag{$timestamp} = [
-		$rev,     # TAG_REV
-		$tag,     # TAG_TAG
-		[ ],      # TAG_LOG
-	];
+  my $timestamp = $tag_log[0][LOG_DATE];
+  $tag{$timestamp} = [
+    $rev,     # TAG_REV
+    $tag,     # TAG_TAG
+    [ ],      # TAG_LOG
+  ];
 }
 close TAG;
 
 # Fictitious "HEAD" tag for revisions that came after the last tag.
 
 $tag{+MAX_TIMESTAMP} = [
-	"HEAD",         # TAG_REV
-	"(untagged)",   # TAG_TAG
-	undef,          # TAG_LOG
+  "HEAD",         # TAG_REV
+  "(untagged)",   # TAG_TAG
+  undef,          # TAG_LOG
 ];
 
 ### 2. Gather the log for the trunk.  Place log entries under their
@@ -114,184 +114,184 @@
 my @tag_dates = sort keys %tag;
 while (my $date = pop(@tag_dates)) {
 
-	# We're done if this date's before our earliest date.
-	if ($date lt $earliest_date) {
-		delete $tag{$date};
-		next;
-	}
+  # We're done if this date's before our earliest date.
+  if ($date lt $earliest_date) {
+    delete $tag{$date};
+    next;
+  }
 
-	my $tag = $tag{$date}[TAG_TAG];
-	#warn "Gathering information for tag $tag...\n";
+  my $tag = $tag{$date}[TAG_TAG];
+  #warn "Gathering information for tag $tag...\n";
 
-	my $this_rev = $tag{$date}[TAG_REV];
-	my $prev_rev;
-	if (@tag_dates) {
-		$prev_rev = $tag{$tag_dates[-1]}[TAG_REV];
-	}
-	else {
-		$prev_rev = 0;
-	}
+  my $this_rev = $tag{$date}[TAG_REV];
+  my $prev_rev;
+  if (@tag_dates) {
+    $prev_rev = $tag{$tag_dates[-1]}[TAG_REV];
+  }
+  else {
+    $prev_rev = 0;
+  }
 
-	my @log = gather_log("$svn_repo/trunk", "-r", "$this_rev:$prev_rev");
+  my @log = gather_log("$svn_repo/trunk", "-r", "$this_rev:$prev_rev");
 
-	$tag{$date}[TAG_LOG] = \@log;
+  $tag{$date}[TAG_LOG] = \@log;
 }
 
 ### 3. PROFIT!  No, wait... generate the nice log file.
 
 foreach my $timestamp (sort { $b cmp $a } keys %tag) {
-	my $tag_rec = $tag{$timestamp};
+  my $tag_rec = $tag{$timestamp};
 
-	# Skip this tag if there are no log entries.
-	next unless @{$tag_rec->[TAG_LOG]};
+  # Skip this tag if there are no log entries.
+  next unless @{$tag_rec->[TAG_LOG]};
 
-	my $tag_line = "$timestamp $tag_rec->[TAG_TAG]";
-	my $tag_bar  = "=" x length($tag_line);
-	print $tag_bar, "\n", $tag_line, "\n", $tag_bar, "\n\n";
+  my $tag_line = "$timestamp $tag_rec->[TAG_TAG]";
+  my $tag_bar  = "=" x length($tag_line);
+  print $tag_bar, "\n", $tag_line, "\n", $tag_bar, "\n\n";
 
-	foreach my $log_rec (@{$tag_rec->[TAG_LOG]}) {
+  foreach my $log_rec (@{$tag_rec->[TAG_LOG]}) {
 
-		my @paths = @{$log_rec->[LOG_PATHS]};
-		if (@paths > 1) {
-			@paths = grep {
-				$_->[PATH_PATH] ne "/trunk" or $_->[PATH_ACTION] ne "M"
-			} @paths;
-		}
+    my @paths = @{$log_rec->[LOG_PATHS]};
+    if (@paths > 1) {
+      @paths = grep {
+        $_->[PATH_PATH] ne "/trunk" or $_->[PATH_ACTION] ne "M"
+      } @paths;
+    }
 
-		my $time_line = wrap(
-			"  ", "  ",
-			join(
-				"; ",
-				"$log_rec->[LOG_DATE] (r$log_rec->[LOG_REV]) by $log_rec->[LOG_WHO]",
-				map { "$_->[PATH_PATH] $_->[PATH_ACTION]" } @paths
-			)
-		);
+    my $time_line = wrap(
+      "  ", "  ",
+      join(
+        "; ",
+        "$log_rec->[LOG_DATE] (r$log_rec->[LOG_REV]) by $log_rec->[LOG_WHO]",
+        map { "$_->[PATH_PATH] $_->[PATH_ACTION]" } @paths
+      )
+    );
 
-		if ($time_line =~ /\n/) {
-			$time_line = wrap(
-				"  ", "  ",
-				"$log_rec->[LOG_DATE] (r$log_rec->[LOG_REV]) by $log_rec->[LOG_WHO]\n"
-			) .
-			wrap(
-				"  ", "  ",
-				join(
-					"; ",
-					map { "$_->[PATH_PATH] $_->[PATH_ACTION]" } @paths
-				)
-			);
-		}
+    if ($time_line =~ /\n/) {
+      $time_line = wrap(
+        "  ", "  ",
+        "$log_rec->[LOG_DATE] (r$log_rec->[LOG_REV]) by $log_rec->[LOG_WHO]\n"
+      ) .
+      wrap(
+        "  ", "  ",
+        join(
+          "; ",
+          map { "$_->[PATH_PATH] $_->[PATH_ACTION]" } @paths
+        )
+      );
+    }
 
-		print $time_line, "\n\n";
+    print $time_line, "\n\n";
 
-		# Blank lines should have the indent level of whitespace.  This
-		# makes it easier for other utilities to parse them.
+    # Blank lines should have the indent level of whitespace.  This
+    # makes it easier for other utilities to parse them.
 
-		my @paragraphs = split /\n\s*\n/, $log_rec->[LOG_MESSAGE];
-		foreach my $paragraph (@paragraphs) {
+    my @paragraphs = split /\n\s*\n/, $log_rec->[LOG_MESSAGE];
+    foreach my $paragraph (@paragraphs) {
 
-			# Trim off identical leading space from every line.
-			my ($whitespace) = $paragraph =~ /^(\s*)/;
-			if (length $whitespace) {
-				$paragraph =~ s/^$whitespace//mg;
-			}
+      # Trim off identical leading space from every line.
+      my ($whitespace) = $paragraph =~ /^(\s*)/;
+      if (length $whitespace) {
+        $paragraph =~ s/^$whitespace//mg;
+      }
 
-			# Re-flow the paragraph if it isn't indented from the norm.
-			# This should preserve indented quoted text, wiki-style.
-			unless ($paragraph =~ /^\s/) {
-				$paragraph = fill("    ", "    ", $paragraph);
-			}
-		}
+      # Re-flow the paragraph if it isn't indented from the norm.
+      # This should preserve indented quoted text, wiki-style.
+      unless ($paragraph =~ /^\s/) {
+        $paragraph = fill("    ", "    ", $paragraph);
+      }
+    }
 
-		print join("\n    \n", @paragraphs), "\n\n";
-	}
+    print join("\n    \n", @paragraphs), "\n\n";
+  }
 }
 
 print(
-	"==============\n",
-	"End of Excerpt\n",
-	"==============\n",
+  "==============\n",
+  "End of Excerpt\n",
+  "==============\n",
 );
 
 ### Z. Helper functions.
 
 sub gather_log {
-	my ($url, @flags) = @_;
+  my ($url, @flags) = @_;
 
-	my (@log, @stack);
+  my (@log, @stack);
 
-	my $parser = XML::Parser->new(
-		Handlers => {
-			Start => sub {
-				my ($self, $tag, %att) = @_;
-				push @stack, [ $tag, \%att ];
-				if ($tag eq "logentry") {
-					push @log, [ ];
-					$log[-1][LOG_WHO] = "(nobody)";
-				}
-			},
-			Char  => sub {
-				my ($self, $text) = @_;
-				$stack[-1][1]{0} .= $text;
-			},
-			End => sub {
-				my ($self, $tag) = @_;
-				die "close $tag w/out open" unless @stack;
-				my ($pop_tag, $att) = @{pop @stack};
+  my $parser = XML::Parser->new(
+    Handlers => {
+      Start => sub {
+        my ($self, $tag, %att) = @_;
+        push @stack, [ $tag, \%att ];
+        if ($tag eq "logentry") {
+          push @log, [ ];
+          $log[-1][LOG_WHO] = "(nobody)";
+        }
+      },
+      Char  => sub {
+        my ($self, $text) = @_;
+        $stack[-1][1]{0} .= $text;
+      },
+      End => sub {
+        my ($self, $tag) = @_;
+        die "close $tag w/out open" unless @stack;
+        my ($pop_tag, $att) = @{pop @stack};
 
-				die "$tag ne $pop_tag" if $tag ne $pop_tag;
+        die "$tag ne $pop_tag" if $tag ne $pop_tag;
 
-				if ($tag eq "date") {
-					my $timestamp = $att->{0};
-					my ($date, $time) = split /[T.]/, $timestamp;
-					$log[-1][LOG_DATE] = "$date $time";
-					return;
-				}
+        if ($tag eq "date") {
+          my $timestamp = $att->{0};
+          my ($date, $time) = split /[T.]/, $timestamp;
+          $log[-1][LOG_DATE] = "$date $time";
+          return;
+        }
 
-				if ($tag eq "logentry") {
-					$log[-1][LOG_REV] = $att->{revision};
-					return;
-				}
+        if ($tag eq "logentry") {
+          $log[-1][LOG_REV] = $att->{revision};
+          return;
+        }
 
-				if ($tag eq "msg") {
-					$log[-1][LOG_MESSAGE] = $att->{0};
-					return;
-				}
+        if ($tag eq "msg") {
+          $log[-1][LOG_MESSAGE] = $att->{0};
+          return;
+        }
 
-				if ($tag eq "author") {
-					$log[-1][LOG_WHO] = $att->{0};
-					return;
-				}
+        if ($tag eq "author") {
+          $log[-1][LOG_WHO] = $att->{0};
+          return;
+        }
 
-				if ($tag eq "path") {
-					my $path = $att->{0};
-					$path =~ s{^/trunk/}{};
-					push(
-						@{$log[-1][LOG_PATHS]}, [
-							$path,            # PATH_PATH
-							$att->{action},   # PATH_ACTION
-						]
-					);
+        if ($tag eq "path") {
+          my $path = $att->{0};
+          $path =~ s{^/trunk/}{};
+          push(
+            @{$log[-1][LOG_PATHS]}, [
+              $path,            # PATH_PATH
+              $att->{action},   # PATH_ACTION
+            ]
+          );
 
-					$log[-1][LOG_PATHS][-1][PATH_CPF_PATH] = $att->{"copyfrom-path"} if (
-						exists $att->{"copyfrom-path"}
-					);
+          $log[-1][LOG_PATHS][-1][PATH_CPF_PATH] = $att->{"copyfrom-path"} if (
+            exists $att->{"copyfrom-path"}
+          );
 
-					$log[-1][LOG_PATHS][-1][PATH_CPF_REV] = $att->{"copyfrom-rev"} if (
-						exists $att->{"copyfrom-rev"}
-					);
-					return;
-				}
+          $log[-1][LOG_PATHS][-1][PATH_CPF_REV] = $att->{"copyfrom-rev"} if (
+            exists $att->{"copyfrom-rev"}
+          );
+          return;
+        }
 
-			}
-		}
-	);
+      }
+    }
+  );
 
-	my $cmd = "svn -v --xml @flags log $url";
-	#warn "Command: $cmd\n";
+  my $cmd = "svn -v --xml @flags log $url";
+  #warn "Command: $cmd\n";
 
-	open(LOG, "$cmd|") or die $!;
-	$parser->parse(*LOG);
-	close LOG;
+  open(LOG, "$cmd|") or die $!;
+  $parser->parse(*LOG);
+  close LOG;
 
-	return @log;
+  return @log;
 }

Modified: DBIx-Class/0.08/branches/run_file_against_storage/script/dbicadmin
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/script/dbicadmin	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/script/dbicadmin	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,221 +1,95 @@
 #!/usr/bin/perl
+
 use strict;
 use warnings;
 
-use Getopt::Long;
-use Pod::Usage;
-use JSON::Any;
-
-
-my $json = JSON::Any->new(allow_barekey => 1, allow_singlequote => 1);
-
-GetOptions(
-    'schema=s'  => \my $schema_class,
-    'class=s'   => \my $resultset_class,
-    'connect=s' => \my $connect,
-    'op=s'      => \my $op,
-    'set=s'     => \my $set,
-    'where=s'   => \my $where,
-    'attrs=s'   => \my $attrs,
-    'format=s'  => \my $format,
-    'force'     => \my $force,
-    'trace'     => \my $trace,
-    'quiet'     => \my $quiet,
-    'help'      => \my $help,
-    'tlibs'      => \my $t_libs,
-);
-
-if ($t_libs) {
-    unshift( @INC, 't/lib', 'lib' );
+BEGIN {
+  use DBIx::Class;
+  die (  "The following modules are required for the dbicadmin utility\n"
+       . DBIx::Class::Optional::Dependencies->req_missing_for ('admin_script')
+  ) unless DBIx::Class::Optional::Dependencies->req_ok_for ('admin_script');
 }
 
-pod2usage(1) if ($help);
-$ENV{DBIX_CLASS_STORAGE_DBI_DEBUG} = 1 if ($trace);
+use Getopt::Long::Descriptive;
+use DBIx::Class::Admin;
 
-die('No op specified') if(!$op);
-die('Invalid op') if ($op!~/^insert|update|delete|select$/s);
-my $csv_class;
-if ($op eq 'select') {
-    $format ||= 'tsv';
-    die('Invalid format') if ($format!~/^tsv|csv$/s);
-    $csv_class = 'Text::CSV_XS';
-    eval{ require Text::CSV_XS };
-    if ($@) {
-        $csv_class = 'Text::CSV_PP';
-        eval{ require Text::CSV_PP };
-        die('The select op requires either the Text::CSV_XS or the Text::CSV_PP module') if ($@);
-    }
-}
-
-die('No schema specified') if(!$schema_class);
-eval("require $schema_class");
-die('Unable to load schema') if ($@);
-$connect = $json->jsonToObj( $connect ) if ($connect);
-my $schema = $schema_class->connect(
-    ( $connect ? @$connect : () )
+my ($opts, $usage) = describe_options(
+  "%c: %o",
+  (
+    ['Actions'],
+    ["action" => hidden => { one_of => [
+      ['create|c' => 'Create version diffs; needs --preversion',],
+      ['upgrade|u' => 'Upgrade the database to the current schema'],
+      ['install|i' => 'Install the schema to the database',],
+      ['deploy|d' => 'Deploy the schema to the database',],
+      ['select|s'   => 'Select data from the schema', ],
+      ['insert|i'   => 'Insert data into the schema', ],
+      ['update|u'   => 'Update data in the schema', ],
+      ['delete|D'   => 'Delete data from the schema',],
+      ['op:s' => 'compatibility option; all of the above can be supplied as --op=<action>'],
+      ['help|h' => 'display this help'],
+    ], required=> 1 }],
+    ['Options'],
+    ['schema-class|schema|C:s' => 'The class of the schema to load', { required => 1 } ],
+    ['resultset|resultset_class|class|r:s' => 'The resultset to operate on for data manipulation' ],
+    ['config-stanza|S:s' => 'Where in the config to find the connection_info; supply in the form MyApp::Model::DB',],
+    ['config|f:s' => 'Supply the config file for parsing by Config::Any', { depends => 'config_stanza'} ],
+    ['connect-info|n:s%' => 'Supply the connect info as additional options, e.g. -n dsn=<dsn> user=<user> password=<pass>'],
+    ['connect:s' => 'Supply the connect info as a JSON string' ],
+    ['sql-dir|q:s' => 'The directory where sql diffs will be created'],
+    ['sql-type|t:s' => 'The RDBMS flavour you wish to use'],
+    ['version|v:i' => 'Supply a version to install'],
+    ['preversion|p:s' => 'The previous version to diff against',],
+    ['set:s' => 'JSON data used to perform data operations' ],
+    ['lib|I:s' => 'Additional library path to search in'],
+    ['attrs:s' => 'JSON string to be used as the second argument to search'],
+    ['where:s' => 'JSON string to be used for the where clause of search'],
+    ['force' => 'Be forceful with some operations'],
+    ['trace' => 'Turn on DBIx::Class trace output'],
+    ['quiet' => 'Be less verbose'],
+  )
 );
 
-die('No class specified') if(!$resultset_class);
-my $resultset = eval{ $schema->resultset($resultset_class) };
-die('Unable to load the class with the schema') if ($@);
+die "please only use one of --config or --connect-info\n" if ($opts->{config} and $opts->{connect_info});
 
-$set = $json->jsonToObj( $set ) if ($set);
-$where = $json->jsonToObj( $where ) if ($where);
-$attrs = $json->jsonToObj( $attrs ) if ($attrs);
-
-if ($op eq 'insert') {
-    die('Do not use the where option with the insert op') if ($where);
-    die('Do not use the attrs option with the insert op') if ($attrs);
-    my $obj = $resultset->create( $set );
-    print ''.ref($resultset).' ID: '.join(',',$obj->id())."\n" if (!$quiet);
+# option compatibility mangle
+if($opts->{connect}) {
+  $opts->{connect_info} = delete $opts->{connect};
 }
-elsif ($op eq 'update') {
-    $resultset = $resultset->search( ($where||{}) );
-    my $count = $resultset->count();
-    print "This action will modify $count ".ref($resultset)." records.\n" if (!$quiet);
-    if ( $force || confirm() ) {
-        $resultset->update_all( $set );
-    }
-}
-elsif ($op eq 'delete') {
-    die('Do not use the set option with the delete op') if ($set);
-    $resultset = $resultset->search( ($where||{}), ($attrs||()) );
-    my $count = $resultset->count();
-    print "This action will delete $count ".ref($resultset)." records.\n" if (!$quiet);
-    if ( $force || confirm() ) {
-        $resultset->delete_all();
-    }
-}
-elsif ($op eq 'select') {
-    die('Do not use the set option with the select op') if ($set);
-    my $csv = $csv_class->new({
-        sep_char => ( $format eq 'tsv' ? "\t" : ',' ),
-    });
-    $resultset = $resultset->search( ($where||{}), ($attrs||()) );
-    my @columns = $resultset->result_source->columns();
-    $csv->combine( @columns );
-    print $csv->string()."\n";
-    while (my $row = $resultset->next()) {
-        my @fields;
-        foreach my $column (@columns) {
-            push( @fields, $row->get_column($column) );
-        }
-        $csv->combine( @fields );
-        print $csv->string()."\n";
-    }
-}
 
-sub confirm {
-    print "Are you sure you want to do this? (type YES to confirm) ";
-    my $response = <STDIN>;
-    return 1 if ($response=~/^YES/);
-    return;
-}
+my $admin = DBIx::Class::Admin->new( %$opts );
 
-__END__
 
-=head1 NAME
+my $action = $opts->{action};
 
-dbicadmin - Execute operations upon DBIx::Class objects.
+$action = $opts->{op} if ($action eq 'op');
 
-=head1 SYNOPSIS
+print "Performig action $action...\n";
 
-  dbicadmin --op=insert --schema=My::Schema --class=Class --set=JSON
-  dbicadmin --op=update --schema=My::Schema --class=Class --set=JSON --where=JSON
-  dbicadmin --op=delete --schema=My::Schema --class=Class --where=JSON
-  dbicadmin --op=select --schema=My::Schema --class=Class --where=JSON --format=tsv
+my $res = $admin->$action();
+if ($action eq 'select') {
 
-=head1 DESCRIPTION
+  my $format = $opts->{format} || 'tsv';
+  die('Invalid format') if ($format !~ /^(?:tsv|csv)$/);
 
-This utility provides the ability to run INSERTs, UPDATEs, 
-DELETEs, and SELECTs on any DBIx::Class object.
+  require Text::CSV;
 
-=head1 OPTIONS
+  my $csv = Text::CSV->new({
+    sep_char => ( $format eq 'tsv' ? "\t" : ',' ),
+  });
 
-=head2 op
+  foreach my $row (@$res) {
+    $csv->combine( @$row );
+    print $csv->string()."\n";
+  }
+}
 
-The type of operation.  Valid values are insert, update, delete, 
-and select.
-
-=head2 schema
-
-The name of your schema class.
-
-=head2 class
-
-The name of the class, within your schema, that you want to run 
-the operation on.
-
-=head2 connect
-
-A JSON array to be passed to your schema class upon connecting.  
-The array will need to be compatible with whatever the DBIC 
-->connect() method requires.
-
-=head2 set
-
-This option must be valid JSON data string and is passed in to 
-the DBIC update() method.  Use this option with the update 
-and insert ops.
-
-=head2 where
-
-This option must be valid JSON data string and is passed in as 
-the first argument to the DBIC search() method.  Use this 
-option with the update, delete, and select ops.
-
-=head2 attrs
-
-This option must be valid JSON data string and is passed in as 
-the second argument to the DBIC search() method.  Use this 
-option with the update, delete, and select ops.
-
-=head2 help
-
-Display this help page.
-
-=head2 force
-
-Suppresses the confirmation dialogues that are usually displayed 
-when someone runs a DELETE or UPDATE action.
-
-=head2 quiet
-
-Do not display status messages.
-
-=head2 trace
-
-Turns on tracing on the DBI storage, thus printing SQL as it is 
-executed.
-
-=head2 tlibs
-
-This option is purely for testing during the DBIC installation.  Do 
-not use it.
-
-=head1 JSON
-
-JSON is a lightweight data-interchange format.  It allows you 
-to express complex data structures for use in the where and 
-set options.
-
-This module turns on L<JSON>'s BareKey and QuotApos options so 
-that your data can look a bit more readable.
-
-  --where={"this":"that"} # generic JSON
-  --where={this:'that'}   # with BareKey and QuoteApos
-
-Consider wrapping your JSON in outer quotes so that you don't 
-have to escape your inner quotes.
-
-  --where={this:\"that\"} # no outer quote
-  --where='{this:"that"}' # outer quoted
-
 =head1 AUTHOR
 
-Aran Deltac <bluefeet at cpan.org>
+See L<DBIx::Class/CONTRIBUTORS>.
 
 =head1 LICENSE
 
-You may distribute this code under the same terms as Perl itself.
+You may distribute this code under the same terms as Perl itself.
 
+=cut
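
By way of illustration, the rewritten option handling accepts
invocations along these lines (DSN and schema name are placeholders):

  dbicadmin --schema-class My::Schema \
            --connect '["dbi:SQLite:my.db", "", ""]' \
            --deploy

  dbicadmin --schema-class My::Schema \
            --connect '["dbi:SQLite:my.db", "", ""]' \
            --resultset Artist --select \
            --where '{ "name": "Fred" }'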

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/02pod.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/02pod.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/02pod.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,6 +1,27 @@
+use warnings;
+use strict;
+
 use Test::More;
+use lib qw(t/lib);
+use DBICTest;
 
-eval "use Test::Pod 1.14";
-plan skip_all => 'Test::Pod 1.14 required' if $@;
+my @MODULES = (
+  'Test::Pod 1.26',
+);
 
+# Don't run tests for installs
+unless ( DBICTest::AuthorCheck->is_author || $ENV{AUTOMATED_TESTING} || $ENV{RELEASE_TESTING} ) {
+  plan( skip_all => "Author tests not required for installation" );
+}
+
+# Load the testing modules
+foreach my $MODULE ( @MODULES ) {
+  eval "use $MODULE";
+  if ( $@ ) {
+    $ENV{RELEASE_TESTING}
+    ? die( "Failed to load required release-testing module $MODULE" )
+    : plan( skip_all => "$MODULE not available for testing" );
+  }
+}
+
 all_pod_files_ok();

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/03podcoverage.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/03podcoverage.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/03podcoverage.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,20 +1,39 @@
+use warnings;
+use strict;
+
 use Test::More;
+use List::Util ();
+use lib qw(t/lib);
+use DBICTest;
 
-eval "use Pod::Coverage 0.19";
-plan skip_all => 'Pod::Coverage 0.19 required' if $@;
-eval "use Test::Pod::Coverage 1.04";
-plan skip_all => 'Test::Pod::Coverage 1.04 required' if $@;
+my @MODULES = (
+  'Test::Pod::Coverage 1.08',
+  'Pod::Coverage 0.20',
+);
 
-plan skip_all => 'set TEST_POD to enable this test'
-  unless ($ENV{TEST_POD} || -e 'MANIFEST.SKIP');
+# Don't run tests for installs
+unless ( DBICTest::AuthorCheck->is_author || $ENV{AUTOMATED_TESTING} || $ENV{RELEASE_TESTING} ) {
+  plan( skip_all => "Author tests not required for installation" );
+}
 
-my @modules = sort { $a cmp $b } (Test::Pod::Coverage::all_modules());
-plan tests => scalar(@modules);
+# Load the testing modules
+foreach my $MODULE ( @MODULES ) {
+  eval "use $MODULE";
+  if ( $@ ) {
+    $ENV{RELEASE_TESTING}
+    ? die( "Failed to load required release-testing module $MODULE" )
+    : plan( skip_all => "$MODULE not available for testing" );
+  }
+}
 
 # Since this is about checking documentation, a little documentation
-# of what this is doing might be in order...
+# of what this is doing might be in order.
 # The exceptions structure below is a hash keyed by the module
-# name.  The value for each is a hash, which contains one or more
+# name. Any * in a name is treated like a wildcard and will behave
+# as expected. Modules are matched by longest string first, so 
+# A::B::C will match even if there is A::B*
+
+# The value for each is a hash, which contains one or more
 # (although currently more than one makes no sense) of the following
 # things:-
 #   skip   => a true value means this module is not checked
@@ -22,128 +41,115 @@
 #             do not need to be documented.
 my $exceptions = {
     'DBIx::Class' => {
-        ignore => [
-            qw/MODIFY_CODE_ATTRIBUTES
-              component_base_class
-              mk_classdata
-              mk_classaccessor/
-        ]
+        ignore => [qw/
+            MODIFY_CODE_ATTRIBUTES
+            component_base_class
+            mk_classdata
+            mk_classaccessor
+        /]
     },
     'DBIx::Class::Row' => {
-        ignore => [
-           qw( MULTICREATE_DEBUG )
-        ],
+        ignore => [qw/
+            MULTICREATE_DEBUG
+        /],
     },
     'DBIx::Class::ResultSource' => {
         ignore => [qw/
-          compare_relationship_keys
-          pk_depends_on
-          resolve_condition
-          resolve_join
-          resolve_prefetch
+            compare_relationship_keys
+            pk_depends_on
+            resolve_condition
+            resolve_join
+            resolve_prefetch
         /],
     },
+    'DBIx::Class::ResultSourceHandle' => {
+        ignore => [qw/
+            schema
+            source_moniker
+        /],
+    },
     'DBIx::Class::Storage' => {
-        ignore => [
-            qw(cursor)
-        ]
+        ignore => [qw/
+            schema
+            cursor
+        /]
     },
     'DBIx::Class::Schema' => {
-        ignore => [
-            qw(setup_connection_class)
-        ]
+        ignore => [qw/
+            setup_connection_class
+        /]
     },
-    'DBIx::Class::Storage::DBI::Sybase' => {
-        ignore => [
-            qw/should_quote_data_type/,
-        ]
+
+    'DBIx::Class::Schema::Versioned' => {
+        ignore => [ qw/
+            connection
+        /]
     },
-    'DBIx::Class::CDBICompat::AccessorMapping'          => { skip => 1 },
-    'DBIx::Class::CDBICompat::AbstractSearch' => {
-        ignore => [qw(search_where)]
+
+    'DBIx::Class::Storage::DBI::Replicated*'        => {
+        ignore => [ qw/
+            connect_call_do_sql
+            disconnect_call_do_sql
+        /]
     },
-    'DBIx::Class::CDBICompat::AttributeAPI'             => { skip => 1 },
-    'DBIx::Class::CDBICompat::AutoUpdate'               => { skip => 1 },
-    'DBIx::Class::CDBICompat::ColumnsAsHash' => {
-        ignore => [qw(inflate_result new update)]
-    },
-    'DBIx::Class::CDBICompat::ColumnCase'               => { skip => 1 },
-    'DBIx::Class::CDBICompat::ColumnGroups'             => { skip => 1 },
-    'DBIx::Class::CDBICompat::Constraints'              => { skip => 1 },
-    'DBIx::Class::CDBICompat::Constructor'              => { skip => 1 },
-    'DBIx::Class::CDBICompat::Copy' => {
-        ignore => [qw(copy)]
-    },
-    'DBIx::Class::CDBICompat::DestroyWarning'           => { skip => 1 },
-    'DBIx::Class::CDBICompat::GetSet'                   => { skip => 1 },
-    'DBIx::Class::CDBICompat::HasA'                     => { skip => 1 },
-    'DBIx::Class::CDBICompat::HasMany'                  => { skip => 1 },
-    'DBIx::Class::CDBICompat::ImaDBI'                   => { skip => 1 },
-    'DBIx::Class::CDBICompat::LazyLoading'              => { skip => 1 },
-    'DBIx::Class::CDBICompat::LiveObjectIndex'          => { skip => 1 },
-    'DBIx::Class::CDBICompat::MightHave'                => { skip => 1 },
-    'DBIx::Class::CDBICompat::NoObjectIndex'            => { skip => 1 },
-    'DBIx::Class::CDBICompat::Pager'                    => { skip => 1 },
-    'DBIx::Class::CDBICompat::ReadOnly'                 => { skip => 1 },
-    'DBIx::Class::CDBICompat::Relationship'             => { skip => 1 },
-    'DBIx::Class::CDBICompat::Relationships'            => { skip => 1 },
-    'DBIx::Class::CDBICompat::Retrieve'                 => { skip => 1 },
-    'DBIx::Class::CDBICompat::SQLTransformer'           => { skip => 1 },
-    'DBIx::Class::CDBICompat::Stringify'                => { skip => 1 },
-    'DBIx::Class::CDBICompat::TempColumns'              => { skip => 1 },
-    'DBIx::Class::CDBICompat::Triggers'                 => { skip => 1 },
-    'DBIx::Class::ClassResolver::PassThrough'           => { skip => 1 },
-    'DBIx::Class::Componentised'                        => { skip => 1 },
-    'DBIx::Class::Relationship::Accessor'               => { skip => 1 },
-    'DBIx::Class::Relationship::BelongsTo'              => { skip => 1 },
-    'DBIx::Class::Relationship::CascadeActions'         => { skip => 1 },
-    'DBIx::Class::Relationship::HasMany'                => { skip => 1 },
-    'DBIx::Class::Relationship::HasOne'                 => { skip => 1 },
-    'DBIx::Class::Relationship::Helpers'                => { skip => 1 },
-    'DBIx::Class::Relationship::ManyToMany'             => { skip => 1 },
-    'DBIx::Class::Relationship::ProxyMethods'           => { skip => 1 },
-    'DBIx::Class::ResultSetProxy'                       => { skip => 1 },
-    'DBIx::Class::ResultSetManager'                     => { skip => 1 },
-    'DBIx::Class::ResultSourceProxy'                    => { skip => 1 },
-    'DBIx::Class::Storage::DBI'                         => { skip => 1 },
-    'DBIx::Class::Storage::DBI::DB2'                    => { skip => 1 },
-    'DBIx::Class::Storage::DBI::MSSQL'                  => { skip => 1 },
-    'DBIx::Class::Storage::DBI::Sybase::MSSQL'          => { skip => 1 },
-    'DBIx::Class::Storage::DBI::ODBC400'                => { skip => 1 },
-    'DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL'      => { skip => 1 },
-    'DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server' => { skip => 1 },
-    'DBIx::Class::Storage::DBI::Oracle'                 => { skip => 1 },
-    'DBIx::Class::Storage::DBI::Pg'                     => { skip => 1 },
-    'DBIx::Class::Storage::DBI::SQLite'                 => { skip => 1 },
-    'DBIx::Class::Storage::DBI::mysql'                  => { skip => 1 },
-    'DBIx::Class::SQLAHacks::MySQL'                     => { skip => 1 },
-    'SQL::Translator::Parser::DBIx::Class'              => { skip => 1 },
-    'SQL::Translator::Producer::DBIx::Class::File'      => { skip => 1 },
 
-# skipped because the synopsis covers it clearly
+    'DBIx::Class::Admin::Types'                     => { skip => 1 },
+    'DBIx::Class::ClassResolver::PassThrough'       => { skip => 1 },
+    'DBIx::Class::Componentised'                    => { skip => 1 },
+    'DBIx::Class::Relationship::*'                  => { skip => 1 },
+    'DBIx::Class::ResultSetProxy'                   => { skip => 1 },
+    'DBIx::Class::ResultSourceProxy'                => { skip => 1 },
+    'DBIx::Class::Storage::Statistics'              => { skip => 1 },
+    'DBIx::Class::Storage::DBI::Replicated::Types'  => { skip => 1 },
 
-    'DBIx::Class::InflateColumn::File'                  => { skip => 1 },
+# test some specific components whose parents are exempt below
+    'DBIx::Class::Relationship::Base'               => {},
 
-# skip connection since it's just an override
+# internals
+    'DBIx::Class::SQLAHacks*'                       => { skip => 1 },
+    'DBIx::Class::Storage::DBI*'                    => { skip => 1 },
+    'SQL::Translator::*'                            => { skip => 1 },
 
-    'DBIx::Class::Schema::Versioned' => { ignore => [ qw(connection) ] },
+# deprecated / backcompat stuff
+    'DBIx::Class::CDBICompat*'                      => { skip => 1 },
+    'DBIx::Class::ResultSetManager'                 => { skip => 1 },
+    'DBIx::Class::DB'                               => { skip => 1 },
 
-# don't bother since it's heavily deprecated
-    'DBIx::Class::ResultSetManager' => { skip => 1 },
+# skipped because the synopsis covers it clearly
+    'DBIx::Class::InflateColumn::File'              => { skip => 1 },
 };
 
+my $ex_lookup = {};
+for my $string (keys %$exceptions) {
+  my $ex = $exceptions->{$string};
+  $string =~ s/\*/'.*?'/ge;
+  my $re = qr/^$string$/;
+  $ex_lookup->{$re} = $ex;
+}
+
+my @modules = sort { $a cmp $b } (Test::Pod::Coverage::all_modules());
+
 foreach my $module (@modules) {
-  SKIP:
-    {
-        skip "$module - No real methods", 1 if ($exceptions->{$module}{skip});
+  SKIP: {
 
-        # build parms up from ignore list
-        my $parms = {};
-        $parms->{trustme} =
-          [ map { qr/^$_$/ } @{ $exceptions->{$module}{ignore} } ]
-          if exists($exceptions->{$module}{ignore});
+    my ($match) = List::Util::first
+      { $module =~ $_ }
+      (sort { length $b <=> length $a || $b cmp $a } (keys %$ex_lookup) )
+    ;
 
-        # run the test with the potentially modified parm set
-        pod_coverage_ok($module, $parms, "$module POD coverage");
-    }
+    my $ex = $match ? $ex_lookup->{$match} : undef;
+
+    skip ("$module exempt", 1) if ($ex->{skip});
+
+    # build parms up from ignore list
+    my $parms = {};
+    $parms->{trustme} =
+      [ map { qr/^$_$/ } @{ $ex->{ignore} } ]
+        if exists($ex->{ignore});
+
+    # run the test with the potentially modified parm set
+    pod_coverage_ok($module, $parms, "$module POD coverage");
+  }
 }
+
+done_testing;
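
The new exception table uses shell-style globs: each key is compiled into an anchored regex, and the longest (most specific) pattern is tried first, which is how an explicit entry like DBIx::Class::Relationship::Base overrides the broader Relationship::* glob. A minimal standalone sketch of the same lookup, with hypothetical module names:

use strict;
use warnings;
use List::Util 'first';

my %exceptions = (
    'My::App::Storage*'      => { skip => 1 },   # broad glob
    'My::App::Storage::Base' => {},              # specific override
);

# compile each glob key into an anchored regex
my %lookup;
for my $glob (keys %exceptions) {
    (my $pat = $glob) =~ s/\*/.*?/g;             # '*' => non-greedy wildcard
    $lookup{ qr/^$pat$/ } = $exceptions{$glob};
}

for my $module ('My::App::Storage::Base', 'My::App::Storage::DBI') {
    # longest stringified pattern first, so the specific entry wins
    my $match = first { $module =~ $_ }
        sort { length $b <=> length $a } keys %lookup;
    my $ex = $match ? $lookup{$match} : {};
    printf "%-25s => %s\n", $module, $ex->{skip} ? 'skipped' : 'tested';
}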

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/05components.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/05components.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/05components.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -7,8 +7,6 @@
 use lib qw(t/lib);
 use DBICTest::ForeignComponent;
 
-plan tests => 6;
-
 #   Tests if foreign component was loaded by calling foreign's method
 ok( DBICTest::ForeignComponent->foreign_test_method, 'foreign component' );
 
@@ -35,32 +33,7 @@
     'inject_base filters duplicates'
 );
 
-# Test for a warning with incorrect order in load_components
-my @warnings = ();
-{
-  package A::Test;
-  our @ISA = 'DBIx::Class';
-  {
-    local $SIG{__WARN__} = sub { push @warnings, shift};
-    __PACKAGE__->load_components(qw(Core UTF8Columns));
-  }
-}
-like( $warnings[0], qr/Core loaded before UTF8Columns/,
-      'warning issued for incorrect order in load_components()' );
-is( scalar @warnings, 1,
-    'only one warning issued for incorrect load_components call' );
-
-# Test that no warning is issued for the correct order in load_components
-{
-  @warnings = ();
-  package B::Test;
-  our @ISA = 'DBIx::Class';
-  {
-    local $SIG{__WARN__} = sub { push @warnings, shift };
-    __PACKAGE__->load_components(qw(UTF8Columns Core));
-  }
-}
-is( scalar @warnings, 0,
-    'warning not issued for correct order in load_components()' );
-
 use_ok('DBIx::Class::AccessorGroup');
+use_ok('DBIx::Class::Componentised');
+
+done_testing;
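
A recurring change throughout this commit: the fixed "plan tests => N" headers are dropped in favour of done_testing(), so adding or removing assertions no longer requires recounting the plan. The idiom in isolation (available since Test::More 0.88, if memory serves):

use strict;
use warnings;
use Test::More;            # note: no 'tests => N' argument

ok( 1 + 1 == 2, 'arithmetic still works' );
is( uc('foo'), 'FOO', 'uc() behaves' );

# the plan is derived from the assertions actually run
done_testing;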

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/06notabs.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/06notabs.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/06notabs.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,30 @@
+use warnings;
+use strict;
+
+use Test::More;
+use lib 't/lib';
+use DBICTest;
+
+my @MODULES = (
+  'Test::NoTabs 0.9',
+);
+
+plan skip_all => 'Does not work with done_testing, temp disabled';
+
+# Don't run tests for installs
+unless ( DBICTest::AuthorCheck->is_author || $ENV{AUTOMATED_TESTING} || $ENV{RELEASE_TESTING} ) {
+  plan( skip_all => "Author tests not required for installation" );
+}
+# Load the testing modules
+foreach my $MODULE ( @MODULES ) {
+  eval "use $MODULE";
+  if ( $@ ) {
+    $ENV{RELEASE_TESTING}
+    ? die( "Failed to load required release-testing module $MODULE" )
+    : plan( skip_all => "$MODULE not available for testing" );
+  }
+}
+
+all_perl_files_ok(qw/t lib script maint/);
+
+done_testing;
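
Both whitespace-policy tests follow the conventional author-test gate: skipped entirely for ordinary installs, with a missing checker module fatal only under RELEASE_TESTING. A generic sketch of the gate, using the AUTHOR_TESTING env var in place of the DBICTest::AuthorCheck helper used above:

use strict;
use warnings;
use Test::More;

# run only for the author, smoke testers, or release builds
unless ( $ENV{AUTHOR_TESTING} || $ENV{AUTOMATED_TESTING} || $ENV{RELEASE_TESTING} ) {
    plan skip_all => 'Author tests not required for installation';
}

my $checker = 'Test::NoTabs';               # any author-time checker
eval "use $checker; 1" or do {
    # absence is fatal only when cutting a release
    $ENV{RELEASE_TESTING}
        ? die "Failed to load required release-testing module $checker: $@"
        : plan( skip_all => "$checker not available for testing" );
};

all_perl_files_ok(qw(lib t));               # exported by Test::NoTabs

Note there is no done_testing() in this sketch: the checker declares its own plan when none is set, which is presumably why the two files above are temporarily disabled rather than converted outright.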

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/07eol.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/07eol.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/07eol.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,33 @@
+use warnings;
+use strict;
+
+use Test::More;
+use lib 't/lib';
+use DBICTest;
+
+my @MODULES = (
+  'Test::EOL 0.6',
+);
+
+plan skip_all => 'Does not work with done_testing, temp disabled';
+
+# Don't run tests for installs
+unless ( DBICTest::AuthorCheck->is_author || $ENV{AUTOMATED_TESTING} || $ENV{RELEASE_TESTING} ) {
+  plan( skip_all => "Author tests not required for installation" );
+}
+# Load the testing modules
+foreach my $MODULE ( @MODULES ) {
+  eval "use $MODULE";
+  if ( $@ ) {
+    $ENV{RELEASE_TESTING}
+    ? die( "Failed to load required release-testing module $MODULE" )
+    : plan( skip_all => "$MODULE not available for testing" );
+  }
+}
+
+TODO: {
+  local $TODO = 'Do not fix those yet - we have way too many branches out there, merging will be hell';
+  all_perl_files_ok({ trailing_whitespace => 1}, qw/t lib script maint/);
+}
+
+done_testing;
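
The trailing-whitespace check is wrapped in a TODO block: the assertions still run and are reported, but failures are marked expected and do not fail the suite — exactly what you want while outstanding branches would conflict with a mass cleanup. The idiom on its own:

use strict;
use warnings;
use Test::More;

pass( 'normal assertions fail the suite as usual' );

TODO: {
    local $TODO = 'cleanup postponed until the branches merge';
    # reported as "not ok ... # TODO", but does not fail the run
    ok( 0, 'no trailing whitespace anywhere' );
}

done_testing;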

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/100populate.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/100populate.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/100populate.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -5,9 +5,8 @@
 use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
+use Path::Class::File ();
 
-plan tests => 23;
-
 my $schema = DBICTest->init_schema();
 
 # The map below generates stuff like:
@@ -116,3 +115,205 @@
 is($link7->url, undef, 'Link 7 url');
 is($link7->title, 'gtitle', 'Link 7 title');
 
+my $rs = $schema->resultset('Artist');
+$rs->delete;
+
+# test _execute_array_empty (insert_bulk with all literal sql)
+
+$rs->populate([
+    (+{
+        name => \"'DT'",
+        rank => \500,
+        charfield => \"'mtfnpy'",
+    }) x 5
+]);
+
+is((grep {
+  $_->name eq 'DT' &&
+  $_->rank == 500  &&
+  $_->charfield eq 'mtfnpy'
+} $rs->all), 5, 'populate with all literal SQL');
+
+$rs->delete;
+
+# test mixed binds with literal sql
+
+$rs->populate([
+    (+{
+        name => \"'DT'",
+        rank => 500,
+        charfield => \"'mtfnpy'",
+    }) x 5
+]);
+
+is((grep {
+  $_->name eq 'DT' &&
+  $_->rank == 500  &&
+  $_->charfield eq 'mtfnpy'
+} $rs->all), 5, 'populate with all literal SQL');
+
+$rs->delete;
+
+###
+
+throws_ok {
+    $rs->populate([
+        {
+            artistid => 1,
+            name => 'foo1',
+        },
+        {
+            artistid => 'foo', # this dies
+            name => 'foo2',
+        },
+        {
+            artistid => 3,
+            name => 'foo3',
+        },
+    ]);
+} qr/slice/, 'bad slice';
+
+is($rs->count, 0, 'populate is atomic');
+
+# Trying to use a column marked as a bind in the first slice with literal sql in
+# a later slice should throw.
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => 1,
+      name => \"'foo'",
+    },
+    {
+      artistid => \2,
+      name => \"'foo'",
+    }
+  ]);
+} qr/bind expected/, 'literal sql where bind expected throws';
+
+# ... and vice-versa.
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => \1,
+      name => \"'foo'",
+    },
+    {
+      artistid => 2,
+      name => \"'foo'",
+    }
+  ]);
+} qr/literal SQL expected/i, 'bind where literal sql expected throws';
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => 1,
+      name => \"'foo'",
+    },
+    {
+      artistid => 2,
+      name => \"'bar'",
+    }
+  ]);
+} qr/inconsistent/, 'literal sql must be the same in all slices';
+
+# the stringification has nothing to do with the artist name
+# this is solely for testing consistency
+my $fn = Path::Class::File->new ('somedir/somefilename.tmp');
+my $fn2 = Path::Class::File->new ('somedir/someotherfilename.tmp');
+
+lives_ok {
+  $rs->populate([
+    {
+      name => 'supplied before stringifying object',
+    },
+    {
+      name => $fn,
+    }
+  ]);
+} 'stringifying objects pass through';
+
+# ... and vice-versa.
+
+lives_ok {
+  $rs->populate([
+    {
+      name => $fn2,
+    },
+    {
+      name => 'supplied after stringifying object',
+    },
+  ]);
+} 'stringifying objects pass through';
+
+for (
+  $fn,
+  $fn2,
+  'supplied after stringifying object',
+  'supplied before stringifying object'
+) {
+  my $row = $rs->find ({name => $_});
+  ok ($row, "Stringification test row '$_' properly inserted");
+}
+
+$rs->delete;
+
+# test stringification with ->create rather than Storage::insert_bulk as well
+
+lives_ok {
+  my @dummy = $rs->populate([
+    {
+      name => 'supplied before stringifying object',
+    },
+    {
+      name => $fn,
+    }
+  ]);
+} 'stringifying objects pass through';
+
+# ... and vice-versa.
+
+lives_ok {
+  my @dummy = $rs->populate([
+    {
+      name => $fn2,
+    },
+    {
+      name => 'supplied after stringifying object',
+    },
+  ]);
+} 'stringifying objects pass through';
+
+for (
+  $fn,
+  $fn2,
+  'supplied after stringifying object',
+  'supplied before stringifying object'
+) {
+  my $row = $rs->find ({name => $_});
+  ok ($row, "Stringification test row '$_' properly inserted");
+}
+
+lives_ok {
+   $schema->resultset('TwoKeys')->populate([{
+      artist => 1,
+      cd     => 5,
+      fourkeys_to_twokeys => [{
+            f_foo => 1,
+            f_bar => 1,
+            f_hello => 1,
+            f_goodbye => 1,
+            autopilot => 'a',
+      },{
+            f_foo => 2,
+            f_bar => 2,
+            f_hello => 2,
+            f_goodbye => 2,
+            autopilot => 'b',
+      }]
+   }])
+} 'multicol-PK has_many populate works';
+
+done_testing;
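
The new populate() tests lean on the scalar-reference convention: a plain value becomes a bind parameter, while a reference to a string is spliced into the statement verbatim — which is also why the literal SQL must be identical across all slices once the rows are funnelled into a single prepared bulk insert. A self-contained sketch of the convention itself, in plain DBI (assumes DBD::SQLite; table and values are illustrative):

use strict;
use warnings;
use DBI;

my $dbh = DBI->connect( 'dbi:SQLite:dbname=:memory:', '', '', { RaiseError => 1 } );
$dbh->do( 'CREATE TABLE artist ( name TEXT, rank INTEGER )' );

# \'...' marks literal SQL, a plain scalar an ordinary bind
my %row = ( name => 'DT', rank => \'500' );

my ( @cols, @frags, @binds );
for my $col ( sort keys %row ) {
    push @cols, $col;
    if ( ref $row{$col} eq 'SCALAR' ) {
        push @frags, ${ $row{$col} };   # inlined verbatim
    }
    else {
        push @frags, '?';               # placeholder + bind
        push @binds, $row{$col};
    }
}

my $sql = sprintf 'INSERT INTO artist (%s) VALUES (%s)',
    join( ', ', @cols ), join( ', ', @frags );

$dbh->do( $sql, undef, @binds );
# => INSERT INTO artist (name, rank) VALUES (?, 500)

print $dbh->selectrow_array( 'SELECT rank FROM artist' ), "\n";   # 500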

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/101populate_rs.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/101populate_rs.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/101populate_rs.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -15,17 +15,17 @@
 use lib qw(t/lib);
 use DBICTest;
 
-plan tests => 142;
 
-
 ## ----------------------------------------------------------------------------
 ## Get a Schema and some ResultSets we can play with.
 ## ----------------------------------------------------------------------------
 
-my $schema	= DBICTest->init_schema();
-my $art_rs	= $schema->resultset('Artist');
-my $cd_rs	= $schema->resultset('CD');
+my $schema  = DBICTest->init_schema();
+my $art_rs  = $schema->resultset('Artist');
+my $cd_rs  = $schema->resultset('CD');
 
+my $restricted_art_rs  = $art_rs->search({rank => 42});
+
 ok( $schema, 'Got a Schema object');
 ok( $art_rs, 'Got Good Artist Resultset');
 ok( $cd_rs, 'Got Good CD Resultset');
@@ -37,87 +37,87 @@
 
 SCHEMA_POPULATE1: {
 
-	## Test to make sure that the old $schema->populate is using the new method
-	## for $resultset->populate when in void context and with sub objects.
-	
-	$schema->populate('Artist', [
-	
-		[qw/name cds/],
-		["001First Artist", [
-			{title=>"001Title1", year=>2000},
-			{title=>"001Title2", year=>2001},
-			{title=>"001Title3", year=>2002},
-		]],
-		["002Second Artist", []],
-		["003Third Artist", [
-			{title=>"003Title1", year=>2005},
-		]],
-		[undef, [
-			{title=>"004Title1", year=>2010}
-		]],
-	]);
-	
-	isa_ok $schema, 'DBIx::Class::Schema';
-	
-	my ($undef, $artist1, $artist2, $artist3 ) = $schema->resultset('Artist')->search({
-		name=>["001First Artist","002Second Artist","003Third Artist", undef]},
-		{order_by=>'name ASC'})->all;
-	
-	isa_ok  $artist1, 'DBICTest::Artist';
-	isa_ok  $artist2, 'DBICTest::Artist';
-	isa_ok  $artist3, 'DBICTest::Artist';
-	isa_ok  $undef, 'DBICTest::Artist';	
-	
-	ok $artist1->name eq '001First Artist', "Got Expected Artist Name for Artist001";
-	ok $artist2->name eq '002Second Artist', "Got Expected Artist Name for Artist002";
-	ok $artist3->name eq '003Third Artist', "Got Expected Artist Name for Artist003";
-	ok !defined $undef->name, "Got Expected Artist Name for Artist004";	
-	
-	ok $artist1->cds->count eq 3, "Got Right number of CDs for Artist1";
-	ok $artist2->cds->count eq 0, "Got Right number of CDs for Artist2";
-	ok $artist3->cds->count eq 1, "Got Right number of CDs for Artist3";
-	ok $undef->cds->count eq 1, "Got Right number of CDs for Artist4";	
-	
-	ARTIST1CDS: {
-	
-		my ($cd1, $cd2, $cd3) = $artist1->cds->search(undef, {order_by=>'year ASC'});
-		
-		isa_ok $cd1, 'DBICTest::CD';
-		isa_ok $cd2, 'DBICTest::CD';
-		isa_ok $cd3, 'DBICTest::CD';
-		
-		ok $cd1->year == 2000;
-		ok $cd2->year == 2001;
-		ok $cd3->year == 2002;
-		
-		ok $cd1->title eq '001Title1';
-		ok $cd2->title eq '001Title2';
-		ok $cd3->title eq '001Title3';
-	}
-	
-	ARTIST3CDS: {
-	
-		my ($cd1) = $artist3->cds->search(undef, {order_by=>'year ASC'});
-		
-		isa_ok $cd1, 'DBICTest::CD';
+  ## Test to make sure that the old $schema->populate is using the new method
+  ## for $resultset->populate when in void context and with sub objects.
 
-		ok $cd1->year == 2005;
-		ok $cd1->title eq '003Title1';
-	}
+  $schema->populate('Artist', [
 
-	ARTIST4CDS: {
-	
-		my ($cd1) = $undef->cds->search(undef, {order_by=>'year ASC'});
-		
-		isa_ok $cd1, 'DBICTest::CD';
+    [qw/name cds/],
+    ["001First Artist", [
+      {title=>"001Title1", year=>2000},
+      {title=>"001Title2", year=>2001},
+      {title=>"001Title3", year=>2002},
+    ]],
+    ["002Second Artist", []],
+    ["003Third Artist", [
+      {title=>"003Title1", year=>2005},
+    ]],
+    [undef, [
+      {title=>"004Title1", year=>2010}
+    ]],
+  ]);
 
-		ok $cd1->year == 2010;
-		ok $cd1->title eq '004Title1';
-	}
-	
-	## Need to do some cleanup so that later tests don't get borked
-	
-	$undef->delete;
+  isa_ok $schema, 'DBIx::Class::Schema';
+
+  my ($undef, $artist1, $artist2, $artist3 ) = $schema->resultset('Artist')->search({
+    name=>["001First Artist","002Second Artist","003Third Artist", undef]},
+    {order_by=>'name ASC'})->all;
+
+  isa_ok  $artist1, 'DBICTest::Artist';
+  isa_ok  $artist2, 'DBICTest::Artist';
+  isa_ok  $artist3, 'DBICTest::Artist';
+  isa_ok  $undef, 'DBICTest::Artist';  
+
+  ok $artist1->name eq '001First Artist', "Got Expected Artist Name for Artist001";
+  ok $artist2->name eq '002Second Artist', "Got Expected Artist Name for Artist002";
+  ok $artist3->name eq '003Third Artist', "Got Expected Artist Name for Artist003";
+  ok !defined $undef->name, "Got Expected Artist Name for Artist004";  
+
+  ok $artist1->cds->count eq 3, "Got Right number of CDs for Artist1";
+  ok $artist2->cds->count eq 0, "Got Right number of CDs for Artist2";
+  ok $artist3->cds->count eq 1, "Got Right number of CDs for Artist3";
+  ok $undef->cds->count eq 1, "Got Right number of CDs for Artist4";  
+
+  ARTIST1CDS: {
+
+    my ($cd1, $cd2, $cd3) = $artist1->cds->search(undef, {order_by=>'year ASC'});
+
+    isa_ok $cd1, 'DBICTest::CD';
+    isa_ok $cd2, 'DBICTest::CD';
+    isa_ok $cd3, 'DBICTest::CD';
+
+    ok $cd1->year == 2000;
+    ok $cd2->year == 2001;
+    ok $cd3->year == 2002;
+
+    ok $cd1->title eq '001Title1';
+    ok $cd2->title eq '001Title2';
+    ok $cd3->title eq '001Title3';
+  }
+
+  ARTIST3CDS: {
+
+    my ($cd1) = $artist3->cds->search(undef, {order_by=>'year ASC'});
+
+    isa_ok $cd1, 'DBICTest::CD';
+
+    ok $cd1->year == 2005;
+    ok $cd1->title eq '003Title1';
+  }
+
+  ARTIST4CDS: {
+
+    my ($cd1) = $undef->cds->search(undef, {order_by=>'year ASC'});
+
+    isa_ok $cd1, 'DBICTest::CD';
+
+    ok $cd1->year == 2010;
+    ok $cd1->title eq '004Title1';
+  }
+
+  ## Need to do some cleanup so that later tests don't get borked
+
+  $undef->delete;
 }
 
 
@@ -127,212 +127,224 @@
 
 ARRAY_CONTEXT: {
 
-	## These first set of tests are cake because array context just delegates
-	## all it's processing to $resultset->create
-	
-	HAS_MANY_NO_PKS: {
-	
-		## This first group of tests checks to make sure we can call populate
-		## with the parent having many children and let the keys be automatic
+  ## This first set of tests is cake because array context just delegates
+  ## all its processing to $resultset->create
 
-		my $artists = [
-			{	
-				name => 'Angsty-Whiny Girl',
-				cds => [
-					{ title => 'My First CD', year => 2006 },
-					{ title => 'Yet More Tweeny-Pop crap', year => 2007 },
-				],					
-			},		
-			{
-				name => 'Manufactured Crap',
-			},
-			{
-				name => 'Like I Give a Damn',
-				cds => [
-					{ title => 'My parents sold me to a record company' ,year => 2005 },
-					{ title => 'Why Am I So Ugly?', year => 2006 },
-					{ title => 'I Got Surgery and am now Popular', year => 2007 }				
-				],
-			},
-			{	
-				name => 'Formerly Named',
-				cds => [
-					{ title => 'One Hit Wonder', year => 2006 },
-				],					
-			},			
-		];
-		
-		## Get the result row objects.
-		
-		my ($girl, $crap, $damn, $formerly) = $art_rs->populate($artists);
-		
-		## Do we have the right object?
-		
-		isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");	
-		isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");	
-		
-		## Find the expected information?
+  HAS_MANY_NO_PKS: {
 
-		ok( $crap->name eq 'Manufactured Crap', "Got Correct name for result object");
-		ok( $girl->name eq 'Angsty-Whiny Girl', "Got Correct name for result object");
-		ok( $damn->name eq 'Like I Give a Damn', "Got Correct name for result object");	
-		ok( $formerly->name eq 'Formerly Named', "Got Correct name for result object");
-		
-		## Create the expected children sub objects?
-		
-		ok( $crap->cds->count == 0, "got Expected Number of Cds");
-		ok( $girl->cds->count == 2, "got Expected Number of Cds");	
-		ok( $damn->cds->count == 3, "got Expected Number of Cds");
-		ok( $formerly->cds->count == 1, "got Expected Number of Cds");
+    ## This first group of tests checks to make sure we can call populate
+    ## with the parent having many children and let the keys be automatic
 
-		## Did the cds get expected information?
-		
-		my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year ASC'});
-		
-		ok( $cd1->title eq "My First CD", "Got Expected CD Title");
-		ok( $cd2->title eq "Yet More Tweeny-Pop crap", "Got Expected CD Title");
-	}
-	
-	HAS_MANY_WITH_PKS: {
-	
-		## This group tests the ability to specify the PK in the parent and let
-		## DBIC transparently pass the PK down to the Child and also let's the
-		## child create any other needed PK's for itself.
-		
-		my $aid		=  $art_rs->get_column('artistid')->max || 0;
-		
-		my $first_aid = ++$aid;
-		
-		my $artists = [
-			{
-				artistid => $first_aid,
-				name => 'PK_Angsty-Whiny Girl',
-				cds => [
-					{ artist => $first_aid, title => 'PK_My First CD', year => 2006 },
-					{ artist => $first_aid, title => 'PK_Yet More Tweeny-Pop crap', year => 2007 },
-				],					
-			},		
-			{
-				artistid => ++$aid,
-				name => 'PK_Manufactured Crap',
-			},
-			{
-				artistid => ++$aid,
-				name => 'PK_Like I Give a Damn',
-				cds => [
-					{ title => 'PK_My parents sold me to a record company' ,year => 2005 },
-					{ title => 'PK_Why Am I So Ugly?', year => 2006 },
-					{ title => 'PK_I Got Surgery and am now Popular', year => 2007 }				
-				],
-			},
-			{
-				artistid => ++$aid,
-				name => 'PK_Formerly Named',
-				cds => [
-					{ title => 'PK_One Hit Wonder', year => 2006 },
-				],					
-			},			
-		];
-		
-		## Get the result row objects.
-		
-		my ($girl, $crap, $damn, $formerly) = $art_rs->populate($artists);
-		
-		## Do we have the right object?
-		
-		isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");	
-		isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");	
-		
-		## Find the expected information?
+    my $artists = [
+      {
+        name => 'Angsty-Whiny Girl',
+        cds => [
+          { title => 'My First CD', year => 2006 },
+          { title => 'Yet More Tweeny-Pop crap', year => 2007 },
+        ],
+      },
+      {
+        name => 'Manufactured Crap',
+      },
+      {
+        name => 'Like I Give a Damn',
+        cds => [
+          { title => 'My parents sold me to a record company' ,year => 2005 },
+          { title => 'Why Am I So Ugly?', year => 2006 },
+          { title => 'I Got Surgery and am now Popular', year => 2007 }
+        ],
+      },
+      {
+        name => 'Formerly Named',
+        cds => [
+          { title => 'One Hit Wonder', year => 2006 },
+        ],
+      },
+    ];
 
-		ok( $crap->name eq 'PK_Manufactured Crap', "Got Correct name for result object");
-		ok( $girl->name eq 'PK_Angsty-Whiny Girl', "Got Correct name for result object");
-		ok( $girl->artistid == $first_aid, "Got Correct artist PK for result object");		
-		ok( $damn->name eq 'PK_Like I Give a Damn', "Got Correct name for result object");	
-		ok( $formerly->name eq 'PK_Formerly Named', "Got Correct name for result object");
-		
-		## Create the expected children sub objects?
-		
-		ok( $crap->cds->count == 0, "got Expected Number of Cds");
-		ok( $girl->cds->count == 2, "got Expected Number of Cds");	
-		ok( $damn->cds->count == 3, "got Expected Number of Cds");
-		ok( $formerly->cds->count == 1, "got Expected Number of Cds");
+    ## Get the result row objects.
 
-		## Did the cds get expected information?
-		
-		my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year ASC'});
-		
-		ok( $cd1->title eq "PK_My First CD", "Got Expected CD Title");
-		ok( $cd2->title eq "PK_Yet More Tweeny-Pop crap", "Got Expected CD Title");
-	}
-	
-	BELONGS_TO_NO_PKs: {
+    my ($girl, $crap, $damn, $formerly) = $art_rs->populate($artists);
 
-		## Test from a belongs_to perspective, should create artist first, 
-		## then CD with artistid.  This test we let the system automatically
-		## create the PK's.  Chances are good you'll use it this way mostly.
-		
-		my $cds = [
-			{
-				title => 'Some CD3',
-				year => '1997',
-				artist => { name => 'Fred BloggsC'},
-			},
-			{
-				title => 'Some CD4',
-				year => '1997',
-				artist => { name => 'Fred BloggsD'},
-			},		
-		];
-		
-		my ($cdA, $cdB) = $cd_rs->populate($cds);
-		
+    ## Do we have the right object?
 
-		isa_ok($cdA, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdA->artist->name, 'Fred BloggsC', 'Set Artist to FredC');
+    isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");  
+    isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");
 
-		
-		isa_ok($cdB, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdB->artist->name, 'Fred BloggsD', 'Set Artist to FredD');
-	}
+    ## Find the expected information?
 
-	BELONGS_TO_WITH_PKs: {
+    ok( $crap->name eq 'Manufactured Crap', "Got Correct name for result object");
+    ok( $girl->name eq 'Angsty-Whiny Girl', "Got Correct name for result object");
+    ok( $damn->name eq 'Like I Give a Damn', "Got Correct name for result object");
+    ok( $formerly->name eq 'Formerly Named', "Got Correct name for result object");
 
-		## Test from a belongs_to perspective, should create artist first, 
-		## then CD with artistid.  This time we try setting the PK's
-		
-		my $aid	= $art_rs->get_column('artistid')->max || 0;
+    ## Create the expected children sub objects?
 
-		my $cds = [
-			{
-				title => 'Some CD3',
-				year => '1997',
-				artist => { artistid=> ++$aid, name => 'Fred BloggsC'},
-			},
-			{
-				title => 'Some CD4',
-				year => '1997',
-				artist => { artistid=> ++$aid, name => 'Fred BloggsD'},
-			},		
-		];
-		
-		my ($cdA, $cdB) = $cd_rs->populate($cds);
-		
-		isa_ok($cdA, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdA->artist->name, 'Fred BloggsC', 'Set Artist to FredC');
-		
-		isa_ok($cdB, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdB->artist->name, 'Fred BloggsD', 'Set Artist to FredD');
-		ok($cdB->artist->artistid == $aid, "Got Expected Artist ID");
-	}
+    ok( $crap->cds->count == 0, "got Expected Number of Cds");
+    ok( $girl->cds->count == 2, "got Expected Number of Cds");
+    ok( $damn->cds->count == 3, "got Expected Number of Cds");
+    ok( $formerly->cds->count == 1, "got Expected Number of Cds");
+
+    ## Did the cds get expected information?
+
+    my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year'});
+
+    ok( $cd1->title eq "My First CD", "Got Expected CD Title");
+    ok( $cd2->title eq "Yet More Tweeny-Pop crap", "Got Expected CD Title");
+  }
+
+  HAS_MANY_WITH_PKS: {
+
+    ## This group tests the ability to specify the PK in the parent, let
+    ## DBIC transparently pass the PK down to the child, and also let the
+    ## child create any other needed PKs for itself.
+
+    my $aid    =  $art_rs->get_column('artistid')->max || 0;
+
+    my $first_aid = ++$aid;
+
+    my $artists = [
+      {
+        artistid => $first_aid,
+        name => 'PK_Angsty-Whiny Girl',
+        cds => [
+          { artist => $first_aid, title => 'PK_My First CD', year => 2006 },
+          { artist => $first_aid, title => 'PK_Yet More Tweeny-Pop crap', year => 2007 },
+        ],
+      },
+      {
+        artistid => ++$aid,
+        name => 'PK_Manufactured Crap',
+      },
+      {
+        artistid => ++$aid,
+        name => 'PK_Like I Give a Damn',
+        cds => [
+          { title => 'PK_My parents sold me to a record company' ,year => 2005 },
+          { title => 'PK_Why Am I So Ugly?', year => 2006 },
+          { title => 'PK_I Got Surgery and am now Popular', year => 2007 }
+        ],
+      },
+      {
+        artistid => ++$aid,
+        name => 'PK_Formerly Named',
+        cds => [
+          { title => 'PK_One Hit Wonder', year => 2006 },
+        ],
+      },
+    ];
+
+    ## Get the result row objects.
+
+    my ($girl, $crap, $damn, $formerly) = $art_rs->populate($artists);
+
+    ## Do we have the right object?
+
+    isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");  
+    isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");
+
+    ## Find the expected information?
+
+    ok( $crap->name eq 'PK_Manufactured Crap', "Got Correct name for result object");
+    ok( $girl->name eq 'PK_Angsty-Whiny Girl', "Got Correct name for result object");
+    ok( $girl->artistid == $first_aid, "Got Correct artist PK for result object");
+    ok( $damn->name eq 'PK_Like I Give a Damn', "Got Correct name for result object");
+    ok( $formerly->name eq 'PK_Formerly Named', "Got Correct name for result object");
+
+    ## Create the expected children sub objects?
+
+    ok( $crap->cds->count == 0, "got Expected Number of Cds");
+    ok( $girl->cds->count == 2, "got Expected Number of Cds");  
+    ok( $damn->cds->count == 3, "got Expected Number of Cds");
+    ok( $formerly->cds->count == 1, "got Expected Number of Cds");
+
+    ## Did the cds get expected information?
+
+    my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year ASC'});
+
+    ok( $cd1->title eq "PK_My First CD", "Got Expected CD Title");
+    ok( $cd2->title eq "PK_Yet More Tweeny-Pop crap", "Got Expected CD Title");
+  }
+
+  BELONGS_TO_NO_PKs: {
+
+    ## Test from a belongs_to perspective: the artist should be created first,
+    ## then the CD with its artistid.  In this test we let the system automatically
+    ## create the PKs.  Chances are good you'll use it this way mostly.
+
+    my $cds = [
+      {
+        title => 'Some CD3',
+        year => '1997',
+        artist => { name => 'Fred BloggsC'},
+      },
+      {
+        title => 'Some CD4',
+        year => '1997',
+        artist => { name => 'Fred BloggsD'},
+      },    
+    ];
+
+    my ($cdA, $cdB) = $cd_rs->populate($cds);
+
+
+    isa_ok($cdA, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdA->artist->name, 'Fred BloggsC', 'Set Artist to FredC');
+
+
+    isa_ok($cdB, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdB->artist->name, 'Fred BloggsD', 'Set Artist to FredD');
+  }
+
+  BELONGS_TO_WITH_PKs: {
+
+    ## Test from a belongs_to perspective: the artist should be created first,
+    ## then the CD with its artistid.  This time we try setting the PKs
+
+    my $aid  = $art_rs->get_column('artistid')->max || 0;
+
+    my $cds = [
+      {
+        title => 'Some CD3',
+        year => '1997',
+        artist => { artistid=> ++$aid, name => 'Fred BloggsC'},
+      },
+      {
+        title => 'Some CD4',
+        year => '1997',
+        artist => { artistid=> ++$aid, name => 'Fred BloggsD'},
+      },    
+    ];
+
+    my ($cdA, $cdB) = $cd_rs->populate($cds);
+
+    isa_ok($cdA, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdA->artist->name, 'Fred BloggsC', 'Set Artist to FredC');
+
+    isa_ok($cdB, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdB->artist->name, 'Fred BloggsD', 'Set Artist to FredD');
+    ok($cdB->artist->artistid == $aid, "Got Expected Artist ID");
+  }
+
+  WITH_COND_FROM_RS: {
+
+    my ($more_crap) = $restricted_art_rs->populate([
+      {
+        name => 'More Manufactured Crap',
+      },
+    ]);
+
+    ## Did it use the condition in the resultset?
+    cmp_ok( $more_crap->rank, '==', 42, "Got Correct rank for result object");
+  } 
 }
 
 
@@ -342,265 +354,280 @@
 
 VOID_CONTEXT: {
 
-	## All these tests check the ability to use populate without asking for 
-	## any returned resultsets.  This uses bulk_insert as much as possible
-	## in order to increase speed.
-	
-	HAS_MANY_WITH_PKS: {
-	
-		## This first group of tests checks to make sure we can call populate
-		## with the parent having many children and the parent PK is set
+  ## All these tests check the ability to use populate without asking for 
+  ## any returned resultsets.  This uses bulk_insert as much as possible
+  ## in order to increase speed.
 
-		my $aid		=  $art_rs->get_column('artistid')->max || 0;
-		
-		my $first_aid = ++$aid;
-		
-		my $artists = [
-			{
-				artistid => $first_aid,
-				name => 'VOID_PK_Angsty-Whiny Girl',
-				cds => [
-					{ artist => $first_aid, title => 'VOID_PK_My First CD', year => 2006 },
-					{ artist => $first_aid, title => 'VOID_PK_Yet More Tweeny-Pop crap', year => 2007 },
-				],					
-			},		
-			{
-				artistid => ++$aid,
-				name => 'VOID_PK_Manufactured Crap',
-			},
-			{
-				artistid => ++$aid,
-				name => 'VOID_PK_Like I Give a Damn',
-				cds => [
-					{ title => 'VOID_PK_My parents sold me to a record company' ,year => 2005 },
-					{ title => 'VOID_PK_Why Am I So Ugly?', year => 2006 },
-					{ title => 'VOID_PK_I Got Surgery and am now Popular', year => 2007 }				
-				],
-			},
-			{
-				artistid => ++$aid,
-				name => 'VOID_PK_Formerly Named',
-				cds => [
-					{ title => 'VOID_PK_One Hit Wonder', year => 2006 },
-				],					
-			},	
-			{
-				artistid => ++$aid,
-				name => undef,
-				cds => [
-					{ title => 'VOID_PK_Zundef test', year => 2006 },
-				],					
-			},		
-		];
-		
-		## Get the result row objects.
-		
-		$art_rs->populate($artists);
-		
-		my ($undef, $girl, $formerly, $damn, $crap) = $art_rs->search(
-		
-			{name=>[ map { $_->{name} } @$artists]},
-			{order_by=>'name ASC'},
-		);
-		
-		## Do we have the right object?
-		
-		isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");	
-		isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");	
-		isa_ok( $undef, 'DBICTest::Artist', "Got 'Artist'");		
-	
-		## Find the expected information?
+  HAS_MANY_WITH_PKS: {
 
-		ok( $crap->name eq 'VOID_PK_Manufactured Crap', "Got Correct name 'VOID_PK_Manufactured Crap' for result object");
-		ok( $girl->name eq 'VOID_PK_Angsty-Whiny Girl', "Got Correct name for result object");
-		ok( $damn->name eq 'VOID_PK_Like I Give a Damn', "Got Correct name for result object");	
-		ok( $formerly->name eq 'VOID_PK_Formerly Named', "Got Correct name for result object");
-		ok( !defined $undef->name, "Got Correct name 'is undef' for result object");		
-		
-		## Create the expected children sub objects?
-		ok( $crap->can('cds'), "Has cds relationship");
-		ok( $girl->can('cds'), "Has cds relationship");
-		ok( $damn->can('cds'), "Has cds relationship");
-		ok( $formerly->can('cds'), "Has cds relationship");
-		ok( $undef->can('cds'), "Has cds relationship");	
-	
-		ok( $crap->cds->count == 0, "got Expected Number of Cds");
-		ok( $girl->cds->count == 2, "got Expected Number of Cds");	
-		ok( $damn->cds->count == 3, "got Expected Number of Cds");
-		ok( $formerly->cds->count == 1, "got Expected Number of Cds");
-		ok( $undef->cds->count == 1, "got Expected Number of Cds");
-		
-		## Did the cds get expected information?
-		
-		my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year ASC'});
-		
-		ok( $cd1->title eq "VOID_PK_My First CD", "Got Expected CD Title");
-		ok( $cd2->title eq "VOID_PK_Yet More Tweeny-Pop crap", "Got Expected CD Title");
-	}
-	
-	
-	BELONGS_TO_WITH_PKs: {
+    ## This first group of tests checks to make sure we can call populate
+    ## with the parent having many children and the parent PK is set
 
-		## Test from a belongs_to perspective, should create artist first, 
-		## then CD with artistid.  This time we try setting the PK's
-		
-		my $aid	= $art_rs->get_column('artistid')->max || 0;
+    my $aid = $art_rs->get_column('artistid')->max || 0;
 
-		my $cds = [
-			{
-				title => 'Some CD3B',
-				year => '1997',
-				artist => { artistid=> ++$aid, name => 'Fred BloggsCB'},
-			},
-			{
-				title => 'Some CD4B',
-				year => '1997',
-				artist => { artistid=> ++$aid, name => 'Fred BloggsDB'},
-			},		
-		];
-		
-		$cd_rs->populate($cds);
-		
-		my ($cdA, $cdB) = $cd_rs->search(
-			{title=>[sort map {$_->{title}} @$cds]},
-			{order_by=>'title ASC'},
-		);
-		
-		isa_ok($cdA, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdA->artist->name, 'Fred BloggsCB', 'Set Artist to FredCB');
-		
-		isa_ok($cdB, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdB->artist->name, 'Fred BloggsDB', 'Set Artist to FredDB');
-		ok($cdB->artist->artistid == $aid, "Got Expected Artist ID");
-	}
+    my $first_aid = ++$aid;
 
-	BELONGS_TO_NO_PKs: {
+    my $artists = [
+      {
+        artistid => $first_aid,
+        name => 'VOID_PK_Angsty-Whiny Girl',
+        cds => [
+          { artist => $first_aid, title => 'VOID_PK_My First CD', year => 2006 },
+          { artist => $first_aid, title => 'VOID_PK_Yet More Tweeny-Pop crap', year => 2007 },
+        ],
+      },
+      {
+        artistid => ++$aid,
+        name => 'VOID_PK_Manufactured Crap',
+      },
+      {
+        artistid => ++$aid,
+        name => 'VOID_PK_Like I Give a Damn',
+        cds => [
+          { title => 'VOID_PK_My parents sold me to a record company' ,year => 2005 },
+          { title => 'VOID_PK_Why Am I So Ugly?', year => 2006 },
+          { title => 'VOID_PK_I Got Surgery and am now Popular', year => 2007 }        
+        ],
+      },
+      {
+        artistid => ++$aid,
+        name => 'VOID_PK_Formerly Named',
+        cds => [
+          { title => 'VOID_PK_One Hit Wonder', year => 2006 },
+        ],
+      },
+      {
+        artistid => ++$aid,
+        name => undef,
+        cds => [
+          { title => 'VOID_PK_Zundef test', year => 2006 },
+        ],
+      },
+    ];
 
-		## Test from a belongs_to perspective, should create artist first, 
-		## then CD with artistid.
-				
-		my $cds = [
-			{
-				title => 'Some CD3BB',
-				year => '1997',
-				artist => { name => 'Fred BloggsCBB'},
-			},
-			{
-				title => 'Some CD4BB',
-				year => '1997',
-				artist => { name => 'Fred BloggsDBB'},
-			},
-			{
-				title => 'Some CD5BB',
-				year => '1997',
-				artist => { name => undef},
-			},		
-		];
-		
-		$cd_rs->populate($cds);
-		
-		my ($cdA, $cdB, $cdC) = $cd_rs->search(
-			{title=>[sort map {$_->{title}} @$cds]},
-			{order_by=>'title ASC'},
-		);
-		
-		isa_ok($cdA, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdA->title, 'Some CD3BB', 'Found Expected title');
-		is($cdA->artist->name, 'Fred BloggsCBB', 'Set Artist to FredCBB');
-		
-		isa_ok($cdB, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdB->title, 'Some CD4BB', 'Found Expected title');
-		is($cdB->artist->name, 'Fred BloggsDBB', 'Set Artist to FredDBB');
-		
-		isa_ok($cdC, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdC->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdC->title, 'Some CD5BB', 'Found Expected title');
-		is( $cdC->artist->name, undef, 'Set Artist to something undefined');
-	}
-	
-	
-	HAS_MANY_NO_PKS: {
-	
-		## This first group of tests checks to make sure we can call populate
-		## with the parent having many children and let the keys be automatic
+    ## Get the result row objects.
 
-		my $artists = [
-			{	
-				name => 'VOID_Angsty-Whiny Girl',
-				cds => [
-					{ title => 'VOID_My First CD', year => 2006 },
-					{ title => 'VOID_Yet More Tweeny-Pop crap', year => 2007 },
-				],					
-			},		
-			{
-				name => 'VOID_Manufactured Crap',
-			},
-			{
-				name => 'VOID_Like I Give a Damn',
-				cds => [
-					{ title => 'VOID_My parents sold me to a record company' ,year => 2005 },
-					{ title => 'VOID_Why Am I So Ugly?', year => 2006 },
-					{ title => 'VOID_I Got Surgery and am now Popular', year => 2007 }				
-				],
-			},
-			{	
-				name => 'VOID_Formerly Named',
-				cds => [
-					{ title => 'VOID_One Hit Wonder', year => 2006 },
-				],					
-			},			
-		];
-		
-		## Get the result row objects.
-		
-		$art_rs->populate($artists);
-		
-		my ($girl, $formerly, $damn, $crap) = $art_rs->search(
-			{name=>[sort map {$_->{name}} @$artists]},
-			{order_by=>'name ASC'},
-		);
-		
-		## Do we have the right object?
-		
-		isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");	
-		isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");	
-		
-		## Find the expected information?
+    $art_rs->populate($artists);
 
-		ok( $crap->name eq 'VOID_Manufactured Crap', "Got Correct name for result object");
-		ok( $girl->name eq 'VOID_Angsty-Whiny Girl', "Got Correct name for result object");
-		ok( $damn->name eq 'VOID_Like I Give a Damn', "Got Correct name for result object");	
-		ok( $formerly->name eq 'VOID_Formerly Named', "Got Correct name for result object");
-		
-		## Create the expected children sub objects?
-		ok( $crap->can('cds'), "Has cds relationship");
-		ok( $girl->can('cds'), "Has cds relationship");
-		ok( $damn->can('cds'), "Has cds relationship");
-		ok( $formerly->can('cds'), "Has cds relationship");
-		
-		ok( $crap->cds->count == 0, "got Expected Number of Cds");
-		ok( $girl->cds->count == 2, "got Expected Number of Cds");	
-		ok( $damn->cds->count == 3, "got Expected Number of Cds");
-		ok( $formerly->cds->count == 1, "got Expected Number of Cds");
+    my ($undef, $girl, $formerly, $damn, $crap) = $art_rs->search(
 
-		## Did the cds get expected information?
-		
-		my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year ASC'});
+      {name=>[ map { $_->{name} } @$artists]},
+      {order_by=>'name ASC'},
+    );
 
-		ok($cd1, "Got a got CD");
-		ok($cd2, "Got a got CD");
-		ok( $cd1->title eq "VOID_My First CD", "Got Expected CD Title");
-		ok( $cd2->title eq "VOID_Yet More Tweeny-Pop crap", "Got Expected CD Title");
-	}
+    ## Do we have the right object?
 
+    isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");  
+    isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");  
+    isa_ok( $undef, 'DBICTest::Artist', "Got 'Artist'");    
+
+    ## Find the expected information?
+
+    ok( $crap->name eq 'VOID_PK_Manufactured Crap', "Got Correct name 'VOID_PK_Manufactured Crap' for result object");
+    ok( $girl->name eq 'VOID_PK_Angsty-Whiny Girl', "Got Correct name for result object");
+    ok( $damn->name eq 'VOID_PK_Like I Give a Damn', "Got Correct name for result object");  
+    ok( $formerly->name eq 'VOID_PK_Formerly Named', "Got Correct name for result object");
+    ok( !defined $undef->name, "Got Correct name 'is undef' for result object");    
+
+    ## Create the expected children sub objects?
+    ok( $crap->can('cds'), "Has cds relationship");
+    ok( $girl->can('cds'), "Has cds relationship");
+    ok( $damn->can('cds'), "Has cds relationship");
+    ok( $formerly->can('cds'), "Has cds relationship");
+    ok( $undef->can('cds'), "Has cds relationship");  
+
+    ok( $crap->cds->count == 0, "got Expected Number of Cds");
+    ok( $girl->cds->count == 2, "got Expected Number of Cds");  
+    ok( $damn->cds->count == 3, "got Expected Number of Cds");
+    ok( $formerly->cds->count == 1, "got Expected Number of Cds");
+    ok( $undef->cds->count == 1, "got Expected Number of Cds");
+
+    ## Did the cds get expected information?
+
+    my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year ASC'});
+
+    ok( $cd1->title eq "VOID_PK_My First CD", "Got Expected CD Title");
+    ok( $cd2->title eq "VOID_PK_Yet More Tweeny-Pop crap", "Got Expected CD Title");
+  }
+
+
+  BELONGS_TO_WITH_PKs: {
+
+    ## Test from a belongs_to perspective: the artist should be created first,
+    ## then the CD with its artistid.  This time we try setting the PKs
+
+    my $aid  = $art_rs->get_column('artistid')->max || 0;
+
+    my $cds = [
+      {
+        title => 'Some CD3B',
+        year => '1997',
+        artist => { artistid=> ++$aid, name => 'Fred BloggsCB'},
+      },
+      {
+        title => 'Some CD4B',
+        year => '1997',
+        artist => { artistid=> ++$aid, name => 'Fred BloggsDB'},
+      },
+    ];
+
+    $cd_rs->populate($cds);
+
+    my ($cdA, $cdB) = $cd_rs->search(
+      {title=>[sort map {$_->{title}} @$cds]},
+      {order_by=>'title ASC'},
+    );
+
+    isa_ok($cdA, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdA->artist->name, 'Fred BloggsCB', 'Set Artist to FredCB');
+
+    isa_ok($cdB, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdB->artist->name, 'Fred BloggsDB', 'Set Artist to FredDB');
+    ok($cdB->artist->artistid == $aid, "Got Expected Artist ID");
+  }
+
+  BELONGS_TO_NO_PKs: {
+
+    ## Test from a belongs_to perspective: the artist should be created first,
+    ## then the CD with its artistid.
+
+    my $cds = [
+      {
+        title => 'Some CD3BB',
+        year => '1997',
+        artist => { name => 'Fred BloggsCBB'},
+      },
+      {
+        title => 'Some CD4BB',
+        year => '1997',
+        artist => { name => 'Fred BloggsDBB'},
+      },
+      {
+        title => 'Some CD5BB',
+        year => '1997',
+        artist => { name => undef},
+      },    
+    ];
+
+    $cd_rs->populate($cds);
+
+    my ($cdA, $cdB, $cdC) = $cd_rs->search(
+      {title=>[sort map {$_->{title}} @$cds]},
+      {order_by=>'title ASC'},
+    );
+
+    isa_ok($cdA, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdA->title, 'Some CD3BB', 'Found Expected title');
+    is($cdA->artist->name, 'Fred BloggsCBB', 'Set Artist to FredCBB');
+
+    isa_ok($cdB, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdB->title, 'Some CD4BB', 'Found Expected title');
+    is($cdB->artist->name, 'Fred BloggsDBB', 'Set Artist to FredDBB');
+
+    isa_ok($cdC, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdC->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdC->title, 'Some CD5BB', 'Found Expected title');
+    is( $cdC->artist->name, undef, 'Set Artist to something undefined');
+  }
+
+
+  HAS_MANY_NO_PKS: {
+
+    ## This first group of tests checks to make sure we can call populate
+    ## with the parent having many children and let the keys be automatic
+
+    my $artists = [
+      {  
+        name => 'VOID_Angsty-Whiny Girl',
+        cds => [
+          { title => 'VOID_My First CD', year => 2006 },
+          { title => 'VOID_Yet More Tweeny-Pop crap', year => 2007 },
+        ],          
+      },    
+      {
+        name => 'VOID_Manufactured Crap',
+      },
+      {
+        name => 'VOID_Like I Give a Damn',
+        cds => [
+          { title => 'VOID_My parents sold me to a record company' ,year => 2005 },
+          { title => 'VOID_Why Am I So Ugly?', year => 2006 },
+          { title => 'VOID_I Got Surgery and am now Popular', year => 2007 }        
+        ],
+      },
+      {  
+        name => 'VOID_Formerly Named',
+        cds => [
+          { title => 'VOID_One Hit Wonder', year => 2006 },
+        ],          
+      },      
+    ];
+
+    ## Get the result row objects.
+
+    $art_rs->populate($artists);
+
+    my ($girl, $formerly, $damn, $crap) = $art_rs->search(
+      {name=>[sort map {$_->{name}} @$artists]},
+      {order_by=>'name ASC'},
+    );
+
+    ## Do we have the right object?
+
+    isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");  
+    isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");  
+
+    ## Find the expected information?
+
+    ok( $crap->name eq 'VOID_Manufactured Crap', "Got Correct name for result object");
+    ok( $girl->name eq 'VOID_Angsty-Whiny Girl', "Got Correct name for result object");
+    ok( $damn->name eq 'VOID_Like I Give a Damn', "Got Correct name for result object");  
+    ok( $formerly->name eq 'VOID_Formerly Named', "Got Correct name for result object");
+
+    ## Create the expected children sub objects?
+    ok( $crap->can('cds'), "Has cds relationship");
+    ok( $girl->can('cds'), "Has cds relationship");
+    ok( $damn->can('cds'), "Has cds relationship");
+    ok( $formerly->can('cds'), "Has cds relationship");
+
+    ok( $crap->cds->count == 0, "got Expected Number of Cds");
+    ok( $girl->cds->count == 2, "got Expected Number of Cds");  
+    ok( $damn->cds->count == 3, "got Expected Number of Cds");
+    ok( $formerly->cds->count == 1, "got Expected Number of Cds");
+
+    ## Did the cds get expected information?
+
+    my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year ASC'});
+
+    ok($cd1, "Got a CD");
+    ok($cd2, "Got a CD");
+    ok( $cd1->title eq "VOID_My First CD", "Got Expected CD Title");
+    ok( $cd2->title eq "VOID_Yet More Tweeny-Pop crap", "Got Expected CD Title");
+  }
+
+  WITH_COND_FROM_RS: {
+
+    $restricted_art_rs->populate([
+      {
+        name => 'VOID More Manufactured Crap',
+      },
+    ]);
+
+    my $more_crap = $art_rs->search({
+      name => 'VOID More Manufactured Crap'
+    })->first;
+
+    ## Did it use the condition in the resultset?
+    cmp_ok( $more_crap->rank, '==', 42, "Got Correct rank for result object");
+  } 
 }
 
 ARRAYREF_OF_ARRAYREF_STYLE: {
@@ -610,21 +637,53 @@
     [1001, 'A singer that jumped the shark two albums ago'],
     [1002, 'An actually cool singer.'],
   ]);
-  
+
   ok my $unknown = $art_rs->find(1000), "got Unknown";
   ok my $jumped = $art_rs->find(1001), "got Jumped";
   ok my $cool = $art_rs->find(1002), "got Cool";
-  
+
   is $unknown->name, 'A Formally Unknown Singer', 'Correct Name';
   is $jumped->name, 'A singer that jumped the shark two albums ago', 'Correct Name';
   is $cool->name, 'An actually cool singer.', 'Correct Name';
-  
-  my ($cooler, $lamer) = $art_rs->populate([
+
+  my ($cooler, $lamer) = $restricted_art_rs->populate([
     [qw/artistid name/],
     [1003, 'Cooler'],
-    [1004, 'Lamer'],	
+    [1004, 'Lamer'],  
   ]);
-  
+
   is $cooler->name, 'Cooler', 'Correct Name';
   is $lamer->name, 'Lamer', 'Correct Name';  
-}
\ No newline at end of file
+
+  cmp_ok $cooler->rank, '==', 42, 'Correct Rank';
+
+  ARRAY_CONTEXT_WITH_COND_FROM_RS: {
+
+    my ($mega_lamer) = $restricted_art_rs->populate([
+      {
+        name => 'Mega Lamer',
+      },
+    ]);
+
+    ## Did it use the condition in the resultset?
+    cmp_ok( $mega_lamer->rank, '==', 42, "Got Correct rank for result object");
+  } 
+
+  VOID_CONTEXT_WITH_COND_FROM_RS: {
+
+    $restricted_art_rs->populate([
+      {
+        name => 'VOID Mega Lamer',
+      },
+    ]);
+
+    my $mega_lamer = $art_rs->search({
+      name => 'VOID Mega Lamer'
+    })->first;
+
+    ## Did it use the condition in the resultset?
+    cmp_ok( $mega_lamer->rank, '==', 42, "Got Correct rank for result object");
+  }
+}
+
+done_testing;
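
The new WITH_COND_FROM_RS blocks assert that rows created through a restricted resultset inherit its simple equality conditions — here search({ rank => 42 }) implies rank => 42 on every populated row, in array and void context alike. A conceptual model of that inheritance in plain Perl (not DBIC's actual implementation):

use strict;
use warnings;

# only plain  column => value  equalities can be inverted into
# implied values; operators and arrayrefs cannot
sub implied_values {
    my ($cond) = @_;
    return { map  { $_ => $cond->{$_} }
             grep { !ref $cond->{$_} } keys %$cond };
}

my $restriction = { rank => 42 };
my $user_row    = { name => 'More Manufactured Crap' };

# resultset condition first, caller-supplied values override
my %to_insert = ( %{ implied_values($restriction) }, %$user_row );

printf "%s = %s\n", $_, $to_insert{$_} for sort keys %to_insert;
# name = More Manufactured Crap
# rank = 42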

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/103many_to_many_warning.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/103many_to_many_warning.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/103many_to_many_warning.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,7 +3,6 @@
 use Test::More;
 
 use lib qw(t/lib);
-use Data::Dumper;
 
 plan tests => 4;
 my $exp_warn = qr/The many-to-many relationship 'bars' is trying to create/;
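
This file matches emitted warnings against $exp_warn; the usual capture idiom (the same one just removed from t/05components.t) localizes $SIG{__WARN__} so collection stays scoped:

use strict;
use warnings;
use Test::More;

my @warnings;
{
    # localized handler: capture is confined to this block
    local $SIG{__WARN__} = sub { push @warnings, shift };
    warn "The many-to-many relationship 'bars' is trying to create ...\n";
}

like( $warnings[0], qr/relationship 'bars' is trying to create/,
    'expected warning captured' );
is( scalar @warnings, 1, 'exactly one warning emitted' );

done_testing;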

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/104view.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/104view.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/104view.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,5 +1,5 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
 use Test::Exception;
@@ -8,8 +8,6 @@
 
 my $schema = DBICTest->init_schema();
 
-plan tests => 2;
-
 ## Real view
 my $cds_rs_2000 = $schema->resultset('CD')->search( { year => 2000 });
 my $year2kcds_rs = $schema->resultset('Year2000CDs');
@@ -24,5 +22,50 @@
 is($cds_rs_1999->count, $year1999cds_rs->count, 'View Year1999CDs sees all CDs in year 1999');
 
 
+# Test if relationships work correctly
+is_deeply (
+  [
+    $schema->resultset('Year1999CDs')->search (
+      {},
+      {
+        result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+        prefetch => ['artist', { tracks => [qw/cd year1999cd year2000cd/] } ],
+      },
+    )->all
+  ],
+  [
+    $schema->resultset('CD')->search (
+      { 'me.year' => '1999'},
+      {
+        result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+        prefetch => ['artist', { tracks => [qw/cd year1999cd year2000cd/] } ],
+        columns => [qw/cdid single_track title/],   # to match the columns retrieved by the virtview
+      },
+    )->all
+  ],
+  'Prefetch over virtual view gives expected result',
+);
 
+is_deeply (
+  [
+    $schema->resultset('Year2000CDs')->search (
+      {},
+      {
+        result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+        prefetch => ['artist', { tracks => [qw/cd year1999cd year2000cd/] } ],
+      },
+    )->all
+  ],
+  [
+    $schema->resultset('CD')->search (
+      { 'me.year' => '2000'},
+      {
+        result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+        prefetch => ['artist', { tracks => [qw/cd year1999cd year2000cd/] } ],
+      },
+    )->all
+  ],
+  'Prefetch over regular view gives expected result',
+);
 
+done_testing;
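
Both new assertions compare a view-backed resultset against an equivalent search on the base table by inflating each side to plain hashrefs via HashRefInflator, so is_deeply() compares data rather than row objects; note the virtual-view comparison also restricts columns on the base-table side to match what the view exposes. The comparison shape in miniature, with static stand-in data:

use strict;
use warnings;
use Test::More;

# stand-ins for ->all on two hashref-inflated resultsets
my @via_view  = ( { cdid => 1, title => 'x' }, { cdid => 2, title => 'y' } );
my @via_table = ( { cdid => 1, title => 'x' }, { cdid => 2, title => 'y' } );

is_deeply( \@via_view, \@via_table,
    'view and base-table queries return identical data' );

done_testing;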

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/10optional_deps.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/10optional_deps.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/10optional_deps.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,70 @@
+use strict;
+use warnings;
+no warnings qw/once/;
+
+use Test::More;
+use lib qw(t/lib);
+use Scalar::Util; # load before we break require()
+
+use_ok 'DBIx::Class::Optional::Dependencies';
+
+my $sqlt_dep = DBIx::Class::Optional::Dependencies->req_list_for ('deploy');
+is_deeply (
+  [ keys %$sqlt_dep ],
+  [ 'SQL::Translator' ],
+  'Correct deploy() dependency list',
+);
+
+# make module loading impossible, regardless of actual libpath contents
+@INC = (sub { die('Optional Dep Test') } );
+
+ok (
+  ! DBIx::Class::Optional::Dependencies->req_ok_for ('deploy'),
+  'deploy() deps missing',
+);
+
+like (
+  DBIx::Class::Optional::Dependencies->req_missing_for ('deploy'),
+  qr/^SQL::Translator \>\= \d/,
+  'expected missing string contents',
+);
+
+like (
+  DBIx::Class::Optional::Dependencies->req_errorlist_for ('deploy')->{'SQL::Translator'},
+  qr/Optional Dep Test/,
+  'custom exception found in errorlist',
+);
+
+
+#make it so module appears loaded
+$INC{'SQL/Translator.pm'} = 1;
+$SQL::Translator::VERSION = 999;
+
+ok (
+  ! DBIx::Class::Optional::Dependencies->req_ok_for ('deploy'),
+  'deploy() deps missing cached properly',
+);
+
+#reset cache
+%DBIx::Class::Optional::Dependencies::req_availability_cache = ();
+
+
+ok (
+  DBIx::Class::Optional::Dependencies->req_ok_for ('deploy'),
+  'deploy() deps present',
+);
+
+is (
+  DBIx::Class::Optional::Dependencies->req_missing_for ('deploy'),
+  '',
+  'expected null missing string',
+);
+
+is_deeply (
+  DBIx::Class::Optional::Dependencies->req_errorlist_for ('deploy'),
+  {},
+  'expected empty errorlist',
+);
+
+
+done_testing;
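
Two tricks above are worth spelling out: replacing @INC with a single coderef hook that dies makes every subsequent require() fail predictably (hence Scalar::Util is loaded before require() is broken), while seeding %INC plus a $VERSION makes a module appear already loaded. In isolation (Fake::Module is hypothetical):

use strict;
use warnings;

{
    # an @INC hook runs before any file lookup, so a dying hook
    # makes every require() fail with a known message
    local @INC = ( sub { die "Optional Dep Test\n" } );
    eval { require File::Temp; 1 }
        or print "require failed as intended: $@";
}

{
    # require() consults %INC first, so a seeded entry (plus a
    # version) makes the module look present without loading it
    local $INC{'Fake/Module.pm'} = 1;
    $Fake::Module::VERSION = '999';
    eval { require Fake::Module; 1 }
        and print "Fake::Module appears loaded (version $Fake::Module::VERSION)\n";
}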

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/18inserterror.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/18inserterror.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/18inserterror.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,29 +0,0 @@
-use Class::C3;
-use strict;
-use Test::More;
-use warnings;
-
-BEGIN {
-    eval "use DBD::SQLite";
-    plan $@
-        ? ( skip_all => 'needs DBD::SQLite for testing' )
-        : ( tests => 4 );
-}
-
-use lib qw(t/lib);
-
-use_ok( 'DBICTest' );
-use_ok( 'DBICTest::Schema' );
-my $schema = DBICTest->init_schema;
-
-{
-       my $warnings;
-       local $SIG{__WARN__} = sub { $warnings .= $_[0] };
-       eval {
-         $schema->resultset('CD')
-                ->create({ title => 'vacation in antarctica' })
-       };
-       like $@, qr/NULL/;  # as opposed to some other error
-       unlike( $warnings, qr/uninitialized value/, "No warning from Storage" );
-}
-

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/19quotes.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/19quotes.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/19quotes.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,75 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use IO::File;
-
-use lib qw(t/lib);
-use DBIC::SqlMakerTest;
-
-BEGIN {
-    eval "use DBD::SQLite";
-    plan $@
-        ? ( skip_all => 'needs DBD::SQLite for testing' )
-        : ( tests => 7 );
-}
-
-
-use_ok('DBICTest');
-use_ok('DBIC::DebugObj');
-my $schema = DBICTest->init_schema();
-
-#diag('Testing against ' . join(' ', map { $schema->storage->dbh->get_info($_) } qw/17 18/));
-
-$schema->storage->sql_maker->quote_char('`');
-$schema->storage->sql_maker->name_sep('.');
-
-my ($sql, @bind);
-$schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind));
-$schema->storage->debug(1);
-
-my $rs;
-
-$rs = $schema->resultset('CD')->search(
-           { 'me.year' => 2001, 'artist.name' => 'Caterwauler McCrae' },
-           { join => 'artist' });
-eval { $rs->count };
-is_same_sql_bind(
-  $sql, \@bind,
-  "SELECT COUNT( * ) FROM `cd` `me`  JOIN `artist` `artist` ON ( `artist`.`artistid` = `me`.`artist` ) WHERE ( `artist`.`name` = ? AND `me`.`year` = ? )", ["'Caterwauler McCrae'", "'2001'"],
-  'got correct SQL for count query with quoting'
-);
-
-my $order = 'year DESC';
-$rs = $schema->resultset('CD')->search({},
-            { 'order_by' => $order });
-eval { $rs->first };
-like($sql, qr/ORDER BY `\Q${order}\E`/, 'quoted ORDER BY with DESC (should use a scalarref anyway)');
-
-$rs = $schema->resultset('CD')->search({},
-            { 'order_by' => \$order });
-eval { $rs->first };
-like($sql, qr/ORDER BY \Q${order}\E/, 'did not quote ORDER BY with scalarref');
-
-$schema->storage->sql_maker->quote_char([qw/[ ]/]);
-$schema->storage->sql_maker->name_sep('.');
-
-$rs = $schema->resultset('CD')->search(
-           { 'me.year' => 2001, 'artist.name' => 'Caterwauler McCrae' },
-           { join => 'artist' });
-eval { $rs->count };
-is_same_sql_bind(
-  $sql, \@bind,
-  "SELECT COUNT( * ) FROM [cd] [me]  JOIN [artist] [artist] ON ( [artist].[artistid] = [me].[artist] ) WHERE ( [artist].[name] = ? AND [me].[year] = ? )", ["'Caterwauler McCrae'", "'2001'"],
-  'got correct SQL for count query with bracket quoting'
-);
-
-my %data = (
-       name => 'Bill',
-       order => '12'
-);
-
-$schema->storage->sql_maker->quote_char('`');
-$schema->storage->sql_maker->name_sep('.');
-
-is($schema->storage->sql_maker->update('group', \%data), 'UPDATE `group` SET `name` = ?, `order` = ?', 'quoted table names for UPDATE');

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/19quotes_newstyle.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/19quotes_newstyle.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/19quotes_newstyle.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,92 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use IO::File;
-
-use lib qw(t/lib);
-use DBIC::SqlMakerTest;
-
-BEGIN {
-    eval "use DBD::SQLite";
-    plan $@
-        ? ( skip_all => 'needs DBD::SQLite for testing' )
-        : ( tests => 7 );
-}
-
-use_ok('DBICTest');
-use_ok('DBIC::DebugObj');
-
-my $schema = DBICTest->init_schema();
-
-#diag('Testing against ' . join(' ', map { $schema->storage->dbh->get_info($_) } qw/17 18/));
-
-my $dsn = $schema->storage->_dbi_connect_info->[0];
-$schema->connection(
-  $dsn,
-  undef,
-  undef,
-  { AutoCommit => 1 },
-  { quote_char => '`', name_sep => '.' },
-);
-
-my ($sql, @bind);
-$schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind)),
-$schema->storage->debug(1);
-
-my $rs;
-
-$rs = $schema->resultset('CD')->search(
-           { 'me.year' => 2001, 'artist.name' => 'Caterwauler McCrae' },
-           { join => 'artist' });
-eval { $rs->count };
-is_same_sql_bind(
-  $sql, \@bind,
-  "SELECT COUNT( * ) FROM `cd` `me`  JOIN `artist` `artist` ON ( `artist`.`artistid` = `me`.`artist` ) WHERE ( `artist`.`name` = ? AND `me`.`year` = ? )", ["'Caterwauler McCrae'", "'2001'"],
-  'got correct SQL for count query with quoting'
-);
-
-my $order = 'year DESC';
-$rs = $schema->resultset('CD')->search({},
-            { 'order_by' => $order });
-eval { $rs->first };
-like($sql, qr/ORDER BY `\Q${order}\E`/, 'quoted ORDER BY with DESC (should use a scalarref anyway)');
-
-$rs = $schema->resultset('CD')->search({},
-            { 'order_by' => \$order });
-eval { $rs->first };
-like($sql, qr/ORDER BY \Q${order}\E/, 'did not quote ORDER BY with scalarref');
-
-$schema->connection(
-  $dsn,
-  undef,
-  undef,
-  { AutoCommit => 1, quote_char => [qw/[ ]/], name_sep => '.' }
-);
-
-$schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind)),
-$schema->storage->debug(1);
-
-$rs = $schema->resultset('CD')->search(
-           { 'me.year' => 2001, 'artist.name' => 'Caterwauler McCrae' },
-           { join => 'artist' });
-eval { $rs->count };
-is_same_sql_bind(
-  $sql, \@bind,
-  "SELECT COUNT( * ) FROM [cd] [me]  JOIN [artist] [artist] ON ( [artist].[artistid] = [me].[artist] ) WHERE ( [artist].[name] = ? AND [me].[year] = ? )", ["'Caterwauler McCrae'", "'2001'"],
-  'got correct SQL for count query with bracket quoting'
-);
-
-my %data = (
-       name => 'Bill',
-       order => '12'
-);
-
-$schema->connection(
-  $dsn,
-  undef,
-  undef,
-  { AutoCommit => 1, quote_char => '`', name_sep => '.' }
-);
-
-is($schema->storage->sql_maker->update('group', \%data), 'UPDATE `group` SET `name` = ?, `order` = ?', 'quoted table names for UPDATE');

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/20setuperrors.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/20setuperrors.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/20setuperrors.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,15 +1,19 @@
-#!/usr/bin/perl
+use warnings;
+use strict;
 
-use Test::More tests => 1;
+use Test::More;
+use Test::Exception;
 
-eval {
-  package BuggyTable;
-  use base 'DBIx::Class';
+throws_ok (
+  sub {
+    package BuggyTable;
+    use base 'DBIx::Class::Core';
 
-  __PACKAGE__->load_components qw/Core/;
-  __PACKAGE__->table('buggy_table');
-  __PACKAGE__->columns qw/this doesnt work as expected/;
-};
+    __PACKAGE__->table('buggy_table');
+    __PACKAGE__->columns qw/this doesnt work as expected/;
+  },
+  qr/\bcolumns\(\) is a read-only/,
+  'columns() error when apparently misused',
+);
 
-like($@,qr/\bcolumns\(\) is a read-only/,
-     "columns() error when apparently misused");
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/26dumper.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/26dumper.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/26dumper.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,20 +1,10 @@
 use strict;
 use Test::More;
-use IO::File;
 
 use Data::Dumper;
 $Data::Dumper::Sortkeys = 1;
 
 use lib qw(t/lib);
-
-BEGIN {
-    eval "use DBD::SQLite";
-    plan $ENV{DATA_DUMPER_TEST}
-        ? ( tests => 2 )
-        : ( skip_all => 'Set $ENV{DATA_DUMPER_TEST} to run this test' );
-}
-
-
 use_ok('DBICTest');
 
 my $schema = DBICTest->init_schema();
@@ -36,4 +26,4 @@
 
 cmp_ok( $rs->count(), '==', 1, "Single record in after death with dumper");
 
-1;
+done_testing;

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/31stats.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/31stats.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/31stats.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,109 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use warnings;
-use Test::More;
-
-BEGIN {
-    eval "use DBD::SQLite";
-    plan $@
-        ? ( skip_all => 'needs DBD::SQLite for testing' )
-        : ( tests => 12 );
-}
-
-use lib qw(t/lib);
-
-use_ok('DBICTest');
-my $schema = DBICTest->init_schema();
-
-my $cbworks = 0;
-
-$schema->storage->debugcb(sub { $cbworks = 1; });
-$schema->storage->debug(0);
-my $rs = $schema->resultset('CD')->search({});
-$rs->count();
-ok(!$cbworks, 'Callback not called with debug disabled');
-
-$schema->storage->debug(1);
-
-$rs->count();
-ok($cbworks, 'Debug callback worked.');
-
-my $prof = new DBIx::Test::Profiler();
-$schema->storage->debugobj($prof);
-
-# Test non-transaction calls.
-$rs->count();
-ok($prof->{'query_start'}, 'query_start called');
-ok($prof->{'query_end'}, 'query_end called');
-ok(!$prof->{'txn_begin'}, 'txn_begin not called');
-ok(!$prof->{'txn_commit'}, 'txn_commit not called');
-
-$prof->reset();
-
-# Test transaction calls
-$schema->txn_begin();
-ok($prof->{'txn_begin'}, 'txn_begin called');
-
-$rs = $schema->resultset('CD')->search({});
-$rs->count();
-ok($prof->{'query_start'}, 'query_start called');
-ok($prof->{'query_end'}, 'query_end called');
-
-$schema->txn_commit();
-ok($prof->{'txn_commit'}, 'txn_commit called');
-
-$prof->reset();
-
-# Test a rollback
-$schema->txn_begin();
-$rs = $schema->resultset('CD')->search({});
-$rs->count();
-$schema->txn_rollback();
-ok($prof->{'txn_rollback'}, 'txn_rollback called');
-
-$schema->storage->debug(0);
-
-package DBIx::Test::Profiler;
-use strict;
-
-sub new {
-    my $self = bless({});
-}
-
-sub query_start {
-    my $self = shift();
-    $self->{'query_start'} = 1;
-}
-
-sub query_end {
-    my $self = shift();
-    $self->{'query_end'} = 1;
-}
-
-sub txn_begin {
-    my $self = shift();
-    $self->{'txn_begin'} = 1;
-}
-
-sub txn_rollback {
-    my $self = shift();
-    $self->{'txn_rollback'} = 1;
-}
-
-sub txn_commit {
-    my $self = shift();
-    $self->{'txn_commit'} = 1;
-}
-
-sub reset {
-    my $self = shift();
-
-    $self->{'query_start'} = 0;
-    $self->{'query_end'} = 0;
-    $self->{'txn_begin'} = 0;
-    $self->{'txn_rollback'} = 0;
-    $self->{'txn_end'} = 0;
-}
-
-1;

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/32connect_code_ref.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/32connect_code_ref.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/32connect_code_ref.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,24 +0,0 @@
-use strict;
-use warnings;  
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-plan tests => 1;
-
-# Set up the "usual" sqlite for DBICTest
-my $normal_schema = DBICTest->init_schema( sqlite_use_file => 1 );
-
-# Steal the dsn, which should be like 'dbi:SQLite:t/var/DBIxClass.db'
-my $normal_dsn = $normal_schema->storage->_dbi_connect_info->[0];
-
-# Make sure we have no active connection
-$normal_schema->storage->disconnect;
-
-# Make a new clone with a new connection, using a code reference
-my $code_ref_schema = $normal_schema->connect(sub { DBI->connect($normal_dsn); });
-
-# Stolen from 60core.t - this just verifies things seem to work at all
-my @art = $code_ref_schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
-cmp_ok(@art, '==', 3, "Three artists returned");

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/33storage_reconnect.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/33storage_reconnect.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/33storage_reconnect.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,73 +0,0 @@
-use strict;
-use warnings;  
-
-use FindBin;
-use File::Copy;
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-plan tests => 6;
-
-my $db_orig = "$FindBin::Bin/var/DBIxClass.db";
-my $db_tmp  = "$db_orig.tmp";
-
-# Set up the "usual" sqlite for DBICTest
-my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
-
-# Make sure we're connected by doing something
-my @art = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
-cmp_ok(@art, '==', 3, "Three artists returned");
-
-# Disconnect the dbh, and be sneaky about it
-# Also test if DBD::SQLite finaly knows how to ->disconnect properly
-{
-  my $w;
-  local $SIG{__WARN__} = sub { $w = shift };
-  $schema->storage->_dbh->disconnect;
-  ok ($w !~ /active statement handles/, 'SQLite can disconnect properly');
-}
-
-# Try the operation again - What should happen here is:
-#   1. S::DBI blindly attempts the SELECT, which throws an exception
-#   2. It catches the exception, checks ->{Active}/->ping, sees the disconnected state...
-#   3. Reconnects, and retries the operation
-#   4. Success!
-my @art_two = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
-cmp_ok(@art_two, '==', 3, "Three artists returned");
-
-### Now, disconnect the dbh, and move the db file;
-# create a new one and chmod 000 to prevent SQLite from connecting.
-$schema->storage->_dbh->disconnect;
-move( $db_orig, $db_tmp );
-open DBFILE, '>', $db_orig;
-print DBFILE 'THIS IS NOT A REAL DATABASE';
-close DBFILE;
-chmod 0000, $db_orig;
-
-### Try the operation again... it should fail, since there's no db
-{
-    # Catch the DBI connection error
-    local $SIG{__WARN__} = sub {};
-    eval {
-        my @art_three = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
-    };
-    ok( $@, 'The operation failed' );
-}
-
-### Now, move the db file back to the correct name
-unlink($db_orig);
-move( $db_tmp, $db_orig );
-
-SKIP: {
-    skip "Cannot reconnect if original connection didn't fail", 2
-        if ( $@ =~ /encrypted or is not a database/ );
-
-    ### Try the operation again... this time, it should succeed
-    my @art_four;
-    eval {
-        @art_four = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
-    };
-    ok( !$@, 'The operation succeeded' );
-    cmp_ok( @art_four, '==', 3, "Three artists returned" );
-}

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/35disable_sth_caching.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/35disable_sth_caching.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/35disable_sth_caching.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,19 +0,0 @@
-use strict;
-use warnings;  
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-plan tests => 2;
-
-# Set up the "usual" sqlite for DBICTest
-my $schema = DBICTest->init_schema;
-
-my $sth_one = $schema->storage->sth('SELECT 42');
-my $sth_two = $schema->storage->sth('SELECT 42');
-$schema->storage->disable_sth_caching(1);
-my $sth_three = $schema->storage->sth('SELECT 42');
-
-ok($sth_one == $sth_two, "statement caching works");
-ok($sth_two != $sth_three, "disabling statement caching works");

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/36datetime.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/36datetime.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/36datetime.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,28 +0,0 @@
-use strict;
-use warnings;  
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-eval { require DateTime::Format::SQLite };
-plan $@ ? ( skip_all => 'Requires DateTime::Format::SQLite' )
-        : ( tests => 3 );
-
-my $schema = DBICTest->init_schema(
-    no_deploy => 1, # Deploying would cause an early rebless
-);
-
-is(
-    ref $schema->storage, 'DBIx::Class::Storage::DBI',
-    'Starting with generic storage'
-);
-
-# Calling date_time_parser should cause the storage to be reblessed,
-# so that we can pick up datetime_parser_type from subclasses
-
-my $parser = $schema->storage->datetime_parser();
-
-is($parser, 'DateTime::Format::SQLite', 'Got expected storage-set datetime_parser');
-isa_ok($schema->storage, 'DBIx::Class::Storage::DBI::SQLite', 'storage');
-

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/42toplimit.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/42toplimit.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/42toplimit.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,138 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBIC::SqlMakerTest;
-
-my $schema = DBICTest->init_schema;
-
-# Trick the sqlite DB to use Top limit emulation
-# We could test all of this via $sq->$op directly,
-# but some conditions needs a $rsrc
-delete $schema->storage->_sql_maker->{_cached_syntax};
-$schema->storage->_sql_maker->limit_dialect ('Top');
-
-my $rs = $schema->resultset ('FourKeys')->search ({}, { rows => 1, offset => 3 });
-
-sub test_order {
-  my $args = shift;
-
-  my $req_order = $args->{order_req}
-    ? "ORDER BY $args->{order_req}"
-    : ''
-  ;
-
-  is_same_sql_bind(
-    $rs->search ({}, {order_by => $args->{order_by}})->as_query,
-    "(
-      SELECT * FROM (
-        SELECT TOP 1 * FROM (
-          SELECT TOP 4 me.foo, me.bar, me.hello, me.goodbye, me.sensors, me.read_count FROM fourkeys me ORDER BY $args->{order_inner}
-        ) foo ORDER BY $args->{order_outer}
-      ) bar
-      $req_order
-    )",
-    [],
-  );
-}
-
-my @tests = (
-  {
-    order_by => \ 'foo DESC',
-    order_req => 'foo DESC',
-    order_inner => 'foo DESC',
-    order_outer => 'foo ASC' 
-  },
-  {
-    order_by => { -asc => 'foo'  },
-    order_req => 'foo ASC',
-    order_inner => 'foo ASC',
-    order_outer => 'foo DESC',
-  },
-  {
-    order_by => 'foo',
-    order_req => 'foo',
-    order_inner => 'foo ASC',
-    order_outer => 'foo DESC',
-  },
-  {
-    order_by => [ qw{ foo bar}   ],
-    order_req => 'foo, bar',
-    order_inner => 'foo ASC,bar ASC',
-    order_outer => 'foo DESC, bar DESC',
-  },
-  {
-    order_by => { -desc => 'foo' },
-    order_req => 'foo DESC',
-    order_inner => 'foo DESC',
-    order_outer => 'foo ASC',
-  },
-  {
-    order_by => ['foo', { -desc => 'bar' } ],
-    order_req => 'foo, bar DESC',
-    order_inner => 'foo ASC, bar DESC',
-    order_outer => 'foo DESC, bar ASC',
-  },
-  {
-    order_by => { -asc => [qw{ foo bar }] },
-    order_req => 'foo ASC, bar ASC',
-    order_inner => 'foo ASC, bar ASC',
-    order_outer => 'foo DESC, bar DESC',
-  },
-  {
-    order_by => [
-      { -asc => 'foo' },
-      { -desc => [qw{bar}] },
-      { -asc  => [qw{hello sensors}]},
-    ],
-    order_req => 'foo ASC, bar DESC, hello ASC, sensors ASC',
-    order_inner => 'foo ASC, bar DESC, hello ASC, sensors ASC',
-    order_outer => 'foo DESC, bar ASC, hello DESC, sensors DESC',
-  },
-  {
-    order_by => undef,
-    order_req => undef,
-    order_inner => 'foo ASC, bar ASC, hello ASC, goodbye ASC',
-    order_outer => 'foo DESC, bar DESC, hello DESC, goodbye DESC',
-  },
-  {
-    order_by => '',
-    order_req => undef,
-    order_inner => 'foo ASC, bar ASC, hello ASC, goodbye ASC',
-    order_outer => 'foo DESC, bar DESC, hello DESC, goodbye DESC',
-  },
-  {
-    order_by => {},
-    order_req => undef,
-    order_inner => 'foo ASC, bar ASC, hello ASC, goodbye ASC',
-    order_outer => 'foo DESC, bar DESC, hello DESC, goodbye DESC',
-  },
-  {
-    order_by => [],
-    order_req => undef,
-    order_inner => 'foo ASC, bar ASC, hello ASC, goodbye ASC',
-    order_outer => 'foo DESC, bar DESC, hello DESC, goodbye DESC',
-  },
-);
-
-plan (tests => scalar @tests + 1);
-
-test_order ($_) for @tests;
-
-is_same_sql_bind (
-  $rs->search ({}, { group_by => 'bar', order_by => 'bar' })->as_query,
-  '(
-    SELECT * FROM
-    (
-      SELECT TOP 1 * FROM
-      (
-        SELECT TOP 4  me.foo, me.bar, me.hello, me.goodbye, me.sensors, me.read_count FROM fourkeys me GROUP BY bar ORDER BY bar ASC
-      ) AS foo
-      ORDER BY bar DESC
-    ) AS bar
-    ORDER BY bar
-  )',
-  [],
-);

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/46where_attribute.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/46where_attribute.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/46where_attribute.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -2,7 +2,6 @@
 use warnings;
 
 use Test::More;
-use Data::Dumper;
 use lib qw(t/lib);
 use DBICTest;
 my $schema = DBICTest->init_schema();

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/51threads.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/51threads.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/51threads.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -8,7 +8,7 @@
 
 BEGIN {
     plan skip_all => 'Your perl does not support ithreads'
-        if !$Config{useithreads} || $] < 5.008;
+        if !$Config{useithreads};
 }
 
 use threads;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/51threadtxn.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/51threadtxn.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/51threadtxn.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -8,7 +8,7 @@
 
 BEGIN {
     plan skip_all => 'Your perl does not support ithreads'
-        if !$Config{useithreads} || $] < 5.008;
+        if !$Config{useithreads};
 }
 
 use threads;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/52cycle.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/52cycle.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/52cycle.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -8,16 +8,36 @@
   eval { require Test::Memory::Cycle; require Devel::Cycle };
   if ($@ or Devel::Cycle->VERSION < 1.10) {
     plan skip_all => "leak test needs Test::Memory::Cycle and Devel::Cycle >= 1.10";
-  } else {
-    plan tests => 1;
-  }
+  };
 }
 
 use DBICTest;
 use DBICTest::Schema;
+use Scalar::Util ();
 
 import Test::Memory::Cycle;
 
-my $s = DBICTest::Schema->clone;
+my $weak;
 
-memory_cycle_ok($s, 'No cycles in schema');
+{
+  my $s = $weak->{schema} = DBICTest->init_schema;
+  memory_cycle_ok($s, 'No cycles in schema');
+
+  my $rs = $weak->{resultset} = $s->resultset ('Artist');
+  memory_cycle_ok($rs, 'No cycles in resultset');
+
+  my $rsrc = $weak->{resultsource} = $rs->result_source;
+  memory_cycle_ok($rsrc, 'No cycles in resultsource');
+
+  my $row = $weak->{row} = $rs->first;
+  memory_cycle_ok($row, 'No cycles in row');
+
+  Scalar::Util::weaken ($_) for values %$weak;
+  memory_cycle_ok($weak, 'No cycles in weak object collection');
+}
+
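+# the strong references went out of scope with the block above; since each
+# value in %$weak was weakened, the slots should all be undef by now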
+for (keys %$weak) {
+  ok (! $weak->{$_}, "No $_ leaks");
+}
+
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/60core.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/60core.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/60core.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,16 +1,15 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
 use Test::Exception;
+use Test::Warn;
 use lib qw(t/lib);
 use DBICTest;
 use DBIC::SqlMakerTest;
 
 my $schema = DBICTest->init_schema();
 
-plan tests => 106;
-
 eval { require DateTime::Format::SQLite };
 my $NO_DTFM = $@ ? 1 : 0;
 
@@ -37,10 +36,10 @@
 my %not_dirty = $art->get_dirty_columns();
 is(scalar(keys(%not_dirty)), 0, 'Nothing is dirty');
 
-eval {
+throws_ok ( sub {
   my $ret = $art->make_column_dirty('name2');
-};
-ok(defined($@), 'Failed to make non-existent column dirty');
+}, qr/No such column 'name2'/, 'Failed to make non-existent column dirty');
+
 $art->make_column_dirty('name');
 my %fake_dirty = $art->get_dirty_columns();
 is(scalar(keys(%fake_dirty)), 1, '1 fake dirty column');
@@ -66,7 +65,7 @@
 
 is(@art, 2, 'And then there were two');
 
-ok(!$art->in_storage, "It knows it's dead");
+is($art->in_storage, 0, "It knows it's dead");
 
 dies_ok ( sub { $art->delete }, "Can't delete twice");
 
@@ -106,6 +105,19 @@
 
 is($new_again->ID, 'DBICTest::Artist|artist|artistid=4', 'unique object id generated correctly');
 
+# test that store_column is called once for create() for non-sequence columns
+{
+  ok(my $artist = $schema->resultset('Artist')->create({name => 'store_column test'}));
+  is($artist->name, 'X store_column test'); # used to be 'X X store...'
+
+  # call store_column even though the column doesn't seem to be dirty
+  $artist->name($artist->name);
+  is($artist->name, 'X X store_column test');
+  ok($artist->is_column_changed('name'), 'changed column marked as dirty');
+
+  $artist->delete;
+}
+
 # Test backwards compatibility
 {
   my $warnings = '';
@@ -134,7 +146,7 @@
   });
 
   is($new_obj->name, 'find_or_new', 'find_or_new: instantiated a new artist');
-  ok(! $new_obj->in_storage, 'new artist is not in storage');
+  is($new_obj->in_storage, 0, 'new artist is not in storage');
 }
 
 my $cd = $schema->resultset("CD")->find(1);
@@ -212,10 +224,10 @@
     isa_ok($tdata{'last_updated_on'}, 'DateTime', 'inflated accessored column');
 }
 
-eval { $schema->class("Track")->load_components('DoesNotExist'); };
+throws_ok (sub {
+  $schema->class("Track")->load_components('DoesNotExist');
+}, qr!Can't locate DBIx/Class/DoesNotExist.pm!, 'exception on nonexisting component');
 
-ok $@, $@;
-
 is($schema->class("Artist")->field_name_for->{name}, 'artist name', 'mk_classdata usage ok');
 
 my $search = [ { 'tags.tag' => 'Cheesy' }, { 'tags.tag' => 'Blue' } ];
@@ -229,20 +241,13 @@
 is ($collapsed_or_rs->all, 4, 'Collapsed joined search with OR returned correct number of rows');
 is ($collapsed_or_rs->count, 4, 'Collapsed search count with OR ok');
 
-my $pref_or_rs = $collapsed_or_rs->search ({}, { prefetch => [qw/tags/] });
-is_same_sql_bind (
-  $pref_or_rs->as_query,
-  '(SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, tags.tagid, tags.cd, tags.tag FROM cd me LEFT JOIN tags tags ON tags.cd = me.cdid WHERE ( ( tags.tag = ? OR tags.tag = ? ) ) GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, tags.tagid, tags.cd, tags.tag ORDER BY cdid, tags.cd, tags.tag)',
-  [
-    [ 'tags.tag' => 'Cheesy' ],
-    [ 'tags.tag' => 'Blue' ],
-  ],
-  'Prefetch + distinct resulted in correct group_by',
-);
-is ($pref_or_rs->all, 4, 'Prefetched grouped search with OR returned correct number of rows');
-is ($pref_or_rs->count, 4, 'Prefetched grouped count with OR ok');
+# make sure distinct on an explicitly grouped rs is warned about
+my $cd_rs = $schema->resultset ('CD')
+              ->search ({}, { distinct => 1, group_by => 'title' });
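+# (distinct => 1 is implemented as an implicit group_by over the selected
+# columns, hence it is useless combined with an explicit group_by)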
+warnings_exist (sub {
+  $cd_rs->next;
+}, qr/Useless use of distinct/, 'UUoD warning');
 
-
 {
   my $tcount = $schema->resultset('Track')->search(
     {},
@@ -416,9 +421,58 @@
 
 # make sure we got rid of the compat shims
 SKIP: {
-    skip "Remove in 0.09", 5 if $DBIx::Class::VERSION < 0.09;
+    skip "Remove in 0.082", 3 if $DBIx::Class::VERSION < 0.082;
 
-    for (qw/compare_relationship_keys pk_depends_on resolve_condition resolve_join resolve_prefetch/) {
+    for (qw/compare_relationship_keys pk_depends_on resolve_condition/) {
       ok (! DBIx::Class::ResultSource->can ($_), "$_ no longer provided by DBIx::Class::ResultSource");
     }
 }
+
+#------------------------------
+# READ THIS BEFORE "FIXING"
+#------------------------------
+#
+# make sure we got rid of discard_changes mess - this is a mess and a source
+# of great confusion. Here I simply die if the methods are available, which
+# is wrong on its own (we *have* to provide some sort of back-compat, even
+# if with warnings). Here is how I envision things should actually be. Also
+# note that a lot of the deprecation can be started today (i.e. the switch
+# from get_from_storage to copy_from_storage). So:
+#
+# $row->discard_changes =>
+#   warning, and delegation to reload_from_storage
+#
+# $row->reload_from_storage =>
+#   does what discard changes did in 0.08 - issues a query to the db
+#   and repopulates all column slots, regardless of dirty states etc.
+#
+# $row->revert_changes =>
+#   does what discard_changes should have done initially (before it became
+#   a dual-purpose call). In order to make this work we will have to
+#   augment $row to carry its own initial-state, much like svn has a
+#   copy of the current checkout in contrast to cvs.
+#
+# my $db_row = $row->get_from_storage =>
+#   warns and delegates to an improved name copy_from_storage, with the
+#   same semantics
+#
+# my $db_row = $row->copy_from_storage =>
+#   a much better/descriptive name than get_from_storage
+#
+#------------------------------
+# READ THIS BEFORE "FIXING"
+#------------------------------
+#
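+# A rough usage sketch of the envisioned split, for illustration only
+# (none of these methods exists yet - the names are the proposed ones above):
+#
+#   $row->name ('changed locally');
+#   $row->reload_from_storage;   # re-queries the db, clobbers the change
+#
+#   $row->name ('changed again');
+#   $row->revert_changes;        # restores the cached initial state, no query
+#
+#   my $db_row = $row->copy_from_storage;  # fresh copy, $row itself untouched
+#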
+SKIP: {
+    skip "Something needs to be done before 0.09", 2 if $DBIx::Class::VERSION < 0.09;
+
+    my $row = $schema->resultset ('Artist')->next;
+
+    for (qw/discard_changes get_from_storage/) {
+      ok (! $row->can ($_), "$_ needs *some* sort of facelift before 0.09 ships - current state of affairs is unacceptable");
+    }
+}
+
+throws_ok { $schema->resultset} qr/resultset\(\) expects a source name/, 'resultset with no argument throws exception';
+
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/71mysql.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/71mysql.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/71mysql.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -6,6 +6,7 @@
 use lib qw(t/lib);
 use DBICTest;
 use DBI::Const::GetInfoType;
+use DBIC::SqlMakerTest;
 
 my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MYSQL_${_}" } qw/DSN USER PASS/};
 
@@ -14,8 +15,6 @@
 plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
   unless ($dsn && $user);
 
-plan tests => 19;
-
 my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
 
 my $dbh = $schema->storage->dbh;
@@ -26,7 +25,7 @@
 
 $dbh->do("DROP TABLE IF EXISTS cd;");
 
-$dbh->do("CREATE TABLE cd (cdid INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, artist INTEGER, title TEXT, year INTEGER, genreid INTEGER, single_track INTEGER);");
+$dbh->do("CREATE TABLE cd (cdid INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, artist INTEGER, title TEXT, year DATE, genreid INTEGER, single_track INTEGER);");
 
 $dbh->do("DROP TABLE IF EXISTS producer;");
 
@@ -46,6 +45,14 @@
 
 #'dbi:mysql:host=localhost;database=dbic_test', 'dbic_test', '');
 
+# make sure sqlt_type overrides work (::Storage::DBI::mysql does this) 
+{
+  my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
+
+  ok (!$schema->storage->_dbh, 'definitely not connected');
+  is ($schema->storage->sqlt_type, 'MySQL', 'sqlt_type correct pre-connection');
+}
+
 # This is in Core now, but it's here just to test that it doesn't break
 $schema->class('Artist')->load_components('PK::Auto');
 
@@ -155,41 +162,141 @@
     is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
 }
 
+my $cd = $schema->resultset ('CD')->create ({});
+my $producer = $schema->resultset ('Producer')->create ({});
+lives_ok { $cd->set_producers ([ $producer ]) } 'set_relationship doesnt die';
+
+{
+  my $artist = $schema->resultset('Artist')->next;
+  my $cd = $schema->resultset('CD')->next;
+  $cd->set_from_related ('artist', $artist);
+  $cd->update;
+
+  my $rs = $schema->resultset('CD')->search ({}, { prefetch => 'artist' });
+
+  lives_ok sub {
+    my $cd = $rs->next;
+    is ($cd->artist->name, $artist->name, 'Prefetched artist');
+  }, 'join does not throw (mysql 3 test)';
+
+  # induce a jointype override, make sure it works even if we don't have mysql3
+  local $schema->storage->sql_maker->{_default_jointype} = 'inner';
+  is_same_sql_bind (
+    $rs->as_query,
+    '(
+      SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+             artist.artistid, artist.name, artist.rank, artist.charfield
+        FROM cd me
+        INNER JOIN artist artist ON artist.artistid = me.artist
+    )',
+    [],
+    'overridden default join type works',
+  );
+}
+
 ## Can we properly deal with the null search problem?
 ##
 ## Only way is to do a SET SQL_AUTO_IS_NULL = 0; on connect
 ## But I'm not sure if we should do this or not (Ash, 2008/06/03)
+#
+# There is now a built-in function to do this, test that everything works
+# with it (ribasushi, 2009/07/03)
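+# (the built-in in question is the 'set_strict_mode' on_connect_call used
+# below, which among other things issues SET SQL_AUTO_IS_NULL = 0)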
 
 NULLINSEARCH: {
-    
-    ok my $artist1_rs = $schema->resultset('Artist')->search({artistid=>6666})
-    => 'Created an artist resultset of 6666';
-    
+    my $ansi_schema = DBICTest::Schema->connect ($dsn, $user, $pass, { on_connect_call => 'set_strict_mode' });
+
+    $ansi_schema->resultset('Artist')->create ({ name => 'last created artist' });
+
+    ok my $artist1_rs = $ansi_schema->resultset('Artist')->search({artistid=>6666})
+      => 'Created an artist resultset of 6666';
+
     is $artist1_rs->count, 0
-    => 'Got no returned rows';
-    
-    ok my $artist2_rs = $schema->resultset('Artist')->search({artistid=>undef})
-    => 'Created an artist resultset of undef';
-    
-    TODO: {
-    	local $TODO = "need to fix the row count =1 when select * from table where pk IS NULL problem";
-	    is $artist2_rs->count, 0
-	    => 'got no rows';    	
-    }
+      => 'Got no returned rows';
 
+    ok my $artist2_rs = $ansi_schema->resultset('Artist')->search({artistid=>undef})
+      => 'Created an artist resultset of undef';
+
+    is $artist2_rs->count, 0
+      => 'got no rows';
+
     my $artist = $artist2_rs->single;
-    
+
     is $artist => undef
-    => 'Nothing Found!';
+      => 'Nothing Found!';
 }
-    
-my $cd = $schema->resultset ('CD')->create ({});
 
-my $producer = $schema->resultset ('Producer')->create ({});
+# check for proper grouped counts
+{
+  my $ansi_schema = DBICTest::Schema->connect ($dsn, $user, $pass, { on_connect_call => 'set_strict_mode' });
+  my $rs = $ansi_schema->resultset('CD');
 
-lives_ok { $cd->set_producers ([ $producer ]) } 'set_relationship doesnt die';
+  my $years;
+  $years->{ $_->year || scalar keys %$years }++ for $rs->all;  # NULL != NULL, so each NULL year gets its own distinct key
 
-# clean up our mess
-END {
-    #$dbh->do("DROP TABLE artist") if $dbh;
+  lives_ok ( sub {
+    is (
+      $rs->search ({}, { group_by => 'year'})->count,
+      scalar keys %$years,
+      'grouped count correct',
+    );
+  }, 'Grouped count does not throw');
 }
+
+ZEROINSEARCH: {
+  my $cds_per_year = {
+    2001 => 2,
+    2002 => 1,
+    2005 => 3,
+  };
+
+  my $rs = $schema->resultset ('CD');
+  $rs->delete;
+  for my $y (keys %$cds_per_year) {
+    for my $c (1 .. $cds_per_year->{$y} ) {
+      $rs->create ({ title => "CD $y-$c", artist => 1, year => "$y-01-01" });
+    }
+  }
+
+  is ($rs->count, 6, 'CDs created successfully');
+
+  $rs = $rs->search ({}, {
+    select => [ \ 'YEAR(year)' ], as => ['y'], distinct => 1,
+  });
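+  # (the scalar ref passes YEAR(year) through as a literal SQL select
+  #  snippet, surfaced as the virtual column 'y' via the 'as' attribute)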
+
+  is_deeply (
+    [ sort ($rs->get_column ('y')->all) ],
+    [ sort keys %$cds_per_year ],
+    'Years group successfully',
+  );
+
+  $rs->create ({ artist => 1, year => '0-1-1', title => 'Jesus Rap' });
+
+  is_deeply (
+    [ sort $rs->get_column ('y')->all ],
+    [ 0, sort keys %$cds_per_year ],
+    'Zero-year groups successfully',
+  );
+
+  # convoluted search taken verbatim from the mailing list
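+  # (the -and arrayref form is what allows the 'year' key to appear twice;
+  #  in a plain hashref the second condition would clobber the first)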
+  my $restrict_rs = $rs->search({ -and => [
+    year => { '!=', 0 },
+    year => { '!=', undef }
+  ]});
+
+  is_deeply (
+    [ $restrict_rs->get_column('y')->all ],
+    [ $rs->get_column ('y')->all ],
+    'Zero year was correctly excluded from resultset',
+  );
+}
+
+## If find() is the first query after connect(),
+## DBIx::Class::Storage::DBI::sql_maker() will be called before
+## _determine_driver() and so the ::SQLAHacks class for MySQL
+## will not be used
+
+my $schema2 = DBICTest::Schema->connect($dsn, $user, $pass);
+$schema2->resultset("Artist")->find(4);
+isa_ok($schema2->storage->sql_maker, 'DBIx::Class::SQLAHacks::MySQL');
+
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/72pg.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/72pg.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/72pg.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,107 +1,79 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
 use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
 
-{
-  package DBICTest::Schema::Casecheck;
 
-  use strict;
-  use warnings;
-  use base 'DBIx::Class';
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_PG_${_}" } qw/DSN USER PASS/};
 
-  __PACKAGE__->load_components(qw/Core/);
-  __PACKAGE__->table('testschema.casecheck');
-  __PACKAGE__->add_columns(qw/id name NAME uc_name storecolumn/);
-  __PACKAGE__->column_info_from_storage(1);
-  __PACKAGE__->set_primary_key('id');
+plan skip_all => <<EOM unless $dsn && $user;
+Set \$ENV{DBICTEST_PG_DSN}, _USER and _PASS to run this test
+( NOTE: This test drops and creates tables called 'artist', 'casecheck',
+  'array_test' and 'sequence_test', as well as the following sequences:
+  'pkid1_seq', 'pkid2_seq' and 'nonpkid_seq', and the following
+  schemas: 'dbic_t_schema', 'dbic_t_schema_2', 'dbic_t_schema_3',
+  'dbic_t_schema_4', and 'dbic_t_schema_5'
+)
+EOM
 
-  sub store_column {
-    my ($self, $name, $value) = @_;
-    $value = '#'.$value if($name eq "storecolumn");
-    $self->maybe::next::method($name, $value);
-  }
-}
+### load any test classes that are defined further down in the file via BEGIN blocks
 
-{
-  package DBICTest::Schema::ArrayTest;
+our @test_classes; #< array that will be pushed into by test classes defined in this file
+DBICTest::Schema->load_classes( map {s/.+:://;$_} @test_classes ) if @test_classes;
 
-  use strict;
-  use warnings;
-  use base 'DBIx::Class';
 
-  __PACKAGE__->load_components(qw/Core/);
-  __PACKAGE__->table('testschema.array_test');
-  __PACKAGE__->add_columns(qw/id arrayfield/);
-  __PACKAGE__->column_info_from_storage(1);
-  __PACKAGE__->set_primary_key('id');
+###  pre-connect tests (keep each test separate to make sure rebless() runs)
+{
+  my $s = DBICTest::Schema->connect($dsn, $user, $pass);
 
-}
+  ok (!$s->storage->_dbh, 'definitely not connected');
 
-my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_PG_${_}" } qw/DSN USER PASS/};
+  # Check that datetime_parser returns correctly before we explicitly connect.
+  SKIP: {
+      eval { require DateTime::Format::Pg };
+      skip "DateTime::Format::Pg required", 2 if $@;
 
-plan skip_all => 'Set $ENV{DBICTEST_PG_DSN}, _USER and _PASS to run this test '.
-  '(note: This test drops and creates tables called \'artist\', \'casecheck\', \'array_test\' and \'sequence_test\''.
-  ' as well as following sequences: \'pkid1_seq\', \'pkid2_seq\' and \'nonpkid_seq\''.
-  ' as well as following schemas: \'testschema\'!)'
-    unless ($dsn && $user);
+      my $store = ref $s->storage;
+      is($store, 'DBIx::Class::Storage::DBI', 'Started with generic storage');
 
+      my $parser = $s->storage->datetime_parser;
+      is( $parser, 'DateTime::Format::Pg', 'datetime_parser is as expected');
+  }
 
-plan tests => 39;
-
-DBICTest::Schema->load_classes( 'Casecheck', 'ArrayTest' );
-my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
-
-# Check that datetime_parser returns correctly before we explicitly connect.
-SKIP: {
-    eval { require DateTime::Format::Pg };
-    skip "DateTime::Format::Pg required", 2 if $@;
-
-    my $store = ref $schema->storage;
-    is($store, 'DBIx::Class::Storage::DBI', 'Started with generic storage');
-
-    my $parser = $schema->storage->datetime_parser;
-    is( $parser, 'DateTime::Format::Pg', 'datetime_parser is as expected');
+  ok (!$s->storage->_dbh, 'still not connected');
 }
-
-my $dbh = $schema->storage->dbh;
-$schema->source("Artist")->name("testschema.artist");
-$schema->source("SequenceTest")->name("testschema.sequence_test");
 {
-    local $SIG{__WARN__} = sub {};
-    _cleanup ($dbh);
-
-    $dbh->do("CREATE SCHEMA testschema;");
-    $dbh->do("CREATE TABLE testschema.artist (artistid serial PRIMARY KEY, name VARCHAR(100), rank INTEGER NOT NULL DEFAULT '13', charfield CHAR(10), arrayfield INTEGER[]);");
-    $dbh->do("CREATE TABLE testschema.sequence_test (pkid1 integer, pkid2 integer, nonpkid integer, name VARCHAR(100), CONSTRAINT pk PRIMARY KEY(pkid1, pkid2));");
-    $dbh->do("CREATE SEQUENCE pkid1_seq START 1 MAXVALUE 999999 MINVALUE 0");
-    $dbh->do("CREATE SEQUENCE pkid2_seq START 10 MAXVALUE 999999 MINVALUE 0");
-    $dbh->do("CREATE SEQUENCE nonpkid_seq START 20 MAXVALUE 999999 MINVALUE 0");
-    ok ( $dbh->do('CREATE TABLE testschema.casecheck (id serial PRIMARY KEY, "name" VARCHAR(1), "NAME" VARCHAR(2), "UC_NAME" VARCHAR(3), "storecolumn" VARCHAR(10));'), 'Creation of casecheck table');
-    ok ( $dbh->do('CREATE TABLE testschema.array_test (id serial PRIMARY KEY, arrayfield INTEGER[]);'), 'Creation of array_test table');
+  my $s = DBICTest::Schema->connect($dsn, $user, $pass);
+  # make sure sqlt_type overrides work (::Storage::DBI::Pg does this)
+  ok (!$s->storage->_dbh, 'definitely not connected');
+  is ($s->storage->sqlt_type, 'PostgreSQL', 'sqlt_type correct pre-connection');
+  ok (!$s->storage->_dbh, 'still not connected');
 }
 
-# store_column is called once for create() for non sequence columns
+### connect, create postgres-specific test schema
 
-ok(my $storecolumn = $schema->resultset('Casecheck')->create({'storecolumn' => 'a'}));
+my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
 
-is($storecolumn->storecolumn, '#a'); # was '##a'
+drop_test_schema($schema);
+create_test_schema($schema);
 
+### begin main tests
 
-# This is in Core now, but it's here just to test that it doesn't break
-$schema->class('Artist')->load_components('PK::Auto');
 
-my $new = $schema->resultset('Artist')->create({ name => 'foo' });
+# run a BIG bunch of tests for last-insert-id / Auto-PK / sequence
+# discovery
+run_apk_tests($schema); #< older set of auto-pk tests
+run_extended_apk_tests($schema); #< new extended set of auto-pk tests
 
-is($new->artistid, 1, "Auto-PK worked");
 
-$new = $schema->resultset('Artist')->create({ name => 'bar' });
 
-is($new->artistid, 2, "Auto-PK worked");
 
+
+### type_info tests
+
 my $test_type_info = {
     'artistid' => {
         'data_type' => 'integer',
@@ -135,8 +107,7 @@
     },
 };
 
-
-my $type_info = $schema->storage->columns_info_for('testschema.artist');
+my $type_info = $schema->storage->columns_info_for('dbic_t_schema.artist');
 my $artistid_defval = delete $type_info->{artistid}->{default_value};
 like($artistid_defval,
      qr/^nextval\('([^\.]*\.){0,1}artist_artistid_seq'::(?:text|regclass)\)/,
@@ -144,7 +115,28 @@
 is_deeply($type_info, $test_type_info,
           'columns_info_for - column data types');
 
-{
+
+
+
+####### Array tests
+
+BEGIN {
+  package DBICTest::Schema::ArrayTest;
+  push @main::test_classes, __PACKAGE__;
+
+  use strict;
+  use warnings;
+  use base 'DBIx::Class::Core';
+
+  __PACKAGE__->table('dbic_t_schema.array_test');
+  __PACKAGE__->add_columns(qw/id arrayfield/);
+  __PACKAGE__->column_info_from_storage(1);
+  __PACKAGE__->set_primary_key('id');
+
+}
+SKIP: {
+  skip "Need DBD::Pg 2.9.2 or newer for array tests", 4 if $DBD::Pg::VERSION < 2.009002;
+
   lives_ok {
     $schema->resultset('ArrayTest')->create({
       arrayfield => [1, 2],
@@ -171,6 +163,23 @@
 }
 
 
+
+########## Case check
+
+BEGIN {
+  package DBICTest::Schema::Casecheck;
+  push @main::test_classes, __PACKAGE__;
+
+  use strict;
+  use warnings;
+  use base 'DBIx::Class::Core';
+
+  __PACKAGE__->table('dbic_t_schema.casecheck');
+  __PACKAGE__->add_columns(qw/id name NAME uc_name/);
+  __PACKAGE__->column_info_from_storage(1);
+  __PACKAGE__->set_primary_key('id');
+}
+
 my $name_info = $schema->source('Casecheck')->column_info( 'name' );
 is( $name_info->{size}, 1, "Case sensitive matching info for 'name'" );
 
@@ -180,83 +189,72 @@
 my $uc_name_info = $schema->source('Casecheck')->column_info( 'uc_name' );
 is( $uc_name_info->{size}, 3, "Case insensitive matching info for 'uc_name'" );
 
-# Test SELECT ... FOR UPDATE
-my $HaveSysSigAction = eval "require Sys::SigAction" && !$@;
-if ($HaveSysSigAction) {
-    Sys::SigAction->import( 'set_sig_handler' );
-}
 
+
+
+## Test SELECT ... FOR UPDATE
+
 SKIP: {
-    skip "Sys::SigAction is not available", 3 unless $HaveSysSigAction;
-    # create a new schema
-    my $schema2 = DBICTest::Schema->connect($dsn, $user, $pass);
-    $schema2->source("Artist")->name("testschema.artist");
+    if(eval "require Sys::SigAction" && !$@) {
+        Sys::SigAction->import( 'set_sig_handler' );
+    }
+    else {
+      skip "Sys::SigAction is not available", 6;
+    }
 
-    $schema->txn_do( sub {
-        my $artist = $schema->resultset('Artist')->search(
-            {
-                artistid => 1
-            },
-            {
-                for => 'update'
-            }
-        )->first;
-        is($artist->artistid, 1, "select for update returns artistid = 1");
+    my ($timed_out, $artist2);
 
-        my $artist_from_schema2;
-        my $error_ok = 0;
-        eval {
-            my $h = set_sig_handler( 'ALRM', sub { die "DBICTestTimeout" } );
-            alarm(2);
-            $artist_from_schema2 = $schema2->resultset('Artist')->find(1);
-            $artist_from_schema2->name('fooey');
-            $artist_from_schema2->update;
-            alarm(0);
-        };
-        if (my $e = $@) {
-            $error_ok = $e =~ /DBICTestTimeout/;
-        }
-
+    for my $t (
+      {
         # Make sure that an error was raised, and that the update failed
-        ok($error_ok, "update from second schema times out");
-        ok($artist_from_schema2->is_column_changed('name'), "'name' column is still dirty from second schema");
-    });
-}
+        update_lock => 1,
+        test_sub => sub {
+          ok($timed_out, "update from second schema times out");
+          ok($artist2->is_column_changed('name'), "'name' column is still dirty from second schema");
+        },
+      },
+      {
+        # Make sure that an error was NOT raised, and that the update succeeded
+        update_lock => 0,
+        test_sub => sub {
+          ok(! $timed_out, "update from second schema DOES NOT timeout");
+          ok(! $artist2->is_column_changed('name'), "'name' column is NOT dirty from second schema");
+        },
+      },
+    ) {
+      # create a new schema
+      my $schema2 = DBICTest::Schema->connect($dsn, $user, $pass);
+      $schema2->source("Artist")->name("dbic_t_schema.artist");
 
-SKIP: {
-    skip "Sys::SigAction is not available", 3 unless $HaveSysSigAction;
-    # create a new schema
-    my $schema2 = DBICTest::Schema->connect($dsn, $user, $pass);
-    $schema2->source("Artist")->name("testschema.artist");
-
-    $schema->txn_do( sub {
+      $schema->txn_do( sub {
         my $artist = $schema->resultset('Artist')->search(
             {
                 artistid => 1
             },
+            $t->{update_lock} ? { for => 'update' } : {}
         )->first;
-        is($artist->artistid, 1, "select for update returns artistid = 1");
+        is($artist->artistid, 1, "select returns artistid = 1");
 
-        my $artist_from_schema2;
-        my $error_ok = 0;
+        $timed_out = 0;
         eval {
             my $h = set_sig_handler( 'ALRM', sub { die "DBICTestTimeout" } );
             alarm(2);
-            $artist_from_schema2 = $schema2->resultset('Artist')->find(1);
-            $artist_from_schema2->name('fooey');
-            $artist_from_schema2->update;
+            $artist2 = $schema2->resultset('Artist')->find(1);
+            $artist2->name('fooey');
+            $artist2->update;
             alarm(0);
         };
-        if (my $e = $@) {
-            $error_ok = $e =~ /DBICTestTimeout/;
-        }
+        $timed_out = $@ =~ /DBICTestTimeout/;
+      });
 
-        # Make sure that an error was NOT raised, and that the update succeeded
-        ok(! $error_ok, "update from second schema DOES NOT timeout");
-        ok(! $artist_from_schema2->is_column_changed('name'), "'name' column is NOT dirty from second schema");
-    });
+      $t->{test_sub}->();
+    }
 }
 
+
+######## other older Auto-pk tests
+
+$schema->source("SequenceTest")->name("dbic_t_schema.sequence_test");
 for (1..5) {
     my $st = $schema->resultset('SequenceTest')->create({ name => 'foo' });
     is($st->pkid1, $_, "Oracle Auto-PK without trigger: First primary key");
@@ -266,21 +264,429 @@
 my $st = $schema->resultset('SequenceTest')->create({ name => 'foo', pkid1 => 55 });
 is($st->pkid1, 55, "Oracle Auto-PK without trigger: First primary key set manually");
 
-sub _cleanup {
-  my $dbh = shift or return;
+done_testing;
 
-  for my $stat (
-    'DROP TABLE testschema.artist',
-    'DROP TABLE testschema.casecheck',
-    'DROP TABLE testschema.sequence_test',
-    'DROP TABLE testschema.array_test',
-    'DROP SEQUENCE pkid1_seq',
-    'DROP SEQUENCE pkid2_seq',
-    'DROP SEQUENCE nonpkid_seq',
-    'DROP SCHEMA testschema',
-  ) {
-    eval { $dbh->do ($stat) };
-  }
+exit;
+
+END {
+    return unless $schema;
+    drop_test_schema($schema);
+    eapk_drop_all( $schema)
+};
+
+
+######### SUBROUTINES
+
+sub create_test_schema {
+    my $schema = shift;
+    $schema->storage->dbh_do(sub {
+      my (undef,$dbh) = @_;
+
+      local $dbh->{Warn} = 0;
+
+      my $std_artist_table = <<EOS;
+(
+  artistid serial PRIMARY KEY
+  , name VARCHAR(100)
+  , rank INTEGER NOT NULL DEFAULT '13'
+  , charfield CHAR(10)
+  , arrayfield INTEGER[]
+)
+EOS
+
+      $dbh->do("CREATE SCHEMA dbic_t_schema");
+      $dbh->do("CREATE TABLE dbic_t_schema.artist $std_artist_table");
+      $dbh->do(<<EOS);
+CREATE TABLE dbic_t_schema.sequence_test (
+    pkid1 integer
+    , pkid2 integer
+    , nonpkid integer
+    , name VARCHAR(100)
+    , CONSTRAINT pk PRIMARY KEY(pkid1, pkid2)
+)
+EOS
+      $dbh->do("CREATE SEQUENCE pkid1_seq START 1 MAXVALUE 999999 MINVALUE 0");
+      $dbh->do("CREATE SEQUENCE pkid2_seq START 10 MAXVALUE 999999 MINVALUE 0");
+      $dbh->do("CREATE SEQUENCE nonpkid_seq START 20 MAXVALUE 999999 MINVALUE 0");
+      $dbh->do(<<EOS);
+CREATE TABLE dbic_t_schema.casecheck (
+    id serial PRIMARY KEY
+    , "name" VARCHAR(1)
+    , "NAME" VARCHAR(2)
+    , "UC_NAME" VARCHAR(3)
+)
+EOS
+      $dbh->do(<<EOS);
+CREATE TABLE dbic_t_schema.array_test (
+    id serial PRIMARY KEY
+    , arrayfield INTEGER[]
+)
+EOS
+      $dbh->do("CREATE SCHEMA dbic_t_schema_2");
+      $dbh->do("CREATE TABLE dbic_t_schema_2.artist $std_artist_table");
+      $dbh->do("CREATE SCHEMA dbic_t_schema_3");
+      $dbh->do("CREATE TABLE dbic_t_schema_3.artist $std_artist_table");
+      $dbh->do('set search_path=dbic_t_schema,public');
+      $dbh->do("CREATE SCHEMA dbic_t_schema_4");
+      $dbh->do("CREATE SCHEMA dbic_t_schema_5");
+      $dbh->do(<<EOS);
+ CREATE TABLE dbic_t_schema_4.artist
+ (
+   artistid integer not null default nextval('artist_artistid_seq'::regclass) PRIMARY KEY
+   , name VARCHAR(100)
+   , rank INTEGER NOT NULL DEFAULT '13'
+   , charfield CHAR(10)
+   , arrayfield INTEGER[]
+ );
+EOS
+      $dbh->do('set search_path=public,dbic_t_schema,dbic_t_schema_3');
+      $dbh->do('create sequence public.artist_artistid_seq'); #< in the public schema
+      $dbh->do(<<EOS);
+ CREATE TABLE dbic_t_schema_5.artist
+ (
+   artistid integer not null default nextval('public.artist_artistid_seq'::regclass) PRIMARY KEY
+   , name VARCHAR(100)
+   , rank INTEGER NOT NULL DEFAULT '13'
+   , charfield CHAR(10)
+   , arrayfield INTEGER[]
+ );
+EOS
+      $dbh->do('set search_path=dbic_t_schema,public');
+  });
 }
 
-END { _cleanup($dbh) }
+
+
+sub drop_test_schema {
+    my ( $schema, $warn_exceptions ) = @_;
+
+    $schema->storage->dbh_do(sub {
+        my (undef,$dbh) = @_;
+
+        local $dbh->{Warn} = 0;
+
+        for my $stat (
+                      'DROP SCHEMA dbic_t_schema_5 CASCADE',
+                      'DROP SEQUENCE public.artist_artistid_seq',
+                      'DROP SCHEMA dbic_t_schema_4 CASCADE',
+                      'DROP SCHEMA dbic_t_schema CASCADE',
+                      'DROP SEQUENCE pkid1_seq',
+                      'DROP SEQUENCE pkid2_seq',
+                      'DROP SEQUENCE nonpkid_seq',
+                      'DROP SCHEMA dbic_t_schema_2 CASCADE',
+                      'DROP SCHEMA dbic_t_schema_3 CASCADE',
+                     ) {
+            eval { $dbh->do ($stat) };
+            diag $@ if $@ && $warn_exceptions;
+        }
+    });
+}
+
+
+###  auto-pk / last_insert_id / sequence discovery
+sub run_apk_tests {
+    my $schema = shift;
+
+    # This is in Core now, but it's here just to test that it doesn't break
+    $schema->class('Artist')->load_components('PK::Auto');
+    cmp_ok( $schema->resultset('Artist')->count, '==', 0, 'this should start with an empty artist table');
+
+    # test that auto-pk also works with the defined search path by
+    # un-schema-qualifying the table name
+    apk_t_set($schema,'artist');
+
+    my $unq_new;
+    lives_ok {
+        $unq_new = $schema->resultset('Artist')->create({ name => 'baz' });
+    } 'insert into unqualified, shadowed table succeeds';
+
+    is($unq_new && $unq_new->artistid, 1, "and got correct artistid");
+
+    my @test_schemas = ( [qw| dbic_t_schema_2    1  |],
+                         [qw| dbic_t_schema_3    1  |],
+                         [qw| dbic_t_schema_4    2  |],
+                         [qw| dbic_t_schema_5    1  |],
+                       );
+    foreach my $t ( @test_schemas ) {
+        my ($sch_name, $start_num) = @$t;
+        #test with dbic_t_schema_2
+        apk_t_set($schema,"$sch_name.artist");
+        my $another_new;
+        lives_ok {
+            $another_new = $schema->resultset('Artist')->create({ name => 'Tollbooth Willy'});
+            is( $another_new->artistid,$start_num, "got correct artistid for $sch_name")
+                or diag "USED SEQUENCE: ".($schema->source('Artist')->column_info('artistid')->{sequence} || '<none>');
+        } "$sch_name liid 1 did not die"
+            or diag "USED SEQUENCE: ".($schema->source('Artist')->column_info('artistid')->{sequence} || '<none>');
+        lives_ok {
+            $another_new = $schema->resultset('Artist')->create({ name => 'Adam Sandler'});
+            is( $another_new->artistid,$start_num+1, "got correct artistid for $sch_name")
+                or diag "USED SEQUENCE: ".($schema->source('Artist')->column_info('artistid')->{sequence} || '<none>');
+        } "$sch_name liid 2 did not die"
+            or diag "USED SEQUENCE: ".($schema->source('Artist')->column_info('artistid')->{sequence} || '<none>');
+
+    }
+
+    lives_ok {
+        apk_t_set($schema,'dbic_t_schema.artist');
+        my $new = $schema->resultset('Artist')->create({ name => 'foo' });
+        is($new->artistid, 4, "Auto-PK worked");
+        $new = $schema->resultset('Artist')->create({ name => 'bar' });
+        is($new->artistid, 5, "Auto-PK worked");
+    } 'old auto-pk tests did not die either';
+}
+
+# sets the artist table name and clears sequence name cache
+sub apk_t_set {
+    my ( $s, $n ) = @_;
+    $s->source("Artist")->name($n);
+    $s->source('Artist')->column_info('artistid')->{sequence} = undef; #< clear sequence name cache
+}
+
+
+######## EXTENDED AUTO-PK TESTS
+
+my @eapk_id_columns;
+BEGIN {
+  package DBICTest::Schema::ExtAPK;
+  push @main::test_classes, __PACKAGE__;
+
+  use strict;
+  use warnings;
+  use base 'DBIx::Class::Core';
+
+  __PACKAGE__->table('apk');
+
+  @eapk_id_columns = qw( id1 id2 id3 id4 );
+  __PACKAGE__->add_columns(
+    map { $_ => { data_type => 'integer', is_auto_increment => 1 } }
+       @eapk_id_columns
+  );
+
+  __PACKAGE__->set_primary_key('id2'); #< note the SECOND column is
+                                       #the primary key
+}
+
+my @eapk_schemas;
+BEGIN{ @eapk_schemas = map "dbic_apk_$_", 0..5 }
+my %seqs; #< hash of schema.table.col => currval of its (DBIC) primary key sequence
+
+sub run_extended_apk_tests {
+  my $schema = shift;
+
+  #save the search path and reset it at the end
+  my $search_path_save = eapk_get_search_path($schema);
+
+  eapk_drop_all($schema);
+
+  # make the test schemas and sequences
+  $schema->storage->dbh_do(sub {
+    my ( undef, $dbh ) = @_;
+
+    $dbh->do("CREATE SCHEMA $_")
+        for @eapk_schemas;
+
+    $dbh->do("CREATE SEQUENCE $eapk_schemas[5].fooseq");
+    $dbh->do("SELECT setval('$eapk_schemas[5].fooseq',400)");
+    $seqs{"$eapk_schemas[1].apk.id2"} = 400;
+
+    $dbh->do("CREATE SEQUENCE $eapk_schemas[4].fooseq");
+    $dbh->do("SELECT setval('$eapk_schemas[4].fooseq',300)");
+    $seqs{"$eapk_schemas[3].apk.id2"} = 300;
+
+    $dbh->do("CREATE SEQUENCE $eapk_schemas[3].fooseq");
+    $dbh->do("SELECT setval('$eapk_schemas[3].fooseq',200)");
+    $seqs{"$eapk_schemas[4].apk.id2"} = 200;
+
+    $dbh->do("SET search_path = ".join ',', reverse @eapk_schemas );
+  });
+
+  # clear our search_path cache
+  $schema->storage->{_pg_search_path} = undef;
+
+  eapk_create( $schema,
+               with_search_path => [0,1],
+             );
+  eapk_create( $schema,
+               with_search_path => [1,0,'public'],
+               nextval => "$eapk_schemas[5].fooseq",
+             );
+  eapk_create( $schema,
+               with_search_path => ['public',0,1],
+               qualify_table => 2,
+             );
+  eapk_create( $schema,
+               with_search_path => [3,1,0,'public'],
+               nextval => "$eapk_schemas[4].fooseq",
+             );
+  eapk_create( $schema,
+               with_search_path => [3,1,0,'public'],
+               nextval => "$eapk_schemas[3].fooseq",
+               qualify_table => 4,
+             );
+
+  eapk_poke( $schema );
+  eapk_poke( $schema, 0 );
+  eapk_poke( $schema, 2 );
+  eapk_poke( $schema, 4 );
+  eapk_poke( $schema, 1 );
+  eapk_poke( $schema, 0 );
+  eapk_poke( $schema, 1 );
+  eapk_poke( $schema );
+  eapk_poke( $schema, 4 );
+  eapk_poke( $schema, 3 );
+  eapk_poke( $schema, 1 );
+  eapk_poke( $schema, 2 );
+  eapk_poke( $schema, 0 );
+
+  # set our search path back
+  eapk_set_search_path( $schema, @$search_path_save );
+}
+
+# do a DBIC create on the apk table in the given schema number (which is an
+# index of @eapk_schemas)
+
+sub eapk_poke {
+  my ($s, $schema_num) = @_;
+
+  my $schema_name = defined $schema_num
+      ? $eapk_schemas[$schema_num]
+      : '';
+
+  my $schema_name_actual = $schema_name || eapk_find_visible_schema($s);
+
+  $s->source('ExtAPK')->name($schema_name ? $schema_name.'.apk' : 'apk');
+  #< clear sequence name cache
+  $s->source('ExtAPK')->column_info($_)->{sequence} = undef
+      for @eapk_id_columns;
+
+  no warnings 'uninitialized';
+  lives_ok {
+    my $new;
+    for my $inc (1,2,3) {
+      $new = $s->resultset('ExtAPK')->create({ id1 => 1 });
+      my $proper_seqval = ++$seqs{"$schema_name_actual.apk.id2"};
+      is( $new->id2, $proper_seqval, "$schema_name_actual.apk.id2 correct inc $inc" )
+          or eapk_seq_diag($s,$schema_name);
+      $new->discard_changes;
+      is( $new->id1, 1, "id1 kept the explicitly supplied value" );
+      for my $id ('id3','id4') {
+        my $proper_seqval = ++$seqs{"$schema_name_actual.apk.$id"};
+        is( $new->$id, $proper_seqval, "$schema_name_actual.apk.$id correct inc $inc" )
+            or eapk_seq_diag($s,$schema_name);
+      }
+    }
+  } "create in schema '$schema_name' lives"
+      or eapk_seq_diag($s,$schema_name);
+}
+
+# print diagnostic info on which sequences were found in the ExtAPK
+# class
+sub eapk_seq_diag {
+    my $s = shift;
+    my $schema = shift || eapk_find_visible_schema($s);
+
+    diag "$schema.apk sequences: ",
+        join(', ',
+             map "$_:".($s->source('ExtAPK')->column_info($_)->{sequence} || '<none>'),
+             @eapk_id_columns
+            );
+}
+
+# get the postgres search path as an arrayref
+sub eapk_get_search_path {
+    my ( $s ) = @_;
+    # cache the search path as ['schema','schema',...] in the storage
+    # obj
+
+    return $s->storage->dbh_do(sub {
+        my (undef, $dbh) = @_;
+        my @search_path;
+        my ($sp_string) = $dbh->selectrow_array('SHOW search_path');
+        while ( $sp_string =~ s/("[^"]+"|[^,]+),?// ) {
+            unless( defined $1 and length $1 ) {
+                die "search path sanity check failed: '$1'";
+            }
+            push @search_path, $1;
+        }
+        \@search_path
+    });
+}
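
A standalone sketch (not part of this commit, no database needed) of
what the tokenizer above does, including a quoted entry that contains a
comma:

    use strict;
    use warnings;

    my $sp_string = '"$user",public,"odd,name"';
    my @search_path;
    while ( $sp_string =~ s/("[^"]+"|[^,]+),?// ) {
        push @search_path, $1;
    }
    print "$_\n" for @search_path;   # "$user" / public / "odd,name"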
+sub eapk_set_search_path {
+    my ($s,@sp) = @_;
+    my $sp = join ',',@sp;
+    $s->storage->dbh_do( sub { $_[1]->do("SET search_path = $sp") } );
+}
+
+# create the apk table in the given schema; options select whether the
+# table name is schema-qualified and which sequence supplies nextval
+# for the second id column
+sub eapk_create {
+    my ($schema, %a) = @_;
+
+    $schema->storage->dbh_do(sub {
+        my (undef,$dbh) = @_;
+
+        my $searchpath_save;
+        if ( $a{with_search_path} ) {
+            ($searchpath_save) = $dbh->selectrow_array('SHOW search_path');
+
+            my $search_path = join ',',map {/\D/ ? $_ : $eapk_schemas[$_]} @{$a{with_search_path}};
+
+            $dbh->do("SET search_path = $search_path");
+        }
+
+        my $table_name = $a{qualify_table}
+            ? ($eapk_schemas[$a{qualify_table}] || die). ".apk"
+            : 'apk';
+        local $_[1]->{Warn} = 0;
+
+        my $id_def = $a{nextval}
+            ? "integer not null default nextval('$a{nextval}'::regclass)"
+            : 'serial';
+        $dbh->do(<<EOS);
+CREATE TABLE $table_name (
+  id1 serial
+  , id2 $id_def
+  , id3 serial primary key
+  , id4 serial
+)
+EOS
+
+        if( $searchpath_save ) {
+            $dbh->do("SET search_path = $searchpath_save");
+        }
+    });
+}
+
+sub eapk_drop_all {
+    my ( $schema, $warn_exceptions ) = @_;
+
+    $schema->storage->dbh_do(sub {
+        my (undef,$dbh) = @_;
+
+        local $dbh->{Warn} = 0;
+
+        # drop the test schemas
+        for (@eapk_schemas ) {
+            eval{ $dbh->do("DROP SCHEMA $_ CASCADE") };
+            diag $@ if $@ && $warn_exceptions;
+        }
+
+
+    });
+}
+
+sub eapk_find_visible_schema {
+    my ($s) = @_;
+
+    my ($schema) =
+        $s->storage->dbh_do(sub {
+            $_[1]->selectrow_array(<<EOS);
+SELECT n.nspname
+FROM pg_catalog.pg_namespace n
+JOIN pg_catalog.pg_class c ON c.relnamespace = n.oid
+WHERE c.relname = 'apk'
+  AND pg_catalog.pg_table_is_visible(c.oid)
+EOS
+        });
+    return $schema;
+}

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/73oracle.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/73oracle.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/73oracle.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -26,7 +26,7 @@
 }
 
 use strict;
-use warnings;  
+use warnings;
 
 use Test::Exception;
 use Test::More;
@@ -40,8 +40,6 @@
   ' as well as following sequences: \'pkid1_seq\', \'pkid2_seq\' and \'nonpkid_seq\''
   unless ($dsn && $user && $pass);
 
-plan tests => 34;
-
 DBICTest::Schema->load_classes('ArtistFQN');
 my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
 
@@ -49,6 +47,7 @@
 
 eval {
   $dbh->do("DROP SEQUENCE artist_seq");
+  $dbh->do("DROP SEQUENCE cd_seq");
   $dbh->do("DROP SEQUENCE pkid1_seq");
   $dbh->do("DROP SEQUENCE pkid2_seq");
   $dbh->do("DROP SEQUENCE nonpkid_seq");
@@ -58,15 +57,17 @@
   $dbh->do("DROP TABLE track");
 };
 $dbh->do("CREATE SEQUENCE artist_seq START WITH 1 MAXVALUE 999999 MINVALUE 0");
+$dbh->do("CREATE SEQUENCE cd_seq START WITH 1 MAXVALUE 999999 MINVALUE 0");
 $dbh->do("CREATE SEQUENCE pkid1_seq START WITH 1 MAXVALUE 999999 MINVALUE 0");
 $dbh->do("CREATE SEQUENCE pkid2_seq START WITH 10 MAXVALUE 999999 MINVALUE 0");
 $dbh->do("CREATE SEQUENCE nonpkid_seq START WITH 20 MAXVALUE 999999 MINVALUE 0");
 $dbh->do("CREATE TABLE artist (artistid NUMBER(12), name VARCHAR(255), rank NUMBER(38), charfield VARCHAR2(10))");
 $dbh->do("CREATE TABLE sequence_test (pkid1 NUMBER(12), pkid2 NUMBER(12), nonpkid NUMBER(12), name VARCHAR(255))");
-$dbh->do("CREATE TABLE cd (cdid NUMBER(12), artist NUMBER(12), title VARCHAR(255), year VARCHAR(4))");
-$dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12), position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at DATE)");
+$dbh->do("CREATE TABLE cd (cdid NUMBER(12), artist NUMBER(12), title VARCHAR(255), year VARCHAR(4), genreid NUMBER(12), single_track NUMBER(12))");
+$dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12), position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at DATE, small_dt DATE)");
 
 $dbh->do("ALTER TABLE artist ADD (CONSTRAINT artist_pk PRIMARY KEY (artistid))");
+$dbh->do("ALTER TABLE cd ADD (CONSTRAINT cd_pk PRIMARY KEY (cdid))");
 $dbh->do("ALTER TABLE sequence_test ADD (CONSTRAINT sequence_test_constraint PRIMARY KEY (pkid1, pkid2))");
 $dbh->do(qq{
   CREATE OR REPLACE TRIGGER artist_insert_trg
@@ -80,6 +81,18 @@
     END IF;
   END;
 });
+$dbh->do(qq{
+  CREATE OR REPLACE TRIGGER cd_insert_trg
+  BEFORE INSERT ON cd
+  FOR EACH ROW
+  BEGIN
+    IF :new.cdid IS NULL THEN
+      SELECT cd_seq.nextval
+      INTO :new.cdid
+      FROM DUAL;
+    END IF;
+  END;
+});
 
 {
     # Swiped from t/bindtype_columns.t to avoid creating my own Resultset.
@@ -88,7 +101,7 @@
     eval { $dbh->do('DROP TABLE bindtype_test') };
 
     $dbh->do(qq[
-        CREATE TABLE bindtype_test 
+        CREATE TABLE bindtype_test
         (
             id              integer      NOT NULL   PRIMARY KEY,
             bytea           integer      NULL,
@@ -108,20 +121,53 @@
 my $new = $schema->resultset('Artist')->create({ name => 'foo' });
 is($new->artistid, 1, "Oracle Auto-PK worked");
 
+my $cd = $schema->resultset('CD')->create({ artist => 1, title => 'EP C', year => '2003' });
+is($cd->cdid, 1, "Oracle Auto-PK worked - using scalar ref as table name");
+
 # test again with fully-qualified table name
 $new = $schema->resultset('ArtistFQN')->create( { name => 'bar' } );
 is( $new->artistid, 2, "Oracle Auto-PK worked with fully-qualified tablename" );
 
+# test rel names over the 30 char limit
+my $query = $schema->resultset('Artist')->search({
+  artistid => 1 
+}, {
+  prefetch => 'cds_very_very_very_long_relationship_name'
+});
+
+lives_and {
+  is $query->first->cds_very_very_very_long_relationship_name->first->cdid, 1
+} 'query with rel name over 30 chars survived and worked';
+
+# rel name over 30 char limit with user condition
+# This requires walking the SQLA data structure.
+{
+  local $TODO = 'user condition on rel longer than 30 chars';
+
+  $query = $schema->resultset('Artist')->search({
+    'cds_very_very_very_long_relationship_name.title' => 'EP C'
+  }, {
+    prefetch => 'cds_very_very_very_long_relationship_name'
+  });
+
+  lives_and {
+    is $query->first->cds_very_very_very_long_relationship_name->first->cdid, 1
+  } 'query with rel name over 30 chars and user condition survived and worked';
+}
+
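For context (not part of this commit): the relationship name exercised
above is well past Oracle's 30-character identifier limit, which is
what forces the alias shortening under test:

    use strict;
    use warnings;

    my $rel = 'cds_very_very_very_long_relationship_name';
    printf "%d chars, Oracle limit is 30\n", length $rel;   # 41 chars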
 # test join with row count ambiguity
-my $cd = $schema->resultset('CD')->create({ cdid => 1, artist => 1, title => 'EP C', year => '2003' });
-my $track = $schema->resultset('Track')->create({ trackid => 1, cd => 1, position => 1, title => 'Track1' });
+
+my $track = $schema->resultset('Track')->create({ trackid => 1, cd => 1,
+    position => 1, title => 'Track1' });
 my $tjoin = $schema->resultset('Track')->search({ 'me.title' => 'Track1'},
         { join => 'cd',
           rows => 2 }
 );
 
-is($tjoin->next->title, 'Track1', "ambiguous column ok");
+ok(my $row = $tjoin->next);
 
+is($row->title, 'Track1', "ambiguous column ok");
+
 # check count distinct with multiple columns
 my $other_track = $schema->resultset('Track')->create({ trackid => 2, cd => 1, position => 1, title => 'Track2' });
 
@@ -145,7 +191,7 @@
 
 $tcount = $schema->resultset('Track')->search(
   {},
-  { 
+  {
      group_by => [ qw/position title/ ]
   }
 );
@@ -182,32 +228,44 @@
 my $st = $schema->resultset('SequenceTest')->create({ name => 'foo', pkid1 => 55 });
 is($st->pkid1, 55, "Oracle Auto-PK without trigger: First primary key set manually");
 
-{
-	my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
-	$binstr{'large'} = $binstr{'small'} x 1024;
+SKIP: {
+  my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
+  $binstr{'large'} = $binstr{'small'} x 1024;
 
-	my $maxloblen = length $binstr{'large'};
-	note "Localizing LongReadLen to $maxloblen to avoid truncation of test data";
-	local $dbh->{'LongReadLen'} = $maxloblen;
+  my $maxloblen = length $binstr{'large'};
+  note "Localizing LongReadLen to $maxloblen to avoid truncation of test data";
+  local $dbh->{'LongReadLen'} = $maxloblen;
 
-	my $rs = $schema->resultset('BindType');
-	my $id = 0;
+  my $rs = $schema->resultset('BindType');
+  my $id = 0;
 
-	foreach my $type (qw( blob clob )) {
-		foreach my $size (qw( small large )) {
-			$id++;
+  if ($DBD::Oracle::VERSION eq '1.23') {
+    throws_ok { $rs->create({ id => 1, blob => $binstr{large} }) }
+      qr/broken/,
+      'throws on blob insert with DBD::Oracle == 1.23';
 
-			lives_ok { $rs->create( { 'id' => $id, $type => $binstr{$size} } ) }
-				"inserted $size $type without dying";
-			ok($rs->find($id)->$type eq $binstr{$size}, "verified inserted $size $type" );
-		}
-	}
+    skip 'buggy BLOB support in DBD::Oracle 1.23', 7;
+  }
+
+  foreach my $type (qw( blob clob )) {
+    foreach my $size (qw( small large )) {
+      $id++;
+
+      lives_ok { $rs->create( { 'id' => $id, $type => $binstr{$size} } ) }
+      "inserted $size $type without dying";
+
+      ok($rs->find($id)->$type eq $binstr{$size}, "verified inserted $size $type" );
+    }
+  }
 }
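
For reference (not part of this commit), the payload sizes driving the
LongReadLen setting above are easy to compute standalone:

    use strict;
    use warnings;

    my %binstr = ( small => join('', map chr($_), 1 .. 127) );
    $binstr{large} = $binstr{small} x 1024;
    printf "small: %d bytes, large: %d bytes\n",
      length $binstr{small}, length $binstr{large};   # 127 and 130048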
 
+done_testing;
+
 # clean up our mess
 END {
     if($schema && ($dbh = $schema->storage->dbh)) {
         $dbh->do("DROP SEQUENCE artist_seq");
+        $dbh->do("DROP SEQUENCE cd_seq");
         $dbh->do("DROP SEQUENCE pkid1_seq");
         $dbh->do("DROP SEQUENCE pkid2_seq");
         $dbh->do("DROP SEQUENCE nonpkid_seq");

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/73oracle_inflate.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/73oracle_inflate.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/73oracle_inflate.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,76 +0,0 @@
-use strict;
-use warnings;  
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_ORA_${_}" } qw/DSN USER PASS/};
-
-if (not ($dsn && $user && $pass)) {
-    plan skip_all => 'Set $ENV{DBICTEST_ORA_DSN}, _USER and _PASS to run this test. ' .
-         'Warning: This test drops and creates a table called \'track\'';
-}
-else {
-    eval "use DateTime; use DateTime::Format::Oracle;";
-    if ($@) {
-        plan skip_all => 'needs DateTime and DateTime::Format::Oracle for testing';
-    }
-    else {
-        plan tests => 7;
-    }
-}
-
-# DateTime::Format::Oracle needs this set
-$ENV{NLS_DATE_FORMAT} = 'DD-MON-YY';
-$ENV{NLS_TIMESTAMP_FORMAT} = 'YYYY-MM-DD HH24:MI:SSXFF';
-$ENV{NLS_LANG} = 'AMERICAN_AMERICA.WE8ISO8859P1';
-
-my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
-
-# Need to redefine the last_updated_on column
-my $col_metadata = $schema->class('Track')->column_info('last_updated_on');
-$schema->class('Track')->add_column( 'last_updated_on' => {
-    data_type => 'date' });
-$schema->class('Track')->add_column( 'last_updated_at' => {
-    data_type => 'timestamp' });
-
-my $dbh = $schema->storage->dbh;
-
-#$dbh->do("alter session set nls_timestamp_format = 'YYYY-MM-DD HH24:MI:SSXFF'");
-
-eval {
-  $dbh->do("DROP TABLE track");
-};
-$dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12), position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at TIMESTAMP)");
-
-# insert a row to play with
-my $new = $schema->resultset('Track')->create({ trackid => 1, cd => 1, position => 1, title => 'Track1', last_updated_on => '06-MAY-07', last_updated_at => '2009-05-03 21:17:18.5' });
-is($new->trackid, 1, "insert sucessful");
-
-my $track = $schema->resultset('Track')->find( 1 );
-
-is( ref($track->last_updated_on), 'DateTime', "last_updated_on inflated ok");
-
-is( $track->last_updated_on->month, 5, "DateTime methods work on inflated column");
-
-#note '$track->last_updated_at => ', $track->last_updated_at;
-is( ref($track->last_updated_at), 'DateTime', "last_updated_at inflated ok");
-
-is( $track->last_updated_at->nanosecond, 500_000_000, "DateTime methods work with nanosecond precision");
-
-my $dt = DateTime->now();
-$track->last_updated_on($dt);
-$track->last_updated_at($dt);
-$track->update;
-
-is( $track->last_updated_on->month, $dt->month, "deflate ok");
-is( int $track->last_updated_at->nanosecond, int $dt->nanosecond, "deflate ok with nanosecond precision");
-
-# clean up our mess
-END {
-    if($dbh) {
-        $dbh->do("DROP TABLE track");
-    }
-}
-

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/745db2.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/745db2.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/745db2.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,7 +1,8 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
+use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
 
@@ -12,8 +13,6 @@
 plan skip_all => 'Set $ENV{DBICTEST_DB2_DSN}, _USER and _PASS to run this test'
   unless ($dsn && $user);
 
-plan tests => 9;
-
 my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
 
 my $dbh = $schema->storage->dbh;
@@ -22,40 +21,58 @@
 
 $dbh->do("CREATE TABLE artist (artistid INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 1, INCREMENT BY 1), name VARCHAR(255), charfield CHAR(10), rank INTEGER DEFAULT 13);");
 
-# This is in core, just testing that it still loads ok
-$schema->class('Artist')->load_components('PK::Auto');
-
 my $ars = $schema->resultset('Artist');
+is ( $ars->count, 0, 'No rows at first' );
 
 # test primary key handling
 my $new = $ars->create({ name => 'foo' });
 ok($new->artistid, "Auto-PK worked");
 
-my $init_count = $ars->count;
-for (1..6) {
-    $ars->create({ name => 'Artist ' . $_ });
-}
-is ($ars->count, $init_count + 6, 'Simple count works');
+# test explicit key spec
+$new = $ars->create ({ name => 'bar', artistid => 66 });
+is($new->artistid, 66, 'Explicit PK worked');
+$new->discard_changes;
+is($new->artistid, 66, 'Explicit PK assigned');
 
+# test populate
+lives_ok (sub {
+  my @pop;
+  for (1..2) {
+    push @pop, { name => "Artist_$_" };
+  }
+  $ars->populate (\@pop);
+});
+
+# test populate with explicit key
+lives_ok (sub {
+  my @pop;
+  for (1..2) {
+    push @pop, { name => "Artist_expkey_$_", artistid => 100 + $_ };
+  }
+  $ars->populate (\@pop);
+});
+
+# count what we did so far
+is ($ars->count, 6, 'Simple count works');
+
 # test LIMIT support
-my $it = $ars->search( {},
+my $lim = $ars->search( {},
   {
     rows => 3,
+    offset => 4,
     order_by => 'artistid'
   }
 );
-is( $it->count, 3, "LIMIT count ok" );
+is( $lim->count, 2, 'ROWS+OFFSET count ok' );
+is( $lim->all, 2, 'Number of ->all objects matches count' );
 
-my @all = $it->all;
-is (@all, 3, 'Number of ->all objects matches count');
+# test iterator
+$lim->reset;
+is( $lim->next->artistid, 101, "iterator->next ok" );
+is( $lim->next->artistid, 102, "iterator->next ok" );
+is( $lim->next, undef, "next past end of resultset ok" );
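
The expected counts above follow from simple arithmetic over the 6 rows
inserted so far (standalone sketch, not part of this commit):

    use strict;
    use warnings;
    use List::Util qw(min max);

    my ($total, $rows, $offset) = (6, 3, 4);
    print max(0, min($rows, $total - $offset)), "\n";   # 2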
 
-$it->reset;
-is( $it->next->name, "foo", "iterator->next ok" );
-is( $it->next->name, "Artist 1", "iterator->next ok" );
-is( $it->next->name, "Artist 2", "iterator->next ok" );
-is( $it->next, undef, "next past end of resultset ok" );  # this can not succeed if @all > 3
 
-
 my $test_type_info = {
     'artistid' => {
         'data_type' => 'INTEGER',
@@ -70,12 +87,12 @@
     'charfield' => {
         'data_type' => 'CHAR',
         'is_nullable' => 1,
-        'size' => 10 
+        'size' => 10
     },
     'rank' => {
         'data_type' => 'INTEGER',
         'is_nullable' => 1,
-        'size' => 10 
+        'size' => 10
     },
 };
 
@@ -83,7 +100,10 @@
 my $type_info = $schema->storage->columns_info_for('artist');
 is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
 
+done_testing;
+
 # clean up our mess
 END {
+    my $dbh = eval { $schema->storage->_dbh };
     $dbh->do("DROP TABLE artist") if $dbh;
 }

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/746db2_400.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/746db2_400.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/746db2_400.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -82,6 +82,6 @@
 
 # clean up our mess
 END {
+    my $dbh = eval { $schema->storage->_dbh };
     $dbh->do("DROP TABLE artist") if $dbh;
 }
-

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/746mssql.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/746mssql.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/746mssql.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,19 +1,20 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
+use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
+use DBIC::SqlMakerTest;
 
 my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MSSQL_ODBC_${_}" } qw/DSN USER PASS/};
 
 plan skip_all => 'Set $ENV{DBICTEST_MSSQL_ODBC_DSN}, _USER and _PASS to run this test'
   unless ($dsn && $user);
 
-plan tests => 13;
+DBICTest::Schema->load_classes('ArtistGUID');
+my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
 
-my $schema = DBICTest::Schema->connect($dsn, $user, $pass, {AutoCommit => 1});
-
 {
   no warnings 'redefine';
   my $connect_count = 0;
@@ -27,11 +28,15 @@
 
 isa_ok( $schema->storage, 'DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server' );
 
+{
+  my $schema2 = $schema->connect ($schema->storage->connect_info);
+  ok (! $schema2->storage->connected, 'a re-connected cloned schema starts unconnected');
+}
+
 $schema->storage->dbh_do (sub {
     my ($storage, $dbh) = @_;
     eval { $dbh->do("DROP TABLE artist") };
     $dbh->do(<<'SQL');
-
 CREATE TABLE artist (
    artistid INT IDENTITY NOT NULL,
    name VARCHAR(100),
@@ -39,20 +44,39 @@
    charfield CHAR(10) NULL,
    primary key(artistid)
 )
-
 SQL
-
 });
 
 my %seen_id;
 
-# fresh $schema so we start unconnected
-$schema = DBICTest::Schema->connect($dsn, $user, $pass, {AutoCommit => 1});
+my @opts = (
+  { on_connect_call => 'use_dynamic_cursors' },
+  {},
+);
+my $new;
 
-# test primary key handling
-my $new = $schema->resultset('Artist')->create({ name => 'foo' });
-ok($new->artistid > 0, "Auto-PK worked");
+# test Auto-PK with different options
+for my $opts (@opts) {
+  SKIP: {
+    $schema = DBICTest::Schema->connect($dsn, $user, $pass, $opts);
 
+    eval {
+      $schema->storage->ensure_connected
+    };
+    if ($@ =~ /dynamic cursors/) {
+      skip
+'Dynamic Cursors not functional, tds_version 8.0 or greater required if using'.
+' FreeTDS', 1;
+    }
+
+    $schema->resultset('Artist')->search({ name => 'foo' })->delete;
+
+    $new = $schema->resultset('Artist')->create({ name => 'foo' });
+
+    ok($new->artistid > 0, "Auto-PK worked");
+  }
+}
+
 $seen_id{$new->artistid}++;
 
 # test LIMIT support
@@ -73,10 +97,365 @@
 is( $it->next->name, "Artist 2", "iterator->next ok" );
 is( $it->next, undef, "next past end of resultset ok" );
 
+# test GUID columns
 
+$schema->storage->dbh_do (sub {
+    my ($storage, $dbh) = @_;
+    eval { $dbh->do("DROP TABLE artist") };
+    $dbh->do(<<'SQL');
+CREATE TABLE artist (
+   artistid UNIQUEIDENTIFIER NOT NULL,
+   name VARCHAR(100),
+   rank INT NOT NULL DEFAULT '13',
+   charfield CHAR(10) NULL,
+   a_guid UNIQUEIDENTIFIER,
+   primary key(artistid)
+)
+SQL
+});
+
+# start disconnected to make sure insert works on an un-reblessed storage
+$schema = DBICTest::Schema->connect($dsn, $user, $pass);
+
+my $row;
+lives_ok {
+  $row = $schema->resultset('ArtistGUID')->create({ name => 'mtfnpy' })
+} 'created a row with a GUID';
+
+ok(
+  eval { $row->artistid },
+  'row has GUID PK col populated',
+);
+diag $@ if $@;
+
+ok(
+  eval { $row->a_guid },
+  'row has a GUID col with auto_nextval populated',
+);
+diag $@ if $@;
+
+my $row_from_db = $schema->resultset('ArtistGUID')
+  ->search({ name => 'mtfnpy' })->first;
+
+is $row_from_db->artistid, $row->artistid,
+  'PK GUID round trip';
+
+is $row_from_db->a_guid, $row->a_guid,
+  'NON-PK GUID round trip';
+
+# test MONEY type
+$schema->storage->dbh_do (sub {
+    my ($storage, $dbh) = @_;
+    eval { $dbh->do("DROP TABLE money_test") };
+    $dbh->do(<<'SQL');
+CREATE TABLE money_test (
+   id INT IDENTITY PRIMARY KEY,
+   amount MONEY NULL
+)
+SQL
+});
+
+my $rs = $schema->resultset('Money');
+
+lives_ok {
+  $row = $rs->create({ amount => 100 });
+} 'inserted a money value';
+
+cmp_ok $rs->find($row->id)->amount, '==', 100, 'money value round-trip';
+
+lives_ok {
+  $row->update({ amount => 200 });
+} 'updated a money value';
+
+cmp_ok $rs->find($row->id)->amount, '==', 200,
+  'updated money value round-trip';
+
+lives_ok {
+  $row->update({ amount => undef });
+} 'updated a money value to NULL';
+
+is $rs->find($row->id)->amount, undef,'updated money value to NULL round-trip';
+
+$schema->storage->dbh_do (sub {
+    my ($storage, $dbh) = @_;
+    eval { $dbh->do("DROP TABLE owners") };
+    eval { $dbh->do("DROP TABLE books") };
+    $dbh->do(<<'SQL');
+CREATE TABLE books (
+   id INT IDENTITY (1, 1) NOT NULL,
+   source VARCHAR(100),
+   owner INT,
+   title VARCHAR(10),
+   price INT NULL
+)
+
+CREATE TABLE owners (
+   id INT IDENTITY (1, 1) NOT NULL,
+   name VARCHAR(100),
+)
+SQL
+
+});
+
+lives_ok ( sub {
+  # start a new connection, make sure rebless works
+  my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
+  $schema->populate ('Owners', [
+    [qw/id  name  /],
+    [qw/1   wiggle/],
+    [qw/2   woggle/],
+    [qw/3   boggle/],
+    [qw/4   fRIOUX/],
+    [qw/5   fRUE/],
+    [qw/6   fREW/],
+    [qw/7   fROOH/],
+    [qw/8   fISMBoC/],
+    [qw/9   station/],
+    [qw/10   mirror/],
+    [qw/11   dimly/],
+    [qw/12   face_to_face/],
+    [qw/13   icarus/],
+    [qw/14   dream/],
+    [qw/15   dyrstyggyr/],
+  ]);
+}, 'populate with PKs supplied ok' );
+
+
+lives_ok (sub {
+  # start a new connection, make sure rebless works
+  # test an insert with a supplied identity, followed by one without
+  my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
+  for (2, 1) {
+    my $id = $_ * 20 ;
+    $schema->resultset ('Owners')->create ({ id => $id, name => "troglodoogle $id" });
+    $schema->resultset ('Owners')->create ({ name => "troglodoogle " . ($id + 1) });
+  }
+}, 'create with/without PKs ok' );
+
+is ($schema->resultset ('Owners')->count, 19, 'owner rows really in db' );
+
+lives_ok ( sub {
+  # start a new connection, make sure rebless works
+  my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
+  $schema->populate ('BooksInLibrary', [
+    [qw/source  owner title   /],
+    [qw/Library 1     secrets0/],
+    [qw/Library 1     secrets1/],
+    [qw/Eatery  1     secrets2/],
+    [qw/Library 2     secrets3/],
+    [qw/Library 3     secrets4/],
+    [qw/Eatery  3     secrets5/],
+    [qw/Library 4     secrets6/],
+    [qw/Library 5     secrets7/],
+    [qw/Eatery  5     secrets8/],
+    [qw/Library 6     secrets9/],
+    [qw/Library 7     secrets10/],
+    [qw/Eatery  7     secrets11/],
+    [qw/Library 8     secrets12/],
+  ]);
+}, 'populate without PKs supplied ok' );
+
+# plain ordered subqueries throw
+throws_ok (sub {
+  $schema->resultset('Owners')->search ({}, { order_by => 'name' })->as_query
+}, qr/ordered subselect encountered/, 'Ordered Subselect detection throws ok');
+
+# make sure ordered subselects *somewhat* work
+{
+  my $owners = $schema->resultset ('Owners')->search ({}, { order_by => 'name', offset => 2, rows => 3, unsafe_subselect_ok => 1 });
+
+  my $al = $owners->current_source_alias;
+  my $sealed_owners = $owners->result_source->resultset->search (
+    {},
+    {
+      alias => $al,
+      from => [{
+        -alias => $al,
+        -source_handle => $owners->result_source->handle,
+        $al => $owners->as_query,
+      }],
+    },
+  );
+
+  is_deeply (
+    [ map { $_->name } ($sealed_owners->all) ],
+    [ map { $_->name } ($owners->all) ],
+    'Sort preserved from within a subquery',
+  );
+}
+
+TODO: {
+  local $TODO = "This porbably will never work, but it isn't critical either afaik";
+
+  my $book_owner_ids = $schema->resultset ('BooksInLibrary')
+                               ->search ({}, { join => 'owner', distinct => 1, order_by => 'owner.name', unsafe_subselect_ok => 1 })
+                                ->get_column ('owner');
+
+  my $book_owners = $schema->resultset ('Owners')->search ({
+    id => { -in => $book_owner_ids->as_query }
+  });
+
+  is_deeply (
+    [ map { $_->id } ($book_owners->all) ],
+    [ $book_owner_ids->all ],
+    'Sort is preserved across IN subqueries',
+  );
+}
+
+# This is known not to work - thus the negative test
+{
+  my $owners = $schema->resultset ('Owners')->search ({}, { order_by => 'name', offset => 2, rows => 3, unsafe_subselect_ok => 1 });
+  my $correlated_owners = $owners->result_source->resultset->search (
+    {
+      id => { -in => $owners->get_column('id')->as_query },
+    },
+    {
+      order_by => 'name' # reorder because of what is shown above
+    },
+  );
+
+  cmp_ok (
+    join ("\x00", map { $_->name } ($correlated_owners->all) ),
+      'ne',
+    join ("\x00", map { $_->name } ($owners->all) ),
+    'Sadly sort not preserved from within a correlated subquery',
+  );
+
+  cmp_ok (
+    join ("\x00", sort map { $_->name } ($correlated_owners->all) ),
+      'ne',
+    join ("\x00", sort map { $_->name } ($owners->all) ),
+    'Which in fact gives a completely wrong dataset',
+  );
+}
+
+
+# make sure right-join-side single-prefetch ordering limit works
+{
+  my $rs = $schema->resultset ('BooksInLibrary')->search (
+    {
+      'owner.name' => { '!=', 'woggle' },
+    },
+    {
+      prefetch => 'owner',
+      order_by => 'owner.name',
+    }
+  );
+  # this is the order in which they should come from the above query
+  my @owner_names = qw/boggle fISMBoC fREW fRIOUX fROOH fRUE wiggle wiggle/;
+
+  is ($rs->all, 8, 'Correct amount of objects from right-sorted joined resultset');
+  is_deeply (
+    [map { $_->owner->name } ($rs->all) ],
+    \@owner_names,
+    'Rows were properly ordered'
+  );
+
+  my $limited_rs = $rs->search ({}, {rows => 7, offset => 2, unsafe_subselect_ok => 1});
+  is ($limited_rs->count, 6, 'Correct count of limited right-sorted joined resultset');
+  is ($limited_rs->count_rs->next, 6, 'Correct count_rs of limited right-sorted joined resultset');
+
+  my $queries;
+  $schema->storage->debugcb(sub { $queries++; });
+  $schema->storage->debug(1);
+
+  is_deeply (
+    [map { $_->owner->name } ($limited_rs->all) ],
+    [@owner_names[2 .. 7]],
+    'Limited rows were properly ordered'
+  );
+  is ($queries, 1, 'Only one query with prefetch');
+
+  $schema->storage->debugcb(undef);
+  $schema->storage->debug(0);
+
+
+  is_deeply (
+    [map { $_->name } ($limited_rs->search_related ('owner')->all) ],
+    [@owner_names[2 .. 7]],
+    'Rows are still properly ordered after search_related'
+  );
+}
+
+
+#
+# try a prefetch on tables with identically named columns
+#
+
+# set quote char - make sure things work while quoted
+$schema->storage->_sql_maker->{quote_char} = [qw/[ ]/];
+$schema->storage->_sql_maker->{name_sep} = '.';
+
+{
+  # try a ->has_many direction
+  my $owners = $schema->resultset ('Owners')->search (
+    {
+      'books.id' => { '!=', undef },
+      'me.name' => { '!=', 'somebogusstring' },
+    },
+    {
+      prefetch => 'books',
+      order_by => { -asc => \['name + ?', [ test => 'xxx' ]] }, # test bindvar propagation
+      rows     => 3,  # 8 results total
+      unsafe_subselect_ok => 1,
+    },
+  );
+
+  my ($sql, @bind) = @${$owners->page(3)->as_query};
+  is_deeply (
+    \@bind,
+    [ ([ 'me.name' => 'somebogusstring' ], [ test => 'xxx' ]) x 2 ],  # double because of the prefetch subq
+  );
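
A standalone note (not part of this commit) on the ( LIST ) x 2 idiom
in the expected bind list above: the parenthesized form repeats the
list itself, yielding the same bind pair once for the prefetch subquery
and once for the outer query:

    use strict;
    use warnings;

    my @bind = ( ([ 'me.name' => 'somebogusstring' ], [ test => 'xxx' ]) x 2 );
    print scalar @bind, "\n";   # 4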
+
+  is ($owners->page(1)->all, 3, 'has_many prefetch returns correct number of rows');
+  is ($owners->page(1)->count, 3, 'has-many prefetch returns correct count');
+
+  is ($owners->page(3)->all, 2, 'has_many prefetch returns correct number of rows');
+  is ($owners->page(3)->count, 2, 'has-many prefetch returns correct count');
+  is ($owners->page(3)->count_rs->next, 2, 'has-many prefetch returns correct count_rs');
+
+
+  # try a ->belongs_to direction (no select collapse, group_by should work)
+  my $books = $schema->resultset ('BooksInLibrary')->search (
+    {
+      'owner.name' => [qw/wiggle woggle/],
+    },
+    {
+      distinct => 1,
+      having => \['1 = ?', [ test => 1 ] ], #test having propagation
+      prefetch => 'owner',
+      rows     => 2,  # 3 results total
+      order_by => { -desc => 'me.owner' },
+      unsafe_subselect_ok => 1,
+    },
+  );
+
+  ($sql, @bind) = @${$books->page(3)->as_query};
+  is_deeply (
+    \@bind,
+    [
+      # inner
+      [ 'owner.name' => 'wiggle' ], [ 'owner.name' => 'woggle' ], [ source => 'Library' ], [ test => '1' ],
+      # outer
+      [ 'owner.name' => 'wiggle' ], [ 'owner.name' => 'woggle' ], [ source => 'Library' ],
+    ],
+  );
+
+  is ($books->page(1)->all, 2, 'Prefetched grouped search returns correct number of rows');
+  is ($books->page(1)->count, 2, 'Prefetched grouped search returns correct count');
+
+  is ($books->page(2)->all, 1, 'Prefetched grouped search returns correct number of rows');
+  is ($books->page(2)->count, 1, 'Prefetched grouped search returns correct count');
+  is ($books->page(2)->count_rs->next, 1, 'Prefetched grouped search returns correct count_rs');
+}
+
+done_testing;
+
 # clean up our mess
 END {
-    my $dbh = eval { $schema->storage->_dbh };
-    $dbh->do('DROP TABLE artist') if $dbh;
+  if (my $dbh = eval { $schema->storage->_dbh }) {
+    eval { $dbh->do("DROP TABLE $_") }
+      for qw/artist money_test books owners/;
+  }
 }
-
+# vim:sw=2 sts=2

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/746sybase.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/746sybase.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/746sybase.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,82 +1,617 @@
 use strict;
 use warnings;  
+no warnings 'uninitialized';
 
 use Test::More;
+use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
 
 my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};
 
-plan skip_all => 'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test'
-  unless ($dsn && $user);
+my $TESTS = 66 + 2;
 
-plan tests => 12;
+if (not ($dsn && $user)) {
+  plan skip_all =>
+    'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' .
+    "\nWarning: This test drops and creates the tables " .
+    "'artist', 'money_test' and 'bindtype_test'";
+} else {
+  plan tests => $TESTS*2 + 1;
+}
 
-my $schema = DBICTest::Schema->connect($dsn, $user, $pass, {AutoCommit => 1});
+my @storage_types = (
+  'DBI::Sybase::ASE',
+  'DBI::Sybase::ASE::NoBindVars',
+);
+eval "require DBIx::Class::Storage::$_;" for @storage_types;
 
-$schema->storage->ensure_connected;
-isa_ok( $schema->storage, 'DBIx::Class::Storage::DBI::Sybase' );
+my $schema;
+my $storage_idx = -1;
 
-$schema->storage->dbh_do (sub {
-    my ($storage, $dbh) = @_;
-    eval { $dbh->do("DROP TABLE artist") };
-    $dbh->do(<<'SQL');
+sub get_schema {
+  DBICTest::Schema->connect($dsn, $user, $pass, {
+    on_connect_call => [
+      [ blob_setup => log_on_update => 1 ], # this is a safer option
+    ],
+  });
+}
 
+my $ping_count = 0;
+{
+  my $ping = DBIx::Class::Storage::DBI::Sybase::ASE->can('_ping');
+  *DBIx::Class::Storage::DBI::Sybase::ASE::_ping = sub {
+    $ping_count++;
+    goto $ping;
+  };
+}
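
A standalone sketch (not part of this commit; the package and sub names
are made up) of the counting-wrapper idiom above: goto-ing the saved
coderef tail-calls the original, so @_ and caller() are undisturbed:

    use strict;
    use warnings;

    { package Demo; sub ping { "pong(@_)" } }

    my $count = 0;
    {
      no warnings 'redefine';
      my $orig = Demo->can('ping');
      *Demo::ping = sub { $count++; goto $orig };
    }

    print Demo::ping('x'), "\n";   # pong(x)
    print "$count call(s)\n";      # 1 call(s)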
+
+for my $storage_type (@storage_types) {
+  $storage_idx++;
+
+  unless ($storage_type eq 'DBI::Sybase::ASE') { # autodetect
+    DBICTest::Schema->storage_type("::$storage_type");
+  }
+
+  $schema = get_schema();
+
+  $schema->storage->ensure_connected;
+
+  if ($storage_idx == 0 &&
+      $schema->storage->isa('DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars')) {
+# no placeholders in this version of Sybase or DBD::Sybase (or using FreeTDS)
+      my $tb = Test::More->builder;
+      $tb->skip('no placeholders') for 1..$TESTS;
+      next;
+  }
+
+  isa_ok( $schema->storage, "DBIx::Class::Storage::$storage_type" );
+
+  $schema->storage->_dbh->disconnect;
+  lives_ok (sub { $schema->storage->dbh }, 'reconnect works');
+
+  $schema->storage->dbh_do (sub {
+      my ($storage, $dbh) = @_;
+      eval { $dbh->do("DROP TABLE artist") };
+      $dbh->do(<<'SQL');
 CREATE TABLE artist (
-   artistid INT IDENTITY NOT NULL,
+   artistid INT IDENTITY PRIMARY KEY,
    name VARCHAR(100),
    rank INT DEFAULT 13 NOT NULL,
-   charfield CHAR(10) NULL,
-   primary key(artistid)
+   charfield CHAR(10) NULL
 )
-
 SQL
+  });
 
-});
+  my %seen_id;
 
-my %seen_id;
+# so we start unconnected
+  $schema->storage->disconnect;
 
-# fresh $schema so we start unconnected
-$schema = DBICTest::Schema->connect($dsn, $user, $pass, {AutoCommit => 1});
-
 # test primary key handling
-my $new = $schema->resultset('Artist')->create({ name => 'foo' });
-ok($new->artistid > 0, "Auto-PK worked");
+  my $new = $schema->resultset('Artist')->create({ name => 'foo' });
+  ok($new->artistid > 0, "Auto-PK worked");
 
-$seen_id{$new->artistid}++;
+  $seen_id{$new->artistid}++;
 
-# test LIMIT support
-for (1..6) {
+# check redispatch to storage-specific insert when auto-detected storage
+  if ($storage_type eq 'DBI::Sybase::ASE') {
+    DBICTest::Schema->storage_type('::DBI');
+    $schema = get_schema();
+  }
+
+  $new = $schema->resultset('Artist')->create({ name => 'Artist 1' });
+  is ( $seen_id{$new->artistid}, undef, 'id for Artist 1 is unique' );
+  $seen_id{$new->artistid}++;
+
+# inserts happen in a txn, so we make sure it still works inside a txn too
+  $schema->txn_begin;
+
+  for (2..6) {
     $new = $schema->resultset('Artist')->create({ name => 'Artist ' . $_ });
     is ( $seen_id{$new->artistid}, undef, "id for Artist $_ is unique" );
     $seen_id{$new->artistid}++;
-}
+  }
 
-my $it;
+  $schema->txn_commit;
 
-$it = $schema->resultset('Artist')->search( {}, {
+# test simple count
+  is ($schema->resultset('Artist')->count, 7, 'count(*) of whole table ok');
+
+# test LIMIT support
+  my $it = $schema->resultset('Artist')->search({
+    artistid => { '>' => 0 }
+  }, {
     rows => 3,
     order_by => 'artistid',
-});
+  });
 
-TODO: {
-    local $TODO = 'Sybase is very very fucked in the limit department';
+  is( $it->count, 3, "LIMIT count ok" );
 
-    is( $it->count, 3, "LIMIT count ok" );
-}
+  is( $it->next->name, "foo", "iterator->next ok" );
+  $it->next;
+  is( $it->next->name, "Artist 2", "iterator->next ok" );
+  is( $it->next, undef, "next past end of resultset ok" );
 
-# The iterator still works correctly with rows => 3, even though the sql is
-# fucked, very interesting.
+# now try with offset
+  $it = $schema->resultset('Artist')->search({}, {
+    rows => 3,
+    offset => 3,
+    order_by => 'artistid',
+  });
 
-is( $it->next->name, "foo", "iterator->next ok" );
-$it->next;
-is( $it->next->name, "Artist 2", "iterator->next ok" );
-is( $it->next, undef, "next past end of resultset ok" );
+  is( $it->count, 3, "LIMIT with offset count ok" );
 
+  is( $it->next->name, "Artist 3", "iterator->next ok" );
+  $it->next;
+  is( $it->next->name, "Artist 5", "iterator->next ok" );
+  is( $it->next, undef, "next past end of resultset ok" );
 
+# now try a grouped count
+  $schema->resultset('Artist')->create({ name => 'Artist 6' })
+    for (1..6);
+
+  $it = $schema->resultset('Artist')->search({}, {
+    group_by => 'name'
+  });
+
+  is( $it->count, 7, 'COUNT of GROUP_BY ok' );
+
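The expected grouped count follows from the names inserted so far
(standalone sketch, not part of this commit):

    use strict;
    use warnings;

    my @names = ( 'foo', (map "Artist $_", 1 .. 6), ('Artist 6') x 6 );
    my %distinct;
    $distinct{$_}++ for @names;
    print scalar keys %distinct, "\n";   # 7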
+# do an IDENTITY_INSERT
+  {
+    no warnings 'redefine';
+
+    my @debug_out;
+    local $schema->storage->{debug} = 1;
+    local $schema->storage->debugobj->{callback} = sub {
+      push @debug_out, $_[1];
+    };
+
+    my $txn_used = 0;
+    my $txn_commit = \&DBIx::Class::Storage::DBI::txn_commit;
+    local *DBIx::Class::Storage::DBI::txn_commit = sub {
+      $txn_used = 1;
+      goto &$txn_commit;
+    };
+
+    $schema->resultset('Artist')
+      ->create({ artistid => 999, name => 'mtfnpy' });
+
+    ok((grep /IDENTITY_INSERT/i, @debug_out), 'IDENTITY_INSERT used');
+
+    SKIP: {
+      skip 'not testing lack of txn on IDENTITY_INSERT with NoBindVars', 1
+        if $storage_type =~ /NoBindVars/i;
+
+      is $txn_used, 0, 'no txn on insert with IDENTITY_INSERT';
+    }
+  }
+
+# do an IDENTITY_UPDATE
+  {
+    my @debug_out;
+    local $schema->storage->{debug} = 1;
+    local $schema->storage->debugobj->{callback} = sub {
+      push @debug_out, $_[1];
+    };
+
+    lives_and {
+      $schema->resultset('Artist')
+        ->find(999)->update({ artistid => 555 });
+      ok((grep /IDENTITY_UPDATE/i, @debug_out));
+    } 'IDENTITY_UPDATE used';
+    $ping_count-- if $@;
+  }
+
+  my $bulk_rs = $schema->resultset('Artist')->search({
+    name => { -like => 'bulk artist %' }
+  });
+
+# test insert_bulk using populate.
+  SKIP: {
+    skip 'insert_bulk not supported', 4
+      if $storage_type =~ /NoBindVars/i;
+
+    lives_ok {
+      $schema->resultset('Artist')->populate([
+        {
+          name => 'bulk artist 1',
+          charfield => 'foo',
+        },
+        {
+          name => 'bulk artist 2',
+          charfield => 'foo',
+        },
+        {
+          name => 'bulk artist 3',
+          charfield => 'foo',
+        },
+      ]);
+    } 'insert_bulk via populate';
+
+    is $bulk_rs->count, 3, 'correct number inserted via insert_bulk';
+
+    is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
+      'column set correctly via insert_bulk');
+
+    my %bulk_ids;
+    @bulk_ids{map $_->artistid, $bulk_rs->all} = ();
+
+    is ((scalar keys %bulk_ids), 3,
+      'identities generated correctly in insert_bulk');
+
+    $bulk_rs->delete;
+  }
+
+# make sure insert_bulk works a second time on the same connection
+  SKIP: {
+    skip 'insert_bulk not supported', 3
+      if $storage_type =~ /NoBindVars/i;
+
+    lives_ok {
+      $schema->resultset('Artist')->populate([
+        {
+          name => 'bulk artist 1',
+          charfield => 'bar',
+        },
+        {
+          name => 'bulk artist 2',
+          charfield => 'bar',
+        },
+        {
+          name => 'bulk artist 3',
+          charfield => 'bar',
+        },
+      ]);
+    } 'insert_bulk via populate called a second time';
+
+    is $bulk_rs->count, 3,
+      'correct number inserted via insert_bulk';
+
+    is ((grep $_->charfield eq 'bar', $bulk_rs->all), 3,
+      'column set correctly via insert_bulk');
+
+    $bulk_rs->delete;
+  }
+
+# test invalid insert_bulk (missing required column)
+#
+# There should be a rollback, reconnect and the next valid insert_bulk should
+# succeed.
+  throws_ok {
+    $schema->resultset('Artist')->populate([
+      {
+        charfield => 'foo',
+      }
+    ]);
+  } qr/no value or default|does not allow null|placeholders/i,
+# The second pattern is the error from fallback to regular array insert on
+# incompatible charset.
+# The third is for ::NoBindVars with no syb_has_blk.
+  'insert_bulk with missing required column throws error';
+
+# now test insert_bulk with IDENTITY_INSERT
+  SKIP: {
+    skip 'insert_bulk not supported', 3
+      if $storage_type =~ /NoBindVars/i;
+
+    lives_ok {
+      $schema->resultset('Artist')->populate([
+        {
+          artistid => 2001,
+          name => 'bulk artist 1',
+          charfield => 'foo',
+        },
+        {
+          artistid => 2002,
+          name => 'bulk artist 2',
+          charfield => 'foo',
+        },
+        {
+          artistid => 2003,
+          name => 'bulk artist 3',
+          charfield => 'foo',
+        },
+      ]);
+    } 'insert_bulk with IDENTITY_INSERT via populate';
+
+    is $bulk_rs->count, 3,
+      'correct number inserted via insert_bulk with IDENTITY_INSERT';
+
+    is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
+      'column set correctly via insert_bulk with IDENTITY_INSERT');
+
+    $bulk_rs->delete;
+  }
+
+# test correlated subquery
+  my $subq = $schema->resultset('Artist')->search({ artistid => { '>' => 3 } })
+    ->get_column('artistid')
+    ->as_query;
+  my $subq_rs = $schema->resultset('Artist')->search({
+    artistid => { -in => $subq }
+  });
+  is $subq_rs->count, 11, 'correlated subquery';
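
The expected count follows from the identity values assigned so far
(standalone sketch, not part of this commit; assumes the identities
came out as 1..13 plus the IDENTITY_UPDATEd 555):

    use strict;
    use warnings;

    my @ids = ( 1 .. 13, 555 );   # assumed identity values at this point
    print scalar( grep { $_ > 3 } @ids ), "\n";   # 11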
+
+# mostly stolen from the blob stuff Nniuq wrote for t/73oracle.t
+  SKIP: {
+    skip 'TEXT/IMAGE support does not work with FreeTDS', 22
+      if $schema->storage->using_freetds;
+
+    my $dbh = $schema->storage->_dbh;
+    {
+      local $SIG{__WARN__} = sub {};
+      eval { $dbh->do('DROP TABLE bindtype_test') };
+
+      $dbh->do(qq[
+        CREATE TABLE bindtype_test 
+        (
+          id    INT   IDENTITY PRIMARY KEY,
+          bytea IMAGE NULL,
+          blob  IMAGE NULL,
+          clob  TEXT  NULL
+        )
+      ],{ RaiseError => 1, PrintError => 0 });
+    }
+
+    my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
+    $binstr{'large'} = $binstr{'small'} x 1024;
+
+    my $maxloblen = length $binstr{'large'};
+    
+    if (not $schema->storage->using_freetds) {
+      $dbh->{'LongReadLen'} = $maxloblen * 2;
+    } else {
+      $dbh->do("set textsize ".($maxloblen * 2));
+    }
+
+    my $rs = $schema->resultset('BindType');
+    my $last_id;
+
+    foreach my $type (qw(blob clob)) {
+      foreach my $size (qw(small large)) {
+        no warnings 'uninitialized';
+
+        my $created;
+        lives_ok {
+          $created = $rs->create( { $type => $binstr{$size} } )
+        } "inserted $size $type without dying";
+
+        $last_id = $created->id if $created;
+
+        lives_and {
+          ok($rs->find($last_id)->$type eq $binstr{$size})
+        } "verified inserted $size $type";
+      }
+    }
+
+    $rs->delete;
+
+    # blob insert with explicit PK
+    # also a good opportunity to test IDENTITY_INSERT
+    lives_ok {
+      $rs->create( { id => 1, blob => $binstr{large} } )
+    } 'inserted large blob without dying with manual PK';
+
+    lives_and {
+      ok($rs->find(1)->blob eq $binstr{large})
+    } 'verified inserted large blob with manual PK';
+
+    # try a blob update
+    my $new_str = $binstr{large} . 'mtfnpy';
+
+    # check redispatch to storage-specific update when auto-detected storage
+    if ($storage_type eq 'DBI::Sybase::ASE') {
+      DBICTest::Schema->storage_type('::DBI');
+      $schema = get_schema();
+    }
+
+    lives_ok {
+      $rs->search({ id => 1 })->update({ blob => $new_str })
+    } 'updated blob successfully';
+
+    lives_and {
+      ok($rs->find(1)->blob eq $new_str)
+    } 'verified updated blob';
+
+    # try a blob update with IDENTITY_UPDATE
+    lives_and {
+      $new_str = $binstr{large} . 'hlagh';
+      $rs->find(1)->update({ id => 999, blob => $new_str });
+      ok($rs->find(999)->blob eq $new_str);
+    } 'verified updated blob with IDENTITY_UPDATE';
+
+    ## try multi-row blob update
+    # first insert some blobs
+    $new_str = $binstr{large} . 'foo';
+    lives_and {
+      $rs->delete;
+      $rs->create({ blob => $binstr{large} }) for (1..2);
+      $rs->update({ blob => $new_str });
+      is((grep $_->blob eq $new_str, $rs->all), 2);
+    } 'multi-row blob update';
+
+    $rs->delete;
+
+    # now try insert_bulk with blobs and only blobs
+    $new_str = $binstr{large} . 'bar';
+    lives_ok {
+      $rs->populate([
+        {
+          bytea => 1,
+          blob => $binstr{large},
+          clob => $new_str,
+        },
+        {
+          bytea => 1,
+          blob => $binstr{large},
+          clob => $new_str,
+        },
+      ]);
+    } 'insert_bulk with blobs does not die';
+
+    is((grep $_->blob eq $binstr{large}, $rs->all), 2,
+      'IMAGE column set correctly via insert_bulk');
+
+    is((grep $_->clob eq $new_str, $rs->all), 2,
+      'TEXT column set correctly via insert_bulk');
+
+    # now try insert_bulk with blobs and a non-blob which also happens to be an
+    # identity column
+    SKIP: {
+      skip 'no insert_bulk without placeholders', 4
+        if $storage_type =~ /NoBindVars/i;
+
+      $rs->delete;
+      $new_str = $binstr{large} . 'bar';
+      lives_ok {
+        $rs->populate([
+          {
+            id => 1,
+            bytea => 1,
+            blob => $binstr{large},
+            clob => $new_str,
+          },
+          {
+            id => 2,
+            bytea => 1,
+            blob => $binstr{large},
+            clob => $new_str,
+          },
+        ]);
+      } 'insert_bulk with blobs and explicit identity does NOT die';
+
+      is((grep $_->blob eq $binstr{large}, $rs->all), 2,
+        'IMAGE column set correctly via insert_bulk with identity');
+
+      is((grep $_->clob eq $new_str, $rs->all), 2,
+        'TEXT column set correctly via insert_bulk with identity');
+
+      is_deeply [ map $_->id, $rs->all ], [ 1,2 ],
+        'explicit identities set correctly via insert_bulk with blobs';
+    }
+
+    lives_and {
+      $rs->delete;
+      $rs->create({ blob => $binstr{large} }) for (1..2);
+      $rs->update({ blob => undef });
+      is((grep !defined($_->blob), $rs->all), 2);
+    } 'blob update to NULL';
+  }
+
+# test MONEY column support (and some other misc. stuff)
+  $schema->storage->dbh_do (sub {
+      my ($storage, $dbh) = @_;
+      eval { $dbh->do("DROP TABLE money_test") };
+      $dbh->do(<<'SQL');
+CREATE TABLE money_test (
+   id INT IDENTITY PRIMARY KEY,
+   amount MONEY DEFAULT $999.99 NULL
+)
+SQL
+  });
+
+  my $rs = $schema->resultset('Money');
+
+# test insert with defaults
+  lives_and {
+    $rs->create({});
+    is((grep $_->amount == 999.99, $rs->all), 1);
+  } 'insert with all defaults works';
+  $rs->delete;
+
+# test insert transaction when there's an active cursor
+  {
+    my $artist_rs = $schema->resultset('Artist');
+    $artist_rs->first;
+    lives_ok {
+      my $row = $schema->resultset('Money')->create({ amount => 100 });
+      $row->delete;
+    } 'inserted a row with an active cursor';
+    $ping_count-- if $@; # dbh_do calls ->connected
+  }
+
+# test insert in an outer transaction when there's an active cursor
+  TODO: {
+    local $TODO = 'this should work once we have eager cursors';
+
+# clear state, or we get a deadlock on $row->delete
+# XXX figure out why this happens
+    $schema->storage->disconnect;
+
+    lives_ok {
+      $schema->txn_do(sub {
+        my $artist_rs = $schema->resultset('Artist');
+        $artist_rs->first;
+        my $row = $schema->resultset('Money')->create({ amount => 100 });
+        $row->delete;
+      });
+    } 'inserted a row with an active cursor in outer txn';
+    $ping_count-- if $@; # dbh_do calls ->connected
+  }
+
+# Now test money values.
+  my $row;
+  lives_ok {
+    $row = $rs->create({ amount => 100 });
+  } 'inserted a money value';
+
+  is eval { $rs->find($row->id)->amount }, 100, 'money value round-trip';
+
+  lives_ok {
+    $row->update({ amount => 200 });
+  } 'updated a money value';
+
+  is eval { $rs->find($row->id)->amount },
+    200, 'updated money value round-trip';
+
+  lives_ok {
+    $row->update({ amount => undef });
+  } 'updated a money value to NULL';
+
+  my $null_amount = eval { $rs->find($row->id)->amount };
+  ok(
+    ((not defined $null_amount) && (not $@)),
+    'updated money value to NULL round-trip'
+  );
+  diag $@ if $@;
+
+# Test computed columns and timestamps
+  $schema->storage->dbh_do (sub {
+      my ($storage, $dbh) = @_;
+      eval { $dbh->do("DROP TABLE computed_column_test") };
+      $dbh->do(<<'SQL');
+CREATE TABLE computed_column_test (
+   id INT IDENTITY PRIMARY KEY,
+   a_computed_column AS getdate(),
+   a_timestamp timestamp,
+   charfield VARCHAR(20) DEFAULT 'foo' 
+)
+SQL
+  });
+
+  require DBICTest::Schema::ComputedColumn;
+  $schema->register_class(
+    ComputedColumn => 'DBICTest::Schema::ComputedColumn'
+  );
+
+  ok (($rs = $schema->resultset('ComputedColumn')),
+    'got rs for ComputedColumn');
+
+  lives_ok { $row = $rs->create({}) }
+    'empty insert for a table with computed columns survived';
+
+  lives_ok {
+    $row->update({ charfield => 'bar' })
+  } 'update of a table with computed columns survived';
+}
+
+is $ping_count, 0, 'no pings';
+
 # clean up our mess
 END {
-    my $dbh = eval { $schema->storage->_dbh };
-    $dbh->do('DROP TABLE artist') if $dbh;
+  if (my $dbh = eval { $schema->storage->_dbh }) {
+    eval { $dbh->do("DROP TABLE $_") }
+      for qw/artist bindtype_test money_test computed_column_test/;
+  }
 }
-

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/747mssql_ado.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/747mssql_ado.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/747mssql_ado.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,79 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MSSQL_ADO_${_}" } qw/DSN USER PASS/};
+
+plan skip_all => 'Set $ENV{DBICTEST_MSSQL_ADO_DSN}, _USER and _PASS to run this test'
+  unless ($dsn && $user);
+
+my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
+$schema->storage->ensure_connected;
+
+isa_ok( $schema->storage, 'DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server' );
+
+$schema->storage->dbh_do (sub {
+    my ($storage, $dbh) = @_;
+    eval { $dbh->do("DROP TABLE artist") };
+    $dbh->do(<<'SQL');
+CREATE TABLE artist (
+   artistid INT IDENTITY NOT NULL,
+   name VARCHAR(100),
+   rank INT NOT NULL DEFAULT '13',
+   charfield CHAR(10) NULL,
+   primary key(artistid)
+)
+SQL
+});
+
+my $new = $schema->resultset('Artist')->create({ name => 'foo' });
+ok($new->artistid > 0, 'Auto-PK worked');
+
+# make sure select works
+my $found = $schema->resultset('Artist')->search({ name => 'foo' })->first;
+is $found->artistid, $new->artistid, 'search works';
+
+# test large column list in select
+$found = $schema->resultset('Artist')->search({ name => 'foo' }, {
+  select => ['artistid', 'name', map "'foo' foo_$_", 0..50],
+  as     => ['artistid', 'name', map       "foo_$_", 0..50],
+})->first;
+is $found->artistid, $new->artistid, 'select with big column list';
+is $found->get_column('foo_50'), 'foo', 'last item in big column list';
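
A standalone sketch (not part of this commit) of the 53-item select
list generated above:

    use strict;
    use warnings;

    my @select = ( 'artistid', 'name', map "'foo' foo_$_", 0 .. 50 );
    print scalar @select, " columns\n";   # 53 columns
    print $select[-1], "\n";              # 'foo' foo_50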
+
+# create a few more rows
+for (1..12) {
+  $schema->resultset('Artist')->create({ name => 'Artist ' . $_ });
+}
+
+# test multiple active cursors
+my $rs1 = $schema->resultset('Artist')->search({}, { order_by => 'artistid' });
+my $rs2 = $schema->resultset('Artist')->search({}, { order_by => 'name' });
+
+while ($rs1->next) {
+  ok eval { $rs2->next }, 'multiple active cursors';
+}
+
+# test bug where ADO blows up if the first bindparam is shorter than the second
+is $schema->resultset('Artist')->search({ artistid => 2 })->first->name,
+  'Artist 1',
+  'short bindparam';
+
+is $schema->resultset('Artist')->search({ artistid => 13 })->first->name,
+  'Artist 12',
+  'longer bindparam';
+
+done_testing;
+
+# clean up our mess
+END {
+  if (my $dbh = eval { $schema->storage->_dbh }) {
+    eval { $dbh->do("DROP TABLE $_") }
+      for qw/artist/;
+  }
+}
+# vim:sw=2 sts=2

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/748informix.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/745db2.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/748informix.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/748informix.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,82 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_INFORMIX_${_}" } qw/DSN USER PASS/};
+
+#warn "$dsn $user $pass";
+
+plan skip_all => 'Set $ENV{DBICTEST_INFORMIX_DSN}, _USER and _PASS to run this test'
+  unless ($dsn && $user);
+
+my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
+
+my $dbh = $schema->storage->dbh;
+
+eval { $dbh->do("DROP TABLE artist") };
+
+$dbh->do("CREATE TABLE artist (artistid SERIAL, name VARCHAR(255), charfield CHAR(10), rank INTEGER DEFAULT 13);");
+
+my $ars = $schema->resultset('Artist');
+is ( $ars->count, 0, 'No rows at first' );
+
+# test primary key handling
+my $new = $ars->create({ name => 'foo' });
+ok($new->artistid, "Auto-PK worked");
+
+# test explicit key spec
+$new = $ars->create ({ name => 'bar', artistid => 66 });
+is($new->artistid, 66, 'Explicit PK worked');
+$new->discard_changes;
+is($new->artistid, 66, 'Explicit PK assigned');
+
+# test populate
+lives_ok (sub {
+  my @pop;
+  for (1..2) {
+    push @pop, { name => "Artist_$_" };
+  }
+  $ars->populate (\@pop);
+});
+
+# test populate with explicit key
+lives_ok (sub {
+  my @pop;
+  for (1..2) {
+    push @pop, { name => "Artist_expkey_$_", artistid => 100 + $_ };
+  }
+  $ars->populate (\@pop);
+});
+
+# count what we did so far
+is ($ars->count, 6, 'Simple count works');
+
+# test LIMIT support
+my $lim = $ars->search( {},
+  {
+    rows => 3,
+    offset => 4,
+    order_by => 'artistid'
+  }
+);
+is( $lim->count, 2, 'ROWS+OFFSET count ok' );
+is( $lim->all, 2, 'Number of ->all objects matches count' );
+
+# test iterator
+$lim->reset;
+is( $lim->next->artistid, 101, "iterator->next ok" );
+is( $lim->next->artistid, 102, "iterator->next ok" );
+is( $lim->next, undef, "next past end of resultset ok" );
+
+
+done_testing;
+
+# clean up our mess
+END {
+    my $dbh = eval { $schema->storage->_dbh };
+    $dbh->do("DROP TABLE artist") if $dbh;
+}
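
The LIMIT block above exercises DBIC's storage-independent paging attributes; a hedged sketch of the same query outside the test harness, assuming an already-connected $schema:

    # rows/offset are turned into the vendor's own paging SQL by the
    # active storage (e.g. SKIP/FIRST on Informix), not by the caller
    my $page = $schema->resultset('Artist')->search({}, {
      rows     => 3,
      offset   => 4,
      order_by => 'artistid',
    });
    my @rows = $page->all;   # rows 5..7 of the ordered set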

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/749sybase_asa.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/749sybase_asa.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/749sybase_asa.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,174 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+# tests stolen from 748informix.t
+
+my ($dsn, $user, $pass)    = @ENV{map { "DBICTEST_SYBASE_ASA_${_}" }      qw/DSN USER PASS/};
+my ($dsn2, $user2, $pass2) = @ENV{map { "DBICTEST_SYBASE_ASA_ODBC_${_}" } qw/DSN USER PASS/};
+
+plan skip_all => <<'EOF' unless $dsn || $dsn2;
+Set $ENV{DBICTEST_SYBASE_ASA_DSN} and/or $ENV{DBICTEST_SYBASE_ASA_ODBC_DSN},
+_USER and _PASS to run these tests
+EOF
+
+my @info = (
+  [ $dsn,  $user,  $pass  ],
+  [ $dsn2, $user2, $pass2 ],
+);
+
+my @handles_to_clean;
+
+foreach my $info (@info) {
+  my ($dsn, $user, $pass) = @$info;
+
+  next unless $dsn;
+
+  my $schema = DBICTest::Schema->connect($dsn, $user, $pass, {
+    auto_savepoint => 1
+  });
+
+  my $dbh = $schema->storage->dbh;
+
+  push @handles_to_clean, $dbh;
+
+  eval { $dbh->do("DROP TABLE artist") };
+
+  $dbh->do(<<EOF);
+  CREATE TABLE artist (
+    artistid INT IDENTITY PRIMARY KEY,
+    name VARCHAR(255) NULL,
+    charfield CHAR(10) NULL,
+    rank INT DEFAULT 13
+  )
+EOF
+
+  my $ars = $schema->resultset('Artist');
+  is ( $ars->count, 0, 'No rows at first' );
+
+# test primary key handling
+  my $new = $ars->create({ name => 'foo' });
+  ok($new->artistid, "Auto-PK worked");
+
+# test explicit key spec
+  $new = $ars->create ({ name => 'bar', artistid => 66 });
+  is($new->artistid, 66, 'Explicit PK worked');
+  $new->discard_changes;
+  is($new->artistid, 66, 'Explicit PK assigned');
+
+# test savepoints
+  eval {
+    $schema->txn_do(sub {
+      eval {
+        $schema->txn_do(sub {
+          $ars->create({ name => 'in_savepoint' });
+          die "rolling back savepoint";
+        });
+      };
+      ok ((not $ars->search({ name => 'in_savepoint' })->first),
+        'savepoint rolled back');
+      $ars->create({ name => 'in_outer_txn' });
+      die "rolling back outer txn";
+    });
+  };
+
+  like $@, qr/rolling back outer txn/,
+    'correct exception for rollback';
+
+  ok ((not $ars->search({ name => 'in_outer_txn' })->first),
+    'outer txn rolled back');
+
+# test populate
+  lives_ok (sub {
+    my @pop;
+    for (1..2) {
+      push @pop, { name => "Artist_$_" };
+    }
+    $ars->populate (\@pop);
+  });
+
+# test populate with explicit key
+  lives_ok (sub {
+    my @pop;
+    for (1..2) {
+      push @pop, { name => "Artist_expkey_$_", artistid => 100 + $_ };
+    }
+    $ars->populate (\@pop);
+  });
+
+# count what we did so far
+  is ($ars->count, 6, 'Simple count works');
+
+# test LIMIT support
+  my $lim = $ars->search( {},
+    {
+      rows => 3,
+      offset => 4,
+      order_by => 'artistid'
+    }
+  );
+  is( $lim->count, 2, 'ROWS+OFFSET count ok' );
+  is( $lim->all, 2, 'Number of ->all objects matches count' );
+
+# test iterator
+  $lim->reset;
+  is( $lim->next->artistid, 101, "iterator->next ok" );
+  is( $lim->next->artistid, 102, "iterator->next ok" );
+  is( $lim->next, undef, "next past end of resultset ok" );
+
+# test empty insert
+  {
+    local $ars->result_source->column_info('artistid')->{is_auto_increment} = 0;
+
+    lives_ok { $ars->create({}) }
+      'empty insert works';
+  }
+
+# test blobs (stolen from 73oracle.t)
+  eval { $dbh->do('DROP TABLE bindtype_test') };
+  $dbh->do(qq[
+  CREATE TABLE bindtype_test
+  (
+    id    INT          NOT NULL PRIMARY KEY,
+    bytea INT          NULL,
+    blob  LONG BINARY  NULL,
+    clob  LONG VARCHAR NULL
+  )
+  ],{ RaiseError => 1, PrintError => 1 });
+
+  my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
+  $binstr{'large'} = $binstr{'small'} x 1024;
+
+  my $maxloblen = length $binstr{'large'};
+  local $dbh->{'LongReadLen'} = $maxloblen;
+
+  my $rs = $schema->resultset('BindType');
+  my $id = 0;
+
+  foreach my $type (qw( blob clob )) {
+    foreach my $size (qw( small large )) {
+      $id++;
+
+# turn off horrendous binary DBIC_TRACE output
+      local $schema->storage->{debug} = 0;
+
+      lives_ok { $rs->create( { 'id' => $id, $type => $binstr{$size} } ) }
+      "inserted $size $type without dying";
+
+      ok($rs->find($id)->$type eq $binstr{$size}, "verified inserted $size $type" );
+    }
+  }
+}
+
+done_testing;
+
+# clean up our mess
+END {
+  foreach my $dbh (@handles_to_clean) {
+    eval { $dbh->do("DROP TABLE $_") } for qw/artist bindtype_test/;
+  }
+}
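
The savepoint block depends on the auto_savepoint => 1 connect attribute: with it, a nested txn_do runs inside a SAVEPOINT, so a die in the inner block undoes only the inner work. A minimal sketch, assuming a $schema connected with that attribute:

    $schema->txn_do(sub {
      eval {
        $schema->txn_do(sub {
          $schema->resultset('Artist')->create({ name => 'inner' });
          die "roll back to the savepoint\n";
        });
      };
      # the outer transaction is still live here; only 'inner' is gone
      $schema->resultset('Artist')->create({ name => 'outer' });
    });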

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/74mssql.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/74mssql.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/74mssql.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -9,6 +9,7 @@
 }
 
 use Test::More;
+use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
 
@@ -17,59 +18,178 @@
 plan skip_all => 'Set $ENV{DBICTEST_MSSQL_DSN}, _USER and _PASS to run this test'
   unless ($dsn);
 
-plan tests => 6;
+my @storage_types = (
+  'DBI::Sybase::Microsoft_SQL_Server',
+  'DBI::Sybase::Microsoft_SQL_Server::NoBindVars',
+);
+my $storage_idx = -1;
+my $schema;
 
-my $schema = DBICTest::Schema->clone;
-$schema->connection($dsn, $user, $pass);
+my $NUMBER_OF_TESTS_IN_BLOCK = 18;
+for my $storage_type (@storage_types) {
+  $storage_idx++;
 
-my $dbh = $schema->storage->dbh;
+  $schema = DBICTest::Schema->clone;
 
-isa_ok($schema->storage, 'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server');
+  $schema->connection($dsn, $user, $pass);
 
-$dbh->do("IF OBJECT_ID('artist', 'U') IS NOT NULL
-    DROP TABLE artist");
-$dbh->do("IF OBJECT_ID('cd', 'U') IS NOT NULL
-    DROP TABLE cd");
+  if ($storage_idx != 0) { # autodetect
+    no warnings 'redefine';
+    local *DBIx::Class::Storage::DBI::_typeless_placeholders_supported =
+      sub { 0 };
+#    $schema->storage_type("::$storage_type");
+    $schema->storage->ensure_connected;
+  }
+  else {
+    $schema->storage->ensure_connected;
+  }
 
-$dbh->do("CREATE TABLE artist (artistid INT IDENTITY PRIMARY KEY, name VARCHAR(100), rank INT DEFAULT '13', charfield CHAR(10) NULL);");
-$dbh->do("CREATE TABLE cd (cdid INT IDENTITY PRIMARY KEY, artist INT,  title VARCHAR(100), year VARCHAR(100), genreid INT NULL, single_track INT NULL);");
+  if ($storage_idx == 0 && ref($schema->storage) =~ /NoBindVars\z/) {
+    my $tb = Test::More->builder;
+    $tb->skip('no placeholders') for 1..$NUMBER_OF_TESTS_IN_BLOCK;
+    next;
+  }
+
+  isa_ok($schema->storage, "DBIx::Class::Storage::$storage_type");
+
+# start disconnected to test reconnection
+  $schema->storage->_dbh->disconnect;
+
+  my $dbh;
+  lives_ok (sub {
+    $dbh = $schema->storage->dbh;
+  }, 'reconnect works');
+
+  $dbh->do("IF OBJECT_ID('artist', 'U') IS NOT NULL
+      DROP TABLE artist");
+  $dbh->do("IF OBJECT_ID('cd', 'U') IS NOT NULL
+      DROP TABLE cd");
+
+  $dbh->do("CREATE TABLE artist (artistid INT IDENTITY PRIMARY KEY, name VARCHAR(100), rank INT DEFAULT '13', charfield CHAR(10) NULL);");
+  $dbh->do("CREATE TABLE cd (cdid INT IDENTITY PRIMARY KEY, artist INT,  title VARCHAR(100), year VARCHAR(100), genreid INT NULL, single_track INT NULL);");
 # Just to test compat shim, Auto is in Core
-$schema->class('Artist')->load_components('PK::Auto::MSSQL');
+  $schema->class('Artist')->load_components('PK::Auto::MSSQL');
 
 # Test PK
-my $new = $schema->resultset('Artist')->create( { name => 'foo' } );
-ok($new->artistid, "Auto-PK worked");
+  my $new = $schema->resultset('Artist')->create( { name => 'foo' } );
+  ok($new->artistid, "Auto-PK worked");
 
 # Test LIMIT
-for (1..6) {
-    $schema->resultset('Artist')->create( { name => 'Artist ' . $_, rank => $_ } );
-}
+  for (1..6) {
+      $schema->resultset('Artist')->create( { name => 'Artist ' . $_, rank => $_ } );
+  }
 
-my $it = $schema->resultset('Artist')->search( { },
-    { rows     => 3,
-      offset   => 2,
-      order_by => 'artistid'
-    }
-);
+  my $it = $schema->resultset('Artist')->search( { },
+      { rows     => 3,
+        offset   => 2,
+        order_by => 'artistid'
+      }
+  );
 
 # Test ? in data don't get treated as placeholders
-my $cd = $schema->resultset('CD')->create( {
-    artist      => 1,
-    title       => 'Does this break things?',
-    year        => 2007,
-} );
-ok($cd->id, 'Not treating ? in data as placeholders');
+  my $cd = $schema->resultset('CD')->create( {
+      artist      => 1,
+      title       => 'Does this break things?',
+      year        => 2007,
+  } );
+  ok($cd->id, 'Not treating ? in data as placeholders');
 
-is( $it->count, 3, "LIMIT count ok" );
-ok( $it->next->name, "iterator->next ok" );
-$it->next;
-$it->next;
-is( $it->next, undef, "next past end of resultset ok" );
+  is( $it->count, 3, "LIMIT count ok" );
+  ok( $it->next->name, "iterator->next ok" );
+  $it->next;
+  $it->next;
+  is( $it->next, undef, "next past end of resultset ok" );
 
+# test MONEY column support
+  $schema->storage->dbh_do (sub {
+      my ($storage, $dbh) = @_;
+      eval { $dbh->do("DROP TABLE money_test") };
+      $dbh->do(<<'SQL');
+  CREATE TABLE money_test (
+     id INT IDENTITY PRIMARY KEY,
+     amount MONEY NULL
+  )
+SQL
+
+  });
+
+  my $rs = $schema->resultset('Money');
+
+  my $row;
+  lives_ok {
+    $row = $rs->create({ amount => 100 });
+  } 'inserted a money value';
+
+  cmp_ok $rs->find($row->id)->amount, '==', 100, 'money value round-trip';
+
+  lives_ok {
+    $row->update({ amount => 200 });
+  } 'updated a money value';
+
+  cmp_ok $rs->find($row->id)->amount, '==', 200,
+    'updated money value round-trip';
+
+  lives_ok {
+    $row->update({ amount => undef });
+  } 'updated a money value to NULL';
+
+  is $rs->find($row->id)->amount,
+    undef, 'updated money value to NULL round-trip';
+
+  $rs->create({ amount => 300 }) for (1..3);
+
+  # test multiple active statements
+  lives_ok {
+    my $artist_rs = $schema->resultset('Artist');
+    while (my $row = $rs->next) {
+      my $artist = $artist_rs->next;
+    }
+    $rs->reset;
+  } 'multiple active statements';
+
+  $rs->delete;
+
+  # test simple transaction with commit
+  lives_ok {
+    $schema->txn_do(sub {
+      $rs->create({ amount => 400 });
+    });
+  } 'simple transaction';
+
+  cmp_ok $rs->first->amount, '==', 400, 'committed';
+  $rs->reset;
+
+  $rs->delete;
+
+  # test rollback
+  throws_ok {
+    $schema->txn_do(sub {
+      $rs->create({ amount => 400 });
+      die 'mtfnpy';
+    });
+  } qr/mtfnpy/, 'simple failed txn';
+
+  is $rs->first, undef, 'rolled back';
+  $rs->reset;
+}
+
+# test op-induced autoconnect
+lives_ok (sub {
+
+  my $schema =  DBICTest::Schema->clone;
+  $schema->connection($dsn, $user, $pass);
+
+  my $artist = $schema->resultset ('Artist')->search ({}, { order_by => 'artistid' })->next;
+  is ($artist->id, 1, 'Artist retrieved successfully');
+}, 'Query-induced autoconnect works');
+
+done_testing;
+
 # clean up our mess
 END {
-    $dbh->do("IF OBJECT_ID('artist', 'U') IS NOT NULL DROP TABLE artist")
-        if $dbh;
-    $dbh->do("IF OBJECT_ID('cd', 'U') IS NOT NULL DROP TABLE cd")
-        if $dbh;
+  if (my $dbh = eval { $schema->storage->dbh }) {
+    $dbh->do("IF OBJECT_ID('artist', 'U') IS NOT NULL DROP TABLE artist");
+    $dbh->do("IF OBJECT_ID('cd', 'U') IS NOT NULL DROP TABLE cd");
+    $dbh->do("IF OBJECT_ID('money_test', 'U') IS NOT NULL DROP TABLE money_test");
+  }
 }
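
The table setup and MONEY tests go through dbh_do rather than caching ->dbh: the coderef receives the storage and a guaranteed-live $dbh, with reconnection handled underneath (the behaviour the new 'reconnect works' check exercises). A minimal sketch, statement illustrative:

    $schema->storage->dbh_do(sub {
      my ($storage, $dbh) = @_;
      # raw DBI work belongs here, where a stale handle can't leak in
      $dbh->do('SELECT 1');
    });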

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/76joins.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/76joins.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/76joins.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -4,15 +4,12 @@
 use Test::More;
 use lib qw(t/lib);
 use DBICTest;
-use Data::Dumper;
 use DBIC::SqlMakerTest;
 
 my $schema = DBICTest->init_schema();
 
 my $orig_debug = $schema->storage->debug;
 
-use IO::File;
-
 BEGIN {
     eval "use DBD::SQLite";
     plan $@

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/76select.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/76select.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/76select.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,5 +1,5 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
 use Test::Exception;
@@ -9,8 +9,6 @@
 
 my $schema = DBICTest->init_schema();
 
-plan tests => 24;
-
 my $rs = $schema->resultset('CD')->search({},
     {
         '+select'   => \ 'COUNT(*)',
@@ -29,16 +27,6 @@
 lives_ok(sub { $rs->first->get_column('count') }, 'multiple +select/+as columns, 1st rscolumn present');
 lives_ok(sub { $rs->first->get_column('addedtitle') }, 'multiple +select/+as columns, 2nd rscolumn present');
 
-# Tests a regression in ResultSetColumn wrt +select
-$rs = $schema->resultset('CD')->search(undef,
-    {
-        '+select'   => [ \'COUNT(*) AS year_count' ],
-		order_by => 'year_count'
-	}
-);
-my @counts = $rs->get_column('cdid')->all;
-ok(scalar(@counts), 'got rows from ->all using +select');
-
 $rs = $schema->resultset('CD')->search({},
     {
         '+select'   => [ \ 'COUNT(*)', 'title' ],
@@ -64,6 +52,7 @@
 cmp_ok ($cds->count, '>', 2, 'Initially populated with more than 2 CDs');
 
 my $table = $cds->result_source->name;
+$table = $$table if ref $table eq 'SCALAR';
 my $subsel = $cds->search ({}, {
     columns => [qw/cdid title/],
     from => \ "(SELECT cdid, title FROM $table LIMIT 2) me",
@@ -100,13 +89,13 @@
 }, 'columns 2nd rscolumn present');
 
 lives_ok(sub {
-  $rs->first->artist->get_column('name') 
-}, 'columns 3rd rscolumn present'); 
+  $rs->first->artist->get_column('name')
+}, 'columns 3rd rscolumn present');
 
 
 
 $rs = $schema->resultset('CD')->search({},
-    {  
+    {
         'join' => 'artist',
         '+columns' => ['cdid', 'title', 'artist.name'],
     }
@@ -120,7 +109,7 @@
 );
 
 lives_ok(sub {
-  $rs->first->get_column('cdid') 
+  $rs->first->get_column('cdid')
 }, 'columns 1st rscolumn present');
 
 lives_ok(sub {
@@ -164,34 +153,16 @@
   }
 );
 
-is_deeply (
+is_deeply(
   $sub_rs->single,
   {
-    artist => 1,
-    track_position => 2,
-    tracks =>
-      {
-        trackid => 17,
-        title => 'Apiary',
-      },
+    artist         => 1,
+    tracks => {
+      title => 'Apiary',
+      trackid => 17,
+    },
   },
   'columns/select/as fold properly on sub-searches',
 );
 
-TODO: {
-  local $TODO = "Multi-collapsing still doesn't work right - HRI should be getting an arrayref, not an individual hash";
-  is_deeply (
-    $sub_rs->single,
-    {
-      artist => 1,
-      track_position => 2,
-      tracks => [
-        {
-          trackid => 17,
-          title => 'Apiary',
-        },
-      ],
-    },
-    'columns/select/as fold properly on sub-searches',
-  );
-}
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/79aliasing.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/79aliasing.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/79aliasing.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -52,7 +52,7 @@
   my $cd_rs = $schema->resultset('CD')->search({ 'artist.name' => 'Caterwauler McCrae' }, { join => 'artist' });
 
   my $cd = $cd_rs->find_or_new({ title => 'Huh?', year => 2006 });
-  ok(! $cd->in_storage, 'new CD not in storage yet');
+  is($cd->in_storage, 0, 'new CD not in storage yet');
   is($cd->title, 'Huh?', 'new CD title is correct');
   is($cd->year, 2006, 'new CD year is correct');
 }

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/80unique.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/80unique.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/80unique.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,14 +1,14 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
 use lib qw(t/lib);
 use DBICTest;
+use DBIC::SqlMakerTest;
+use DBIC::DebugObj;
 
 my $schema = DBICTest->init_schema();
 
-plan tests => 49;
-
 # Check the defined unique constraints
 is_deeply(
   [ sort $schema->source('CD')->unique_constraint_names ],
@@ -195,7 +195,7 @@
       { key => 'cd_artist_title' }
     );
 
-    ok(!$cd1->in_storage, 'CD is not in storage yet after update_or_new');
+    is($cd1->in_storage, 0, 'CD is not in storage yet after update_or_new');
     $cd1->insert;
     ok($cd1->in_storage, 'CD got added to storage after update_or_new && insert');
 
@@ -209,4 +209,27 @@
     );
     ok($cd2->in_storage, 'Updating year using update_or_new was successful');
     is($cd2->id, $cd1->id, 'Got the same CD using update_or_new');
-}
\ No newline at end of file
+}
+
+# make sure the ident condition is assembled sanely
+{
+  my $artist = $schema->resultset('Artist')->next;
+
+  my ($sql, @bind);
+  $schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind)),
+  $schema->storage->debug(1);
+
+  $artist->discard_changes;
+
+  is_same_sql_bind (
+    $sql,
+    \@bind,
+    'SELECT me.artistid, me.name, me.rank, me.charfield FROM artist me WHERE me.artistid = ?',
+    [qw/'1'/],
+  );
+
+  $schema->storage->debug(0);
+  $schema->storage->debugobj(undef);
+}
+
+done_testing;
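
The new ident-condition block uses the DBIC::DebugObj/is_same_sql_bind helpers from t/lib to capture and compare generated SQL; the capture half in isolation, assuming a deployed test $schema:

    use DBIC::DebugObj;
    use DBIC::SqlMakerTest;

    my ($sql, @bind);
    $schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind));
    $schema->storage->debug(1);

    $schema->resultset('Artist')->find(1);
    # $sql now holds the generated SELECT, @bind its parameters

    $schema->storage->debug(0);        # always restore both settings
    $schema->storage->debugobj(undef);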

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/81transactions.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/81transactions.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/81transactions.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,36 +1,34 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
+use Test::Warn;
 use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
 
 my $schema = DBICTest->init_schema();
 
-plan tests => 64;
-
 my $code = sub {
   my ($artist, @cd_titles) = @_;
-  
+
   $artist->create_related('cds', {
     title => $_,
     year => 2006,
   }) foreach (@cd_titles);
-  
+
   return $artist->cds->all;
 };
 
 # Test checking of parameters
 {
-  eval {
+  throws_ok (sub {
     (ref $schema)->txn_do(sub{});
-  };
-  like($@, qr/storage/, "can't call txn_do without storage");
-  eval {
+  }, qr/storage/, "can't call txn_do without storage");
+
+  throws_ok ( sub {
     $schema->txn_do('');
-  };
-  like($@, qr/must be a CODE reference/, '$coderef parameter check ok');
+  }, qr/must be a CODE reference/, '$coderef parameter check ok');
 }
 
 # Test successful txn_do() - scalar context
@@ -82,13 +80,10 @@
   my $artist = $schema->resultset('Artist')->find(2);
   my $count_before = $artist->cds->count;
 
-  eval {
+  lives_ok (sub {
     $schema->txn_do($nested_code, $schema, $artist, $code);
-  };
+  }, 'nested txn_do succeeded');
 
-  my $error = $@;
-
-  ok(!$error, 'nested txn_do succeeded');
   is($artist->cds({
     title => 'nested txn_do test CD '.$_,
   })->first->year, 2006, qq{nested txn_do CD$_ year ok}) for (1..10);
@@ -113,13 +108,10 @@
 
   my $artist = $schema->resultset('Artist')->find(3);
 
-  eval {
+  throws_ok (sub {
     $schema->txn_do($fail_code, $artist);
-  };
+  }, qr/the sky is falling/, 'failed txn_do threw an exception');
 
-  my $error = $@;
-
-  like($error, qr/the sky is falling/, 'failed txn_do threw an exception');
   my $cd = $artist->cds({
     title => 'this should not exist',
     year => 2005,
@@ -135,13 +127,10 @@
 
   my $artist = $schema->resultset('Artist')->find(3);
 
-  eval {
+  throws_ok (sub {
     $schema->txn_do($fail_code, $artist);
-  };
+  }, qr/the sky is falling/, 'failed txn_do threw an exception');
 
-  my $error = $@;
-
-  like($error, qr/the sky is falling/, 'failed txn_do threw an exception');
   my $cd = $artist->cds({
     title => 'this should not exist',
     year => 2005,
@@ -168,17 +157,14 @@
     die 'FAILED';
   };
 
-  eval {
-    $schema->txn_do($fail_code, $artist);
-  };
+  throws_ok (
+    sub {
+      $schema->txn_do($fail_code, $artist);
+    },
+    qr/the sky is falling.+Rollback failed/s,
+    'txn_rollback threw a rollback exception (and included the original exception)'
+  );
 
-  my $error = $@;
-
-  like($error, qr/Rollback failed/, 'failed txn_do with a failed '.
-       'txn_rollback threw a rollback exception');
-  like($error, qr/the sky is falling/, 'failed txn_do with a failed '.
-       'txn_rollback included the original exception');
-
   my $cd = $artist->cds({
     title => 'this should not exist',
     year => 2005,
@@ -209,13 +195,10 @@
 
   my $artist = $schema->resultset('Artist')->find(3);
 
-  eval {
+  throws_ok ( sub {
     $schema->txn_do($nested_fail_code, $schema, $artist, $code, $fail_code);
-  };
+  }, qr/the sky is falling/, 'nested failed txn_do threw exception');
 
-  my $error = $@;
-
-  like($error, qr/the sky is falling/, 'nested failed txn_do threw exception');
   ok(!defined($artist->cds({
     title => 'nested txn_do test CD '.$_,
     year => 2006,
@@ -230,12 +213,10 @@
 # Grab a new schema to test txn before connect
 {
     my $schema2 = DBICTest->init_schema(no_deploy => 1);
-    eval {
+    lives_ok (sub {
         $schema2->txn_begin();
         $schema2->txn_begin();
-    };
-    my $err = $@;
-    ok(! $err, 'Pre-connection nested transactions.');
+    }, 'Pre-connection nested transactions.');
 
     # although not connected DBI would still warn about rolling back at disconnect
     $schema2->txn_rollback;
@@ -258,29 +239,25 @@
       name => 'Death Cab for Cutie',
       made_up_column => 1,
     });
-    
+
    $guard->commit;
  } qr/No such column made_up_column .*? at .*?81transactions.t line \d+/s, "Error propagated okay";
 
   ok(!$artist_rs->find({name => 'Death Cab for Cutie'}), "Artist not created");
 
-  my $inner_exception;
-  eval {
+  my $inner_exception = '';  # set in inner() below
+  throws_ok (sub {
     outer($schema, 1);
-  };
-  is($@, $inner_exception, "Nested exceptions propogated");
+  }, qr/$inner_exception/, "Nested exceptions propagated");
 
   ok(!$artist_rs->find({name => 'Death Cab for Cutie'}), "Artist not created");
 
   lives_ok (sub {
-    my $w;
-    local $SIG{__WARN__} = sub { $w = shift };
-
-    # The 0 arg says don't die, just let the scope guard go out of scope 
-    # forcing a txn_rollback to happen
-    outer($schema, 0);
-
-    like ($w, qr/A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or an error/, 'Out of scope warning detected');
+    warnings_exist ( sub {
+      # The 0 arg says don't die, just let the scope guard go out of scope
+      # forcing a txn_rollback to happen
+      outer($schema, 0);
+    }, qr/A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or error. Rolling back./, 'Out of scope warning detected');
     ok(!$artist_rs->find({name => 'Death Cab for Cutie'}), "Artist not created");
  }, 'rollback successful without exception');
 
@@ -303,9 +280,9 @@
     my $artist = $artist_rs->find({ name => 'Death Cab for Cutie' });
 
     eval {
-      $artist->cds->create({ 
+      $artist->cds->create({
         title => 'Plans',
-        year => 2005, 
+        year => 2005,
         $fatal ? ( foo => 'bar' ) : ()
       });
     };
@@ -319,3 +296,99 @@
     $inner_guard->commit;
   }
 }
+
+# make sure the guard does not eat exceptions
+{
+  my $schema = DBICTest->init_schema();
+  throws_ok (sub {
+    my $guard = $schema->txn_scope_guard;
+    $schema->resultset ('Artist')->create ({ name => 'bohhoo'});
+
+    $schema->storage->disconnect;  # this should freak out the guard rollback
+
+    die 'Deliberate exception';
+  }, qr/Deliberate exception.+Rollback failed/s);
+}
+
+# make sure it warns *big* on failed rollbacks
+{
+  my $schema = DBICTest->init_schema();
+
+  # something is really confusing Test::Warn here, no time to debug
+=begin comment
+  warnings_exist (
+    sub {
+      my $guard = $schema->txn_scope_guard;
+      $schema->resultset ('Artist')->create ({ name => 'bohhoo'});
+
+      $schema->storage->disconnect;  # this should freak out the guard rollback
+    },
+    [
+      qr/A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or error. Rolling back./,
+      qr/\*+ ROLLBACK FAILED\!\!\! \*+/,
+    ],
+    'proper warnings generated on out-of-scope+rollback failure'
+  );
+=cut
+
+  my @want = (
+    qr/A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or error. Rolling back./,
+    qr/\*+ ROLLBACK FAILED\!\!\! \*+/,
+  );
+
+  my @w;
+  local $SIG{__WARN__} = sub {
+    if (grep {$_[0] =~ $_} (@want)) {
+      push @w, $_[0];
+    }
+    else {
+      warn $_[0];
+    }
+  };
+  {
+      my $guard = $schema->txn_scope_guard;
+      $schema->resultset ('Artist')->create ({ name => 'bohhoo'});
+
+      $schema->storage->disconnect;  # this should freak out the guard rollback
+  }
+
+  is (@w, 2, 'Both expected warnings found');
+}
+
+# make sure AutoCommit => 0 on external handles behaves correctly with scope_guard
+{
+  my $factory = DBICTest->init_schema (AutoCommit => 0);
+  cmp_ok ($factory->resultset('CD')->count, '>', 0, 'Something to delete');
+  my $dbh = $factory->storage->dbh;
+
+  ok (!$dbh->{AutoCommit}, 'AutoCommit is off on $dbh');
+  my $schema = DBICTest::Schema->connect (sub { $dbh });
+
+
+  lives_ok ( sub {
+    my $guard = $schema->txn_scope_guard;
+    $schema->resultset('CD')->delete;
+    $guard->commit;
+  }, 'No attempt to start a transaction with scope guard');
+
+  is ($schema->resultset('CD')->count, 0, 'Deletion successful');
+}
+
+# make sure AutoCommit => 0 on external handles behaves correctly with txn_do
+{
+  my $factory = DBICTest->init_schema (AutoCommit => 0);
+  cmp_ok ($factory->resultset('CD')->count, '>', 0, 'Something to delete');
+  my $dbh = $factory->storage->dbh;
+
+  ok (!$dbh->{AutoCommit}, 'AutoCommit is off on $dbh');
+  my $schema = DBICTest::Schema->connect (sub { $dbh });
+
+
+  lives_ok ( sub {
+    $schema->txn_do (sub { $schema->resultset ('CD')->delete });
+  }, 'No attempt to start a transaction with txn_do');
+
+  is ($schema->resultset('CD')->count, 0, 'Deletion successful');
+}
+
+done_testing;
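
Most of the new guard blocks above rest on one contract: a TxnScopeGuard commits only on an explicit ->commit and otherwise rolls back (with a warning) when it is destroyed. A minimal sketch, assuming a connected $schema:

    {
      my $guard = $schema->txn_scope_guard;
      $schema->resultset('Artist')->create({ name => 'guarded' });
      $guard->commit;  # drop this line and destruction rolls back instead
    }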

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/83cache.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/83cache.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/83cache.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -8,10 +8,9 @@
 my $schema = DBICTest->init_schema();
 
 my $queries;
-$schema->storage->debugcb( sub{ $queries++ } );
+my $debugcb = sub{ $queries++ };
+my $sdebug = $schema->storage->debug;
 
-eval "use DBD::SQLite";
-plan skip_all => 'needs DBD::SQLite for testing' if $@;
 plan tests => 23;
 
 my $rs = $schema->resultset("Artist")->search(
@@ -46,6 +45,7 @@
 
 $queries = 0;
 $schema->storage->debug(1);
+$schema->storage->debugcb ($debugcb);
 
 $rs = $schema->resultset('Artist')->search( undef, { cache => 1 } );
 while( $artist = $rs->next ) {}
@@ -53,7 +53,8 @@
 
 is( $queries, 1, 'revisiting a row does not issue a query when cache => 1' );
 
-$schema->storage->debug(0);
+$schema->storage->debug($sdebug);
+$schema->storage->debugcb (undef);
 
 my @a = $schema->resultset("Artist")->search(
   { },
@@ -73,11 +74,10 @@
   }
 );
 
-use Data::Dumper; $Data::Dumper::Deparse = 1;
-
 # start test for prefetch SELECT count
 $queries = 0;
 $schema->storage->debug(1);
+$schema->storage->debugcb ($debugcb);
 
 $artist = $rs->first;
 $rs->reset();
@@ -99,7 +99,8 @@
 
 is($queries, 1, 'only one SQL statement executed');
 
-$schema->storage->debug(0);
+$schema->storage->debug($sdebug);
+$schema->storage->debugcb (undef);
 
 # make sure related_resultset is deleted after object is updated
 $artist->set_column('name', 'New Name');
@@ -131,18 +132,21 @@
 # SELECT count for nested has_many prefetch
 $queries = 0;
 $schema->storage->debug(1);
+$schema->storage->debugcb ($debugcb);
 
 $artist = ($rs->all)[0];
 
 is($queries, 1, 'only one SQL statement executed');
 
-$schema->storage->debug(0);
+$schema->storage->debug($sdebug);
+$schema->storage->debugcb (undef);
 
 my @objs;
 #$artist = $rs->find(1);
 
 $queries = 0;
 $schema->storage->debug(1);
+$schema->storage->debugcb ($debugcb);
 
 my $cds = $artist->cds;
 my $tags = $cds->next->tags;
@@ -185,5 +189,5 @@
 
 is( $queries, 1, 'only one select statement on find with has_many prefetch on resultset' );
 
-$schema->storage->debug(0);
-
+$schema->storage->debug($sdebug);
+$schema->storage->debugcb (undef);
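
Every hunk in this file repeats the same three-step query-counting pattern, worth seeing once in full; a sketch assuming a connected $schema:

    my $queries = 0;
    my $sdebug  = $schema->storage->debug;   # remember the prior setting

    $schema->storage->debug(1);
    $schema->storage->debugcb(sub { $queries++ });

    $schema->resultset('Artist')->all;       # the code being measured

    $schema->storage->debug($sdebug);        # restore rather than zero out
    $schema->storage->debugcb(undef);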

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/85utf8.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/85utf8.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/85utf8.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,37 +1,43 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
+use Test::Warn;
 use lib qw(t/lib);
 use DBICTest;
 
-my $schema = DBICTest->init_schema();
+warning_like (
+  sub {
+    package A::Comp;
+    use base 'DBIx::Class';
+    sub store_column { shift->next::method (@_) };
+    1;
 
-if ($] <= 5.008000) {
+    package A::Test;
+    use base 'DBIx::Class::Core';
+    __PACKAGE__->load_components(qw(UTF8Columns +A::Comp));
+    1;
+  },
+  qr/Incorrect loading order of DBIx::Class::UTF8Columns.+affect other components overriding store_column \(A::Comp\)/,
+  'incorrect order warning issued',
+);
 
-    eval 'use Encode; 1' or plan skip_all => 'Need Encode run this test';
-
-} else {
-
-    eval 'use utf8; 1' or plan skip_all => 'Need utf8 run this test';
-}
-
-plan tests => 6;
-
+my $schema = DBICTest->init_schema();
 DBICTest::Schema::CD->load_components('UTF8Columns');
 DBICTest::Schema::CD->utf8_columns('title');
 Class::C3->reinitialize();
 
-my $cd = $schema->resultset('CD')->create( { artist => 1, title => 'øni', year => '2048' } );
-my $utf8_char = 'uniuni';
+my $cd = $schema->resultset('CD')->create( { artist => 1, title => "weird\x{466}stuff", year => '2048' } );
 
+ok( utf8::is_utf8( $cd->title ), 'got title with utf8 flag' );
+ok(! utf8::is_utf8( $cd->{_column_data}{title} ), 'store title without utf8' );
 
-ok( _is_utf8( $cd->title ), 'got title with utf8 flag' );
-ok(! _is_utf8( $cd->year ), 'got year without utf8 flag' );
+ok(! utf8::is_utf8( $cd->year ), 'got year without utf8 flag' );
+ok(! utf8::is_utf8( $cd->{_column_data}{year} ), 'store year without utf8' );
 
-_force_utf8($utf8_char);
-$cd->title($utf8_char);
-ok(! _is_utf8( $cd->{_column_data}{title} ), 'store utf8-less chars' );
+$cd->title('nonunicode');
+ok(! utf8::is_utf8( $cd->title ), 'got title without utf8 flag' );
+ok(! utf8::is_utf8( $cd->{_column_data}{title} ), 'store utf8-less chars' );
 
 
 my $v_utf8 = "\x{219}";
@@ -47,24 +53,7 @@
 TODO: {
   local $TODO = 'There is currently no way to propagate aliases to inflate_result()';
   $cd = $schema->resultset('CD')->find ({ title => $v_utf8 }, { select => 'title', as => 'name' });
-  ok (_is_utf8( $cd->get_column ('name') ), 'utf8 flag propagates via as');
+  ok (utf8::is_utf8( $cd->get_column ('name') ), 'utf8 flag propagates via as');
 }
 
-
-sub _force_utf8 {
-  if ($] <= 5.008000) {
-    Encode::_utf8_on ($_[0]);
-  }
-  else {
-    utf8::decode ($_[0]);
-  }
-}
-
-sub _is_utf8 {
-  if ($] <= 5.008000) {
-    return Encode::is_utf8 (shift);
-  }
-  else {
-    return utf8::is_utf8 (shift);
-  }
-}
+done_testing;
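
The deleted _is_utf8/_force_utf8 helpers existed only to paper over perl 5.8.0; on any newer perl the core utf8 functions used by the rewritten assertions suffice. A standalone sketch:

    use strict;
    use warnings;

    my $str = "weird\x{466}stuff";   # a wide character sets the utf8 flag
    print utf8::is_utf8($str) ? "flagged\n" : "plain bytes\n";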

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/86might_have.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/86might_have.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/86might_have.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -2,20 +2,16 @@
 use warnings;  
 
 use Test::More;
+use Test::Warn;
 use lib qw(t/lib);
 use DBICTest;
 
 my $schema = DBICTest->init_schema();
 
 my $queries;
-#$schema->storage->debugfh(IO::File->new('t/var/temp.trace', 'w'));
 $schema->storage->debugcb( sub{ $queries++ } );
+my $sdebug = $schema->storage->debug;
 
-eval "use DBD::SQLite";
-plan skip_all => 'needs DBD::SQLite for testing' if $@;
-plan tests => 2;
-
-
 my $cd = $schema->resultset("CD")->find(1);
 $cd->title('test');
 
@@ -28,7 +24,7 @@
 is($queries, 1, 'liner_notes (might_have) not prefetched - do not load 
 liner_notes on update');
 
-$schema->storage->debug(0);
+$schema->storage->debug($sdebug);
 
 
 my $cd2 = $schema->resultset("CD")->find(2, {prefetch => 'liner_notes'});
@@ -43,5 +39,26 @@
 is($queries, 1, 'liner_notes (might_have) prefetched - do not load 
 liner_notes on update');
 
-$schema->storage->debug(0);
+warning_like {
+  DBICTest::Schema::Bookmark->might_have(
+    linky => 'DBICTest::Schema::Link',
+    { "foreign.id" => "self.link" },
+  );
+}
+  qr{"might_have/has_one" must not be on columns with is_nullable set to true},
+  'might_have should warn if the self.link column is nullable';
 
+{
+  local $ENV{DBIC_DONT_VALIDATE_RELS} = 1;
+  warning_is {
+    DBICTest::Schema::Bookmark->might_have(
+      slinky => 'DBICTest::Schema::Link',
+      { "foreign.id" => "self.link" },
+    );
+  }
+  undef,
+  'Setting DBIC_DONT_VALIDATE_RELS suppresses nullable relation warnings';
+}
+
+$schema->storage->debug($sdebug);
+done_testing();

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/86sqlt.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/86sqlt.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/86sqlt.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -5,13 +5,82 @@
 use lib qw(t/lib);
 use DBICTest;
 
-eval "use SQL::Translator";
-plan skip_all => 'SQL::Translator required' if $@;
+BEGIN {
+  require DBIx::Class;
+  plan skip_all =>
+      'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy')
+    unless DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')
+}
 
-my $schema = DBICTest->init_schema;
+my $custom_deployment_statements_called = 0;
 
-plan tests => 133;
+sub DBICTest::Schema::deployment_statements {
+  $custom_deployment_statements_called = 1;
+  my $self = shift;
+  return $self->next::method(@_);
+}
 
+my $schema = DBICTest->init_schema (no_deploy => 1);
+
+
+# Check deployment statements ctx sensitivity
+{
+  my $not_first_table_creation_re = qr/CREATE TABLE fourkeys_to_twokeys/;
+
+
+  my $statements = $schema->deployment_statements;
+  like (
+    $statements,
+    $not_first_table_creation_re,
+    'All create statements returned in 1 string in scalar ctx'
+  );
+
+  my @statements = $schema->deployment_statements;
+  cmp_ok (scalar @statements, '>', 1, 'Multiple statement lines in array ctx');
+
+  my $i = 0;
+  while ($i <= $#statements) {
+    last if $statements[$i] =~ $not_first_table_creation_re;
+    $i++;
+  }
+
+  ok (
+    ($i > 0) && ($i <= $#statements),
+    "Creation statement was found somewherere within array ($i)"
+  );
+}
+
+
+
+{
+  my $deploy_hook_called = 0;
+
+  # replace the sqlt callback with a custom version adding an index
+  $schema->source('Track')->sqlt_deploy_callback(sub {
+    my ($self, $sqlt_table) = @_;
+
+    $deploy_hook_called = 1;
+
+    is (
+      $sqlt_table->schema->translator->producer_type,
+      join ('::', 'SQL::Translator::Producer', $schema->storage->sqlt_type),
+      'Producer type passed to translator object',
+    );
+
+    if ($schema->storage->sqlt_type eq 'SQLite' ) {
+      $sqlt_table->add_index( name => 'track_title', fields => ['title'] )
+        or die $sqlt_table->error;
+    }
+
+    $self->default_sqlt_deploy_hook($sqlt_table);
+  });
+
+  $schema->deploy; # do not remove, this fires the is() test in the callback above
+  ok($deploy_hook_called, 'deploy hook got called');
+ok($custom_deployment_statements_called, "->deploy used the schema's deployment_statements method");
+}
+
+
 my $translator = SQL::Translator->new( 
   parser_args => {
     'DBIx::Schema' => $schema,
@@ -26,17 +95,7 @@
     my $relinfo = $schema->source('Artist')->relationship_info ('cds');
     local $relinfo->{attrs}{on_delete} = 'restrict';
 
-    $schema->source('Track')->sqlt_deploy_callback(sub {
-      my ($self, $sqlt_table) = @_;
 
-      if ($schema->storage->sqlt_type eq 'SQLite' ) {
-        $sqlt_table->add_index( name => 'track_title', fields => ['title'] )
-          or die $sqlt_table->error;
-      }
-
-      $self->default_sqlt_deploy_hook($sqlt_table);
-    });
-
     $translator->parser('SQL::Translator::Parser::DBIx::Class');
     $translator->producer('SQLite');
 
@@ -45,6 +104,7 @@
     ok($output, "SQLT produced someoutput")
       or diag($translator->error);
 
+
     like (
       $warn,
       qr/SQLT attribute .+? was supplied for relationship .+? which does not appear to be a foreign constraint/,
@@ -224,6 +284,7 @@
       'name' => 'forceforeign_fk_artist', 'index_name' => 'forceforeign_idx_artist',
       'selftable' => 'forceforeign', 'foreigntable' => 'artist', 
       'selfcols'  => ['artist'], 'foreigncols' => ['artistid'], 
+      'noindex'  => 1,
       on_delete => '', on_update => '', deferrable => 1,
     },
   ],
@@ -419,21 +480,21 @@
   my ($expected, $got) = @_;
   my $desc = $expected->{display};
   is( $got->name, $expected->{name},
-      "name parameter correct for `$desc'" );
+      "name parameter correct for '$desc'" );
   is( $got->on_delete, $expected->{on_delete},
-      "on_delete parameter correct for `$desc'" );
+      "on_delete parameter correct for '$desc'" );
   is( $got->on_update, $expected->{on_update},
-      "on_update parameter correct for `$desc'" );
+      "on_update parameter correct for '$desc'" );
   is( $got->deferrable, $expected->{deferrable},
-      "is_deferrable parameter correct for `$desc'" );
+      "is_deferrable parameter correct for '$desc'" );
 
   my $index = get_index( $got->table, { fields => $expected->{selfcols} } );
 
   if ($expected->{noindex}) {
-      ok( !defined $index, "index doesn't for `$desc'" );
+      ok( !defined $index, "index doesn't exist for '$desc'" );
   } else {
-      ok( defined $index, "index exists for `$desc'" );
-      is( $index->name, $expected->{index_name}, "index has correct name for `$desc'" );
+      ok( defined $index, "index exists for '$desc'" );
+      is( $index->name, $expected->{index_name}, "index has correct name for '$desc'" );
   }
 }
 
@@ -441,5 +502,7 @@
   my ($expected, $got) = @_;
   my $desc = $expected->{display};
   is( $got->name, $expected->{name},
-      "name parameter correct for `$desc'" );
+      "name parameter correct for '$desc'" );
 }
+
+done_testing;
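
The context-sensitivity block above boils down to two calls; a hedged sketch assuming a $schema whose sources can be deployed:

    my $ddl        = $schema->deployment_statements;  # scalar ctx: one concatenated string
    my @statements = $schema->deployment_statements;  # list ctx: one element per statement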

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/87ordered.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/87ordered.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/87ordered.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -10,8 +10,6 @@
 
 my $schema = DBICTest->init_schema();
 
-plan tests => 1269;
-
 my $employees = $schema->resultset('Employee');
 $employees->delete();
 
@@ -42,11 +40,9 @@
 my $group_3 = $employees->search({group_id=>3});
 my $to_group = 1;
 my $to_pos = undef;
-# now that we have transactions we need to work around stupid sqlite
 {
   my @empl = $group_3->all;
   while (my $employee = shift @empl) {
-    $employee->discard_changes;     # since we are effective shift()ing the $rs while doing this
     $employee->move_to_group($to_group, $to_pos);
     $to_pos++;
     $to_group = $to_group==1 ? 2 : 1;
@@ -54,7 +50,6 @@
 }
 foreach my $group_id (1..4) {
     my $group_employees = $employees->search({group_id=>$group_id});
-    $group_employees->all();
     ok( check_rs($group_employees), "group positions after move_to_group" );
 }
 
@@ -129,7 +124,6 @@
 my $to_group_2 = 1;
 $to_pos = undef;
 
-# now that we have transactions we need to work around stupid sqlite
 {
   my @empl = $group_3->all;
   while (my $employee = shift @empl) {
@@ -143,7 +137,6 @@
 foreach my $group_id_2 (1..4) {
     foreach my $group_id_3 (1..4) {
         my $group_employees = $employees->search({group_id_2=>$group_id_2,group_id_3=>$group_id_3});
-        $group_employees->all();
         ok( check_rs($group_employees), "group positions after move_to_group" );
     }
 }
@@ -275,3 +268,4 @@
     return 1;
 }
 
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/88result_set_column.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/88result_set_column.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/88result_set_column.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,17 +1,23 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
+use Test::Warn;
 use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
+use DBIC::SqlMakerTest;
 
 my $schema = DBICTest->init_schema();
 
-plan tests => 18;
+my $rs = $schema->resultset("CD");
 
-my $cd;
-my $rs = $cd = $schema->resultset("CD")->search({}, { order_by => 'cdid' });
+cmp_ok (
+  $rs->count,
+    '!=',
+  $rs->search ({}, {columns => ['year'], distinct => 1})->count,
+  'At least one year is shared by multiple CDs in the resultset'
+);
 
 my $rs_title = $rs->get_column('title');
 my $rs_year = $rs->get_column('year');
@@ -34,35 +40,86 @@
 
 is($rs_year->first, 1999, "first okay");
 
+warnings_exist (sub {
+  is($rs_year->single, 1999, "single okay");
+}, qr/Query returned more than one row/, 'single warned');
+
+
+# test distinct propagation
+is_deeply (
+  [$rs->search ({}, { distinct => 1 })->get_column ('year')->all],
+  [$rs_year->func('distinct')],
+  'distinct => 1 is passed through properly',
+);
+
 # test +select/+as for single column
 my $psrs = $schema->resultset('CD')->search({},
     {
-        '+select'   => \'COUNT(*)',
-        '+as'       => 'count'
+        '+select'   => \'MAX(year)',
+        '+as'       => 'last_year'
     }
 );
-lives_ok(sub { $psrs->get_column('count')->next }, '+select/+as additional column "count" present (scalar)');
+lives_ok(sub { $psrs->get_column('last_year')->next }, '+select/+as additional column "last_year" present (scalar)');
 dies_ok(sub { $psrs->get_column('noSuchColumn')->next }, '+select/+as nonexistent column throws exception');
 
-# test +select/+as for multiple columns
+# test +select/+as for overriding a column
 $psrs = $schema->resultset('CD')->search({},
     {
-        '+select'   => [ \'COUNT(*)', 'title' ],
-        '+as'       => [ 'count', 'addedtitle' ]
+        'select'   => \"'The Final Countdown'",
+        'as'       => 'title'
     }
 );
-lives_ok(sub { $psrs->get_column('count')->next }, '+select/+as multiple additional columns, "count" column present');
-lives_ok(sub { $psrs->get_column('addedtitle')->next }, '+select/+as multiple additional columns, "addedtitle" column present');
+is($psrs->get_column('title')->next, 'The Final Countdown', '+select/+as overridden column "title"');
 
-# test +select/+as for overriding a column
+
+# test +select/+as for multiple columns
 $psrs = $schema->resultset('CD')->search({},
     {
-        'select'   => \"'The Final Countdown'",
-        'as'       => 'title'
+        '+select'   => [ \'LENGTH(title) AS title_length', 'title' ],
+        '+as'       => [ 'tlength', 'addedtitle' ]
     }
 );
-is($psrs->get_column('title')->next, 'The Final Countdown', '+select/+as overridden column "title"');
+lives_ok(sub { $psrs->get_column('tlength')->next }, '+select/+as multiple additional columns, "tlength" column present');
+lives_ok(sub { $psrs->get_column('addedtitle')->next }, '+select/+as multiple additional columns, "addedtitle" column present');
 
+# test that +select/+as specs do not leak
+is_same_sql_bind (
+  $psrs->get_column('year')->as_query,
+  '(SELECT me.year FROM cd me)',
+  [],
+  'Correct SQL for get_column/as'
+);
+
+is_same_sql_bind (
+  $psrs->get_column('addedtitle')->as_query,
+  '(SELECT me.title FROM cd me)',
+  [],
+  'Correct SQL for get_column/+as col'
+);
+
+is_same_sql_bind (
+  $psrs->get_column('tlength')->as_query,
+  '(SELECT LENGTH(title) AS title_length FROM cd me)',
+  [],
+  'Correct SQL for get_column/+as func'
+);
+
+# test that order_by over a function forces a subquery
+lives_ok ( sub {
+  is_deeply (
+    [ $psrs->search ({}, { order_by => { -desc => 'title_length' } })->get_column ('title')->all ],
+    [
+      "Generic Manufactured Singles",
+      "Come Be Depressed With Us",
+      "Caterwaulin' Blues",
+      "Spoonful of bees",
+      "Forkful of bees",
+    ],
+    'Subquery count induced by aliased ordering function',
+  );
+});
+
+# test for prefetch not leaking
 {
   my $rs = $schema->resultset("CD")->search({}, { prefetch => 'artist' });
   my $rsc = $rs->get_column('year');
@@ -76,3 +133,24 @@
 my $owner = $schema->resultset('Owners')->find ({ name => 'Newton' });
 ok ($owner->books->count > 1, 'Owner Newton has multiple books');
 is ($owner->search_related ('books')->get_column ('price')->sum, 60, 'Correctly calculated price of all owned books');
+
+
+# make sure joined/prefetched get_column of a PK dtrt
+
+$rs->reset;
+my $j_rs = $rs->search ({}, { join => 'tracks' })->get_column ('cdid');
+is_deeply (
+  [ $j_rs->all ],
+  [ map { my $c = $rs->next; ( ($c->id) x $c->tracks->count ) } (1 .. $rs->count) ],
+  'join properly explodes amount of rows from get_column',
+);
+
+$rs->reset;
+my $p_rs = $rs->search ({}, { prefetch => 'tracks' })->get_column ('cdid');
+is_deeply (
+  [ $p_rs->all ],
+  [ $rs->get_column ('cdid')->all ],
+  'prefetch properly collapses amount of rows from get_column',
+);
+
+done_testing;
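
The leak checks above hinge on ResultSetColumn's as_query, which exposes the SQL a column resultset would issue without running it; a short sketch assuming a deployed test $schema:

    my $years = $schema->resultset('CD')->get_column('year');

    my $max   = $years->max;        # issues SELECT MAX(me.year) FROM cd me
    my $query = $years->as_query;   # the subselect as a \[ $sql, @bind ] ref, nothing executed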

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/89dbicadmin.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/89dbicadmin.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/89dbicadmin.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,89 +0,0 @@
-# vim: filetype=perl
-use strict;
-use warnings;  
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-
-eval 'require JSON::Any';
-plan skip_all => 'Install JSON::Any to run this test' if ($@);
-
-eval 'require Text::CSV_XS';
-if ($@) {
-    eval 'require Text::CSV_PP';
-    plan skip_all => 'Install Text::CSV_XS or Text::CSV_PP to run this test' if ($@);
-}
-
-my @json_backends = qw/XS JSON DWIW Syck/;
-my $tests_per_run = 5;
-
-plan tests => $tests_per_run * @json_backends;
-
-for my $js (@json_backends) {
-
-    eval {JSON::Any->import ($js) };
-    SKIP: {
-        skip ("Json backend $js is not available, skip testing", $tests_per_run) if $@;
-
-        $ENV{JSON_ANY_ORDER} = $js;
-        eval { test_dbicadmin () };
-        diag $@ if $@;
-    }
-}
-
-sub test_dbicadmin {
-    my $schema = DBICTest->init_schema( sqlite_use_file => 1 );  # reinit a fresh db for every run
-
-    my $employees = $schema->resultset('Employee');
-
-    system( _prepare_system_args( qw|--op=insert --set={"name":"Matt"}| ) );
-    ok( ($employees->count()==1), "$ENV{JSON_ANY_ORDER}: insert count" );
-
-    my $employee = $employees->find(1);
-    ok( ($employee->name() eq 'Matt'), "$ENV{JSON_ANY_ORDER}: insert valid" );
-
-    system( _prepare_system_args( qw|--op=update --set={"name":"Trout"}| ) );
-    $employee = $employees->find(1);
-    ok( ($employee->name() eq 'Trout'), "$ENV{JSON_ANY_ORDER}: update" );
-
-    system( _prepare_system_args( qw|--op=insert --set={"name":"Aran"}| ) );
-
-    SKIP: {
-        skip ("MSWin32 doesn't support -| either", 1) if $^O eq 'MSWin32';
-
-        open(my $fh, "-|",  _prepare_system_args( qw|--op=select --attrs={"order_by":"name"}| ) ) or die $!;
-        my $data = do { local $/; <$fh> };
-        close($fh);
-        ok( ($data=~/Aran.*Trout/s), "$ENV{JSON_ANY_ORDER}: select with attrs" );
-    }
-
-    system( _prepare_system_args( qw|--op=delete --where={"name":"Trout"}| ) );
-    ok( ($employees->count()==1), "$ENV{JSON_ANY_ORDER}: delete" );
-}
-
-# Why do we need this crap? Apparently MSWin32 can not pass through quotes properly
-# (sometimes it will and sometimes not, depending on what compiler was used to build
-# perl). So we go the extra mile to escape all the quotes. We can't also use ' instead
-# of ", because JSON::XS (proudly) does not support "malformed JSON" as the author
-# calls it. Bleh.
-#
-sub _prepare_system_args {
-    my $perl = $^X;
-    my @args = (
-        qw|script/dbicadmin --quiet --schema=DBICTest::Schema --class=Employee --tlibs|,
-        q|--connect=["dbi:SQLite:dbname=t/var/DBIxClass.db","","",{"AutoCommit":1}]|,
-        qw|--force --tlibs|,
-        @_,
-    );
-
-    if ( $^O eq 'MSWin32' ) {
-        $perl = qq|"$perl"|;    # execution will fail if $^X contains paths
-        for (@args) {
-            $_ =~ s/"/\\"/g;
-        }
-    }
-
-    return ($perl, @args);
-}

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/90join_torture.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/90join_torture.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/90join_torture.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -46,9 +46,9 @@
 
 my $rs3 = $rs2->search_related('cds');
 
-cmp_ok(scalar($rs3->all), '==', 45, "All cds for artist returned");
+cmp_ok(scalar($rs3->all), '==', 15, "All cds for artist returned");
 
-cmp_ok($rs3->count, '==', 45, "All cds for artist returned via count");
+cmp_ok($rs3->count, '==', 15, "All cds for artist returned via count");
 
 my $rs4 = $schema->resultset("CD")->search({ 'artist.artistid' => '1' }, { join => ['tracks', 'artist'], prefetch => 'artist' });
 my @rs4_results = $rs4->all;

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/91debug.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/91debug.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/91debug.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,73 +0,0 @@
-use strict;
-use warnings; 
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBIC::DebugObj;
-use DBIC::SqlMakerTest;
-
-my $schema = DBICTest->init_schema();
-
-plan tests => 7;
-
-ok ( $schema->storage->debug(1), 'debug' );
-ok ( defined(
-       $schema->storage->debugfh(
-         IO::File->new('t/var/sql.log', 'w')
-       )
-     ),
-     'debugfh'
-   );
-
-$schema->storage->debugfh->autoflush(1);
-my $rs = $schema->resultset('CD')->search({});
-$rs->count();
-
-my $log = new IO::File('t/var/sql.log', 'r') or die($!);
-my $line = <$log>;
-$log->close();
-ok($line =~ /^SELECT COUNT/, 'Log success');
-
-$schema->storage->debugfh(undef);
-$ENV{'DBIC_TRACE'} = '=t/var/foo.log';
-$rs = $schema->resultset('CD')->search({});
-$rs->count();
-$log = new IO::File('t/var/foo.log', 'r') or die($!);
-$line = <$log>;
-$log->close();
-ok($line =~ /^SELECT COUNT/, 'Log success');
-$schema->storage->debugobj->debugfh(undef);
-delete($ENV{'DBIC_TRACE'});
-open(STDERRCOPY, '>&STDERR');
-stat(STDERRCOPY); # nop to get warnings quiet
-close(STDERR);
-eval {
-    $rs = $schema->resultset('CD')->search({});
-    $rs->count();
-};
-ok($@, 'Died on closed FH');
-open(STDERR, '>&STDERRCOPY');
-
-# test trace output correctness for bind params
-{
-    my ($sql, @bind);
-    $schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind));
-
-    my @cds = $schema->resultset('CD')->search( { artist => 1, cdid => { -between => [ 1, 3 ] }, } );
-    is_same_sql_bind(
-        $sql, \@bind,
-        "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) ): '1', '1', '3'",
-        [qw/'1' '1' '3'/],
-        'got correct SQL with all bind parameters (debugcb)'
-    );
-
-    @cds = $schema->resultset('CD')->search( { artist => 1, cdid => { -between => [ 1, 3 ] }, } );
-    is_same_sql_bind(
-        $sql, \@bind,
-        "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) )", ["'1'", "'1'", "'3'"],
-        'got correct SQL with all bind parameters (debugobj)'
-    );
-}
-
-1;

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/92storage.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/92storage.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/92storage.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,170 +0,0 @@
-use strict;
-use warnings;  
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use Data::Dumper;
-
-{
-    package DBICTest::ExplodingStorage::Sth;
-    use strict;
-    use warnings;
-
-    sub execute { die "Kablammo!" }
-
-    sub bind_param {}
-
-    package DBICTest::ExplodingStorage;
-    use strict;
-    use warnings;
-    use base 'DBIx::Class::Storage::DBI::SQLite';
-
-    my $count = 0;
-    sub sth {
-      my ($self, $sql) = @_;
-      return bless {},  "DBICTest::ExplodingStorage::Sth" unless $count++;
-      return $self->next::method($sql);
-    }
-
-    sub connected {
-      return 0 if $count == 1;
-      return shift->next::method(@_);
-    }
-}
-
-plan tests => 17;
-
-my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
-
-is( ref($schema->storage), 'DBIx::Class::Storage::DBI::SQLite',
-    'Storage reblessed correctly into DBIx::Class::Storage::DBI::SQLite' );
-
-my $storage = $schema->storage;
-$storage->ensure_connected;
-
-eval {
-    $schema->storage->throw_exception('test_exception_42');
-};
-like($@, qr/\btest_exception_42\b/, 'basic exception');
-
-eval {
-    $schema->resultset('CD')->search_literal('broken +%$#$1')->all;
-};
-like($@, qr/prepare_cached failed/, 'exception via DBI->HandleError, etc');
-
-bless $storage, "DBICTest::ExplodingStorage";
-$schema->storage($storage);
-
-eval { 
-    $schema->resultset('Artist')->create({ name => "Exploding Sheep" });
-};
-
-is($@, "", "Exploding \$sth->execute was caught");
-
-is(1, $schema->resultset('Artist')->search({name => "Exploding Sheep" })->count,
-  "And the STH was retired");
-
-
-# testing various invocations of connect_info ([ ... ])
-
-my $coderef = sub { 42 };
-my $invocations = {
-  'connect_info ([ $d, $u, $p, \%attr, \%extra_attr])' => {
-      args => [
-          'foo',
-          'bar',
-          undef,
-          {
-            on_connect_do => [qw/a b c/],
-            PrintError => 0,
-          },
-          {
-            AutoCommit => 1,
-            on_disconnect_do => [qw/d e f/],
-          },
-          {
-            unsafe => 1,
-            auto_savepoint => 1,
-          },
-        ],
-      dbi_connect_info => [
-          'foo',
-          'bar',
-          undef,
-          {
-            PrintError => 0,
-            AutoCommit => 1,
-          },
-      ],
-  },
-
-  'connect_info ([ \&code, \%extra_attr ])' => {
-      args => [
-          $coderef,
-          {
-            on_connect_do => [qw/a b c/],
-            PrintError => 0,
-            AutoCommit => 1,
-            on_disconnect_do => [qw/d e f/],
-          },
-          {
-            unsafe => 1,
-            auto_savepoint => 1,
-          },
-        ],
-      dbi_connect_info => [
-          $coderef,
-      ],
-  },
-
-  'connect_info ([ \%attr ])' => {
-      args => [
-          {
-            on_connect_do => [qw/a b c/],
-            PrintError => 0,
-            AutoCommit => 1,
-            on_disconnect_do => [qw/d e f/],
-            user => 'bar',
-            dsn => 'foo',
-          },
-          {
-            unsafe => 1,
-            auto_savepoint => 1,
-          },
-      ],
-      dbi_connect_info => [
-          'foo',
-          'bar',
-          undef,
-          {
-            PrintError => 0,
-            AutoCommit => 1,
-          },
-      ],
-  },
-};
-
-for my $type (keys %$invocations) {
-
-  # we cannot use a cloner portably because of the coderef
-  # so compare dumps instead
-  local $Data::Dumper::Sortkeys = 1;
-  my $arg_dump = Dumper ($invocations->{$type}{args});
-
-  $storage->connect_info ($invocations->{$type}{args});
-
-  is ($arg_dump, Dumper ($invocations->{$type}{args}), "$type didn't modify passed arguments");
-
-
-  is_deeply ($storage->_dbi_connect_info, $invocations->{$type}{dbi_connect_info}, "$type produced correct _dbi_connect_info");
-  ok ( (not $storage->auto_savepoint and not $storage->unsafe), "$type correctly ignored extra hashref");
-
-  is_deeply (
-    [$storage->on_connect_do, $storage->on_disconnect_do ],
-    [ [qw/a b c/], [qw/d e f/] ],
-    "$type correctly parsed DBIC specific on_[dis]connect_do",
-  );
-}
-
-1;

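For reference, the three connect_info() shapes the deleted test exercises look like this from application code (placeholder DSN and credentials; a sketch, not exhaustive documentation):

    # 1. DBI-style list: $dsn, $user, $pass, \%dbi_attrs, \%dbic_extra_attrs
    $schema->connection('dbi:SQLite:dbname=app.db', '', '',
      { PrintError => 0 },
      { AutoCommit => 1, on_connect_do => [ 'PRAGMA foreign_keys = ON' ] },
    );

    # 2. a coderef expected to return a connected $dbh
    $schema->connection(sub { DBI->connect('dbi:SQLite:dbname=app.db') });

    # 3. a single hashref carrying dsn/user alongside any attributes
    $schema->connection({
      dsn        => 'dbi:SQLite:dbname=app.db',
      user       => '',
      PrintError => 0,
      AutoCommit => 1,
    });
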
Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/92storage_on_connect_do.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/92storage_on_connect_do.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/92storage_on_connect_do.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,88 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More tests => 12;
-
-use lib qw(t/lib);
-use base 'DBICTest';
-
-
-my $schema = DBICTest->init_schema(
-    no_connect  => 1,
-    no_deploy   => 1,
-);
-
-ok $schema->connection(
-  DBICTest->_database,
-  {
-    on_connect_do => 'CREATE TABLE TEST_empty (id INTEGER)',
-  },
-), 'connection()';
-
-is_deeply (
-  $schema->storage->dbh->selectall_arrayref('SELECT * FROM TEST_empty'),
-  [],
-  'string version on_connect_do() worked'
-);
-
-$schema->storage->disconnect;
-
-ok $schema->connection(
-    DBICTest->_database,
-    {
-        on_connect_do       => [
-            'CREATE TABLE TEST_empty (id INTEGER)',
-            [ 'INSERT INTO TEST_empty VALUES (?)', {}, 2 ],
-            \&insert_from_subref,
-        ],
-        on_disconnect_do    =>
-            [\&check_exists, 'DROP TABLE TEST_empty', \&check_dropped],
-    },
-), 'connection()';
-
-is_deeply (
-  $schema->storage->dbh->selectall_arrayref('SELECT * FROM TEST_empty'),
-  [ [ 2 ], [ 3 ], [ 7 ] ],
-  'on_connect_do() worked'
-);
-eval { $schema->storage->dbh->do('SELECT 1 FROM TEST_nonexistent'); };
-ok $@, 'Searching for nonexistent table dies';
-
-$schema->storage->disconnect();
-
-my($connected, $disconnected, @cb_args);
-ok $schema->connection(
-    DBICTest->_database,
-    {
-        on_connect_do       => sub { $connected = 1; @cb_args = @_; },
-        on_disconnect_do    => sub { $disconnected = 1 },
-    },
-), 'second connection()';
-$schema->storage->dbh->do('SELECT 1');
-ok $connected, 'on_connect_do() called after connect()';
-ok ! $disconnected, 'on_disconnect_do() not called after connect()';
-$schema->storage->disconnect();
-ok $disconnected, 'on_disconnect_do() called after disconnect()';
-
-isa_ok($cb_args[0], 'DBIx::Class::Storage', 'first arg to on_connect_do hook');
-
-sub check_exists {
-    my $storage = shift;
-    ok $storage->dbh->do('SELECT 1 FROM TEST_empty'), 'Table still exists';
-    return;
-}
-
-sub check_dropped {
-    my $storage = shift;
-    eval { $storage->dbh->do('SELECT 1 FROM TEST_empty'); };
-    ok $@, 'Reading from dropped table fails';
-    return;
-}
-
-sub insert_from_subref {
-    my $storage = shift;
-    return [
-        [ 'INSERT INTO TEST_empty VALUES (?)', {}, 3 ],
-        'INSERT INTO TEST_empty VALUES (7)',
-    ];
-}

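Distilled from the deleted test: on_connect_do (and on_disconnect_do) accept a single SQL string, a coderef, or a mixed arrayref of strings, [ $sql, \%attrs, @bind ] arrayrefs and coderefs. A coderef receives the storage object and may return further statements to run, as insert_from_subref above does:

    $schema->connection($dsn, $user, $pass, {
      on_connect_do => [
        'CREATE TABLE TEST_empty (id INTEGER)',           # plain SQL
        [ 'INSERT INTO TEST_empty VALUES (?)', {}, 2 ],   # SQL with bind values
        sub {
          my $storage = shift;                            # gets the storage object
          return [ 'INSERT INTO TEST_empty VALUES (7)' ]; # extra statements to run
        },
      ],
      on_disconnect_do => 'DROP TABLE TEST_empty',
    });
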
Added: DBIx-Class/0.08/branches/run_file_against_storage/t/93autocast.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/93autocast.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/93autocast.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,82 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+{ # Fake storage driver for sqlite with autocast
+    package DBICTest::SQLite::AutoCast;
+    use base qw/
+        DBIx::Class::Storage::DBI::AutoCast
+        DBIx::Class::Storage::DBI::SQLite
+    /;
+    use mro 'c3';
+
+    my $type_map = {
+      datetime => 'DateTime',
+      integer => 'INT',
+      int => undef, # no conversion
+    };
+
+    sub _native_data_type {
+      return $type_map->{$_[1]};
+    }
+}
+
+my $schema = DBICTest->init_schema (storage_type => 'DBICTest::SQLite::AutoCast');
+
+# 'me.id' will be cast unlike the unqualified 'id'
+my $rs = $schema->resultset ('CD')->search ({
+  cdid => { '>', 5 },
+  'tracks.last_updated_at' => { '!=', undef },
+  'tracks.last_updated_on' => { '<', 2009 },
+  'tracks.position' => 4,
+  'tracks.single_track' => \[ '= ?', [ single_track => [1, 2, 3 ] ] ],
+}, { join => 'tracks' });
+
+my $bind = [
+  [ cdid => 5 ],
+  [ 'tracks.last_updated_on' => 2009 ],
+  [ 'tracks.position' => 4 ],
+  [ 'single_track' => [ 1, 2, 3] ],
+];
+
+is_same_sql_bind (
+  $rs->as_query,
+  '(
+    SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+      FROM cd me
+      LEFT JOIN track tracks ON tracks.cd = me.cdid
+    WHERE
+          cdid > ?
+      AND tracks.last_updated_at IS NOT NULL
+      AND tracks.last_updated_on < ?
+      AND tracks.position = ?
+      AND tracks.single_track = ?
+  )',
+  $bind,
+  'expected sql with casting off',
+);
+
+$schema->storage->auto_cast (1);
+
+is_same_sql_bind (
+  $rs->as_query,
+  '(
+    SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+      FROM cd me
+      LEFT JOIN track tracks ON tracks.cd = me.cdid
+    WHERE
+          cdid > CAST(? AS INT)
+      AND tracks.last_updated_at IS NOT NULL
+      AND tracks.last_updated_on < CAST (? AS DateTime)
+      AND tracks.position = ?
+      AND tracks.single_track = CAST(? AS INT)
+  )',
+  $bind,
+  'expected sql with casting on',
+);
+
+done_testing;

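The new AutoCast component rewrites a bare placeholder as CAST(? AS <type>) whenever the storage's _native_data_type() hook returns a mapping for the column's declared type; types that map to undef (like 'int' in the fake driver above) stay as plain placeholders. As the test shows, it can be toggled at runtime:

    $schema->storage->auto_cast(1);
    # WHERE cdid > ?   now renders as   WHERE cdid > CAST(? AS INT)
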
Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/93nobindvars.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/93nobindvars.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/93nobindvars.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -65,5 +65,6 @@
 
 # clean up our mess
 END {
+    my $dbh = eval { $schema->storage->_dbh };
     $dbh->do("DROP TABLE artist") if $dbh;
 }

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/93single_accessor_object.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/93single_accessor_object.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/93single_accessor_object.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -48,7 +48,7 @@
 	my $artist = $schema->resultset('Artist')->create({ artistid => 666, name => 'bad religion' });
 	my $cd = $schema->resultset('CD')->create({ cdid => 187, artist => 1, title => 'how could hell be any worse?', year => 1982, genreid => undef });
 
-	ok(!defined($cd->genreid), 'genreid is NULL');
+	ok(!defined($cd->get_column('genreid')), 'genreid is NULL');  #no accessor was defined for this column
 	ok(!defined($cd->genre), 'genre accessor returns undef');
 }
 

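The switch to get_column() above is needed because the test schema declares the genreid column without an accessor; the raw column value remains reachable regardless:

    # no genreid() accessor exists, but the stored value is still readable
    my $genreid = $cd->get_column('genreid');
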
Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/93storage_replication.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/93storage_replication.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/93storage_replication.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,717 +0,0 @@
-use strict;
-use warnings;
-use lib qw(t/lib);
-use Test::More;
-use Test::Exception;
-use DBICTest;
-use List::Util 'first';
-use Scalar::Util 'reftype';
-use IO::Handle;
-
-BEGIN {
-    eval "use DBIx::Class::Storage::DBI::Replicated; use Test::Moose";
-    plan $@
-        ? ( skip_all => "Deps not installed: $@" )
-        : ( tests => 90 );
-}
-
-use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
-use_ok 'DBIx::Class::Storage::DBI::Replicated::Balancer';
-use_ok 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-use_ok 'DBIx::Class::Storage::DBI::Replicated';
-
-=head1 HOW TO USE
-
-    This is a test of the replicated storage system.  This will work in one of
-    two ways: either it will try to fake replication with a couple of SQLite DBs
-    and creative use of copy, or, if you define a couple of %ENV vars correctly,
-    it will try to test those.  If you do that, it will assume the setup is
-    properly replicating.  Your results may vary, but I have demonstrated this
-    to work with MySQL native replication.
-    
-=cut
-
-
-## ----------------------------------------------------------------------------
-## Build a class to hold all our required testing data and methods.
-## ----------------------------------------------------------------------------
-
-TESTSCHEMACLASSES: {
-
-    ## --------------------------------------------------------------------- ##
-    ## Create an object to contain your replicated stuff.
-    ## --------------------------------------------------------------------- ##
-    
-    package DBIx::Class::DBI::Replicated::TestReplication;
-   
-    use DBICTest;
-    use base qw/Class::Accessor::Fast/;
-    
-    __PACKAGE__->mk_accessors( qw/schema/ );
-
-    ## Initialize the object
-    
-	sub new {
-	    my ($class, $schema_method) = (shift, shift);
-	    my $self = $class->SUPER::new(@_);
-	
-	    $self->schema( $self->init_schema($schema_method) );
-	    return $self;
-	}
-    
-    ## Get the Schema and set the replication storage type
-    
-    sub init_schema {
-        # current SQLT SQLite producer does not handle DROP TABLE IF EXISTS, trap warnings here
-        local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/ };
-
-        my ($class, $schema_method) = @_;
-
-        my $method = "get_schema_$schema_method";
-        my $schema = $class->$method;
-
-        return $schema;
-    }
-
-    sub get_schema_by_storage_type {
-      DBICTest->init_schema(
-        sqlite_use_file => 1,
-        storage_type=>{
-          '::DBI::Replicated' => {
-            balancer_type=>'::Random',
-            balancer_args=>{
-              auto_validate_every=>100,
-	      master_read_weight => 1
-            },
-          }
-        },
-        deploy_args=>{
-          add_drop_table => 1,
-        },
-      );
-    }
-
-    sub get_schema_by_connect_info {
-      DBICTest->init_schema(
-        sqlite_use_file => 1,
-        storage_type=> '::DBI::Replicated',
-        balancer_type=>'::Random',
-        balancer_args=> {
-          auto_validate_every=>100,
-	  master_read_weight => 1
-        },
-        deploy_args=>{
-          add_drop_table => 1,
-        },
-      );
-    }
-
-    sub generate_replicant_connect_info {}
-    sub replicate {}
-    sub cleanup {}
-
-    ## --------------------------------------------------------------------- ##
-    ## Add a connect_info option to test option merging.
-    ## --------------------------------------------------------------------- ##
-    {
-    package DBIx::Class::Storage::DBI::Replicated;
-
-    use Moose;
-
-    __PACKAGE__->meta->make_mutable;
-
-    around connect_info => sub {
-      my ($next, $self, $info) = @_;
-      $info->[3]{master_option} = 1;
-      $self->$next($info);
-    };
-
-    __PACKAGE__->meta->make_immutable;
-
-    no Moose;
-    }
-  
-    ## --------------------------------------------------------------------- ##
-    ## Subclass for when you are using SQLite for testing; this provides fake
-    ## replication support.
-    ## --------------------------------------------------------------------- ##
-        
-    package DBIx::Class::DBI::Replicated::TestReplication::SQLite;
-
-    use DBICTest;
-    use File::Copy;    
-    use base 'DBIx::Class::DBI::Replicated::TestReplication';
-    
-    __PACKAGE__->mk_accessors( qw/master_path slave_paths/ );
-    
-    ## Set the master path from DBICTest
-    
-	sub new {
-	    my $class = shift @_;
-	    my $self = $class->SUPER::new(@_);
-	
-	    $self->master_path( DBICTest->_sqlite_dbfilename );
-	    $self->slave_paths([
-            "t/var/DBIxClass_slave1.db",
-            "t/var/DBIxClass_slave2.db",    
-        ]);
-        
-	    return $self;
-	}    
-	
-    ## Return an array of arrayrefs, each one suitable to use as a
-    ## $storage->connect_info value when connecting replicants.
-    
-    sub generate_replicant_connect_info {
-        my $self = shift @_;
-        my @dsn = map {
-            "dbi:SQLite:${_}";
-        } @{$self->slave_paths};
-        
-        my @connect_infos = map { [$_,'','',{AutoCommit=>1}] } @dsn;
-
-    # try a hashref too
-        my $c = $connect_infos[0];
-        $connect_infos[0] = {
-          dsn => $c->[0],
-          user => $c->[1],
-          password => $c->[2],
-          %{ $c->[3] }
-        };
-
-        @connect_infos
-    }
-
-    ## Do a 'good enough' replication by copying the master dbfile over each of
-    ## the slave dbfiles.  If the master is SQLite we do this, otherwise we
-    ## just do a one second pause to let the slaves catch up.
-    
-    sub replicate {
-        my $self = shift @_;
-        foreach my $slave (@{$self->slave_paths}) {
-            copy($self->master_path, $slave);
-        }
-    }
-    
-    ## Cleanup after ourselves.  Unlink all the slave paths.
-    
-    sub cleanup {
-        my $self = shift @_;
-        foreach my $slave (@{$self->slave_paths}) {
-            unlink $slave;
-        }     
-    }
-    
-    ## --------------------------------------------------------------------- ##
-    ## Subclass for when you are setting the databases via custom export vars
-    ## This is for when you have a replicating database setup that you are
-    ## going to test against.  You'll need to define the correct $ENV and have
-    ## two slave databases to test against, as well as a replication system
-    ## that will replicate in less than 1 second.
-    ## --------------------------------------------------------------------- ##
-        
-    package DBIx::Class::DBI::Replicated::TestReplication::Custom; 
-    use base 'DBIx::Class::DBI::Replicated::TestReplication';
-    
-    ## Return an array of arrayrefs, each one suitable to use as a
-    ## $storage->connect_info value when connecting replicants.
-    
-    sub generate_replicant_connect_info { 
-        return (
-            [$ENV{"DBICTEST_SLAVE0_DSN"}, $ENV{"DBICTEST_SLAVE0_DBUSER"}, $ENV{"DBICTEST_SLAVE0_DBPASS"}, {AutoCommit => 1}],
-            [$ENV{"DBICTEST_SLAVE1_DSN"}, $ENV{"DBICTEST_SLAVE1_DBUSER"}, $ENV{"DBICTEST_SLAVE1_DBPASS"}, {AutoCommit => 1}],           
-        );
-    }
-    
-    ## pause a bit to let the replication catch up 
-    
-    sub replicate {
-    	sleep 1;
-    } 
-}
-
-## ----------------------------------------------------------------------------
-## Create an object and run some tests
-## ----------------------------------------------------------------------------
-
-## The first bunch of tests is basic: just make sure all the bits are behaving
-
-my $replicated_class = DBICTest->has_custom_dsn ?
-    'DBIx::Class::DBI::Replicated::TestReplication::Custom' :
-    'DBIx::Class::DBI::Replicated::TestReplication::SQLite';
-
-my $replicated;
-
-for my $method (qw/by_connect_info by_storage_type/) {
-  ok $replicated = $replicated_class->new($method)
-      => "Created a replication object $method";
-      
-  isa_ok $replicated->schema
-      => 'DBIx::Class::Schema';
-      
-  isa_ok $replicated->schema->storage
-      => 'DBIx::Class::Storage::DBI::Replicated';
-
-  isa_ok $replicated->schema->storage->balancer
-      => 'DBIx::Class::Storage::DBI::Replicated::Balancer::Random'
-      => 'configured balancer_type';
-}
-
-ok $replicated->schema->storage->meta
-    => 'has a meta object';
-    
-isa_ok $replicated->schema->storage->master
-    => 'DBIx::Class::Storage::DBI';
-    
-isa_ok $replicated->schema->storage->pool
-    => 'DBIx::Class::Storage::DBI::Replicated::Pool';
-    
-does_ok $replicated->schema->storage->balancer
-    => 'DBIx::Class::Storage::DBI::Replicated::Balancer'; 
-
-ok my @replicant_connects = $replicated->generate_replicant_connect_info
-    => 'got replication connect information';
-
-ok my @replicated_storages = $replicated->schema->storage->connect_replicants(@replicant_connects)
-    => 'Created some storages suitable for replicants';
-
-ok my @all_storages = $replicated->schema->storage->all_storages
-    => '->all_storages';
-
-is scalar @all_storages,
-    3
-    => 'correct number of ->all_storages';
-
-is ((grep $_->isa('DBIx::Class::Storage::DBI'), @all_storages),
-    3
-    => '->all_storages are correct type');
-
-my @all_storage_opts =
-  grep { (reftype($_)||'') eq 'HASH' }
-    map @{ $_->_connect_info }, @all_storages;
-
-is ((grep $_->{master_option}, @all_storage_opts),
-    3
-    => 'connect_info was merged from master to replicants');
- 
-my @replicant_names = keys %{ $replicated->schema->storage->replicants };
-
-## Silence warning about not supporting the is_replicating method if using the
-## sqlite dbs.
-$replicated->schema->storage->debugobj->silence(1)
-  if first { m{^t/} } @replicant_names;
-   
-isa_ok $replicated->schema->storage->balancer->current_replicant
-    => 'DBIx::Class::Storage::DBI'; 
-
-$replicated->schema->storage->debugobj->silence(0);
-
-ok $replicated->schema->storage->pool->has_replicants
-    => 'does have replicants';     
-
-is $replicated->schema->storage->pool->num_replicants => 2
-    => 'has two replicants';
-       
-does_ok $replicated_storages[0]
-    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-
-does_ok $replicated_storages[1]
-    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-    
-does_ok $replicated->schema->storage->replicants->{$replicant_names[0]}
-    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-
-does_ok $replicated->schema->storage->replicants->{$replicant_names[1]}
-    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';  
-
-## Add some info to the database
-
-$replicated
-    ->schema
-    ->populate('Artist', [
-        [ qw/artistid name/ ],
-        [ 4, "Ozric Tentacles"],
-    ]);
-                
-## Make sure all the slaves have the table definitions
-
-$replicated->replicate;
-$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
-$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
-
-## Silence warning about not supporting the is_replicating method if using the
-## sqlite dbs.
-$replicated->schema->storage->debugobj->silence(1)
-  if first { m{^t/} } @replicant_names;
- 
-$replicated->schema->storage->pool->validate_replicants;
-
-$replicated->schema->storage->debugobj->silence(0);
-
-## Make sure we can read the data.
-
-ok my $artist1 = $replicated->schema->resultset('Artist')->find(4)
-    => 'Created Result';
-
-isa_ok $artist1
-    => 'DBICTest::Artist';
-    
-is $artist1->name, 'Ozric Tentacles'
-    => 'Found expected name for first result';
-
-## Check that master_read_weight is honored
-{
-    no warnings qw/once redefine/;
-
-    local
-    *DBIx::Class::Storage::DBI::Replicated::Balancer::Random::_random_number =
-	sub { 999 };
-
-    $replicated->schema->storage->balancer->increment_storage;
-
-    is $replicated->schema->storage->balancer->current_replicant,
-       $replicated->schema->storage->master
-       => 'master_read_weight is honored';
-
-    ## turn it off for the duration of the test
-    $replicated->schema->storage->balancer->master_read_weight(0);
-    $replicated->schema->storage->balancer->increment_storage;
-}
-
-## Add some new rows that only the master will have.  This is because
-## we overload any type of write operation so that it must hit the master
-## database.
-
-$replicated
-    ->schema
-    ->populate('Artist', [
-        [ qw/artistid name/ ],
-        [ 5, "Doom's Children"],
-        [ 6, "Dead On Arrival"],
-        [ 7, "Watergate"],
-    ]);
-
-## Make sure all the slaves have the table definitions
-$replicated->replicate;
-
-## Should find some data now
-
-ok my $artist2 = $replicated->schema->resultset('Artist')->find(5)
-    => 'Sync succeeded';
-    
-isa_ok $artist2
-    => 'DBICTest::Artist';
-    
-is $artist2->name, "Doom's Children"
-    => 'Found expected name for first result';
-
-## What happens when we disconnect all the replicants?
-
-is $replicated->schema->storage->pool->connected_replicants => 2
-    => "both replicants are connected";
-    
-$replicated->schema->storage->replicants->{$replicant_names[0]}->disconnect;
-$replicated->schema->storage->replicants->{$replicant_names[1]}->disconnect;
-
-is $replicated->schema->storage->pool->connected_replicants => 0
-    => "both replicants are now disconnected";
-
-## All these should pass, since the database should automatically reconnect
-
-ok my $artist3 = $replicated->schema->resultset('Artist')->find(6)
-    => 'Still finding stuff.';
-    
-isa_ok $artist3
-    => 'DBICTest::Artist';
-    
-is $artist3->name, "Dead On Arrival"
-    => 'Found expected name for first result';
-
-is $replicated->schema->storage->pool->connected_replicants => 1
-    => "At Least One replicant reconnected to handle the job";
-    
-## What happens when we try to select something that doesn't exist?
-
-ok ! $replicated->schema->resultset('Artist')->find(666)
-    => 'Correctly failed to find something.';
-    
-## test the reliable option
-
-TESTRELIABLE: {
-	
-	$replicated->schema->storage->set_reliable_storage;
-	
-	ok $replicated->schema->resultset('Artist')->find(2)
-	    => 'Read from master 1';
-	
-	ok $replicated->schema->resultset('Artist')->find(5)
-	    => 'Read from master 2';
-	    
-    $replicated->schema->storage->set_balanced_storage;	    
-	    
-	ok $replicated->schema->resultset('Artist')->find(3)
-        => 'Read from replicant';
-}
-
-## Make sure when reliable goes out of scope, we are using replicants again
-
-ok $replicated->schema->resultset('Artist')->find(1)
-    => 'back to replicant 1.';
-    
-ok $replicated->schema->resultset('Artist')->find(2)
-    => 'back to replicant 2.';
-
-## set all the replicants to inactive, and make sure the balancer falls back to
-## the master.
-
-$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
-$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
-
-{
-    ## catch the fallback to master warning
-    open my $debugfh, '>', \my $fallback_warning;
-    my $oldfh = $replicated->schema->storage->debugfh;
-    $replicated->schema->storage->debugfh($debugfh);
-
-    ok $replicated->schema->resultset('Artist')->find(2)
-	=> 'Fallback to master';
-
-    like $fallback_warning, qr/falling back to master/
-	=> 'emits falling back to master warning';
-
-    $replicated->schema->storage->debugfh($oldfh);
-}
-
-$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
-$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
-
-## Silence warning about not supporting the is_replicating method if using the
-## sqlite dbs.
-$replicated->schema->storage->debugobj->silence(1)
-  if first { m{^t/} } @replicant_names;
- 
-$replicated->schema->storage->pool->validate_replicants;
-
-$replicated->schema->storage->debugobj->silence(0);
-
-ok $replicated->schema->resultset('Artist')->find(2)
-    => 'Returned to replicants';
-    
-## Getting slave status tests
-
-SKIP: {
-    ## We skip these tests unless you have custom replicants, since the default
-    ## sqlite-based replication tests don't support these functions.
-    
-    skip 'Cannot Test Replicant Status on Non Replicating Database', 9
-     unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
-
-    $replicated->replicate; ## Give the slaves a chance to catch up.
-
-	ok $replicated->schema->storage->replicants->{$replicant_names[0]}->is_replicating
-	    => 'Replicants are replicating';
-	    
-	is $replicated->schema->storage->replicants->{$replicant_names[0]}->lag_behind_master, 0
-	    => 'Replicant is zero seconds behind master';
-	    
-	## Test the validate replicants
-	
-	$replicated->schema->storage->pool->validate_replicants;
-	
-	is $replicated->schema->storage->pool->active_replicants, 2
-	    => 'Still have 2 replicants after validation';
-	    
-	## Force the replicants to fail the validate test by requiring their lag to
-	## be negative (i.e. ahead of the master!)
-	
-    $replicated->schema->storage->pool->maximum_lag(-10);
-    $replicated->schema->storage->pool->validate_replicants;
-    
-    is $replicated->schema->storage->pool->active_replicants, 0
-        => 'No way a replicant can be ahead of the master';
-        
-    ## Let's be fair to the replicants again.  Let them lag up to 5
-	
-    $replicated->schema->storage->pool->maximum_lag(5);
-    $replicated->schema->storage->pool->validate_replicants;
-    
-    is $replicated->schema->storage->pool->active_replicants, 2
-        => 'Both replicants in good standing again';	
-        
-	## Check auto validate
-	
-	is $replicated->schema->storage->balancer->auto_validate_every, 100
-	    => "Got the expected value for auto validate";
-	    
-		## This will make sure we auto validate every time
-		$replicated->schema->storage->balancer->auto_validate_every(0);
-		
-		## set all the replicants to inactive, and make sure the balancer falls back to
-		## the master.
-		
-		$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
-		$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
-		
-		## Ok, now when we go to run a query, autovalidate SHOULD reconnect
-	
-	is $replicated->schema->storage->pool->active_replicants => 0
-	    => "both replicants turned off";
-	    	
-	ok $replicated->schema->resultset('Artist')->find(5)
-	    => 'replicant reactivated';
-	    
-	is $replicated->schema->storage->pool->active_replicants => 2
-	    => "both replicants reactivated";        
-}
-
-## Test the reliably callback
-
-ok my $reliably = sub {
-	
-    ok $replicated->schema->resultset('Artist')->find(5)
-        => 'replicant reactivated';	
-	
-} => 'created coderef properly';
-
-$replicated->schema->storage->execute_reliably($reliably);
-
-## Try something with an error
-
-ok my $unreliably = sub {
-    
-    ok $replicated->schema->resultset('ArtistXX')->find(5)
-        => 'replicant reactivated'; 
-    
-} => 'created coderef properly';
-
-throws_ok {$replicated->schema->storage->execute_reliably($unreliably)} 
-    qr/Can't find source for ArtistXX/
-    => 'Bad coderef throws proper error';
-    
-## Make sure replication came back
-
-ok $replicated->schema->resultset('Artist')->find(3)
-    => 'replicant reactivated';
-    
-## make sure transactions are set to execute_reliably
-
-ok my $transaction = sub {
-	
-	my $id = shift @_;
-	
-	$replicated
-	    ->schema
-	    ->populate('Artist', [
-	        [ qw/artistid name/ ],
-	        [ $id, "Children of the Grave"],
-	    ]);
-	    
-    ok my $result = $replicated->schema->resultset('Artist')->find($id)
-        => 'Found expected artist';
-        
-    ok my $more = $replicated->schema->resultset('Artist')->find(1)
-        => 'Found expected artist again';
-        
-   return ($result, $more);
-   
-} => 'Created a coderef properly';
-
-## Test the transaction with multi return
-{
-	ok my @return = $replicated->schema->txn_do($transaction, 666)
-	    => 'did transaction';
-	    
-	    is $return[0]->id, 666
-	        => 'first returned value is correct';
-	        
-	    is $return[1]->id, 1
-	        => 'second returned value is correct';
-}
-
-## Test that asking for single return works
-{
-	ok my $return = $replicated->schema->txn_do($transaction, 777)
-	    => 'did transaction';
-	    
-	    is $return->id, 777
-	        => 'first returned value is correct';
-}
-
-## Test transaction returning a single value
-
-{
-	ok my $result = $replicated->schema->txn_do(sub {
-		ok my $more = $replicated->schema->resultset('Artist')->find(1)
-		=> 'found inside a transaction';
-		return $more;
-	}) => 'successfully processed transaction';
-	
-	is $result->id, 1
-	   => 'Got expected single result from transaction';
-}
-
-## Make sure replication came back
-
-ok $replicated->schema->resultset('Artist')->find(1)
-    => 'replicant reactivated';
-    
-## Test Discard changes
-
-{
-	ok my $artist = $replicated->schema->resultset('Artist')->find(2)
-	    => 'got an artist to test discard changes';
-	    
-	ok $artist->discard_changes
-	   => 'properly discard changes';
-}
-
-## Test some edge cases, like trying to do a transaction inside a transaction, etc
-
-{
-    ok my $result = $replicated->schema->txn_do(sub {
-    	return $replicated->schema->txn_do(sub {
-	        ok my $more = $replicated->schema->resultset('Artist')->find(1)
-	        => 'found inside a transaction inside a transaction';
-	        return $more;    		
-    	});
-    }) => 'successfully processed transaction';
-    
-    is $result->id, 1
-       => 'Got expected single result from transaction';	  
-}
-
-{
-    ok my $result = $replicated->schema->txn_do(sub {
-    	return $replicated->schema->storage->execute_reliably(sub {
-	    	return $replicated->schema->txn_do(sub {
-	    		return $replicated->schema->storage->execute_reliably(sub {
-			        ok my $more = $replicated->schema->resultset('Artist')->find(1)
-			        => 'found inside crazy deep transactions and execute_reliably';
-			        return $more; 	    			
-	    		});
-	    	});    	
-    	});
-    }) => 'successfully processed transaction';
-    
-    is $result->id, 1
-       => 'Got expected single result from transaction';	  
-}     
-
-## Test the force_pool resultset attribute.
-
-{
-	ok my $artist_rs = $replicated->schema->resultset('Artist')
-        => 'got artist resultset';
-	   
-	## Turn on Forced Pool Storage
-	ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>'master'})
-        => 'Created a resultset using force_pool storage';
-	   
-    ok my $artist = $reliable_artist_rs->find(2) 
-        => 'got an artist result via force_pool storage';
-}
-
-## Delete the old database files
-$replicated->cleanup;
-
-# vim: sw=4 sts=4 :

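The deleted file doubles as the de-facto usage reference for ::DBI::Replicated. Boiled down, with placeholder DSNs, and following the same calls and balancer_type attribute the test configures (a sketch, not verbatim documentation):

    $schema->storage_type([ '::DBI::Replicated', { balancer_type => '::Random' } ]);
    $schema->connection($master_dsn, $user, $pass);

    $schema->storage->connect_replicants(
      [ $slave1_dsn, $user, $pass, { AutoCommit => 1 } ],
      [ $slave2_dsn, $user, $pass, { AutoCommit => 1 } ],
    );

    # reads are balanced over the replicants; writes, transactions and
    # anything wrapped in execute_reliably() go to the master
    $schema->storage->execute_reliably(sub {
      $schema->resultset('Artist')->find(5);
    });
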
Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/94versioning.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/94versioning.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/94versioning.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,10 +1,13 @@
 #!/usr/bin/perl
+
 use strict;
 use warnings;
 use Test::More;
-use File::Spec;
+use Test::Warn;
+use Test::Exception;
+
+use Path::Class;
 use File::Copy;
-use Time::HiRes qw/time sleep/;
 
 #warn "$dsn $user $pass";
 my ($dsn, $user, $pass);
@@ -15,113 +18,153 @@
   plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
     unless ($dsn);
 
+  eval { require Time::HiRes }
+    || plan skip_all => 'Test needs Time::HiRes';
+  Time::HiRes->import(qw/time sleep/);
 
-    eval "use DBD::mysql; use SQL::Translator 0.09003;";
-    plan $@
-        ? ( skip_all => 'needs DBD::mysql and SQL::Translator 0.09003 for testing' )
-        : ( tests => 22 );
+  require DBIx::Class;
+  plan skip_all =>
+      'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy')
+    unless DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')
 }
 
+use lib qw(t/lib);
+use DBICTest; # do not remove even though it is not used
+
+use_ok('DBICVersion_v1');
+
 my $version_table_name = 'dbix_class_schema_versions';
 my $old_table_name = 'SchemaVersions';
 
-my $ddl_dir = File::Spec->catdir ('t', 'var');
+my $ddl_dir = dir ('t', 'var');
+mkdir ($ddl_dir) unless -d $ddl_dir;
+
 my $fn = {
-    v1 => File::Spec->catfile($ddl_dir, 'DBICVersion-Schema-1.0-MySQL.sql'),
-    v2 => File::Spec->catfile($ddl_dir, 'DBICVersion-Schema-2.0-MySQL.sql'),
-    trans => File::Spec->catfile($ddl_dir, 'DBICVersion-Schema-1.0-2.0-MySQL.sql'),
+    v1 => $ddl_dir->file ('DBICVersion-Schema-1.0-MySQL.sql'),
+    v2 => $ddl_dir->file ('DBICVersion-Schema-2.0-MySQL.sql'),
+    v3 => $ddl_dir->file ('DBICVersion-Schema-3.0-MySQL.sql'),
+    trans_v12 => $ddl_dir->file ('DBICVersion-Schema-1.0-2.0-MySQL.sql'),
+    trans_v23 => $ddl_dir->file ('DBICVersion-Schema-2.0-3.0-MySQL.sql'),
 };
 
-use lib qw(t/lib);
-use DBICTest; # do not remove even though it is not used
+my $schema_v1 = DBICVersion::Schema->connect($dsn, $user, $pass, { ignore_version => 1 });
+eval { $schema_v1->storage->dbh->do('drop table ' . $version_table_name) };
+eval { $schema_v1->storage->dbh->do('drop table ' . $old_table_name) };
 
-use_ok('DBICVersionOrig');
-
-my $schema_orig = DBICVersion::Schema->connect($dsn, $user, $pass, { ignore_version => 1 });
-eval { $schema_orig->storage->dbh->do('drop table ' . $version_table_name) };
-eval { $schema_orig->storage->dbh->do('drop table ' . $old_table_name) };
-
-is($schema_orig->ddl_filename('MySQL', '1.0', $ddl_dir), $fn->{v1}, 'Filename creation working');
+is($schema_v1->ddl_filename('MySQL', '1.0', $ddl_dir), $fn->{v1}, 'Filename creation working');
 unlink( $fn->{v1} ) if ( -e $fn->{v1} );
-$schema_orig->create_ddl_dir('MySQL', undef, $ddl_dir);
+$schema_v1->create_ddl_dir('MySQL', undef, $ddl_dir);
 
 ok(-f $fn->{v1}, 'Created DDL file');
-$schema_orig->deploy({ add_drop_table => 1 });
+$schema_v1->deploy({ add_drop_table => 1 });
 
-my $tvrs = $schema_orig->{vschema}->resultset('Table');
-is($schema_orig->_source_exists($tvrs), 1, 'Created schema from DDL file');
+my $tvrs = $schema_v1->{vschema}->resultset('Table');
+is($schema_v1->_source_exists($tvrs), 1, 'Created schema from DDL file');
 
 # loading a new module defining a new version of the same table
 DBICVersion::Schema->_unregister_source ('Table');
-eval "use DBICVersionNew";
+use_ok('DBICVersion_v2');
 
-my $schema_upgrade = DBICVersion::Schema->connect($dsn, $user, $pass, { ignore_version => 1 });
+my $schema_v2 = DBICVersion::Schema->connect($dsn, $user, $pass, { ignore_version => 1 });
 {
   unlink($fn->{v2});
-  unlink($fn->{trans});
+  unlink($fn->{trans_v12});
 
-  is($schema_upgrade->get_db_version(), '1.0', 'get_db_version ok');
-  is($schema_upgrade->schema_version, '2.0', 'schema version ok');
-  $schema_upgrade->create_ddl_dir('MySQL', '2.0', $ddl_dir, '1.0');
-  ok(-f $fn->{trans}, 'Created DDL file');
+  is($schema_v2->get_db_version(), '1.0', 'get_db_version ok');
+  is($schema_v2->schema_version, '2.0', 'schema version ok');
+  $schema_v2->create_ddl_dir('MySQL', '2.0', $ddl_dir, '1.0');
+  ok(-f $fn->{trans_v12}, 'Created DDL file');
 
-  {
-    my $w;
-    local $SIG{__WARN__} = sub { $w = shift };
+  warnings_like (
+    sub { $schema_v2->upgrade() },
+    qr/DB version .+? is lower than the schema version/,
+    'Warn before upgrade',
+  );
 
-    sleep 1;    # remove this when TODO below is completed
+  is($schema_v2->get_db_version(), '2.0', 'db version number upgraded');
 
-    $schema_upgrade->upgrade();
-    like ($w, qr/Attempting upgrade\.$/, 'Warn before upgrade');
-  }
+  lives_ok ( sub {
+    $schema_v2->storage->dbh->do('select NewVersionName from TestVersion');
+  }, 'new column created' );
 
-  is($schema_upgrade->get_db_version(), '2.0', 'db version number upgraded');
-
-  eval {
-    $schema_upgrade->storage->dbh->do('select NewVersionName from TestVersion');
-  };
-  is($@, '', 'new column created');
-
-  # should overwrite files and warn about it
-  my @w;
-  local $SIG{__WARN__} = sub { 
-    if ($_[0] =~ /Overwriting existing/) {
-      push @w, $_[0];
-    }
-    else {
-      warn @_;
-    }
-  };
-  $schema_upgrade->create_ddl_dir('MySQL', '2.0', $ddl_dir, '1.0');
-
-  is (2, @w, 'A warning generated for both the DDL and the diff');
-  like ($w[0], qr/Overwriting existing DDL file - $fn->{v2}/, 'New version DDL overwrite warning');
-  like ($w[1], qr/Overwriting existing diff file - $fn->{trans}/, 'Upgrade diff overwrite warning');
+  warnings_exist (
+    sub { $schema_v2->create_ddl_dir('MySQL', '2.0', $ddl_dir, '1.0') },
+    [
+      qr/Overwriting existing DDL file - $fn->{v2}/,
+      qr/Overwriting existing diff file - $fn->{trans_v12}/,
+    ],
+    'An overwrite warning generated for both the DDL and the diff',
+  );
 }
 
 {
   my $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass);
-  eval {
+  lives_ok (sub {
     $schema_version->storage->dbh->do('select * from ' . $version_table_name);
-  };
-  is($@, '', 'version table exists');
+  }, 'version table exists');
 
-  eval {
+  lives_ok (sub {
     $schema_version->storage->dbh->do("DROP TABLE IF EXISTS $old_table_name");
     $schema_version->storage->dbh->do("RENAME TABLE $version_table_name TO $old_table_name");
-  };
-  is($@, '', 'versions table renamed to old style table');
+  }, 'versions table renamed to old style table');
 
   $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass);
   is($schema_version->get_db_version, '2.0', 'transition from old table name to new okay');
 
-  eval {
+  dies_ok (sub {
     $schema_version->storage->dbh->do('select * from ' . $old_table_name);
-  };
-  ok($@, 'old version table gone');
+  }, 'old version table gone');
 
 }
 
+# repeat the v1->v2 process for v2->v3 before testing v1->v3
+DBICVersion::Schema->_unregister_source ('Table');
+use_ok('DBICVersion_v3');
+
+my $schema_v3 = DBICVersion::Schema->connect($dsn, $user, $pass, { ignore_version => 1 });
+{
+  unlink($fn->{v3});
+  unlink($fn->{trans_v23});
+
+  is($schema_v3->get_db_version(), '2.0', 'get_db_version 2.0 ok');
+  is($schema_v3->schema_version, '3.0', 'schema version 3.0 ok');
+  $schema_v3->create_ddl_dir('MySQL', '3.0', $ddl_dir, '2.0');
+  ok(-f $fn->{trans_v23}, 'Created DDL 2.0 -> 3.0 file');
+
+  warnings_exist (
+    sub { $schema_v3->upgrade() },
+    qr/DB version .+? is lower than the schema version/,
+    'Warn before upgrade',
+  );
+
+  is($schema_v3->get_db_version(), '3.0', 'db version number upgraded');
+
+  lives_ok ( sub {
+    $schema_v3->storage->dbh->do('select ExtraColumn from TestVersion');
+  }, 'new column created');
+}
+
+# now put the v1 schema back again
+{
+  # drop all the tables...
+  eval { $schema_v1->storage->dbh->do('drop table ' . $version_table_name) };
+  eval { $schema_v1->storage->dbh->do('drop table ' . $old_table_name) };
+  eval { $schema_v1->storage->dbh->do('drop table TestVersion') };
+
+  {
+    local $DBICVersion::Schema::VERSION = '1.0';
+    $schema_v1->deploy;
+  }
+  is($schema_v1->get_db_version(), '1.0', 'get_db_version 1.0 ok');
+}
+
+# attempt v1 -> v3 upgrade
+{
+  local $SIG{__WARN__} = sub { warn $_[0] if $_[0] !~ /Attempting upgrade\.$/ };
+  $schema_v3->upgrade();
+  is($schema_v3->get_db_version(), '3.0', 'db version number upgraded');
+}
+
 # check behaviour of DBIC_NO_VERSION_CHECK env var and ignore_version connect attr
 {
   my $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass);
@@ -130,55 +173,49 @@
   };
 
 
-  my $warn = '';
-  local $SIG{__WARN__} = sub { $warn = shift };
-  $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass);
-  like($warn, qr/Your DB is currently unversioned/, 'warning detected without env var or attr');
+  warnings_like ( sub {
+    $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass);
+  }, qr/Your DB is currently unversioned/, 'warning detected without env var or attr' );
 
+  warnings_like ( sub {
+    $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass, { ignore_version => 1 });
+  },  [], 'warning not detected with attr set');
 
-  # should warn
-  $warn = '';
-  $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass, { ignore_version => 1 });
-  is($warn, '', 'warning not detected with attr set');
-  # should not warn
 
   local $ENV{DBIC_NO_VERSION_CHECK} = 1;
-  $warn = '';
-  $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass);
-  is($warn, '', 'warning not detected with env var set');
-  # should not warn
+  warnings_like ( sub {
+    $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass);
+  }, [], 'warning not detected with env var set');
 
-  $warn = '';
-  $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass, { ignore_version => 0 });
-  like($warn, qr/Your DB is currently unversioned/, 'warning detected without env var or attr');
-  # should warn
+  warnings_like ( sub {
+    $schema_version = DBICVersion::Schema->connect($dsn, $user, $pass, { ignore_version => 0 });
+  }, qr/Your DB is currently unversioned/, 'warning detected without env var or attr');
 }
 
 # attempt a deploy/upgrade cycle within one second
-TODO: {
+{
+  eval { $schema_v2->storage->dbh->do('drop table ' . $version_table_name) };
+  eval { $schema_v2->storage->dbh->do('drop table ' . $old_table_name) };
+  eval { $schema_v2->storage->dbh->do('drop table TestVersion') };
 
-  local $TODO = 'To fix this properly the table must be extended with an autoinc column, mst will not accept anything less';
-
-  eval { $schema_orig->storage->dbh->do('drop table ' . $version_table_name) };
-  eval { $schema_orig->storage->dbh->do('drop table ' . $old_table_name) };
-  eval { $schema_orig->storage->dbh->do('drop table TestVersion') };
-
   # this attempts to sleep until the turn of the second
   my $t = time();
   sleep (int ($t) + 1 - $t);
-  diag ('Fast deploy/upgrade start: ', time() );
+  note ('Fast deploy/upgrade start: ', time() );
 
   {
-    local $DBICVersion::Schema::VERSION = '1.0';
-    $schema_orig->deploy;
+    local $DBICVersion::Schema::VERSION = '2.0';
+    $schema_v2->deploy;
   }
 
   local $SIG{__WARN__} = sub { warn $_[0] if $_[0] !~ /Attempting upgrade\.$/ };
-  $schema_upgrade->upgrade();
+  $schema_v2->upgrade();
 
-  is($schema_upgrade->get_db_version(), '2.0', 'Fast deploy/upgrade');
+  is($schema_v2->get_db_version(), '3.0', 'Fast deploy/upgrade');
 };
 
 unless ($ENV{DBICTEST_KEEP_VERSIONING_DDL}) {
     unlink $_ for (values %$fn);
 }
+
+done_testing;

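Condensed, the deploy/upgrade cycle the reworked test now walks through twice (1.0 -> 2.0 -> 3.0) amounts to the following, using the test's own variables:

    # deploy a fresh 1.0 database and write out its DDL
    $schema_v1->create_ddl_dir('MySQL', undef, $ddl_dir);
    $schema_v1->deploy({ add_drop_table => 1 });

    # after bumping the schema class to 2.0: produce the 1.0 -> 2.0 diff ...
    $schema_v2->create_ddl_dir('MySQL', '2.0', $ddl_dir, '1.0');

    # ... and apply it (warns that the DB version is lower than the schema version)
    $schema_v2->upgrade;
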
Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/95sql_maker.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/95sql_maker.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/95sql_maker.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,57 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use Test::Exception;
-
-use lib qw(t/lib);
-use DBIC::SqlMakerTest;
-
-plan tests => 4;
-
-use_ok('DBICTest');
-
-my $schema = DBICTest->init_schema();
-
-my $sql_maker = $schema->storage->sql_maker;
-
-
-{
-  my ($sql, @bind) = $sql_maker->insert(
-            'lottery',
-            {
-              'day' => '2008-11-16',
-              'numbers' => [13, 21, 34, 55, 89]
-            }
-  );
-
-  is_same_sql_bind(
-    $sql, \@bind,
-    q/INSERT INTO lottery (day, numbers) VALUES (?, ?)/,
-      [ ['day' => '2008-11-16'], ['numbers' => [13, 21, 34, 55, 89]] ],
-    'sql_maker passes arrayrefs in insert'
-  );
-
-
-  ($sql, @bind) = $sql_maker->update(
-            'lottery',
-            {
-              'day' => '2008-11-16',
-              'numbers' => [13, 21, 34, 55, 89]
-            }
-  );
-
-  is_same_sql_bind(
-    $sql, \@bind,
-    q/UPDATE lottery SET day = ?, numbers = ?/,
-      [ ['day' => '2008-11-16'], ['numbers' => [13, 21, 34, 55, 89]] ],
-    'sql_maker passes arrayrefs in update'
-  );
-}
-
-# Make sure the carp/croak override in SQLA works (via SQLAHacks)
-my $file = __FILE__;
-$file = "\Q$file\E";
-throws_ok (sub {
-  $schema->resultset ('Artist')->search ({}, { order_by => { -asc => 'stuff', -desc => 'staff' } } )->as_query;
-}, qr/$file/, 'Exception correctly croak()ed');

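For reference, the arrayref pass-through the deleted test asserted: composite values handed to insert()/update() travel to the bind list untouched, one placeholder per column:

    my ($sql, @bind) = $schema->storage->sql_maker->insert(
      'lottery',
      { day => '2008-11-16', numbers => [ 13, 21, 34, 55, 89 ] },
    );
    # INSERT INTO lottery (day, numbers) VALUES (?, ?)
    # with the arrayref arriving intact as the second bind value
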
Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/95sql_maker_quote.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/95sql_maker_quote.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/95sql_maker_quote.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,310 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-
-use lib qw(t/lib);
-use DBIC::SqlMakerTest;
-
-BEGIN {
-    eval "use DBD::SQLite";
-    plan $@
-        ? ( skip_all => 'needs DBD::SQLite for testing' )
-        : ( tests => 12 );
-}
-
-use_ok('DBICTest');
-
-my $schema = DBICTest->init_schema();
-
-my $sql_maker = $schema->storage->sql_maker;
-
-$sql_maker->quote_char('`');
-$sql_maker->name_sep('.');
-
-my ($sql, @bind) = $sql_maker->select(
-          [
-            {
-              'me' => 'cd'
-            },
-            [
-              {
-                'artist' => 'artist',
-                '-join_type' => ''
-              },
-              {
-                'artist.artistid' => 'me.artist'
-              }
-            ]
-          ],
-          [
-            {
-              'count' => '*'
-            }
-          ],
-          {
-            'artist.name' => 'Caterwauler McCrae',
-            'me.year' => 2001
-          },
-          [],
-          undef,
-          undef
-);
-
-is_same_sql_bind(
-  $sql, \@bind,
-  q/SELECT COUNT( * ) FROM `cd` `me`  JOIN `artist` `artist` ON ( `artist`.`artistid` = `me`.`artist` ) WHERE ( `artist`.`name` = ? AND `me`.`year` = ? )/, [ ['artist.name' => 'Caterwauler McCrae'], ['me.year' => 2001] ],
-  'got correct SQL and bind parameters for count query with quoting'
-);
-
-
-($sql, @bind) = $sql_maker->select(
-          [
-            {
-              'me' => 'cd'
-            }
-          ],
-          [
-            'me.cdid',
-            'me.artist',
-            'me.title',
-            'me.year'
-          ],
-          undef,
-          'year DESC',
-          undef,
-          undef
-);
-
-is_same_sql_bind(
-  $sql, \@bind,
-  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year` FROM `cd` `me` ORDER BY `year DESC`/, [],
-  'scalar ORDER BY okay (single value)'
-);
-
-
-($sql, @bind) = $sql_maker->select(
-          [
-            {
-              'me' => 'cd'
-            }
-          ],
-          [
-            'me.cdid',
-            'me.artist',
-            'me.title',
-            'me.year'
-          ],
-          undef,
-          [
-            'year DESC',
-            'title ASC'
-          ],
-          undef,
-          undef
-);
-
-is_same_sql_bind(
-  $sql, \@bind,
-  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year` FROM `cd` `me` ORDER BY `year DESC`, `title ASC`/, [],
-  'scalar ORDER BY okay (multiple values)'
-);
-
-{
-  ($sql, @bind) = $sql_maker->select(
-            [
-              {
-                'me' => 'cd'
-              }
-            ],
-            [
-              'me.cdid',
-              'me.artist',
-              'me.title',
-              'me.year'
-            ],
-            undef,
-            { -desc => 'year' },
-            undef,
-            undef
-  );
-
-  is_same_sql_bind(
-    $sql, \@bind,
-    q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year` FROM `cd` `me` ORDER BY `year` DESC/, [],
-    'hashref ORDER BY okay (single value)'
-  );
-
-
-  ($sql, @bind) = $sql_maker->select(
-            [
-              {
-                'me' => 'cd'
-              }
-            ],
-            [
-              'me.cdid',
-              'me.artist',
-              'me.title',
-              'me.year'
-            ],
-            undef,
-            [
-              { -desc => 'year' },
-              { -asc => 'title' }
-            ],
-            undef,
-            undef
-  );
-
-  is_same_sql_bind(
-    $sql, \@bind,
-    q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year` FROM `cd` `me` ORDER BY `year` DESC, `title` ASC/, [],
-    'hashref ORDER BY okay (multiple values)'
-  );
-
-}
-
-
-($sql, @bind) = $sql_maker->select(
-          [
-            {
-              'me' => 'cd'
-            }
-          ],
-          [
-            'me.cdid',
-            'me.artist',
-            'me.title',
-            'me.year'
-          ],
-          undef,
-          \'year DESC',
-          undef,
-          undef
-);
-
-is_same_sql_bind(
-  $sql, \@bind,
-  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year` FROM `cd` `me` ORDER BY year DESC/, [],
-  'did not quote ORDER BY with scalarref (single value)'
-);
-
-
-($sql, @bind) = $sql_maker->select(
-          [
-            {
-              'me' => 'cd'
-            }
-          ],
-          [
-            'me.cdid',
-            'me.artist',
-            'me.title',
-            'me.year'
-          ],
-          undef,
-          [
-            \'year DESC',
-            \'title ASC'
-          ],
-          undef,
-          undef
-);
-
-is_same_sql_bind(
-  $sql, \@bind,
-  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year` FROM `cd` `me` ORDER BY year DESC, title ASC/, [],
-  'did not quote ORDER BY with scalarref (multiple values)'
-);
-
-
-($sql, @bind) = $sql_maker->update(
-          'group',
-          {
-            'order' => '12',
-            'name' => 'Bill'
-          }
-);
-
-is_same_sql_bind(
-  $sql, \@bind,
-  q/UPDATE `group` SET `name` = ?, `order` = ?/, [ ['name' => 'Bill'], ['order' => '12'] ],
-  'quoted table names for UPDATE'
-);
-
-{
-  ($sql, @bind) = $sql_maker->select(
-        [
-          {
-            'me' => 'cd'
-          }
-        ],
-        [
-          'me.*'
-        ],
-        undef,
-        [],
-        undef,
-        undef    
-  );
-
-  is_same_sql_bind(
-    $sql, \@bind,
-    q/SELECT `me`.* FROM `cd` `me`/, [],
-    'select attr with me.* is right'
-  );
-}
-
-
-$sql_maker->quote_char([qw/[ ]/]);
-
-($sql, @bind) = $sql_maker->select(
-          [
-            {
-              'me' => 'cd'
-            },
-            [
-              {
-                'artist' => 'artist',
-                '-join_type' => ''
-              },
-              {
-                'artist.artistid' => 'me.artist'
-              }
-            ]
-          ],
-          [
-            {
-              'count' => '*'
-            }
-          ],
-          {
-            'artist.name' => 'Caterwauler McCrae',
-            'me.year' => 2001
-          },
-          [],
-          undef,
-          undef
-);
-
-is_same_sql_bind(
-  $sql, \@bind,
-  q/SELECT COUNT( * ) FROM [cd] [me]  JOIN [artist] [artist] ON ( [artist].[artistid] = [me].[artist] ) WHERE ( [artist].[name] = ? AND [me].[year] = ? )/, [ ['artist.name' => 'Caterwauler McCrae'], ['me.year' => 2001] ],
-  'got correct SQL and bind parameters for count query with bracket quoting'
-);
-
-
-($sql, @bind) = $sql_maker->update(
-          'group',
-          {
-            'order' => '12',
-            'name' => 'Bill'
-          }
-);
-
-is_same_sql_bind(
-  $sql, \@bind,
-  q/UPDATE [group] SET [name] = ?, [order] = ?/, [ ['name' => 'Bill'], ['order' => '12'] ],
-  'bracket quoted table names for UPDATE'
-);

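The quoting knobs the deleted test drove remain available directly on the storage's SQL maker; a two-line sketch:

    my $sql_maker = $schema->storage->sql_maker;
    $sql_maker->quote_char('`');        # or [qw/[ ]/] for bracket-style quoting
    $sql_maker->name_sep('.');
    # SELECT `me`.`cdid`, ... FROM `cd` `me` ORDER BY `year` DESC
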
Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/98savepoints.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/98savepoints.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/98savepoints.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -8,11 +8,11 @@
 
 my ($create_sql, $dsn, $user, $pass);
 
-if (exists $ENV{DBICTEST_PG_DSN}) {
+if ($ENV{DBICTEST_PG_DSN}) {
   ($dsn, $user, $pass) = @ENV{map { "DBICTEST_PG_${_}" } qw/DSN USER PASS/};
 
   $create_sql = "CREATE TABLE artist (artistid serial PRIMARY KEY, name VARCHAR(100), rank INTEGER NOT NULL DEFAULT '13', charfield CHAR(10))";
-} elsif (exists $ENV{DBICTEST_MYSQL_DSN}) {
+} elsif ($ENV{DBICTEST_MYSQL_DSN}) {
   ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MYSQL_${_}" } qw/DSN USER PASS/};
 
   $create_sql = "CREATE TABLE artist (artistid INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, name VARCHAR(100), rank INTEGER NOT NULL DEFAULT '13', charfield CHAR(10)) ENGINE=InnoDB";

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/99dbic_sqlt_parser.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/99dbic_sqlt_parser.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/99dbic_sqlt_parser.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,81 +1,160 @@
-#!/usr/bin/perl
 use strict;
 use warnings;
+
 use Test::More;
+use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
+use DBICTest::Schema;
+use Scalar::Util ();
 
-
 BEGIN {
-    eval "use SQL::Translator 0.09003;";
-    if ($@) {
-        plan skip_all => 'needs SQL::Translator 0.09003 for testing';
-    }
+  require DBIx::Class;
+  plan skip_all =>
+      'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy')
+    unless DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')
 }
 
+# Test for SQLT-related leaks
+{
+  my $s = DBICTest::Schema->clone;
+  create_schema ({ schema => $s });
+  Scalar::Util::weaken ($s);
+
+  ok (!$s, 'Schema not leaked');
+}
+
+
 my $schema = DBICTest->init_schema();
 # Dummy was yanked out by the sqlt hook test
+# CustomSql tests the horrific/deprecated ->name(\$sql) hack
 # YearXXXXCDs are views
-my @sources = grep { $_ ne 'Dummy' && $_ !~ /^Year\d{4}CDs$/ } 
-                $schema->sources;
+#
+my @sources = grep
+  { $_ !~ /^ (?: Dummy | CustomSql | Year\d{4}CDs ) $/x }
+  $schema->sources
+;
 
-plan tests => ( @sources * 3);
+my $idx_exceptions = {
+    'Artwork'       => -1,
+    'ForceForeign'  => -1,
+    'LinerNotes'    => -1,
+    'TwoKeys'       => -1, # TwoKeys has the index turned off on the rel def
+};
 
-{ 
-	my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { } } });
+{
+  my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { } } });
 
-	foreach my $source (@sources) {
-		my $table = $sqlt_schema->get_table($schema->source($source)->from);
+  foreach my $source_name (@sources) {
+    my $table = get_table($sqlt_schema, $schema, $source_name);
 
-		my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
-		my @indices = $table->get_indices;
-		my $index_count = scalar(@indices);
-    $index_count++ if ($source eq 'TwoKeys'); # TwoKeys has the index turned off on the rel def
-		is($index_count, $fk_count, "correct number of indices for $source with no args");
-	}
+    my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
+    $fk_count += $idx_exceptions->{$source_name} || 0;
+    my @indices = $table->get_indices;
+
+    my $index_count = scalar(@indices);
+    is($index_count, $fk_count, "correct number of indices for $source_name with no args");
+
+    for my $index (@indices) {
+        my $source = $schema->source($source_name);
+        my $pk_test = join("\x00", $source->primary_columns);
+        my $idx_test = join("\x00", $index->fields);
+        isnt ( $pk_test, $idx_test, "no additional index for the primary columns exists in $source_name");
+    }
+  }
 }
 
-{ 
-	my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 1 } } });
+{
+  my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 1 } } });
 
-	foreach my $source (@sources) {
-		my $table = $sqlt_schema->get_table($schema->source($source)->from);
+  foreach my $source_name (@sources) {
+    my $table = get_table($sqlt_schema, $schema, $source_name);
 
-		my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
-		my @indices = $table->get_indices;
-		my $index_count = scalar(@indices);
-    $index_count++ if ($source eq 'TwoKeys'); # TwoKeys has the index turned off on the rel def
-		is($index_count, $fk_count, "correct number of indices for $source with add_fk_index => 1");
-	}
+    my $fk_count = scalar(grep { $_->type eq 'FOREIGN KEY' } $table->get_constraints);
+    $fk_count += $idx_exceptions->{$source_name} || 0;
+    my @indices = $table->get_indices;
+    my $index_count = scalar(@indices);
+    is($index_count, $fk_count, "correct number of indices for $source_name with add_fk_index => 1");
+  }
 }
 
-{ 
-	my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 0 } } });
+{
+  my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { add_fk_index => 0 } } });
 
-	foreach my $source (@sources) {
-		my $table = $sqlt_schema->get_table($schema->source($source)->from);
+  foreach my $source (@sources) {
+    my $table = get_table($sqlt_schema, $schema, $source);
 
-		my @indices = $table->get_indices;
-		my $index_count = scalar(@indices);
-		is($index_count, 0, "correct number of indices for $source with add_fk_index => 0");
-	}
+    my @indices = $table->get_indices;
+    my $index_count = scalar(@indices);
+    is($index_count, 0, "correct number of indices for $source with add_fk_index => 0");
+  }
 }
 
+{
+    {
+        package # hide from PAUSE
+            DBICTest::Schema::NoViewDefinition;
+
+        use base qw/DBICTest::BaseResult/;
+
+        __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
+        __PACKAGE__->table('noviewdefinition');
+
+        1;
+    }
+
+    my $schema_invalid_view = $schema->clone;
+    $schema_invalid_view->register_class('NoViewDefinition', 'DBICTest::Schema::NoViewDefinition');
+
+    throws_ok { create_schema({ schema => $schema_invalid_view }) }
+        qr/view noviewdefinition is missing a view_definition/,
+        'parser detects views missing a view_definition';
+}
+
+lives_ok (sub {
+  my $sqlt_schema = create_schema ({
+    schema => $schema,
+    args => {
+      parser_args => {
+        sources => ['CD']
+      },
+    },
+  });
+
+  is_deeply (
+    [$sqlt_schema->get_tables ],
+    ['cd'],
+    'sources limiting with relationships works',
+  );
+
+});
+
+done_testing;
+
 sub create_schema {
-	my $args = shift;
+  my $args = shift;
 
-	my $schema = $args->{schema};
-	my $additional_sqltargs = $args->{args} || {};
+  my $schema = $args->{schema};
+  my $additional_sqltargs = $args->{args} || {};
 
-	my $sqltargs = {
-		add_drop_table => 1, 
-		ignore_constraint_names => 1,
-		ignore_index_names => 1,
-		%{$additional_sqltargs}
-		};
+  my $sqltargs = {
+    add_drop_table => 1, 
+    ignore_constraint_names => 1,
+    ignore_index_names => 1,
+    %{$additional_sqltargs}
+  };
 
-	my $sqlt = SQL::Translator->new( $sqltargs );
+  my $sqlt = SQL::Translator->new( $sqltargs );
 
-	$sqlt->parser('SQL::Translator::Parser::DBIx::Class');
-	return $sqlt->translate({ data => $schema }) or die $sqlt->error;
+  $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
+  return $sqlt->translate({ data => $schema }) || die $sqlt->error;
 }
+
+sub get_table {
+    my ($sqlt_schema, $schema, $source) = @_;
+
+    my $table_name = $schema->source($source)->from;
+    $table_name    = $$table_name if ref $table_name;
+
+    return $sqlt_schema->get_table($table_name);
+}
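
For reference, the leak check at the top of this file relies on Scalar::Util::weaken
demoting the test's reference to a weak one. A minimal sketch of the pattern
(My::Thing and exercise() are hypothetical stand-ins for the schema and the code
under test):

    use Scalar::Util ();
    use Test::More;

    my $obj = My::Thing->new;   # hypothetical object under test
    exercise($obj);             # hypothetical helper - must not keep a strong ref
    Scalar::Util::weaken($obj);

    # with no strong references left, the object is destroyed at once
    # and the weakened variable collapses to undef
    ok(!$obj, 'object not leaked');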

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/admin/01load.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/admin/01load.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/admin/01load.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,15 @@
+use strict;
+use warnings;
+
+use Test::More;
+
+BEGIN {
+    require DBIx::Class;
+    plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for('admin')
+      unless DBIx::Class::Optional::Dependencies->req_ok_for('admin');
+}
+
+use_ok 'DBIx::Class::Admin';
+
+
+done_testing;
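
The BEGIN-time gate above is the idiom used by all of the new admin tests: a
dependency group is probed with req_ok_for() and the whole file is skipped with a
message built by req_missing_for(). A minimal sketch, assuming any of the groups
this commit's Optional::Dependencies defines ('admin', 'deploy', ...):

    use Test::More;

    BEGIN {
      require DBIx::Class;
      plan skip_all => 'Test needs '
          . DBIx::Class::Optional::Dependencies->req_missing_for('deploy')
        unless DBIx::Class::Optional::Dependencies->req_ok_for('deploy');
    }

    # ... tests exercising the optional dependencies go here ...

    done_testing;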

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/admin/02ddl.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/admin/02ddl.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/admin/02ddl.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,130 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use Test::Warn;
+
+BEGIN {
+    require DBIx::Class;
+    plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for('admin')
+      unless DBIx::Class::Optional::Dependencies->req_ok_for('admin');
+
+    plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for('deploy')
+      unless DBIx::Class::Optional::Dependencies->req_ok_for('deploy');
+}
+
+use lib qw(t/lib);
+use DBICTest;
+
+use Path::Class;
+
+use ok 'DBIx::Class::Admin';
+
+
+my $sql_dir = dir(qw/t var/);
+my @connect_info = DBICTest->_database(
+  no_deploy=>1,
+  no_populate=>1,
+  sqlite_use_file  => 1,
+);
+{ # create the schema
+
+# make sure we are clean
+clean_dir($sql_dir);
+
+
+my $admin = DBIx::Class::Admin->new(
+  schema_class=> "DBICTest::Schema",
+  sql_dir=> $sql_dir,
+  connect_info => \@connect_info, 
+);
+isa_ok ($admin, 'DBIx::Class::Admin', 'create the admin object');
+lives_ok { $admin->create('MySQL'); } 'Can create MySQL sql';
+lives_ok { $admin->create('SQLite'); } 'Can create SQLite sql';
+}
+
+{ # upgrade schema
+
+#my $schema = DBICTest->init_schema(
+#  no_deploy    => 1,
+#  no_populat    => 1,
+#  sqlite_use_file  => 1,
+#);
+
+clean_dir($sql_dir);
+require DBICVersion_v1;
+
+my $admin = DBIx::Class::Admin->new(
+  schema_class => 'DBICVersion::Schema', 
+  sql_dir =>  $sql_dir,
+  connect_info => \@connect_info,
+);
+
+my $schema = $admin->schema();
+
+lives_ok { $admin->create($schema->storage->sqlt_type(), {add_drop_table=>0}); } 'Can create DBICVersionOrig sql in ' . $schema->storage->sqlt_type;
+lives_ok { $admin->deploy() } 'Can deploy schema';
+
+# connect to now deployed schema
+lives_ok { $schema = DBICVersion::Schema->connect(@{$schema->storage->connect_info()}); } 'can connect to the deployed database';
+
+is($schema->get_db_version, $DBICVersion::Schema::VERSION, 'Schema deployed and versions match');
+
+
+require DBICVersion_v2;
+
+$admin = DBIx::Class::Admin->new(
+  schema_class => 'DBICVersion::Schema', 
+  sql_dir =>  $sql_dir,
+  connect_info => \@connect_info
+);
+
+lives_ok { $admin->create($schema->storage->sqlt_type(), {}, "1.0" ); } 'Can create diff for ' . $schema->storage->sqlt_type;
+{
+  local $SIG{__WARN__} = sub { warn $_[0] unless $_[0] =~ /DB version .+? is lower than the schema version/ };
+  lives_ok {$admin->upgrade();} 'upgrade the schema';
+}
+
+is($schema->get_db_version, $DBICVersion::Schema::VERSION, 'Schema and db versions match');
+
+}
+
+{ # install
+
+clean_dir($sql_dir);
+
+my $admin = DBIx::Class::Admin->new(
+  schema_class  => 'DBICVersion::Schema', 
+  sql_dir      => $sql_dir,
+  _confirm    => 1,
+  connect_info  => \@connect_info,
+);
+
+$admin->version("3.0");
+lives_ok { $admin->install(); } 'install schema version 3.0';
+is($admin->schema->get_db_version, "3.0", 'db thinks its version 3.0');
+dies_ok { $admin->install("4.0"); } 'cannot install over an already existing version';
+
+$admin->force(1);
+warnings_exist ( sub {
+  lives_ok { $admin->install("4.0") } 'can force install to allready existing version'
+}, qr/Forcing install may not be a good idea/, 'Force warning emitted' );
+is($admin->schema->get_db_version, "4.0", 'db thinks its version 4.0');
+#clean_dir($sql_dir);
+}
+
+sub clean_dir {
+  my ($dir) = @_;
+  $dir = $dir->resolve;
+  if ( ! -d $dir ) {
+    $dir->mkpath();
+  }
+  foreach my $file ($dir->children) {
+    # skip any hidden files
+    next if ($file =~ /^\./); 
+    unlink $file;
+  }
+}
+
+done_testing;
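
Condensed, the three blocks above exercise the new DBIx::Class::Admin DDL
lifecycle. A minimal sketch of the call sequence (the schema class and connect
info below are placeholders):

    use DBIx::Class::Admin;

    my $admin = DBIx::Class::Admin->new(
      schema_class => 'My::Schema',                        # placeholder
      sql_dir      => 't/var',
      connect_info => [ 'dbi:SQLite:dbname=t/var/my.db', '', '' ],
    );

    $admin->create('SQLite');               # generate DDL files into sql_dir
    $admin->deploy;                         # create the tables
    $admin->create('SQLite', {}, '1.0');    # generate a diff against version 1.0
    $admin->upgrade;                        # apply the diff to the database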

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/admin/03data.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/admin/03data.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/admin/03data.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,65 @@
+use strict;
+use warnings;
+
+use Test::More;
+
+use Test::Exception;
+use Test::Deep;
+
+BEGIN {
+    require DBIx::Class;
+    plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for('admin')
+      unless DBIx::Class::Optional::Dependencies->req_ok_for('admin');
+}
+
+use lib 't/lib';
+use DBICTest;
+
+use ok 'DBIx::Class::Admin';
+
+
+{ # test data manipulation functions
+
+  # create a DBICTest so we can steal its connect info
+  my $schema = DBICTest->init_schema(
+    sqlite_use_file => 1,
+  );
+
+  my $admin = DBIx::Class::Admin->new(
+    schema_class=> "DBICTest::Schema",
+    connect_info => $schema->storage->connect_info(),
+    quiet  => 1,
+    _confirm=>1,
+  );
+  isa_ok ($admin, 'DBIx::Class::Admin', 'create the admin object');
+
+  $admin->insert('Employee', { name => 'Matt' });
+  my $employees = $schema->resultset('Employee');
+  is ($employees->count(), 1, "insert okay" );
+
+  my $employee = $employees->find(1);
+  is($employee->name(),  'Matt', "insert valid" );
+
+  $admin->update('Employee', {name => 'Trout'}, {name => 'Matt'});
+
+  $employee = $employees->find(1);
+  is($employee->name(),  'Trout', "update Matt to Trout" );
+
+  $admin->insert('Employee', {name =>'Aran'});
+
+  my $expected_data = [ 
+    [$employee->result_source->columns() ],
+    [1,1,undef,undef,undef,'Trout'],
+    [2,2,undef,undef,undef,'Aran']
+  ];
+  my $data;
+  lives_ok { $data = $admin->select('Employee') } 'can retrieve data from the database';
+  cmp_deeply($data, $expected_data, 'DB matches what is expected');
+
+  $admin->delete('Employee', {name=>'Trout'});
+  my $del_rs  = $employees->search({name => 'Trout'});
+  is($del_rs->count(), 0, "delete Trout" );
+  is ($employees->count(), 1, "left Aran" );
+}
+
+done_testing;
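
The data helpers above are thin wrappers over the corresponding resultset
operations. A minimal sketch of the four calls as used in this test (note that
select() returns the column names as the first row):

    $admin->insert('Employee', { name => 'Matt' });
    $admin->update('Employee', { name => 'Trout' }, { name => 'Matt' });

    my $rows = $admin->select('Employee');  # [ [ @column_names ], [ @row1 ], ... ]

    $admin->delete('Employee', { name => 'Trout' });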

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/admin/10script.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/89dbicadmin.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/admin/10script.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/admin/10script.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,91 @@
+# vim: filetype=perl
+use strict;
+use warnings;
+
+use Test::More;
+use Config;
+use lib qw(t/lib);
+$ENV{PERL5LIB} = join ($Config{path_sep}, @INC);
+use DBICTest;
+
+
+BEGIN {
+    require DBIx::Class;
+    plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for('admin_script')
+      unless DBIx::Class::Optional::Dependencies->req_ok_for('admin_script');
+}
+
+my @json_backends = qw/XS JSON DWIW/;
+my $tests_per_run = 5;
+
+plan tests => $tests_per_run * @json_backends;
+
+for my $js (@json_backends) {
+
+    eval {JSON::Any->import ($js) };
+    SKIP: {
+        skip ("Json backend $js is not available, skip testing", $tests_per_run) if $@;
+
+        $ENV{JSON_ANY_ORDER} = $js;
+        eval { test_dbicadmin () };
+        diag $@ if $@;
+    }
+}
+
+sub test_dbicadmin {
+    my $schema = DBICTest->init_schema( sqlite_use_file => 1 );  # reinit a fresh db for every run
+
+    my $employees = $schema->resultset('Employee');
+
+    system( _prepare_system_args( qw|--op=insert --set={"name":"Matt"}| ) );
+    ok( ($employees->count()==1), "$ENV{JSON_ANY_ORDER}: insert count" );
+
+    my $employee = $employees->find(1);
+    ok( ($employee->name() eq 'Matt'), "$ENV{JSON_ANY_ORDER}: insert valid" );
+
+    system( _prepare_system_args( qw|--op=update --set={"name":"Trout"}| ) );
+    $employee = $employees->find(1);
+    ok( ($employee->name() eq 'Trout'), "$ENV{JSON_ANY_ORDER}: update" );
+
+    system( _prepare_system_args( qw|--op=insert --set={"name":"Aran"}| ) );
+
+    SKIP: {
+        skip ("MSWin32 doesn't support -| either", 1) if $^O eq 'MSWin32';
+
+        open(my $fh, "-|",  _prepare_system_args( qw|--op=select --attrs={"order_by":"name"}| ) ) or die $!;
+        my $data = do { local $/; <$fh> };
+        close($fh);
+        if (!ok( ($data=~/Aran.*Trout/s), "$ENV{JSON_ANY_ORDER}: select with attrs" )) {
+          diag ("data from select is $data")
+        };
+    }
+
+    system( _prepare_system_args( qw|--op=delete --where={"name":"Trout"}| ) );
+    ok( ($employees->count()==1), "$ENV{JSON_ANY_ORDER}: delete" );
+}
+
+# Why do we need this crap? Apparently MSWin32 cannot pass quotes through properly
+# (sometimes it will and sometimes it won't, depending on which compiler was used
+# to build perl). So we go the extra mile and escape all the quotes. We also can't
+# use ' instead of ", because JSON::XS (proudly) does not support "malformed JSON",
+# as the author calls it. Bleh.
+#
+sub _prepare_system_args {
+    my $perl = $^X;
+
+    my @args = (
+        qw|script/dbicadmin --quiet --schema=DBICTest::Schema --class=Employee|,
+        q|--connect=["dbi:SQLite:dbname=t/var/DBIxClass.db","","",{"AutoCommit":1}]|,
+        qw|--force|,
+        @_,
+    );
+
+    if ( $^O eq 'MSWin32' ) {
+        $perl = qq|"$perl"|;    # execution will fail if $^X contains paths
+        for (@args) {
+            $_ =~ s/"/\\"/g;
+        }
+    }
+
+    return ($perl, @args);
+}
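
A standalone illustration of the Win32 escaping performed above (the JSON
argument is just an example):

    my @args = ( q|--set={"name":"Matt"}| );
    if ($^O eq 'MSWin32') {
      s/"/\\"/g for @args;    # protect the JSON quotes from cmd.exe mangling
    }
    # on Win32 this yields --set={\"name\":\"Matt\"}; elsewhere it is unchanged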

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/bind/attribute.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/bind/attribute.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/bind/attribute.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -13,7 +13,7 @@
     eval "use DBD::SQLite";
     plan $@
         ? ( skip_all => 'needs DBD::SQLite for testing' )
-        : ( tests => 9 );
+        : ( tests => 13 );
 }
 
 my $where_bind = {
@@ -38,52 +38,80 @@
         ->search({ artistid => 1});
 
     is ( $rs->count, 1, 'where/bind first' );
-            
+
     $rs = $schema->resultset('Artist')->search({ artistid => 1})
         ->search({}, $where_bind);
 
     is ( $rs->count, 1, 'where/bind last' );
 }
 
-# More complex cases, based primarily on the Cookbook
-# "Arbitrary SQL through a custom ResultSource" technique,
-# which seems to be the only place the bind attribute is
-# documented.  Breaking this technique probably breaks existing
-# application code.
-my $source = DBICTest::Artist->result_source_instance;
-my $new_source = $source->new($source);
-$new_source->source_name('Complex');
+{
+  # More complex cases, based primarily on the Cookbook
+  # "Arbitrary SQL through a custom ResultSource" technique,
+  # which seems to be the only place the bind attribute is
+  # documented.  Breaking this technique probably breaks existing
+  # application code.
+  my $source = DBICTest::Artist->result_source_instance;
+  my $new_source = $source->new($source);
+  $new_source->source_name('Complex');
 
-$new_source->name(\<<'');
-( SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year 
-  FROM artist a
-  JOIN cd ON cd.artist = a.artistid
-  WHERE cd.year = ?)
+  $new_source->name(\<<'');
+  ( SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year 
+    FROM artist a
+    JOIN cd ON cd.artist = a.artistid
+    WHERE cd.year = ?)
 
-$schema->register_extra_source('Complex' => $new_source);
+  $schema->register_extra_source('Complex' => $new_source);
 
-$rs = $schema->resultset('Complex')->search({}, { bind => [ 1999 ] });
-is ( $rs->count, 1, 'cookbook arbitrary sql example' );
+  $rs = $schema->resultset('Complex')->search({}, { bind => [ 1999 ] });
+  is ( $rs->count, 1, 'cookbook arbitrary sql example' );
 
-$rs = $schema->resultset('Complex')->search({ 'artistid' => 1 }, { bind => [ 1999 ] });
-is ( $rs->count, 1, '...coobook + search condition' );
+  $rs = $schema->resultset('Complex')->search({ 'artistid' => 1 }, { bind => [ 1999 ] });
+  is ( $rs->count, 1, '...cookbook + search condition' );
 
-$rs = $schema->resultset('Complex')->search({}, { bind => [ 1999 ] })
-    ->search({ 'artistid' => 1 });
-is ( $rs->count, 1, '...cookbook (bind first) + chained search' );
+  $rs = $schema->resultset('Complex')->search({}, { bind => [ 1999 ] })
+      ->search({ 'artistid' => 1 });
+  is ( $rs->count, 1, '...cookbook (bind first) + chained search' );
 
-{
   $rs = $schema->resultset('Complex')->search({}, { bind => [ 1999 ] })->search({}, { where => \"title LIKE ?", bind => [ 'Spoon%' ] });
   is_same_sql_bind(
     $rs->as_query,
-    "(SELECT me.artistid, me.name, me.rank, me.charfield FROM (SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year FROM artist a JOIN cd ON cd.artist = a.artistid WHERE cd.year = ?) WHERE title LIKE ?)",
+    "(SELECT me.artistid, me.name, me.rank, me.charfield FROM (SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year FROM artist a JOIN cd ON cd.artist = a.artistid WHERE cd.year = ?) me WHERE title LIKE ?)",
     [
       [ '!!dummy' => '1999' ], 
       [ '!!dummy' => 'Spoon%' ]
     ],
     'got correct SQL'
-);
+  );
+}
 
+{
+  # More complex cases, based primarily on the Cookbook
+  # "Arbitrary SQL through a custom ResultSource" technique,
+  # which seems to be the only place the bind attribute is
+  # documented.  Breaking this technique probably breaks existing
+  # application code.
+
+  $rs = $schema->resultset('CustomSql')->search({}, { bind => [ 1999 ] });
+  is ( $rs->count, 1, 'cookbook arbitrary sql example (in separate file)' );
+
+  $rs = $schema->resultset('CustomSql')->search({ 'artistid' => 1 }, { bind => [ 1999 ] });
+  is ( $rs->count, 1, '...cookbook (in separate file) + search condition' );
+
+  $rs = $schema->resultset('CustomSql')->search({}, { bind => [ 1999 ] })
+      ->search({ 'artistid' => 1 });
+  is ( $rs->count, 1, '...cookbook (bind first, in separate file) + chained search' );
+
+  $rs = $schema->resultset('CustomSql')->search({}, { bind => [ 1999 ] })->search({}, { where => \"title LIKE ?", bind => [ 'Spoon%' ] });
+  is_same_sql_bind(
+    $rs->as_query,
+    "(SELECT me.artistid, me.name, me.rank, me.charfield FROM (SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year FROM artist a JOIN cd ON cd.artist = a.artistid WHERE cd.year = ?) me WHERE title LIKE ?)",
+    [
+      [ '!!dummy' => '1999' ], 
+      [ '!!dummy' => 'Spoon%' ]
+    ],
+    'got correct SQL (cookbook arbitrary SQL, in separate file)'
+  );
 }
 
 TODO: {
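
For readers unfamiliar with the Cookbook technique being regression-tested here,
a minimal sketch using the 'Complex' source registered above:

    # the virtual source wraps arbitrary SQL containing one placeholder,
    # and the bind attribute supplies its value at search time
    my $rs = $schema->resultset('Complex')->search({}, { bind => [ 1999 ] });

    # chained searches retain the original binds while adding normal conditions
    $rs = $rs->search({ artistid => 1 });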

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/bind/bindtype_columns.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/bind/bindtype_columns.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/bind/bindtype_columns.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -9,7 +9,7 @@
 
 plan skip_all => 'Set $ENV{DBICTEST_PG_DSN}, _USER and _PASS to run this test'
   unless ($dsn && $dbuser);
-  
+
 plan tests => 6;
 
 my $schema = DBICTest::Schema->connection($dsn, $dbuser, $dbpass, { AutoCommit => 1 });
@@ -32,7 +32,7 @@
     ],{ RaiseError => 1, PrintError => 1 });
 }
 
-my $big_long_string	= "\x00\x01\x02 abcd" x 125000;
+my $big_long_string = "\x00\x01\x02 abcd" x 125000;
 
 my $new;
 # test inserting a row
@@ -40,7 +40,7 @@
   $new = $schema->resultset('BindType')->create({ bytea => $big_long_string });
 
   ok($new->id, "Created a bytea row");
-  is($new->bytea, 	$big_long_string, "Set the blob correctly.");
+  is($new->bytea, $big_long_string, "Set the blob correctly.");
 }
 
 # test retrieval of the bytea column

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/01-columns.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/01-columns.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/01-columns.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -24,15 +24,15 @@
 #State->has_many(cities => "City");
 
 sub accessor_name_for {
-	my ($class, $column) = @_;
-	my $return = $column eq "Rain" ? "Rainfall" : $column;
-	return $return;
+  my ($class, $column) = @_;
+  my $return = $column eq "Rain" ? "Rainfall" : $column;
+  return $return;
 }
 
 sub mutator_name_for {
-	my ($class, $column) = @_;
-	my $return = $column eq "Rain" ? "set_Rainfall" : "set_$column";
-	return $return;
+  my ($class, $column) = @_;
+  my $return = $column eq "Rain" ? "set_Rainfall" : "set_$column";
+  return $return;
 }
 
 sub Snowfall { 1 }
@@ -69,61 +69,61 @@
 is(State->table,          'State', 'State table()');
 is(State->primary_column, 'name',  'State primary()');
 is_deeply [ State->columns('Primary') ] => [qw/name/],
-	'State Primary:' . join ", ", State->columns('Primary');
+  'State Primary:' . join ", ", State->columns('Primary');
 is_deeply [ sort State->columns('Essential') ] => [qw/abbreviation name/],
-	'State Essential:' . join ", ", State->columns('Essential');
+  'State Essential:' . join ", ", State->columns('Essential');
 is_deeply [ sort State->columns('All') ] =>
-	[ sort qw/name abbreviation rain snowfall capital population/ ],
-	'State All:' . join ", ", State->columns('All');
+  [ sort qw/name abbreviation rain snowfall capital population/ ],
+  'State All:' . join ", ", State->columns('All');
 
 is(CD->primary_column, 'artist', 'CD primary()');
 is_deeply [ CD->columns('Primary') ] => [qw/artist/],
-	'CD primary:' . join ", ", CD->columns('Primary');
+  'CD primary:' . join ", ", CD->columns('Primary');
 is_deeply [ sort CD->columns('All') ] => [qw/artist length title/],
-	'CD all:' . join ", ", CD->columns('All');
+  'CD all:' . join ", ", CD->columns('All');
 is_deeply [ sort CD->columns('Essential') ] => [qw/artist/],
-	'CD essential:' . join ", ", CD->columns('Essential');
+  'CD essential:' . join ", ", CD->columns('Essential');
 
 ok(State->find_column('Rain'), 'find_column Rain');
 ok(State->find_column('rain'), 'find_column rain');
 ok(!State->find_column('HGLAGAGlAG'), '!find_column HGLAGAGlAG');
 
 {
-    
+
     can_ok +State => qw/Rainfall _Rainfall_accessor set_Rainfall
-    	_set_Rainfall_accessor Snowfall _Snowfall_accessor set_Snowfall
-    	_set_Snowfall_accessor/;
-    
-    foreach my $method (qw/Rain _Rain_accessor rain snowfall/) { 
-    	ok !State->can($method), "State can't $method";
+      _set_Rainfall_accessor Snowfall _Snowfall_accessor set_Snowfall
+      _set_Snowfall_accessor/;
+
+    foreach my $method (qw/Rain _Rain_accessor rain snowfall/) {
+      ok !State->can($method), "State can't $method";
     }
 
 }
 
 {
-        SKIP: {
-          skip "No column objects", 1;
+  SKIP: {
+    skip "No column objects", 1;
 
-  	  eval { my @grps = State->__grouper->groups_for("Huh"); };
-	  ok $@, "Huh not in groups";
-        }
+    eval { my @grps = State->__grouper->groups_for("Huh"); };
+    ok $@, "Huh not in groups";
+  }
 
-	my @grps = sort State->__grouper->groups_for(State->_find_columns(qw/rain capital/));
-	is @grps, 2, "Rain and Capital = 2 groups";
+  my @grps = sort State->__grouper->groups_for(State->_find_columns(qw/rain capital/));
+  is @grps, 2, "Rain and Capital = 2 groups";
         @grps = sort @grps; # Because the underlying API is hash-based
-	is $grps[0], 'Other',   " - Other";
-	is $grps[1], 'Weather', " - Weather";
+  is $grps[0], 'Other',   " - Other";
+  is $grps[1], 'Weather', " - Weather";
 }
 
 #{
-#        
+#
 #        package DieTest;
 #        @DieTest::ISA = qw(DBIx::Class);
 #        DieTest->load_components(qw/CDBICompat::Retrieve Core/);
 #        package main;
-#	local $SIG{__WARN__} = sub { };
-#	eval { DieTest->retrieve(1) };
-#	like $@, qr/unless primary columns are defined/, "Need primary key for retrieve";
+#  local $SIG{__WARN__} = sub { };
+#  eval { DieTest->retrieve(1) };
+#  like $@, qr/unless primary columns are defined/, "Need primary key for retrieve";
 #}
 
 #-----------------------------------------------------------------------

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/02-Film.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/02-Film.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/02-Film.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -8,33 +8,32 @@
     plan (skip_all => 'Class::Trigger and DBIx::ContextualFetch required');
     next;
   }
-  eval "use DBD::SQLite";
-  plan $@ ? (skip_all => 'needs DBD::SQLite for testing') : (tests => 98);
+  plan tests => 98;
 }
 
 INIT {
-	use lib 't/cdbi/testlib';
-	use Film;
+  use lib 't/cdbi/testlib';
+  use Film;
 }
 
 ok(Film->can('db_Main'), 'set_db()');
 is(Film->__driver, "SQLite", "Driver set correctly");
 
 {
-	my $nul = eval { Film->retrieve() };
-	is $nul, undef, "Can't retrieve nothing";
-	like $@, qr/./, "retrieve needs parameters";    # TODO fix this...
+  my $nul = eval { Film->retrieve() };
+  is $nul, undef, "Can't retrieve nothing";
+  like $@, qr/./, "retrieve needs parameters";    # TODO fix this...
 }
 
 {
-	eval { my $id = Film->id };
-	like $@, qr/class method/, "Can't get id with no object";
+  eval { my $id = Film->id };
+  like $@, qr/class method/, "Can't get id with no object";
 }
 
 {
-	eval { my $id = Film->title };
-	#like $@, qr/class method/, "Can't get title with no object";
-	ok $@, "Can't get title with no object";
+  eval { my $id = Film->title };
+  #like $@, qr/class method/, "Can't get title with no object";
+  ok $@, "Can't get title with no object";
 } 
 
 eval { my $duh = Film->insert; };
@@ -50,24 +49,24 @@
 is($btaste->NumExplodingSheep, 1,               'NumExplodingSheep() get');
 
 {
-	my $bt2 = Film->find_or_create(Title => 'Bad Taste');
-	is $bt2->Director, $btaste->Director, "find_or_create";
-	my @bt = Film->search(Title => 'Bad Taste');
-	is @bt, 1, " doesn't create a new one";
+  my $bt2 = Film->find_or_create(Title => 'Bad Taste');
+  is $bt2->Director, $btaste->Director, "find_or_create";
+  my @bt = Film->search(Title => 'Bad Taste');
+  is @bt, 1, " doesn't create a new one";
 }
 
 ok my $gone = Film->find_or_create(
-	{
-		Title             => 'Gone With The Wind',
-		Director          => 'Bob Baggadonuts',
-		Rating            => 'PG',
-		NumExplodingSheep => 0
-	}
-	),
-	"Add Gone With The Wind";
+  {
+    Title             => 'Gone With The Wind',
+    Director          => 'Bob Baggadonuts',
+    Rating            => 'PG',
+    NumExplodingSheep => 0
+  }
+  ),
+  "Add Gone With The Wind";
 isa_ok $gone, 'Film';
 ok $gone = Film->retrieve(Title => 'Gone With The Wind'),
-	"Fetch it back again";
+  "Fetch it back again";
 isa_ok $gone, 'Film';
 
 # Shocking new footage found reveals bizarre Scarlet/sheep scene!
@@ -82,8 +81,8 @@
 $gone->update;
 
 {
-	my @films = eval { Film->retrieve_all };
-	cmp_ok(@films, '==', 2, "We have 2 films in total");
+  my @films = eval { Film->retrieve_all };
+  cmp_ok(@films, '==', 2, "We have 2 films in total");
 }
 
 # EXTRA TEST: added by mst to check a bug found by Numa
@@ -95,11 +94,11 @@
 
 # Grab the 'Bladerunner' entry.
 Film->create(
-	{
-		Title    => 'Bladerunner',
-		Director => 'Bob Ridley Scott',
-		Rating   => 'R'
-	}
+  {
+    Title    => 'Bladerunner',
+    Director => 'Bob Ridley Scott',
+    Rating   => 'R'
+  }
 );
 
 my $blrunner = Film->retrieve('Bladerunner');
@@ -111,10 +110,10 @@
 
 # Make a copy of 'Bladerunner' and create an entry of the directors cut
 my $blrunner_dc = $blrunner->copy(
-	{
-		title  => "Bladerunner: Director's Cut",
-		rating => "15",
-	}
+  {
+    title  => "Bladerunner: Director's Cut",
+    rating => "15",
+  }
 );
 is(ref $blrunner_dc, 'Film', "copy() produces a film");
 is($blrunner_dc->Title,    "Bladerunner: Director's Cut", 'Title correct');
@@ -124,81 +123,78 @@
 
 # Set up own SQL:
 {
-	Film->add_constructor(title_asc  => "title LIKE ? ORDER BY title");
-	Film->add_constructor(title_desc => "title LIKE ? ORDER BY title DESC");
+  Film->add_constructor(title_asc  => "title LIKE ? ORDER BY title");
+  Film->add_constructor(title_desc => "title LIKE ? ORDER BY title DESC");
     Film->add_constructor(title_asc_nl => q{
         title LIKE ?
         ORDER BY title
         LIMIT 1
     });
 
-	{
-		my @films = Film->title_asc("Bladerunner%");
-		is @films, 2, "We have 2 Bladerunners";
-		is $films[0]->Title, $blrunner->Title, "Ordered correctly";
-	}
-	{
-		my @films = Film->title_desc("Bladerunner%");
-		is @films, 2, "We have 2 Bladerunners";
-		is $films[0]->Title, $blrunner_dc->Title, "Ordered correctly";
-	}
-	{
-		my @films = Film->title_asc_nl("Bladerunner%");
-		is @films, 1, "We have 2 Bladerunners";
-		is $films[0]->Title, $blrunner->Title, "Ordered correctly";
-	}
+  {
+    my @films = Film->title_asc("Bladerunner%");
+    is @films, 2, "We have 2 Bladerunners";
+    is $films[0]->Title, $blrunner->Title, "Ordered correctly";
+  }
+  {
+    my @films = Film->title_desc("Bladerunner%");
+    is @films, 2, "We have 2 Bladerunners";
+    is $films[0]->Title, $blrunner_dc->Title, "Ordered correctly";
+  }
+  {
+    my @films = Film->title_asc_nl("Bladerunner%");
+    is @films, 1, "We have 2 Bladerunners";
+    is $films[0]->Title, $blrunner->Title, "Ordered correctly";
+  }
 }
 
 # Multi-column search
 {
-	my @films = $blrunner->search (title => { -like => "Bladerunner%"}, rating => '15');
-	is @films, 1, "Only one Bladerunner is a 15";
+  my @films = $blrunner->search (title => { -like => "Bladerunner%"}, rating => '15');
+  is @films, 1, "Only one Bladerunner is a 15";
 }
 
 # Inline SQL
 {
-	my @films = Film->retrieve_from_sql("numexplodingsheep > 0 ORDER BY title");
-	is @films, 2, "Inline SQL";
-	is $films[0]->id, $btaste->id, "Correct film";
-	is $films[1]->id, $gone->id,   "Correct film";
+  my @films = Film->retrieve_from_sql("numexplodingsheep > 0 ORDER BY title");
+  is @films, 2, "Inline SQL";
+  is $films[0]->id, $btaste->id, "Correct film";
+  is $films[1]->id, $gone->id,   "Correct film";
 }
 
 # Inline SQL removes WHERE
 {
-	my @films =
-		Film->retrieve_from_sql(" WHErE numexplodingsheep > 0 ORDER BY title");
-	is @films, 2, "Inline SQL";
-	is $films[0]->id, $btaste->id, "Correct film";
-	is $films[1]->id, $gone->id,   "Correct film";
+  my @films =
+    Film->retrieve_from_sql(" WHErE numexplodingsheep > 0 ORDER BY title");
+  is @films, 2, "Inline SQL";
+  is $films[0]->id, $btaste->id, "Correct film";
+  is $films[1]->id, $gone->id,   "Correct film";
 }
 
 eval {
-	my $ishtar = Film->insert({ Title => 'Ishtar', Director => 'Elaine May' });
-	my $mandn =
-		Film->insert({ Title => 'Mikey and Nicky', Director => 'Elaine May' });
-	my $new_leaf =
-		Film->insert({ Title => 'A New Leaf', Director => 'Elaine May' });
+  my $ishtar = Film->insert({ Title => 'Ishtar', Director => 'Elaine May' });
+  my $mandn =
+    Film->insert({ Title => 'Mikey and Nicky', Director => 'Elaine May' });
+  my $new_leaf =
+    Film->insert({ Title => 'A New Leaf', Director => 'Elaine May' });
 
 #use Data::Dumper; die Dumper(Film->search( Director => 'Elaine May' ));
-	cmp_ok(Film->search(Director => 'Elaine May'), '==', 3,
-		"3 Films by Elaine May");
-	ok(Film->retrieve('Ishtar')->delete,
-		"Ishtar doesn't deserve an entry any more");
-	ok(!Film->retrieve('Ishtar'), 'Ishtar no longer there');
-	{
-		my $deprecated = 0;
-		#local $SIG{__WARN__} = sub { $deprecated++ if $_[0] =~ /deprecated/ };
-		ok(
-			Film->delete(Director => 'Elaine May'),
-			"In fact, delete all films by Elaine May"
-		);
-		cmp_ok(Film->search(Director => 'Elaine May'), '==',
-			0, "0 Films by Elaine May");
-                SKIP: {
-                    skip "No deprecated warnings from compat layer", 1;
-		    is $deprecated, 1, "Got a deprecated warning";
-                }
-	}
+  cmp_ok(Film->search(Director => 'Elaine May'), '==', 3,
+    "3 Films by Elaine May");
+  ok(Film->retrieve('Ishtar')->delete,
+    "Ishtar doesn't deserve an entry any more");
+  ok(!Film->retrieve('Ishtar'), 'Ishtar no longer there');
+  {
+    my $deprecated = 0;
+    local $SIG{__WARN__} = sub { $deprecated++ if $_[0] =~ /deprecated/ };
+    ok(
+      Film->delete(Director => 'Elaine May'),
+      "In fact, delete all films by Elaine May"
+    );
+    cmp_ok(Film->search(Director => 'Elaine May'), '==',
+      0, "0 Films by Elaine May");
+    is $deprecated, 0, "No deprecated warnings from compat layer";
+  }
 };
 is $@, '', "No problems with deletes";
 
@@ -211,23 +207,23 @@
 @films = Film->search ( { 'Director' => { -like => 'Bob %' } });
 is(scalar @films, 3, ' search_like returns 3 films');
 ok(
-	eq_array(
-		[ sort map { $_->id } @films ],
-		[ sort map { $_->id } $blrunner_dc, $gone, $blrunner ]
-	),
-	'the correct ones'
+  eq_array(
+    [ sort map { $_->id } @films ],
+    [ sort map { $_->id } $blrunner_dc, $gone, $blrunner ]
+  ),
+  'the correct ones'
 );
 
 # Find Ridley Scott films which don't have vomit
 @films =
-	Film->search(numExplodingSheep => undef, Director => 'Bob Ridley Scott');
+  Film->search(numExplodingSheep => undef, Director => 'Bob Ridley Scott');
 is(scalar @films, 2, ' search where attribute is null returns 2 films');
 ok(
-	eq_array(
-		[ sort map { $_->id } @films ],
-		[ sort map { $_->id } $blrunner_dc, $blrunner ]
-	),
-	'the correct ones'
+  eq_array(
+    [ sort map { $_->id } @films ],
+    [ sort map { $_->id } $blrunner_dc, $blrunner ]
+  ),
+  'the correct ones'
 );
 
 # Test that a disconnect doesnt harm anything.
@@ -252,166 +248,166 @@
 }
 
 SKIP: {
-	skip "ActiveState perl produces additional warnings", 3
+  skip "ActiveState perl produces additional warnings", 3
           if ($^O eq 'MSWin32');
 
-	Film->autoupdate(1);
-	my $btaste2 = Film->retrieve($btaste->id);
-	$btaste->NumExplodingSheep(18);
-	my @warnings;
-	local $SIG{__WARN__} = sub { push(@warnings, @_); };
-	{
+  Film->autoupdate(1);
+  my $btaste2 = Film->retrieve($btaste->id);
+  $btaste->NumExplodingSheep(18);
+  my @warnings;
+  local $SIG{__WARN__} = sub { push(@warnings, @_); };
+  {
 
-		# unhook from live object cache, so next one is not from cache
-		$btaste2->remove_from_object_index;
-		my $btaste3 = Film->retrieve($btaste->id);
-		is $btaste3->NumExplodingSheep, 18, "Class based AutoCommit";
-		$btaste3->autoupdate(0);    # obj a/c should override class a/c
-		is @warnings, 0, "No warnings so far";
-		$btaste3->NumExplodingSheep(13);
-	}
-	is @warnings, 1, "DESTROY without update warns";
-	Film->autoupdate(0);
+    # unhook from live object cache, so next one is not from cache
+    $btaste2->remove_from_object_index;
+    my $btaste3 = Film->retrieve($btaste->id);
+    is $btaste3->NumExplodingSheep, 18, "Class based AutoCommit";
+    $btaste3->autoupdate(0);    # obj a/c should override class a/c
+    is @warnings, 0, "No warnings so far";
+    $btaste3->NumExplodingSheep(13);
+  }
+  is @warnings, 1, "DESTROY without update warns";
+  Film->autoupdate(0);
 }
 
 {                               # update unchanged object
-	my $film   = Film->retrieve($btaste->id);
-	my $retval = $film->update;
-	is $retval, -1, "Unchanged object";
+  my $film   = Film->retrieve($btaste->id);
+  my $retval = $film->update;
+  is $retval, -1, "Unchanged object";
 }
 
 {                               # update deleted object
-	my $rt = "Royal Tenenbaums";
-	my $ten = Film->insert({ title => $rt, Rating => "R" });
-	$ten->rating(18);
-	Film->set_sql(drt => "DELETE FROM __TABLE__ WHERE title = ?");
-	Film->sql_drt->execute($rt);
-	my @films = Film->search({ title => $rt });
-	is @films, 0, "RT gone";
-	my $retval = eval { $ten->update };
-	like $@, qr/row not found/, "Update deleted object throws error";
-	$ten->discard_changes;
+  my $rt = "Royal Tenenbaums";
+  my $ten = Film->insert({ title => $rt, Rating => "R" });
+  $ten->rating(18);
+  Film->set_sql(drt => "DELETE FROM __TABLE__ WHERE title = ?");
+  Film->sql_drt->execute($rt);
+  my @films = Film->search({ title => $rt });
+  is @films, 0, "RT gone";
+  my $retval = eval { $ten->update };
+  like $@, qr/row not found/, "Update deleted object throws error";
+  $ten->discard_changes;
 }
 
 {
-	$btaste->autoupdate(1);
-	$btaste->NumExplodingSheep(32);
-	my $btaste2 = Film->retrieve($btaste->id);
-	is $btaste2->NumExplodingSheep, 32, "Object based AutoCommit";
-	$btaste->autoupdate(0);
+  $btaste->autoupdate(1);
+  $btaste->NumExplodingSheep(32);
+  my $btaste2 = Film->retrieve($btaste->id);
+  is $btaste2->NumExplodingSheep, 32, "Object based AutoCommit";
+  $btaste->autoupdate(0);
 }
 
 # Primary key of 0
 {
-	my $zero = Film->insert({ Title => 0, Rating => "U" });
-	ok defined $zero, "Create 0";
-	ok my $ret = Film->retrieve(0), "Retrieve 0";
-	is $ret->Title,  0,   "Title OK";
-	is $ret->Rating, "U", "Rating OK";
+  my $zero = Film->insert({ Title => 0, Rating => "U" });
+  ok defined $zero, "Create 0";
+  ok my $ret = Film->retrieve(0), "Retrieve 0";
+  is $ret->Title,  0,   "Title OK";
+  is $ret->Rating, "U", "Rating OK";
 }
 
 # Change after_update policy
 SKIP: {
         skip "DBIx::Class compat doesn't handle the exists stuff quite right yet", 4;
-	my $bt = Film->retrieve($btaste->id);
-	$bt->autoupdate(1);
+  my $bt = Film->retrieve($btaste->id);
+  $bt->autoupdate(1);
 
-	$bt->rating("17");
-	ok !$bt->_attribute_exists('rating'), "changed column needs reloaded";
-	ok $bt->_attribute_exists('title'), "but we still have the title";
+  $bt->rating("17");
+  ok !$bt->_attribute_exists('rating'), "changed column needs reloaded";
+  ok $bt->_attribute_exists('title'), "but we still have the title";
 
-	# Don't re-load
-	$bt->add_trigger(
-		after_update => sub {
-			my ($self, %args) = @_;
-			my $discard_columns = $args{discard_columns};
-			@$discard_columns = qw/title/;
-		}
-	);
-	$bt->rating("19");
-	ok $bt->_attribute_exists('rating'), "changed column needs reloaded";
-	ok !$bt->_attribute_exists('title'), "but no longer have the title";
+  # Don't re-load
+  $bt->add_trigger(
+    after_update => sub {
+      my ($self, %args) = @_;
+      my $discard_columns = $args{discard_columns};
+      @$discard_columns = qw/title/;
+    }
+  );
+  $bt->rating("19");
+  ok $bt->_attribute_exists('rating'), "changed column needs reloaded";
+  ok !$bt->_attribute_exists('title'), "but no longer have the title";
 }
 
 # Make sure that we can have other accessors. (Bugfix in 0.28)
 if (0) {
-	Film->mk_accessors(qw/temp1 temp2/);
-	my $blrunner = Film->retrieve('Bladerunner');
-	$blrunner->temp1("Foo");
-	$blrunner->NumExplodingSheep(2);
-	eval { $blrunner->update };
-	ok(!$@, "Other accessors");
+  Film->mk_accessors(qw/temp1 temp2/);
+  my $blrunner = Film->retrieve('Bladerunner');
+  $blrunner->temp1("Foo");
+  $blrunner->NumExplodingSheep(2);
+  eval { $blrunner->update };
+  ok(!$@, "Other accessors");
 }
 
 # overloading
 {
-	is "$blrunner", "Bladerunner", "stringify";
+  is "$blrunner", "Bladerunner", "stringify";
 
-	ok(Film->columns(Stringify => 'rating'), "Can change stringify column");
-	is "$blrunner", "R", "And still stringifies correctly";
+  ok(Film->columns(Stringify => 'rating'), "Can change stringify column");
+  is "$blrunner", "R", "And still stringifies correctly";
 
-	ok(
-		Film->columns(Stringify => qw/title rating/),
-		"Can have multiple stringify columns"
-	);
-	is "$blrunner", "Bladerunner/R", "And still stringifies correctly";
+  ok(
+    Film->columns(Stringify => qw/title rating/),
+    "Can have multiple stringify columns"
+  );
+  is "$blrunner", "Bladerunner/R", "And still stringifies correctly";
 
-	no warnings 'once';
-	local *Film::stringify_self = sub { join ":", $_[0]->title, $_[0]->rating };
-	is "$blrunner", "Bladerunner:R", "Provide stringify_self()";
+  no warnings 'once';
+  local *Film::stringify_self = sub { join ":", $_[0]->title, $_[0]->rating };
+  is "$blrunner", "Bladerunner:R", "Provide stringify_self()";
 }
 
 {
-	{
-		ok my $byebye = DeletingFilm->insert(
-			{
-				Title  => 'Goodbye Norma Jean',
-				Rating => 'PG',
-			}
-			),
-			"Add a deleting Film";
+  {
+    ok my $byebye = DeletingFilm->insert(
+      {
+        Title  => 'Goodbye Norma Jean',
+        Rating => 'PG',
+      }
+      ),
+      "Add a deleting Film";
 
-		isa_ok $byebye, 'DeletingFilm';
-		isa_ok $byebye, 'Film';
-		ok(Film->retrieve('Goodbye Norma Jean'), "Fetch it back again");
-	}
-	my $film;
-	eval { $film = Film->retrieve('Goodbye Norma Jean') };
-	ok !$film, "It destroys itself";
+    isa_ok $byebye, 'DeletingFilm';
+    isa_ok $byebye, 'Film';
+    ok(Film->retrieve('Goodbye Norma Jean'), "Fetch it back again");
+  }
+  my $film;
+  eval { $film = Film->retrieve('Goodbye Norma Jean') };
+  ok !$film, "It destroys itself";
 }
 
 SKIP: {
     skip "Caching has been removed", 5
         if Film->isa("DBIx::Class::CDBICompat::NoObjectIndex");
 
-	# my bad taste is your bad taste
-	my $btaste  = Film->retrieve('Bad Taste');
-	my $btaste2 = Film->retrieve('Bad Taste');
-	is Scalar::Util::refaddr($btaste), Scalar::Util::refaddr($btaste2),
-		"Retrieving twice gives ref to same object";
+  # my bad taste is your bad taste
+  my $btaste  = Film->retrieve('Bad Taste');
+  my $btaste2 = Film->retrieve('Bad Taste');
+  is Scalar::Util::refaddr($btaste), Scalar::Util::refaddr($btaste2),
+    "Retrieving twice gives ref to same object";
 
-	my ($btaste5) = Film->search(title=>'Bad Taste');
-	is Scalar::Util::refaddr($btaste), Scalar::Util::refaddr($btaste5),
-		"Searching also gives ref to same object";
+  my ($btaste5) = Film->search(title=>'Bad Taste');
+  is Scalar::Util::refaddr($btaste), Scalar::Util::refaddr($btaste5),
+    "Searching also gives ref to same object";
 
-	$btaste2->remove_from_object_index;
-	my $btaste3 = Film->retrieve('Bad Taste');
-	isnt Scalar::Util::refaddr($btaste2), Scalar::Util::refaddr($btaste3),
-		"Removing from object_index and retrieving again gives new object";
+  $btaste2->remove_from_object_index;
+  my $btaste3 = Film->retrieve('Bad Taste');
+  isnt Scalar::Util::refaddr($btaste2), Scalar::Util::refaddr($btaste3),
+    "Removing from object_index and retrieving again gives new object";
 
-	$btaste3->clear_object_index;
-	my $btaste4 = Film->retrieve('Bad Taste');
-	isnt Scalar::Util::refaddr($btaste2), Scalar::Util::refaddr($btaste4),
-		"Clearing cache and retrieving again gives new object";
+  $btaste3->clear_object_index;
+  my $btaste4 = Film->retrieve('Bad Taste');
+  isnt Scalar::Util::refaddr($btaste2), Scalar::Util::refaddr($btaste4),
+    "Clearing cache and retrieving again gives new object";
  
   $btaste=Film->insert({
-		Title             => 'Bad Taste 2',
-		Director          => 'Peter Jackson',
-		Rating            => 'R',
-		NumExplodingSheep => 2,
-	});
-	$btaste2 = Film->retrieve('Bad Taste 2');
-	is Scalar::Util::refaddr($btaste), Scalar::Util::refaddr($btaste2),
-		"Creating and retrieving gives ref to same object";
+    Title             => 'Bad Taste 2',
+    Director          => 'Peter Jackson',
+    Rating            => 'R',
+    NumExplodingSheep => 2,
+  });
+  $btaste2 = Film->retrieve('Bad Taste 2');
+  is Scalar::Util::refaddr($btaste), Scalar::Util::refaddr($btaste2),
+    "Creating and retrieving gives ref to same object";
  
 }

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/03-subclassing.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/03-subclassing.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/03-subclassing.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -22,7 +22,7 @@
 
 ok(Film::Threat->db_Main->ping, 'subclass db_Main()');
 is_deeply [ sort Film::Threat->columns ], [ sort Film->columns ],
-	'has the same columns';
+  'has the same columns';
 
 my $bt = Film->create_test_film;
 ok my $btaste = Film::Threat->retrieve('Bad Taste'), "subclass retrieve";

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/04-lazy.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/04-lazy.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/04-lazy.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -17,8 +17,8 @@
 }
 
 INIT {
-	use lib 't/cdbi/testlib';
-	use Lazy;
+  use lib 't/cdbi/testlib';
+  use Lazy;
 }
 
 is_deeply [ Lazy->columns('Primary') ],        [qw/this/],      "Pri";
@@ -29,13 +29,13 @@
 is_deeply [ sort Lazy->columns('All') ], [qw/eep oop opop orp that this/], "All";
 
 {
-	my @groups = Lazy->__grouper->groups_for(Lazy->find_column('this'));
-	is_deeply [ sort @groups ], [sort qw/things Essential Primary/], "this (@groups)";
+  my @groups = Lazy->__grouper->groups_for(Lazy->find_column('this'));
+  is_deeply [ sort @groups ], [sort qw/things Essential Primary/], "this (@groups)";
 }
 
 {
-	my @groups = Lazy->__grouper->groups_for(Lazy->find_column('that'));
-	is_deeply \@groups, [qw/things/], "that (@groups)";
+  my @groups = Lazy->__grouper->groups_for(Lazy->find_column('that'));
+  is_deeply \@groups, [qw/things/], "that (@groups)";
 }
 
 Lazy->create({ this => 1, that => 2, oop => 3, opop => 4, eep => 5 });
@@ -54,28 +54,28 @@
 ok(!$obj->_attribute_exists('that'), 'nor that');
 
 {
-	Lazy->columns(All => qw/this that eep orp oop opop/);
-	ok(my $obj = Lazy->retrieve(1), 'Retrieve by Primary');
-	ok !$obj->_attribute_exists('oop'), " Don't have oop";
-	my $null = $obj->eep;
-	ok !$obj->_attribute_exists('oop'),
-		" Don't have oop - even after getting eep";
+  Lazy->columns(All => qw/this that eep orp oop opop/);
+  ok(my $obj = Lazy->retrieve(1), 'Retrieve by Primary');
+  ok !$obj->_attribute_exists('oop'), " Don't have oop";
+  my $null = $obj->eep;
+  ok !$obj->_attribute_exists('oop'),
+    " Don't have oop - even after getting eep";
 }
 
 # Test contructor breaking.
 
 eval {    # Need a hashref
-	Lazy->create(this => 10, that => 20, oop => 30, opop => 40, eep => 50);
+  Lazy->create(this => 10, that => 20, oop => 30, opop => 40, eep => 50);
 };
 ok($@, $@);
 
 eval {    # False column
-	Lazy->create({ this => 10, that => 20, theother => 30 });
+  Lazy->create({ this => 10, that => 20, theother => 30 });
 };
 ok($@, $@);
 
 eval {    # Multiple false columns
-	Lazy->create({ this => 10, that => 20, theother => 30, andanother => 40 });
+  Lazy->create({ this => 10, that => 20, theother => 30, andanother => 40 });
 };
 ok($@, $@);
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/06-hasa.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/06-hasa.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/06-hasa.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -16,9 +16,9 @@
 #local $SIG{__WARN__} = sub { };
 
 INIT {
-	use lib 't/cdbi/testlib';
-	use Film;
-	use Director;
+  use lib 't/cdbi/testlib';
+  use Film;
+  use Director;
 }
 
 Film->create_test_film;
@@ -28,14 +28,14 @@
 
 ok(Film->has_a('Director' => 'Director'), "Link Director table");
 ok(
-	Director->create(
-		{
-			Name     => 'Peter Jackson',
-			Birthday => -300000000,
-			IsInsane => 1
-		}
-	),
-	'create Director'
+  Director->create(
+    {
+      Name     => 'Peter Jackson',
+      Birthday => -300000000,
+      IsInsane => 1
+    }
+  ),
+  'create Director'
 );
 
 $btaste = Film->retrieve('Bad Taste');
@@ -46,11 +46,11 @@
 
 # Oh no!  Its Peter Jacksons even twin, Skippy!  Born one minute after him.
 my $sj = Director->create(
-	{
-		Name     => 'Skippy Jackson',
-		Birthday => (-300000000 + 60),
-		IsInsane => 1,
-	}
+  {
+    Name     => 'Skippy Jackson',
+    Birthday => (-300000000 + 60),
+    IsInsane => 1,
+  }
 );
 
 is($sj->id, 'Skippy Jackson', 'We have a new director');
@@ -61,71 +61,71 @@
 $btaste->update;
 is($btaste->CoDirector->Name, 'Skippy Jackson', 'He co-directed');
 is(
-	$btaste->Director->Name,
-	'Peter Jackson',
-	"Didnt interfere with each other"
+  $btaste->Director->Name,
+  'Peter Jackson',
+  "Didnt interfere with each other"
 );
 
 { # Ensure search can take an object
-	my @films = Film->search(Director => $pj);
-	is @films, 1, "1 Film directed by $pj";
-	is $films[0]->id, "Bad Taste", "Bad Taste";
+  my @films = Film->search(Director => $pj);
+  is @films, 1, "1 Film directed by $pj";
+  is $films[0]->id, "Bad Taste", "Bad Taste";
 }
 
 inheriting_hasa();
 
 {
 
-	# Skippy directs a film and Peter helps!
-	$sj = Director->retrieve('Skippy Jackson');
-	$pj = Director->retrieve('Peter Jackson');
+  # Skippy directs a film and Peter helps!
+  $sj = Director->retrieve('Skippy Jackson');
+  $pj = Director->retrieve('Peter Jackson');
 
-	fail_with_bad_object($sj, $btaste);
-	taste_bad($sj,            $pj);
+  fail_with_bad_object($sj, $btaste);
+  taste_bad($sj,            $pj);
 }
 
 sub inheriting_hasa {
-	my $btaste = YA::Film->retrieve('Bad Taste');
-	is(ref($btaste->Director),   'Director', 'inheriting has_a()');
-	is(ref($btaste->CoDirector), 'Director', 'inheriting has_a()');
-	is($btaste->CoDirector->Name, 'Skippy Jackson', ' ... correctly');
+  my $btaste = YA::Film->retrieve('Bad Taste');
+  is(ref($btaste->Director),   'Director', 'inheriting has_a()');
+  is(ref($btaste->CoDirector), 'Director', 'inheriting has_a()');
+  is($btaste->CoDirector->Name, 'Skippy Jackson', ' ... correctly');
 }
 
 sub taste_bad {
-	my ($dir, $codir) = @_;
-	my $tastes_bad = YA::Film->create(
-		{
-			Title             => 'Tastes Bad',
-			Director          => $dir,
-			CoDirector        => $codir,
-			Rating            => 'R',
-			NumExplodingSheep => 23
-		}
-	);
-	is($tastes_bad->_Director_accessor, 'Skippy Jackson', 'Director_accessor');
-	is($tastes_bad->Director->Name,   'Skippy Jackson', 'Director');
-	is($tastes_bad->CoDirector->Name, 'Peter Jackson',  'CoDirector');
-	is(
-		$tastes_bad->_CoDirector_accessor,
-		'Peter Jackson',
-		'CoDirector_accessor'
-	);
+  my ($dir, $codir) = @_;
+  my $tastes_bad = YA::Film->create(
+    {
+      Title             => 'Tastes Bad',
+      Director          => $dir,
+      CoDirector        => $codir,
+      Rating            => 'R',
+      NumExplodingSheep => 23
+    }
+  );
+  is($tastes_bad->_Director_accessor, 'Skippy Jackson', 'Director_accessor');
+  is($tastes_bad->Director->Name,   'Skippy Jackson', 'Director');
+  is($tastes_bad->CoDirector->Name, 'Peter Jackson',  'CoDirector');
+  is(
+    $tastes_bad->_CoDirector_accessor,
+    'Peter Jackson',
+    'CoDirector_accessor'
+  );
 }
 
 sub fail_with_bad_object {
-	my ($dir, $codir) = @_;
-	eval {
-		YA::Film->create(
-			{
-				Title             => 'Tastes Bad',
-				Director          => $dir,
-				CoDirector        => $codir,
-				Rating            => 'R',
-				NumExplodingSheep => 23
-			}
-		);
-	};
-	ok $@, $@;
+  my ($dir, $codir) = @_;
+  eval {
+    YA::Film->create(
+      {
+        Title             => 'Tastes Bad',
+        Director          => $dir,
+        CoDirector        => $codir,
+        Rating            => 'R',
+        NumExplodingSheep => 23
+      }
+    );
+  };
+  ok $@, $@;
 }
 
 package Foo;
@@ -135,8 +135,8 @@
 # fav is a film
 __PACKAGE__->db_Main->do( qq{
      CREATE TABLE foo (
-	     id        INTEGER,
-	     fav       VARCHAR(255)
+       id        INTEGER,
+       fav       VARCHAR(255)
      )
 });
 
@@ -148,8 +148,8 @@
 # fav is a foo
 __PACKAGE__->db_Main->do( qq{
      CREATE TABLE bar (
-	     id        INTEGER,
-	     fav       INTEGER
+       id        INTEGER,
+       fav       INTEGER
      )
 });
 
@@ -162,9 +162,9 @@
 isa_ok($foo->fav, "Film");
 
 { 
-	my $foo;
-	Foo->add_trigger(after_create => sub { $foo = shift->fav });
-	my $gwh = Foo->create({ id => 93, fav => 'Good Will Hunting' });
-	isa_ok $foo, "Film", "Object in after_create trigger";
+  my $foo;
+  Foo->add_trigger(after_create => sub { $foo = shift->fav });
+  my $gwh = Foo->create({ id => 93, fav => 'Good Will Hunting' });
+  isa_ok $foo, "Film", "Object in after_create trigger";
 }
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/09-has_many.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/09-has_many.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/09-has_many.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -25,14 +25,14 @@
 ok(my $btaste = Film->retrieve('Bad Taste'), "We have Bad Taste");
 
 ok(
-	my $pvj = Actor->create(
-		{
-			Name   => 'Peter Vere-Jones',
-			Film   => undef,
-			Salary => '30_000',             # For a voice!
-		}
-	),
-	'create Actor'
+  my $pvj = Actor->create(
+    {
+      Name   => 'Peter Vere-Jones',
+      Film   => undef,
+      Salary => '30_000',             # For a voice!
+    }
+  ),
+  'create Actor'
 );
 is $pvj->Name, "Peter Vere-Jones", "PVJ name ok";
 is $pvj->Film, undef, "No film";
@@ -40,14 +40,14 @@
 $pvj->update;
 is $pvj->Film->id, $btaste->id, "Now film";
 {
-	my @actors = $btaste->actors;
-	is(@actors, 1, "Bad taste has one actor");
-	is($actors[0]->Name, $pvj->Name, " - the correct one");
+  my @actors = $btaste->actors;
+  is(@actors, 1, "Bad taste has one actor");
+  is($actors[0]->Name, $pvj->Name, " - the correct one");
 }
 
 my %pj_data = (
-	Name   => 'Peter Jackson',
-	Salary => '0',               # it's a labour of love
+  Name   => 'Peter Jackson',
+  Salary => '0',               # it's a labour of love
 );
 
 eval { my $pj = Film->add_to_actors(\%pj_data) };
@@ -57,37 +57,37 @@
 like $@, qr/needs/, "add_to_actors takes hash";
 
 ok(
-	my $pj = $btaste->add_to_actors(
-		{
-			Name   => 'Peter Jackson',
-			Salary => '0',               # it's a labour of love
-		}
-	),
-	'add_to_actors'
+  my $pj = $btaste->add_to_actors(
+    {
+      Name   => 'Peter Jackson',
+      Salary => '0',               # it's a labour of love
+    }
+  ),
+  'add_to_actors'
 );
 is $pj->Name,  "Peter Jackson",    "PJ ok";
 is $pvj->Name, "Peter Vere-Jones", "PVJ still ok";
 
 {
-	my @actors = $btaste->actors;
-	is @actors, 2, " - so now we have 2";
-	is $actors[0]->Name, $pj->Name,  "PJ first";
-	is $actors[1]->Name, $pvj->Name, "PVJ first";
+  my @actors = $btaste->actors;
+  is @actors, 2, " - so now we have 2";
+  is $actors[0]->Name, $pj->Name,  "PJ first";
+  is $actors[1]->Name, $pvj->Name, "PVJ second";
 }
 
 eval {
-	my @actors = $btaste->actors(Name => $pj->Name);
-	is @actors, 1, "One actor from restricted (sorted) has_many";
-	is $actors[0]->Name, $pj->Name, "It's PJ";
+  my @actors = $btaste->actors(Name => $pj->Name);
+  is @actors, 1, "One actor from restricted (sorted) has_many";
+  is $actors[0]->Name, $pj->Name, "It's PJ";
 };
 is $@, '', "No errors";
 
 my $as = Actor->create(
-	{
-		Name   => 'Arnold Schwarzenegger',
-		Film   => 'Terminator 2',
-		Salary => '15_000_000'
-	}
+  {
+    Name   => 'Arnold Schwarzenegger',
+    Film   => 'Terminator 2',
+    Salary => '15_000_000'
+  }
 );
 
 eval { $btaste->actors($pj, $pvj, $as) };

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/11-triggers.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/11-triggers.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/11-triggers.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -18,8 +18,8 @@
 sub delete_trigger  { ::ok(1, "Deleting " . shift->Title) }
 
 sub pre_up_trigger {
-	$_[0]->_attribute_set(numexplodingsheep => 1);
-	::ok(1, "Running pre-update trigger");
+  $_[0]->_attribute_set(numexplodingsheep => 1);
+  ::ok(1, "Running pre-update trigger");
 }
 sub pst_up_trigger { ::ok(1, "Running post-update trigger"); }
 
@@ -32,15 +32,15 @@
 Film->add_trigger(after_update  => \&pst_up_trigger);
 
 ok(
-	my $ver = Film->create({
-			title    => 'La Double Vie De Veronique',
-			director => 'Kryzstof Kieslowski',
+  my $ver = Film->create({
+      title    => 'La Double Vie De Veronique',
+      director => 'Kryzstof Kieslowski',
 
-			# rating           => '15',
-			numexplodingsheep => 0,
-		}
-	),
-	"Create Veronique"
+      # rating           => '15',
+      numexplodingsheep => 0,
+    }
+  ),
+  "Create Veronique"
 );
 
 is $ver->Rating,            15, "Default rating";
@@ -48,19 +48,19 @@
 ok $ver->Rating('12') && $ver->update, "Change the rating";
 is $ver->NumExplodingSheep, 1, "Updated object's sheep count";
 is + (
-	$ver->db_Main->selectall_arrayref(
-		    'SELECT numexplodingsheep FROM '
-			. $ver->table
-			. ' WHERE '
-			. $ver->primary_column . ' = '
-			. $ver->db_Main->quote($ver->id))
+  $ver->db_Main->selectall_arrayref(
+        'SELECT numexplodingsheep FROM '
+      . $ver->table
+      . ' WHERE '
+      . $ver->primary_column . ' = '
+      . $ver->db_Main->quote($ver->id))
 )->[0]->[0], 1, "Updated database's sheep count";
 ok $ver->delete, "Delete";
 
 {
-	Film->add_trigger(before_create => sub { 
-		my $self = shift;
-		ok !$self->_attribute_exists('title'), "PK doesn't auto-vivify";
-	});
-	Film->create({director => "Me"});
+  Film->add_trigger(before_create => sub { 
+    my $self = shift;
+    ok !$self->_attribute_exists('title'), "PK doesn't auto-vivify";
+  });
+  Film->create({director => "Me"});
 }
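
The before_update trigger here also shows the one subtlety worth remembering: inside a trigger you write through _attribute_set, which updates the stored value directly rather than going back through the public accessor (sketch, reusing the Film class above):

    Film->add_trigger(before_update => sub {
        my $self = shift;
        # adjust a column as a side effect of the update itself,
        # exactly as pre_up_trigger does above
        $self->_attribute_set(numexplodingsheep => 1);
    });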

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/12-filter.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/12-filter.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/12-filter.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -22,76 +22,76 @@
 my $film2 = Film->create({ Title => 'Another Film' });
 
 my @act = (
-	Actor->create(
-		{
-			name   => 'Actor 1',
-			film   => $film,
-			salary => 10,
-		}
-	),
-	Actor->create(
-		{
-			name   => 'Actor 2',
-			film   => $film,
-			salary => 20,
-		}
-	),
-	Actor->create(
-		{
-			name   => 'Actor 3',
-			film   => $film,
-			salary => 30,
-		}
-	),
-	Actor->create(
-		{
-			name   => 'Actor 4',
-			film   => $film2,
-			salary => 50,
-		}
-	),
+  Actor->create(
+    {
+      name   => 'Actor 1',
+      film   => $film,
+      salary => 10,
+    }
+  ),
+  Actor->create(
+    {
+      name   => 'Actor 2',
+      film   => $film,
+      salary => 20,
+    }
+  ),
+  Actor->create(
+    {
+      name   => 'Actor 3',
+      film   => $film,
+      salary => 30,
+    }
+  ),
+  Actor->create(
+    {
+      name   => 'Actor 4',
+      film   => $film2,
+      salary => 50,
+    }
+  ),
 );
 
 eval {
-	my @actors = $film->actors(name => 'Actor 1');
-	is @actors, 1, "Got one actor from restricted has_many";
-	is $actors[0]->name, "Actor 1", "Correct name";
+  my @actors = $film->actors(name => 'Actor 1');
+  is @actors, 1, "Got one actor from restricted has_many";
+  is $actors[0]->name, "Actor 1", "Correct name";
 };
 is $@, '', "No errors";
 
 {
-	my @actors = Actor->double_search("Actor 1", 10);
-	is @actors, 1, "Got one actor";
-	is $actors[0]->name, "Actor 1", "Correct name";
+  my @actors = Actor->double_search("Actor 1", 10);
+  is @actors, 1, "Got one actor";
+  is $actors[0]->name, "Actor 1", "Correct name";
 }
 
 {
-	ok my @actors = Actor->salary_between(0, 100), "Range 0 - 100";
-	is @actors, 4, "Got all";
+  ok my @actors = Actor->salary_between(0, 100), "Range 0 - 100";
+  is @actors, 4, "Got all";
 }
 
 {
-	my @actors = Actor->salary_between(100, 200);
-	is @actors, 0, "None in Range 100 - 200";
+  my @actors = Actor->salary_between(100, 200);
+  is @actors, 0, "None in Range 100 - 200";
 }
 
 {
-	ok my @actors = Actor->salary_between(0, 10), "Range 0 - 10";
-	is @actors, 1, "Got 1";
-	is $actors[0]->name, $act[0]->name, "Actor 1";
+  ok my @actors = Actor->salary_between(0, 10), "Range 0 - 10";
+  is @actors, 1, "Got 1";
+  is $actors[0]->name, $act[0]->name, "Actor 1";
 }
 
 {
-	ok my @actors = Actor->salary_between(20, 30), "Range 20 - 20";
-	@actors = sort { $a->salary <=> $b->salary } @actors;
-	is @actors, 2, "Got 2";
-	is $actors[0]->name, $act[1]->name, "Actor 2";
-	is $actors[1]->name, $act[2]->name, "and Actor 3";
+  ok my @actors = Actor->salary_between(20, 30), "Range 20 - 20";
+  @actors = sort { $a->salary <=> $b->salary } @actors;
+  is @actors, 2, "Got 2";
+  is $actors[0]->name, $act[1]->name, "Actor 2";
+  is $actors[1]->name, $act[2]->name, "and Actor 3";
 }
 
 {
-	ok my @actors = Actor->search(Film => $film), "Search by object";
-	is @actors, 3, "3 actors in film 1";
+  ok my @actors = Actor->search(Film => $film), "Search by object";
+  is @actors, 3, "3 actors in film 1";
 }
 
 #----------------------------------------------------------------------
@@ -101,29 +101,29 @@
 my $it_class = 'DBIx::Class::ResultSet';
 
 sub test_normal_iterator {
-	my $it = $film->actors;
-	isa_ok $it, $it_class;
-	is $it->count, 3, " - with 3 elements";
-	my $i = 0;
-	while (my $film = $it->next) {
-		is $film->name, $act[ $i++ ]->name, "Get $i";
-	}
-	ok !$it->next, "No more";
-	is $it->first->name, $act[0]->name, "Get first";
+  my $it = $film->actors;
+  isa_ok $it, $it_class;
+  is $it->count, 3, " - with 3 elements";
+  my $i = 0;
+  while (my $film = $it->next) {
+    is $film->name, $act[ $i++ ]->name, "Get $i";
+  }
+  ok !$it->next, "No more";
+  is $it->first->name, $act[0]->name, "Get first";
 }
 
 test_normal_iterator;
 {
-	Film->has_many(actor_ids => [ Actor => 'id' ]);
-	my $it = $film->actor_ids;
-	isa_ok $it, $it_class;
-	is $it->count, 3, " - with 3 elements";
-	my $i = 0;
-	while (my $film_id = $it->next) {
-		is $film_id, $act[ $i++ ]->id, "Get id $i";
-	}
-	ok !$it->next, "No more";
-	is $it->first, $act[0]->id, "Get first";
+  Film->has_many(actor_ids => [ Actor => 'id' ]);
+  my $it = $film->actor_ids;
+  isa_ok $it, $it_class;
+  is $it->count, 3, " - with 3 elements";
+  my $i = 0;
+  while (my $film_id = $it->next) {
+    is $film_id, $act[ $i++ ]->id, "Get id $i";
+  }
+  ok !$it->next, "No more";
+  is $it->first, $act[0]->id, "Get first";
 }
 
 # make sure nothing gets clobbered;
@@ -134,22 +134,22 @@
 
 
 {
-	my @acts = $film->actors->slice(1, 2);
-	is @acts, 2, "Slice gives 2 actor";
-	is $acts[0]->name, "Actor 2", "Actor 2";
-	is $acts[1]->name, "Actor 3", "and actor 3";
+  my @acts = $film->actors->slice(1, 2);
+  is @acts, 2, "Slice gives 2 actor";
+  is $acts[0]->name, "Actor 2", "Actor 2";
+  is $acts[1]->name, "Actor 3", "and actor 3";
 }
 
 {
-	my @acts = $film->actors->slice(1);
-	is @acts, 1, "Slice of 1 actor";
-	is $acts[0]->name, "Actor 2", "Actor 2";
+  my @acts = $film->actors->slice(1);
+  is @acts, 1, "Slice of 1 actor";
+  is $acts[0]->name, "Actor 2", "Actor 2";
 }
 
 {
-	my @acts = $film->actors->slice(2, 8);
-	is @acts, 1, "Slice off the end";
-	is $acts[0]->name, "Actor 3", "Gets last actor only";
+  my @acts = $film->actors->slice(2, 8);
+  is @acts, 1, "Slice off the end";
+  is $acts[0]->name, "Actor 3", "Gets last actor only";
 }
 
 package Class::DBI::My::Iterator;
@@ -167,15 +167,15 @@
 delete $film->{related_resultsets};
 
 {
-	my @acts = $film->actors->slice(1, 2);
-	is @acts, 2, "Slice gives 2 results";
-	ok eq_set(\@acts, [qw/fred barney/]), "Fred and Barney";
+  my @acts = $film->actors->slice(1, 2);
+  is @acts, 2, "Slice gives 2 results";
+  ok eq_set(\@acts, [qw/fred barney/]), "Fred and Barney";
 
-	ok $film->actors->delete_all, "Can delete via iterator";
-	is $film->actors, 0, "no actors left";
+  ok $film->actors->delete_all, "Can delete via iterator";
+  is $film->actors, 0, "no actors left";
 
-	eval { $film->actors->delete_all };
-	is $@, '', "Deleting again does no harm";
+  eval { $film->actors->delete_all };
+  is $@, '', "Deleting again does no harm";
 }
 
 } # end SKIP block
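
The three slice() blocks above pin down its list-context contract; condensed (sketch, same $film and actors as in the test):

    my @two  = $film->actors->slice(1, 2);   # indices 1..2  -> Actor 2, Actor 3
    my @one  = $film->actors->slice(1);      # single index  -> Actor 2 only
    my @tail = $film->actors->slice(2, 8);   # past the end  -> clipped to Actor 3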

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/13-constraint.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/13-constraint.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/13-constraint.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -95,13 +95,11 @@
     }
     eval { Film->constrain_column(codirector => Untaint => 'date') };
     is $@, '', 'Can constrain with untaint';
+
     my $freeaa =
         eval { Film->create({ title => "The Freaa", codirector => 'today' }) };
-    TODO: {
-        local $TODO = "no idea what this is supposed to do";
-        is $@, '', "Can create codirector";
-        is $freeaa && $freeaa->codirector, '2001-03-03', "Set the codirector";
-    }
+    is $@, '', "Can create codirector";
+    is $freeaa && $freeaa->codirector, '2001-03-03', "Set the codirector";
 }
 
 __DATA__
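
With the TODO removed, this test now commits to concrete behaviour: an Untaint 'date' constraint both validates and normalizes the stored value (sketch; assumes the untaint handlers used by constrain_column are installed):

    Film->constrain_column(codirector => Untaint => 'date');
    my $freeaa = Film->create({ title => 'The Freaa', codirector => 'today' });
    # 'today' passes the date untainter and is stored in normalized
    # form; the test expects '2001-03-03', a value fixed by its setup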

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/14-might_have.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/14-might_have.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/14-might_have.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -26,45 +26,45 @@
 Film->create_test_film;
 
 {
-	ok my $bt = Film->retrieve('Bad Taste'), "Get Film";
-	isa_ok $bt, "Film";
-	is $bt->info, undef, "No blurb yet";
-	# bug where we couldn't write a class with a might_have that didn't_have
-	$bt->rating(16);
-	eval { $bt->update };
-	is $@, '', "No problems updating when don't have";
-	is $bt->rating, 16, "Updated OK";
+  ok my $bt = Film->retrieve('Bad Taste'), "Get Film";
+  isa_ok $bt, "Film";
+  is $bt->info, undef, "No blurb yet";
+  # bug where we couldn't write a class with a might_have that didn't_have
+  $bt->rating(16);
+  eval { $bt->update };
+  is $@, '', "No problems updating when don't have";
+  is $bt->rating, 16, "Updated OK";
 
-	is $bt->blurb, undef, "Bad taste has no blurb";
-	$bt->blurb("Wibble bar");
-	$bt->update;
-	is $bt->blurb, "Wibble bar", "And we can write the info";
+  is $bt->blurb, undef, "Bad taste has no blurb";
+  $bt->blurb("Wibble bar");
+  $bt->update;
+  is $bt->blurb, "Wibble bar", "And we can write the info";
 }
 
 {
-	my $bt   = Film->retrieve('Bad Taste');
-	my $info = $bt->info;
-	isa_ok $info, 'Blurb';
+  my $bt   = Film->retrieve('Bad Taste');
+  my $info = $bt->info;
+  isa_ok $info, 'Blurb';
 
-	is $bt->blurb, $info->blurb, "Blurb is the same as fetching the long way";
-	ok $bt->blurb("New blurb"), "We can set the blurb";
-	$bt->update;
-	is $bt->blurb, $info->blurb, "Blurb has been set";
+  is $bt->blurb, $info->blurb, "Blurb is the same as fetching the long way";
+  ok $bt->blurb("New blurb"), "We can set the blurb";
+  $bt->update;
+  is $bt->blurb, $info->blurb, "Blurb has been set";
 
-	$bt->rating(18);
-	eval { $bt->update };
-	is $@, '', "No problems updating when do have";
-	is $bt->rating, 18, "Updated OK";
+  $bt->rating(18);
+  eval { $bt->update };
+  is $@, '', "No problems updating when do have";
+  is $bt->rating, 18, "Updated OK";
 
-	# cascade delete?
-	{
-		my $blurb = Blurb->retrieve('Bad Taste');
-		isa_ok $blurb => "Blurb";
-		$bt->delete;
-		$blurb = Blurb->retrieve('Bad Taste');
-		is $blurb, undef, "Blurb has gone";
-	}
-		
+  # cascade delete?
+  {
+    my $blurb = Blurb->retrieve('Bad Taste');
+    isa_ok $blurb => "Blurb";
+    $bt->delete;
+    $blurb = Blurb->retrieve('Bad Taste');
+    is $blurb, undef, "Blurb has gone";
+  }
+    
 }
 
 {

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/15-accessor.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/15-accessor.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/15-accessor.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -83,7 +83,7 @@
     my $data = { %$data };
     $data->{NumExplodingSheep} = 1;
     ok my $bt = Film->find_or_create($data),
-		"find_or_create Modified accessor - find with column name";
+    "find_or_create Modified accessor - find with column name";
     isa_ok $bt, "Film";
     is $bt->sheep, 1, 'sheep bursting violently';
 };
@@ -93,7 +93,7 @@
     my $data = { %$data };
     $data->{sheep} = 1;
     ok my $bt = Film->find_or_create($data),
-		"find_or_create Modified accessor - find with accessor";
+    "find_or_create Modified accessor - find with accessor";
     isa_ok $bt, "Film";
     is $bt->sheep, 1, 'sheep bursting violently';
 };
@@ -104,7 +104,7 @@
     my $data = { %$data };
     $data->{NumExplodingSheep} = 3;
     ok my $bt = Film->find_or_create($data),
-		"find_or_create Modified accessor - create with column name";
+    "find_or_create Modified accessor - create with column name";
     isa_ok $bt, "Film";
     is $bt->sheep, 3, 'sheep bursting violently';
 };
@@ -114,7 +114,7 @@
     my $data = { %$data };
     $data->{sheep} = 4;
     ok my $bt = Film->find_or_create($data),
-		"find_or_create Modified accessor - create with accessor";
+    "find_or_create Modified accessor - create with accessor";
     isa_ok $bt, "Film";
     is $bt->sheep, 4, 'sheep bursting violently';
 };
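
All four hunks in this file re-indent the same four-way matrix: find_or_create must behave identically whether the caller supplies the real column name (NumExplodingSheep) or the remapped accessor (sheep), on both the find and the create path. In miniature (sketch, assuming an accessor_name_for override like the one visible in 26-mutator.t below):

    # sub Film::accessor_name_for {
    #     my ($class, $col) = @_;
    #     return 'sheep' if lc $col eq 'numexplodingsheep';
    #     return $col;
    # }
    my $by_column   = Film->find_or_create({ %$data, NumExplodingSheep => 1 });
    my $by_accessor = Film->find_or_create({ %$data, sheep            => 1 });
    # both must resolve to the same column, found or created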

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/18-has_a.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/18-has_a.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/18-has_a.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -24,217 +24,217 @@
 
 ok(Film->has_a('Director' => 'Director'), "Link Director table");
 ok(
-	Director->create({
-			Name     => 'Peter Jackson',
-			Birthday => -300000000,
-			IsInsane => 1
-		}
-	),
-	'create Director'
+  Director->create({
+      Name     => 'Peter Jackson',
+      Birthday => -300000000,
+      IsInsane => 1
+    }
+  ),
+  'create Director'
 );
 
 {
-	ok $btaste = Film->retrieve('Bad Taste'), "Reretrieve Bad Taste";
-	ok $pj = $btaste->Director, "Bad taste now hasa() director";
-	isa_ok $pj => 'Director';
-	{
-		no warnings qw(redefine once);
-		local *Ima::DBI::st::execute =
-			sub { ::fail("Shouldn't need to query db"); };
-		is $pj->id, 'Peter Jackson', 'ID already stored';
-	}
-	ok $pj->IsInsane, "But we know he's insane";
+  ok $btaste = Film->retrieve('Bad Taste'), "Reretrieve Bad Taste";
+  ok $pj = $btaste->Director, "Bad taste now hasa() director";
+  isa_ok $pj => 'Director';
+  {
+    no warnings qw(redefine once);
+    local *Ima::DBI::st::execute =
+      sub { ::fail("Shouldn't need to query db"); };
+    is $pj->id, 'Peter Jackson', 'ID already stored';
+  }
+  ok $pj->IsInsane, "But we know he's insane";
 }
 
 # Oh no!  Its Peter Jacksons even twin, Skippy!  Born one minute after him.
 my $sj = Director->create({
-		Name     => 'Skippy Jackson',
-		Birthday => (-300000000 + 60),
-		IsInsane => 1,
-	});
+    Name     => 'Skippy Jackson',
+    Birthday => (-300000000 + 60),
+    IsInsane => 1,
+  });
 
 {
-	eval { $btaste->Director($btaste) };
-	like $@, qr/Director/, "Can't set film as director";
-	is $btaste->Director->id, $pj->id, "PJ still the director";
+  eval { $btaste->Director($btaste) };
+  like $@, qr/Director/, "Can't set film as director";
+  is $btaste->Director->id, $pj->id, "PJ still the director";
 
-	# drop from cache so that next retrieve() is from db
-	$btaste->remove_from_object_index;
+  # drop from cache so that next retrieve() is from db
+  $btaste->remove_from_object_index;
 }
 
 {    # Still inflated after update
-	my $btaste = Film->retrieve('Bad Taste');
-	isa_ok $btaste->Director, "Director";
-	$btaste->numexplodingsheep(17);
-	$btaste->update;
-	isa_ok $btaste->Director, "Director";
+  my $btaste = Film->retrieve('Bad Taste');
+  isa_ok $btaste->Director, "Director";
+  $btaste->numexplodingsheep(17);
+  $btaste->update;
+  isa_ok $btaste->Director, "Director";
 
-	$btaste->Director('Someone Else');
-	$btaste->update;
-	isa_ok $btaste->Director, "Director";
-	is $btaste->Director->id, "Someone Else", "Can change director";
+  $btaste->Director('Someone Else');
+  $btaste->update;
+  isa_ok $btaste->Director, "Director";
+  is $btaste->Director->id, "Someone Else", "Can change director";
 }
 
 is $sj->id, 'Skippy Jackson', 'Create new director - Skippy';
 Film->has_a('CoDirector' => 'Director');
 {
-	eval { $btaste->CoDirector("Skippy Jackson") };
-	is $@, "", "Auto inflates";
-	isa_ok $btaste->CoDirector, "Director";
-	is $btaste->CoDirector->id, $sj->id, "To skippy";
+  eval { $btaste->CoDirector("Skippy Jackson") };
+  is $@, "", "Auto inflates";
+  isa_ok $btaste->CoDirector, "Director";
+  is $btaste->CoDirector->id, $sj->id, "To skippy";
 }
 
 $btaste->CoDirector($sj);
 $btaste->update;
 is($btaste->CoDirector->Name, 'Skippy Jackson', 'He co-directed');
 is(
-	$btaste->Director->Name,
-	'Peter Jackson',
-	"Didnt interfere with each other"
+  $btaste->Director->Name,
+  'Peter Jackson',
+  "Didnt interfere with each other"
 );
 
 {    # Inheriting hasa
-	my $btaste = YA::Film->retrieve('Bad Taste');
-	is(ref($btaste->Director),    'Director',       'inheriting hasa()');
-	is(ref($btaste->CoDirector),  'Director',       'inheriting hasa()');
-	is($btaste->CoDirector->Name, 'Skippy Jackson', ' ... correctly');
+  my $btaste = YA::Film->retrieve('Bad Taste');
+  is(ref($btaste->Director),    'Director',       'inheriting hasa()');
+  is(ref($btaste->CoDirector),  'Director',       'inheriting hasa()');
+  is($btaste->CoDirector->Name, 'Skippy Jackson', ' ... correctly');
 }
 
 {
-	$sj = Director->retrieve('Skippy Jackson');
-	$pj = Director->retrieve('Peter Jackson');
+  $sj = Director->retrieve('Skippy Jackson');
+  $pj = Director->retrieve('Peter Jackson');
 
-	my $fail;
-	eval {
-		$fail = YA::Film->create({
-				Title             => 'Tastes Bad',
-				Director          => $sj,
-				codirector        => $btaste,
-				Rating            => 'R',
-				NumExplodingSheep => 23
-			});
-	};
-	ok $@,    "Can't have film as codirector: $@";
-	is $fail, undef, "We didn't get anything";
+  my $fail;
+  eval {
+    $fail = YA::Film->create({
+        Title             => 'Tastes Bad',
+        Director          => $sj,
+        codirector        => $btaste,
+        Rating            => 'R',
+        NumExplodingSheep => 23
+      });
+  };
+  ok $@,    "Can't have film as codirector: $@";
+  is $fail, undef, "We didn't get anything";
 
-	my $tastes_bad = YA::Film->create({
-			Title             => 'Tastes Bad',
-			Director          => $sj,
-			codirector        => $pj,
-			Rating            => 'R',
-			NumExplodingSheep => 23
-		});
-	is($tastes_bad->Director->Name, 'Skippy Jackson', 'Director');
-	is(
-		$tastes_bad->_director_accessor->Name,
-		'Skippy Jackson',
-		'director_accessor'
-	);
-	is($tastes_bad->codirector->Name, 'Peter Jackson', 'codirector');
-	is(
-		$tastes_bad->_codirector_accessor->Name,
-		'Peter Jackson',
-		'codirector_accessor'
-	);
+  my $tastes_bad = YA::Film->create({
+      Title             => 'Tastes Bad',
+      Director          => $sj,
+      codirector        => $pj,
+      Rating            => 'R',
+      NumExplodingSheep => 23
+    });
+  is($tastes_bad->Director->Name, 'Skippy Jackson', 'Director');
+  is(
+    $tastes_bad->_director_accessor->Name,
+    'Skippy Jackson',
+    'director_accessor'
+  );
+  is($tastes_bad->codirector->Name, 'Peter Jackson', 'codirector');
+  is(
+    $tastes_bad->_codirector_accessor->Name,
+    'Peter Jackson',
+    'codirector_accessor'
+  );
 }
 
 SKIP: {
         skip "Non-standard CDBI relationships not supported by compat", 9;
-	{
+  {
 
-		YA::Film->add_relationship_type(has_a => "YA::HasA");
+    YA::Film->add_relationship_type(has_a => "YA::HasA");
 
-		package YA::HasA;
-		#use base 'Class::DBI::Relationship::HasA';
+    package YA::HasA;
+    #use base 'Class::DBI::Relationship::HasA';
 
-		sub _inflator {
-			my $self  = shift;
-			my $col   = $self->accessor;
-			my $super = $self->SUPER::_inflator($col);
+    sub _inflator {
+      my $self  = shift;
+      my $col   = $self->accessor;
+      my $super = $self->SUPER::_inflator($col);
 
-			return $super
-				unless $col eq $self->class->find_column('Director');
+      return $super
+        unless $col eq $self->class->find_column('Director');
 
-			return sub {
-				my $self = shift;
-				$self->_attribute_store($col, 'Ghostly Peter')
-					if $self->_attribute_exists($col)
-					and not defined $self->_attrs($col);
-				return &$super($self);
-			};
-		}
-	}
-	{
+      return sub {
+        my $self = shift;
+        $self->_attribute_store($col, 'Ghostly Peter')
+          if $self->_attribute_exists($col)
+          and not defined $self->_attrs($col);
+        return &$super($self);
+      };
+    }
+  }
+  {
 
-		package Rating;
+    package Rating;
 
-		sub new {
-			my ($class, $mpaa, @details) = @_;
-			bless {
-				MPAA => $mpaa,
-				WHY  => "@details"
-			}, $class;
-		}
-		sub mpaa { shift->{MPAA}; }
-		sub why  { shift->{WHY}; }
-	}
-	local *Director::mapme = sub {
-		my ($class, $val) = @_;
-		$val =~ s/Skippy/Peter/;
-		$val;
-	};
-	no warnings 'once';
-	local *Director::sanity_check = sub { $_[0]->IsInsane ? undef: $_[0] };
-	YA::Film->has_a(
-		director => 'Director',
-		inflate  => 'mapme',
-		deflate  => 'sanity_check'
-	);
-	YA::Film->has_a(
-		rating  => 'Rating',
-		inflate => sub {
-			my ($val, $parent) = @_;
-			my $sheep = $parent->find_column('NumexplodingSheep');
-			if ($parent->_attrs($sheep) || 0 > 20) {
-				return new Rating 'NC17', 'Graphic ovine violence';
-			} else {
-				return new Rating $val, 'Just because';
-			}
-		},
-		deflate => sub {
-			shift->mpaa;
-		});
+    sub new {
+      my ($class, $mpaa, @details) = @_;
+      bless {
+        MPAA => $mpaa,
+        WHY  => "@details"
+      }, $class;
+    }
+    sub mpaa { shift->{MPAA}; }
+    sub why  { shift->{WHY}; }
+  }
+  local *Director::mapme = sub {
+    my ($class, $val) = @_;
+    $val =~ s/Skippy/Peter/;
+    $val;
+  };
+  no warnings 'once';
+  local *Director::sanity_check = sub { $_[0]->IsInsane ? undef: $_[0] };
+  YA::Film->has_a(
+    director => 'Director',
+    inflate  => 'mapme',
+    deflate  => 'sanity_check'
+  );
+  YA::Film->has_a(
+    rating  => 'Rating',
+    inflate => sub {
+      my ($val, $parent) = @_;
+      my $sheep = $parent->find_column('NumexplodingSheep');
+      if ($parent->_attrs($sheep) || 0 > 20) {
+        return new Rating 'NC17', 'Graphic ovine violence';
+      } else {
+        return new Rating $val, 'Just because';
+      }
+    },
+    deflate => sub {
+      shift->mpaa;
+    });
 
-	my $tbad = YA::Film->retrieve('Tastes Bad');
+  my $tbad = YA::Film->retrieve('Tastes Bad');
 
-	isa_ok $tbad->Director, 'Director';
-	is $tbad->Director->Name, 'Peter Jackson', 'Director shuffle';
-	$tbad->Director('Skippy Jackson');
-	$tbad->update;
-	is $tbad->Director, 'Ghostly Peter', 'Sanity checked';
+  isa_ok $tbad->Director, 'Director';
+  is $tbad->Director->Name, 'Peter Jackson', 'Director shuffle';
+  $tbad->Director('Skippy Jackson');
+  $tbad->update;
+  is $tbad->Director, 'Ghostly Peter', 'Sanity checked';
 
-	isa_ok $tbad->Rating, 'Rating';
-	is $tbad->Rating->mpaa, 'NC17', 'Rating bumped';
-	$tbad->Rating(new Rating 'NS17', 'Shaken sheep');
-	no warnings 'redefine';
-	local *Director::mapme = sub {
-		my ($class, $obj) = @_;
-		$obj->isa('Film') ? $obj->Director : $obj;
-	};
+  isa_ok $tbad->Rating, 'Rating';
+  is $tbad->Rating->mpaa, 'NC17', 'Rating bumped';
+  $tbad->Rating(new Rating 'NS17', 'Shaken sheep');
+  no warnings 'redefine';
+  local *Director::mapme = sub {
+    my ($class, $obj) = @_;
+    $obj->isa('Film') ? $obj->Director : $obj;
+  };
 
-	$pj->IsInsane(0);
-	$pj->update;    # Hush warnings
+  $pj->IsInsane(0);
+  $pj->update;    # Hush warnings
 
-	ok $tbad->Director($btaste), 'Cross-class mapping';
-	is $tbad->Director, 'Peter Jackson', 'Yields PJ';
-	$tbad->update;
+  ok $tbad->Director($btaste), 'Cross-class mapping';
+  is $tbad->Director, 'Peter Jackson', 'Yields PJ';
+  $tbad->update;
 
-	$tbad = Film->retrieve('Tastes Bad');
-	ok !ref($tbad->Rating), 'Unmagical rating';
-	is $tbad->Rating, 'NS17', 'but prior change stuck';
+  $tbad = Film->retrieve('Tastes Bad');
+  ok !ref($tbad->Rating), 'Unmagical rating';
+  is $tbad->Rating, 'NS17', 'but prior change stuck';
 }
 
 { # Broken has_a declaration
-	eval { Film->has_a(driector => "Director") };
-	like $@, qr/driector/, "Sensible error from has_a with incorrect column: $@";
+  eval { Film->has_a(driector => "Director") };
+  like $@, qr/driector/, "Sensible error from has_a with incorrect column: $@";
 }
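
The core has_a contract this file keeps re-checking, stripped down (sketch; Film and Director as above):

    Film->has_a(Director   => 'Director');
    Film->has_a(CoDirector => 'Director');

    my $btaste = Film->retrieve('Bad Taste');
    $btaste->CoDirector('Skippy Jackson');    # assign a plain id string...
    isa_ok $btaste->CoDirector, 'Director';   # ...it auto-inflates on read
    $btaste->update;                          # and deflates back to the id on store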

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/19-set_sql.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/19-set_sql.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/19-set_sql.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -16,14 +16,14 @@
 use Actor;
 
 { # Check __ESSENTIAL__ expansion (RT#13038)
-	my @cols = Film->columns('Essential');
-	is_deeply \@cols, ['title'], "1 Column in essential";
-	is +Film->transform_sql('__ESSENTIAL__'), 'title', '__ESSENTIAL__ expansion';
-	
-	# This provides a more interesting test
-	Film->columns(Essential => qw(title rating));
-	is +Film->transform_sql('__ESSENTIAL__'), 'title, rating',
-	    'multi-col __ESSENTIAL__ expansion';
+  my @cols = Film->columns('Essential');
+  is_deeply \@cols, ['title'], "1 Column in essential";
+  is +Film->transform_sql('__ESSENTIAL__'), 'title', '__ESSENTIAL__ expansion';
+  
+  # This provides a more interesting test
+  Film->columns(Essential => qw(title rating));
+  is +Film->transform_sql('__ESSENTIAL__'), 'title, rating',
+      'multi-col __ESSENTIAL__ expansion';
 }
 
 my $f1 = Film->create({ title => 'A', director => 'AA', rating => 'PG' });
@@ -33,43 +33,43 @@
 my $f5 = Film->create({ title => 'E', director => 'AA', rating => '18' });
 
 Film->set_sql(
-	pgs => qq{
-	SELECT __ESSENTIAL__
-	FROM   __TABLE__
-	WHERE  __TABLE__.rating = 'PG'
-	ORDER BY title DESC 
+  pgs => qq{
+  SELECT __ESSENTIAL__
+  FROM   __TABLE__
+  WHERE  __TABLE__.rating = 'PG'
+  ORDER BY title DESC 
 }
 );
 
 {
-	(my $sth = Film->sql_pgs())->execute;
-	my @pgs = Film->sth_to_objects($sth);
-	is @pgs, 2, "Execute our own SQL";
-	is $pgs[0]->id, $f2->id, "get F2";
-	is $pgs[1]->id, $f1->id, "and F1";
+  (my $sth = Film->sql_pgs())->execute;
+  my @pgs = Film->sth_to_objects($sth);
+  is @pgs, 2, "Execute our own SQL";
+  is $pgs[0]->id, $f2->id, "get F2";
+  is $pgs[1]->id, $f1->id, "and F1";
 }
 
 {
-	my @pgs = Film->search_pgs;
-	is @pgs, 2, "SQL creates search() method";
-	is $pgs[0]->id, $f2->id, "get F2";
-	is $pgs[1]->id, $f1->id, "and F1";
+  my @pgs = Film->search_pgs;
+  is @pgs, 2, "SQL creates search() method";
+  is $pgs[0]->id, $f2->id, "get F2";
+  is $pgs[1]->id, $f1->id, "and F1";
 };
 
 Film->set_sql(
-	rating => qq{
-	SELECT __ESSENTIAL__
-	FROM   __TABLE__
-	WHERE  rating = ?
-	ORDER BY title DESC 
+  rating => qq{
+  SELECT __ESSENTIAL__
+  FROM   __TABLE__
+  WHERE  rating = ?
+  ORDER BY title DESC 
 }
 );
 
 {
-	my @pgs = Film->search_rating('18');
-	is @pgs, 2, "Can pass parameters to created search()";
-	is $pgs[0]->id, $f5->id, "F5";
-	is $pgs[1]->id, $f4->id, "and F4";
+  my @pgs = Film->search_rating('18');
+  is @pgs, 2, "Can pass parameters to created search()";
+  is $pgs[0]->id, $f5->id, "F5";
+  is $pgs[1]->id, $f4->id, "and F4";
 };
 
 {
@@ -89,44 +89,44 @@
 
 
 {
-	Actor->has_a(film => "Film");
-	Film->set_sql(
-		namerate => qq{
-		SELECT __ESSENTIAL(f)__
-		FROM   __TABLE(=f)__, __TABLE(Actor=a)__ 
-		WHERE  __JOIN(a f)__    
-		AND    a.name LIKE ?
-		AND    f.rating = ?
-		ORDER BY title 
-	}
-	);
+  Actor->has_a(film => "Film");
+  Film->set_sql(
+    namerate => qq{
+    SELECT __ESSENTIAL(f)__
+    FROM   __TABLE(=f)__, __TABLE(Actor=a)__ 
+    WHERE  __JOIN(a f)__    
+    AND    a.name LIKE ?
+    AND    f.rating = ?
+    ORDER BY title 
+  }
+  );
 
-	my $a1 = Actor->create({ name => "A1", film => $f1 });
-	my $a2 = Actor->create({ name => "A2", film => $f2 });
-	my $a3 = Actor->create({ name => "B1", film => $f1 });
+  my $a1 = Actor->create({ name => "A1", film => $f1 });
+  my $a2 = Actor->create({ name => "A2", film => $f2 });
+  my $a3 = Actor->create({ name => "B1", film => $f1 });
 
-	my @apg = Film->search_namerate("A_", "PG");
-	is @apg, 2, "2 Films with A* that are PG";
-	is $apg[0]->title, "A", "A";
-	is $apg[1]->title, "B", "and B";
+  my @apg = Film->search_namerate("A_", "PG");
+  is @apg, 2, "2 Films with A* that are PG";
+  is $apg[0]->title, "A", "A";
+  is $apg[1]->title, "B", "and B";
 }
 
 {    # join in reverse
-	Actor->has_a(film => "Film");
-	Film->set_sql(
-		ratename => qq{
-		SELECT __ESSENTIAL(f)__
-		FROM   __TABLE(=f)__, __TABLE(Actor=a)__ 
-		WHERE  __JOIN(f a)__    
-		AND    f.rating = ?
-		AND    a.name LIKE ?
-		ORDER BY title 
-	}
-	);
+  Actor->has_a(film => "Film");
+  Film->set_sql(
+    ratename => qq{
+    SELECT __ESSENTIAL(f)__
+    FROM   __TABLE(=f)__, __TABLE(Actor=a)__ 
+    WHERE  __JOIN(f a)__    
+    AND    f.rating = ?
+    AND    a.name LIKE ?
+    ORDER BY title 
+  }
+  );
 
-	my @apg = Film->search_ratename(PG => "A_");
-	is @apg, 2, "2 Films with A* that are PG";
-	is $apg[0]->title, "A", "A";
-	is $apg[1]->title, "B", "and B";
+  my @apg = Film->search_ratename(PG => "A_");
+  is @apg, 2, "2 Films with A* that are PG";
+  is $apg[0]->title, "A", "A";
+  is $apg[1]->title, "B", "and B";
 }
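
A compact reading of what set_sql gives you, per the expansions tested above: the __ESSENTIAL__/__TABLE__/__JOIN__ placeholders expand per class, and each named statement grows both an sql_* handle constructor and a search_* convenience method (sketch):

    Film->set_sql(pgs => qq{
        SELECT __ESSENTIAL__
        FROM   __TABLE__
        WHERE  __TABLE__.rating = 'PG'
        ORDER BY title DESC
    });

    # low level: prepared handle plus object materialization
    (my $sth = Film->sql_pgs())->execute;
    my @pgs = Film->sth_to_objects($sth);

    # high level: the generated search method, with ? placeholders
    my @r = Film->search_rating('18');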
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/21-iterator.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/21-iterator.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/21-iterator.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -17,70 +17,70 @@
 my $it_class = "DBIx::Class::ResultSet";
 
 my @film  = (
-	Film->create({ Title => 'Film 1' }),
-	Film->create({ Title => 'Film 2' }),
-	Film->create({ Title => 'Film 3' }),
-	Film->create({ Title => 'Film 4' }),
-	Film->create({ Title => 'Film 5' }),
-	Film->create({ Title => 'Film 6' }),
+  Film->create({ Title => 'Film 1' }),
+  Film->create({ Title => 'Film 2' }),
+  Film->create({ Title => 'Film 3' }),
+  Film->create({ Title => 'Film 4' }),
+  Film->create({ Title => 'Film 5' }),
+  Film->create({ Title => 'Film 6' }),
 );
 
 {
-	my $it1 = Film->retrieve_all;
-	isa_ok $it1, $it_class;
+  my $it1 = Film->retrieve_all;
+  isa_ok $it1, $it_class;
 
-	my $it2 = Film->retrieve_all;
-	isa_ok $it2, $it_class;
+  my $it2 = Film->retrieve_all;
+  isa_ok $it2, $it_class;
 
-	while (my $from1 = $it1->next) {
-		my $from2 = $it2->next;
-		is $from1->id, $from2->id, "Both iterators get $from1";
-	}
+  while (my $from1 = $it1->next) {
+    my $from2 = $it2->next;
+    is $from1->id, $from2->id, "Both iterators get $from1";
+  }
 }
 
 {
-	my $it = Film->retrieve_all;
-	is $it->first->title, "Film 1", "Film 1 first";
-	is $it->next->title, "Film 2", "Film 2 next";
-	is $it->first->title, "Film 1", "First goes back to 1";
-	is $it->next->title, "Film 2", "With 2 still next";
-	$it->reset;
-	is $it->next->title, "Film 1", "Reset brings us to film 1 again";
-	is $it->next->title, "Film 2", "And 2 is still next";
+  my $it = Film->retrieve_all;
+  is $it->first->title, "Film 1", "Film 1 first";
+  is $it->next->title, "Film 2", "Film 2 next";
+  is $it->first->title, "Film 1", "First goes back to 1";
+  is $it->next->title, "Film 2", "With 2 still next";
+  $it->reset;
+  is $it->next->title, "Film 1", "Reset brings us to film 1 again";
+  is $it->next->title, "Film 2", "And 2 is still next";
 }
 
 
 {
-	my $it = Film->retrieve_all;
-	my @slice = $it->slice(2,4);
-	is @slice, 3, "correct slice size (array)";
-	is $slice[0]->title, "Film 3", "Film 3 first";
-	is $slice[2]->title, "Film 5", "Film 5 last";
+  my $it = Film->retrieve_all;
+  my @slice = $it->slice(2,4);
+  is @slice, 3, "correct slice size (array)";
+  is $slice[0]->title, "Film 3", "Film 3 first";
+  is $slice[2]->title, "Film 5", "Film 5 last";
 }
 
 {
-	my $it = Film->retrieve_all;
-	my $slice = $it->slice(2,4);
-	isa_ok $slice, $it_class, "slice as iterator";
-	is $slice->count, 3,"correct slice size (array)";
-	is $slice->first->title, "Film 3", "Film 3 first";
-	is $slice->next->title, "Film 4", "Film 4 next";
-	is $slice->first->title, "Film 3", "First goes back to 3";
-	is $slice->next->title, "Film 4", "With 4 still next";
-	$slice->reset;
-	is $slice->next->title, "Film 3", "Reset brings us to film 3 again";
-	is $slice->next->title, "Film 4", "And 4 is still next";
+  my $it = Film->retrieve_all;
+  my $slice = $it->slice(2,4);
+  isa_ok $slice, $it_class, "slice as iterator";
+  is $slice->count, 3,"correct slice size (array)";
+  is $slice->first->title, "Film 3", "Film 3 first";
+  is $slice->next->title, "Film 4", "Film 4 next";
+  is $slice->first->title, "Film 3", "First goes back to 3";
+  is $slice->next->title, "Film 4", "With 4 still next";
+  $slice->reset;
+  is $slice->next->title, "Film 3", "Reset brings us to film 3 again";
+  is $slice->next->title, "Film 4", "And 4 is still next";
 
-	# check if the original iterator still works
-	is $it->count, 6, "back to the original iterator, is of right size";
-	is $it->first->title, "Film 1", "Film 1 first";
-	is $it->next->title, "Film 2", "Film 2 next";
-	is $it->first->title, "Film 1", "First goes back to 1";
-	is $it->next->title, "Film 2", "With 2 still next";
-	is $it->next->title, "Film 3", "Film 3 is still in original Iterator";
-	$it->reset;
-	is $it->next->title, "Film 1", "Reset brings us to film 1 again";
-	is $it->next->title, "Film 2", "And 2 is still next";
+  # check if the original iterator still works
+  is $it->count, 6, "back to the original iterator, is of right size";
+  is $it->first->title, "Film 1", "Film 1 first";
+  is $it->next->title, "Film 2", "Film 2 next";
+  is $it->first->title, "Film 1", "First goes back to 1";
+  is $it->next->title, "Film 2", "With 2 still next";
+  is $it->next->title, "Film 3", "Film 3 is still in original Iterator";
+  $it->reset;
+  is $it->next->title, "Film 1", "Reset brings us to film 1 again";
+  is $it->next->title, "Film 2", "And 2 is still next";
 }
 
 {

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/22-deflate_order.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/22-deflate_order.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/22-deflate_order.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -9,15 +9,17 @@
     next;
 }
 
+plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
+  unless ($ENV{DBICTEST_MYSQL_DSN} && $ENV{DBICTEST_MYSQL_USER});
+
 eval { require Time::Piece::MySQL };
 plan skip_all => "Need Time::Piece::MySQL for this test" if $@;
 
+plan tests => 3;
+
 use lib 't/cdbi/testlib';
-eval { require 't/cdbi/testlib/Log.pm' };
-plan skip_all => "Need MySQL for this test" if $@;
+use_ok ('Log');
 
-plan tests => 2;
-
 package main;
 
 my $log = Log->insert( { message => 'initial message' } );
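
The reshuffled preamble matters for ordering: the MySQL env guard must run before Log.pm is loaded, because Log inherits from MyBase.pm, which connects to MySQL at compile time (see its diff below). Sketch of the resulting guard:

    plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
      unless $ENV{DBICTEST_MYSQL_DSN} && $ENV{DBICTEST_MYSQL_USER};

    plan tests => 3;     # use_ok now counts as the first of the three
    use_ok('Log');       # safe: the guard above has already run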

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/26-mutator.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/26-mutator.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/26-mutator.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -8,40 +8,40 @@
 }
 
 BEGIN {
-	eval "use DBD::SQLite";
-	plan $@
-		? (skip_all => 'needs DBD::SQLite for testing')
-		: (tests => 6);
+  eval "use DBD::SQLite";
+  plan $@
+    ? (skip_all => 'needs DBD::SQLite for testing')
+    : (tests => 6);
 }
 
 use lib 't/cdbi/testlib';
 require Film;
 
 sub Film::accessor_name_for {
-	my ($class, $col) = @_;
-	return "sheep" if lc $col eq "numexplodingsheep";
-	return $col;
+  my ($class, $col) = @_;
+  return "sheep" if lc $col eq "numexplodingsheep";
+  return $col;
 }
 
 my $data = {
-	Title    => 'Bad Taste',
-	Director => 'Peter Jackson',
-	Rating   => 'R',
+  Title    => 'Bad Taste',
+  Director => 'Peter Jackson',
+  Rating   => 'R',
 };
 
 my $bt;
 eval {
-	my $data = $data;
-	$data->{sheep} = 1;
-	ok $bt = Film->insert($data), "Modified accessor - with  
+  my $data = $data;
+  $data->{sheep} = 1;
+  ok $bt = Film->insert($data), "Modified accessor - with  
 accessor";
-	isa_ok $bt, "Film";
+  isa_ok $bt, "Film";
 };
 is $@, '', "No errors";
 
 eval {
-	ok $bt->sheep(2), 'Modified accessor, set';
-	ok $bt->update, 'Update';
+  ok $bt->sheep(2), 'Modified accessor, set';
+  ok $bt->update, 'Update';
 };
 is $@, '', "No errors";
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/30-pager.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/30-pager.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/30-pager.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -15,11 +15,11 @@
 use Film;
 
 my @film  = (
-	Film->create({ Title => 'Film 1' }),
-	Film->create({ Title => 'Film 2' }),
-	Film->create({ Title => 'Film 3' }),
-	Film->create({ Title => 'Film 4' }),
-	Film->create({ Title => 'Film 5' }),
+  Film->create({ Title => 'Film 1' }),
+  Film->create({ Title => 'Film 2' }),
+  Film->create({ Title => 'Film 3' }),
+  Film->create({ Title => 'Film 4' }),
+  Film->create({ Title => 'Film 5' }),
 );
 
 # first page
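
For orientation, in plain DBIx::Class terms the paging this test drives looks like the following (a sketch against a generic $schema resultset, not this test's exact CDBI-compat API; the 'Film' source name is illustrative):

    my $rs = $schema->resultset('Film')->search({}, { rows => 2, page => 1 });
    my @first_page = $rs->all;            # Film 1, Film 2
    my $pager      = $rs->pager;          # a Data::Page object
    my $pages      = $pager->last_page;   # 3 pages for 5 rows at 2 per page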

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/98-failure.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/98-failure.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/98-failure.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -21,42 +21,42 @@
 Film->create_test_film;
 
 {
-	my $btaste = Film->retrieve('Bad Taste');
-	isa_ok $btaste, 'Film', "We have Bad Taste";
-	{
-		no warnings 'redefine';
-		local *DBIx::ContextualFetch::st::execute = sub { die "Database died" };
-		eval { $btaste->delete };
-		::like $@, qr/Database died/s, "We failed";
-	}
-	my $still = Film->retrieve('Bad Taste');
-	isa_ok $btaste, 'Film', "We still have Bad Taste";
+  my $btaste = Film->retrieve('Bad Taste');
+  isa_ok $btaste, 'Film', "We have Bad Taste";
+  {
+    no warnings 'redefine';
+    local *DBIx::ContextualFetch::st::execute = sub { die "Database died" };
+    eval { $btaste->delete };
+    ::like $@, qr/Database died/s, "We failed";
+  }
+  my $still = Film->retrieve('Bad Taste');
+  isa_ok $btaste, 'Film', "We still have Bad Taste";
 }
 
 {
-	my $btaste = Film->retrieve('Bad Taste');
-	isa_ok $btaste, 'Film', "We have Bad Taste";
-	$btaste->numexplodingsheep(10);
-	{
-		no warnings 'redefine';
-		local *DBIx::ContextualFetch::st::execute = sub { die "Database died" };
-		eval { $btaste->update };
-		::like $@, qr/Database died/s, "We failed";
-	}
-	$btaste->discard_changes;
-	my $still = Film->retrieve('Bad Taste');
-	isa_ok $btaste, 'Film', "We still have Bad Taste";
-	is $btaste->numexplodingsheep, 1, "with 1 sheep";
+  my $btaste = Film->retrieve('Bad Taste');
+  isa_ok $btaste, 'Film', "We have Bad Taste";
+  $btaste->numexplodingsheep(10);
+  {
+    no warnings 'redefine';
+    local *DBIx::ContextualFetch::st::execute = sub { die "Database died" };
+    eval { $btaste->update };
+    ::like $@, qr/Database died/s, "We failed";
+  }
+  $btaste->discard_changes;
+  my $still = Film->retrieve('Bad Taste');
+  isa_ok $btaste, 'Film', "We still have Bad Taste";
+  is $btaste->numexplodingsheep, 1, "with 1 sheep";
 }
 
 if (0) {
-	my $sheep = Film->maximum_value_of('numexplodingsheep');
-	is $sheep, 1, "1 exploding sheep";
-	{
-		local *DBIx::ContextualFetch::st::execute = sub { die "Database died" };
-		my $sheep = eval { Film->maximum_value_of('numexplodingsheep') };
-		::like $@, qr/select.*Database died/s,
-			"Handle database death in single value select";
-	}
+  my $sheep = Film->maximum_value_of('numexplodingsheep');
+  is $sheep, 1, "1 exploding sheep";
+  {
+    local *DBIx::ContextualFetch::st::execute = sub { die "Database died" };
+    my $sheep = eval { Film->maximum_value_of('numexplodingsheep') };
+    ::like $@, qr/select.*Database died/s,
+      "Handle database death in single value select";
+  }
 }
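
The fault-injection idiom used throughout this file, isolated (sketch; $btaste as above): localizing the statement-handle execute gives one lexical block a database that always dies, and discard_changes restores the object afterwards.

    {
        no warnings 'redefine';
        local *DBIx::ContextualFetch::st::execute = sub { die "Database died" };
        eval { $btaste->update };
        ::like $@, qr/Database died/s, "We failed";
    }
    $btaste->discard_changes;   # drop the unsaved change and
                                # resync the object with the real row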
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/abstract/search_where.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/abstract/search_where.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/abstract/search_where.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -14,8 +14,8 @@
 }
 
 INIT {
-	use lib 't/cdbi/testlib';
-	use Film;
+  use lib 't/cdbi/testlib';
+  use Film;
 }
 
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Actor.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Actor.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Actor.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -16,12 +16,12 @@
 sub mutator_name_for { "set_$_[1]" }
 
 sub create_sql {
-	return qq{
-		id     INTEGER PRIMARY KEY,
-		name   CHAR(40),
-		film   VARCHAR(255),   
-		salary INT
-	}
+  return qq{
+    id     INTEGER PRIMARY KEY,
+    name   CHAR(40),
+    film   VARCHAR(255),   
+    salary INT
+  }
 }
 
 1;
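
Actor.pm is also where the mutator_name_for hook lives: combined with the column list it splits reads and writes by name (sketch; whether a given column gets the split is up to the compat layer, so treat set_salary as illustrative):

    sub mutator_name_for { "set_$_[1]" }     # as declared above

    my $a = Actor->create({ name => 'A1', salary => 10 });
    $a->set_salary(20);       # writer named by mutator_name_for
    print $a->salary, "\n";   # reader keeps the plain column name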

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/ActorAlias.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/ActorAlias.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/ActorAlias.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -14,11 +14,11 @@
 __PACKAGE__->has_a( alias => 'Actor' );
 
 sub create_sql {
-	return qq{
-		id    INTEGER PRIMARY KEY,
-		actor INTEGER,
-		alias INTEGER
-	}
+  return qq{
+    id    INTEGER PRIMARY KEY,
+    actor INTEGER,
+    alias INTEGER
+  }
 }
 
 1;

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Binary.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Binary.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Binary.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,16 +0,0 @@
-package # hide from PAUSE
-    Binary;
-
-use strict;
-use base 'PgBase';
-
-__PACKAGE__->table(cdbibintest => 'cdbibintest');
-__PACKAGE__->sequence('binseq');
-__PACKAGE__->columns(All => qw(id bin));
-
-# __PACKAGE__->data_type(bin => DBI::SQL_BINARY);
-
-sub schema { "id INTEGER, bin BYTEA" }
-
-1;
-

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Blurb.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Blurb.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Blurb.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -9,9 +9,9 @@
 __PACKAGE__->columns('Blurb',   qw/ blurb/);
 
 sub create_sql {
-	return qq{
-			title                   VARCHAR(255) PRIMARY KEY,
-			blurb                   VARCHAR(255) NOT NULL
+  return qq{
+      title                   VARCHAR(255) PRIMARY KEY,
+      blurb                   VARCHAR(255) NOT NULL
   }
 }
 


Property changes on: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/DBIC/Test/SQLite.pm
___________________________________________________________________
Name: svn:eol-style
   - native

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Director.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Director.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Director.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -8,11 +8,11 @@
 __PACKAGE__->columns('All' => qw/ Name Birthday IsInsane /);
 
 sub create_sql {
-	return qq{
-			name                    VARCHAR(80),
-			birthday                INTEGER,
-			isinsane                INTEGER
-	};
+  return qq{
+      name                    VARCHAR(80),
+      birthday                INTEGER,
+      isinsane                INTEGER
+  };
 }
 
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Film.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Film.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Film.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -11,23 +11,23 @@
 __PACKAGE__->columns('Other',     qw( Rating NumExplodingSheep HasVomit ));
 
 sub create_sql {
-	return qq{
-		title                   VARCHAR(255),
-		director                VARCHAR(80),
-		codirector              VARCHAR(80),
-		rating                  CHAR(5),
-		numexplodingsheep       INTEGER,
-		hasvomit                CHAR(1)
+  return qq{
+    title                   VARCHAR(255),
+    director                VARCHAR(80),
+    codirector              VARCHAR(80),
+    rating                  CHAR(5),
+    numexplodingsheep       INTEGER,
+    hasvomit                CHAR(1)
   }
 }
 
 sub create_test_film { 
-	return shift->create({
-		Title             => 'Bad Taste',
-		Director          => 'Peter Jackson',
-		Rating            => 'R',
-		NumExplodingSheep => 1,
-	});
+  return shift->create({
+    Title             => 'Bad Taste',
+    Director          => 'Peter Jackson',
+    Rating            => 'R',
+    NumExplodingSheep => 1,
+  });
 }
 
 package DeletingFilm;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Lazy.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Lazy.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Lazy.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -12,14 +12,14 @@
 __PACKAGE__->columns('vertical',  qw(oop opop));
 
 sub create_sql {
-	return qq{
-		this INTEGER,
-		that INTEGER,
-		eep  INTEGER,
-		orp  INTEGER,
-		oop  INTEGER,
-		opop INTEGER
-	};
+  return qq{
+    this INTEGER,
+    that INTEGER,
+    eep  INTEGER,
+    orp  INTEGER,
+    oop  INTEGER,
+    opop INTEGER
+  };
 }
 
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Log.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Log.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Log.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -10,21 +10,21 @@
 __PACKAGE__->set_table();
 __PACKAGE__->columns(All => qw/id message datetime_stamp/);
 __PACKAGE__->has_a(
-	datetime_stamp => 'Time::Piece',
-	inflate        => 'from_mysql_datetime',
-	deflate        => 'mysql_datetime'
+  datetime_stamp => 'Time::Piece',
+  inflate        => 'from_mysql_datetime',
+  deflate        => 'mysql_datetime'
 );
 
 __PACKAGE__->add_trigger(before_create => \&set_dts);
 __PACKAGE__->add_trigger(before_update => \&set_dts);
 
 sub set_dts {
-	shift->datetime_stamp(
-		POSIX::strftime('%Y-%m-%d %H:%M:%S', localtime(time)));
+  shift->datetime_stamp(
+    POSIX::strftime('%Y-%m-%d %H:%M:%S', localtime(time)));
 }
 
 sub create_sql {
-	return qq{
+  return qq{
     id             INT UNSIGNED AUTO_INCREMENT PRIMARY KEY,
     message        VARCHAR(255),
     datetime_stamp DATETIME

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyBase.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyBase.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyBase.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -17,30 +17,30 @@
 __PACKAGE__->connection(@connect);
 
 sub set_table {
-	my $class = shift;
-	$class->table($class->create_test_table);
+  my $class = shift;
+  $class->table($class->create_test_table);
 }
 
 sub create_test_table {
-	my $self   = shift;
-	my $table  = $self->next_available_table;
-	my $create = sprintf "CREATE TABLE $table ( %s )", $self->create_sql;
-	push @table, $table;
-	$dbh->do($create);
-	return $table;
+  my $self   = shift;
+  my $table  = $self->next_available_table;
+  my $create = sprintf "CREATE TABLE $table ( %s )", $self->create_sql;
+  push @table, $table;
+  $dbh->do($create);
+  return $table;
 }
 
 sub next_available_table {
-	my $self   = shift;
-	my @tables = sort @{
-		$dbh->selectcol_arrayref(
-			qq{
+  my $self   = shift;
+  my @tables = sort @{
+    $dbh->selectcol_arrayref(
+      qq{
     SHOW TABLES
   }
-		)
-		};
-	my $table = $tables[-1] || "aaa";
-	return "z$table";
+    )
+    };
+  my $table = $tables[-1] || "aaa";
+  return "z$table";
 }
 
 1;
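
The table-allocation scheme above deserves a gloss: each call lists the existing tables and prefixes the lexically last one with 'z', so consecutive test classes get aaa, zaaa, zzaaa, ... and never collide within one database (condensed sketch; $dbh as above):

    my @tables = sort @{ $dbh->selectcol_arrayref('SHOW TABLES') };
    my $next   = 'z' . ($tables[-1] || 'aaa');   # aaa, zaaa, zzaaa, ...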

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyFilm.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyFilm.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyFilm.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -16,7 +16,7 @@
 sub stars { map $_->star, shift->_stars }
 
 sub create_sql {
-	return qq{
+  return qq{
     filmid  TINYINT NOT NULL AUTO_INCREMENT PRIMARY KEY,
     title   VARCHAR(255)
   };

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyFoo.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyFoo.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyFoo.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -10,14 +10,14 @@
 __PACKAGE__->set_table();
 __PACKAGE__->columns(All => qw/myid name val tdate/);
 __PACKAGE__->has_a(
-	tdate   => 'Date::Simple',
-	inflate => sub { Date::Simple->new(shift) },
-	deflate => 'format',
+  tdate   => 'Date::Simple',
+  inflate => sub { Date::Simple->new(shift) },
+  deflate => 'format',
 );
 #__PACKAGE__->find_column('tdate')->placeholder("IF(1, CURDATE(), ?)");
 
 sub create_sql {
-	return qq{
+  return qq{
     myid mediumint not null auto_increment primary key,
     name varchar(50) not null default '',
     val  char(1) default 'A',

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyStar.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyStar.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyStar.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -12,10 +12,10 @@
 # sub films { map $_->film, shift->_films }
 
 sub create_sql {
-	return qq{
-		starid  TINYINT NOT NULL AUTO_INCREMENT PRIMARY KEY,
-		name   VARCHAR(255)
-	};
+  return qq{
+    starid  TINYINT NOT NULL AUTO_INCREMENT PRIMARY KEY,
+    name   VARCHAR(255)
+  };
 }
 
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyStarLink.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyStarLink.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyStarLink.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -11,7 +11,7 @@
 __PACKAGE__->has_a(star  => 'MyStar');
 
 sub create_sql {
-	return qq{
+  return qq{
     linkid  TINYINT NOT NULL AUTO_INCREMENT PRIMARY KEY,
     film    TINYINT NOT NULL,
     star    TINYINT NOT NULL

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyStarLinkMCPK.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyStarLinkMCPK.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/MyStarLinkMCPK.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -18,7 +18,7 @@
 __PACKAGE__->has_a(star => 'MyStar');
 
 sub create_sql {
-	return qq{
+  return qq{
     film    INTEGER NOT NULL,
     star    INTEGER NOT NULL,
     PRIMARY KEY (film, star)

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Order.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Order.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/Order.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -10,10 +10,10 @@
 __PACKAGE__->columns(Others  => qw/orders/);
 
 sub create_sql {
-	return qq{
-		film     VARCHAR(255),
-		orders   INTEGER
-	};
+  return qq{
+    film     VARCHAR(255),
+    orders   INTEGER
+  };
 }
 
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/OtherFilm.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/OtherFilm.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/OtherFilm.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,4 +1,4 @@
-package # hide from PAUSE 
+package # hide from PAUSE
     OtherFilm;
 
 use strict;
@@ -7,14 +7,14 @@
 __PACKAGE__->set_table('Different_Film');
 
 sub create_sql {
-	return qq{
-		title                   VARCHAR(255),
-		director                VARCHAR(80),
-		codirector              VARCHAR(80),
-		rating                  CHAR(5),
-		numexplodingsheep       INTEGER,
-		hasvomit                CHAR(1)
-	};
+  return qq{
+    title                   VARCHAR(255),
+    director                VARCHAR(80),
+    codirector              VARCHAR(80),
+    rating                  CHAR(5),
+    numexplodingsheep       INTEGER,
+    hasvomit                CHAR(1)
+  };
 }
 
 1;

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/PgBase.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/PgBase.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/cdbi/testlib/PgBase.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,23 +0,0 @@
-package # hide from PAUSE 
-    PgBase;
-
-use strict;
-use base 'DBIx::Class::CDBICompat';
-
-my $db   = $ENV{DBD_PG_DBNAME} || 'template1';
-my $user = $ENV{DBD_PG_USER}   || 'postgres';
-my $pass = $ENV{DBD_PG_PASSWD} || '';
-
-__PACKAGE__->connection("dbi:Pg:dbname=$db", $user, $pass,
-	{ AutoCommit => 1 });
-
-sub CONSTRUCT {
-	my $class = shift;
-	my ($table, $sequence) = ($class->table, $class->sequence || "");
-	my $schema = $class->schema;
-	$class->db_Main->do("CREATE TEMPORARY SEQUENCE $sequence") if $sequence;
-	$class->db_Main->do("CREATE TEMPORARY TABLE $table ( $schema )");
-}
-
-1;
-

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/count/count_rs.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/count/count_rs.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/count/count_rs.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -33,9 +33,8 @@
     \@bind,
     'SELECT COUNT( * )
       FROM cd me
-      LEFT JOIN track tracks ON tracks.cd = me.cdid
+      JOIN track tracks ON tracks.cd = me.cdid
       JOIN cd disc ON disc.cdid = tracks.cd
-      LEFT JOIN lyrics lyrics ON lyrics.track_id = tracks.trackid 
      WHERE ( ( position = ? OR position = ? ) )
     ',
     [ qw/'1' '2'/ ],
@@ -51,9 +50,8 @@
        FROM (
         SELECT tracks.trackid
           FROM cd me
-          LEFT JOIN track tracks ON tracks.cd = me.cdid
+          JOIN track tracks ON tracks.cd = me.cdid
           JOIN cd disc ON disc.cdid = tracks.cd
-          LEFT JOIN lyrics lyrics ON lyrics.track_id = tracks.trackid 
         WHERE ( ( position = ? OR position = ? ) )
         LIMIT 3 OFFSET 8
        ) count_subq
@@ -85,7 +83,7 @@
       FROM (
         SELECT cds.cdid
           FROM artist me
-          LEFT JOIN cd cds ON cds.artist = me.artistid
+          JOIN cd cds ON cds.artist = me.artistid
           LEFT JOIN track tracks ON tracks.cd = cds.cdid
           JOIN artist artist ON artist.artistid = cds.artist
         WHERE tracks.position = ? OR tracks.position = ?
@@ -105,7 +103,7 @@
       FROM (
         SELECT cds.cdid
           FROM artist me
-          LEFT JOIN cd cds ON cds.artist = me.artistid
+          JOIN cd cds ON cds.artist = me.artistid
           LEFT JOIN track tracks ON tracks.cd = cds.cdid
           JOIN artist artist ON artist.artistid = cds.artist
         WHERE tracks.position = ? OR tracks.position = ?

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/count/distinct.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/count/distinct.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/count/distinct.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -11,8 +11,6 @@
 
 my $schema = DBICTest->init_schema();
 
-plan tests => 58;
-
 # The tag Blue is assigned to cds 1 2 3 and 5
 # The tag Cheesy is assigned to cds 2 4 and 5
 #
@@ -80,23 +78,40 @@
   is($get_count->($rs), 3, 'Count by distinct function result as select literal');
 }
 
-eval {
-  my @warnings;
-  local $SIG{__WARN__} = sub { $_[0] =~ /The select => { distinct => ... } syntax will be deprecated/ 
-    ? push @warnings, @_
-    : warn @_
-  };
-  my $row = $schema->resultset('Tag')->search({}, { select => { distinct => 'tag' } })->first;
-  is (@warnings, 1, 'Warned about deprecated distinct') if $DBIx::Class::VERSION < 0.09;
-};
-ok ($@, 'Exception on deprecated distinct usage thrown') if $DBIx::Class::VERSION >= 0.09;
-
 throws_ok(
   sub { my $row = $schema->resultset('Tag')->search({}, { select => { distinct => [qw/tag cd/] } })->first },
   qr/select => { distinct => \.\.\. } syntax is not supported for multiple columns/,
   'throw on unsupported syntax'
 );
 
+# make sure distinct+func works
+{
+  my $rs = $schema->resultset('Artist')->search(
+    {},
+    {
+      join => 'cds',
+      distinct => 1,
+      '+select' => [ { count => 'cds.cdid', -as => 'amount_of_cds' } ],
+      '+as' => [qw/num_cds/],
+      order_by => { -desc => 'amount_of_cds' },
+    }
+  );
+
+  is_same_sql_bind (
+    $rs->as_query,
+    '(
+      SELECT me.artistid, me.name, me.rank, me.charfield, COUNT( cds.cdid ) AS amount_of_cds
+        FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid
+      GROUP BY me.artistid, me.name, me.rank, me.charfield
+      ORDER BY amount_of_cds DESC
+    )',
+    [],
+  );
+
+  is ($rs->next->get_column ('num_cds'), 3, 'Function aliased correctly');
+}
+
 # These two rely on the database to throw an exception. This might not be the case one day. Please revise.
 dies_ok(sub { my $count = $schema->resultset('Tag')->search({}, { '+select' => \'tagid AS tag_id', distinct => 1 })->count }, 'expecting to die');
-dies_ok(sub { my $count = $schema->resultset('Tag')->search({}, { select => { length => 'tag' }, distinct => 1 })->count }, 'expecting to die');
+
+done_testing;
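
A note on the new distinct+func block above: distinct => 1 makes the resultset
GROUP BY every column in the select list, so an aggregate added via '+select'
is computed per group; the -as alias is usable in order_by, while the '+as'
name is what get_column reads. A minimal sketch of the same idiom against the
test schema (variable and alias names here are illustrative, not from the
commit):

  my $rs = $schema->resultset('Artist')->search({}, {
    join      => 'cds',
    distinct  => 1,    # implies GROUP BY over all selected artist columns
    '+select' => [ { count => 'cds.cdid', -as => 'cd_count' } ],
    '+as'     => [ 'cd_count' ],
    order_by  => { -desc => 'cd_count' },
  });

  while (my $artist = $rs->next) {
    printf "%s: %d cd(s)\n", $artist->name, $artist->get_column('cd_count');
  }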

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/count/grouped_pager.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/count/grouped_pager.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/count/grouped_pager.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -11,8 +11,6 @@
 
 my $schema = DBICTest->init_schema();
 
-use Data::Dumper;
-
 # add 2 extra artists
 $schema->populate ('Artist', [
     [qw/name/],

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/count/in_subquery.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/count/in_subquery.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/count/in_subquery.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,8 +3,6 @@
 use strict;
 use warnings;
 
-use Data::Dumper;
-
 use Test::More;
 
 plan ( tests => 1 );

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/count/joined.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/count/joined.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/count/joined.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -7,7 +7,7 @@
 
 use DBICTest;
 
-plan tests => 3;
+plan tests => 7;
 
 my $schema = DBICTest->init_schema();
 
@@ -26,6 +26,12 @@
   "Count correct with requested distinct collapse of main table"
 );
 
+# Issues with JOIN vs. LEFT JOIN handling have caused counted rows and fetched rows
+# to sometimes come back 1 higher than they should be in the related resultset.
+my $artist = $schema->resultset('Artist')->create({ name => 'xxx' });
+is($artist->related_resultset('cds')->count(), 0, "No CDs found for a shiny new artist");
+is(scalar($artist->related_resultset('cds')->all()), 0, "No CDs fetched for a shiny new artist");
 
-
-
+my $artist_rs = $schema->resultset('Artist')->search({artistid => $artist->id});
+is($artist_rs->related_resultset('cds')->count(), 0, "No CDs counted for a shiny new artist using a resultset search");
+is(scalar($artist_rs->related_resultset('cds')->all), 0, "No CDs fetched for a shiny new artist using a resultset search");
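
The off-by-one the new assertions guard against is the classic outer-join
artifact: a parent with no children still produces one all-NULL child row
under a LEFT JOIN, so a COUNT(*) over the join reports 1 where 0 is expected.
Illustrative sketch against the test schema (not code from this commit):

  my $fresh = $schema->resultset('Artist')->create({ name => 'no cds yet' });

  # A LEFT JOIN based count would see the single row of NULL cd columns:
  #   SELECT COUNT( * ) FROM artist me
  #     LEFT JOIN cd cds ON cds.artist = me.artistid
  #   WHERE me.artistid = ?            -- returns 1, not 0
  # whereas the related resultset must restrict itself to actual cd rows:
  is ($fresh->related_resultset('cds')->count, 0, 'no phantom NULL row counted');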

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/count/prefetch.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/count/prefetch.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/count/prefetch.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -6,10 +6,7 @@
 use Test::More;
 use DBICTest;
 use DBIC::SqlMakerTest;
-use DBIC::DebugObj;
 
-plan tests => 6;
-
 my $schema = DBICTest->init_schema();
 
 # collapsing prefetch
@@ -20,20 +17,55 @@
                 { prefetch => [qw/tracks artist/] },
             );
   is ($rs->all, 5, 'Correct number of objects');
+  is ($rs->count, 5, 'Correct count');
 
+  is_same_sql_bind (
+    $rs->count_rs->as_query,
+    '(
+      SELECT COUNT( * )
+        FROM (
+          SELECT cds.cdid
+            FROM artist me
+            JOIN cd cds ON cds.artist = me.artistid
+            LEFT JOIN track tracks ON tracks.cd = cds.cdid
+            JOIN artist artist ON artist.artistid = cds.artist
+          WHERE tracks.position = ? OR tracks.position = ?
+          GROUP BY cds.cdid
+        ) count_subq
+    )',
+    [ map { [ 'tracks.position' => $_ ] } (1, 2) ],
+  );
+}
 
-  my ($sql, @bind);
-  $schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind));
-  $schema->storage->debug(1);
+# collapsing prefetch with distinct
+{
+  my $rs = $schema->resultset("Artist")->search(undef, {distinct => 1})
+            ->search_related('cds')->search_related('genre',
+                { 'genre.name' => 'emo' },
+                { prefetch => q(cds) },
+            );
+  is ($rs->all, 1, 'Correct number of objects');
+  is ($rs->count, 1, 'Correct count');
 
-
-  is ($rs->count, 5, 'Correct count');
-
   is_same_sql_bind (
-    $sql,
-    \@bind,
-    'SELECT COUNT( * ) FROM (SELECT cds.cdid FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid LEFT JOIN track tracks ON tracks.cd = cds.cdid JOIN artist artist ON artist.artistid = cds.artist WHERE tracks.position = ? OR tracks.position = ? GROUP BY cds.cdid) count_subq',
-    [ qw/'1' '2'/ ],
+    $rs->count_rs->as_query,
+    '(
+      SELECT COUNT( * )
+        FROM (
+          SELECT genre.genreid
+            FROM (
+              SELECT me.artistid, me.name, me.rank, me.charfield
+                FROM artist me
+              GROUP BY me.artistid, me.name, me.rank, me.charfield
+            ) me
+            JOIN cd cds ON cds.artist = me.artistid
+            JOIN genre genre ON genre.genreid = cds.genreid
+          WHERE ( genre.name = ? )
+          GROUP BY genre.genreid
+        )
+      count_subq
+    )',
+    [ [ 'genre.name' => 'emo' ] ],
   );
 }
 
@@ -41,23 +73,26 @@
 {
   my $rs = $schema->resultset("CD")
             ->search_related('tracks',
-                { position => [1,2] },
+                { position => [1,2], 'lyrics.lyric_id' => undef },
                 { prefetch => [qw/disc lyrics/] },
             );
   is ($rs->all, 10, 'Correct number of objects');
 
 
-  my ($sql, @bind);
-  $schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind));
-  $schema->storage->debug(1);
-
-
   is ($rs->count, 10, 'Correct count');
 
   is_same_sql_bind (
-    $sql,
-    \@bind,
-    'SELECT COUNT( * ) FROM cd me LEFT JOIN track tracks ON tracks.cd = me.cdid JOIN cd disc ON disc.cdid = tracks.cd LEFT JOIN lyrics lyrics ON lyrics.track_id = tracks.trackid WHERE ( ( position = ? OR position = ? ) )',
-    [ qw/'1' '2'/ ],
+    $rs->count_rs->as_query,
+    '(
+      SELECT COUNT( * )
+        FROM cd me
+        JOIN track tracks ON tracks.cd = me.cdid
+        JOIN cd disc ON disc.cdid = tracks.cd
+        LEFT JOIN lyrics lyrics ON lyrics.track_id = tracks.trackid
+      WHERE lyrics.lyric_id IS NULL AND (position = ? OR position = ?)
+    )',
+    [ map { [ position => $_ ] } (1, 2) ],
   );
 }
+
+done_testing;
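
The rewritten assertions above replace the DBIC::DebugObj capture with
$rs->count_rs->as_query: count_rs returns a resultset that would perform the
COUNT, and as_query exposes its SQL and bind values without ever touching the
database. Sketch (illustrative):

  my $count_rs = $rs->count_rs;

  # as_query returns a reference to [ $sql_string, @bind_values ], which
  # is exactly the shape is_same_sql_bind() accepts as its first argument
  my $query = $count_rs->as_query;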

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/count/search_related.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/count/search_related.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/count/search_related.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,41 @@
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+my $cd_rs = $schema->resultset('CD')->search ({}, { rows => 1, order_by => 'cdid' });
+
+my $track_count = $cd_rs->first->tracks->count;
+
+cmp_ok ($track_count, '>', 1, 'First CD has several tracks');
+
+is ($cd_rs->search_related ('tracks')->count, $track_count, 'related->count returns correct number chained off a limited rs');
+is (scalar ($cd_rs->search_related ('tracks')->all), $track_count, 'related->all returns correct number of objects chained off a limited rs');
+
+
+my $joined_cd_rs = $cd_rs->search ({}, {
+  join => 'tracks', rows => 2, distinct => 1, having => \ 'count(tracks.trackid) > 2',
+});
+
+my $multiple_track_count = $schema->resultset('Track')->search ({
+  cd => { -in => $joined_cd_rs->get_column ('cdid')->as_query }
+})->count;
+
+
+is (
+  $joined_cd_rs->search_related ('tracks')->count,
+  $multiple_track_count,
+  'related->count returns correct number chained off a grouped rs',
+);
+is (
+  scalar ($joined_cd_rs->search_related ('tracks')->all),
+  $multiple_track_count,
+  'related->all returns correct number of objects chained off a grouped rs',
+);
+
+done_testing;
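
Note how the grouped case above computes its expected value independently of
the code path under test: it selects the qualifying cdids as a subquery and
counts tracks whose cd is -in that set. The same subquery idiom in isolation
(illustrative):

  my $cdids  = $joined_cd_rs->get_column('cdid')->as_query;  # \[ $sql, @bind ]
  my $tracks = $schema->resultset('Track')->search({
    cd => { -in => $cdids },   # renders as: cd IN ( SELECT ... )
  });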

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/dbh_do.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/dbh_do.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/dbh_do.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,33 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use warnings;  
-
-use Test::More tests => 8;
-use lib qw(t/lib);
-use DBICTest;
-
-
-my $schema = DBICTest->init_schema();
-my $storage = $schema->storage;
-
-my $test_func = sub {
-    is $_[0], $storage;
-    is $_[1], $storage->dbh;
-    is $_[2], "foo";
-    is $_[3], "bar";
-};
-
-$storage->dbh_do(
-    $test_func,
-    "foo", "bar"
-);
-
-my $storage_class = ref $storage;
-{
-    no strict 'refs';
-    *{$storage_class .'::__test_method'} = $test_func;
-}
-$storage->dbh_do("__test_method", "foo", "bar");
-
-    
\ No newline at end of file
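
For reference, the calling convention the deleted test exercised: dbh_do
invokes its target -- a coderef or the name of a storage method -- as
($storage, $dbh, @extra_args), inside DBIC's reconnect/retry wrapper.
Illustrative sketch:

  $schema->storage->dbh_do(
    sub {
      my ($storage, $dbh, @args) = @_;   # $dbh is the raw DBI handle
      $dbh->do('UPDATE artist SET rank = rank + 1');
    },
    'extra', 'args',
  );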

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/delete/complex.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/delete/complex.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/delete/complex.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,35 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+my $artist_rs = $schema->resultset ('Artist');
+
+my $init_count = $artist_rs->count;
+ok ($init_count, 'Some artists in database');
+
+$artist_rs->populate ([
+  {
+    name => 'foo',
+  },
+  {
+    name => 'bar',
+  }
+]);
+
+is ($artist_rs->count, $init_count + 2, '2 Artists created');
+
+$artist_rs->search ({
+  -and => [
+    { 'me.artistid' => { '!=', undef } },
+    [ { 'me.name' => 'foo' }, { 'me.name' => 'bar' } ],
+  ],
+})->delete;
+
+is ($artist_rs->count, $init_count, 'Correct number of artists deleted');
+
+done_testing;
+

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/from_subquery.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/from_subquery.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/from_subquery.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,19 +1,14 @@
 use strict;
-use warnings FATAL => 'all';
+use warnings;
 
 use Test::More;
 
-BEGIN {
-    eval "use SQL::Abstract 1.49";
-    plan $@
-        ? ( skip_all => "Needs SQLA 1.49+" )
-        : ( tests => 8 );
-}
-
 use lib qw(t/lib);
 use DBICTest;
 use DBIC::SqlMakerTest;
 
+plan tests => 8;
+
 my $schema = DBICTest->init_schema();
 my $art_rs = $schema->resultset('Artist');
 my $cdrs = $schema->resultset('CD');
@@ -25,7 +20,7 @@
 
   is_same_sql_bind(
     $cdrs2->as_query,
-    "(SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE artist_id IN ( SELECT id FROM artist me LIMIT 1 ))",
+    "(SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE artist_id IN ( SELECT id FROM artist me LIMIT 1 ))",
     [],
   );
 }
@@ -78,7 +73,9 @@
 
   is_same_sql_bind(
     $rs->as_query,
-    "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE ( id > ? ) ) cd2)",
+    "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (
+        SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( id > ? )
+     ) cd2)",
     [
       [ 'id', 20 ]
     ],
@@ -124,11 +121,11 @@
 
   is_same_sql_bind(
     $rs->as_query,
-    "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track 
-      FROM 
-        (SELECT cd3.cdid,cd3.artist,cd3.title,cd3.year,cd3.genreid,cd3.single_track 
-          FROM 
-            (SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track 
+    "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track
+      FROM
+        (SELECT cd3.cdid, cd3.artist, cd3.title, cd3.year, cd3.genreid, cd3.single_track
+          FROM
+            (SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
               FROM cd me WHERE ( id < ? ) ) cd3
           WHERE ( id > ? ) ) cd2)",
     [
@@ -168,7 +165,9 @@
 
   is_same_sql_bind(
     $rs->as_query,
-    "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE ( title = ? ) ) cd2)",
+    "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (
+        SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( title = ? )
+     ) cd2)",
     [ [ 'title', 'Thriller' ] ],
   );
 }

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/core.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/core.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/core.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,7 +1,8 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
+use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
 
@@ -10,15 +11,15 @@
 eval { require DateTime };
 plan skip_all => "Need DateTime for inflation tests" if $@;
 
-plan tests => 22;
-
 $schema->class('CD') ->inflate_column( 'year',
     { inflate => sub { DateTime->new( year => shift ) },
       deflate => sub { shift->year } }
 );
 
+my $rs = $schema->resultset('CD');
+
 # inflation test
-my $cd = $schema->resultset("CD")->find(3);
+my $cd = $rs->find(3);
 
 is( ref($cd->year), 'DateTime', 'year is a DateTime, ok' );
 
@@ -46,7 +47,7 @@
 $cd->year( $now );
 $cd->update;
 
-$cd = $schema->resultset("CD")->find(3);
+$cd = $rs->find(3);
 is( $cd->year->year, $now->year, 'deflate ok' );
 
 # set_inflated_column test
@@ -54,30 +55,23 @@
 ok(!$@, 'set_inflated_column with DateTime object');
 $cd->update;
 
-$cd = $schema->resultset("CD")->find(3);                 
+$cd = $rs->find(3);
 is( $cd->year->year, $now->year, 'deflate ok' );
 
-$cd = $schema->resultset("CD")->find(3);                 
+$cd = $rs->find(3);
 my $before_year = $cd->year->year;
 eval { $cd->set_inflated_column('year', \'year + 1') };
 ok(!$@, 'set_inflated_column to "year + 1"');
 $cd->update;
 
-TODO: {
-  local $TODO = 'this was left in without a TODO - should it work?';
+$cd->store_inflated_column('year', \'year + 1');
+is_deeply( $cd->year, \'year + 1', 'scalarref deflate passthrough ok' );
 
-  eval {
-    $cd->store_inflated_column('year', \'year + 1');
-    is_deeply( $cd->year, \'year + 1', 'deflate ok' );
-  };
-  ok(!$@, 'store_inflated_column to "year + 1"');
-}
-
-$cd = $schema->resultset("CD")->find(3);                 
+$cd = $rs->find(3);
 is( $cd->year->year, $before_year+1, 'deflate ok' );
 
 # store_inflated_column test
-$cd = $schema->resultset("CD")->find(3);                 
+$cd = $rs->find(3);
 eval { $cd->store_inflated_column('year', $now) };
 ok(!$@, 'store_inflated_column with DateTime object');
 $cd->update;
@@ -85,21 +79,21 @@
 is( $cd->year->year, $now->year, 'deflate ok' );
 
 # update tests
-$cd = $schema->resultset("CD")->find(3);                 
+$cd = $rs->find(3);
 eval { $cd->update({'year' => $now}) };
 ok(!$@, 'update using DateTime object ok');
 is($cd->year->year, $now->year, 'deflate ok');
 
-$cd = $schema->resultset("CD")->find(3);                 
+$cd = $rs->find(3);
 $before_year = $cd->year->year;
 eval { $cd->update({'year' => \'year + 1'}) };
 ok(!$@, 'update using scalarref ok');
 
-$cd = $schema->resultset("CD")->find(3);                 
+$cd = $rs->find(3);
 is($cd->year->year, $before_year + 1, 'deflate ok');
 
 # discard_changes test
-$cd = $schema->resultset("CD")->find(3);                 
+$cd = $rs->find(3);
 # inflate the year
 $before_year = $cd->year->year;
 $cd->update({ year => \'year + 1'});
@@ -109,5 +103,45 @@
 
 my $copy = $cd->copy({ year => $now, title => "zemoose" });
 
-isnt( $copy->year->year, $before_year, "copy" );
- 
+is( $copy->year->year, $now->year, "copy" );
+
+
+
+my $artist = $cd->artist;
+my $sval = \ '2012';
+
+$cd = $rs->create ({
+        artist => $artist,
+        year => $sval,
+        title => 'create with scalarref',
+});
+
+is ($cd->year, $sval, 'scalar value retained');
+my $cd2 = $cd->copy ({ title => 'copy with scalar in coldata' });
+is ($cd2->year, $sval, 'copied scalar value retained');
+
+$cd->discard_changes;
+is ($cd->year->year, 2012, 'inflation upon reload');
+
+$cd2->discard_changes;
+is ($cd2->year->year, 2012, 'inflation upon reload of copy');
+
+
+my $precount = $rs->count;
+$cd = $rs->update_or_create ({artist => $artist, title => 'nonexisting update/create test row', year => $sval });
+is ($rs->count, $precount + 1, 'Row created');
+
+is ($cd->year, $sval, 'scalar value retained on creating update_or_create');
+$cd->discard_changes;
+is ($cd->year->year, 2012, 'inflation upon reload');
+
+my $sval2 = \ '2013';
+
+$cd = $rs->update_or_create ({artist => $artist, title => 'nonexisting update/create test row', year => $sval2 });
+is ($rs->count, $precount + 1, 'No more rows created');
+
+is ($cd->year, $sval2, 'scalar value retained on updating update_or_create');
+$cd->discard_changes;
+is ($cd->year->year, 2013, 'inflation upon reload');
+
+done_testing;
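
The scalar-reference assertions added above rest on a general InflateColumn
rule: literal SQL supplied as a scalar ref (\'year + 1', \'2012') bypasses
both the inflator and the deflator, and only inflates once the row has been
reloaded from the database. For reference, the registration this test sets up
near its top:

  $schema->class('CD')->inflate_column('year', {
    inflate => sub { DateTime->new( year => shift ) },  # db value -> object
    deflate => sub { shift->year },                     # object -> db value
  });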

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_determine_parser.t (from rev 6669, DBIx-Class/0.08/branches/run_file_against_storage/t/36datetime.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_determine_parser.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_determine_parser.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,28 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+eval { require DateTime::Format::SQLite };
+plan $@ ? ( skip_all => 'Requires DateTime::Format::SQLite' )
+        : ( tests => 3 );
+
+my $schema = DBICTest->init_schema(
+    no_deploy => 1, # Deploying would cause an early rebless
+);
+
+is(
+    ref $schema->storage, 'DBIx::Class::Storage::DBI',
+    'Starting with generic storage'
+);
+
+# Calling datetime_parser should cause the storage to be reblessed,
+# so that we can pick up datetime_parser_type from subclasses
+
+my $parser = $schema->storage->datetime_parser();
+
+is($parser, 'DateTime::Format::SQLite', 'Got expected storage-set datetime_parser');
+isa_ok($schema->storage, 'DBIx::Class::Storage::DBI::SQLite', 'storage');
+

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_mssql.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_mssql.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_mssql.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,85 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MSSQL_ODBC_${_}" } qw/DSN USER PASS/};
+
+if (not ($dsn && $user)) {
+  plan skip_all =>
+    'Set $ENV{DBICTEST_MSSQL_ODBC_DSN}, _USER and _PASS to run this test' .
+    "\nWarning: This test drops and creates a table called 'track'";
+} else {
+  eval "use DateTime; use DateTime::Format::Strptime;";
+  if ($@) {
+    plan skip_all => 'needs DateTime and DateTime::Format::Strptime for testing';
+  }
+  else {
+    plan tests => 4 * 2; # (tests * dt_types)
+  }
+}
+
+my $schema = DBICTest::Schema->clone;
+
+$schema->connection($dsn, $user, $pass);
+$schema->storage->ensure_connected;
+
+# coltype, column, datehash
+my @dt_types = (
+  ['DATETIME',
+   'last_updated_at',
+   {
+    year => 2004,
+    month => 8,
+    day => 21,
+    hour => 14,
+    minute => 36,
+    second => 48,
+    nanosecond => 500000000,
+  }],
+  ['SMALLDATETIME', # minute precision
+   'small_dt',
+   {
+    year => 2004,
+    month => 8,
+    day => 21,
+    hour => 14,
+    minute => 36,
+  }],
+);
+
+for my $dt_type (@dt_types) {
+  my ($type, $col, $sample_dt) = @$dt_type;
+
+  eval { $schema->storage->dbh->do("DROP TABLE track") };
+  $schema->storage->dbh->do(<<"SQL");
+CREATE TABLE track (
+ trackid INT IDENTITY PRIMARY KEY,
+ cd INT,
+ position INT,
+ $col $type,
+)
+SQL
+  ok(my $dt = DateTime->new($sample_dt));
+
+  my $row;
+  ok( $row = $schema->resultset('Track')->create({
+        $col => $dt,
+        cd => 1,
+      }));
+  ok( $row = $schema->resultset('Track')
+    ->search({ trackid => $row->trackid }, { select => [$col] })
+    ->first
+  );
+  is( $row->$col, $dt, 'DateTime roundtrip' );
+}
+
+# clean up our mess
+END {
+  if (my $dbh = eval { $schema->storage->_dbh }) {
+    $dbh->do('DROP TABLE track');
+  }
+}

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_oracle.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/73oracle_inflate.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_oracle.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_oracle.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,103 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_ORA_${_}" } qw/DSN USER PASS/};
+
+if (not ($dsn && $user && $pass)) {
+    plan skip_all => 'Set $ENV{DBICTEST_ORA_DSN}, _USER and _PASS to run this test. ' .
+         'Warning: This test drops and creates a table called \'track\'';
+}
+else {
+    eval "use DateTime; use DateTime::Format::Oracle;";
+    if ($@) {
+        plan skip_all => 'needs DateTime and DateTime::Format::Oracle for testing';
+    }
+    else {
+        plan tests => 10;
+    }
+}
+
+# DateTime::Format::Oracle needs this set
+$ENV{NLS_DATE_FORMAT} = 'DD-MON-YY';
+$ENV{NLS_TIMESTAMP_FORMAT} = 'YYYY-MM-DD HH24:MI:SSXFF';
+$ENV{NLS_LANG} = 'AMERICAN_AMERICA.WE8ISO8859P1';
+
+my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
+
+# Need to redefine the last_updated_on column
+my $col_metadata = $schema->class('Track')->column_info('last_updated_on');
+$schema->class('Track')->add_column( 'last_updated_on' => {
+    data_type => 'date' });
+$schema->class('Track')->add_column( 'last_updated_at' => {
+    data_type => 'timestamp' });
+
+my $dbh = $schema->storage->dbh;
+
+#$dbh->do("alter session set nls_timestamp_format = 'YYYY-MM-DD HH24:MI:SSXFF'");
+
+eval {
+  $dbh->do("DROP TABLE track");
+};
+$dbh->do("CREATE TABLE track (trackid NUMBER(12), cd NUMBER(12), position NUMBER(12), title VARCHAR(255), last_updated_on DATE, last_updated_at TIMESTAMP, small_dt DATE)");
+
+# insert a row to play with
+my $new = $schema->resultset('Track')->create({ trackid => 1, cd => 1, position => 1, title => 'Track1', last_updated_on => '06-MAY-07', last_updated_at => '2009-05-03 21:17:18.5' });
+is($new->trackid, 1, "insert successful");
+
+my $track = $schema->resultset('Track')->find( 1 );
+
+is( ref($track->last_updated_on), 'DateTime', "last_updated_on inflated ok");
+
+is( $track->last_updated_on->month, 5, "DateTime methods work on inflated column");
+
+#note '$track->last_updated_at => ', $track->last_updated_at;
+is( ref($track->last_updated_at), 'DateTime', "last_updated_at inflated ok");
+
+is( $track->last_updated_at->nanosecond, 500_000_000, "DateTime methods work with nanosecond precision");
+
+my $dt = DateTime->now();
+$track->last_updated_on($dt);
+$track->last_updated_at($dt);
+$track->update;
+
+is( $track->last_updated_on->month, $dt->month, "deflate ok");
+is( int $track->last_updated_at->nanosecond, int $dt->nanosecond, "deflate ok with nanosecond precision");
+
+# test datetime_setup
+
+$schema->storage->disconnect;
+
+delete $ENV{NLS_DATE_FORMAT};
+delete $ENV{NLS_TIMESTAMP_FORMAT};
+
+$schema->connection($dsn, $user, $pass, {
+    on_connect_call => 'datetime_setup'
+});
+
+$dt = DateTime->now();
+
+my $timestamp = $dt->clone;
+$timestamp->set_nanosecond( int 500_000_000 );
+
+$track = $schema->resultset('Track')->find( 1 );
+$track->update({ last_updated_on => $dt, last_updated_at => $timestamp });
+
+$track = $schema->resultset('Track')->find(1);
+
+is( $track->last_updated_on, $dt, 'DateTime round-trip as DATE' );
+is( $track->last_updated_at, $timestamp, 'DateTime round-trip as TIMESTAMP' );
+
+is( int $track->last_updated_at->nanosecond, int 500_000_000,
+  'TIMESTAMP nanoseconds survived' );
+
+# clean up our mess
+END {
+    if($schema && ($dbh = $schema->storage->dbh)) {
+        $dbh->do("DROP TABLE track");
+    }
+}
+

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_pg.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_pg.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_pg.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -13,7 +13,7 @@
 eval { require DateTime::Format::Pg };
 plan $@
   ? ( skip_all =>  'Need DateTime::Format::Pg for timestamp inflation tests')
-  : ( tests => 3 )
+  : ( tests => 6 )
 ;
 
 
@@ -27,4 +27,14 @@
   is($event->created_on->time_zone->name, "America/Chicago", "Timezone changed");
   # Time zone difference -> -6hours
   is($event->created_on->iso8601, "2009-01-15T11:00:00", "Time with TZ correct");
+
+# test 'timestamp without time zone'
+  my $dt = DateTime->from_epoch(epoch => time);
+  $dt->set_nanosecond(int 500_000_000);
+  $event->update({ts_without_tz => $dt});
+  $event->discard_changes;
+  isa_ok($event->ts_without_tz, "DateTime") or diag $event->created_on;
+  is($event->ts_without_tz, $dt, 'timestamp without time zone inflation');
+  is($event->ts_without_tz->microsecond, $dt->microsecond,
+    'timestamp without time zone microseconds survived');
 }

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_sybase.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_sybase.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_sybase.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,107 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};
+
+if (not ($dsn && $user)) {
+  plan skip_all =>
+    'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' .
+    "\nWarning: This test drops and creates a table called 'track'";
+} else {
+  eval "use DateTime; use DateTime::Format::Sybase;";
+  if ($@) {
+    plan skip_all => 'needs DateTime and DateTime::Format::Sybase for testing';
+  }
+}
+
+my @storage_types = (
+  'DBI::Sybase::ASE',
+  'DBI::Sybase::ASE::NoBindVars',
+);
+my $schema;
+
+for my $storage_type (@storage_types) {
+  $schema = DBICTest::Schema->clone;
+
+  unless ($storage_type eq 'DBI::Sybase::ASE') { # autodetect
+    $schema->storage_type("::$storage_type");
+  }
+  $schema->connection($dsn, $user, $pass, {
+    AutoCommit => 1,
+    on_connect_call => [ 'datetime_setup' ],
+  });
+
+  $schema->storage->ensure_connected;
+
+  isa_ok( $schema->storage, "DBIx::Class::Storage::$storage_type" );
+
+# coltype, col, date
+  my @dt_types = (
+    ['DATETIME', 'last_updated_at', '2004-08-21T14:36:48.080Z'],
+# minute precision
+    ['SMALLDATETIME', 'small_dt', '2004-08-21T14:36:00.000Z'],
+  );
+  
+  for my $dt_type (@dt_types) {
+    my ($type, $col, $sample_dt) = @$dt_type;
+
+    eval { $schema->storage->dbh->do("DROP TABLE track") };
+    $schema->storage->dbh->do(<<"SQL");
+CREATE TABLE track (
+   trackid INT IDENTITY PRIMARY KEY,
+   cd INT NULL,
+   position INT NULL,
+   $col $type NULL
+)
+SQL
+    ok(my $dt = DateTime::Format::Sybase->parse_datetime($sample_dt));
+
+    my $row;
+    ok( $row = $schema->resultset('Track')->create({
+          $col => $dt,
+          cd => 1,
+        }));
+    ok( $row = $schema->resultset('Track')
+      ->search({ trackid => $row->trackid }, { select => [$col] })
+      ->first
+    );
+    is( $row->$col, $dt, 'DateTime roundtrip' );
+  }
+
+  # test a computed datetime column
+  eval { $schema->storage->dbh->do("DROP TABLE track") };
+  $schema->storage->dbh->do(<<"SQL");
+CREATE TABLE track (
+   trackid INT IDENTITY PRIMARY KEY,
+   cd INT NULL,
+   position INT NULL,
+   title VARCHAR(100) NULL,
+   last_updated_on DATETIME NULL,
+   last_updated_at AS getdate(),
+   small_dt SMALLDATETIME NULL
+)
+SQL
+
+  my $now     = DateTime->now;
+  sleep 1;
+  my $new_row = $schema->resultset('Track')->create({});
+  $new_row->discard_changes;
+
+  lives_and {
+    cmp_ok (($new_row->last_updated_at - $now)->seconds, '>=', 1)
+  } 'getdate() computed column works';
+}
+
+done_testing;
+
+# clean up our mess
+END {
+  if (my $dbh = eval { $schema->storage->_dbh }) {
+    $dbh->do('DROP TABLE track');
+  }
+}

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_sybase_asa.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_sybase_asa.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/datetime_sybase_asa.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,89 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my ($dsn, $user, $pass)    = @ENV{map { "DBICTEST_SYBASE_ASA_${_}" }      qw/DSN USER PASS/};
+my ($dsn2, $user2, $pass2) = @ENV{map { "DBICTEST_SYBASE_ASA_ODBC_${_}" } qw/DSN USER PASS/};
+
+if (not ($dsn || $dsn2)) {
+  plan skip_all => <<'EOF';
+Set $ENV{DBICTEST_SYBASE_ASA_DSN} and/or $ENV{DBICTEST_SYBASE_ASA_ODBC_DSN},
+_USER and _PASS to run this test.
+Warning: This test drops and creates a table called 'track'
+EOF
+} else {
+  eval "use DateTime; use DateTime::Format::Strptime;";
+  if ($@) {
+    plan skip_all => 'needs DateTime and DateTime::Format::Strptime for testing';
+  }
+}
+
+my @info = (
+  [ $dsn,  $user,  $pass  ],
+  [ $dsn2, $user2, $pass2 ],
+);
+
+my @handles_to_clean;
+
+foreach my $info (@info) {
+  my ($dsn, $user, $pass) = @$info;
+
+  next unless $dsn;
+
+  my $schema = DBICTest::Schema->clone;
+
+  $schema->connection($dsn, $user, $pass, {
+    on_connect_call => [ 'datetime_setup' ],
+  });
+
+  push @handles_to_clean, $schema->storage->dbh;
+
+# coltype, col, date
+  my @dt_types = (
+    ['TIMESTAMP', 'last_updated_at', '2004-08-21 14:36:48.080445'],
+# date only (but minute precision according to ASA docs)
+    ['DATE', 'small_dt', '2004-08-21 00:00:00.000000'],
+  );
+
+  for my $dt_type (@dt_types) {
+    my ($type, $col, $sample_dt) = @$dt_type;
+
+    eval { $schema->storage->dbh->do("DROP TABLE track") };
+    $schema->storage->dbh->do(<<"SQL");
+    CREATE TABLE track (
+      trackid INT IDENTITY PRIMARY KEY,
+      cd INT,
+      position INT,
+      $col $type,
+    )
+SQL
+    ok(my $dt = $schema->storage->datetime_parser->parse_datetime($sample_dt));
+
+    my $row;
+    ok( $row = $schema->resultset('Track')->create({
+          $col => $dt,
+          cd => 1,
+        }));
+    ok( $row = $schema->resultset('Track')
+      ->search({ trackid => $row->trackid }, { select => [$col] })
+      ->first
+    );
+    is( $row->$col, $dt, 'DateTime roundtrip' );
+
+    is $row->$col->nanosecond, $dt->nanosecond,
+        'nanoseconds survived' if 0+$dt->nanosecond;
+  }
+}
+
+done_testing;
+
+# clean up our mess
+END {
+  foreach my $dbh (@handles_to_clean) {
+    eval { $dbh->do("DROP TABLE $_") } for qw/track/;
+  }
+}

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/file_column.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/file_column.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/file_column.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -4,7 +4,6 @@
 use Test::More;
 use lib qw(t/lib);
 use DBICTest;
-use IO::File;
 use File::Compare;
 use Path::Class qw/file/;
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/hri.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/hri.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/hri.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,7 +1,7 @@
 use strict;
-use warnings;  
+use warnings;
 
-use Test::More qw(no_plan);
+use Test::More;
 use lib qw(t/lib);
 use DBICTest;
 my $schema = DBICTest->init_schema();
@@ -9,7 +9,7 @@
 # Under some versions of SQLite if the $rs is left hanging around it will lock
 # So we create a scope here cos I'm lazy
 {
-    my $rs = $schema->resultset('CD');
+    my $rs = $schema->resultset('CD')->search ({}, { order_by => 'cdid' });
 
     # get the defined columns
     my @dbic_cols = sort $rs->result_source->columns;
@@ -23,12 +23,14 @@
     my @hashref_cols = sort keys %$datahashref1;
 
     is_deeply( \@dbic_cols, \@hashref_cols, 'returned columns' );
+
+    my $cd1 = $rs->find ({cdid => 1});
+    is_deeply ( $cd1, $datahashref1, 'first/find return the same thing');
 }
 
-
 sub check_cols_of {
     my ($dbic_obj, $datahashref) = @_;
-    
+
     foreach my $col (keys %$datahashref) {
         # plain column
         if (not ref ($datahashref->{$col}) ) {
@@ -42,14 +44,14 @@
         elsif (ref ($datahashref->{$col}) eq 'ARRAY') {
             my @dbic_reltable = $dbic_obj->$col;
             my @hashref_reltable = @{$datahashref->{$col}};
-  
-            is (scalar @hashref_reltable, scalar @dbic_reltable, 'number of related entries');
 
+            is (scalar @dbic_reltable, scalar @hashref_reltable, 'number of related entries');
+
             # for my $index (0..scalar @hashref_reltable) {
             for my $index (0..scalar @dbic_reltable) {
                 my $dbic_reltable_obj       = $dbic_reltable[$index];
                 my $hashref_reltable_entry  = $hashref_reltable[$index];
-                
+
                 check_cols_of($dbic_reltable_obj, $hashref_reltable_entry);
             }
         }
@@ -135,3 +137,6 @@
   [{ $artist->get_columns, cds => [] }],
   'nested has_many prefetch without entries'
 );
+
+done_testing;
+

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/serialize.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/serialize.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/inflate/serialize.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -7,16 +7,14 @@
 
 my $schema = DBICTest->init_schema();
 
-use Data::Dumper;
-
 my @serializers = (
-    {	module => 'YAML.pm',
-	inflater => sub { YAML::Load (shift) },
-	deflater => sub { die "Expecting a reference" unless (ref $_[0]); YAML::Dump (shift) },
+    { module => 'YAML.pm',
+      inflater => sub { YAML::Load (shift) },
+      deflater => sub { die "Expecting a reference" unless (ref $_[0]); YAML::Dump (shift) },
     },
-    {	module => 'Storable.pm',
-	inflater => sub { Storable::thaw (shift) },
-	deflater => sub { die "Expecting a reference" unless (ref $_[0]); Storable::nfreeze (shift) },
+    { module => 'Storable.pm',
+      inflater => sub { Storable::thaw (shift) },
+      deflater => sub { die "Expecting a reference" unless (ref $_[0]); Storable::nfreeze (shift) },
     },
 );
 
@@ -25,14 +23,13 @@
 foreach my $serializer (@serializers) {
     eval { require $serializer->{module} };
     unless ($@) {
-	$selected = $serializer;
-	last;
+      $selected = $serializer;
+      last;
     }
 }
 
 plan (skip_all => "No suitable serializer found") unless $selected;
 
-plan (tests => 11);
 DBICTest::Schema::Serialized->inflate_column( 'serialized',
     { inflate => $selected->{inflater},
       deflate => $selected->{deflater},
@@ -42,17 +39,17 @@
 
 my $struct_hash = {
     a => 1,
-    b => [ 
+    b => [
         { c => 2 },
     ],
     d => 3,
 };
 
 my $struct_array = [
-    'a', 
-    { 
-	b => 1,
-	c => 2
+    'a',
+    {
+      b => 1,
+      c => 2,
     },
     'd',
 ];
@@ -63,7 +60,6 @@
 #======= testing hashref serialization
 
 my $object = $rs->create( { 
-    id => 1,
     serialized => '',
 } );
 ok($object->update( { serialized => $struct_hash } ), 'hashref deflation');
@@ -71,13 +67,19 @@
 is_deeply($inflated, $struct_hash, 'inflated hash matches original');
 
 $object = $rs->create( { 
-    id => 2,
     serialized => '',
 } );
-eval { $object->set_inflated_column('serialized', $struct_hash) };
-ok(!$@, 'set_inflated_column to a hashref');
+$object->set_inflated_column('serialized', $struct_hash);
 is_deeply($object->serialized, $struct_hash, 'inflated hash matches original');
 
+$object = $rs->new({});
+$object->serialized ($struct_hash);
+$object->insert;
+is_deeply (
+  $rs->find ({id => $object->id})->serialized,
+  $struct_hash,
+  'new/insert works',
+);
 
 #====== testing arrayref serialization
 
@@ -85,8 +87,16 @@
 ok($inflated = $object->serialized, 'arrayref inflation');
 is_deeply($inflated, $struct_array, 'inflated array matches original');
 
+$object = $rs->new({});
+$object->serialized ($struct_array);
+$object->insert;
+is_deeply (
+  $rs->find ({id => $object->id})->serialized,
+  $struct_array,
+  'new/insert works',
+);
 
-#===== make sure make_column_dirty ineracts reasonably with inflation
+#===== make sure make_column_dirty interacts reasonably with inflation
 $object = $rs->first;
 $object->update ({serialized => { x => 'y'}});
 
@@ -98,3 +108,5 @@
 $object->update;
 
 is_deeply ($rs->first->serialized, { x => 'z' }, 'changes made it to the db' );
+
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBIC/SqlMakerTest.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBIC/SqlMakerTest.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBIC/SqlMakerTest.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -41,7 +41,8 @@
   croak "Unexpected argument(s) supplied to is_same_sql_bind: " . join ('; ', @_)
     if @_;
 
-  SQL::Abstract::Test::is_same_sql_bind (@args);
+  @_ = @args;
+  goto &SQL::Abstract::Test::is_same_sql_bind;
 }
 
 *is_same_sql = \&SQL::Abstract::Test::is_same_sql;
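
The @_ = @args; goto &func form above is the stock Perl idiom for forwarding
to a test function without adding a stack frame, so Test::Builder attributes
any failure to the original caller rather than to this wrapper (the
alternative being $Test::Builder::Level bookkeeping). Standalone
illustration:

  sub inner {
    my (undef, undef, $line) = caller;
    print "inner() sees a call at line $line\n";
  }

  sub plain_wrapper { inner(@_) }  # caller() points inside this wrapper

  sub goto_wrapper {
    # goto &inner reuses the current @_ and erases this frame, so
    # caller() inside inner() points at goto_wrapper's own call site
    goto &inner;
  }

  plain_wrapper();   # reported line is the inner(@_) call above
  goto_wrapper();    # reported line is this one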

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Bogus/A.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Bogus/A.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Bogus/A.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,6 +1,5 @@
 package DBICNSTest::Bogus::A;
-use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
+use base qw/DBIx::Class::Core/;
 __PACKAGE__->table('a');
 __PACKAGE__->add_columns('a');
 1;
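
This and the following result-class changes are all the same modernization:
inherit from DBIx::Class::Core directly instead of loading components by
hand. Core already bundles PK::Auto (along with Row, InflateColumn,
Relationship and friends), so dropping it from load_components loses nothing.
Side by side (class names illustrative):

  # older style
  package My::Result::FooOld;
  use base qw/DBIx::Class/;
  __PACKAGE__->load_components(qw/PK::Auto Core/);

  # equivalent modern style, as used throughout this commit
  package My::Result::Foo;
  use base qw/DBIx::Class::Core/;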

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Bogus/B.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Bogus/B.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Bogus/B.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,6 +1,5 @@
 package DBICNSTest::Result::B;
-use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
+use base qw/DBIx::Class::Core/;
 __PACKAGE__->table('b');
 __PACKAGE__->add_columns('b');
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/OtherRslt/D.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/OtherRslt/D.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/OtherRslt/D.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,6 +1,5 @@
 package DBICNSTest::OtherRslt::D;
-use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
+use base qw/DBIx::Class::Core/;
 __PACKAGE__->table('d');
 __PACKAGE__->add_columns('d');
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Result/A.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Result/A.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Result/A.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,6 +1,5 @@
 package DBICNSTest::Result::A;
-use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
+use base qw/DBIx::Class::Core/;
 __PACKAGE__->table('a');
 __PACKAGE__->add_columns('a');
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Result/B.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Result/B.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Result/B.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,6 +1,5 @@
 package DBICNSTest::Result::B;
-use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
+use base qw/DBIx::Class::Core/;
 __PACKAGE__->table('b');
 __PACKAGE__->add_columns('b');
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Rslt/A.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Rslt/A.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Rslt/A.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,6 +1,5 @@
 package DBICNSTest::Rslt::A;
-use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
+use base qw/DBIx::Class::Core/;
 __PACKAGE__->table('a');
 __PACKAGE__->add_columns('a');
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Rslt/B.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Rslt/B.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/Rslt/B.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,6 +1,5 @@
 package DBICNSTest::Rslt::B;
-use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
+use base qw/DBIx::Class::Core/;
 __PACKAGE__->table('b');
 __PACKAGE__->add_columns('b');
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/RtBug41083/Schema/Foo.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/RtBug41083/Schema/Foo.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/RtBug41083/Schema/Foo.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,8 +1,7 @@
 package DBICNSTest::RtBug41083::Schema::Foo;
 use strict;
 use warnings;
-use base 'DBIx::Class';
-__PACKAGE__->load_components('Core');
+use base 'DBIx::Class::Core';
 __PACKAGE__->table('foo');
 __PACKAGE__->add_columns('foo');
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/RtBug41083/Schema_A/A.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/RtBug41083/Schema_A/A.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICNSTest/RtBug41083/Schema_A/A.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,8 +1,7 @@
 package DBICNSTest::RtBug41083::Schema_A::A;
 use strict;
 use warnings;
-use base 'DBIx::Class';
-__PACKAGE__->load_components('Core');
+use base 'DBIx::Class::Core';
 __PACKAGE__->table('a');
 __PACKAGE__->add_columns('a');
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/AuthorCheck.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/AuthorCheck.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/AuthorCheck.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -68,21 +68,17 @@
 We have a number of reasons to believe that this is a development
 checkout and that you, the user, did not run `perl Makefile.PL`
 before using this code. You absolutely _must_ perform this step,
-as not doing so often results in a lot of wasted time for other
-contributors trying to assit you with "it broke!" problems.
+and ensure you have all required dependencies present. Not doing
+so often results in a lot of wasted time for other contributors
+trying to assist you with spurious "it's broken!" problems.
 
 If you are seeing this message unexpectedly (i.e. you are in fact
-attempting a regular installation be it through CPAN or manually,
-set the variable DBICTEST_NO_MAKEFILE_VERIFICATION to a true value
-so you can continue. Also _make_absolutely_sure_ to report this to
-either the mailing list or to the irc channel as described in
+attempting a regular installation, be it through CPAN or manually),
+please report the situation to either the mailing list or to the
+irc channel as described in
 
 http://search.cpan.org/dist/DBIx-Class/lib/DBIx/Class.pm#GETTING_HELP/SUPPORT
 
-Failure to do this will make us believe that all these checks are
-indeed foolproof and we will remove the ability to override this
-entirely.
-
 The DBIC team
 
 EOE
@@ -91,6 +87,19 @@
   }
 }
 
+# Mimic $Module::Install::AUTHOR
+sub is_author {
+
+  my $root = _find_co_root()
+    or return undef;
+
+  return (
+    ( not -d $root->subdir ('inc') )
+      or
+    ( -e $root->subdir ('inc')->file ($^O eq 'VMS' ? '_author' : '.author') )
+  );
+}
+
 # Try to determine the root of a checkout/untar if possible
 # or return undef
 sub _find_co_root {

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/BaseResult.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/BaseResult.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/BaseResult.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -4,10 +4,9 @@
 use strict;
 use warnings;
 
-use base qw/DBIx::Class/;
+use base qw/DBIx::Class::Core/;
 use DBICTest::BaseResultSet;
 
-__PACKAGE__->load_components (qw/Core/);
 __PACKAGE__->table ('bogus');
 __PACKAGE__->resultset_class ('DBICTest::BaseResultSet');
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/ResultSetManager/Foo.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/ResultSetManager/Foo.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/ResultSetManager/Foo.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,8 +1,8 @@
 package # hide from PAUSE 
     DBICTest::ResultSetManager::Foo;
-use base 'DBIx::Class';
+use base 'DBIx::Class::Core';
 
-__PACKAGE__->load_components(qw/ ResultSetManager Core /);
+__PACKAGE__->load_components(qw/ ResultSetManager /);
 __PACKAGE__->table('foo');
 
 sub bar : ResultSet { 'good' }

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Artist.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Artist.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Artist.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -30,6 +30,7 @@
   },
 );
 __PACKAGE__->set_primary_key('artistid');
+__PACKAGE__->add_unique_constraint(artist => ['artistid']); # do not remove, part of a test
 
 __PACKAGE__->mk_classdata('field_name_for', {
     artistid    => 'primary key',
@@ -43,6 +44,9 @@
 __PACKAGE__->has_many(
     cds_unordered => 'DBICTest::Schema::CD'
 );
+__PACKAGE__->has_many(
+    cds_very_very_very_long_relationship_name => 'DBICTest::Schema::CD'
+);
 
 __PACKAGE__->has_many( twokeys => 'DBICTest::Schema::TwoKeys' );
 __PACKAGE__->has_many( onekeys => 'DBICTest::Schema::OneKey' );
@@ -68,4 +72,11 @@
   }
 }
 
+sub store_column {
+  my ($self, $name, $value) = @_;
+  $value = 'X '.$value if ($name eq 'name' && $value && $value =~ /(X )?store_column test/);
+  $self->next::method($name, $value);
+}
+
+
 1;
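
The store_column override added above exercises DBIC's raw column-write hook:
it runs on every value stored into the row object (at creation as well as on
later set_column calls) before the value lands in the row data, which makes
it a natural normalization point. Generic shape (the column name and
transformation here are hypothetical):

  sub store_column {
    my ($self, $name, $value) = @_;
    $value = lc $value if $name eq 'email';   # hypothetical normalization
    return $self->next::method($name, $value);
  }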

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/ArtistGUID.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/ArtistGUID.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/ArtistGUID.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,35 @@
+package # hide from PAUSE
+    DBICTest::Schema::ArtistGUID;
+
+use base qw/DBICTest::BaseResult/;
+
+# test MSSQL uniqueidentifier type
+
+__PACKAGE__->table('artist');
+__PACKAGE__->add_columns(
+  'artistid' => {
+    data_type => 'uniqueidentifier' # auto_nextval not necessary for PK
+  },
+  'name' => {
+    data_type => 'varchar',
+    size      => 100,
+    is_nullable => 1,
+  },
+  rank => {
+    data_type => 'integer',
+    default_value => 13,
+  },
+  charfield => {
+    data_type => 'char',
+    size => 10,
+    is_nullable => 1,
+  },
+  a_guid => {
+    data_type => 'uniqueidentifier',
+    auto_nextval => 1, # necessary here, because not a PK
+    is_nullable => 1,
+  }
+);
+__PACKAGE__->set_primary_key('artistid');
+
+1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Artwork.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Artwork.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Artwork.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -7,6 +7,7 @@
 __PACKAGE__->add_columns(
   'cd_id' => {
     data_type => 'integer',
+    is_nullable => 0,
   },
 );
 __PACKAGE__->set_primary_key('cd_id');

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Bookmark.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Bookmark.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Bookmark.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -15,6 +15,7 @@
     },
     'link' => {
         data_type => 'integer',
+        is_nullable => 1,
     },
 );
 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/CD.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/CD.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/CD.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,7 +3,10 @@
 
 use base qw/DBICTest::BaseResult/;
 
-__PACKAGE__->table('cd');
+# this tests table name as scalar ref
+# DO NOT REMOVE THE \
+__PACKAGE__->table(\'cd');
+
 __PACKAGE__->add_columns(
   'cdid' => {
     data_type => 'integer',
@@ -23,6 +26,7 @@
   'genreid' => { 
     data_type => 'integer',
     is_nullable => 1,
+    accessor => undef,
   },
   'single_track' => {
     data_type => 'integer',
@@ -56,6 +60,7 @@
     { proxy => [ qw/notes/ ] },
 );
 __PACKAGE__->might_have(artwork => 'DBICTest::Schema::Artwork', 'cd_id');
+__PACKAGE__->has_one(mandatory_artwork => 'DBICTest::Schema::Artwork', 'cd_id');
 
 __PACKAGE__->many_to_many( producers => cd_to_producer => 'producer' );
 __PACKAGE__->many_to_many(
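
The \'cd' change near the top of this file is deliberate, as its DO NOT
REMOVE comment says: a plain string table name goes through the SQL maker's
identifier quoting, while a scalar reference is emitted into the generated
SQL verbatim. Illustrative contrast:

  __PACKAGE__->table('cd');    # subject to quote_char, may render as "cd"
  __PACKAGE__->table(\'cd');   # literal: always rendered exactly as cd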

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/ComputedColumn.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/ComputedColumn.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/ComputedColumn.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,34 @@
+package # hide from PAUSE
+    DBICTest::Schema::ComputedColumn;
+
+# for sybase and mssql computed column tests
+
+use base qw/DBICTest::BaseResult/;
+
+__PACKAGE__->table('computed_column_test');
+
+__PACKAGE__->add_columns(
+  'id' => {
+    data_type => 'integer',
+    is_auto_increment => 1,
+  },
+  'a_computed_column' => {
+    data_type => undef,
+    is_nullable => 0,
+    default_value => \'getdate()',
+  },
+  'a_timestamp' => {
+    data_type => 'timestamp',
+    is_nullable => 0,
+  },
+  'charfield' => {
+    data_type => 'varchar',
+    size => 20,
+    default_value => 'foo',
+    is_nullable => 0,
+  }
+);
+
+__PACKAGE__->set_primary_key('id');
+
+1;

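ComputedColumn.pm relies on another scalar-ref convention: a reference in default_value is treated as literal SQL, so the deployed DDL carries the getdate() function call rather than a quoted string. A minimal sketch, with an illustrative column name:

  __PACKAGE__->add_columns(
    'created_dt' => {
      data_type     => undef,   # let the server derive the type
      is_nullable   => 0,
      # \'getdate()' emits DEFAULT getdate() in the DDL, whereas the
      # plain string 'getdate()' would emit a quoted string default
      default_value => \'getdate()',
    },
  );
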
Added: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/CustomSql.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/CustomSql.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/CustomSql.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,17 @@
+package # hide from PAUSE 
+    DBICTest::Schema::CustomSql;
+
+use base qw/DBICTest::Schema::Artist/;
+
+__PACKAGE__->table('dummy');
+
+__PACKAGE__->result_source_instance->name(\<<SQL);
+  ( SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year 
+  FROM artist a
+  JOIN cd ON cd.artist = a.artistid
+  WHERE cd.year = ?)
+SQL
+
+sub sqlt_deploy_hook { $_[1]->schema->drop_table($_[1]) }
+
+1;


Property changes on: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/CustomSql.pm
___________________________________________________________________
Name: svn:eol-style
   + native
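
Because the literal FROM clause in CustomSql.pm above contains a ? placeholder, any query against this source needs a corresponding bind value. A hedged usage sketch, assuming the standard bind search attribute:

  # the single bind value fills the cd.year = ? placeholder in the
  # source's literal SQL definition
  my @rows = $schema->resultset('CustomSql')->search(
    {},
    { bind => [ 1999 ] },
  )->all;
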
Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Event.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Event.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Event.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -15,6 +15,7 @@
   varchar_date => { data_type => 'varchar', inflate_date => 1, size => 20, is_nullable => 1 },
   varchar_datetime => { data_type => 'varchar', inflate_datetime => 1, size => 20, is_nullable => 1 },
   skip_inflation => { data_type => 'datetime', inflate_datetime => 0, is_nullable => 1 },
+  ts_without_tz => { data_type => 'datetime', is_nullable => 1 }, # used in EventTZPg
 );
 
 __PACKAGE__->set_primary_key('id');

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/EventTZPg.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/EventTZPg.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/EventTZPg.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -12,6 +12,7 @@
   id => { data_type => 'integer', is_auto_increment => 1 },
   starts_at => { data_type => 'datetime', timezone => "America/Chicago", locale => 'de_DE' },
   created_on => { data_type => 'timestamp with time zone', timezone => "America/Chicago" },
+  ts_without_tz => { data_type => 'timestamp without time zone' },
 );
 
 __PACKAGE__->set_primary_key('id');

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/ForceForeign.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/ForceForeign.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/ForceForeign.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -12,30 +12,21 @@
 
 # Normally this would not appear as a FK constraint
 # since it uses the PK
-__PACKAGE__->might_have(
-			'artist_1', 'DBICTest::Schema::Artist', {
-			    'foreign.artistid' => 'self.artist',
-			}, {
-			    is_foreign_key_constraint => 1,
-			},
+__PACKAGE__->might_have('artist_1', 'DBICTest::Schema::Artist',
+  { 'foreign.artistid' => 'self.artist' },
+  { is_foreign_key_constraint => 1 },
 );
 
 # Normally this would appear as a FK constraint
-__PACKAGE__->might_have(
-			'cd_1', 'DBICTest::Schema::CD', {
-			    'foreign.cdid' => 'self.cd',
-			}, {
-			    is_foreign_key_constraint => 0,
-			},
+__PACKAGE__->might_have('cd_1', 'DBICTest::Schema::CD',
+  { 'foreign.cdid' => 'self.cd' },
+  { is_foreign_key_constraint => 0 },
 );
 
 # Normally this would appear as a FK constraint
-__PACKAGE__->belongs_to(
-			'cd_3', 'DBICTest::Schema::CD', {
-			    'foreign.cdid' => 'self.cd',
-			}, {
-			    is_foreign_key_constraint => 0,
-			},
+__PACKAGE__->belongs_to('cd_3', 'DBICTest::Schema::CD',
+  { 'foreign.cdid' => 'self.cd' },
+  { is_foreign_key_constraint => 0 },
 );
 
 1;

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Money.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Money.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Money.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,21 @@
+package # hide from PAUSE 
+    DBICTest::Schema::Money;
+
+use base qw/DBICTest::BaseResult/;
+
+__PACKAGE__->table('money_test');
+
+__PACKAGE__->add_columns(
+  'id' => {
+    data_type => 'integer',
+    is_auto_increment => 1,
+  },
+  'amount' => {
+    data_type => 'money',
+    is_nullable => 1,
+  },
+);
+
+__PACKAGE__->set_primary_key('id');
+
+1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Serialized.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Serialized.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Serialized.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -5,7 +5,7 @@
 
 __PACKAGE__->table('serialized');
 __PACKAGE__->add_columns(
-  'id' => { data_type => 'integer' },
+  'id' => { data_type => 'integer', is_auto_increment => 1 },
   'serialized' => { data_type => 'text' },
 );
 __PACKAGE__->set_primary_key('id');

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Track.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Track.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Track.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -14,7 +14,7 @@
     data_type => 'integer',
   },
   'position' => {
-    data_type => 'integer',
+    data_type => 'int',
     accessor => 'pos',
   },
   'title' => {
@@ -30,6 +30,10 @@
     data_type => 'datetime',
     is_nullable => 1
   },
+  small_dt => { # for mssql and sybase DT tests
+    data_type => 'smalldatetime',
+    is_nullable => 1
+  },
 );
 __PACKAGE__->set_primary_key('trackid');
 
@@ -46,4 +50,17 @@
 __PACKAGE__->might_have( cd_single => 'DBICTest::Schema::CD', 'single_track' );
 __PACKAGE__->might_have( lyrics => 'DBICTest::Schema::Lyrics', 'track_id' );
 
+__PACKAGE__->belongs_to(
+    "year1999cd",
+    "DBICTest::Schema::Year1999CDs",
+    { "foreign.cdid" => "self.cd" },
+    { join_type => 'left' },  # the relationship is of course optional
+);
+__PACKAGE__->belongs_to(
+    "year2000cd",
+    "DBICTest::Schema::Year2000CDs",
+    { "foreign.cdid" => "self.cd" },
+    { join_type => 'left' },
+);
+
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Year1999CDs.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Year1999CDs.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Year1999CDs.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,14 +3,13 @@
 ## Used in 104view.t
 
 use base qw/DBICTest::BaseResult/;
-use DBIx::Class::ResultSource::View;
 
 __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
 
 __PACKAGE__->table('year1999cds');
 __PACKAGE__->result_source_instance->is_virtual(1);
 __PACKAGE__->result_source_instance->view_definition(
-  "SELECT cdid, artist, title FROM cd WHERE year ='1999'"
+  "SELECT cdid, artist, title, single_track FROM cd WHERE year ='1999'"
 );
 __PACKAGE__->add_columns(
   'cdid' => {
@@ -24,9 +23,17 @@
     data_type => 'varchar',
     size      => 100,
   },
-
+  'single_track' => {
+    data_type => 'integer',
+    is_nullable => 1,
+    is_foreign_key => 1,
+  },
 );
 __PACKAGE__->set_primary_key('cdid');
 __PACKAGE__->add_unique_constraint([ qw/artist title/ ]);
 
+__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist' );
+__PACKAGE__->has_many( tracks => 'DBICTest::Schema::Track',
+    { "foreign.cd" => "self.cdid" });
+
 1;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Year2000CDs.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Year2000CDs.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema/Year2000CDs.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,31 +1,19 @@
-package # hide from PAUSE 
+package # hide from PAUSE
     DBICTest::Schema::Year2000CDs;
-## Used in 104view.t
 
-use base qw/DBICTest::BaseResult/;
-use DBIx::Class::ResultSource::View;
+use base qw/DBICTest::Schema::CD/;
 
 __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
-
 __PACKAGE__->table('year2000cds');
-__PACKAGE__->result_source_instance->view_definition(
-  "SELECT cdid, artist, title FROM cd WHERE year ='2000'"
-);
-__PACKAGE__->add_columns(
-  'cdid' => {
-    data_type => 'integer',
-    is_auto_increment => 1,
-  },
-  'artist' => {
-    data_type => 'integer',
-  },
-  'title' => {
-    data_type => 'varchar',
-    size      => 100,
-  },
 
-);
-__PACKAGE__->set_primary_key('cdid');
-__PACKAGE__->add_unique_constraint([ qw/artist title/ ]);
+# need to operate on the instance for things to work
+__PACKAGE__->result_source_instance->view_definition( sprintf (
+  'SELECT %s FROM cd WHERE year = "2000"',
+  join (', ', __PACKAGE__->columns),
+));
 
+__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist' );
+__PACKAGE__->has_many( tracks => 'DBICTest::Schema::Track',
+    { "foreign.cd" => "self.cdid" });
+
 1;

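The rewritten Year2000CDs inherits its column set from the CD class and only overrides the source metadata, which is why the view definition is built from __PACKAGE__->columns and set on result_source_instance (a class-level call would not reach the already-composed instance). Once deployed, the view is queried like any other source; a hedged sketch:

  # behaves like a CD resultset restricted to year 2000, including
  # the inherited and re-declared relationships
  my @cds = $schema->resultset('Year2000CDs')->search(
    {},
    { join => 'tracks' },
  )->all;
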
Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest/Schema.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -20,6 +20,8 @@
   Tag
   Year2000CDs
   Year1999CDs
+  CustomSql
+  Money
   /,
   { 'DBICTest::Schema' => [qw/
     LinerNotes

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICTest.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -32,7 +32,7 @@
     no_populate=>1,
     storage_type=>'::DBI::Replicated',
     storage_type_args=>{
-    	balancer_type=>'DBIx::Class::Storage::DBI::Replicated::Balancer::Random'
+      balancer_type=>'DBIx::Class::Storage::DBI::Replicated::Balancer::Random'
     },
   );
 
@@ -48,7 +48,7 @@
 =cut
 
 sub has_custom_dsn {
-	return $ENV{"DBICTEST_DSN"} ? 1:0;
+    return $ENV{"DBICTEST_DSN"} ? 1:0;
 }
 
 sub _sqlite_dbfilename {
@@ -59,7 +59,7 @@
     my $self = shift;
     my %args = @_;
     return $self->_sqlite_dbfilename if $args{sqlite_use_file} or $ENV{"DBICTEST_SQLITE_USE_FILE"};
-	return ":memory:";
+    return ":memory:";
 }
 
 sub _database {
@@ -85,7 +85,7 @@
     my %args = @_;
 
     my $schema;
-    
+
     if ($args{compose_connection}) {
       $schema = DBICTest::Schema->compose_connection(
                   'DBICTest', $self->_database(%args)
@@ -94,8 +94,8 @@
       $schema = DBICTest::Schema->compose_namespace('DBICTest');
     }
     if( $args{storage_type}) {
-    	$schema->storage_type($args{storage_type});
-    }    
+      $schema->storage_type($args{storage_type});
+    }
     if ( !$args{no_connect} ) {
       $schema = $schema->connect($self->_database(%args));
       $schema->storage->on_connect_do(['PRAGMA synchronous = OFF'])
@@ -127,7 +127,7 @@
     my $args = shift || {};
 
     if ($ENV{"DBICTEST_SQLT_DEPLOY"}) { 
-        $schema->deploy($args);    
+        $schema->deploy($args);
     } else {
       $schema->storage->run_file_against_storage(qw/t lib sqlite.sql/);
     }
@@ -146,6 +146,11 @@
     my $self = shift;
     my $schema = shift;
 
+    $schema->populate('Genre', [
+      [qw/genreid name/],
+      [qw/1       emo  /],
+    ]);
+
     $schema->populate('Artist', [
         [ qw/artistid name/ ],
         [ 1, 'Caterwauler McCrae' ],
@@ -154,8 +159,8 @@
     ]);
 
     $schema->populate('CD', [
-        [ qw/cdid artist title year/ ],
-        [ 1, 1, "Spoonful of bees", 1999 ],
+        [ qw/cdid artist title year genreid/ ],
+        [ 1, 1, "Spoonful of bees", 1999, 1 ],
         [ 2, 1, "Forkful of bees", 2001 ],
         [ 3, 1, "Caterwaulin' Blues", 1997 ],
         [ 4, 2, "Generic Manufactured Singles", 2001 ],
@@ -234,7 +239,7 @@
     
     $schema->populate('TreeLike', [
         [ qw/id parent name/ ],
-        [ 1, undef, 'root' ],        
+        [ 1, undef, 'root' ],
         [ 2, 1, 'foo'  ],
         [ 3, 2, 'bar'  ],
         [ 6, 2, 'blop' ],

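This DBICTest.pm hunk is the point of the branch: instead of slurping t/lib/sqlite.sql and executing it by hand, the harness now goes through the new storage-level helper. A minimal sketch of the calling convention as used here (the method appears to take the path components of the SQL file to replay):

  unless ($ENV{DBICTEST_SQLT_DEPLOY}) {
    # replay a pre-generated DDL file through the connected storage
    $schema->storage->run_file_against_storage(qw/t lib sqlite.sql/);
  }
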
Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersionNew.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersionNew.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersionNew.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,56 +0,0 @@
-package DBICVersion::Table;
-
-use base 'DBIx::Class';
-use strict;
-use warnings;
-
-__PACKAGE__->load_components(qw/ Core/);
-__PACKAGE__->table('TestVersion');
-
-__PACKAGE__->add_columns
-    ( 'Version' => {
-        'data_type' => 'INTEGER',
-        'is_auto_increment' => 1,
-        'default_value' => undef,
-        'is_foreign_key' => 0,
-        'is_nullable' => 0,
-        'size' => ''
-        },
-      'VersionName' => {
-        'data_type' => 'VARCHAR',
-        'is_auto_increment' => 0,
-        'default_value' => undef,
-        'is_foreign_key' => 0,
-        'is_nullable' => 0,
-        'size' => '10'
-        },
-      'NewVersionName' => {
-        'data_type' => 'VARCHAR',
-        'is_auto_increment' => 0,
-        'default_value' => undef,
-        'is_foreign_key' => 0,
-        'is_nullable' => 1,
-        'size' => '20'
-        }
-      );
-
-__PACKAGE__->set_primary_key('Version');
-
-package DBICVersion::Schema;
-use base 'DBIx::Class::Schema';
-use strict;
-use warnings;
-
-our $VERSION = '2.0';
-
-__PACKAGE__->register_class('Table', 'DBICVersion::Table');
-__PACKAGE__->load_components('+DBIx::Class::Schema::Versioned');
-__PACKAGE__->upgrade_directory('t/var/');
-__PACKAGE__->backup_directory('t/var/backup/');
-
-#sub upgrade_directory
-#{
-#    return 't/var/';
-#}
-
-1;

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersionOrig.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersionOrig.pm	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersionOrig.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,46 +0,0 @@
-package DBICVersion::Table;
-
-use base 'DBIx::Class';
-use strict;
-use warnings;
-
-__PACKAGE__->load_components(qw/ Core/);
-__PACKAGE__->table('TestVersion');
-
-__PACKAGE__->add_columns
-    ( 'Version' => {
-        'data_type' => 'INTEGER',
-        'is_auto_increment' => 1,
-        'default_value' => undef,
-        'is_foreign_key' => 0,
-        'is_nullable' => 0,
-        'size' => ''
-        },
-      'VersionName' => {
-        'data_type' => 'VARCHAR',
-        'is_auto_increment' => 0,
-        'default_value' => undef,
-        'is_foreign_key' => 0,
-        'is_nullable' => 0,
-        'size' => '10'
-        },
-      );
-
-__PACKAGE__->set_primary_key('Version');
-
-package DBICVersion::Schema;
-use base 'DBIx::Class::Schema';
-use strict;
-use warnings;
-
-our $VERSION = '1.0';
-
-__PACKAGE__->register_class('Table', 'DBICVersion::Table');
-__PACKAGE__->load_components('+DBIx::Class::Schema::Versioned');
-
-sub upgrade_directory
-{
-    return 't/var/';
-}
-
-1;

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersion_v1.pm (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersionOrig.pm)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersion_v1.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersion_v1.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,49 @@
+package DBICVersion::Table;
+
+use base 'DBIx::Class::Core';
+use strict;
+use warnings;
+
+__PACKAGE__->table('TestVersion');
+
+__PACKAGE__->add_columns
+    ( 'Version' => {
+        'data_type' => 'INTEGER',
+        'is_auto_increment' => 1,
+        'default_value' => undef,
+        'is_foreign_key' => 0,
+        'is_nullable' => 0,
+        'size' => ''
+        },
+      'VersionName' => {
+        'data_type' => 'VARCHAR',
+        'is_auto_increment' => 0,
+        'default_value' => undef,
+        'is_foreign_key' => 0,
+        'is_nullable' => 0,
+        'size' => '10'
+        },
+      );
+
+__PACKAGE__->set_primary_key('Version');
+
+package DBICVersion::Schema;
+use base 'DBIx::Class::Schema';
+use strict;
+use warnings;
+
+our $VERSION = '1.0';
+
+__PACKAGE__->register_class('Table', 'DBICVersion::Table');
+__PACKAGE__->load_components('+DBIx::Class::Schema::Versioned');
+
+sub upgrade_directory
+{
+    return 't/var/';
+}
+
+sub ordered_schema_versions {
+  return('1.0','2.0','3.0');
+}
+
+1;

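The new ordered_schema_versions hook in DBICVersion_v1 tells DBIx::Class::Schema::Versioned the full upgrade path, so a 1.0 database is upgraded stepwise (1.0 -> 2.0 -> 3.0) rather than jumped straight to the newest version. A hedged usage sketch:

  my $schema = DBICVersion::Schema->connect($dsn, $user, $pass);

  # walks every intermediate version returned by
  # ordered_schema_versions, applying the upgrade SQL for each step
  $schema->upgrade
    if $schema->get_db_version ne $schema->schema_version;
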
Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersion_v2.pm (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersionNew.pm)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersion_v2.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersion_v2.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,55 @@
+package DBICVersion::Table;
+
+use base 'DBIx::Class::Core';
+use strict;
+use warnings;
+
+__PACKAGE__->table('TestVersion');
+
+__PACKAGE__->add_columns
+    ( 'Version' => {
+        'data_type' => 'INTEGER',
+        'is_auto_increment' => 1,
+        'default_value' => undef,
+        'is_foreign_key' => 0,
+        'is_nullable' => 0,
+        'size' => ''
+        },
+      'VersionName' => {
+        'data_type' => 'VARCHAR',
+        'is_auto_increment' => 0,
+        'default_value' => undef,
+        'is_foreign_key' => 0,
+        'is_nullable' => 0,
+        'size' => '10'
+        },
+      'NewVersionName' => {
+        'data_type' => 'VARCHAR',
+        'is_auto_increment' => 0,
+        'default_value' => undef,
+        'is_foreign_key' => 0,
+        'is_nullable' => 1,
+        'size' => '20'
+        }
+      );
+
+__PACKAGE__->set_primary_key('Version');
+
+package DBICVersion::Schema;
+use base 'DBIx::Class::Schema';
+use strict;
+use warnings;
+
+our $VERSION = '2.0';
+
+__PACKAGE__->register_class('Table', 'DBICVersion::Table');
+__PACKAGE__->load_components('+DBIx::Class::Schema::Versioned');
+__PACKAGE__->upgrade_directory('t/var/');
+__PACKAGE__->backup_directory('t/var/backup/');
+
+#sub upgrade_directory
+#{
+#    return 't/var/';
+#}
+
+1;

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersion_v3.pm
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersion_v3.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersion_v3.pm	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,58 @@
+package DBICVersion::Table;
+
+use base 'DBIx::Class::Core';
+use strict;
+use warnings;
+
+__PACKAGE__->table('TestVersion');
+
+__PACKAGE__->add_columns
+    ( 'Version' => {
+        'data_type' => 'INTEGER',
+        'is_auto_increment' => 1,
+        'default_value' => undef,
+        'is_foreign_key' => 0,
+        'is_nullable' => 0,
+        'size' => ''
+        },
+      'VersionName' => {
+        'data_type' => 'VARCHAR',
+        'is_auto_increment' => 0,
+        'default_value' => undef,
+        'is_foreign_key' => 0,
+        'is_nullable' => 0,
+        'size' => '10'
+        },
+      'NewVersionName' => {
+        'data_type' => 'VARCHAR',
+        'is_auto_increment' => 0,
+        'default_value' => undef,
+        'is_foreign_key' => 0,
+        'is_nullable' => 1,
+        'size' => '20'
+        },
+      'ExtraColumn' => {
+        'data_type' => 'VARCHAR',
+        'is_auto_increment' => 0,
+        'default_value' => undef,
+        'is_foreign_key' => 0,
+        'is_nullable' => 1,
+        'size' => '20'
+        }
+      );
+
+__PACKAGE__->set_primary_key('Version');
+
+package DBICVersion::Schema;
+use base 'DBIx::Class::Schema';
+use strict;
+use warnings;
+
+our $VERSION = '3.0';
+
+__PACKAGE__->register_class('Table', 'DBICVersion::Table');
+__PACKAGE__->load_components('+DBIx::Class::Schema::Versioned');
+__PACKAGE__->upgrade_directory('t/var/');
+__PACKAGE__->backup_directory('t/var/backup/');
+
+1;


Property changes on: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/DBICVersion_v3.pm
___________________________________________________________________
Name: svn:keywords
   + "Author Date Id Revision Url"
Name: svn:eol-style
   + native

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/lib/sqlite.sql
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/lib/sqlite.sql	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/lib/sqlite.sql	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,11 +1,9 @@
 -- 
 -- Created by SQL::Translator::Producer::SQLite
--- Created on Sat Jun 27 14:02:39 2009
+-- Created on Sat Jan 30 19:18:55 2010
 -- 
+;
 
-
-BEGIN TRANSACTION;
-
 --
 -- Table: artist
 --
@@ -16,6 +14,8 @@
   charfield char(10)
 );
 
+CREATE INDEX artist_name_hookidx ON artist (name);
+
 --
 -- Table: bindtype_test
 --
@@ -63,7 +63,8 @@
   created_on timestamp NOT NULL,
   varchar_date varchar(20),
   varchar_datetime varchar(20),
-  skip_inflation datetime
+  skip_inflation datetime,
+  ts_without_tz datetime
 );
 
 --
@@ -107,6 +108,14 @@
 );
 
 --
+-- Table: money_test
+--
+CREATE TABLE money_test (
+  id INTEGER PRIMARY KEY NOT NULL,
+  amount money
+);
+
+--
 -- Table: noprimarykey
 --
 CREATE TABLE noprimarykey (
@@ -225,7 +234,7 @@
 --
 CREATE TABLE bookmark (
   id INTEGER PRIMARY KEY NOT NULL,
-  link integer NOT NULL
+  link integer
 );
 
 CREATE INDEX bookmark_idx_link ON bookmark (link);
@@ -251,8 +260,6 @@
   cd integer NOT NULL
 );
 
-CREATE INDEX forceforeign_idx_artist ON forceforeign (artist);
-
 --
 -- Table: self_ref_alias
 --
@@ -272,10 +279,11 @@
 CREATE TABLE track (
   trackid INTEGER PRIMARY KEY NOT NULL,
   cd integer NOT NULL,
-  position integer NOT NULL,
+  position int NOT NULL,
   title varchar(100) NOT NULL,
   last_updated_on datetime,
-  last_updated_at datetime
+  last_updated_at datetime,
+  small_dt smalldatetime
 );
 
 CREATE INDEX track_idx_cd ON track (cd);
@@ -334,8 +342,6 @@
   cd_id INTEGER PRIMARY KEY NOT NULL
 );
 
-CREATE INDEX cd_artwork_idx_cd_id ON cd_artwork (cd_id);
-
 --
 -- Table: liner_notes
 --
@@ -344,8 +350,6 @@
   notes varchar(100) NOT NULL
 );
 
-CREATE INDEX liner_notes_idx_liner_id ON liner_notes (liner_id);
-
 --
 -- Table: lyric_versions
 --
@@ -441,6 +445,4 @@
 -- View: year2000cds
 --
 CREATE VIEW year2000cds AS
-    SELECT cdid, artist, title FROM cd WHERE year ='2000';
-
-COMMIT;
+    SELECT cdid, artist, title, year, genreid, single_track FROM cd WHERE year = "2000"
\ No newline at end of file

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/diamond.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/diamond.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/diamond.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,52 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+sub mc_diag { diag (@_) if $ENV{DBIC_MULTICREATE_DEBUG} };
+
+my $schema = DBICTest->init_schema();
+
+mc_diag (<<'DG');
+* Try a diamond multicreate
+
+Artist -> has_many -> Artwork_to_Artist -> belongs_to
+                                               /
+  belongs_to <- CD <- belongs_to <- Artwork <-/
+    \
+     \-> Artist2
+
+DG
+
+lives_ok (sub {
+  $schema->resultset ('Artist')->create ({
+    name => 'The wooled wolf',
+    artwork_to_artist => [{
+      artwork => {
+        cd => {
+          title => 'Wool explosive',
+          year => 1999,
+          artist => { name => 'The black exploding sheep' },
+        }
+      }
+    }],
+  });
+
+  my $art2 = $schema->resultset ('Artist')->find ({ name => 'The black exploding sheep' });
+  ok ($art2, 'Second artist exists');
+
+  my $cd = $art2->cds->single;
+  is ($cd->title, 'Wool explosive', 'correctly created CD');
+
+  is_deeply (
+    [ $cd->artwork->artists->get_column ('name')->all ],
+    [ 'The wooled wolf' ],
+    'Artist correctly attached to artwork',
+  );
+
+}, 'Diamond chain creation ok');
+
+done_testing;


Property changes on: DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/diamond.t
___________________________________________________________________
Name: svn:eol-style
   + native

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/existing_in_chain.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/existing_in_chain.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/existing_in_chain.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,105 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+# For fully intuitive multicreate any relationships in a chain
+# that do not exist for one reason or another should be created,
+# even if the preceding relationship already exists.
+#
+# To get this to work a minor rewrite of find() is necessary, and
+# more importantly some sort of recursive_insert() call needs to 
+# be available. The way things will work then is:
+# *) while traversing the hierarchy code calls find_or_create()
+# *) this in turn calls find(\%nested_dataset)
+# *) this should return not only the existing object, but must
+#    also attach all non-existing (in fact maybe existing) related
+#    bits of data to it, with in_storage => 0
+# *) then before returning the result of the successful find(), we
+#    simply call $obj->recursive_insert and all is dandy
+#
+# Since this will not be a very clean solution, todoifying for the
+# time being until an actual need arises
+#
+# ribasushi
+
+TODO: { my $f = __FILE__; local $TODO = "See comment at top of $f for discussion of the TODO";
+
+{
+  my $counts;
+  $counts->{$_} = $schema->resultset($_)->count for qw/Track CD Genre/;
+
+  lives_ok (sub {
+    my $existing_nogen_cd = $schema->resultset('CD')->search (
+      { 'genre.genreid' => undef },
+      { join => 'genre' },
+    )->first;
+
+    $schema->resultset('Track')->create ({
+      title => 'Sugar-coated',
+      cd => {
+        title => $existing_nogen_cd->title,
+        genre => {
+          name => 'sugar genre',
+        }
+      }
+    });
+
+    is ($schema->resultset('Track')->count, $counts->{Track} + 1, '1 new track');
+    is ($schema->resultset('CD')->count, $counts->{CD}, 'No new cds');
+    is ($schema->resultset('Genre')->count, $counts->{Genre} + 1, '1 new genre');
+
+    is ($existing_nogen_cd->genre->name, 'sugar genre', 'Correct genre assigned to CD');
+  }, 'create() did not throw');
+}
+{
+  my $counts;
+  $counts->{$_} = $schema->resultset($_)->count for qw/Artist CD Producer/;
+
+  lives_ok (sub {
+    my $artist = $schema->resultset('Artist')->first;
+    my $producer = $schema->resultset('Producer')->create ({ name => 'the queen of england' });
+
+    $schema->resultset('CD')->create ({
+      artist => $artist,
+      title => 'queen1',
+      year => 2007,
+      cd_to_producer => [
+        {
+          producer => {
+            name => $producer->name,
+            producer_to_cd => [
+              {
+                cd => {
+                  title => 'queen2',
+                  year => 2008,
+                  artist => $artist,
+                },
+              },
+            ],
+          },
+        },
+      ],
+    });
+
+    is ($schema->resultset('Artist')->count, $counts->{Artist}, 'No new artists');
+    is ($schema->resultset('Producer')->count, $counts->{Producer} + 1, '1 new producer');
+    is ($schema->resultset('CD')->count, $counts->{CD} + 2, '2 new cds');
+
+    is ($producer->cds->count, 2, 'CDs assigned to correct producer');
+    is_deeply (
+      [ $producer->cds->search ({}, { order_by => 'title' })->get_column('title')->all],
+      [ qw/queen1 queen2/ ],
+      'Correct cd names',
+    );
+  }, 'create() did not throw');
+}
+
+}
+
+done_testing;

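The TODO comment at the top of existing_in_chain.t describes the machinery that would make these tests pass. As a purely hypothetical sketch of that flow (neither the extended find() nor recursive_insert() exist yet):

  # hypothetical: find() returns the existing row with any missing
  # related data attached as in_storage => 0 objects ...
  my $obj = $rs->find($nested_dataset);

  # ... which a recursive insert would then persist in one go
  $obj->recursive_insert;
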
Added: DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/has_many.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/has_many.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/has_many.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,33 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 2;
+
+my $schema = DBICTest->init_schema();
+
+my $track_no_lyrics = $schema->resultset ('Track')
+              ->search ({ 'lyrics.lyric_id' => undef }, { join => 'lyrics' })
+                ->first;
+
+my $lyric = $track_no_lyrics->create_related ('lyrics', {
+  lyric_versions => [
+    { text => 'english doubled' },
+    { text => 'english doubled' },
+  ],
+});
+is ($lyric->lyric_versions->count, 2, "Two identical has_many's created");
+
+
+my $link = $schema->resultset ('Link')->create ({
+  url => 'lolcats!',
+  bookmarks => [
+    {},
+    {},
+  ]
+});
+is ($link->bookmarks->count, 2, "Two identical default-insert has_many's created");

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/m2m.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/m2m.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/m2m.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -23,7 +23,7 @@
 
   my $cd2 = $schema->resultset('CD')->search ( { cdid => { '!=', $cd->cdid } }, {rows => 1} )->single;  # retrieve a cd different from the first
   $cd2->add_to_producers ({name => 'new m2m producer'});                                                # attach to an existing producer
-  ok ($cd2->producers->find ({name => 'new m2m producer'}), 'Exsiting producer attached to existing cd');
+  ok ($cd2->producers->find ({name => 'new m2m producer'}), 'Existing producer attached to existing cd');
 
 }, 'Test far-end find_or_create over many_to_many');
 

Deleted: DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/multilev_might_have_PKeqFK.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/multilev_might_have_PKeqFK.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/multilev_might_have_PKeqFK.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,65 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use Test::Exception;
-use lib qw(t/lib);
-use DBICTest;
-
-sub mc_diag { diag (@_) if $ENV{DBIC_MULTICREATE_DEBUG} };
-
-plan tests => 8;
-
-my $schema = DBICTest->init_schema();
-
-mc_diag (<<'DG');
-* Test a multilevel might-have with a PK == FK in the might_have/has_many table
-
-CD -> might have -> Artwork
-                       \
-                        \-> has_many \
-                                      --> Artwork_to_Artist
-                        /-> has_many /
-                       /
-                     Artist
-DG
-
-lives_ok (sub {
-  my $someartist = $schema->resultset('Artist')->first;
-  my $cd = $schema->resultset('CD')->create ({
-    artist => $someartist,
-    title => 'Music to code by until the cows come home',
-    year => 2008,
-    artwork => {
-      artwork_to_artist => [
-        { artist => { name => 'cowboy joe' } },
-        { artist => { name => 'billy the kid' } },
-      ],
-    },
-  });
-
-  isa_ok ($cd, 'DBICTest::CD', 'Main CD object created');
-  is ($cd->title, 'Music to code by until the cows come home', 'Correct CD title');
-
-  my $art_obj = $cd->artwork;
-  ok ($art_obj->has_column_loaded ('cd_id'), 'PK/FK present on artwork object');
-  is ($art_obj->artists->count, 2, 'Correct artwork creator count via the new object');
-  is_deeply (
-    [ sort $art_obj->artists->get_column ('name')->all ],
-    [ 'billy the kid', 'cowboy joe' ],
-    'Artists named correctly when queried via object',
-  );
-
-  my $artwork = $schema->resultset('Artwork')->search (
-    { 'cd.title' => 'Music to code by until the cows come home' },
-    { join => 'cd' },
-  )->single;
-  is ($artwork->artists->count, 2, 'Correct artwork creator count via a new search');
-  is_deeply (
-    [ sort $artwork->artists->get_column ('name')->all ],
-    [ 'billy the kid', 'cowboy joe' ],
-    'Artists named correctly queried via a new search',
-  );
-}, 'multilevel might-have with a PK == FK in the might_have/has_many table ok');
-
-1;

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/multilev_single_PKeqFK.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/multilev_might_have_PKeqFK.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/multilev_single_PKeqFK.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/multilev_single_PKeqFK.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,103 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+sub mc_diag { diag (@_) if $ENV{DBIC_MULTICREATE_DEBUG} };
+
+my $schema = DBICTest->init_schema();
+
+mc_diag (<<'DG');
+* Test a multilevel might-have/has_one with a PK == FK in the mid-table
+
+CD -> might have -> Artwork
+    \- has_one -/     \
+                       \
+                        \-> has_many \
+                                      --> Artwork_to_Artist
+                        /-> has_many /
+                       /
+                     Artist
+DG
+
+my $rels = {
+  has_one => 'mandatory_artwork',
+  might_have => 'artwork',
+};
+
+for my $type (qw/has_one might_have/) {
+
+  lives_ok (sub {
+
+    my $rel = $rels->{$type};
+    my $cd_title = "Simple test $type cd";
+
+    my $cd = $schema->resultset('CD')->create ({
+      artist => 1,
+      title => $cd_title,
+      year => 2008,
+      $rel => {},
+    });
+
+    isa_ok ($cd, 'DBICTest::CD', 'Main CD object created');
+    is ($cd->title, $cd_title, 'Correct CD title');
+
+    isa_ok ($cd->$rel, 'DBICTest::Artwork', 'Related artwork present');
+    ok ($cd->$rel->in_storage, 'And in storage');
+
+  }, "Simple $type creation");
+}
+
+my $artist_rs = $schema->resultset('Artist');
+for my $type (qw/has_one might_have/) {
+
+  my $rel = $rels->{$type};
+
+  my $cd_title = "Test $type cd";
+  my $artist_names = [ map { "Artist via $type $_" } (1, 2) ];
+
+  my $someartist = $artist_rs->next;
+
+  lives_ok (sub {
+    my $cd = $schema->resultset('CD')->create ({
+      artist => $someartist,
+      title => $cd_title,
+      year => 2008,
+      $rel => {
+      artwork_to_artist => [ map {
+            { artist => { name => $_ } }
+          } (@$artist_names)
+        ]
+      },
+    });
+
+
+    isa_ok ($cd, 'DBICTest::CD', 'Main CD object created');
+    is ($cd->title, $cd_title, 'Correct CD title');
+
+    my $art_obj = $cd->$rel;
+    ok ($art_obj->has_column_loaded ('cd_id'), 'PK/FK present on artwork object');
+    is ($art_obj->artists->count, 2, 'Correct artwork creator count via the new object');
+    is_deeply (
+      [ sort $art_obj->artists->get_column ('name')->all ],
+      $artist_names,
+      'Artists named correctly when queried via object',
+    );
+
+    my $artwork = $schema->resultset('Artwork')->search (
+      { 'cd.title' => $cd_title },
+      { join => 'cd' },
+    )->single;
+    is ($artwork->artists->count, 2, 'Correct artwork creator count via a new search');
+    is_deeply (
+      [ sort $artwork->artists->get_column ('name')->all ],
+      $artist_names,
+      'Artists named correctly queried via a new search',
+    );
+  }, "multilevel $type with a PK == FK in the $type/has_many table ok");
+}
+
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/standard.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/standard.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/multi_create/standard.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -6,7 +6,7 @@
 use lib qw(t/lib);
 use DBICTest;
 
-plan tests => 93;
+plan tests => 91;
 
 my $schema = DBICTest->init_schema();
 
@@ -72,7 +72,7 @@
       ],
     });
   },
-  qr/Recursive update is not supported over relationships of type multi/,
+  qr/Recursive update is not supported over relationships of type 'multi'/,
   'create via update of multi relationships throws an exception'
 );
 
@@ -329,80 +329,60 @@
 }, 'Nested find_or_create');
 
 lives_ok ( sub {
-  my $artist2 = $schema->resultset('Artist')->create({
-    name => 'Fred 4',
-    cds => [
-      {
-        title => 'Music to code by',
-        year => 2007,
-      },
+  my $artist = $schema->resultset('Artist')->first;
+  
+  my $cd_result = $artist->create_related('cds', {
+  
+    title => 'TestOneCD1',
+    year => 2007,
+    tracks => [
+      { title => 'TrackOne' },
+      { title => 'TrackTwo' },
     ],
-    cds_unordered => [
-      {
-        title => 'Music to code by',
-        year => 2007,
-      },
-    ]
-  });
 
-  is($artist2->in_storage, 1, 'artist with duplicate rels inserted okay');
-}, 'Multiple same level has_many create');
-
-lives_ok ( sub {
-	my $artist = $schema->resultset('Artist')->first;
-	
-	my $cd_result = $artist->create_related('cds', {
-	
-		title => 'TestOneCD1',
-		year => 2007,
-		tracks => [
-			{ title => 'TrackOne' },
-			{ title => 'TrackTwo' },
-		],
-
-	});
-	
-	isa_ok( $cd_result, 'DBICTest::CD', "Got Good CD Class");
-	ok( $cd_result->title eq "TestOneCD1", "Got Expected Title");
-	
-	my $tracks = $cd_result->tracks;
-	
-	isa_ok( $tracks, 'DBIx::Class::ResultSet', 'Got Expected Tracks ResultSet');
-	
-	foreach my $track ($tracks->all)
-	{
-		isa_ok( $track, 'DBICTest::Track', 'Got Expected Track Class');
-	}
+  });
+  
+  isa_ok( $cd_result, 'DBICTest::CD', "Got Good CD Class");
+  ok( $cd_result->title eq "TestOneCD1", "Got Expected Title");
+  
+  my $tracks = $cd_result->tracks;
+  
+  isa_ok( $tracks, 'DBIx::Class::ResultSet', 'Got Expected Tracks ResultSet');
+  
+  foreach my $track ($tracks->all)
+  {
+    isa_ok( $track, 'DBICTest::Track', 'Got Expected Track Class');
+  }
 }, 'First create_related pass');
 
 lives_ok ( sub {
-	my $artist = $schema->resultset('Artist')->first;
-	
-	my $cd_result = $artist->create_related('cds', {
-	
-		title => 'TestOneCD2',
-		year => 2007,
-		tracks => [
-			{ title => 'TrackOne' },
-			{ title => 'TrackTwo' },
-		],
+  my $artist = $schema->resultset('Artist')->first;
+  
+  my $cd_result = $artist->create_related('cds', {
+  
+    title => 'TestOneCD2',
+    year => 2007,
+    tracks => [
+      { title => 'TrackOne' },
+      { title => 'TrackTwo' },
+    ],
 
     liner_notes => { notes => 'I can haz liner notes?' },
 
-	});
-	
-	isa_ok( $cd_result, 'DBICTest::CD', "Got Good CD Class");
-	ok( $cd_result->title eq "TestOneCD2", "Got Expected Title");
+  });
+  
+  isa_ok( $cd_result, 'DBICTest::CD', "Got Good CD Class");
+  ok( $cd_result->title eq "TestOneCD2", "Got Expected Title");
   ok( $cd_result->notes eq 'I can haz liner notes?', 'Liner notes');
-	
-	my $tracks = $cd_result->tracks;
-	
-	isa_ok( $tracks, 'DBIx::Class::ResultSet', "Got Expected Tracks ResultSet");
-	
-	foreach my $track ($tracks->all)
-	{
-		isa_ok( $track, 'DBICTest::Track', 'Got Expected Track Class');
-	}
+  
+  my $tracks = $cd_result->tracks;
+  
+  isa_ok( $tracks, 'DBIx::Class::ResultSet', "Got Expected Tracks ResultSet");
+  
+  foreach my $track ($tracks->all)
+  {
+    isa_ok( $track, 'DBICTest::Track', 'Got Expected Track Class');
+  }
 }, 'second create_related with same arguments');
 
 lives_ok ( sub {
@@ -429,7 +409,7 @@
 
   is($a->name, 'Kurt Cobain', 'Artist insertion ok');
   is($a->cds && $a->cds->first && $a->cds->first->title, 
-		  'In Utero', 'CD insertion ok');
+      'In Utero', 'CD insertion ok');
 }, 'populate');
 
 ## Create foreign key col obj including PK
@@ -451,7 +431,7 @@
 }, 'Create foreign key col obj including PK');
 
 lives_ok ( sub {
-	$schema->resultset("CD")->create({ 
+  $schema->resultset("CD")->create({ 
               cdid => 28, 
               title => 'Boogie Wiggle', 
               year => '2007', 

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/attrs_untouched.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/attrs_untouched.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/attrs_untouched.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -4,7 +4,9 @@
 use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
+
 use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
 
 my $schema = DBICTest->init_schema();
 

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/count.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/count.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/count.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,101 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+plan tests => 23;
+
+my $schema = DBICTest->init_schema();
+
+my $cd_rs = $schema->resultset('CD')->search (
+  { 'tracks.cd' => { '!=', undef } },
+  { prefetch => ['tracks', 'artist'] },
+);
+
+
+is($cd_rs->count, 5, 'CDs with tracks count');
+is($cd_rs->search_related('tracks')->count, 15, 'Tracks associated with CDs count (before SELECT()ing)');
+
+is($cd_rs->all, 5, 'Amount of CD objects with tracks');
+is($cd_rs->search_related('tracks')->count, 15, 'Tracks associated with CDs count (after SELECT()ing)');
+
+is($cd_rs->search_related ('tracks')->all, 15, 'Track objects associated with CDs (after SELECT()ing)');
+
+my $artist = $schema->resultset('Artist')->create({name => 'xxx'});
+
+my $artist_rs = $schema->resultset('Artist')->search(
+  {artistid => $artist->id},
+  {prefetch=>'cds', join => 'twokeys' }
+);
+
+is($artist_rs->count, 1, "New artist found with prefetch turned on");
+is(scalar($artist_rs->all), 1, "New artist fetched with prefetch turned on");
+is($artist_rs->related_resultset('cds')->count, 0, "No CDs counted on a brand new artist");
+is(scalar($artist_rs->related_resultset('cds')->all), 0, "No CDs fetched on a brand new artist (count == fetch)");
+
+# create a cd, and make sure the non-existing join does not skew the count
+$artist->create_related ('cds', { title => 'yyy', year => '1999' });
+is($artist_rs->related_resultset('cds')->count, 1, "1 CDs counted on a brand new artist");
+is(scalar($artist_rs->related_resultset('cds')->all), 1, "1 CDs prefetched on a brand new artist (count == fetch)");
+
+# Really fuck shit up with one more cd and some insanity
+# this doesn't quite work, as the prefetch gets lost
+# on search_related. This however is too esoteric to fix right
+# now
+
+my $cd2 = $artist->create_related ('cds', {
+    title => 'zzz',
+    year => '1999',
+    tracks => [{ title => 'ping' }, { title => 'pong' }],
+});
+
+my $cds = $cd2->search_related ('artist', {}, { join => 'twokeys' })
+                  ->search_related ('cds');
+my $tracks = $cds->search_related ('tracks');
+
+is($tracks->count, 2, "2 Tracks counted on cd via artist via one of the cds");
+is(scalar($tracks->all), 2, "2 Track objects on cd via artist via one of the cds");
+
+is($cds->count, 2, "2 CDs counted on artist via one of the cds");
+is(scalar($cds->all), 2, "2 CD objects on artist via one of the cds");
+
+# make sure the join collapses all the way
+is_same_sql_bind (
+  $tracks->count_rs->as_query,
+  '(
+    SELECT COUNT( * )
+      FROM artist me
+      LEFT JOIN twokeys twokeys ON twokeys.artist = me.artistid
+      JOIN cd cds ON cds.artist = me.artistid
+      JOIN track tracks ON tracks.cd = cds.cdid
+    WHERE ( me.artistid = ? )
+  )',
+  [ [ 'me.artistid' => 4 ] ],
+);
+
+
+TODO: {
+  local $TODO = "Chaining with prefetch is fundamentally broken";
+
+  my $queries;
+  $schema->storage->debugcb ( sub { $queries++ } );
+  $schema->storage->debug (1);
+
+  my $cds = $cd2->search_related ('artist', {}, { prefetch => { cds => 'tracks' }, join => 'twokeys' })
+                  ->search_related ('cds');
+
+  my $tracks = $cds->search_related ('tracks');
+
+  is($tracks->count, 2, "2 Tracks counted on cd via artist via one of the cds");
+  is(scalar($tracks->all), 2, "2 Tracks prefetched on cd via artist via one of the cds");
+  is($tracks->count, 2, "Cached 2 Tracks counted on cd via artist via one of the cds");
+
+  is($cds->count, 2, "2 CDs counted on artist via one of the cds");
+  is(scalar($cds->all), 2, "2 CDs prefetched on artist via one of the cds");
+  is($cds->count, 2, "Cached 2 CDs counted on artist via one of the cds");
+
+  is ($queries, 3, '2 counts + 1 prefetch?');
+}

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/diamond.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/diamond.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/diamond.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -96,12 +96,12 @@
   }
 }
 
-plan tests => (scalar (keys %tests) * 3);
-
 foreach my $name (keys %tests) {
   foreach my $artwork ($tests{$name}->all()) {
     is($artwork->id, 1, $name . ', correct artwork');
     is($artwork->cd->artist->artistid, 1, $name . ', correct artist_id over cd');
     is($artwork->artwork_to_artist->first->artist->artistid, 2, $name . ', correct artist_id over A2A');
   }
-}
\ No newline at end of file
+}
+
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/double_prefetch.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/double_prefetch.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/double_prefetch.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -23,11 +23,11 @@
   '(
     SELECT
       cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track,
-      single_track.trackid, single_track.cd, single_track.position, single_track.title, single_track.last_updated_on, single_track.last_updated_at,
-      single_track_2.trackid, single_track_2.cd, single_track_2.position, single_track_2.title, single_track_2.last_updated_on, single_track_2.last_updated_at,
+      single_track.trackid, single_track.cd, single_track.position, single_track.title, single_track.last_updated_on, single_track.last_updated_at, single_track.small_dt,
+      single_track_2.trackid, single_track_2.cd, single_track_2.position, single_track_2.title, single_track_2.last_updated_on, single_track_2.last_updated_at, single_track_2.small_dt,
       cd.cdid, cd.artist, cd.title, cd.year, cd.genreid, cd.single_track
     FROM artist me
-      LEFT JOIN cd cds ON cds.artist = me.artistid
+      JOIN cd cds ON cds.artist = me.artistid
       LEFT JOIN track single_track ON single_track.trackid = cds.single_track
       LEFT JOIN track single_track_2 ON single_track_2.trackid = cds.single_track
       LEFT JOIN cd cd ON cd.cdid = single_track_2.cd

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/grouped.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/grouped.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/grouped.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,354 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+my $schema = DBICTest->init_schema();
+my $sdebug = $schema->storage->debug;
+
+my $cd_rs = $schema->resultset('CD')->search (
+  { 'tracks.cd' => { '!=', undef } },
+  { prefetch => 'tracks' },
+);
+
+# Database sanity check
+is($cd_rs->count, 5, 'CDs with tracks count');
+for ($cd_rs->all) {
+  is ($_->tracks->count, 3, '3 tracks for CD' . $_->id );
+}
+
+# Test a belongs_to prefetch of a has_many
+{
+  my $track_rs = $schema->resultset ('Track')->search (
+    { 'me.cd' => { -in => [ $cd_rs->get_column ('cdid')->all ] } },
+    {
+      select => [
+        'me.cd',
+        { count => 'me.trackid' },
+      ],
+      as => [qw/
+        cd
+        track_count
+      /],
+      group_by => [qw/me.cd/],
+      prefetch => 'cd',
+    },
+  );
+
+  # this used to fuck up ->all, do not remove!
+  ok ($track_rs->first, 'There is stuff in the rs');
+
+  is($track_rs->count, 5, 'Prefetched count with groupby');
+  is($track_rs->all, 5, 'Prefetched objects with groupby');
+
+  {
+    my $query_cnt = 0;
+    $schema->storage->debugcb ( sub { $query_cnt++ } );
+    $schema->storage->debug (1);
+
+    while (my $collapsed_track = $track_rs->next) {
+      my $cdid = $collapsed_track->get_column('cd');
+      is($collapsed_track->get_column('track_count'), 3, "Correct count of tracks for CD $cdid" );
+      ok($collapsed_track->cd->title, "Prefetched title for CD $cdid" );
+    }
+
+    is ($query_cnt, 1, 'Single query on prefetched titles');
+    $schema->storage->debugcb (undef);
+    $schema->storage->debug ($sdebug);
+  }
+
+  # Test sql by hand, as the sqlite db will simply paper over
+  # improper group/select combinations
+  #
+  is_same_sql_bind (
+    $track_rs->count_rs->as_query,
+    '(
+      SELECT COUNT( * )
+        FROM (
+          SELECT me.cd
+            FROM track me
+            JOIN cd cd ON cd.cdid = me.cd
+          WHERE ( me.cd IN ( ?, ?, ?, ?, ? ) )
+          GROUP BY me.cd
+        )
+      count_subq
+    )',
+    [ map { [ 'me.cd' => $_] } ($cd_rs->get_column ('cdid')->all) ],
+    'count() query generated expected SQL',
+  );
+
+  is_same_sql_bind (
+    $track_rs->as_query,
+    '(
+      SELECT me.cd, me.track_count, cd.cdid, cd.artist, cd.title, cd.year, cd.genreid, cd.single_track
+        FROM (
+          SELECT me.cd, COUNT (me.trackid) AS track_count
+            FROM track me
+            JOIN cd cd ON cd.cdid = me.cd
+          WHERE ( me.cd IN ( ?, ?, ?, ?, ? ) )
+          GROUP BY me.cd
+          ) me
+        JOIN cd cd ON cd.cdid = me.cd
+      WHERE ( me.cd IN ( ?, ?, ?, ?, ? ) )
+    )',
+    [ map { [ 'me.cd' => $_] } ( ($cd_rs->get_column ('cdid')->all) x 2 ) ],
+    'next() query generated expected SQL',
+  );
+
+
+  # add an extra track to one of the cds, and then make sure we can get it on top
+  # (check if limit works)
+  my $top_cd = $cd_rs->slice (1,1)->next;
+  $top_cd->create_related ('tracks', {
+    title => 'over the top',
+  });
+
+  my $top_cd_collapsed_track = $track_rs->search ({}, {
+    rows => 2,
+    order_by => [
+      { -desc => 'track_count' },
+    ],
+  });
+
+  is ($top_cd_collapsed_track->count, 2);
+
+  is (
+    $top_cd->title,
+    $top_cd_collapsed_track->first->cd->title,
+    'Correct collapsed track with prefetched CD returned on top'
+  );
+}
+
+# test a has_many/might_have prefetch at the same level
+# Note that one of the CDs now has 4 tracks instead of 3
+{
+  my $most_tracks_rs = $schema->resultset ('CD')->search (
+    {
+      'me.cdid' => { '!=' => undef },  # duh - this is just to test WHERE
+    },
+    {
+      prefetch => [qw/tracks liner_notes/],
+      select => ['me.cdid', { count => 'tracks.trackid' }, { max => 'tracks.trackid', -as => 'maxtr'} ],
+      as => [qw/cdid track_count max_track_id/],
+      group_by => 'me.cdid',
+      order_by => [ { -desc => 'track_count' }, { -asc => 'maxtr' } ],
+      rows => 2,
+    }
+  );
+
+  is_same_sql_bind (
+    $most_tracks_rs->count_rs->as_query,
+    '(
+      SELECT COUNT( * )
+        FROM (
+          SELECT me.cdid
+            FROM cd me
+          WHERE ( me.cdid IS NOT NULL )
+          GROUP BY me.cdid
+          LIMIT 2
+        ) count_subq
+    )',
+    [],
+    'count() query generated expected SQL',
+  );
+
+  is_same_sql_bind (
+    $most_tracks_rs->as_query,
+    '(
+      SELECT  me.cdid, me.track_count, me.maxtr,
+              tracks.trackid, tracks.cd, tracks.position, tracks.title, tracks.last_updated_on, tracks.last_updated_at, tracks.small_dt,
+              liner_notes.liner_id, liner_notes.notes
+        FROM (
+          SELECT me.cdid, COUNT( tracks.trackid ) AS track_count, MAX( tracks.trackid ) AS maxtr
+            FROM cd me
+            LEFT JOIN track tracks ON tracks.cd = me.cdid
+          WHERE ( me.cdid IS NOT NULL )
+          GROUP BY me.cdid
+          ORDER BY track_count DESC, maxtr ASC
+          LIMIT 2
+        ) me
+        LEFT JOIN track tracks ON tracks.cd = me.cdid
+        LEFT JOIN liner_notes liner_notes ON liner_notes.liner_id = me.cdid
+      WHERE ( me.cdid IS NOT NULL )
+      ORDER BY track_count DESC, maxtr ASC, tracks.cd
+    )',
+    [],
+    'next() query generated expected SQL',
+  );
+
+  is ($most_tracks_rs->count, 2, 'Limit works');
+  my $top_cd = $most_tracks_rs->first;
+  is ($top_cd->id, 2, 'Correct cd fetched on top'); # 2 because of the slice(1,1) earlier
+
+  my $query_cnt = 0;
+  $schema->storage->debugcb ( sub { $query_cnt++ } );
+  $schema->storage->debug (1);
+
+  is ($top_cd->get_column ('track_count'), 4, 'Track count fetched correctly');
+  is ($top_cd->tracks->count, 4, 'Count of prefetched tracks rs still correct');
+  is ($top_cd->tracks->all, 4, 'Number of prefetched track objects still correct');
+  is (
+    $top_cd->liner_notes->notes,
+    'Buy Whiskey!',
+    'Correct liner pre-fetched with top cd',
+  );
+
+  is ($query_cnt, 0, 'No queries executed during prefetched data access');
+  $schema->storage->debugcb (undef);
+  $schema->storage->debug ($sdebug);
+}
+
+# make sure that distinct still works
+{
+  my $rs = $schema->resultset("CD")->search({}, {
+    prefetch => 'tags',
+    order_by => 'cdid',
+    distinct => 1,
+  });
+
+  is_same_sql_bind (
+    $rs->as_query,
+    '(
+      SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+             tags.tagid, tags.cd, tags.tag
+        FROM (
+          SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+            FROM cd me
+          GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, cdid
+          ORDER BY cdid
+        ) me
+        LEFT JOIN tags tags ON tags.cd = me.cdid
+      ORDER BY cdid, tags.cd, tags.tag
+    )',
+    [],
+    'Prefetch + distinct resulted in correct group_by',
+  );
+
+  is ($rs->all, 5, 'Correct number of CD objects');
+  is ($rs->count, 5, 'Correct count of CDs');
+}
+
+# RT 47779, test group_by as a scalar ref
+{
+  my $track_rs = $schema->resultset ('Track')->search (
+    { 'me.cd' => { -in => [ $cd_rs->get_column ('cdid')->all ] } },
+    {
+      select => [
+        'me.cd',
+        { count => 'me.trackid' },
+      ],
+      as => [qw/
+        cd
+        track_count
+      /],
+      group_by => \'SUBSTR(me.cd, 1, 1)',
+      prefetch => 'cd',
+    },
+  );
+
+  is_same_sql_bind (
+    $track_rs->count_rs->as_query,
+    '(
+      SELECT COUNT( * )
+        FROM (
+          SELECT SUBSTR(me.cd, 1, 1)
+            FROM track me
+            JOIN cd cd ON cd.cdid = me.cd
+          WHERE ( me.cd IN ( ?, ?, ?, ?, ? ) )
+          GROUP BY SUBSTR(me.cd, 1, 1)
+        )
+      count_subq
+    )',
+    [ map { [ 'me.cd' => $_] } ($cd_rs->get_column ('cdid')->all) ],
+    'count() query generated expected SQL',
+  );
+}
+
+{
+    my $cd_rs = $schema->resultset('CD')->search({}, {
+            distinct => 1,
+            join     => [qw/ tracks /],
+            prefetch => [qw/ artist /],
+        });
+    is($cd_rs->count, 5, 'complex prefetch + non-prefetching has_many join count correct');
+    is($cd_rs->all, 5, 'complex prefetch + non-prefetching has_many join number of objects correct');
+
+    # make sure the tracks join was thrown out
+    is_same_sql_bind (
+      $cd_rs->as_query,
+      '(
+        SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+               artist.artistid, artist.name, artist.rank, artist.charfield
+          FROM (
+            SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+              FROM cd me
+              JOIN artist artist ON artist.artistid = me.artist
+            GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+          ) me
+          JOIN artist artist ON artist.artistid = me.artist
+      )',
+      [],
+    );
+
+
+
+    # try the same as above, but add a condition so the tracks join cannot be thrown away
+    my $cd_rs2 = $cd_rs->search ({ 'tracks.title' => { '!=' => 'ugabuganoexist' } });
+    is($cd_rs2->count, 5, 'complex prefetch + non-prefetching restricted has_many join count correct');
+    is($cd_rs2->all, 5, 'complex prefetch + non-prefetching restricted has_many join number of objects correct');
+
+    # the outer group_by seems like a necessary evil, if someone can figure out how to take it away
+    # without breaking compat - be my guest
+    is_same_sql_bind (
+      $cd_rs2->as_query,
+      '(
+        SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+               artist.artistid, artist.name, artist.rank, artist.charfield
+          FROM (
+            SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+              FROM cd me
+              LEFT JOIN track tracks ON tracks.cd = me.cdid
+              JOIN artist artist ON artist.artistid = me.artist
+            WHERE ( tracks.title != ? )
+            GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+          ) me
+          LEFT JOIN track tracks ON tracks.cd = me.cdid
+          JOIN artist artist ON artist.artistid = me.artist
+        WHERE ( tracks.title != ? )
+        GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+                 artist.artistid, artist.name, artist.rank, artist.charfield
+      )',
+      [ map { [ 'tracks.title' => 'ugabuganoexist' ] } (1 .. 2) ],
+    );
+}
+
+{
+    my $rs = $schema->resultset('CD')->search({},
+        {
+           '+select' => [{ count => 'tags.tag' }],
+           '+as' => ['test_count'],
+           prefetch => ['tags'],
+           distinct => 1,
+           order_by => {'-asc' => 'tags.tag'},
+           rows => 1
+        }
+    );
+    is_same_sql_bind($rs->as_query, q{
+        (SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, me.test_count, tags.tagid, tags.cd, tags.tag
+          FROM (SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, COUNT( tags.tag ) AS test_count
+                FROM cd me LEFT JOIN tags tags ON tags.cd = me.cdid
+            GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track, tags.tag
+            ORDER BY tags.tag ASC LIMIT 1)
+            me
+          LEFT JOIN tags tags ON tags.cd = me.cdid
+         ORDER BY tags.tag ASC, tags.cd, tags.tag
+        )
+    }, []);
+}
+
+done_testing;
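
A note for readers following the counting assertions above: the query-counting
idiom used throughout these tests boils down to installing a debug callback and
restoring the original settings afterwards. A minimal sketch, assuming a
DBICTest-style $schema; the $queries counter name is illustrative:

  my $queries = 0;
  my $orig_debug = $schema->storage->debug;        # remember the original setting
  $schema->storage->debugcb( sub { $queries++ } ); # invoked once per executed statement
  $schema->storage->debug(1);                      # debug must be on for the callback to fire

  # ... exercise the resultset under test ...

  $schema->storage->debugcb(undef);                # detach the counter
  $schema->storage->debug($orig_debug);            # restore the prior debug state

The same pattern appears verbatim in several of the files below.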

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/incomplete.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/incomplete.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/incomplete.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,53 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 9;
+
+my $schema = DBICTest->init_schema();
+
+lives_ok(sub {
+  # while cds.* will be selected anyway (prefetch currently forces the result of _resolve_prefetch),
+  # only the requested me.name column will be fetched.
+
+  # reference sql with select => [...]
+  #   SELECT me.name, cds.title, cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track FROM ...
+
+  my $rs = $schema->resultset('Artist')->search(
+    { 'cds.title' => { '!=', 'Generic Manufactured Singles' } },
+    {
+      prefetch => [ qw/ cds / ],
+      order_by => [ { -desc => 'me.name' }, 'cds.title' ],
+      select => [qw/ me.name  cds.title / ],
+    }
+  );
+
+  is ($rs->count, 2, 'Correct number of collapsed artists');
+  my $we_are_goth = $rs->first;
+  is ($we_are_goth->name, 'We Are Goth', 'Correct first artist');
+  is ($we_are_goth->cds->count, 1, 'Correct number of CDs for first artist');
+  is ($we_are_goth->cds->first->title, 'Come Be Depressed With Us', 'Correct cd for artist');
+}, 'explicit prefetch on a keyless object works');
+
+
+lives_ok(sub {
+  # test implicit prefetch as well
+
+  my $rs = $schema->resultset('CD')->search(
+    { title => 'Generic Manufactured Singles' },
+    {
+      join=> 'artist',
+      select => [qw/ me.title artist.name / ],
+    }
+  );
+
+  my $cd = $rs->next;
+  is ($cd->title, 'Generic Manufactured Singles', 'CD title prefetched correctly');
+  isa_ok ($cd->artist, 'DBICTest::Artist');
+  is ($cd->artist->name, 'Random Boy Band', 'Artist object has correct name');
+
+}, 'implicit keyless prefetch works');

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/join_type.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/join_type.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/join_type.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,48 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBIC::SqlMakerTest;
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+
+# a regular belongs_to prefetch
+my $cds = $schema->resultset('CD')->search ({}, { prefetch => 'artist' } );
+
+my $nulls = {
+  hashref => {},
+  arrayref => [],
+  undef => undef,
+};
+
+# make sure null-prefetches do not screw with the final sql (checks below currently disabled):
+for my $type (keys %$nulls) {
+#  is_same_sql_bind (
+#    $cds->search({}, { prefetch => { artist => $nulls->{$type} } })->as_query,
+#    $cds->as_query,
+#    "same sql with null $type prefetch"
+#  );
+}
+
+# make sure left join is carried only starting from the first has_many
+is_same_sql_bind (
+  $cds->search({}, { prefetch => { artist => { cds => 'artist' } } })->as_query,
+  '(
+    SELECT  me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+            artist.artistid, artist.name, artist.rank, artist.charfield,
+            cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track,
+            artist_2.artistid, artist_2.name, artist_2.rank, artist_2.charfield
+      FROM cd me
+      JOIN artist artist ON artist.artistid = me.artist
+      LEFT JOIN cd cds ON cds.artist = artist.artistid
+      LEFT JOIN artist artist_2 ON artist_2.artistid = cds.artist
+    ORDER BY cds.artist, cds.year
+  )',
+  [],
+);
+
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/multiple_hasmany.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/multiple_hasmany.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/multiple_hasmany.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -5,14 +5,11 @@
 use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
-use Data::Dumper;
+use IO::File;
 
-plan tests => 10;
-
 my $schema = DBICTest->init_schema();
+my $sdebug = $schema->storage->debug;
 
-use IO::File;
-
 # once the following TODO is complete, remove the 2 warning tests immediately
 # after the TODO block
 # (the TODO block itself contains tests ensuring that the warns are removed)
@@ -44,19 +41,17 @@
     ok(! $o_mm_warn, 'no warning on attempt to prefetch several same level has_many\'s (1 -> M + M)');
 
     is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
+    $schema->storage->debugcb (undef);
+    $schema->storage->debug ($sdebug);
+
     is($pr_tracks_count, $tracks_count, 'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)');
+    is ($pr_tracks_rs->all, $tracks_rs->all, 'equal amount of objects returned with and without prefetch over several same level has_many\'s (1 -> M + M)');
 
-    for ($pr_tracks_rs, $tracks_rs) {
-        $_->result_class ('DBIx::Class::ResultClass::HashRefInflator');
-    }
-
-    is_deeply ([$pr_tracks_rs->all], [$tracks_rs->all], 'same structure returned with and without prefetch over several same level has_many\'s (1 -> M + M)');
-
     #( M -> 1 -> M + M )
     my $note_rs = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' });
     my $pr_note_rs = $note_rs->search ({}, {
         prefetch => {
-            cd => [qw/tags tracks/]
+            cd => [qw/tracks tags/]
         },
     });
 
@@ -79,14 +74,11 @@
     ok(! $m_o_mm_warn, 'no warning on attempt to prefetch several same level has_many\'s (M -> 1 -> M + M)');
 
     is($queries, 1, 'prefetch one->(has_many,has_many) ran exactly 1 query');
+    $schema->storage->debugcb (undef);
+    $schema->storage->debug ($sdebug);
 
     is($pr_tags_count, $tags_count, 'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)');
-
-    for ($pr_tags_rs, $tags_rs) {
-        $_->result_class ('DBIx::Class::ResultClass::HashRefInflator');
-    }
-
-    is_deeply ([$pr_tags_rs->all], [$tags_rs->all], 'same structure returned with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)');
+    is($pr_tags_rs->all, $tags_rs->all, 'equal amount of objects with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)');
 }
 
 # remove this closure once the TODO above is working
@@ -107,44 +99,4 @@
     is (@w, 1, 'warning on attempt prefetching several same level has_manys (M -> 1 -> M + M)');
 }
 
-__END__
-The solution is to rewrite ResultSet->_collapse_result() and
-ResultSource->resolve_prefetch() to focus on the final results from the collapse
-of the data. Right now, the code doesn't treat the columns from the various
-tables as grouped entities. While there is a concept of hierarchy (so that
-prefetching down relationships does work as expected), there is no idea of what
-the final product should look like and how the various columns in the row would
-play together. So, the actual prefetch datastructure from the search would be
-very useful in working through this problem. We already have access to the PKs
-and sundry for those. So, when collapsing the search result, we know we are
-looking for 1 cd object. We also know we're looking for tracks and tags records
--independently- of each other. So, we can grab the data for tracks and data for
-tags separately, uniqueing on the PK as appropriate. Then, when we're done with
-the given cd object's datastream, we know we're good. This should work for all
-the various scenarios.
-
-My reccommendation is the row's data is preprocessed first, breaking it up into
-the data for each of the component tables. (This could be done in the single
-table case, too, but probably isn't necessary.) So, starting with something
-like:
-  my $row = {
-    t1.col1 => 1,
-    t1.col2 => 2,
-    t2.col1 => 3,
-    t2.col2 => 4,
-    t3.col1 => 5,
-    t3.col2 => 6,
-  };
-it is massaged to look something like:
-  my $row_massaged = {
-    t1 => { col1 => 1, col2 => 2 },
-    t2 => { col1 => 3, col2 => 4 },
-    t3 => { col1 => 5, col2 => 6 },
-  };
-At this point, find the stuff that's different is easy enough to do and slotting
-things into the right spot is, likewise, pretty straightforward. Instead of
-storing things in a AoH, store them in a HoH keyed on the PKs of the the table,
-then convert to an AoH after all collapsing is done.
-
-This implies that the collapse attribute can probably disappear or, at the
-least, be turned into a boolean (which is how it's used in every other place).
+done_testing;
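
The design notes removed above describe a row "massaging" step that is
mechanical enough to sketch. Assuming flat rows keyed as 'alias.column',
exactly the shape of the removed example, a minimal illustration; massage_row
is a hypothetical helper, not part of DBIx::Class:

  # split 'alias.column' keys into a hash-of-hashes keyed on table alias,
  # turning { 't1.col1' => 1, 't2.col1' => 3, ... }
  # into    { t1 => { col1 => 1 }, t2 => { col1 => 3 }, ... }
  sub massage_row {
    my ($row) = @_;
    my %massaged;
    for my $key (keys %$row) {
      my ($alias, $col) = split /\./, $key, 2;
      $massaged{$alias}{$col} = $row->{$key};
    }
    return \%massaged;
  }

From there the notes suggest collapsing into a hash-of-hashes keyed on each
table's primary key, converting to an array only once collapsing is done.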

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/one_to_many_to_one.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/one_to_many_to_one.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/one_to_many_to_one.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,35 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $artist = $schema->resultset ('Artist')->find ({artistid => 1});
+is ($artist->cds->count, 3, 'Correct number of CDs');
+is ($artist->cds->search_related ('genre')->count, 1, 'Only one of the cds has a genre');
+
+my $queries = 0;
+my $orig_cb = $schema->storage->debugcb;
+$schema->storage->debugcb(sub { $queries++ });
+$schema->storage->debug(1);
+
+
+my $pref = $schema->resultset ('Artist')
+                     ->search ({ 'me.artistid' => $artist->id }, { prefetch => { cds => 'genre' } })
+                      ->next;
+
+is ($pref->cds->count, 3, 'Correct number of CDs prefetched');
+is ($pref->cds->search_related ('genre')->count, 1, 'Only one of the prefetched cds has a prefetched genre');
+
+
+is ($queries, 1, 'All happened within one query only');
+$schema->storage->debugcb($orig_cb);
+$schema->storage->debug(0);
+
+
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/standard.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/standard.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/standard.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,25 +1,16 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
 use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
-use Data::Dumper;
 
 my $schema = DBICTest->init_schema();
-
 my $orig_debug = $schema->storage->debug;
 
-use IO::File;
+plan tests => 44;
 
-BEGIN {
-    eval "use DBD::SQLite";
-    plan $@
-        ? ( skip_all => 'needs DBD::SQLite for testing' )
-        : ( tests => 45 );
-}
-
 my $queries = 0;
 $schema->storage->debugcb(sub { $queries++; });
 $schema->storage->debug(1);
@@ -27,8 +18,6 @@
 my $search = { 'artist.name' => 'Caterwauler McCrae' };
 my $attr = { prefetch => [ qw/artist liner_notes/ ],
              order_by => 'me.cdid' };
-my $search_str = Dumper($search);
-my $attr_str = Dumper($attr);
 
 my $rs = $schema->resultset("CD")->search($search, $attr);
 my @cd = $rs->all;
@@ -227,29 +216,11 @@
 
 $tree_like = eval { $schema->resultset('TreeLike')->search(
     { 'children.id' => 3, 'children_2.id' => 6 }, 
-    { join => [qw/children children/] }
+    { join => [qw/children children children/] }
   )->search_related('children', { 'children_4.id' => 7 }, { prefetch => 'children' }
   )->first->children->first; };
 is(eval { $tree_like->name }, 'fong', 'Tree with multiple has_many joins ok');
 
-# test that collapsed joins don't get a _2 appended to the alias
-
-my $sql = '';
-$schema->storage->debugcb(sub { $sql = $_[1] });
-$schema->storage->debug(1);
-
-eval {
-  my $row = $schema->resultset('Artist')->search_related('cds', undef, {
-    join => 'tracks',
-    prefetch => 'tracks',
-  })->search_related('tracks')->first;
-};
-
-like( $sql, qr/^SELECT tracks_2\.trackid/, "join not collapsed for search_related" );
-
-$schema->storage->debug($orig_debug);
-$schema->storage->debugobj->callback(undef);
-
 $rs = $schema->resultset('Artist');
 $rs->create({ artistid => 4, name => 'Unknown singer-songwriter' });
 $rs->create({ artistid => 5, name => 'Emo 4ever' });
@@ -314,3 +285,5 @@
 
 is($queries, 0, 'chained search_related after has_many->has_many prefetch ran no queries');
 
+$schema->storage->debug($orig_debug);
+$schema->storage->debugobj->callback(undef);

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/via_search_related.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/via_search_related.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/via_search_related.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,139 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+lives_ok ( sub {
+  my $no_prefetch = $schema->resultset('Track')->search_related(cd =>
+    {
+      'cd.year' => "2000",
+    },
+    {
+      join => 'tags',
+      order_by => 'me.trackid',
+      rows => 1,
+    }
+  );
+
+  my $use_prefetch = $no_prefetch->search(
+    {},
+    {
+      prefetch => 'tags',
+    }
+  );
+
+  is($use_prefetch->count, $no_prefetch->count, 'counts with and without prefetch match');
+  is(
+    scalar ($use_prefetch->all),
+    scalar ($no_prefetch->all),
+    "Amount of returned rows is right"
+  );
+
+}, 'search_related prefetch with order_by works');
+
+TODO: { local $TODO = 'Unqualified columns in where clauses can not be fixed without an SQLA rewrite' if SQL::Abstract->VERSION < 2;
+lives_ok ( sub {
+  my $no_prefetch = $schema->resultset('Track')->search_related(cd =>
+    {
+      'cd.year' => "2000",
+      'tagid' => 1,
+    },
+    {
+      join => 'tags',
+      rows => 1,
+    }
+  );
+
+  my $use_prefetch = $no_prefetch->search(
+    undef,
+    {
+      prefetch => 'tags',
+    }
+  );
+
+  is(
+    scalar ($use_prefetch->all),
+    scalar ($no_prefetch->all),
+    "Amount of returned rows is right"
+  );
+  is($use_prefetch->count, $no_prefetch->count, 'counts with and without prefetch match');
+
+}, 'search_related prefetch with condition referencing unqualified column of a joined table works');
+}
+
+
+lives_ok (sub {
+    my $rs = $schema->resultset("Artwork")->search(undef, {distinct => 1})
+              ->search_related('artwork_to_artist')->search_related('artist',
+                undef,
+                { prefetch => 'cds' },
+              );
+    is($rs->all, 0, 'prefetch without WHERE (objects)');
+    is($rs->count, 0, 'prefetch without WHERE (count)');
+
+    $rs = $schema->resultset("Artwork")->search(undef, {distinct => 1})
+              ->search_related('artwork_to_artist')->search_related('artist',
+                { 'cds.title' => 'foo' },
+                { prefetch => 'cds' },
+              );
+    is($rs->all, 0, 'prefetch with WHERE (objects)');
+    is($rs->count, 0, 'prefetch with WHERE (count)');
+
+
+# test where conditions at the root of the related chain
+    my $artist_rs = $schema->resultset("Artist")->search({artistid => 2});
+    my $artist = $artist_rs->next;
+    $artist->create_related ('cds', $_) for (
+      {
+        year => 1999, title => 'vague cd', genre => { name => 'vague genre' }
+      },
+      {
+        year => 1999, title => 'vague cd2', genre => { name => 'vague genre' }
+      },
+    );
+
+    $rs = $artist_rs->search_related('cds')->search_related('genre',
+                    { 'genre.name' => 'vague genre' },
+                    { prefetch => 'cds' },
+                 );
+    is($rs->all, 1, 'base without distinct (objects)');
+    is($rs->count, 1, 'base without distinct (count)');
+    # artist -> 2 cds -> 2 genres -> 2 cds for each genre = 4
+    is($rs->search_related('cds')->all, 4, 'prefetch without distinct (objects)');
+    is($rs->search_related('cds')->count, 4, 'prefetch without distinct (count)');
+
+
+    $rs = $artist_rs->search_related('cds', {}, { distinct => 1})->search_related('genre',
+                    { 'genre.name' => 'vague genre' },
+                 );
+    is($rs->all, 2, 'distinct does not propagate over search_related (objects)');
+    is($rs->count, 2, 'distinct does not propagate over search_related (count)');
+
+    $rs = $rs->search ({}, { distinct => 1} );
+    is($rs->all, 1, 'distinct without prefetch (objects)');
+    is($rs->count, 1, 'distinct without prefetch (count)');
+
+
+    $rs = $artist_rs->search_related('cds')->search_related('genre',
+                    { 'genre.name' => 'vague genre' },
+                    { prefetch => 'cds', distinct => 1 },
+                 );
+    is($rs->all, 1, 'distinct with prefetch (objects)');
+    is($rs->count, 1, 'distinct with prefetch (count)');
+
+  TODO: {
+    local $TODO = "This makes another 2 trips to the database, it can't be right";
+    # artist -> 2 cds -> 2 genres -> 2 cds for each genre + distinct = 2
+    is($rs->search_related('cds')->all, 2, 'prefetched distinct with prefetch (objects)');
+    is($rs->search_related('cds')->count, 2, 'prefetched distinct with prefetch (count)');
+  }
+
+}, 'distinct generally works with prefetch on deep search_related chains');
+
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/with_limit.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/with_limit.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/prefetch/with_limit.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -8,8 +8,6 @@
 use lib qw(t/lib);
 use DBICTest;
 
-plan tests => 9;
-
 my $schema = DBICTest->init_schema();
 
 
@@ -25,6 +23,8 @@
 my $use_prefetch = $no_prefetch->search(
   {},
   {
+    select => ['me.artistid', 'me.name'],
+    as => ['artistid', 'name'],
     prefetch => 'cds',
     order_by => { -desc => 'name' },
   }
@@ -90,3 +90,4 @@
 my $artist2 = $use_prefetch->search({'cds.title' => { '!=' => $artist_many_cds->cds->first->title } })->slice (0,0)->next;
 is($artist2->cds->count, 2, "count on search limiting prefetched has_many");
 
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/after_update.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/after_update.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/after_update.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,7 +1,5 @@
-#!/usr/bin/perl -w
-
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
 use lib qw(t/lib);

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/core.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/core.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/core.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -5,11 +5,11 @@
 use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
+use DBIC::SqlMakerTest;
 
 my $schema = DBICTest->init_schema();
+my $sdebug = $schema->storage->debug;
 
-plan tests => 78;
-
 # has_a test
 my $cd = $schema->resultset("CD")->find(4);
 my ($artist) = ($INC{'DBICTest/HelperRels'}
@@ -57,7 +57,7 @@
   is($queries, 0, 'No SELECT made for belongs_to if key IS NULL');
   $big_flop_cd->genre_inefficient; #should trigger a select query
   is($queries, 1, 'SELECT made for belongs_to if key IS NULL when undef_on_null_fk disabled');
-  $schema->storage->debug(0);
+  $schema->storage->debug($sdebug);
   $schema->storage->debugcb(undef);
 }
 
@@ -133,7 +133,7 @@
   year => 2007,
 } );
 is( $cd->title, 'Greatest Hits 2: Louder Than Ever', 'find_or_new_related new record ok' );
-ok( ! $cd->in_storage, 'find_or_new_related on a new record: not in_storage' );
+is( $cd->in_storage, 0, 'find_or_new_related on a new record: not in_storage' );
 
 $cd->artist(undef);
 my $newartist = $cd->find_or_new_related( 'artist', {
@@ -259,8 +259,22 @@
 is($def_artist_cd->search_related('artist')->count, 0, 'closed search on null FK');
 
 # test undirected many-to-many relationship (e.g. "related artists")
-my $undir_maps = $schema->resultset("Artist")->find(1)->artist_undirected_maps;
+my $undir_maps = $schema->resultset("Artist")
+                          ->search ({artistid => 1})
+                            ->search_related ('artist_undirected_maps');
 is($undir_maps->count, 1, 'found 1 undirected map for artist 1');
+is_same_sql_bind (
+  $undir_maps->as_query,
+  '(
+    SELECT artist_undirected_maps.id1, artist_undirected_maps.id2
+      FROM artist me
+      JOIN artist_undirected_map artist_undirected_maps
+        ON artist_undirected_maps.id1 = me.artistid OR artist_undirected_maps.id2 = me.artistid
+    WHERE ( artistid = ? )
+  )',
+  [[artistid => 1]],
+  'expected join sql produced',
+);
 
 $undir_maps = $schema->resultset("Artist")->find(2)->artist_undirected_maps;
 is($undir_maps->count, 1, 'found 1 undirected map for artist 2');
@@ -275,11 +289,11 @@
 
 cmp_ok($searched->count, '==', 2, "Both artist returned from map after adding another condition");
 
-# check join through cascaded has_many relationships
+# check join through cascaded has_many relationships (also empty has_many rels)
 $artist = $schema->resultset("Artist")->find(1);
 my $trackset = $artist->cds->search_related('tracks');
-# LEFT join means we also see the trackless additional album...
-cmp_ok($trackset->count, '==', 11, "Correct number of tracks for artist");
+is($trackset->count, 10, "Correct number of tracks for artist");
+is($trackset->all, 10, "Correct number of track objects for artist");
 
 # now see about updating eveything that belongs to artist 2 to artist 3
 $artist = $schema->resultset("Artist")->find(2);
@@ -309,3 +323,5 @@
 
 $cds = $schema->resultset("CD")->search({ 'me.cdid' => 5 }, { join => { single_track => { cd => {} } } });
 is($cds->count, 1, "subjoins under left joins force_left (hashref)");
+
+done_testing;
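
A note on the OR'ed ON clause in the expected SQL above: DBIx::Class emits it
when a relationship condition is supplied as an arrayref of hashrefs, each
hashref becoming one alternative. A sketch of how such a relationship is
typically declared; class and column names are assumed to mirror the test
schema:

  # in the Artist result class: an arrayref of condition hashrefs is OR'ed,
  # yielding ON artist_undirected_maps.id1 = me.artistid
  #          OR artist_undirected_maps.id2 = me.artistid
  __PACKAGE__->has_many(
    artist_undirected_maps => 'DBICTest::Schema::ArtistUndirectedMap',
    [
      { 'foreign.id1' => 'self.artistid' },
      { 'foreign.id2' => 'self.artistid' },
    ],
  );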

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/doesnt_exist.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/doesnt_exist.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/doesnt_exist.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,7 +1,5 @@
-#!/usr/bin/perl -w
-
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
 use lib qw(t/lib);

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/update_or_create_multi.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/update_or_create_multi.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/update_or_create_multi.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -8,9 +8,9 @@
 use DBIC::SqlMakerTest;
 
 my $schema = DBICTest->init_schema();
+my $sdebug = $schema->storage->debug;
 
-#plan tests => 4;
-plan 'no_plan';
+plan tests => 6;
 
 my $artist = $schema->resultset ('Artist')->first;
 
@@ -74,9 +74,11 @@
 });
 
 $schema->storage->debugcb(undef);
+$schema->storage->debug ($sdebug);
 
+my ($search_sql) = $sql[0] =~ /^(SELECT .+?)\:/;
 is_same_sql (
-  $sql[0],
+  $search_sql,
   'SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
     FROM cd me 
     WHERE ( me.artist = ? AND me.title = ? AND me.genreid = ? )

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/update_or_create_single.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/update_or_create_single.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/relationship/update_or_create_single.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -7,8 +7,7 @@
 
 my $schema = DBICTest->init_schema();
 
-#plan tests => 4;
-plan 'no_plan';
+plan tests => 9;
 
 my $artist = $schema->resultset ('Artist')->first;
 
@@ -79,7 +78,7 @@
 
 
 # expect a year update on the only related row
-# (non-qunique column only)
+# (non-unique column only)
 $genre->update_or_create_related ('model_cd', {
   year => 2011,
 });
@@ -96,5 +95,3 @@
   },
   'CD year column updated correctly without a disambiguator',
 );
-
-

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/as_query.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/as_query.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/as_query.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,10 +1,6 @@
-#!/usr/bin/perl
-
 use strict;
-use warnings FATAL => 'all';
+use warnings;
 
-use Data::Dumper;
-
 use Test::More;
 
 plan ( tests => 5 );

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/as_subselect_rs.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/as_subselect_rs.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/as_subselect_rs.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,25 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+my $schema = DBICTest->init_schema();
+
+my $new_rs = $schema->resultset('Artist')->search({
+   'artwork_to_artist.artist_id' => 1
+}, {
+   join => 'artwork_to_artist'
+});
+lives_ok { $new_rs->count } 'regular search works';
+lives_ok { $new_rs->search({ 'artwork_to_artist.artwork_cd_id' => 1})->count }
+   '... and chaining off that using join works';
+lives_ok { $new_rs->search({ 'artwork_to_artist.artwork_cd_id' => 1})->as_subselect_rs->count }
+   '... and chaining off the virtual view works';
+dies_ok  { $new_rs->as_subselect_rs->search({'artwork_to_artist.artwork_cd_id'=> 1})->count }
+   q{... but chaining off of a virtual view using join doesn't work};
+done_testing;

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/is_ordered.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/is_ordered.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/is_ordered.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,90 @@
+use strict;
+use warnings;
+
+use lib qw(t/lib);
+use Test::More;
+use Test::Exception;
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+my $rs = $schema->resultset('Artist');
+
+ok !$rs->is_ordered, 'vanilla resultset is not ordered';
+
+# Simple ordering with a single column
+{
+  my $ordered = $rs->search(undef, { order_by => 'artistid' });
+  ok $ordered->is_ordered, 'Simple column ordering detected by is_ordered';
+}
+
+# Hashref order direction
+{
+  my $ordered = $rs->search(undef, { order_by => { -desc => 'artistid' } });
+  ok $ordered->is_ordered, 'resultset with order direction is_ordered';
+}
+
+# Column ordering with literal SQL
+{
+  my $ordered = $rs->search(undef, { order_by => \'artistid DESC' });
+  ok $ordered->is_ordered, 'resultset with literal SQL is_ordered';
+}
+
+# Multiple column ordering
+{
+  my $ordered = $rs->search(undef, { order_by => ['artistid', 'name'] });
+  ok $ordered->is_ordered, 'ordering with multiple columns as arrayref is ordered';
+}
+
+# More complicated ordering
+{
+  my $ordered = $rs->search(undef, { 
+    order_by => [
+      { -asc => 'artistid' }, 
+      { -desc => 'name' },
+    ] 
+  });
+  ok $ordered->is_ordered, 'more complicated resultset ordering is_ordered';
+}
+
+# Empty multi-column ordering arrayref
+{
+  my $ordered = $rs->search(undef, { order_by => [] });
+  ok !$ordered->is_ordered, 'ordering with empty arrayref is not ordered';
+}
+
+# Multi-column ordering syntax with empty hashref
+{
+  my $ordered = $rs->search(undef, { order_by => [{}] });
+  ok !$ordered->is_ordered, 'ordering with [{}] is not ordered';
+}
+
+# Remove ordering after being set
+{
+  my $ordered = $rs->search(undef, { order_by => 'artistid' });
+  ok $ordered->is_ordered, 'resultset with ordering applied works..';
+  my $unordered = $ordered->search(undef, { order_by => undef });
+  ok !$unordered->is_ordered, '..and is not ordered with ordering removed';
+}
+
+# Search without ordering
+{
+  my $ordered = $rs->search({ name => 'We Are Goth' }, { join => 'cds' });
+  ok !$ordered->is_ordered, 'WHERE clause but no order_by is not ordered';
+}
+
+# Other functions without ordering
+{
+  # Join
+  my $joined = $rs->search(undef, { join => 'cds' });
+  ok !$joined->is_ordered, 'join but no order_by is not ordered';
+
+  # Group By
+  my $grouped = $rs->search(undef, { group_by => 'rank' });
+  ok !$grouped->is_ordered, 'group_by but no order_by is not ordered';
+
+  # Paging
+  my $paged = $rs->search(undef, { page => 5 });
+  ok !$paged->is_ordered, 'paging but no order_by is not ordered';
+}
+
+done_testing;

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/is_paged.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/is_paged.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/is_paged.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,18 @@
+use strict;
+use warnings;
+
+use lib qw(t/lib);
+use Test::More;
+use Test::Exception;
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $tkfks = $schema->resultset('Artist');
+
+ok !$tkfks->is_paged, 'vanilla resultset is not paginated';
+
+my $paginated = $tkfks->search(undef, { page => 5 });
+ok $paginated->is_paged, 'resultset is paginated now';
+
+done_testing;


Property changes on: DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/is_paged.t
___________________________________________________________________
Name: svn:mergeinfo
   + 
Name: svn:eol-style
   + native

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/nulls_only.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/nulls_only.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/nulls_only.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,29 @@
+use strict;
+use warnings;
+
+use lib qw(t/lib);
+use Test::More;
+use Test::Exception;
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+
+my $cd_rs = $schema->resultset('CD')->search ({ genreid => undef }, { columns => [ 'genreid' ]} );
+my $count = $cd_rs->count;
+cmp_ok ( $count, '>', 1, 'several CDs with no genre');
+
+my @objects = $cd_rs->all;
+is (scalar @objects, $count, 'Correct amount of objects without limit');
+isa_ok ($_, 'DBICTest::CD') for @objects;
+
+is_deeply (
+  [ map { values %{{$_->get_columns}} } (@objects) ],
+  [ (undef) x $count ],
+  'All values are indeed undef'
+);
+
+
+isa_ok ($cd_rs->search ({}, { rows => 1 })->single, 'DBICTest::CD');
+
+done_testing;

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/plus_select.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/plus_select.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/plus_select.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,63 @@
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $cd_rs = $schema->resultset('CD')->search ({genreid => { '!=', undef } }, { order_by => 'cdid' });
+my $track_cnt = $cd_rs->search({}, { rows => 1 })->search_related ('tracks')->count;
+
+my %basecols = $cd_rs->first->get_columns;
+
+# the current implementation of get_inflated_columns will "inflate"
+# relationships by simply calling the accessor when you have
+# identically named columns and relationships (which you shouldn't anyway).
+# I consider this wrong, but at the same time appreciate the
+# ramifications of changing it. Thus the value override and the
+# TODO to go with it. Delete all of this if ever resolved.
+my %todo_rel_inflation_override = ( artist => $basecols{artist} );
+TODO: {
+  local $TODO = 'Treating relationships as inflatable data is wrong - see comment in ' . __FILE__;
+  ok (! keys %todo_rel_inflation_override);
+}
+
+my $plus_rs = $cd_rs->search (
+  {},
+  { join => 'tracks', distinct => 1, '+select' => { count => 'tracks.trackid' }, '+as' => 'tr_cnt' },
+);
+
+is_deeply (
+  { $plus_rs->first->get_columns },
+  { %basecols, tr_cnt => $track_cnt },
+  'extra columns returned by get_columns',
+);
+
+is_deeply (
+  { $plus_rs->first->get_inflated_columns, %todo_rel_inflation_override },
+  { %basecols, tr_cnt => $track_cnt },
+  'extra columns returned by get_inflated_columns without inflatable columns',
+);
+
+SKIP: {
+  eval { require DateTime };
+  skip "Need DateTime for +select/get_inflated_columns tests", 1 if $@;
+
+  $schema->class('CD')->inflate_column( 'year',
+    { inflate => sub { DateTime->new( year => shift ) },
+      deflate => sub { shift->year } }
+  );
+
+  $basecols{year} = DateTime->new ( year => $basecols{year} );
+
+  is_deeply (
+    { $plus_rs->first->get_inflated_columns, %todo_rel_inflation_override },
+    { %basecols, tr_cnt => $track_cnt },
+    'extra columns returned by get_inflated_columns',
+  );
+}
+
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/update_delete.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/update_delete.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/resultset/update_delete.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -79,8 +79,12 @@
 );
 
 # grouping on PKs only should pass
-$sub_rs->search ({}, { group_by => [ reverse $sub_rs->result_source->primary_columns ] })     # reverse to make sure the comaprison works
-          ->update ({ pilot_sequence => \ 'pilot_sequence + 1' });
+$sub_rs->search (
+  {},
+  {
+    group_by => [ reverse $sub_rs->result_source->primary_columns ],     # reverse to make sure the PK-list comparison works
+  },
+)->update ({ pilot_sequence => \ 'pilot_sequence + 1' });
 
 is_deeply (
   [ $tkfks->search ({ autopilot => [qw/a b x y/]}, { order_by => 'autopilot' })
@@ -90,6 +94,19 @@
   'Only two rows incremented',
 );
 
+# also make sure weird scalarref usage works (RT#51409)
+$tkfks->search (
+  \ 'pilot_sequence BETWEEN 11 AND 21',
+)->update ({ pilot_sequence => \ 'pilot_sequence + 1' });
+
+is_deeply (
+  [ $tkfks->search ({ autopilot => [qw/a b x y/]}, { order_by => 'autopilot' })
+            ->get_column ('pilot_sequence')->all 
+  ],
+  [qw/12 22 30 40/],
+  'Only two rows incremented (where => scalarref works)',
+);
+
 $sub_rs->delete;
 
 is ($tkfks->count, $tkfk_cnt -= 2, 'Only two rows deleted');

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/schema/anon.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/schema/anon.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/schema/anon.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,13 @@
+use strict;
+use warnings;
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBICTest;
+
+lives_ok (sub {
+  DBICTest->init_schema()->resultset('Artist')->find({artistid => 1 })->update({name => 'anon test'});
+}, 'Schema object not lost in chaining');
+
+done_testing;

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/schema/clone.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/schema/clone.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/schema/clone.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,13 @@
+use strict;
+use warnings;
+use Test::More;
+
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $clone = $schema->clone;
+cmp_ok ($clone->storage, 'eq', $schema->storage, 'Storage copied into new schema (not a new instance)');
+
+done_testing;

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/search/preserve_original_rs.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/search/preserve_original_rs.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/search/preserve_original_rs.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -8,7 +8,10 @@
 use DBIC::SqlMakerTest;
 use DBIC::DebugObj;
 use DBICTest;
+
+# use Data::Dumper comparisons to avoid messing with coderefs
 use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
 
 my $schema = DBICTest->init_schema();
 
@@ -86,4 +89,3 @@
 
   is_same_sql_bind ($rs->as_query, $q{$s}{query}, "$s resultset unmodified (as_query matches)" );
 }
-

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/search/related_strip_prefetch.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/search/related_strip_prefetch.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/search/related_strip_prefetch.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,42 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBIC::SqlMakerTest;
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $rs = $schema->resultset('CD')->search (
+  { 'tracks.id' => { '!=', 666 }},
+  { join => 'artist', prefetch => 'tracks', rows => 2 }
+);
+
+my $rel_rs = $rs->search_related ('tags', { 'tags.tag' => { '!=', undef }}, { distinct => 1});
+
+is_same_sql_bind (
+  $rel_rs->as_query,
+  '(
+    SELECT tags.tagid, tags.cd, tags.tag
+      FROM (
+        SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+          FROM cd me
+          JOIN artist artist ON artist.artistid = me.artist
+          LEFT JOIN track tracks ON tracks.cd = me.cdid 
+        WHERE ( tracks.id != ? )
+        LIMIT 2
+      ) me
+      JOIN artist artist ON artist.artistid = me.artist
+      JOIN tags tags ON tags.cd = me.cdid
+    WHERE ( tags.tag IS NOT NULL )
+    GROUP BY tags.tagid, tags.cd, tags.tag
+  )',
+
+  [ [ 'tracks.id' => 666 ] ],
+  'Prefetch spec successfully stripped on search_related'
+);
+
+done_testing;

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/search/select_chains.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/search/select_chains.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/search/select_chains.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,61 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBIC::SqlMakerTest;
+use DBICTest;
+
+
+my $schema = DBICTest->init_schema();
+
+my @chain = (
+  {
+    columns     => [ 'cdid' ],
+    '+columns'  => [ { title_lc => { lower => 'title' } } ],
+    '+select'   => [ 'genreid' ],
+    '+as'       => [ 'genreid' ],
+  } => 'SELECT me.cdid, LOWER( title ), me.genreid FROM cd me',
+
+  {
+    '+columns'  => [ { max_year => { max => 'me.year' }}, ],
+    '+select'   => [ { count => 'me.cdid' }, ],
+    '+as'       => [ 'cnt' ],
+  } => 'SELECT me.cdid, LOWER( title ), MAX( me.year ), me.genreid, COUNT( me.cdid ) FROM cd me',
+
+  {
+    select      => [ { min => 'me.cdid' }, ],
+    as          => [ 'min_id' ],
+  } => 'SELECT MIN( me.cdid ) FROM cd me',
+
+  {
+    '+columns' => [ { cnt => { count => 'cdid' } } ],
+  } => 'SELECT MIN( me.cdid ), COUNT ( cdid ) FROM cd me',
+
+  {
+    columns => [ 'year' ],
+  } => 'SELECT me.year FROM cd me',
+);
+
+my $rs = $schema->resultset('CD');
+
+my $testno = 1;
+while (@chain) {
+  my $attrs = shift @chain;
+  my $sql = shift @chain;
+
+  $rs = $rs->search ({}, $attrs);
+
+  is_same_sql_bind (
+    $rs->as_query,
+    "($sql)",
+    [],
+    "Test $testno of SELECT assembly ok",
+  );
+
+  $testno++;
+}
+
+done_testing;
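
To spell out the accumulation rule the chain above verifies: plain 'columns'
and 'select' restart the select list, while the '+'-prefixed variants append
to whatever has accumulated so far. A condensed sketch against the same
resultset, with comments showing the resulting list:

  my $rs = $schema->resultset('CD')
    ->search({}, { columns    => [ 'cdid' ]  })    # SELECT me.cdid
    ->search({}, { '+columns' => [ 'title' ] })    # SELECT me.cdid, me.title
    ->search({}, { select     => [ { min => 'me.cdid' } ],
                   as         => [ 'min_id' ] });  # list reset: SELECT MIN( me.cdid )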

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/search/subquery.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/search/subquery.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/search/subquery.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -3,11 +3,8 @@
 use strict;
 use warnings;
 
-use Data::Dumper;
-
 use Test::More;
 
-
 use lib qw(t/lib);
 use DBICTest;
 use DBIC::SqlMakerTest;
@@ -19,11 +16,22 @@
 my @tests = (
   {
     rs => $cdrs,
+    search => \[ "title = ? AND year LIKE ?", 'buahaha', '20%' ],
+    attrs => { rows => 5 },
+    sqlbind => \[
+      "( SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE (title = ? AND year LIKE ?) LIMIT 5)",
+      'buahaha',
+      '20%',
+    ],
+  },
+
+  {
+    rs => $cdrs,
     search => {
       artist_id => { 'in' => $art_rs->search({}, { rows => 1 })->get_column( 'id' )->as_query },
     },
     sqlbind => \[
-      "( SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE artist_id IN ( SELECT id FROM artist me LIMIT 1 ) )",
+      "( SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE artist_id IN ( SELECT id FROM artist me LIMIT 1 ) )",
     ],
   },
 
@@ -60,7 +68,10 @@
       ],
     },
     sqlbind => \[
-      "( SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE id > ?) cd2 )",
+      "( SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (
+            SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE id > ?
+          ) cd2
+        )",
       [ 'id', 20 ]
     ],
   },
@@ -68,9 +79,13 @@
   {
     rs => $art_rs,
     attrs => {
-      from => [ { 'me' => 'artist' }, 
-        [ { 'cds' => $cdrs->search({},{ 'select' => [\'me.artist as cds_artist' ]})->as_query },
-        { 'me.artistid' => 'cds_artist' } ] ]
+      from => [
+        { 'me' => 'artist' },
+        [
+          { 'cds' => $cdrs->search({}, { 'select' => [\'me.artist as cds_artist' ]})->as_query },
+          { 'me.artistid' => 'cds_artist' } 
+        ]
+      ]
     },
     sqlbind => \[
       "( SELECT me.artistid, me.name, me.rank, me.charfield FROM artist me JOIN (SELECT me.artist as cds_artist FROM cd me) cds ON me.artistid = cds_artist )"
@@ -95,9 +110,9 @@
     sqlbind => \[
       "( SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track
         FROM
-          (SELECT cd3.cdid,cd3.artist,cd3.title,cd3.year,cd3.genreid,cd3.single_track
+          (SELECT cd3.cdid, cd3.artist, cd3.title, cd3.year, cd3.genreid, cd3.single_track
             FROM
-              (SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track
+              (SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
                 FROM cd me WHERE id < ?) cd3
             WHERE id > ?) cd2
       )",
@@ -130,7 +145,10 @@
       ],
     },
     sqlbind => \[
-      "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE title = ?) cd2)",
+      "(SELECT cd2.cdid, cd2.artist, cd2.title, cd2.year, cd2.genreid, cd2.single_track FROM (
+          SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE title = ?
+        ) cd2
+      )",
       [ 'title',
         'Thriller'
       ]

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/limit_dialects/toplimit.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/42toplimit.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/limit_dialects/toplimit.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/limit_dialects/toplimit.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,152 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+my $schema = DBICTest->init_schema;
+
+# Trick the sqlite DB to use Top limit emulation
+# We could test all of this via $sq->$op directly,
+# but some conditions need a $rsrc
+delete $schema->storage->_sql_maker->{_cached_syntax};
+$schema->storage->_sql_maker->limit_dialect ('Top');
+
+my $rs = $schema->resultset ('BooksInLibrary')->search ({}, { prefetch => 'owner', rows => 1, offset => 3 });
+
+sub default_test_order {
+   my $order_by = shift;
+   is_same_sql_bind(
+      $rs->search ({}, {order_by => $order_by})->as_query,
+      "(SELECT
+        TOP 1 me__id, source, owner, title, price, owner__id, name FROM
+         (SELECT
+           TOP 4 me.id AS me__id, me.source, me.owner, me.title, me.price, owner.id AS owner__id, owner.name
+           FROM books me
+           JOIN owners owner ON
+           owner.id = me.owner
+           WHERE ( source = ? )
+           ORDER BY me__id ASC
+         ) me ORDER BY me__id DESC
+       )",
+    [ [ source => 'Library' ] ],
+  );
+}
+
+sub test_order {
+  my $args = shift;
+
+  my $req_order = $args->{order_req}
+    ? "ORDER BY $args->{order_req}"
+    : ''
+  ;
+
+  is_same_sql_bind(
+    $rs->search ({}, {order_by => $args->{order_by}})->as_query,
+    "(SELECT
+      me__id, source, owner, title, price, owner__id, name FROM
+      (SELECT
+        TOP 1 me__id, source, owner, title, price, owner__id, name FROM
+         (SELECT
+           TOP 4 me.id AS me__id, me.source, me.owner, me.title, me.price, owner.id AS owner__id, owner.name FROM
+           books me
+           JOIN owners owner ON owner.id = me.owner
+           WHERE ( source = ? )
+           ORDER BY $args->{order_inner}
+         ) me ORDER BY $args->{order_outer}
+      ) me $req_order
+    )",
+    [ [ source => 'Library' ] ],
+  );
+}
+
+my @tests = (
+  {
+    order_by => \'foo DESC',
+    order_req => 'foo DESC',
+    order_inner => 'foo DESC',
+    order_outer => 'foo ASC'
+  },
+  {
+    order_by => { -asc => 'foo'  },
+    order_req => 'foo ASC',
+    order_inner => 'foo ASC',
+    order_outer => 'foo DESC',
+  },
+  {
+    order_by => 'foo',
+    order_req => 'foo',
+    order_inner => 'foo ASC',
+    order_outer => 'foo DESC',
+  },
+  {
+    order_by => [ qw{ foo bar}   ],
+    order_req => 'foo, bar',
+    order_inner => 'foo ASC, bar ASC',
+    order_outer => 'foo DESC, bar DESC',
+  },
+  {
+    order_by => { -desc => 'foo' },
+    order_req => 'foo DESC',
+    order_inner => 'foo DESC',
+    order_outer => 'foo ASC',
+  },
+  {
+    order_by => ['foo', { -desc => 'bar' } ],
+    order_req => 'foo, bar DESC',
+    order_inner => 'foo ASC, bar DESC',
+    order_outer => 'foo DESC, bar ASC',
+  },
+  {
+    order_by => { -asc => [qw{ foo bar }] },
+    order_req => 'foo ASC, bar ASC',
+    order_inner => 'foo ASC, bar ASC',
+    order_outer => 'foo DESC, bar DESC',
+  },
+  {
+    order_by => [
+      { -asc => 'foo' },
+      { -desc => [qw{bar}] },
+      { -asc  => [qw{hello sensors}]},
+    ],
+    order_req => 'foo ASC, bar DESC, hello ASC, sensors ASC',
+    order_inner => 'foo ASC, bar DESC, hello ASC, sensors ASC',
+    order_outer => 'foo DESC, bar ASC, hello DESC, sensors DESC',
+  },
+);
+
+my @default_tests = ( undef, '', {}, [] );
+
+plan (tests => scalar @tests + scalar @default_tests + 1);
+
+test_order ($_) for @tests;
+default_test_order ($_) for @default_tests;
+
+
+is_same_sql_bind (
+  $rs->search ({}, { group_by => 'title', order_by => 'title' })->as_query,
+'(SELECT
+me.id, me.source, me.owner, me.title, me.price, owner.id, owner.name FROM
+   ( SELECT
+      id, source, owner, title, price FROM
+      ( SELECT
+         TOP 1 id, source, owner, title, price FROM
+         ( SELECT
+            TOP 4 me.id, me.source, me.owner, me.title, me.price FROM
+            books me  JOIN
+            owners owner ON owner.id = me.owner
+            WHERE ( source = ? )
+            GROUP BY title
+            ORDER BY title ASC
+         ) me
+         ORDER BY title DESC
+      ) me
+      ORDER BY title
+   ) me  JOIN
+   owners owner ON owner.id = me.owner WHERE
+   ( source = ? )
+   ORDER BY title)' ,
+  [ [ source => 'Library' ], [ source => 'Library' ] ],
+);
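
For anyone decoding the expected SQL above: the Top dialect emulates
LIMIT/OFFSET by nesting TOP selects and reversing the sort direction between
the inner and outer query, which is where the order_inner / order_outer pairs
in @tests come from. A hypothetical helper, not DBIC API, showing just the
direction flip, with bare columns treated as ASC:

  sub flip_order {
    my ($clause) = @_;
    my @flipped;
    for my $part (split /\s*,\s*/, $clause) {
      if    ($part =~ s/\s+DESC$//i) { push @flipped, "$part ASC"  }
      elsif ($part =~ s/\s+ASC$//i)  { push @flipped, "$part DESC" }
      else                           { push @flipped, "$part DESC" } # bare columns default to ASC
    }
    return join ', ', @flipped;
  }

  # flip_order('foo ASC, bar DESC') returns 'foo DESC, bar ASC',
  # matching the order_outer entries in @tests above.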

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/quotes/quotes.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/19quotes.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/quotes/quotes.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/quotes/quotes.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,74 @@
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+use DBIC::SqlMakerTest;
+
+BEGIN {
+    eval "use DBD::SQLite";
+    plan $@
+        ? ( skip_all => 'needs DBD::SQLite for testing' )
+        : ( tests => 7 );
+}
+
+
+use_ok('DBICTest');
+use_ok('DBIC::DebugObj');
+my $schema = DBICTest->init_schema();
+
+#diag('Testing against ' . join(' ', map { $schema->storage->dbh->get_info($_) } qw/17 18/));
+
+$schema->storage->sql_maker->quote_char('`');
+$schema->storage->sql_maker->name_sep('.');
+
+my ($sql, @bind);
+$schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind));
+$schema->storage->debug(1);
+
+my $rs;
+
+$rs = $schema->resultset('CD')->search(
+           { 'me.year' => 2001, 'artist.name' => 'Caterwauler McCrae' },
+           { join => 'artist' });
+eval { $rs->count };
+is_same_sql_bind(
+  $sql, \@bind,
+  "SELECT COUNT( * ) FROM cd `me`  JOIN `artist` `artist` ON ( `artist`.`artistid` = `me`.`artist` ) WHERE ( `artist`.`name` = ? AND `me`.`year` = ? )", ["'Caterwauler McCrae'", "'2001'"],
+  'got correct SQL for count query with quoting'
+);
+
+my $order = 'year DESC';
+$rs = $schema->resultset('CD')->search({},
+            { 'order_by' => $order });
+eval { $rs->first };
+like($sql, qr/ORDER BY `\Q${order}\E`/, 'quoted ORDER BY with DESC (should use a scalarref anyway)');
+
+$rs = $schema->resultset('CD')->search({},
+            { 'order_by' => \$order });
+eval { $rs->first };
+like($sql, qr/ORDER BY \Q${order}\E/, 'did not quote ORDER BY with scalarref');
+
+$schema->storage->sql_maker->quote_char([qw/[ ]/]);
+$schema->storage->sql_maker->name_sep('.');
+
+$rs = $schema->resultset('CD')->search(
+           { 'me.year' => 2001, 'artist.name' => 'Caterwauler McCrae' },
+           { join => 'artist' });
+eval { $rs->count };
+is_same_sql_bind(
+  $sql, \@bind,
+  "SELECT COUNT( * ) FROM cd [me]  JOIN [artist] [artist] ON ( [artist].[artistid] = [me].[artist] ) WHERE ( [artist].[name] = ? AND [me].[year] = ? )", ["'Caterwauler McCrae'", "'2001'"],
+  'got correct SQL for count query with bracket quoting'
+);
+
+my %data = (
+       name => 'Bill',
+       order => '12'
+);
+
+$schema->storage->sql_maker->quote_char('`');
+$schema->storage->sql_maker->name_sep('.');
+
+is($schema->storage->sql_maker->update('group', \%data), 'UPDATE `group` SET `name` = ?, `order` = ?', 'quoted table names for UPDATE');

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/quotes/quotes_newstyle.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/19quotes_newstyle.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/quotes/quotes_newstyle.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/quotes/quotes_newstyle.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,91 @@
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+use DBIC::SqlMakerTest;
+
+BEGIN {
+    eval "use DBD::SQLite";
+    plan $@
+        ? ( skip_all => 'needs DBD::SQLite for testing' )
+        : ( tests => 7 );
+}
+
+use_ok('DBICTest');
+use_ok('DBIC::DebugObj');
+
+my $schema = DBICTest->init_schema();
+
+#diag('Testing against ' . join(' ', map { $schema->storage->dbh->get_info($_) } qw/17 18/));
+
+my $dsn = $schema->storage->_dbi_connect_info->[0];
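+
+# "newstyle" quoting: quote_char/name_sep are handed to connection() in a
+# DBIC-specific attribute hashref instead of being set on the sql_maker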
+$schema->connection(
+  $dsn,
+  undef,
+  undef,
+  { AutoCommit => 1 },
+  { quote_char => '`', name_sep => '.' },
+);
+
+my ($sql, @bind);
+$schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind)),
+$schema->storage->debug(1);
+
+my $rs;
+
+$rs = $schema->resultset('CD')->search(
+           { 'me.year' => 2001, 'artist.name' => 'Caterwauler McCrae' },
+           { join => 'artist' });
+eval { $rs->count };
+is_same_sql_bind(
+  $sql, \@bind,
+  "SELECT COUNT( * ) FROM cd `me`  JOIN `artist` `artist` ON ( `artist`.`artistid` = `me`.`artist` ) WHERE ( `artist`.`name` = ? AND `me`.`year` = ? )", ["'Caterwauler McCrae'", "'2001'"],
+  'got correct SQL for count query with quoting'
+);
+
+my $order = 'year DESC';
+$rs = $schema->resultset('CD')->search({},
+            { 'order_by' => $order });
+eval { $rs->first };
+like($sql, qr/ORDER BY `\Q${order}\E`/, 'quoted ORDER BY with DESC (should use a scalarref anyway)');
+
+$rs = $schema->resultset('CD')->search({},
+            { 'order_by' => \$order });
+eval { $rs->first };
+like($sql, qr/ORDER BY \Q${order}\E/, 'did not quote ORDER BY with scalarref');
+
+$schema->connection(
+  $dsn,
+  undef,
+  undef,
+  { AutoCommit => 1, quote_char => [qw/[ ]/], name_sep => '.' }
+);
+
+$schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind)),
+$schema->storage->debug(1);
+
+$rs = $schema->resultset('CD')->search(
+           { 'me.year' => 2001, 'artist.name' => 'Caterwauler McCrae' },
+           { join => 'artist' });
+eval { $rs->count };
+is_same_sql_bind(
+  $sql, \@bind,
+  "SELECT COUNT( * ) FROM cd [me]  JOIN [artist] [artist] ON ( [artist].[artistid] = [me].[artist] ) WHERE ( [artist].[name] = ? AND [me].[year] = ? )", ["'Caterwauler McCrae'", "'2001'"],
+  'got correct SQL for count query with bracket quoting'
+);
+
+my %data = (
+       name => 'Bill',
+       order => '12'
+);
+
+$schema->connection(
+  $dsn,
+  undef,
+  undef,
+  { AutoCommit => 1, quote_char => '`', name_sep => '.' }
+);
+
+is($schema->storage->sql_maker->update('group', \%data), 'UPDATE `group` SET `name` = ?, `order` = ?', 'quoted table names for UPDATE');

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/sql_maker/sql_maker.t (from rev 6669, DBIx-Class/0.08/branches/run_file_against_storage/t/95sql_maker.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/sql_maker/sql_maker.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/sql_maker/sql_maker.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,78 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+
+use lib qw(t/lib);
+use DBIC::SqlMakerTest;
+
+use_ok('DBICTest');
+
+my $schema = DBICTest->init_schema(no_deploy => 1);
+
+my $sql_maker = $schema->storage->sql_maker;
+
+
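+# insert()/update() must pass arrayref values through as single bind
+# parameters (e.g. for array columns) rather than flattening them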
+{
+  my ($sql, @bind) = $sql_maker->insert(
+            'lottery',
+            {
+              'day' => '2008-11-16',
+              'numbers' => [13, 21, 34, 55, 89]
+            }
+  );
+
+  is_same_sql_bind(
+    $sql, \@bind,
+    q/INSERT INTO lottery (day, numbers) VALUES (?, ?)/,
+      [ ['day' => '2008-11-16'], ['numbers' => [13, 21, 34, 55, 89]] ],
+    'sql_maker passes arrayrefs in insert'
+  );
+
+
+  ($sql, @bind) = $sql_maker->update(
+            'lottery',
+            {
+              'day' => '2008-11-16',
+              'numbers' => [13, 21, 34, 55, 89]
+            }
+  );
+
+  is_same_sql_bind(
+    $sql, \@bind,
+    q/UPDATE lottery SET day = ?, numbers = ?/,
+      [ ['day' => '2008-11-16'], ['numbers' => [13, 21, 34, 55, 89]] ],
+    'sql_maker passes arrayrefs in update'
+  );
+}
+
+# make sure the cookbook caveat of { $op, \'...' } no longer applies
+{
+  my ($sql, @bind) = $sql_maker->where({
+    last_attempt => \ '< now() - interval "12 hours"',
+    next_attempt => { '<', \ 'now() - interval "12 hours"' },
+    created => [
+      { '<=', \ '1969' },
+      \ '> 1984',
+    ],
+  });
+  is_same_sql_bind(
+    $sql,
+    \@bind,
+    'WHERE
+          (created <= 1969 OR created > 1984 )
+      AND last_attempt < now() - interval "12 hours"
+      AND next_attempt < now() - interval "12 hours"
+    ',
+    [],
+  );
+}
+
+# Make sure the carp/croak override in SQLA works (via SQLAHacks)
+my $file = quotemeta (__FILE__);
+throws_ok (sub {
+  $schema->resultset ('Artist')->search ({}, { order_by => { -asc => 'stuff', -desc => 'staff' } } )->as_query;
+}, qr/$file/, 'Exception correctly croak()ed');
+
+done_testing;

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/sql_maker/sql_maker_quote.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/95sql_maker_quote.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/sql_maker/sql_maker_quote.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/sqlahacks/sql_maker/sql_maker_quote.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,357 @@
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+use DBIC::SqlMakerTest;
+
+use_ok('DBICTest');
+
+my $schema = DBICTest->init_schema();
+
+my $sql_maker = $schema->storage->sql_maker;
+
+$sql_maker->quote_char('`');
+$sql_maker->name_sep('.');
+
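+# sql_maker->select() is exercised directly with the raw
+# SQL::Abstract::Limit-style argument list:
+# (from-spec, fields, where, order-by, limit, offset)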
+my ($sql, @bind) = $sql_maker->select(
+          [
+            {
+              'me' => 'cd'
+            },
+            [
+              {
+                'artist' => 'artist',
+                '-join_type' => ''
+              },
+              {
+                'artist.artistid' => 'me.artist'
+              }
+            ],
+            [
+              {
+                'tracks' => 'tracks',
+                '-join_type' => 'left'
+              },
+              {
+                'tracks.cd' => 'me.cdid'
+              }
+            ],
+          ],
+          [
+            'me.cdid',
+            { count => 'tracks.cd' },
+            { min => 'me.year', -as => 'me.minyear' },
+          ],
+          {
+            'artist.name' => 'Caterwauler McCrae',
+            'me.year' => 2001
+          },
+          [],
+          undef,
+          undef
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/
+    SELECT `me`.`cdid`, COUNT( `tracks`.`cd` ), MIN( `me`.`year` ) AS `me`.`minyear`
+      FROM `cd` `me`
+      JOIN `artist` `artist` ON ( `artist`.`artistid` = `me`.`artist` )
+      LEFT JOIN `tracks` `tracks` ON ( `tracks`.`cd` = `me`.`cdid` )
+    WHERE ( `artist`.`name` = ? AND `me`.`year` = ? )
+  /,
+  [ ['artist.name' => 'Caterwauler McCrae'], ['me.year' => 2001] ],
+  'got correct SQL and bind parameters for complex select query with quoting'
+);
+
+
+($sql, @bind) = $sql_maker->select(
+          [
+            {
+              'me' => 'cd'
+            }
+          ],
+          [
+            'me.cdid',
+            'me.artist',
+            'me.title',
+            'me.year'
+          ],
+          undef,
+          'year DESC',
+          undef,
+          undef
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year` FROM `cd` `me` ORDER BY `year DESC`/, [],
+  'scalar ORDER BY okay (single value)'
+);
+
+
+($sql, @bind) = $sql_maker->select(
+          [
+            {
+              'me' => 'cd'
+            }
+          ],
+          [
+            'me.cdid',
+            'me.artist',
+            'me.title',
+            'me.year'
+          ],
+          undef,
+          [
+            'year DESC',
+            'title ASC'
+          ],
+          undef,
+          undef
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year` FROM `cd` `me` ORDER BY `year DESC`, `title ASC`/, [],
+  'scalar ORDER BY okay (multiple values)'
+);
+
+{
+  ($sql, @bind) = $sql_maker->select(
+            [
+              {
+                'me' => 'cd'
+              }
+            ],
+            [
+              'me.cdid',
+              'me.artist',
+              'me.title',
+              'me.year'
+            ],
+            undef,
+            { -desc => 'year' },
+            undef,
+            undef
+  );
+
+  is_same_sql_bind(
+    $sql, \@bind,
+    q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year` FROM `cd` `me` ORDER BY `year` DESC/, [],
+    'hashref ORDER BY okay (single value)'
+  );
+
+
+  ($sql, @bind) = $sql_maker->select(
+            [
+              {
+                'me' => 'cd'
+              }
+            ],
+            [
+              'me.cdid',
+              'me.artist',
+              'me.title',
+              'me.year'
+            ],
+            undef,
+            [
+              { -desc => 'year' },
+              { -asc => 'title' }
+            ],
+            undef,
+            undef
+  );
+
+  is_same_sql_bind(
+    $sql, \@bind,
+    q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year` FROM `cd` `me` ORDER BY `year` DESC, `title` ASC/, [],
+    'hashref ORDER BY okay (multiple values)'
+  );
+
+}
+
+
+($sql, @bind) = $sql_maker->select(
+          [
+            {
+              'me' => 'cd'
+            }
+          ],
+          [
+            'me.cdid',
+            'me.artist',
+            'me.title',
+            'me.year'
+          ],
+          undef,
+          \'year DESC',
+          undef,
+          undef
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year` FROM `cd` `me` ORDER BY year DESC/, [],
+  'did not quote ORDER BY with scalarref (single value)'
+);
+
+
+($sql, @bind) = $sql_maker->select(
+          [
+            {
+              'me' => 'cd'
+            }
+          ],
+          [
+            'me.cdid',
+            'me.artist',
+            'me.title',
+            'me.year'
+          ],
+          undef,
+          [
+            \'year DESC',
+            \'title ASC'
+          ],
+          undef,
+          undef
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title`, `me`.`year` FROM `cd` `me` ORDER BY year DESC, title ASC/, [],
+  'did not quote ORDER BY with scalarref (multiple values)'
+);
+
+
+($sql, @bind) = $sql_maker->select(
+  [ { me => 'cd' }                  ],
+  [qw/ me.cdid me.artist me.title  /],
+  { cdid => \['rlike ?', [cdid => 'X'] ]       },
+  { group_by => 'title', having => \['count(me.artist) > ?', [ cnt => 2] ] },
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title` FROM `cd` `me` WHERE ( `cdid` rlike ? ) GROUP BY `title` HAVING count(me.artist) > ?/,
+  [ [ cdid => 'X'], ['cnt' => '2'] ],
+  'Quoting works with where/having arrayrefrefs',
+);
+
+
+($sql, @bind) = $sql_maker->select(
+  [ { me => 'cd' }                  ],
+  [qw/ me.cdid me.artist me.title  /],
+  { cdid => \'rlike X'              },
+  { group_by => 'title', having => \'count(me.artist) > 2' },
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title` FROM `cd` `me` WHERE ( `cdid` rlike X ) GROUP BY `title` HAVING count(me.artist) > 2/,
+  [],
+  'Quoting works with where/having scalarrefs',
+);
+
+
+($sql, @bind) = $sql_maker->update(
+          'group',
+          {
+            'order' => '12',
+            'name' => 'Bill'
+          }
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/UPDATE `group` SET `name` = ?, `order` = ?/, [ ['name' => 'Bill'], ['order' => '12'] ],
+  'quoted table names for UPDATE'
+);
+
+{
+  ($sql, @bind) = $sql_maker->select(
+        [
+          {
+            'me' => 'cd'
+          }
+        ],
+        [
+          'me.*'
+        ],
+        undef,
+        [],
+        undef,
+        undef    
+  );
+
+  is_same_sql_bind(
+    $sql, \@bind,
+    q/SELECT `me`.* FROM `cd` `me`/, [],
+    'select attr with me.* is right'
+  );
+}
+
+
+$sql_maker->quote_char([qw/[ ]/]);
+
+($sql, @bind) = $sql_maker->select(
+          [
+            {
+              'me' => 'cd'
+            },
+            [
+              {
+                'artist' => 'artist',
+                '-join_type' => ''
+              },
+              {
+                'artist.artistid' => 'me.artist'
+              }
+            ]
+          ],
+          [
+            {
+              max => 'rank',
+              -as => 'max_rank',
+            },
+            'rank',
+            {
+              'count' => '*',
+              -as => 'cnt',
+            }
+          ],
+          {
+            'artist.name' => 'Caterwauler McCrae',
+            'me.year' => 2001
+          },
+          [],
+          undef,
+          undef
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/SELECT MAX ( [rank] ) AS [max_rank], [rank], COUNT( * ) AS [cnt] FROM [cd] [me]  JOIN [artist] [artist] ON ( [artist].[artistid] = [me].[artist] ) WHERE ( [artist].[name] = ? AND [me].[year] = ? )/, [ ['artist.name' => 'Caterwauler McCrae'], ['me.year' => 2001] ],
+  'got correct SQL and bind parameters for count query with bracket quoting'
+);
+
+
+($sql, @bind) = $sql_maker->update(
+          'group',
+          {
+            'order' => '12',
+            'name' => 'Bill'
+          }
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/UPDATE [group] SET [name] = ?, [order] = ?/, [ ['name' => 'Bill'], ['order' => '12'] ],
+  'bracket quoted table names for UPDATE'
+);
+
+done_testing;

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/storage/base.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/92storage.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/storage/base.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/storage/base.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,189 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Warn;
+use lib qw(t/lib);
+use DBICTest;
+use Data::Dumper;
+
+{
+    package DBICTest::ExplodingStorage::Sth;
+    use strict;
+    use warnings;
+
+    sub execute { die "Kablammo!" }
+
+    sub bind_param {}
+
+    package DBICTest::ExplodingStorage;
+    use strict;
+    use warnings;
+    use base 'DBIx::Class::Storage::DBI::SQLite';
+
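+    # hand out an exploding sth on the first call only; together with
+    # connected() reporting false this exercises the reconnect-and-retry path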
+    my $count = 0;
+    sub sth {
+      my ($self, $sql) = @_;
+      return bless {},  "DBICTest::ExplodingStorage::Sth" unless $count++;
+      return $self->next::method($sql);
+    }
+
+    sub connected {
+      return 0 if $count == 1;
+      return shift->next::method(@_);
+    }
+}
+
+my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
+
+is( ref($schema->storage), 'DBIx::Class::Storage::DBI::SQLite',
+    'Storage reblessed correctly into DBIx::Class::Storage::DBI::SQLite' );
+
+my $storage = $schema->storage;
+$storage->ensure_connected;
+
+eval {
+    $schema->storage->throw_exception('test_exception_42');
+};
+like($@, qr/\btest_exception_42\b/, 'basic exception');
+
+eval {
+    $schema->resultset('CD')->search_literal('broken +%$#$1')->all;
+};
+like($@, qr/prepare_cached failed/, 'exception via DBI->HandleError, etc');
+
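+# swap in the exploding storage so the next execute() fails exactly once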
+bless $storage, "DBICTest::ExplodingStorage";
+$schema->storage($storage);
+
+eval { 
+    $schema->resultset('Artist')->create({ name => "Exploding Sheep" });
+};
+
+is($@, "", "Exploding \$sth->execute was caught");
+
+is(1, $schema->resultset('Artist')->search({name => "Exploding Sheep" })->count,
+  "And the STH was retired");
+
+
+# testing various invocations of connect_info ([ ... ])
+
+my $coderef = sub { 42 };
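+
+# each key below maps a connect_info() calling convention to the
+# _dbi_connect_info the storage is expected to derive from it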
+my $invocations = {
+  'connect_info ([ $d, $u, $p, \%attr, \%extra_attr])' => {
+      args => [
+          'foo',
+          'bar',
+          undef,
+          {
+            on_connect_do => [qw/a b c/],
+            PrintError => 0,
+          },
+          {
+            AutoCommit => 1,
+            on_disconnect_do => [qw/d e f/],
+          },
+          {
+            unsafe => 1,
+            auto_savepoint => 1,
+          },
+        ],
+      dbi_connect_info => [
+          'foo',
+          'bar',
+          undef,
+          {
+            %{$storage->_default_dbi_connect_attributes || {} },
+            PrintError => 0,
+            AutoCommit => 1,
+          },
+      ],
+  },
+
+  'connect_info ([ \%code, \%extra_attr ])' => {
+      args => [
+          $coderef,
+          {
+            on_connect_do => [qw/a b c/],
+            PrintError => 0,
+            AutoCommit => 1,
+            on_disconnect_do => [qw/d e f/],
+          },
+          {
+            unsafe => 1,
+            auto_savepoint => 1,
+          },
+        ],
+      dbi_connect_info => [
+          $coderef,
+      ],
+  },
+
+  'connect_info ([ \%attr ])' => {
+      args => [
+          {
+            on_connect_do => [qw/a b c/],
+            PrintError => 1,
+            AutoCommit => 0,
+            on_disconnect_do => [qw/d e f/],
+            user => 'bar',
+            dsn => 'foo',
+          },
+          {
+            unsafe => 1,
+            auto_savepoint => 1,
+          },
+      ],
+      dbi_connect_info => [
+          'foo',
+          'bar',
+          undef,
+          {
+            %{$storage->_default_dbi_connect_attributes || {} },
+            PrintError => 1,
+            AutoCommit => 0,
+          },
+      ],
+  },
+  'connect_info ([ \%attr_with_coderef ])' => {
+      args => [ {
+        dbh_maker => $coderef,
+        dsn => 'blah',
+        user => 'bleh',
+        on_connect_do => [qw/a b c/],
+        on_disconnect_do => [qw/d e f/],
+      } ],
+      dbi_connect_info => [
+        $coderef
+      ],
+      warn => qr/Attribute\(s\) 'dsn', 'user' in connect_info were ignored/,
+  },
+};
+
+for my $type (keys %$invocations) {
+
+  # we cannot use a cloner portably because of the coderef,
+  # so we compare dumps instead
+  local $Data::Dumper::Sortkeys = 1;
+  my $arg_dump = Dumper ($invocations->{$type}{args});
+
+  warnings_exist (
+    sub { $storage->connect_info ($invocations->{$type}{args}) },
+     $invocations->{$type}{warn} || (),
+    'Warned about ignored attributes',
+  );
+
+  is ($arg_dump, Dumper ($invocations->{$type}{args}), "$type didn't modify passed arguments");
+
+  is_deeply ($storage->_dbi_connect_info, $invocations->{$type}{dbi_connect_info}, "$type produced correct _dbi_connect_info");
+  ok ( (not $storage->auto_savepoint and not $storage->unsafe), "$type correctly ignored extra hashref");
+
+  is_deeply (
+    [$storage->on_connect_do, $storage->on_disconnect_do ],
+    [ [qw/a b c/], [qw/d e f/] ],
+    "$type correctly parsed DBIC specific on_[dis]connect_do",
+  );
+}
+
+done_testing;
+
+1;

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/storage/dbh_do.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/dbh_do.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/storage/dbh_do.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/storage/dbh_do.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,33 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;  
+
+use Test::More tests => 8;
+use lib qw(t/lib);
+use DBICTest;
+
+
+my $schema = DBICTest->init_schema();
+my $storage = $schema->storage;
+
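+# dbh_do() invokes the coderef (or named storage method) as
+# ($storage, $dbh, @extra_args)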
+my $test_func = sub {
+    is $_[0], $storage;
+    is $_[1], $storage->dbh;
+    is $_[2], "foo";
+    is $_[3], "bar";
+};
+
+$storage->dbh_do(
+    $test_func,
+    "foo", "bar"
+);
+
+my $storage_class = ref $storage;
+{
+    no strict 'refs';
+    *{$storage_class .'::__test_method'} = $test_func;
+}
+$storage->dbh_do("__test_method", "foo", "bar");
+
+    
\ No newline at end of file

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/storage/dbi_coderef.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/32connect_code_ref.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/storage/dbi_coderef.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/storage/dbi_coderef.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,24 @@
+use strict;
+use warnings;  
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 1;
+
+# Set up the "usual" sqlite for DBICTest
+my $normal_schema = DBICTest->init_schema( sqlite_use_file => 1 );
+
+# Steal the dsn, which should be like 'dbi:SQLite:t/var/DBIxClass.db'
+my $normal_dsn = $normal_schema->storage->_dbi_connect_info->[0];
+
+# Make sure we have no active connection
+$normal_schema->storage->disconnect;
+
+# Make a new clone with a new connection, using a code reference
+my $code_ref_schema = $normal_schema->connect(sub { DBI->connect($normal_dsn); });
+
+# Stolen from 60core.t - this just verifies things seem to work at all
+my @art = $code_ref_schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
+cmp_ok(@art, '==', 3, "Three artists returned");

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/storage/debug.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/91debug.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/storage/debug.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/storage/debug.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,67 @@
+use strict;
+use warnings; 
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::DebugObj;
+use DBIC::SqlMakerTest;
+use Path::Class qw/file/;
+
+my $schema = DBICTest->init_schema();
+
+
+ok ( $schema->storage->debug(1), 'debug' );
+$schema->storage->debugfh(file('t/var/sql.log')->openw);
+
+$schema->storage->debugfh->autoflush(1);
+my $rs = $schema->resultset('CD')->search({});
+$rs->count();
+
+my $log = file('t/var/sql.log')->openr;
+my $line = <$log>;
+$log->close();
+ok($line =~ /^SELECT COUNT/, 'Log success');
+
+$schema->storage->debugfh(undef);
+$ENV{'DBIC_TRACE'} = '=t/var/foo.log';
+$rs = $schema->resultset('CD')->search({});
+$rs->count();
+$log = file('t/var/foo.log')->openr;
+$line = <$log>;
+$log->close();
+ok($line =~ /^SELECT COUNT/, 'Log success');
+$schema->storage->debugobj->debugfh(undef);
+delete($ENV{'DBIC_TRACE'});
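+
+# with no debugfh and no DBIC_TRACE target left, trace output falls back
+# to STDERR; closing STDERR should make the next traced query die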
+open(STDERRCOPY, '>&STDERR');
+stat(STDERRCOPY); # nop to get warnings quiet
+close(STDERR);
+eval {
+    $rs = $schema->resultset('CD')->search({});
+    $rs->count();
+};
+ok($@, 'Died on closed FH');
+open(STDERR, '>&STDERRCOPY');
+
+# test trace output correctness for bind params
+{
+    my ($sql, @bind);
+    $schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind));
+
+    my @cds = $schema->resultset('CD')->search( { artist => 1, cdid => { -between => [ 1, 3 ] }, } );
+    is_same_sql_bind(
+        $sql, \@bind,
+        "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) )",
+        [qw/'1' '1' '3'/],
+        'got correct SQL with all bind parameters (debugcb)'
+    );
+
+    @cds = $schema->resultset('CD')->search( { artist => 1, cdid => { -between => [ 1, 3 ] }, } );
+    is_same_sql_bind(
+        $sql, \@bind,
+        "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) )", ["'1'", "'1'", "'3'"],
+        'got correct SQL with all bind parameters (debugobj)'
+    );
+}
+
+done_testing;

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/storage/disable_sth_caching.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/35disable_sth_caching.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/storage/disable_sth_caching.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/storage/disable_sth_caching.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,19 @@
+use strict;
+use warnings;  
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 2;
+
+# Set up the "usual" sqlite for DBICTest
+my $schema = DBICTest->init_schema;
+
+my $sth_one = $schema->storage->sth('SELECT 42');
+my $sth_two = $schema->storage->sth('SELECT 42');
+$schema->storage->disable_sth_caching(1);
+my $sth_three = $schema->storage->sth('SELECT 42');
+
+ok($sth_one == $sth_two, "statement caching works");
+ok($sth_two != $sth_three, "disabling statement caching works");

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/storage/error.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/18inserterror.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/storage/error.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/storage/error.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,29 @@
+use strict;
+use warnings;
+use Class::C3;
+use Test::More;
+
+BEGIN {
+    eval "use DBD::SQLite";
+    plan $@
+        ? ( skip_all => 'needs DBD::SQLite for testing' )
+        : ( tests => 4 );
+}
+
+use lib qw(t/lib);
+
+use_ok( 'DBICTest' );
+use_ok( 'DBICTest::Schema' );
+my $schema = DBICTest->init_schema;
+
+{
+       my $warnings;
+       local $SIG{__WARN__} = sub { $warnings .= $_[0] };
+       eval {
+         $schema->resultset('CD')
+                ->create({ title => 'vacation in antarctica' })
+       };
+       like $@, qr/NULL/;  # as opposed to some other error
+       unlike( $warnings, qr/uninitialized value/, "No warning from Storage" );
+}
+

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/storage/exception.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/storage/exception.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/storage/exception.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,43 @@
+#!/usr/bin/perl
+
+use strict;
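+# DBIC::DebugObj (from t/lib) redirects trace output into the supplied
+# scalar/array refs so the generated SQL and binds can be checked directly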
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::Schema;
+
+# make sure nothing eats the exceptions (an unchecked eval in Storage::DESTROY used to be a problem)
+
+{
+  package Dying::Storage;
+
+  use warnings;
+  use strict;
+
+  use base 'DBIx::Class::Storage::DBI';
+
+  sub _populate_dbh {
+    my $self = shift;
+    my $death = $self->_dbi_connect_info->[3]{die};
+
+    die "storage test died: $death" if $death eq 'before_populate';
+    my $ret = $self->next::method (@_);
+    die "storage test died: $death" if $death eq 'after_populate';
+
+    return $ret;
+  }
+}
+
+for (qw/before_populate after_populate/) {
+  dies_ok (sub {
+    my $schema = DBICTest::Schema->clone;
+    $schema->storage_type ('Dying::Storage');
+    $schema->connection (DBICTest->_database, { die => $_ });
+    $schema->storage->ensure_connected;
+  }, "$_ exception found");
+}
+
+done_testing;

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/storage/on_connect_call.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/storage/on_connect_call.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/storage/on_connect_call.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,98 @@
+use strict;
+use warnings;
+no warnings qw/once redefine/;
+
+use lib qw(t/lib);
+use DBI;
+use DBICTest;
+use DBICTest::Schema;
+use DBIx::Class::Storage::DBI;
+
+# !!! do not replace this with done_testing - tests reside in the callbacks
+# !!! number of calls is important
+use Test::More tests => 16;
+# !!!
+
+my $schema = DBICTest::Schema->clone;
+
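+# install the hooks that the string/arrayref forms of on_connect_call and
+# on_disconnect_call below will resolve to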
+{
+  *DBIx::Class::Storage::DBI::connect_call_foo = sub {
+    isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+      'got storage in connect_call method';
+    is $_[1], 'bar', 'got param in connect_call method';
+  };
+
+  *DBIx::Class::Storage::DBI::disconnect_call_foo = sub {
+    isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+      'got storage in disconnect_call method';
+  };
+
+  ok $schema->connection(
+      DBICTest->_database,
+    {
+      on_connect_call => [
+          [ do_sql => 'create table test1 (id integer)' ],
+          [ do_sql => [ 'insert into test1 values (?)', {}, 1 ] ],
+          [ do_sql => sub { ['insert into test1 values (2)'] } ],
+          [ sub { $_[0]->dbh->do($_[1]) }, 'insert into test1 values (3)' ],
+          # this invokes $storage->connect_call_foo('bar') (above)
+          [ foo => 'bar' ],
+      ],
+      on_connect_do => 'insert into test1 values (4)',
+      on_disconnect_call => 'foo',
+    },
+  ), 'connection()';
+
+  ok (! $schema->storage->connected, 'start disconnected');
+
+  is_deeply (
+    $schema->storage->dbh->selectall_arrayref('select * from test1'),
+    [ [ 1 ], [ 2 ], [ 3 ], [ 4 ] ],
+    'on_connect_call/do actions worked'
+  );
+
+  $schema->storage->disconnect;
+}
+
+{
+  *DBIx::Class::Storage::DBI::connect_call_foo = sub {
+    isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+      'got storage in connect_call method';
+  };
+
+  *DBIx::Class::Storage::DBI::connect_call_bar = sub {
+    isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+      'got storage in connect_call method';
+  };
+
+
+  ok $schema->connection(
+    DBICTest->_database,
+    {
+      # method list form
+      on_connect_call => [ 'foo', sub { ok 1, "coderef in list form" }, 'bar' ],
+    },
+  ), 'connection()';
+
+  ok (! $schema->storage->connected, 'start disconnected');
+  $schema->storage->ensure_connected;
+  $schema->storage->disconnect; # this should not fire any tests
+}
+
+{
+  ok $schema->connection(
+    sub { DBI->connect(DBICTest->_database) },
+    {
+      # method list form
+      on_connect_call => [ sub { ok 1, "on_connect_call after DT parser" }, ],
+      on_disconnect_call => [ sub { ok 1, "on_disconnect_call after DT parser" }, ],
+    },
+  ), 'connection()';
+
+  ok (! $schema->storage->connected, 'start disconnected');
+
+  $schema->storage->_determine_driver;  # this should connect due to the coderef
+
+  ok ($schema->storage->connected, 'determine driver connects');
+  $schema->storage->disconnect;
+}

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/storage/on_connect_do.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/92storage_on_connect_do.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/storage/on_connect_do.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/storage/on_connect_do.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,89 @@
+use strict;
+use warnings;
+
+use Test::More tests => 12;
+
+use lib qw(t/lib);
+use base 'DBICTest';
+require DBI;
+
+
+my $schema = DBICTest->init_schema(
+    no_connect  => 1,
+    no_deploy   => 1,
+);
+
+ok $schema->connection(
+  DBICTest->_database,
+  {
+    on_connect_do => 'CREATE TABLE TEST_empty (id INTEGER)',
+  },
+), 'connection()';
+
+is_deeply (
+  $schema->storage->dbh->selectall_arrayref('SELECT * FROM TEST_empty'),
+  [],
+  'string version on_connect_do() worked'
+);
+
+$schema->storage->disconnect;
+
+ok $schema->connection(
+    sub { DBI->connect(DBICTest->_database) },
+    {
+        on_connect_do       => [
+            'CREATE TABLE TEST_empty (id INTEGER)',
+            [ 'INSERT INTO TEST_empty VALUES (?)', {}, 2 ],
+            \&insert_from_subref,
+        ],
+        on_disconnect_do    =>
+            [\&check_exists, 'DROP TABLE TEST_empty', \&check_dropped],
+    },
+), 'connection()';
+
+is_deeply (
+  $schema->storage->dbh->selectall_arrayref('SELECT * FROM TEST_empty'),
+  [ [ 2 ], [ 3 ], [ 7 ] ],
+  'on_connect_do() worked'
+);
+eval { $schema->storage->dbh->do('SELECT 1 FROM TEST_nonexistent'); };
+ok $@, 'Searching for nonexistent table dies';
+
+$schema->storage->disconnect();
+
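+# coderef form: each hook receives the storage object as its first argument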
+my($connected, $disconnected, @cb_args);
+ok $schema->connection(
+    DBICTest->_database,
+    {
+        on_connect_do       => sub { $connected = 1; @cb_args = @_; },
+        on_disconnect_do    => sub { $disconnected = 1 },
+    },
+), 'second connection()';
+$schema->storage->dbh->do('SELECT 1');
+ok $connected, 'on_connect_do() called after connect()';
+ok ! $disconnected, 'on_disconnect_do() not called after connect()';
+$schema->storage->disconnect();
+ok $disconnected, 'on_disconnect_do() called after disconnect()';
+
+isa_ok($cb_args[0], 'DBIx::Class::Storage', 'first arg to on_connect_do hook');
+
+sub check_exists {
+    my $storage = shift;
+    ok $storage->dbh->do('SELECT 1 FROM TEST_empty'), 'Table still exists';
+    return;
+}
+
+sub check_dropped {
+    my $storage = shift;
+    eval { $storage->dbh->do('SELECT 1 FROM TEST_empty'); };
+    ok $@, 'Reading from dropped table fails';
+    return;
+}
+
+sub insert_from_subref {
+    my $storage = shift;
+    return [
+        [ 'INSERT INTO TEST_empty VALUES (?)', {}, 3 ],
+        'INSERT INTO TEST_empty VALUES (7)',
+    ];
+}

Added: DBIx-Class/0.08/branches/run_file_against_storage/t/storage/ping_count.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/storage/ping_count.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/storage/ping_count.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,60 @@
+use strict;
+use warnings;  
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+my $ping_count = 0;
+
+{
+  local $SIG{__WARN__} = sub {};
+  require DBIx::Class::Storage::DBI;
+
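+  # count _ping() invocations; goto &$ping re-dispatches to the original
+  # so storage behaviour is otherwise unchanged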
+  my $ping = \&DBIx::Class::Storage::DBI::_ping;
+
+  *DBIx::Class::Storage::DBI::_ping = sub {
+    $ping_count++;
+    goto &$ping;
+  };
+}
+
+
+# measure pings around deploy() separately
+my $schema = DBICTest->init_schema( sqlite_use_file => 1, no_populate => 1 );
+
+is ($ping_count, 0, 'no _ping() calls during deploy');
+$ping_count = 0;
+
+
+
+DBICTest->populate_schema ($schema);
+
+# perform some operations and make sure they don't ping
+
+$schema->resultset('CD')->create({
+  cdid => 6, artist => 3, title => 'mtfnpy', year => 2009
+});
+
+$schema->resultset('CD')->create({
+  cdid => 7, artist => 3, title => 'mtfnpy2', year => 2009
+});
+
+$schema->storage->_dbh->disconnect;
+
+$schema->resultset('CD')->create({
+  cdid => 8, artist => 3, title => 'mtfnpy3', year => 2009
+});
+
+$schema->storage->_dbh->disconnect;
+
+$schema->txn_do(sub {
+ $schema->resultset('CD')->create({
+   cdid => 9, artist => 3, title => 'mtfnpy4', year => 2009
+ });
+});
+
+is $ping_count, 0, 'no _ping() calls';
+
+done_testing;

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/storage/reconnect.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/33storage_reconnect.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/storage/reconnect.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/storage/reconnect.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,73 @@
+use strict;
+use warnings;  
+
+use FindBin;
+use File::Copy;
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 6;
+
+my $db_orig = "$FindBin::Bin/../var/DBIxClass.db";
+my $db_tmp  = "$db_orig.tmp";
+
+# Set up the "usual" sqlite for DBICTest
+my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
+
+# Make sure we're connected by doing something
+my @art = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
+cmp_ok(@art, '==', 3, "Three artists returned");
+
+# Disconnect the dbh, and be sneaky about it
+# Also test if DBD::SQLite finally knows how to ->disconnect properly
+{
+  my $w;
+  local $SIG{__WARN__} = sub { $w = shift };
+  $schema->storage->_dbh->disconnect;
+  ok ($w !~ /active statement handles/, 'SQLite can disconnect properly');
+}
+
+# Try the operation again - What should happen here is:
+#   1. S::DBI blindly attempts the SELECT, which throws an exception
+#   2. It catches the exception, checks ->{Active}/->ping, sees the disconnected state...
+#   3. Reconnects, and retries the operation
+#   4. Success!
+my @art_two = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
+cmp_ok(@art_two, '==', 3, "Three artists returned");
+
+### Now, disconnect the dbh, and move the db file;
+# create a new one and chmod 000 to prevent SQLite from connecting.
+$schema->storage->_dbh->disconnect;
+move( $db_orig, $db_tmp );
+open DBFILE, '>', $db_orig;
+print DBFILE 'THIS IS NOT A REAL DATABASE';
+close DBFILE;
+chmod 0000, $db_orig;
+
+### Try the operation again... it should fail, since there's no db
+{
+    # Catch the DBI connection error
+    local $SIG{__WARN__} = sub {};
+    eval {
+        my @art_three = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
+    };
+    ok( $@, 'The operation failed' );
+}
+
+### Now, move the db file back to the correct name
+unlink($db_orig);
+move( $db_tmp, $db_orig );
+
+SKIP: {
+    skip "Cannot reconnect if original connection didn't fail", 2
+        if ( $@ =~ /encrypted or is not a database/ );
+
+    ### Try the operation again... this time, it should succeed
+    my @art_four;
+    eval {
+        @art_four = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
+    };
+    ok( !$@, 'The operation succeeded' );
+    cmp_ok( @art_four, '==', 3, "Three artists returned" );
+}

Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/storage/replicated.t (from rev 6806, DBIx-Class/0.08/branches/run_file_against_storage/t/93storage_replication.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/storage/replicated.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/storage/replicated.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,907 @@
+use strict;
+use warnings;
+use lib qw(t/lib);
+use Test::More;
+use Test::Exception;
+use DBICTest;
+use List::Util 'first';
+use Scalar::Util 'reftype';
+use File::Spec;
+use IO::Handle;
+
+BEGIN {
+    eval { require Test::Moose; Test::Moose->import() };
+    plan skip_all => "Need Test::Moose to run this test" if $@;
+    require DBIx::Class;
+
+    plan skip_all => 'Test needs ' . DBIx::Class::Optional::Dependencies->req_missing_for ('replicated')
+      unless DBIx::Class::Optional::Dependencies->req_ok_for ('replicated');
+}
+
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+use_ok 'DBIx::Class::Storage::DBI::Replicated';
+
+use Moose();
+use MooseX::Types();
+diag "Using Moose version $Moose::VERSION and MooseX::Types version $MooseX::Types::VERSION";
+
+=head1 HOW TO USE
+
+    This is a test of the replicated storage system.  It works in one of two
+    ways: either it tries to fake replication with a couple of SQLite DBs and
+    creative use of copy, or, if you define a couple of %ENV vars correctly,
+    it will test against those.  If you do that, it assumes the setup is
+    properly replicating.  Your results may vary, but I have demonstrated
+    this to work with mysql native replication.
+
+=cut
+
+
+## ----------------------------------------------------------------------------
+## Build a class to hold all our required testing data and methods.
+## ----------------------------------------------------------------------------
+
+TESTSCHEMACLASSES: {
+
+    ## --------------------------------------------------------------------- ##
+    ## Create an object to contain your replicated stuff.
+    ## --------------------------------------------------------------------- ##
+
+    package DBIx::Class::DBI::Replicated::TestReplication;
+
+    use DBICTest;
+    use base qw/Class::Accessor::Fast/;
+
+    __PACKAGE__->mk_accessors( qw/schema/ );
+
+    ## Initialize the object
+
+    sub new {
+        my ($class, $schema_method) = (shift, shift);
+        my $self = $class->SUPER::new(@_);
+
+        $self->schema( $self->init_schema($schema_method) );
+        return $self;
+    }
+
+    ## Get the Schema and set the replication storage type
+
+    sub init_schema {
+        # current SQLT SQLite producer does not handle DROP TABLE IF EXISTS, trap warnings here
+        local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/s };
+
+        my ($class, $schema_method) = @_;
+
+        my $method = "get_schema_$schema_method";
+        my $schema = $class->$method;
+
+        return $schema;
+    }
+
+    sub get_schema_by_storage_type {
+      DBICTest->init_schema(
+        sqlite_use_file => 1,
+        storage_type=>{
+          '::DBI::Replicated' => {
+            balancer_type=>'::Random',
+            balancer_args=>{
+              auto_validate_every=>100,
+          master_read_weight => 1
+            },
+          }
+        },
+        deploy_args=>{
+          add_drop_table => 1,
+        },
+      );
+    }
+
+    sub get_schema_by_connect_info {
+      DBICTest->init_schema(
+        sqlite_use_file => 1,
+        storage_type=> '::DBI::Replicated',
+        balancer_type=>'::Random',
+        balancer_args=> {
+          auto_validate_every=>100,
+          master_read_weight => 1
+        },
+        deploy_args=>{
+          add_drop_table => 1,
+        },
+      );
+    }
+
+    sub generate_replicant_connect_info {}
+    sub replicate {}
+    sub cleanup {}
+
+    ## --------------------------------------------------------------------- ##
+    ## Add a connect_info option to test option merging.
+    ## --------------------------------------------------------------------- ##
+    {
+    package DBIx::Class::Storage::DBI::Replicated;
+
+    use Moose;
+
+    __PACKAGE__->meta->make_mutable;
+
+    around connect_info => sub {
+      my ($next, $self, $info) = @_;
+      $info->[3]{master_option} = 1;
+      $self->$next($info);
+    };
+
+    __PACKAGE__->meta->make_immutable;
+
+    no Moose;
+    }
+
+    ## --------------------------------------------------------------------- ##
+    ## Subclass for when you are using SQLite for testing, this provides a fake
+    ## replication support.
+    ## --------------------------------------------------------------------- ##
+
+    package DBIx::Class::DBI::Replicated::TestReplication::SQLite;
+
+    use DBICTest;
+    use File::Copy;
+    use base 'DBIx::Class::DBI::Replicated::TestReplication';
+
+    __PACKAGE__->mk_accessors(qw/master_path slave_paths/);
+
+    ## Set the master path from DBICTest
+
+    sub new {
+        my $class = shift @_;
+        my $self = $class->SUPER::new(@_);
+
+        $self->master_path( DBICTest->_sqlite_dbfilename );
+        $self->slave_paths([
+            File::Spec->catfile(qw/t var DBIxClass_slave1.db/),
+            File::Spec->catfile(qw/t var DBIxClass_slave2.db/),
+        ]);
+
+        return $self;
+    }
+
+    ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
+    ## $storage->connect_info to be used for connecting replicants.
+
+    sub generate_replicant_connect_info {
+        my $self = shift @_;
+        my @dsn = map {
+            "dbi:SQLite:${_}";
+        } @{$self->slave_paths};
+
+        my @connect_infos = map { [$_,'','',{AutoCommit=>1}] } @dsn;
+
+        ## Make sure nothing is left over from a failed test
+        $self->cleanup;
+
+        ## try a hashref too
+        my $c = $connect_infos[0];
+        $connect_infos[0] = {
+          dsn => $c->[0],
+          user => $c->[1],
+          password => $c->[2],
+          %{ $c->[3] }
+        };
+
+        @connect_infos
+    }
+
+    ## Do a 'good enough' replication by copying the master dbfile over each of
+    ## the slave dbfiles.  If the master is SQLite we do this, otherwise we
+    ## just do a one second pause to let the slaves catch up.
+
+    sub replicate {
+        my $self = shift @_;
+        foreach my $slave (@{$self->slave_paths}) {
+            copy($self->master_path, $slave);
+        }
+    }
+
+    ## Cleanup after ourselves.  Unlink all the slave paths.
+
+    sub cleanup {
+        my $self = shift @_;
+        foreach my $slave (@{$self->slave_paths}) {
+            if(-e $slave) {
+                unlink $slave;
+            }
+        }
+    }
+
+    ## --------------------------------------------------------------------- ##
+    ## Subclass for when you are setting the databases via custom export vars
+    ## This is for when you have a replicating database setup that you are
+    ## going to test against.  You'll need to define the correct $ENV and have
+    ## two slave databases to test against, as well as a replication system
+    ## that will replicate in less than 1 second.
+    ## --------------------------------------------------------------------- ##
+
+    package DBIx::Class::DBI::Replicated::TestReplication::Custom;
+    use base 'DBIx::Class::DBI::Replicated::TestReplication';
+
+    ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
+    ## $storage->connect_info to be used for connecting replicants.
+
+    sub generate_replicant_connect_info {
+        return (
+            [$ENV{"DBICTEST_SLAVE0_DSN"}, $ENV{"DBICTEST_SLAVE0_DBUSER"}, $ENV{"DBICTEST_SLAVE0_DBPASS"}, {AutoCommit => 1}],
+            [$ENV{"DBICTEST_SLAVE1_DSN"}, $ENV{"DBICTEST_SLAVE1_DBUSER"}, $ENV{"DBICTEST_SLAVE1_DBPASS"}, {AutoCommit => 1}],
+        );
+    }
+
+    ## pause a bit to let the replication catch up
+
+    sub replicate {
+        sleep 1;
+    }
+}
+
+## ----------------------------------------------------------------------------
+## Create an object and run some tests
+## ----------------------------------------------------------------------------
+
+## This first bunch of tests is basic, just making sure all the bits are behaving
+
+my $replicated_class = DBICTest->has_custom_dsn ?
+    'DBIx::Class::DBI::Replicated::TestReplication::Custom' :
+    'DBIx::Class::DBI::Replicated::TestReplication::SQLite';
+
+my $replicated;
+
+for my $method (qw/by_connect_info by_storage_type/) {
+  undef $replicated;
+  ok $replicated = $replicated_class->new($method)
+      => "Created a replication object $method";
+
+  isa_ok $replicated->schema
+      => 'DBIx::Class::Schema';
+
+  isa_ok $replicated->schema->storage
+      => 'DBIx::Class::Storage::DBI::Replicated';
+
+  isa_ok $replicated->schema->storage->balancer
+      => 'DBIx::Class::Storage::DBI::Replicated::Balancer::Random'
+      => 'configured balancer_type';
+}
+
+### check that all Storage::DBI methods are handled by ::Replicated
+{
+  my @storage_dbi_methods = Class::MOP::Class
+    ->initialize('DBIx::Class::Storage::DBI')->get_all_method_names;
+
+  my @replicated_methods  = DBIx::Class::Storage::DBI::Replicated->meta
+    ->get_all_method_names;
+
+# remove constants and OTHER_CRAP
+  @storage_dbi_methods = grep !/^[A-Z_]+\z/, @storage_dbi_methods;
+
+# remove CAG accessors
+  @storage_dbi_methods = grep !/_accessor\z/, @storage_dbi_methods;
+
+# remove DBIx::Class (the root parent, with CAG and stuff) methods
+  my @root_methods = Class::MOP::Class->initialize('DBIx::Class')
+    ->get_all_method_names;
+  my %count;
+  $count{$_}++ for (@storage_dbi_methods, @root_methods);
+
+  @storage_dbi_methods = grep $count{$_} != 2, @storage_dbi_methods;
+
+# make hashes
+  my %storage_dbi_methods;
+  @storage_dbi_methods{@storage_dbi_methods} = ();
+  my %replicated_methods;
+  @replicated_methods{@replicated_methods} = ();
+
+# remove ::Replicated-specific methods
+  for my $method (@replicated_methods) {
+    delete $replicated_methods{$method}
+      unless exists $storage_dbi_methods{$method};
+  }
+  @replicated_methods = keys %replicated_methods;
+
+# check that what's left is implemented
+  %count = ();
+  $count{$_}++ for (@storage_dbi_methods, @replicated_methods);
+
+  if ((grep $count{$_} == 2, @storage_dbi_methods) == @storage_dbi_methods) {
+    pass 'all DBIx::Class::Storage::DBI methods implemented';
+  }
+  else {
+    my @unimplemented = grep $count{$_} == 1, @storage_dbi_methods;
+
+    fail 'the following DBIx::Class::Storage::DBI methods are unimplemented: '
+      . "@unimplemented";
+  }
+}
+
+ok $replicated->schema->storage->meta
+    => 'has a meta object';
+
+isa_ok $replicated->schema->storage->master
+    => 'DBIx::Class::Storage::DBI';
+
+isa_ok $replicated->schema->storage->pool
+    => 'DBIx::Class::Storage::DBI::Replicated::Pool';
+
+does_ok $replicated->schema->storage->balancer
+    => 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+
+ok my @replicant_connects = $replicated->generate_replicant_connect_info
+    => 'got replication connect information';
+
+ok my @replicated_storages = $replicated->schema->storage->connect_replicants(@replicant_connects)
+    => 'Created some storages suitable for replicants';
+
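+# record via debugcb which storage (MASTER or REPLICANT) served the last
+# query, so the routing assertions below have something to inspect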
+our %debug;
+$replicated->schema->storage->debug(1);
+$replicated->schema->storage->debugcb(sub {
+    my ($op, $info) = @_;
+    ##warn "\n$op, $info\n";
+    %debug = (
+        op => $op,
+        info => $info,
+        dsn => ($info=~m/\[(.+)\]/)[0],
+        storage_type => $info=~m/REPLICANT/ ? 'REPLICANT' : 'MASTER',
+    );
+});
+
+ok my @all_storages = $replicated->schema->storage->all_storages
+    => '->all_storages';
+
+is scalar @all_storages,
+    3
+    => 'correct number of ->all_storages';
+
+is ((grep $_->isa('DBIx::Class::Storage::DBI'), @all_storages),
+    3
+    => '->all_storages are correct type');
+
+my @all_storage_opts =
+  grep { (reftype($_)||'') eq 'HASH' }
+    map @{ $_->_connect_info }, @all_storages;
+
+is ((grep $_->{master_option}, @all_storage_opts),
+    3
+    => 'connect_info was merged from master to replicants');
+
+my @replicant_names = keys %{ $replicated->schema->storage->replicants };
+
+ok @replicant_names, "found replicant names @replicant_names";
+
+## Silence warning about not supporting the is_replicating method if using the
+## sqlite dbs.
+$replicated->schema->storage->debugobj->silence(1)
+  if first { m{^t/} } @replicant_names;
+
+isa_ok $replicated->schema->storage->balancer->current_replicant
+    => 'DBIx::Class::Storage::DBI';
+
+$replicated->schema->storage->debugobj->silence(0);
+
+ok $replicated->schema->storage->pool->has_replicants
+    => 'does have replicants';
+
+is $replicated->schema->storage->pool->num_replicants => 2
+    => 'has two replicants';
+
+does_ok $replicated_storages[0]
+    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated_storages[1]
+    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated->schema->storage->replicants->{$replicant_names[0]}
+    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated->schema->storage->replicants->{$replicant_names[1]}
+    => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+## Add some info to the database
+
+$replicated
+    ->schema
+    ->populate('Artist', [
+        [ qw/artistid name/ ],
+        [ 4, "Ozric Tentacles"],
+    ]);
+
+    is $debug{storage_type}, 'MASTER',
+        "got last query from a master: $debug{dsn}";
+
+    like $debug{info}, qr/INSERT/, 'Last was an insert';
+
+## Make sure all the slaves have the table definitions
+
+$replicated->replicate;
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
+
+## Silence warning about not supporting the is_replicating method if using the
+## sqlite dbs.
+$replicated->schema->storage->debugobj->silence(1)
+  if first { m{^t/} } @replicant_names;
+
+$replicated->schema->storage->pool->validate_replicants;
+
+$replicated->schema->storage->debugobj->silence(0);
+
+## Make sure we can read the data.
+
+ok my $artist1 = $replicated->schema->resultset('Artist')->find(4)
+    => 'Created Result';
+
+## We removed testing here since master read weight is on, so we can't tell in
+## advance what storage to expect.  We turn master read weight off a bit lower
+## is $debug{storage_type}, 'REPLICANT'
+##     => "got last query from a replicant: $debug{dsn}, $debug{info}";
+
+isa_ok $artist1
+    => 'DBICTest::Artist';
+
+is $artist1->name, 'Ozric Tentacles'
+    => 'Found expected name for first result';
+
+## Check that master_read_weight is honored
+{
+    no warnings qw/once redefine/;
+
+    local
+    *DBIx::Class::Storage::DBI::Replicated::Balancer::Random::_random_number =
+    sub { 999 };
+
+    $replicated->schema->storage->balancer->increment_storage;
+
+    is $replicated->schema->storage->balancer->current_replicant,
+       $replicated->schema->storage->master
+       => 'master_read_weight is honored';
+
+    ## turn it off for the duration of the test
+    $replicated->schema->storage->balancer->master_read_weight(0);
+    $replicated->schema->storage->balancer->increment_storage;
+}
+
+## Add some new rows that only the master will have.  This is because
+## we overload any type of write operation so that it must hit the master
+## database.
+
+$replicated
+    ->schema
+    ->populate('Artist', [
+        [ qw/artistid name/ ],
+        [ 5, "Doom's Children"],
+        [ 6, "Dead On Arrival"],
+        [ 7, "Watergate"],
+    ]);
+
+    is $debug{storage_type}, 'MASTER',
+        "got last query from a master: $debug{dsn}";
+
+    like $debug{info}, qr/INSERT/, 'Last was an insert';
+
+## Make sure all the slaves have the table definitions
+$replicated->replicate;
+
+## Should find some data now
+
+ok my $artist2 = $replicated->schema->resultset('Artist')->find(5)
+    => 'Sync succeeded';
+
+is $debug{storage_type}, 'REPLICANT'
+    => "got last query from a replicant: $debug{dsn}";
+
+isa_ok $artist2
+    => 'DBICTest::Artist';
+
+is $artist2->name, "Doom's Children"
+    => 'Found expected name for first result';
+
+## What happens when we disconnect all the replicants?
+
+is $replicated->schema->storage->pool->connected_replicants => 2
+    => "both replicants are connected";
+
+$replicated->schema->storage->replicants->{$replicant_names[0]}->disconnect;
+$replicated->schema->storage->replicants->{$replicant_names[1]}->disconnect;
+
+is $replicated->schema->storage->pool->connected_replicants => 0
+    => "both replicants are now disconnected";
+
+## All these should pass, since the database should automatically reconnect
+
+ok my $artist3 = $replicated->schema->resultset('Artist')->find(6)
+    => 'Still finding stuff.';
+
+is $debug{storage_type}, 'REPLICANT'
+    => "got last query from a replicant: $debug{dsn}";
+
+isa_ok $artist3
+    => 'DBICTest::Artist';
+
+is $artist3->name, "Dead On Arrival"
+    => 'Found expected name for first result';
+
+is $replicated->schema->storage->pool->connected_replicants => 1
+    => "At Least One replicant reconnected to handle the job";
+
+## What happens when we try to select something that doesn't exist?
+
+ok ! $replicated->schema->resultset('Artist')->find(666)
+    => 'Correctly failed to find something.';
+
+is $debug{storage_type}, 'REPLICANT'
+    => "got last query from a replicant: $debug{dsn}";
+
+## test the reliable option
+
+TESTRELIABLE: {
+
+    $replicated->schema->storage->set_reliable_storage;
+
+    ok $replicated->schema->resultset('Artist')->find(2)
+        => 'Read from master 1';
+
+    is $debug{storage_type}, 'MASTER',
+        "got last query from a master: $debug{dsn}";
+
+    ok $replicated->schema->resultset('Artist')->find(5)
+        => 'Read from master 2';
+
+    is $debug{storage_type}, 'MASTER',
+        "got last query from a master: $debug{dsn}";
+
+    $replicated->schema->storage->set_balanced_storage;
+
+    ok $replicated->schema->resultset('Artist')->find(3)
+        => 'Read from replicant';
+
+    is $debug{storage_type}, 'REPLICANT',
+        "got last query from a replicant: $debug{dsn}";
+}
+
+## Make sure when reliable goes out of scope, we are using replicants again
+
+ok $replicated->schema->resultset('Artist')->find(1)
+    => 'back to replicant 1.';
+
+    is $debug{storage_type}, 'REPLICANT',
+        "got last query from a replicant: $debug{dsn}";
+
+ok $replicated->schema->resultset('Artist')->find(2)
+    => 'back to replicant 2.';
+
+    is $debug{storage_type}, 'REPLICANT',
+        "got last query from a replicant: $debug{dsn}";
+
+## set all the replicants to inactive, and make sure the balancer falls back to
+## the master.
+
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
+
+{
+    ## catch the fallback to master warning
+    open my $debugfh, '>', \my $fallback_warning;
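+    ## ($debugfh is an in-memory filehandle; anything the storage writes
+    ## to it accumulates in the $fallback_warning scalar)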
+    my $oldfh = $replicated->schema->storage->debugfh;
+    $replicated->schema->storage->debugfh($debugfh);
+
+    ok $replicated->schema->resultset('Artist')->find(2)
+        => 'Fallback to master';
+
+    is $debug{storage_type}, 'MASTER',
+        "got last query from a master: $debug{dsn}";
+
+    like $fallback_warning, qr/falling back to master/
+        => 'emits falling back to master warning';
+
+    $replicated->schema->storage->debugfh($oldfh);
+}
+
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
+
+## Silence warning about not supporting the is_replicating method if using the
+## sqlite dbs.
+$replicated->schema->storage->debugobj->silence(1)
+  if first { m{^t/} } @replicant_names;
+
+$replicated->schema->storage->pool->validate_replicants;
+
+$replicated->schema->storage->debugobj->silence(0);
+
+ok $replicated->schema->resultset('Artist')->find(2)
+    => 'Returned to replicants';
+
+is $debug{storage_type}, 'REPLICANT',
+    "got last query from a replicant: $debug{dsn}";
+
+## Getting slave status tests
+
+SKIP: {
+    ## We skip these tests unless you have custom replicants, since the default
+    ## SQLite-based replication tests don't support these functions.
+
+    skip 'Cannot Test Replicant Status on Non Replicating Database', 10
+     unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
+
+    $replicated->replicate; ## Give the slaves a chance to catch up.
+
+    ok $replicated->schema->storage->replicants->{$replicant_names[0]}->is_replicating
+        => 'Replicants are replicating';
+
+    is $replicated->schema->storage->replicants->{$replicant_names[0]}->lag_behind_master, 0
+        => 'Replicant is zero seconds behind master';
+
+    ## Test the validate replicants
+
+    $replicated->schema->storage->pool->validate_replicants;
+
+    is $replicated->schema->storage->pool->active_replicants, 2
+        => 'Still have 2 replicants after validation';
+
+    ## Force the replicants to fail the validate test by requiring their lag to
+    ## be negative (ie ahead of the master!)
+
+    $replicated->schema->storage->pool->maximum_lag(-10);
+    $replicated->schema->storage->pool->validate_replicants;
+
+    is $replicated->schema->storage->pool->active_replicants, 0
+        => 'No way a replicant can be ahead of the master';
+
+    ## Let's be fair to the replicants again.  Let them lag up to 5
+
+    $replicated->schema->storage->pool->maximum_lag(5);
+    $replicated->schema->storage->pool->validate_replicants;
+
+    is $replicated->schema->storage->pool->active_replicants, 2
+        => 'Both replicants in good standing again';
+
+    ## Check auto validate
+
+    is $replicated->schema->storage->balancer->auto_validate_every, 100
+        => "Got the expected value for auto validate";
+
+    ## This will make sure we auto validate every time
+    $replicated->schema->storage->balancer->auto_validate_every(0);
+
+    ## set all the replicants to inactive, and make sure the balancer falls back to
+    ## the master.
+
+    $replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
+    $replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
+
+    ## Ok, now when we go to run a query, autovalidate SHOULD reconnect
+
+    is $replicated->schema->storage->pool->active_replicants => 0
+        => "both replicants turned off";
+
+    ok $replicated->schema->resultset('Artist')->find(5)
+        => 'replicant reactivated';
+
+    is $debug{storage_type}, 'REPLICANT',
+        "got last query from a replicant: $debug{dsn}";
+
+    is $replicated->schema->storage->pool->active_replicants => 2
+        => "both replicants reactivated";
+}
+
+## Test the reliably callback
+
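+## execute_reliably runs the given coderef with all reads forced to the
+## master, then restores balanced reads when it returns.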
+ok my $reliably = sub {
+
+    ok $replicated->schema->resultset('Artist')->find(5)
+        => 'found artist via the master inside execute_reliably';
+
+    is $debug{storage_type}, 'MASTER',
+        "got last query from a master: $debug{dsn}";
+
+} => 'created coderef properly';
+
+$replicated->schema->storage->execute_reliably($reliably);
+
+## Try something with an error
+
+ok my $unreliably = sub {
+
+    ok $replicated->schema->resultset('ArtistXX')->find(5)
+        => 'this should never run, since ArtistXX is not a real source';
+
+} => 'created coderef properly';
+
+throws_ok {$replicated->schema->storage->execute_reliably($unreliably)}
+    qr/Can't find source for ArtistXX/
+    => 'Bad coderef throws proper error';
+
+## Make sure replication came back
+
+ok $replicated->schema->resultset('Artist')->find(3)
+    => 'balanced reads restored after execute_reliably';
+
+is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+
+## make sure transactions are set to execute_reliably
+
+ok my $transaction = sub {
+
+    my $id = shift @_;
+
+    $replicated
+        ->schema
+        ->populate('Artist', [
+            [ qw/artistid name/ ],
+            [ $id, "Children of the Grave"],
+        ]);
+
+    ok my $result = $replicated->schema->resultset('Artist')->find($id)
+        => "Found expected artist for $id";
+
+    is $debug{storage_type}, 'MASTER',
+        "got last query from a master: $debug{dsn}";
+
+    ok my $more = $replicated->schema->resultset('Artist')->find(1)
+        => 'Found expected artist again for 1';
+
+    is $debug{storage_type}, 'MASTER',
+        "got last query from a master: $debug{dsn}";
+
+   return ($result, $more);
+
+} => 'Created a coderef properly';
+
+## Test the transaction with multi return
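+## (txn_do passes any extra arguments through to the coderef, hence the
+## artistid of 666 below)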
+{
+    ok my @return = $replicated->schema->txn_do($transaction, 666)
+        => 'did transaction';
+
+        is $return[0]->id, 666
+            => 'first returned value is correct';
+
+        is $debug{storage_type}, 'MASTER',
+            "got last query from a master: $debug{dsn}";
+
+        is $return[1]->id, 1
+            => 'second returned value is correct';
+
+        is $debug{storage_type}, 'MASTER',
+             "got last query from a master: $debug{dsn}";
+
+}
+
+## Test that asking for single return works
+{
+    ok my @return = $replicated->schema->txn_do($transaction, 777)
+        => 'did transaction';
+
+        is $return[0]->id, 777
+            => 'first returned value is correct';
+
+        is $return[1]->id, 1
+            => 'second returned value is correct';
+}
+
+## Test transaction returning a single value
+
+{
+    ok my $result = $replicated->schema->txn_do(sub {
+        ok my $more = $replicated->schema->resultset('Artist')->find(1)
+        => 'found inside a transaction';
+        is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+        return $more;
+    }) => 'successfully processed transaction';
+
+    is $result->id, 1
+       => 'Got expected single result from transaction';
+}
+
+## Make sure replication came back
+
+ok $replicated->schema->resultset('Artist')->find(1)
+    => 'balanced reads restored after txn_do';
+
+is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+
+## Test Discard changes
+
+{
+    ok my $artist = $replicated->schema->resultset('Artist')->find(2)
+        => 'got an artist to test discard changes';
+
+    is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+
+    ok $artist->get_from_storage({force_pool=>'master'})
+       => 'properly fetched a fresh row from master via get_from_storage';
+
+    is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+
+    ok $artist->discard_changes({force_pool=>'master'})
+       => 'properly called discard_changes against master (manual attrs)';
+
+    is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+
+    ok $artist->discard_changes()
+       => 'properly called discard_changes against master (default attrs)';
+
+    is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+
+    ok $artist->discard_changes({force_pool=>$replicant_names[0]})
+       => 'properly able to override the default attributes';
+
+    is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}"
+}
+
+## Test some edge cases, like trying to do a transaction inside a transaction, etc
+
+{
+    ok my $result = $replicated->schema->txn_do(sub {
+        return $replicated->schema->txn_do(sub {
+            ok my $more = $replicated->schema->resultset('Artist')->find(1)
+            => 'found inside a transaction inside a transaction';
+            is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+            return $more;
+        });
+    }) => 'successfully processed transaction';
+
+    is $result->id, 1
+       => 'Got expected single result from transaction';
+}
+
+{
+    ok my $result = $replicated->schema->txn_do(sub {
+        return $replicated->schema->storage->execute_reliably(sub {
+            return $replicated->schema->txn_do(sub {
+                return $replicated->schema->storage->execute_reliably(sub {
+                    ok my $more = $replicated->schema->resultset('Artist')->find(1)
+                      => 'found inside crazy deep transactions and execute_reliably';
+                    is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+                    return $more;
+                });
+            });
+        });
+    }) => 'successfully processed transaction';
+
+    is $result->id, 1
+       => 'Got expected single result from transaction';
+}
+
+## Test the force_pool resultset attribute.
+
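+## force_pool takes either the string 'master' or the name (dsn) of a
+## specific replicant; both forms are exercised below.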
+{
+    ok my $artist_rs = $replicated->schema->resultset('Artist')
+        => 'got artist resultset';
+
+    ## Turn on Forced Pool Storage
+    ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>'master'})
+        => 'Created a resultset using force_pool storage';
+
+    ok my $artist = $reliable_artist_rs->find(2)
+        => 'got an artist result via force_pool storage';
+
+    is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+}
+
+## Test the force_pool resultset attribute part two.
+
+{
+    ok my $artist_rs = $replicated->schema->resultset('Artist')
+        => 'got artist resultset';
+
+    ## Turn on Forced Pool Storage
+    ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>$replicant_names[0]})
+        => 'Created a resultset using force_pool storage';
+
+    ok my $artist = $reliable_artist_rs->find(2)
+        => 'got an artist result via force_pool storage';
+
+    is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+}
+## Delete the old database files
+$replicated->cleanup;
+
+done_testing;
+
+# vim: sw=4 sts=4 :

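[Illustrative note, not part of this commit: the replicated storage the
test above exercises is typically wired up along these lines; My::Schema
and the DSN variables below are placeholders.]

    my $schema = My::Schema->clone;
    $schema->storage_type([
        '::DBI::Replicated',
        { balancer_type => '::Random' },
    ]);
    $schema->connection($master_dsn, $user, $pass);
    $schema->storage->connect_replicants(
        [ $replicant1_dsn, $user, $pass ],
        [ $replicant2_dsn, $user, $pass ],
    );
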
Copied: DBIx-Class/0.08/branches/run_file_against_storage/t/storage/stats.t (from rev 6548, DBIx-Class/0.08/branches/run_file_against_storage/t/31stats.t)
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/storage/stats.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/storage/stats.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -0,0 +1,107 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use Test::More;
+
+plan tests => 12;
+
+use lib qw(t/lib);
+
+use_ok('DBICTest');
+my $schema = DBICTest->init_schema();
+
+my $cbworks = 0;
+
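+## debugcb should only fire while debug is enabled, so with debug(0) the
+## flag below must stay false.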
+$schema->storage->debugcb(sub { $cbworks = 1; });
+$schema->storage->debug(0);
+my $rs = $schema->resultset('CD')->search({});
+$rs->count();
+ok(!$cbworks, 'Callback not called with debug disabled');
+
+$schema->storage->debug(1);
+
+$rs->count();
+ok($cbworks, 'Debug callback worked.');
+
+my $prof = DBIx::Test::Profiler->new();
+$schema->storage->debugobj($prof);
+
+# Test non-transaction calls.
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+ok(!$prof->{'txn_begin'}, 'txn_begin not called');
+ok(!$prof->{'txn_commit'}, 'txn_commit not called');
+
+$prof->reset();
+
+# Test transaction calls
+$schema->txn_begin();
+ok($prof->{'txn_begin'}, 'txn_begin called');
+
+$rs = $schema->resultset('CD')->search({});
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+
+$schema->txn_commit();
+ok($prof->{'txn_commit'}, 'txn_commit called');
+
+$prof->reset();
+
+# Test a rollback
+$schema->txn_begin();
+$rs = $schema->resultset('CD')->search({});
+$rs->count();
+$schema->txn_rollback();
+ok($prof->{'txn_rollback'}, 'txn_rollback called');
+
+$schema->storage->debug(0);
+
+package DBIx::Test::Profiler;
+use strict;
+
+sub new {
+    my $class = shift;
+    return bless {}, $class;
+}
+
+sub query_start {
+    my $self = shift();
+    $self->{'query_start'} = 1;
+}
+
+sub query_end {
+    my $self = shift();
+    $self->{'query_end'} = 1;
+}
+
+sub txn_begin {
+    my $self = shift();
+    $self->{'txn_begin'} = 1;
+}
+
+sub txn_rollback {
+    my $self = shift();
+    $self->{'txn_rollback'} = 1;
+}
+
+sub txn_commit {
+    my $self = shift();
+    $self->{'txn_commit'} = 1;
+}
+
+sub reset {
+    my $self = shift();
+
+    $self->{'query_start'} = 0;
+    $self->{'query_end'} = 0;
+    $self->{'txn_begin'} = 0;
+    $self->{'txn_rollback'} = 0;
+    $self->{'txn_commit'} = 0;
+}
+
+1;

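[Illustrative note, not part of this commit: any object providing the
same query_start/query_end/txn_* methods can be installed as the debug
object; My::Schema and $dsn below are placeholders.]

    my $schema = My::Schema->connect($dsn, $user, $pass);
    $schema->storage->debugobj(DBIx::Test::Profiler->new);
    $schema->storage->debug(1);  # hooks fire only while debug is on
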
Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/zzzzzzz_perl_perf_bug.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/zzzzzzz_perl_perf_bug.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/zzzzzzz_perl_perf_bug.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -1,6 +1,7 @@
@@ -1,6 +1,7 @@
 use strict;
 use warnings;
 use Test::More;
+use Benchmark;
 use lib qw(t/lib);
 use DBICTest; # do not remove even though it is not used
 
@@ -25,9 +26,6 @@
 plan skip_all => 'Skipping as AUTOMATED_TESTING is set'
   if ( $ENV{AUTOMATED_TESTING} );
 
-eval "use Benchmark ':all'";
-plan skip_all => 'needs Benchmark for testing' if $@;
-
 plan tests => 3;
 
 ok( 1, 'Dummy - prevents next test timing out' );

Modified: DBIx-Class/0.08/branches/run_file_against_storage/t/zzzzzzz_sqlite_deadlock.t
===================================================================
--- DBIx-Class/0.08/branches/run_file_against_storage/t/zzzzzzz_sqlite_deadlock.t	2010-02-16 10:09:58 UTC (rev 8719)
+++ DBIx-Class/0.08/branches/run_file_against_storage/t/zzzzzzz_sqlite_deadlock.t	2010-02-16 10:26:12 UTC (rev 8720)
@@ -10,11 +10,19 @@
 use DBICTest::Schema;
 
 plan tests => 2;
-my $wait_for = 10;  # how many seconds to wait
+my $wait_for = 30;  # how many seconds to wait
 
 for my $close (0,1) {
 
-  my $tmp = File::Temp->new( UNLINK => 1, TMPDIR => 1, SUFFIX => '.sqlite' );
+  my $tmp = File::Temp->new(
+    UNLINK => 1,
+    TMPDIR => 1,
+    SUFFIX => '.sqlite',
+    EXLOCK => 0,  # important for BSD and derivatives
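+    # (on those systems File::Temp holds an exclusive flock by default,
+    # which would wedge this test's own connections to the same file)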
+  );
+
   my $tmp_fn = $tmp->filename;
   close $tmp if $close;
 
@@ -28,7 +34,7 @@
   lives_ok (sub {
     my $schema = DBICTest::Schema->connect ("DBI:SQLite:$tmp_fn");
     DBICTest->deploy_schema ($schema);
-    DBICTest->populate_schema ($schema);
+    #DBICTest->populate_schema ($schema);
   });
 
   alarm 0;
