[Bast-commits] r9401 - in DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps: . examples/Schema examples/Schema/MyDatabase/Main/Result lib/DBIx lib/DBIx/Class lib/DBIx/Class/Admin lib/DBIx/Class/CDBICompat lib/DBIx/Class/InflateColumn lib/DBIx/Class/Manual lib/DBIx/Class/Optional lib/DBIx/Class/PK lib/DBIx/Class/Relationship lib/DBIx/Class/ResultSource lib/DBIx/Class/SQLAHacks lib/DBIx/Class/Schema lib/DBIx/Class/Serialize lib/DBIx/Class/Storage lib/DBIx/Class/Storage/DBI lib/DBIx/Class/Storage/DBI/ADO lib/DBIx/Class/Storage/DBI/ODBC lib/DBIx/Class/Storage/DBI/Oracle lib/DBIx/Class/Storage/DBI/Replicated lib/DBIx/Class/Storage/DBI/Replicated/Balancer lib/DBIx/Class/Storage/DBI/Role lib/DBIx/Class/Storage/DBI/Sybase lib/DBIx/Class/Storage/DBI/Sybase/ASE lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server lib/SQL/Translator/Parser/DBIx maint script t t/admin t/bind t/cdbi t/cdbi/abstract t/cdbi/testlib t/count t/delete t/inflate t/lib t/lib/DBICNSTest/Bogus t/lib/DBICNSTest/OtherRslt t/lib/DBICNSTest/Result t/lib/DBICNSTest/ResultSet t/lib/DBICNSTest/Rslt t/lib/DBICNSTest/RtBug41083/Schema t/lib/DBICNSTest/RtBug41083/Schema_A t/lib/DBICTest t/lib/DBICTest/ResultSetManager t/lib/DBICTest/Schema t/multi_create t/ordered t/prefetch t/relationship t/resultset t/row t/schema t/search t/sqlahacks t/sqlahacks/limit_dialects t/sqlahacks/quotes t/sqlahacks/sql_maker t/storage

ribasushi at dev.catalyst.perl.org ribasushi at dev.catalyst.perl.org
Mon May 17 14:31:47 GMT 2010


Author: ribasushi
Date: 2010-05-17 15:31:46 +0100 (Mon, 17 May 2010)
New Revision: 9401

Added:
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/.gitignore
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin/
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin/Descriptive.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin/Types.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin/Usage.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/FilterColumn.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Optional/
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Optional/Dependencies.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/SQLite.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ADO.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ADO/
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/AutoCast.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Informix.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/InterBase.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/Firebird.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/SQL_Anywhere.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/SQLAnywhere.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/ASE/
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/UniqueIdentifier.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBIHacks.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/benchmark_datafetch.pl
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/joint_deps.pl
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/06notabs.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/07eol.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/10optional_deps.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/747mssql_ado.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/748informix.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/749sybase_asa.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/750firebird.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/93autocast.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/admin/
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/admin/01load.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/admin/02ddl.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/admin/03data.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/admin/10script.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/count/group_by_func.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/count/search_related.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/delete/complex.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/inflate/datetime_determine_parser.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/inflate/datetime_firebird.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/inflate/datetime_informix.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/inflate/datetime_sybase.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/inflate/datetime_sybase_asa.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICNSTest/Result/D.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICNSTest/ResultSet/D.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema/ComputedColumn.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema/TimestampPrimaryKey.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICVersion_v1.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICVersion_v2.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICVersion_v3.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/prefetch/join_type.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/prefetch/one_to_many_to_one.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/relationship/unresolvable.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/resultset/as_subselect_rs.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/resultset/is_ordered.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/resultset/is_paged.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/resultset/nulls_only.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/resultset/plus_select.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/row/
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/row/filter_column.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/row/inflate_result.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/row/pkless.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/schema/
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/schema/anon.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/schema/clone.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/search/related_strip_prefetch.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/search/select_chains.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/limit_dialects/
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/limit_dialects/rno.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/limit_dialects/rownum.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/limit_dialects/toplimit.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/oraclejoin.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/order_by_func.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/quotes/
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/quotes/quotes.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/quotes/quotes_newstyle.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/sql_maker/
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/sql_maker/sql_maker.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/sql_maker/sql_maker_quote.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/sqlahacks/sqlite.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/base.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/dbh_do.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/dbi_coderef.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/dbi_env.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/debug.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/deploy.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/disable_sth_caching.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/error.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/exception.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/global_destruction.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/on_connect_call.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/on_connect_do.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/ping_count.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/reconnect.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/replicated.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/storage/stats.t
Removed:
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/AmbiguousGlob.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/18inserterror.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/19quotes.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/19quotes_newstyle.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/31stats.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/32connect_code_ref.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/33storage_reconnect.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/35disable_sth_caching.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/36datetime.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/41orrible.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/42toplimit.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/89dbicadmin.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/91debug.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/92storage.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/92storage_on_connect_call.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/92storage_on_connect_do.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/92storage_ping_count.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/93storage_replication.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/95sql_maker.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/95sql_maker_quote.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/Binary.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/PgBase.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/dbh_do.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICVersionNew.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICVersionOrig.pm
Modified:
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/Changes
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/MANIFEST.SKIP
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/Makefile.PL
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/TODO
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/MyDatabase/Main/Result/Artist.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/MyDatabase/Main/Result/Cd.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/MyDatabase/Main/Result/Track.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/insertdb.pl
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/AccessorGroup.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/AbstractSearch.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/ColumnCase.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/Constructor.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/Copy.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/Iterator.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Componentised.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Core.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Cursor.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/InflateColumn.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/InflateColumn/DateTime.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/InflateColumn/File.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Component.pod
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Cookbook.pod
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/DocMap.pod
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Example.pod
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/FAQ.pod
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Intro.pod
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Joining.pod
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Reading.pod
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Troubleshooting.pod
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Ordered.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/PK.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/PK/Auto.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/Accessor.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/Base.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/BelongsTo.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/CascadeActions.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/HasMany.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/HasOne.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/ManyToMany.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSet.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSetColumn.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSource.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSource/Table.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSource/View.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSourceHandle.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSourceProxy.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Row.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/MSSQL.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/MySQL.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/OracleJoins.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Schema.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Schema/Versioned.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Serialize/Storable.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/StartupCheck.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Cursor.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/DB2.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/MSSQL.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/MultiColumnIn.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/NoBindVars.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Oracle.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Oracle/WhereJoins.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Pg.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Role/QueryCounter.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/SQLite.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/mysql.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/Statistics.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/TxnScopeGuard.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/UTF8Columns.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/SQL/Translator/Parser/DBIx/Class.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/gen-schema.pl
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/svn-log.perl
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/script/dbicadmin
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/02pod.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/03podcoverage.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/05components.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/100populate.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/101populate_rs.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/103many_to_many_warning.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/104view.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/20setuperrors.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/26dumper.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/39load_namespaces_1.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/39load_namespaces_3.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/46where_attribute.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/51threads.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/51threadtxn.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/52cycle.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/60core.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/69update.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/71mysql.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/72pg.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/73oracle.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/745db2.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/746mssql.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/746sybase.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/74mssql.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/75limit.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/76joins.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/76select.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/79aliasing.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/80unique.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/81transactions.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/83cache.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/85utf8.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/86might_have.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/86sqlt.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/87ordered.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/88result_set_column.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/90join_torture.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/93single_accessor_object.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/94versioning.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/98savepoints.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/99dbic_sqlt_parser.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/bind/attribute.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/bind/bindtype_columns.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/01-columns.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/02-Film.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/03-subclassing.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/04-lazy.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/06-hasa.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/09-has_many.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/11-triggers.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/12-filter.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/13-constraint.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/14-might_have.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/15-accessor.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/18-has_a.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/19-set_sql.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/21-iterator.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/22-deflate_order.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/26-mutator.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/30-pager.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/98-failure.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/abstract/search_where.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/columns_as_hashes.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/Actor.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/ActorAlias.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/Blurb.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/Director.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/Film.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/Lazy.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/Log.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/MyBase.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/MyFilm.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/MyFoo.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/MyStar.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/MyStarLink.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/MyStarLinkMCPK.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/Order.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/cdbi/testlib/OtherFilm.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/count/count_rs.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/count/distinct.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/count/grouped_pager.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/count/in_subquery.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/count/prefetch.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/from_subquery.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/inflate/core.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/inflate/datetime_mssql.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/inflate/file_column.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/inflate/hri.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/inflate/serialize.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICNSTest/Bogus/A.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICNSTest/Bogus/B.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICNSTest/OtherRslt/D.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICNSTest/Result/A.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICNSTest/Result/B.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICNSTest/Rslt/A.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICNSTest/Rslt/B.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICNSTest/RtBug41083/Schema/Foo.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICNSTest/RtBug41083/Schema_A/A.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/AuthorCheck.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/BaseResult.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/ResultSetManager/Foo.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema/Artist.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema/CD.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema/Employee.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema/Encoded.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema/Event.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema/EventTZPg.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema/FileColumn.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema/ForceForeign.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema/Track.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema/Year1999CDs.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/DBICTest/Schema/Year2000CDs.pm
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/lib/sqlite.sql
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/multi_create/in_memory.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/multi_create/standard.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/ordered/cascade_delete.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/prefetch/attrs_untouched.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/prefetch/diamond.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/prefetch/double_prefetch.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/prefetch/grouped.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/prefetch/multiple_hasmany.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/prefetch/standard.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/prefetch/via_search_related.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/prefetch/with_limit.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/relationship/after_update.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/relationship/core.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/relationship/doesnt_exist.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/relationship/update_or_create_multi.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/relationship/update_or_create_single.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/resultset/as_query.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/resultset/update_delete.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/search/preserve_original_rs.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/search/subquery.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/zzzzzzz_perl_perf_bug.t
   DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/zzzzzzz_sqlite_deadlock.t
Log:
 r7332 at Thesaurus (orig r7329):  caelum | 2009-08-18 06:19:12 +0200
 always reconnect in odbc:mssql:connect_call_use_dynamic_cursors
 r7333 at Thesaurus (orig r7330):  caelum | 2009-08-18 06:43:35 +0200
 minor change
 r7335 at Thesaurus (orig r7332):  ribasushi | 2009-08-18 08:51:20 +0200
  r7248 at Thesaurus (orig r7245):  rbuels | 2009-08-06 21:39:05 +0200
  making topic branch for "currval undefined" problem when not qualifying tables with their schema names
  r7249 at Thesaurus (orig r7246):  rbuels | 2009-08-06 21:40:39 +0200
  failing (crashing, really) test for this strange pg thing.  could not figure out a way to make a non-crashing test
  r7250 at Thesaurus (orig r7247):  rbuels | 2009-08-06 21:42:30 +0200
  fix for pg non-schema-qualified thing, with a nice vague commit message.  performance should be the same as before, for the common (schema-qualified) case
  r7251 at Thesaurus (orig r7248):  rbuels | 2009-08-06 22:41:19 +0200
  woops, pg search path fix needed support for quoted schema names in search paths
  r7295 at Thesaurus (orig r7292):  rbuels | 2009-08-10 20:45:50 +0200
  added caching of pg search path in Pg storage object
  r7296 at Thesaurus (orig r7293):  rbuels | 2009-08-10 22:37:31 +0200
  added test for empty table before non-schema-qualified pg sequence test in 72pg.t
  r7299 at Thesaurus (orig r7296):  rbuels | 2009-08-11 00:46:35 +0200
  added blub to Changes for pg_unqualified_schema branch
  r7300 at Thesaurus (orig r7297):  rbuels | 2009-08-11 00:48:53 +0200
  added me (rbuels) to contributors
  r7328 at Thesaurus (orig r7325):  rbuels | 2009-08-17 23:46:21 +0200
  added POD section about schema support to DBIx::Class::Storage::Pg
  r7329 at Thesaurus (orig r7326):  rbuels | 2009-08-17 23:51:40 +0200
  added more tests for multi-schema support in 72pg.t
  r7334 at Thesaurus (orig r7331):  ribasushi | 2009-08-18 08:49:03 +0200
  Un-plan test and fix authorship
 
 r7341 at Thesaurus (orig r7338):  ribasushi | 2009-08-18 10:55:23 +0200
  r7337 at Thesaurus (orig r7334):  ribasushi | 2009-08-18 09:00:03 +0200
  Pre-release branch
  r7338 at Thesaurus (orig r7335):  ribasushi | 2009-08-18 10:32:13 +0200
  Disambiguate POD
  r7339 at Thesaurus (orig r7336):  ribasushi | 2009-08-18 10:32:53 +0200
  Release 0.08109
 
 r7346 at Thesaurus (orig r7343):  robkinyon | 2009-08-19 21:44:48 +0200
 Applied doc patch by spb
 r7347 at Thesaurus (orig r7344):  ribasushi | 2009-08-20 07:50:49 +0200
 Fix a weird-ass sqlt invocation in deployment_statements()
 r7348 at Thesaurus (orig r7345):  ribasushi | 2009-08-20 08:19:07 +0200
 Apply pod patch by arthas (slightly modified)
 r7353 at Thesaurus (orig r7350):  abraxxa | 2009-08-20 15:07:29 +0200
 pod patch for 'Tracing SQL' examples
 
 r7356 at Thesaurus (orig r7353):  spb | 2009-08-20 19:53:02 +0200
 Minor fix to the previous doc patch
 r7357 at Thesaurus (orig r7354):  frew | 2009-08-20 23:54:04 +0200
 add some basic guards to get rid of warnings
 r7361 at Thesaurus (orig r7358):  ribasushi | 2009-08-21 11:18:43 +0200
 Because prefetch uses the cache system, it is not possible to set HRI on a prefetched rs without upsetting the tests - don't compare
 r7372 at Thesaurus (orig r7369):  caelum | 2009-08-24 12:32:57 +0200
 bump CAG dep
 r7391 at Thesaurus (orig r7388):  ribasushi | 2009-08-25 13:43:38 +0200
 typo
 r7392 at Thesaurus (orig r7389):  ribasushi | 2009-08-25 14:29:37 +0200
  r7354 at Thesaurus (orig r7351):  abraxxa | 2009-08-20 17:46:06 +0200
  new branch grouped_has_many_join
  
  r7382 at Thesaurus (orig r7379):  ribasushi | 2009-08-24 22:50:13 +0200
  Seems like abraxxa's bug is fixed
  r7385 at Thesaurus (orig r7382):  ribasushi | 2009-08-25 11:33:40 +0200
  One more test
 
 r7396 at Thesaurus (orig r7393):  ribasushi | 2009-08-26 18:07:51 +0200
 Stop testing deprecated json::syck
 r7397 at Thesaurus (orig r7394):  ribasushi | 2009-08-26 18:08:24 +0200
 Make sure sqlt_type gets called after determining driver
 r7398 at Thesaurus (orig r7395):  ribasushi | 2009-08-26 18:21:53 +0200
 Make POD::Coverage happy... again
 r7399 at Thesaurus (orig r7396):  ribasushi | 2009-08-26 18:31:54 +0200
 Clarify
 r7400 at Thesaurus (orig r7397):  frew | 2009-08-26 22:24:19 +0200
 Remove dead, sketchtowne link
 r7404 at Thesaurus (orig r7401):  ribasushi | 2009-08-27 18:50:12 +0200
 Changes
 r7406 at Thesaurus (orig r7403):  ribasushi | 2009-08-28 00:11:29 +0200
 Add a test proving how dumb I am
 r7407 at Thesaurus (orig r7404):  ribasushi | 2009-08-28 16:34:46 +0200
 Warning to spare mst explanations
 r7422 at Thesaurus (orig r7419):  caelum | 2009-08-29 08:34:07 +0200
  r7381 at hlagh (orig r7380):  ribasushi | 2009-08-24 17:07:58 -0400
  Branch to add autocast support as a standalone piece of code
  r7382 at hlagh (orig r7381):  ribasushi | 2009-08-25 05:06:43 -0400
  Move storage tests to their own dir
  r7385 at hlagh (orig r7384):  ribasushi | 2009-08-25 06:35:19 -0400
  Switch storage class loading to ensure_class_loaded
  r7386 at hlagh (orig r7385):  ribasushi | 2009-08-25 06:37:48 -0400
  Change a datatype for test purposes
  r7387 at hlagh (orig r7386):  ribasushi | 2009-08-25 06:45:35 -0400
  Fix two storage tests
  r7388 at hlagh (orig r7387):  ribasushi | 2009-08-25 06:45:52 -0400
  Actual autocast code
  r18697 at hlagh (orig r7416):  caelum | 2009-08-29 01:42:29 -0400
  rename method and add docs
  r18698 at hlagh (orig r7417):  ribasushi | 2009-08-29 02:07:18 -0400
  Make sure arrays work
  r18699 at hlagh (orig r7418):  caelum | 2009-08-29 02:11:14 -0400
  rename _map_data_type to _native_data_type
 
 r7425 at Thesaurus (orig r7422):  ribasushi | 2009-08-29 08:55:12 +0200
 Make podcoverage happy
 r7426 at Thesaurus (orig r7423):  ribasushi | 2009-08-29 09:06:07 +0200
 Reduce the number of heavy dbh_do calls
 r7439 at Thesaurus (orig r7436):  ribasushi | 2009-08-30 08:54:10 +0200
  r7435 at Thesaurus (orig r7432):  caelum | 2009-08-30 02:53:21 +0200
  new branch
  r7436 at Thesaurus (orig r7433):  caelum | 2009-08-30 03:14:36 +0200
  add dbh_maker option to connect_info hash
  r7437 at Thesaurus (orig r7434):  ribasushi | 2009-08-30 08:51:14 +0200
  Minor cleanup and test enhancement
  r7438 at Thesaurus (orig r7435):  ribasushi | 2009-08-30 08:53:59 +0200
  Changes
 
 r7444 at Thesaurus (orig r7441):  ribasushi | 2009-08-30 09:53:04 +0200
 Sanify 03podcoverage.t, allow wildcard skipping
 r7449 at Thesaurus (orig r7446):  caelum | 2009-08-31 04:36:08 +0200
 support coderef connect_infos for replicated storage
 r7450 at Thesaurus (orig r7447):  caelum | 2009-08-31 04:58:43 +0200
 make replicant dsn detection a bit nicer
 r7451 at Thesaurus (orig r7448):  caelum | 2009-08-31 17:30:37 +0200
 fix case where replicant coderef dsn does not connect
 r7452 at Thesaurus (orig r7449):  arcanez | 2009-08-31 23:13:50 +0200
 remove . from end of =head links
 r7455 at Thesaurus (orig r7452):  ribasushi | 2009-09-01 10:38:37 +0200
 Quote deps, avoid floating problems
 r7456 at Thesaurus (orig r7453):  ribasushi | 2009-09-01 11:10:11 +0200
 Fix misleading FAQ entry
 r7464 at Thesaurus (orig r7461):  ribasushi | 2009-09-01 16:51:58 +0200
 Fix insert_bulk with rebless
 r7465 at Thesaurus (orig r7462):  ribasushi | 2009-09-01 16:52:39 +0200
 Comment
 r7466 at Thesaurus (orig r7463):  matthewt | 2009-09-01 17:17:08 +0200
 clearer copyright
 r7467 at Thesaurus (orig r7464):  matthewt | 2009-09-01 17:18:31 +0200
 split copyright and license
 r7469 at Thesaurus (orig r7466):  frew | 2009-09-01 20:27:36 +0200
 pod describing strife with MSSQL
 r7483 at Thesaurus (orig r7480):  ribasushi | 2009-09-02 11:07:04 +0200
 Streamline pg test-schemas cleanup
 r7484 at Thesaurus (orig r7481):  ribasushi | 2009-09-02 11:20:25 +0200
 Centralize handling of minimum sqlt version to DBIx::Class
 Bump version to the latest unborked sqlt (still just a recommend)
 r7485 at Thesaurus (orig r7482):  ribasushi | 2009-09-02 11:31:50 +0200
 Some cleanup... don't remember where it came from
 r7486 at Thesaurus (orig r7483):  ribasushi | 2009-09-02 12:19:11 +0200
 First part of mysql insanity
 r7487 at Thesaurus (orig r7484):  ribasushi | 2009-09-02 12:25:35 +0200
 Invoke default_join_type only on undefined types
 r7488 at Thesaurus (orig r7485):  ribasushi | 2009-09-02 12:42:39 +0200
 No fancy methods for the default_jointype, as we don't have proper sqlahacks inheritance and they are... well hacks
 r7489 at Thesaurus (orig r7486):  ribasushi | 2009-09-02 13:00:07 +0200
 Mysql v3 support (ick)
 r7494 at Thesaurus (orig r7491):  rbuels | 2009-09-02 20:33:47 +0200
 POD patch, corrected erroneous usage of dbh_do in Storage::DBI synopsis
 r7500 at Thesaurus (orig r7497):  ribasushi | 2009-09-03 11:11:29 +0200
 POD lists the storable hooks, but does not load them
 r7501 at Thesaurus (orig r7498):  ribasushi | 2009-09-03 11:11:50 +0200
 Storable sanification
 r7502 at Thesaurus (orig r7499):  ribasushi | 2009-09-03 11:24:17 +0200
 Storable is now in Core
 r7503 at Thesaurus (orig r7500):  ribasushi | 2009-09-03 11:36:58 +0200
 Make sure mysql is fixed
 r7506 at Thesaurus (orig r7503):  ribasushi | 2009-09-03 17:16:17 +0200
 Add podcoverage skip
 r7507 at Thesaurus (orig r7504):  ribasushi | 2009-09-03 17:23:19 +0200
 Consolidate _verify_pid calls
 r7511 at Thesaurus (orig r7508):  matthewt | 2009-09-03 20:12:53 +0200
 get the COPYRIGHT in the right place to not confuse META.yml generation
 r7513 at Thesaurus (orig r7510):  ribasushi | 2009-09-03 20:41:22 +0200
 
 r7514 at Thesaurus (orig r7511):  ribasushi | 2009-09-03 20:41:34 +0200
  r7472 at Thesaurus (orig r7469):  norbi | 2009-09-01 21:43:08 +0200
   r7635 at vger:  mendel | 2009-09-01 21:02:23 +0200
   Added pointer to 'SQL functions on the lhs' to the 'using stored procs' section.
  
 
 r7515 at Thesaurus (orig r7512):  ribasushi | 2009-09-03 20:41:44 +0200
  r7473 at Thesaurus (orig r7470):  norbi | 2009-09-01 21:43:19 +0200
   r7636 at vger:  mendel | 2009-09-01 21:09:43 +0200
    Mentions the possibility of creating indexes on SQL function return values.
  
 
 r7516 at Thesaurus (orig r7513):  ribasushi | 2009-09-03 20:41:52 +0200
  r7474 at Thesaurus (orig r7471):  norbi | 2009-09-01 21:43:31 +0200
   r7637 at vger:  mendel | 2009-09-01 21:19:14 +0200
   Rewrote 'SQL functions on the lhs' to use the new SQLA literal SQL + bind feature.
  
 
 r7517 at Thesaurus (orig r7514):  ribasushi | 2009-09-03 20:41:59 +0200
  r7475 at Thesaurus (orig r7472):  norbi | 2009-09-01 21:43:42 +0200
   r7638 at vger:  mendel | 2009-09-01 21:20:17 +0200
   Added a comment to the example code to stress that it does not work.
  
 
 r7518 at Thesaurus (orig r7515):  ribasushi | 2009-09-03 20:42:10 +0200
  r7476 at Thesaurus (orig r7473):  norbi | 2009-09-01 21:43:54 +0200
   r7639 at vger:  mendel | 2009-09-01 21:28:18 +0200
   Added pointer to DBIx::Class::DynamicSubclass.
  
 
 r7519 at Thesaurus (orig r7516):  ribasushi | 2009-09-03 20:42:15 +0200
  r7477 at Thesaurus (orig r7474):  norbi | 2009-09-01 21:44:03 +0200
   r7640 at vger:  mendel | 2009-09-01 21:30:13 +0200
   Replaced deprecated \'colname DESC' order_by syntax with { -desc => 'colname' } syntax.
  
 
 r7520 at Thesaurus (orig r7517):  ribasushi | 2009-09-03 20:42:22 +0200
  r7478 at Thesaurus (orig r7475):  norbi | 2009-09-01 21:44:17 +0200
   r7641 at vger:  mendel | 2009-09-01 21:32:48 +0200
   Rewrote 'SQL functions on the lhs' to use the new SQLA literal SQL + bind feature.
  
 
 r7521 at Thesaurus (orig r7518):  ribasushi | 2009-09-03 20:42:26 +0200
  r7479 at Thesaurus (orig r7476):  norbi | 2009-09-01 21:44:28 +0200
   r7642 at vger:  mendel | 2009-09-01 21:42:25 +0200
   Added many-to-many add_to_*() example to stress that it returns the related row and not the linking table row.
  
 
 r7522 at Thesaurus (orig r7519):  ribasushi | 2009-09-03 20:42:32 +0200
  r7480 at Thesaurus (orig r7477):  norbi | 2009-09-01 22:14:25 +0200
   r7653 at vger:  mendel | 2009-09-01 22:14:11 +0200
   Fixed wrong literal SQL + bind examples (missing operator and placeholders).
  
 
 r7523 at Thesaurus (orig r7520):  ribasushi | 2009-09-03 20:42:37 +0200
  r7481 at Thesaurus (orig r7478):  norbi | 2009-09-01 22:30:48 +0200
   r7655 at vger:  mendel | 2009-09-01 22:30:35 +0200
   Fixed the bind value column names in the SQL literal + bind examples.
  
 
 r7524 at Thesaurus (orig r7521):  ribasushi | 2009-09-03 20:42:45 +0200
  r7482 at Thesaurus (orig r7479):  norbi | 2009-09-01 22:52:21 +0200
   r7657 at vger:  mendel | 2009-09-01 22:52:09 +0200
   Further improvement in the bind value column names in the SQL literal + bind examples.
  
 
 r7549 at Thesaurus (orig r7546):  ribasushi | 2009-09-04 08:47:19 +0200
 Stop connecting to determine dt-parser (test is in pg branch)
 r7553 at Thesaurus (orig r7550):  ribasushi | 2009-09-04 11:20:48 +0200
 Require sqla with bool support
 r7560 at Thesaurus (orig r7557):  ribasushi | 2009-09-04 19:17:32 +0200
 Dumper follies
 r7561 at Thesaurus (orig r7558):  ribasushi | 2009-09-04 19:27:50 +0200
 Even better sqla
 r7570 at Thesaurus (orig r7567):  ribasushi | 2009-09-04 20:49:53 +0200
  r7459 at Thesaurus (orig r7456):  rbuels | 2009-09-01 12:46:46 +0200
  making another pg_unqualified_schema branch, for real this time
  r7460 at Thesaurus (orig r7457):  rbuels | 2009-09-01 12:51:31 +0200
  reworked tests for pg last_insert_id in presence of un-schema-qualified things. adds some todo tests, including a case for which it does not seem to be possible to correctly guess the sequence to use for the liid
  r7461 at Thesaurus (orig r7458):  rbuels | 2009-09-01 12:54:34 +0200
  in Pg storage, added a warning for case when the nextval sequence is not schema qualified
  r7462 at Thesaurus (orig r7459):  rbuels | 2009-09-01 13:01:31 +0200
  tweak to Pg test, warnings_like -> warnings_exist
  r7463 at Thesaurus (orig r7460):  ribasushi | 2009-09-01 13:34:59 +0200
  Rewrap todo properly
  r7490 at Thesaurus (orig r7487):  ribasushi | 2009-09-02 14:16:01 +0200
  Make pg sequence autodetect deterministic (or throw exceptions). Test needs adjusting
  r7491 at Thesaurus (orig r7488):  rbuels | 2009-09-02 19:15:01 +0200
  some reorganization and cleanup of pg-specific tests
  r7492 at Thesaurus (orig r7489):  rbuels | 2009-09-02 20:08:31 +0200
  more cleanup of 72pg.t
  r7495 at Thesaurus (orig r7492):  rbuels | 2009-09-02 20:48:12 +0200
  more cleanup of pg tests, added cascade to drop function, cleaned up create and drop of schemas to use dbh_do
  r7496 at Thesaurus (orig r7493):  rbuels | 2009-09-02 20:50:42 +0200
  oops, missed something screwed up by the pull
  r7525 at Thesaurus (orig r7522):  rbuels | 2009-09-03 20:45:53 +0200
  added __END__ before pod in Pg storage
  r7526 at Thesaurus (orig r7523):  rbuels | 2009-09-03 20:46:00 +0200
  renamed pg test schemas to be more organized
  r7531 at Thesaurus (orig r7528):  rbuels | 2009-09-04 00:28:11 +0200
  more pg test cleanup
  r7532 at Thesaurus (orig r7529):  rbuels | 2009-09-04 00:28:17 +0200
  more pg test cleanup
  r7533 at Thesaurus (orig r7530):  rbuels | 2009-09-04 00:28:25 +0200
  starting work on extended set of Pg auto-pk tests
  r7534 at Thesaurus (orig r7531):  rbuels | 2009-09-04 00:28:31 +0200
  more work on extended set of Pg auto-pk tests
  r7535 at Thesaurus (orig r7532):  rbuels | 2009-09-04 00:28:39 +0200
  more work on pg tests
  r7536 at Thesaurus (orig r7533):  rbuels | 2009-09-04 00:28:45 +0200
  more work on extended set of Pg auto-pk tests
  r7537 at Thesaurus (orig r7534):  rbuels | 2009-09-04 00:28:50 +0200
  added .gitignore for users of git-svn
  r7538 at Thesaurus (orig r7535):  rbuels | 2009-09-04 00:28:58 +0200
  more work on extended set of Pg auto-pk tests
  r7539 at Thesaurus (orig r7536):  rbuels | 2009-09-04 00:29:04 +0200
  added darcs and git to MANIFEST.SKIP version control skipping section
  r7540 at Thesaurus (orig r7537):  rbuels | 2009-09-04 00:41:26 +0200
  more work on extended set of Pg auto-pk tests
  r7541 at Thesaurus (orig r7538):  rbuels | 2009-09-04 00:41:32 +0200
  more work on extended set of Pg auto-pk tests
  r7542 at Thesaurus (orig r7539):  rbuels | 2009-09-04 00:41:38 +0200
  more work on extended set of Pg auto-pk tests
  r7543 at Thesaurus (orig r7540):  rbuels | 2009-09-04 02:20:23 +0200
  more work on extended set of Pg auto-pk tests
  r7544 at Thesaurus (orig r7541):  rbuels | 2009-09-04 02:20:32 +0200
  rewrote autoinc fetcher as a query into the pg_catalog.  all the old tests pass now, but not my new tests.  the new tests might be buggy
  r7545 at Thesaurus (orig r7542):  rbuels | 2009-09-04 02:20:39 +0200
  oops, forgot to put the drop for the extended tests back in the pg tests
  r7546 at Thesaurus (orig r7543):  rbuels | 2009-09-04 02:41:56 +0200
  couple of comment/documentation tweaks to pg storage driver
  r7547 at Thesaurus (orig r7544):  rbuels | 2009-09-04 02:42:02 +0200
  fixed my tests
  r7548 at Thesaurus (orig r7545):  rbuels | 2009-09-04 02:42:09 +0200
  clarified the POD in Pg storage driver regarding multi-schema support
  r7551 at Thesaurus (orig r7548):  ribasushi | 2009-09-04 08:51:30 +0200
  Proper unconnected test
  r7554 at Thesaurus (orig r7551):  ribasushi | 2009-09-04 11:26:12 +0200
  Fixes to pg test after review:
  - Move the store_column test to 60core.t
  - Streamline the select ... for update test
  - Disable all exception warnings for normal test runs
  
  r7555 at Thesaurus (orig r7552):  ribasushi | 2009-09-04 11:56:00 +0200
  Rewrite selector using sqla
  r7562 at Thesaurus (orig r7559):  rbuels | 2009-09-04 19:42:52 +0200
  moved search_path querying function from Pg storage driver into tests
  r7563 at Thesaurus (orig r7560):  rbuels | 2009-09-04 19:43:00 +0200
  refactored how Pg storage driver calls sequence search, made error message more informative when query into pg_catalog fails
  r7564 at Thesaurus (orig r7561):  rbuels | 2009-09-04 19:43:08 +0200
  tweaked pg sequence discovery error message a bit more
  r7565 at Thesaurus (orig r7562):  rbuels | 2009-09-04 19:43:17 +0200
  added big block comment explaining Pg sequence discovery strategy
  r7566 at Thesaurus (orig r7563):  rbuels | 2009-09-04 20:35:10 +0200
  added code to use DBD::Pg column_info to fetch column default if recent enough
  r7567 at Thesaurus (orig r7564):  rbuels | 2009-09-04 20:35:18 +0200
  tweaked comment
  r7568 at Thesaurus (orig r7565):  rbuels | 2009-09-04 20:35:30 +0200
  oops, DBD::Pg 2.15.1 should be included in working versions
 
 r7572 at Thesaurus (orig r7569):  ribasushi | 2009-09-04 21:32:01 +0200
 Stop double-caching datetime_parser - keep it in the storage only
 r7573 at Thesaurus (orig r7570):  ribasushi | 2009-09-04 21:36:39 +0200
 No Serialize::Storable in core
 r7574 at Thesaurus (orig r7571):  ribasushi | 2009-09-04 21:49:54 +0200
 Changes
 r7580 at Thesaurus (orig r7577):  ribasushi | 2009-09-06 12:28:44 +0200
 Add mysterious exception test
 r7582 at Thesaurus (orig r7579):  ribasushi | 2009-09-06 15:43:10 +0200
 No connection - no cleanup
 r7583 at Thesaurus (orig r7580):  ribasushi | 2009-09-06 15:45:51 +0200
 Streamline test
 r7584 at Thesaurus (orig r7581):  ribasushi | 2009-09-06 17:39:03 +0200
 Test cleanup:
 Benchmark and Data::Dumper have been in core forever
 Make POD testing conditional as shown in http://use.perl.org/~Alias/journal/38822
 Remove some dead cdbi test files
 Stop openly giving contributors an option to override the authorcheck
 
 r7585 at Thesaurus (orig r7582):  ribasushi | 2009-09-06 17:48:32 +0200
 Done long time ago
 r7586 at Thesaurus (orig r7583):  ribasushi | 2009-09-06 17:56:27 +0200
 Release 0.08110
 r7588 at Thesaurus (orig r7585):  ribasushi | 2009-09-06 18:33:46 +0200
 Stop eating exceptions in ::Storage::DBI::DESTROY
 r7589 at Thesaurus (orig r7586):  ribasushi | 2009-09-06 20:35:30 +0200
 Centralize identity insert control for mssql (it seems that issuing an OFF is not necessary)
 r7590 at Thesaurus (orig r7587):  ribasushi | 2009-09-06 20:45:41 +0200
 Clearer MSSQL error message
 r7591 at Thesaurus (orig r7588):  ribasushi | 2009-09-06 23:58:22 +0200
 Fix mssql pod
 r7592 at Thesaurus (orig r7589):  ribasushi | 2009-09-07 09:06:05 +0200
 Release 0.08111
 r7598 at Thesaurus (orig r7595):  wreis | 2009-09-07 15:31:38 +0200
 improved warn for Storable hooks in ResultSourceHandle
 r7600 at Thesaurus (orig r7597):  ribasushi | 2009-09-07 16:26:59 +0200
 Whoops - last_insert_id allows for multiple autoinc columns - support it in pg
 r7601 at Thesaurus (orig r7598):  ribasushi | 2009-09-07 16:46:14 +0200
 Prune duplicate constraints from the find() condition
 r7606 at Thesaurus (orig r7603):  frew | 2009-09-08 20:13:29 +0200
 Turn IDENTITY_INSERT back off after inserts
 r7616 at Thesaurus (orig r7613):  ribasushi | 2009-09-09 14:16:12 +0200
 Fix warning
 r7617 at Thesaurus (orig r7614):  ribasushi | 2009-09-09 14:42:49 +0200
 Really sanify exception text
 r7624 at Thesaurus (orig r7621):  mo | 2009-09-10 18:53:32 +0200
 added test to make sure that store_column is called even for non-dirty columns
 r7625 at Thesaurus (orig r7622):  bluefeet | 2009-09-10 19:03:21 +0200
 Fix RSC->reset() to no longer return $self, which fixes Cursor::Cached + RSC.
 r7626 at Thesaurus (orig r7623):  ribasushi | 2009-09-10 19:32:03 +0200
 The real fix
 r7627 at Thesaurus (orig r7624):  matthewt | 2009-09-11 02:33:17 +0200
 make it clear that we are not supposed to have optional deps
 r7628 at Thesaurus (orig r7625):  ribasushi | 2009-09-11 06:30:03 +0200
 Changes so far
 r7629 at Thesaurus (orig r7626):  ribasushi | 2009-09-11 06:39:45 +0200
 Fix borked makefile
 r7630 at Thesaurus (orig r7627):  ribasushi | 2009-09-11 15:39:42 +0200
 Fixed minor problem with txn scope guard - rollback exceptions were never reported
 r7632 at Thesaurus (orig r7629):  ribasushi | 2009-09-11 23:06:54 +0200
 Extend prefetch tests
 r7633 at Thesaurus (orig r7630):  ribasushi | 2009-09-11 23:13:45 +0200
 Reverting http://dev.catalyst.perl.org/svnweb/bast/revision?rev=4278 - it seems to pass fine now
 r7634 at Thesaurus (orig r7631):  ribasushi | 2009-09-12 00:15:50 +0200
 Add single() to RSC
 r7635 at Thesaurus (orig r7632):  ribasushi | 2009-09-12 00:44:01 +0200
 This is how the txnguard should really work
 r7636 at Thesaurus (orig r7633):  ribasushi | 2009-09-12 00:58:21 +0200
 Fix borked example
 r7637 at Thesaurus (orig r7634):  ribasushi | 2009-09-12 00:58:58 +0200
 scopeguard almost done
 r7638 at Thesaurus (orig r7635):  brunov | 2009-09-12 01:25:12 +0200
 Update DBIx::Class::Manual::Example.pod to reflect previous changes in examples/Schema/insertdb.pl
 
 r7639 at Thesaurus (orig r7636):  brunov | 2009-09-12 01:27:17 +0200
 Added Bruno Vecchi to the Contributors section in DBIx/Class.pm
 
 
 r7640 at Thesaurus (orig r7637):  ribasushi | 2009-09-12 01:31:16 +0200
 Final scopeguard tweak (?)
 r7644 at Thesaurus (orig r7641):  ribasushi | 2009-09-12 12:46:51 +0200
 Even better localization of $@, and don't use Test::Warn for the time being, as something is freaking out Sub::UpLevel
 r7670 at Thesaurus (orig r7659):  ribasushi | 2009-09-14 18:24:44 +0200
 Someone claimed this is a problem...
 r7673 at Thesaurus (orig r7662):  ribasushi | 2009-09-15 09:43:46 +0200
 Warn when distinct is used with group_by
 r7674 at Thesaurus (orig r7663):  rbuels | 2009-09-15 22:45:32 +0200
 doc patch, clarified warning about using find_or_create() and friends on tables with auto-increment or similar columns
 r7675 at Thesaurus (orig r7664):  rbuels | 2009-09-15 22:55:15 +0200
 another doc clarification regarding auto-inc columns with find_or_create() and such functions
 r7683 at Thesaurus (orig r7672):  ribasushi | 2009-09-17 13:54:44 +0200
 Fix left-join chaining
 r7694 at Thesaurus (orig r7683):  ribasushi | 2009-09-18 12:36:42 +0200
  r6389 at Thesaurus (orig r6388):  caelum | 2009-05-23 22:48:06 +0200
  recreating Sybase branch
  r6395 at Thesaurus (orig r6394):  caelum | 2009-05-24 01:47:32 +0200
  try not to fuck mssql with the sybase crap
  r6488 at Thesaurus (orig r6487):  caelum | 2009-06-03 17:31:24 +0200
  resolve conflict
  r6490 at Thesaurus (orig r6489):  caelum | 2009-06-03 18:25:36 +0200
  add missing files to sybase branch
  r6492 at Thesaurus (orig r6491):  caelum | 2009-06-04 01:51:39 +0200
  fix Sybase DT stuff and storage bases
  r6493 at Thesaurus (orig r6492):  caelum | 2009-06-04 02:10:45 +0200
  fix base for mssql (can't be a sybase anymore)
  r6494 at Thesaurus (orig r6493):  caelum | 2009-06-04 02:20:37 +0200
  test sybase SMALLDATETIME inflation
  r6495 at Thesaurus (orig r6494):  caelum | 2009-06-04 04:52:31 +0200
  update Sybase docs
  r6501 at Thesaurus (orig r6500):  caelum | 2009-06-04 14:50:49 +0200
  sybase limit count without offset now works
  r6504 at Thesaurus (orig r6503):  caelum | 2009-06-04 18:03:01 +0200
  use TOP for sybase limit count thanks to refactored count
  r6505 at Thesaurus (orig r6504):  caelum | 2009-06-04 18:41:54 +0200
  back to counting rows for Sybase LIMIT counts
  r6506 at Thesaurus (orig r6505):  caelum | 2009-06-04 19:07:48 +0200
  minor sybase count fix
  r6512 at Thesaurus (orig r6511):  caelum | 2009-06-05 01:02:48 +0200
  test sybase group_by count, works
  r6513 at Thesaurus (orig r6512):  caelum | 2009-06-05 01:28:18 +0200
  set date format on _rebless correctly
  r6516 at Thesaurus (orig r6515):  caelum | 2009-06-05 02:24:46 +0200
  manually merged in sybase_noquote branch
  r6518 at Thesaurus (orig r6517):  caelum | 2009-06-05 06:34:25 +0200
  shit doesn't work yet
  r6520 at Thesaurus (orig r6519):  caelum | 2009-06-05 16:55:41 +0200
  update sybase types which shouldn't be quoted
  r6525 at Thesaurus (orig r6524):  caelum | 2009-06-06 04:40:51 +0200
  tweaks to sybase types
  r6527 at Thesaurus (orig r6526):  caelum | 2009-06-06 05:36:03 +0200
  temporary sybase noquote hack
  r6595 at Thesaurus (orig r6594):  caelum | 2009-06-10 13:46:37 +0200
  Sybase::NoBindVars now correctly quotes
  r6596 at Thesaurus (orig r6595):  caelum | 2009-06-10 14:04:19 +0200
  cache rsrc in NoBindVars, use name_sep
  r6597 at Thesaurus (orig r6596):  caelum | 2009-06-10 14:35:52 +0200
  Sybase count by first pk, if available
  r6599 at Thesaurus (orig r6598):  caelum | 2009-06-10 15:00:42 +0200
  cache rsrc in NoBindVars correctly
  r6600 at Thesaurus (orig r6599):  caelum | 2009-06-10 15:27:41 +0200
  handle unknown rsrc in NoBindVars and Sybase::NoBindVars
  r6605 at Thesaurus (orig r6604):  caelum | 2009-06-10 18:17:31 +0200
  cache rsrc properly in NoBindVars, return undef if no rsrc
  r6658 at Thesaurus (orig r6657):  caelum | 2009-06-13 05:57:40 +0200
  switch to DateTime::Format::Sybase
  r6700 at Thesaurus (orig r6699):  caelum | 2009-06-17 16:25:28 +0200
  rename and document dt setup method, will be an on_connect_call at later merge point
  r6701 at Thesaurus (orig r6700):  caelum | 2009-06-17 16:30:08 +0200
  more dt docs reorg
  r6715 at Thesaurus (orig r6714):  caelum | 2009-06-19 01:28:17 +0200
  todo tests for text/image columns in sybase
  r6716 at Thesaurus (orig r6715):  caelum | 2009-06-19 01:46:56 +0200
  added connect_call_blob_setup for Sybase
  r6724 at Thesaurus (orig r6723):  caelum | 2009-06-19 17:12:20 +0200
  cleanups
  r6771 at Thesaurus (orig r6770):  caelum | 2009-06-23 16:42:32 +0200
  minor changes
  r6788 at Thesaurus (orig r6787):  caelum | 2009-06-25 05:31:06 +0200
  fixup POD, comment out count
  r6811 at Thesaurus (orig r6810):  caelum | 2009-06-28 02:14:56 +0200
  prototype blob implementation
  r6857 at Thesaurus (orig r6856):  caelum | 2009-06-29 23:45:19 +0200
  branch pushed, removing
  r6868 at Thesaurus (orig r6867):  caelum | 2009-06-30 03:39:51 +0200
  merge on_connect_call updates
  r6877 at Thesaurus (orig r6876):  caelum | 2009-06-30 12:46:43 +0200
  code cleanups
  r6957 at Thesaurus (orig r6956):  caelum | 2009-07-03 02:32:48 +0200
  minor changes
  r6959 at Thesaurus (orig r6958):  caelum | 2009-07-03 05:04:12 +0200
  fix sybase mro
  r7001 at Thesaurus (orig r7000):  caelum | 2009-07-07 13:34:23 +0200
  fix sybase rebless to NoBindVars
  r7021 at Thesaurus (orig r7020):  caelum | 2009-07-10 12:52:13 +0200
  fix NoBindVars
  r7053 at Thesaurus (orig r7052):  caelum | 2009-07-15 01:39:02 +0200
  set maxConnect in DSN and add docs
  r7065 at Thesaurus (orig r7064):  caelum | 2009-07-17 09:39:54 +0200
  make insertion of blobs into tables with identity columns work, other minor fixes
  r7070 at Thesaurus (orig r7069):  caelum | 2009-07-17 23:30:13 +0200
  some compatibility updated for older DBD::Sybase versions, some initial work on _select_args for blobs
  r7072 at Thesaurus (orig r7071):  caelum | 2009-07-19 23:57:11 +0200
  mangling _select_args turned out to be unnecessary
  r7073 at Thesaurus (orig r7072):  caelum | 2009-07-20 01:02:19 +0200
  minor cleanups
  r7074 at Thesaurus (orig r7073):  caelum | 2009-07-20 15:47:48 +0200
  blob update now works
  r7076 at Thesaurus (orig r7075):  caelum | 2009-07-20 19:06:46 +0200
  change the (incorrect) version check to a check for FreeTDS
  r7077 at Thesaurus (orig r7076):  caelum | 2009-07-20 19:13:25 +0200
  better check for FreeTDS thanks to arcanez
  r7089 at Thesaurus (orig r7086):  caelum | 2009-07-22 07:09:21 +0200
  minor cleanups
  r7091 at Thesaurus (orig r7088):  caelum | 2009-07-22 17:05:37 +0200
  remove unnecessary test Result class
  r7092 at Thesaurus (orig r7089):  caelum | 2009-07-23 00:47:14 +0200
  fix doc for how to check for FreeTDS
  r7095 at Thesaurus (orig r7092):  caelum | 2009-07-23 14:35:53 +0200
  doc tweak
  r7115 at Thesaurus (orig r7112):  caelum | 2009-07-24 09:58:24 +0200
  add support for IDENTITY_INSERT
  r7117 at Thesaurus (orig r7114):  caelum | 2009-07-24 16:19:08 +0200
  savepoint support
  r7120 at Thesaurus (orig r7117):  caelum | 2009-07-24 20:35:37 +0200
  fix race condition in last_insert_id with placeholders
  r7121 at Thesaurus (orig r7118):  caelum | 2009-07-24 21:22:25 +0200
  code cleanup
  r7124 at Thesaurus (orig r7121):  caelum | 2009-07-25 16:19:58 +0200
  use _resolve_column_info in NoBindVars
  r7125 at Thesaurus (orig r7122):  caelum | 2009-07-25 21:23:49 +0200
  make insert work as a nested transaction too
  r7126 at Thesaurus (orig r7123):  caelum | 2009-07-25 22:52:17 +0200
  add money type support
  r7128 at Thesaurus (orig r7125):  caelum | 2009-07-27 03:48:35 +0200
  better FreeTDS support
  r7130 at Thesaurus (orig r7127):  caelum | 2009-07-28 06:23:54 +0200
  minor refactoring, cleanups, doc updates
  r7131 at Thesaurus (orig r7128):  caelum | 2009-07-28 09:32:45 +0200
  forgot to set mro in dbi::cursor
  r7141 at Thesaurus (orig r7138):  caelum | 2009-07-30 10:21:20 +0200
  better test for "smalldatetime" in Sybase
  r7146 at Thesaurus (orig r7143):  caelum | 2009-07-30 15:37:18 +0200
  update sqlite test schema
  r7207 at Thesaurus (orig r7204):  caelum | 2009-08-04 23:40:16 +0200
  update Changes
  r7222 at Thesaurus (orig r7219):  caelum | 2009-08-05 11:02:26 +0200
  fix a couple minor issues after pull from trunk
  r7260 at Thesaurus (orig r7257):  caelum | 2009-08-07 14:45:18 +0200
  add note about where to get Schema::Loader
  r7273 at Thesaurus (orig r7270):  ribasushi | 2009-08-09 01:19:49 +0200
  Changes and minor code rewrap
  r7285 at Thesaurus (orig r7282):  ribasushi | 2009-08-10 08:08:06 +0200
  pesky whitespace
  r7286 at Thesaurus (orig r7283):  ribasushi | 2009-08-10 08:11:46 +0200
  privatize dormant method - it may be useful for sybase at *some* point
  r7287 at Thesaurus (orig r7284):  ribasushi | 2009-08-10 08:19:55 +0200
  Whoops
  r7289 at Thesaurus (orig r7286):  caelum | 2009-08-10 08:44:51 +0200
  document placeholders_with_type_conversion_supported and add a redispatch to reblessed storage in DBI::update
  r7290 at Thesaurus (orig r7287):  caelum | 2009-08-10 10:07:45 +0200
  fix and test redispatch to reblessed storage insert/update
  r7292 at Thesaurus (orig r7289):  caelum | 2009-08-10 10:32:37 +0200
  rename get_connected_schema to get_schema in sybase test
  r7345 at Thesaurus (orig r7342):  ribasushi | 2009-08-18 22:45:06 +0200
  Fix Changes
  r7367 at Thesaurus (orig r7364):  ribasushi | 2009-08-23 10:00:34 +0200
  Minor speedup
  r7368 at Thesaurus (orig r7365):  ribasushi | 2009-08-23 10:01:10 +0200
  Generalize and hide placeholder support check
  r7369 at Thesaurus (orig r7366):  ribasushi | 2009-08-23 10:04:26 +0200
  Rename the common sybase driver
  r7373 at Thesaurus (orig r7370):  caelum | 2009-08-24 13:21:51 +0200
  make insert only use a txn if needed, add connect_call_unsafe_insert
  r7374 at Thesaurus (orig r7371):  caelum | 2009-08-24 14:42:57 +0200
  add test for IDENTITY_INSERT
  r7378 at Thesaurus (orig r7375):  caelum | 2009-08-24 15:51:48 +0200
  use debugobj->callback instead of local *_query_start in test to capture query
  r7379 at Thesaurus (orig r7376):  caelum | 2009-08-24 17:19:46 +0200
  remove duplicate oracle method and fix an mssql method call
  r7417 at Thesaurus (orig r7414):  caelum | 2009-08-29 07:23:45 +0200
  update link to Schema::Loader branch
  r7427 at Thesaurus (orig r7424):  caelum | 2009-08-29 09:31:41 +0200
  switch to ::DBI::AutoCast
  r7428 at Thesaurus (orig r7425):  ribasushi | 2009-08-29 13:36:22 +0200
  Cleanup:
  Added commented method signatures for easier debugging
  privatize transform_unbound_value as _prep_bind_value
  Remove \@_ splice's in lieu of of simple shifts
  Exposed TYPE_MAPPING used by native_data_type via our
  Removed use of txn_do - internal code uses the scope guard
  Renamed some variables, whitespace cleanup, the works
  r7429 at Thesaurus (orig r7426):  ribasushi | 2009-08-29 13:40:48 +0200
  Varname was absolutely correct
  r7430 at Thesaurus (orig r7427):  caelum | 2009-08-29 14:09:13 +0200
  minor changes for tests to pass again
  r7431 at Thesaurus (orig r7428):  caelum | 2009-08-29 21:08:51 +0200
  fix inserts with active cursors
  r7432 at Thesaurus (orig r7429):  caelum | 2009-08-29 22:53:02 +0200
  remove extra connection
  r7434 at Thesaurus (orig r7431):  caelum | 2009-08-30 00:02:20 +0200
  test correlated subquery
  r7442 at Thesaurus (orig r7439):  ribasushi | 2009-08-30 09:07:00 +0200
  Put the comment back
  r7443 at Thesaurus (orig r7440):  ribasushi | 2009-08-30 09:15:41 +0200
  Change should_quote_value to interpolate_unquoted to make it harder to stop quoting by accident (it's easier to return an undef by accident than a 1)
  r7446 at Thesaurus (orig r7443):  caelum | 2009-08-30 18:19:46 +0200
  added txn_scope_guards for blob operations
  r7447 at Thesaurus (orig r7444):  ribasushi | 2009-08-30 18:56:43 +0200
  Rename insert_txn to unsafe_insert
  r7512 at Thesaurus (orig r7509):  ribasushi | 2009-09-03 20:24:14 +0200
  Minor cleanups
  r7575 at Thesaurus (orig r7572):  caelum | 2009-09-05 07:23:57 +0200
  pending review by mpeppler
  r7593 at Thesaurus (orig r7590):  ribasushi | 2009-09-07 09:10:05 +0200
  Release 0.08111 tag
  r7594 at Thesaurus (orig r7591):  ribasushi | 2009-09-07 09:14:33 +0200
  Whoops this should not have committed
  r7602 at Thesaurus (orig r7599):  caelum | 2009-09-07 21:31:38 +0200
  fix _insert_dbh code to only connect when needed, doc update
  r7607 at Thesaurus (orig r7604):  caelum | 2009-09-09 02:15:54 +0200
  remove unsafe_insert
  r7608 at Thesaurus (orig r7605):  ribasushi | 2009-09-09 09:14:20 +0200
  Localisation ain't free, we don't do it unless we have to
  r7609 at Thesaurus (orig r7606):  ribasushi | 2009-09-09 09:40:29 +0200
  Much simpler
  r7610 at Thesaurus (orig r7607):  ribasushi | 2009-09-09 10:38:41 +0200
  Reduce amount of perl-golf :)
  r7611 at Thesaurus (orig r7608):  ribasushi | 2009-09-09 10:41:15 +0200
  This should not have worked - I guess we lack tests?
  r7614 at Thesaurus (orig r7611):  caelum | 2009-09-09 12:08:36 +0200
  test multi-row blob update
  r7619 at Thesaurus (orig r7616):  caelum | 2009-09-09 18:01:15 +0200
  remove Sub::Name hack for method dispatch, pass $next instead
  r7620 at Thesaurus (orig r7617):  caelum | 2009-09-10 02:16:03 +0200
  do blob update over _insert_dbh
  r7661 at Thesaurus (orig r7650):  caelum | 2009-09-13 10:27:44 +0200
  change _insert_dbh to _insert_storage
  r7663 at Thesaurus (orig r7652):  caelum | 2009-09-13 11:52:20 +0200
  make sure _init doesn't loop, steal insert_bulk from mssql, add some insert_bulk tests
  r7664 at Thesaurus (orig r7653):  caelum | 2009-09-13 13:27:51 +0200
  allow subclassing of methods proxied to _writer_storage
  r7666 at Thesaurus (orig r7655):  caelum | 2009-09-14 15:09:21 +0200
  sybase bulk API support stuff (no blobs yet, coming soon...)
  r7667 at Thesaurus (orig r7656):  caelum | 2009-09-14 15:33:14 +0200
  add another test for sybase bulk stuff (passes)
  r7668 at Thesaurus (orig r7657):  caelum | 2009-09-14 15:44:06 +0200
  minor change (fix inverted boolean for warning)
  r7669 at Thesaurus (orig r7658):  caelum | 2009-09-14 15:48:52 +0200
  remove @args from DBI::sth, use full arg list
  r7676 at Thesaurus (orig r7665):  caelum | 2009-09-16 15:06:35 +0200
  use execute_array for insert_bulk, test insert_bulk with blobs, clean up blob tests a bit
  r7680 at Thesaurus (orig r7669):  ribasushi | 2009-09-16 19:36:19 +0200
  Remove branched changes
  r7682 at Thesaurus (orig r7671):  caelum | 2009-09-17 03:03:34 +0200
  I'll rewrite this bit tomorrow to be less retarded
  r7684 at Thesaurus (orig r7673):  caelum | 2009-09-18 04:03:15 +0200
  fix yesterday's stuff, identity_update works, blob updates are better
  r7686 at Thesaurus (orig r7675):  caelum | 2009-09-18 04:22:38 +0200
  column no longer necessary in test
  r7688 at Thesaurus (orig r7677):  caelum | 2009-09-18 08:33:14 +0200
  fix freetds
  r7691 at Thesaurus (orig r7680):  ribasushi | 2009-09-18 12:25:42 +0200
   r7678 at Thesaurus (orig r7667):  ribasushi | 2009-09-16 19:31:14 +0200
   New subbranch
   r7679 at Thesaurus (orig r7668):  ribasushi | 2009-09-16 19:34:29 +0200
   Caelum's work so far
   r7690 at Thesaurus (orig r7679):  caelum | 2009-09-18 11:10:16 +0200
   support for blobs in insert_bulk fallback
  
  r7692 at Thesaurus (orig r7681):  ribasushi | 2009-09-18 12:28:09 +0200
  Rollback all bulk insert code before merge
 
 r7699 at Thesaurus (orig r7688):  ribasushi | 2009-09-18 14:12:05 +0200
 Cleanup exception handling
 r7700 at Thesaurus (orig r7689):  ribasushi | 2009-09-18 14:22:02 +0200
 duh
 r7701 at Thesaurus (orig r7690):  ribasushi | 2009-09-18 14:25:06 +0200
 Minor cleanup of RSC with has_many joins
 r7702 at Thesaurus (orig r7691):  ribasushi | 2009-09-18 14:32:15 +0200
 Changes and dev notes in makefile
 r7705 at Thesaurus (orig r7694):  ribasushi | 2009-09-18 14:52:26 +0200
 Nothing says the grouping column can not be nullable
 r7706 at Thesaurus (orig r7695):  ribasushi | 2009-09-18 14:53:33 +0200
 Changes
 r7707 at Thesaurus (orig r7696):  ribasushi | 2009-09-18 20:09:04 +0200
 This code belongs in Storage::DBI
 r7708 at Thesaurus (orig r7697):  ribasushi | 2009-09-18 20:38:26 +0200
 Clear up some legacy cruft and straighten inheritance
 r7710 at Thesaurus (orig r7699):  ribasushi | 2009-09-21 00:25:20 +0200
 Backout sybase changes
 r7713 at Thesaurus (orig r7702):  ribasushi | 2009-09-21 00:46:32 +0200
 Missed a part of the revert
 r7720 at Thesaurus (orig r7709):  ribasushi | 2009-09-21 02:49:11 +0200
 Oops
 r7721 at Thesaurus (orig r7710):  ribasushi | 2009-09-21 11:02:14 +0200
 Changes
 r7722 at Thesaurus (orig r7711):  ribasushi | 2009-09-21 12:49:30 +0200
 Undocument the from attribute (the description was mostly outdated anyway)
 r7723 at Thesaurus (orig r7712):  ribasushi | 2009-09-21 12:58:58 +0200
 Release 0.08112
 r7726 at Thesaurus (orig r7715):  ribasushi | 2009-09-21 16:26:07 +0200
 A test for an obscure join syntax - make sure we don't break it
 r7732 at Thesaurus (orig r7721):  ribasushi | 2009-09-22 12:58:09 +0200
 this would break in the future - sanitize sql fed to the tester
 r7735 at Thesaurus (orig r7724):  ribasushi | 2009-09-22 13:07:31 +0200
 The hack is no longer necessary with a recent sqla
 r7740 at Thesaurus (orig r7729):  caelum | 2009-09-24 23:44:01 +0200
 add test for multiple active statements in mssql over dbd::sybase
 r7741 at Thesaurus (orig r7730):  caelum | 2009-09-25 08:46:22 +0200
 test on_connect_do with a coderef connect_info too
 r7742 at Thesaurus (orig r7731):  caelum | 2009-09-25 23:26:52 +0200
 failing test for simple transaction with mssql via dbd::sybase
 r7765 at Thesaurus (orig r7753):  ribasushi | 2009-10-03 15:49:14 +0200
 Test reorg (no changes)
 r7766 at Thesaurus (orig r7754):  ribasushi | 2009-10-03 15:55:25 +0200
 Add failing tests for RT#50003
 r7767 at Thesaurus (orig r7755):  caelum | 2009-10-03 16:09:45 +0200
 fix on_connect_ with coderef connect_info
 r7771 at Thesaurus (orig r7759):  ribasushi | 2009-10-04 13:17:53 +0200
 Fix AutoCast's POD
 r7782 at Thesaurus (orig r7770):  ribasushi | 2009-10-09 06:57:20 +0200
  r7777 at Thesaurus (orig r7765):  frew | 2009-10-07 20:05:05 +0200
  add method to check if an rs is paginated
  r7778 at Thesaurus (orig r7766):  frew | 2009-10-07 20:31:02 +0200
  is_paginated method and test
  r7780 at Thesaurus (orig r7768):  frew | 2009-10-09 06:45:36 +0200
  change name of method
  r7781 at Thesaurus (orig r7769):  frew | 2009-10-09 06:47:31 +0200
  add message to changelog for is_paged
 
 r7785 at Thesaurus (orig r7773):  ribasushi | 2009-10-09 11:00:36 +0200
 Ugh CRLF
 r7786 at Thesaurus (orig r7774):  ribasushi | 2009-10-09 11:04:35 +0200
 Skip versioning test on really old perls lacking Time::HiRes
 r7787 at Thesaurus (orig r7775):  ribasushi | 2009-10-09 11:04:50 +0200
 Changes
 r7788 at Thesaurus (orig r7776):  triode | 2009-10-09 22:32:04 +0200
 added troubleshooting case of excessive memory allocation involving TEXT/BLOB/etc
 columns and large LongReadLen
 
 r7789 at Thesaurus (orig r7777):  triode | 2009-10-09 22:44:21 +0200
 added my name to contributors list
 
 r7790 at Thesaurus (orig r7778):  ribasushi | 2009-10-10 18:49:15 +0200
 Whoops, this isn't right
 r7791 at Thesaurus (orig r7779):  ribasushi | 2009-10-11 15:44:18 +0200
 More ordered fixes
 r7793 at Thesaurus (orig r7781):  norbi | 2009-10-13 11:27:18 +0200
  r7982 at vger:  mendel | 2009-10-13 11:26:11 +0200
  Fixed a typo and a POD error.
 
 r7805 at Thesaurus (orig r7793):  ribasushi | 2009-10-16 14:28:35 +0200
 Fix test to stop failing when DT-support is not present
 r7811 at Thesaurus (orig r7799):  caelum | 2009-10-18 11:13:29 +0200
  r20728 at hlagh (orig r7703):  ribasushi | 2009-09-20 18:51:16 -0400
  Another try at a clean sybase branch
  r20730 at hlagh (orig r7705):  ribasushi | 2009-09-20 18:58:09 -0400
  Part one of the sybase work by Caelum (mostly reviewed)
  r20731 at hlagh (orig r7706):  ribasushi | 2009-09-20 19:18:40 -0400
  main sybase branch ready
  r21051 at hlagh (orig r7797):  caelum | 2009-10-18 04:57:43 -0400
   r20732 at hlagh (orig r7707):  ribasushi | 2009-09-20 19:20:00 -0400
   Branch for bulk insert
   r20733 at hlagh (orig r7708):  ribasushi | 2009-09-20 20:06:21 -0400
   All sybase bulk-insert code by Caelum
   r20750 at hlagh (orig r7725):  caelum | 2009-09-24 02:47:39 -0400
   clean up set_identity stuff
   r20751 at hlagh (orig r7726):  caelum | 2009-09-24 05:21:18 -0400
   minor cleanups, test update of blob to NULL
   r20752 at hlagh (orig r7727):  caelum | 2009-09-24 08:45:04 -0400
   remove some duplicate code
   r20753 at hlagh (orig r7728):  caelum | 2009-09-24 09:57:58 -0400
   fix insert with all defaults
   r20786 at hlagh (orig r7732):  caelum | 2009-09-25 21:17:16 -0400
   some cleanups
   r20804 at hlagh (orig r7736):  caelum | 2009-09-28 05:31:38 -0400
   minor changes
   r20805 at hlagh (orig r7737):  caelum | 2009-09-28 06:25:48 -0400
   fix DT stuff
   r20809 at hlagh (orig r7741):  caelum | 2009-09-28 22:25:55 -0400
   removed some dead code, added fix and test for _execute_array_empty
   r20811 at hlagh (orig r7743):  caelum | 2009-09-29 13:36:20 -0400
   minor changes after review
   r20812 at hlagh (orig r7744):  caelum | 2009-09-29 14:16:03 -0400
   do not clobber $rv from execute_array
   r20813 at hlagh (orig r7745):  caelum | 2009-09-29 14:38:14 -0400
   make insert_bulk atomic
   r20815 at hlagh (orig r7747):  caelum | 2009-09-29 20:35:26 -0400
   remove _exhaaust_statements
   r20816 at hlagh (orig r7748):  caelum | 2009-09-29 21:48:38 -0400
   fix insert_bulk when not using bulk api inside a txn
   r20831 at hlagh (orig r7749):  caelum | 2009-09-30 02:53:42 -0400
   added test for populate being atomic
   r20832 at hlagh (orig r7750):  caelum | 2009-09-30 03:00:59 -0400
   factor out subclass-specific _execute_array callback
   r20833 at hlagh (orig r7751):  caelum | 2009-10-01 11:59:30 -0400
   remove a piece of dead code
   r20840 at hlagh (orig r7758):  caelum | 2009-10-03 15:46:56 -0400
   remove _pretty_print
   r20842 at hlagh (orig r7760):  caelum | 2009-10-04 16:19:56 -0400
   minor optimization for insert_bulk
   r21050 at hlagh (orig r7796):  caelum | 2009-10-18 04:56:54 -0400
   error checking related to literal SQL for insert_bulk
  
 
 r7820 at Thesaurus (orig r7808):  caelum | 2009-10-21 03:10:39 +0200
 add test for populate with literal sql mixed with binds, improve error messages
 r7823 at Thesaurus (orig r7811):  ribasushi | 2009-10-21 16:33:45 +0200
 Show what's wrong with the current populate code
 r7824 at Thesaurus (orig r7812):  caelum | 2009-10-22 11:10:38 +0200
 stringify values passed to populate/insert_bulk
 r7825 at Thesaurus (orig r7813):  ribasushi | 2009-10-22 13:17:41 +0200
 Some smokers run the suite for 30 *minutes* - the timeout seems to be too short for them (boggle)
 r7826 at Thesaurus (orig r7814):  caelum | 2009-10-22 14:41:37 +0200
 a few extra tests can never hurt, right? :)
 r7827 at Thesaurus (orig r7815):  ribasushi | 2009-10-23 10:51:05 +0200
 Prevent sqlt from failing silently
 r7828 at Thesaurus (orig r7816):  ribasushi | 2009-10-23 10:52:49 +0200
 { is_foreign_key_constraint => 0, on_delete => undef } is a valid construct - no need to carp
 r7832 at Thesaurus (orig r7820):  robkinyon | 2009-10-26 20:11:22 +0100
 Fixed bad if-check in columns()
 r7840 at Thesaurus (orig r7828):  caelum | 2009-10-31 14:01:56 +0100
 change repository in meta to point to real svn url rather than svnweb
 r7842 at Thesaurus (orig r7830):  caelum | 2009-10-31 21:04:39 +0100
 pass sqlite_version to SQLT
 r7843 at Thesaurus (orig r7831):  caelum | 2009-10-31 21:22:37 +0100
 fix regex to numify sqlite_version
 r7844 at Thesaurus (orig r7832):  caelum | 2009-10-31 23:59:19 +0100
 work-around disconnect bug with DBD::Pg 2.15.1
 r7855 at Thesaurus (orig r7843):  ribasushi | 2009-11-04 10:55:51 +0100
  r7817 at Thesaurus (orig r7805):  rbuels | 2009-10-21 02:37:28 +0200
  making a branch, here we go again with the pg_unqualified_schema
  r7818 at Thesaurus (orig r7806):  rbuels | 2009-10-21 02:38:59 +0200
  more pg unqualified schema tests, which expose a gap in the coverage
  r7819 at Thesaurus (orig r7807):  rbuels | 2009-10-21 03:10:38 +0200
  gutted Pg storage driver's sequence discovery to just rely on DBD::Pg's last_insert_id.  this needs testing with older versions of DBD::Pg
  r7821 at Thesaurus (orig r7809):  rbuels | 2009-10-21 04:00:39 +0200
  more coverage in Pg sequence-discovery tests.  i think this shows why last_insert_id cannot be used.
  r7822 at Thesaurus (orig r7810):  rbuels | 2009-10-21 04:07:05 +0200
  reverted [7807], and just changed code to use the custom pg_catalog query, which is the only thing that works in the pathological case where DBIC is told a different primary key from the primary key that is set on the table in the DB ([7809] added testing for this)
  r7852 at Thesaurus (orig r7840):  rbuels | 2009-11-03 18:47:05 +0100
  added Changes line mentioning tweak to Pg auto-inc fix
  r7854 at Thesaurus (orig r7842):  ribasushi | 2009-11-04 10:55:35 +0100
  Cleanup exceptions
 
 r7858 at Thesaurus (orig r7846):  caelum | 2009-11-06 16:01:30 +0100
 transactions for MSSQL over DBD::Sybase
 r7861 at Thesaurus (orig r7849):  caelum | 2009-11-10 13:16:18 +0100
 made commit/rollback when disconnected an exception
 r7862 at Thesaurus (orig r7850):  robkinyon | 2009-11-10 17:19:57 +0100
 Added a note about select
 r7863 at Thesaurus (orig r7851):  ribasushi | 2009-11-10 18:23:10 +0100
 Changes
 r7867 at Thesaurus (orig r7855):  frew | 2009-11-11 21:56:37 +0100
 RT50874
 r7868 at Thesaurus (orig r7856):  frew | 2009-11-11 23:50:43 +0100
 RT50828
 r7869 at Thesaurus (orig r7857):  frew | 2009-11-11 23:54:15 +0100
 clearer test message
 r7870 at Thesaurus (orig r7858):  frew | 2009-11-12 00:37:27 +0100
 some cleanup for $rs->populate
 r7872 at Thesaurus (orig r7860):  ribasushi | 2009-11-12 01:35:36 +0100
 Fix find on resultset with custom result_class
 r7873 at Thesaurus (orig r7861):  ribasushi | 2009-11-12 01:40:14 +0100
 Fix return value of in_storage
 r7874 at Thesaurus (orig r7862):  ribasushi | 2009-11-12 01:43:48 +0100
 Extra FAQ entry
 r7875 at Thesaurus (orig r7863):  ribasushi | 2009-11-12 02:11:25 +0100
 Sanify _determine_driver handling in ::Storage::DBI
 r7876 at Thesaurus (orig r7864):  ribasushi | 2009-11-12 02:14:37 +0100
 Add mysql determine_driver test by Pedro Melo
 r7881 at Thesaurus (orig r7869):  ribasushi | 2009-11-12 11:10:04 +0100
 _cond_for_update_delete is hopelessly broken attempting to introspect SQLA1. Replace with a horrific but effective hack
 r7882 at Thesaurus (orig r7870):  ribasushi | 2009-11-12 11:15:12 +0100
 Clarifying comment
 r7884 at Thesaurus (orig r7872):  ribasushi | 2009-11-13 00:13:40 +0100
 The real fix for the non-introspectable condition bug, mst++
 r7885 at Thesaurus (orig r7873):  ribasushi | 2009-11-13 00:24:56 +0100
 Some cleanup
 r7887 at Thesaurus (orig r7875):  frew | 2009-11-13 10:01:37 +0100
 fix subtle bug with Sybase database type determination
 r7892 at Thesaurus (orig r7880):  frew | 2009-11-14 00:53:29 +0100
 release woo!
 r7894 at Thesaurus (orig r7882):  caelum | 2009-11-14 03:57:52 +0100
 fix oracle dep in Makefile.PL
 r7895 at Thesaurus (orig r7883):  caelum | 2009-11-14 04:20:53 +0100
 skip Oracle BLOB tests on DBD::Oracle == 1.23
 r7897 at Thesaurus (orig r7885):  caelum | 2009-11-14 09:40:01 +0100
  r7357 at pentium (orig r7355):  caelum | 2009-08-20 17:58:23 -0400
  branch to support MSSQL over ADO
  r7358 at pentium (orig r7356):  caelum | 2009-08-21 00:32:14 -0400
  something apparently working
  r7359 at pentium (orig r7357):  caelum | 2009-08-21 00:53:53 -0400
  slightly better mars test, still passes
 
 r7899 at Thesaurus (orig r7887):  caelum | 2009-11-14 09:41:54 +0100
  r7888 at pentium (orig r7886):  caelum | 2009-11-14 03:41:25 -0500
  add TODO test for large column list in select
 
 r7901 at Thesaurus (orig r7889):  caelum | 2009-11-14 09:47:16 +0100
 add ADO/MSSQL to Changes
 r7902 at Thesaurus (orig r7890):  caelum | 2009-11-14 10:27:29 +0100
 fix the large column list test for ADO/MSSQL, now passes
 r7904 at Thesaurus (orig r7892):  caelum | 2009-11-14 12:20:58 +0100
 fix Changes (ADO change in wrong release)
 r7905 at Thesaurus (orig r7893):  ribasushi | 2009-11-14 19:23:23 +0100
 Release 0.08114
 r7907 at Thesaurus (orig r7895):  ribasushi | 2009-11-15 12:09:17 +0100
 Failing test to highlight mssql autoconnect regression
 r7908 at Thesaurus (orig r7896):  ribasushi | 2009-11-15 12:20:25 +0100
 Fix plan
 r7913 at Thesaurus (orig r7901):  ribasushi | 2009-11-15 13:11:38 +0100
  r7773 at Thesaurus (orig r7761):  norbi | 2009-10-05 14:49:06 +0200
  Created branch 'prefetch_bug-unqualified_column_in_search_related_cond': A bug that manifests when a prefetched table's column is referenced without the table name in the condition of a search_related() on an M:N relationship.
  r7878 at Thesaurus (orig r7866):  ribasushi | 2009-11-12 02:36:08 +0100
  Factor some code out
  r7879 at Thesaurus (orig r7867):  ribasushi | 2009-11-12 09:11:03 +0100
  Factor out more stuff
  r7880 at Thesaurus (orig r7868):  ribasushi | 2009-11-12 09:21:04 +0100
  Saner naming/comments
  r7910 at Thesaurus (orig r7898):  ribasushi | 2009-11-15 12:39:29 +0100
  Move more code to DBIHacks, put back the update/delete rs check, just in case
  r7911 at Thesaurus (orig r7899):  ribasushi | 2009-11-15 13:01:34 +0100
  TODOify test until we get an AST
  r7912 at Thesaurus (orig r7900):  ribasushi | 2009-11-15 13:10:15 +0100
  Hide from pause
 
 r7921 at Thesaurus (orig r7909):  ribasushi | 2009-11-15 14:17:48 +0100
  r7871 at Thesaurus (orig r7859):  ribasushi | 2009-11-12 00:46:07 +0100
  Branches to test some ideas
  r7889 at Thesaurus (orig r7877):  abraxxa | 2009-11-13 12:05:50 +0100
  added rels to view result classes in test schema
  
  r7890 at Thesaurus (orig r7878):  abraxxa | 2009-11-13 13:05:45 +0100
  seems I found the bugger
  
  r7917 at Thesaurus (orig r7905):  ribasushi | 2009-11-15 13:29:23 +0100
  FK constraints towards a view don't quite work
  r7918 at Thesaurus (orig r7906):  ribasushi | 2009-11-15 14:10:10 +0100
  Turn into a straight-inheritance view class
  r7919 at Thesaurus (orig r7907):  ribasushi | 2009-11-15 14:11:03 +0100
  Extensive test of virtual and classic view relationships
  r7920 at Thesaurus (orig r7908):  ribasushi | 2009-11-15 14:17:23 +0100
  Fix non-sqlt schema file
 
 r7923 at Thesaurus (orig r7911):  caelum | 2009-11-15 18:31:37 +0100
 fix MSSQL via DBD::Sybase regression
 r7930 at Thesaurus (orig r7918):  ribasushi | 2009-11-16 19:15:45 +0100
  r7864 at Thesaurus (orig r7852):  edenc | 2009-11-10 20:15:15 +0100
  branching for fixes related to prefetch, distinct and group by
  r7865 at Thesaurus (orig r7853):  edenc | 2009-11-10 20:21:38 +0100
  added test case for ensuring a column mentioned in the order by clause is also included in the group by clause
  r7926 at Thesaurus (orig r7914):  ribasushi | 2009-11-16 08:09:30 +0100
  Make _resolve_column_info function without supplying column names
  r7927 at Thesaurus (orig r7915):  ribasushi | 2009-11-16 08:11:17 +0100
  Fix order_by/distinct bug
 
 r7937 at Thesaurus (orig r7925):  ribasushi | 2009-11-19 12:04:21 +0100
 Bail out early in Versioned if no versioning checks are requested
 r7938 at Thesaurus (orig r7926):  ribasushi | 2009-11-19 12:06:13 +0100
 POD fixes
 r7940 at Thesaurus (orig r7928):  caelum | 2009-11-22 11:03:33 +0100
 fix connection setup for Sybase
 r7943 at Thesaurus (orig r7931):  caelum | 2009-11-22 13:27:43 +0100
 override _run_connection_actions for internal connection setup in sybase stuff, much cleaner this way
 r7947 at Thesaurus (orig r7935):  ribasushi | 2009-11-23 01:18:28 +0100
 Whoops
 r7948 at Thesaurus (orig r7936):  ribasushi | 2009-11-23 01:28:50 +0100
 Fix ::Versioned regression introduced in r7925
 r7951 at Thesaurus (orig r7939):  caelum | 2009-11-23 12:32:10 +0100
 add subname to rdbms_specific_methods wrapper
 r7953 at Thesaurus (orig r7941):  caelum | 2009-11-23 13:23:14 +0100
  r21187 at hlagh (orig r7933):  ribasushi | 2009-11-22 18:38:34 -0500
  New sybase refactor branch
  r21188 at hlagh (orig r7934):  ribasushi | 2009-11-22 19:06:48 -0500
  refactor part1
  r21192 at hlagh (orig r7938):  ribasushi | 2009-11-22 19:30:05 -0500
  refactor part 2
  r21194 at hlagh (orig r7940):  caelum | 2009-11-23 07:06:46 -0500
  fix test
 
 r7955 at Thesaurus (orig r7943):  ribasushi | 2009-11-23 16:30:13 +0100
 Add missing Sub::Name invocations and improve the SQLA Carp overrides
 r7957 at Thesaurus (orig r7945):  ribasushi | 2009-11-24 10:12:49 +0100
  r7749 at Thesaurus (orig r7738):  norbi | 2009-09-28 22:01:39 +0200
  Created branch 'void_populate_resultset_cond': Fixing a bug: $rs->populate in void context does not use the conditions from $rs.
  r7751 at Thesaurus (orig r7740):  norbi | 2009-09-28 23:26:06 +0200
   r7935 at vger:  mendel | 2009-09-28 23:25:52 +0200
   Undid the previous tweaks to the already existing tests and added new tests instead.
  
  r7928 at Thesaurus (orig r7916):  ribasushi | 2009-11-16 08:48:42 +0100
  Change plan
  r7956 at Thesaurus (orig r7944):  ribasushi | 2009-11-24 10:10:49 +0100
  Better naming and a bit leaner implementation. Main idea remains the same
 
 r7959 at Thesaurus (orig r7947):  ribasushi | 2009-11-24 10:39:52 +0100
 Changes and prevent a spurious todo-pass
 r7962 at Thesaurus (orig r7950):  ribasushi | 2009-11-24 19:43:42 +0100
 Extra sqla quoting test
 r7963 at Thesaurus (orig r7951):  ribasushi | 2009-11-24 19:48:01 +0100
 Extra sqla quoting test(2)
 r7964 at Thesaurus (orig r7952):  ribasushi | 2009-11-25 21:24:10 +0100
 wtf
 r7967 at Thesaurus (orig r7955):  ribasushi | 2009-11-26 11:07:06 +0100
 cleanups
 r7968 at Thesaurus (orig r7956):  ribasushi | 2009-11-26 12:11:21 +0100
 Sanify search_related chaining code (no functional changes)
 r7969 at Thesaurus (orig r7957):  ribasushi | 2009-11-26 12:52:05 +0100
 Another count() quirk down
 r7970 at Thesaurus (orig r7958):  ribasushi | 2009-11-26 14:23:28 +0100
 Add a no-accessor column to generally test handling
 r7972 at Thesaurus (orig r7960):  ribasushi | 2009-11-26 15:32:17 +0100
 Whoops, wrong accessor (things still work though)
 r7977 at Thesaurus (orig r7965):  ribasushi | 2009-11-26 16:43:21 +0100
  r7971 at Thesaurus (orig r7959):  ribasushi | 2009-11-26 14:54:17 +0100
  New branch for get_inflated_column bugfix
  r7974 at Thesaurus (orig r7962):  ribasushi | 2009-11-26 15:56:20 +0100
  Fix for rt46953
  r7975 at Thesaurus (orig r7963):  ribasushi | 2009-11-26 16:05:17 +0100
  Make Test::More happy
  r7976 at Thesaurus (orig r7964):  ribasushi | 2009-11-26 16:43:09 +0100
  Changes
 
 r7980 at Thesaurus (orig r7968):  ribasushi | 2009-11-27 01:38:11 +0100
 Fix search_related wrt grouped resultsets (distinct is currently passed to the new resultset, this is probably wrong)
 r7987 at Thesaurus (orig r7975):  ribasushi | 2009-11-28 16:54:23 +0100
 Cleanup the s.c.o. index
 r7988 at Thesaurus (orig r7976):  ribasushi | 2009-11-28 16:57:04 +0100
 Test based on http://lists.scsys.co.uk/pipermail/dbix-class/2009-November/008599.html
 r8007 at Thesaurus (orig r7995):  castaway | 2009-11-30 16:20:19 +0100
 Remove over-emphasis on +select/+as. Add docs on prefetch and other ways to get related data, with caveats etc. 
 
 r8009 at Thesaurus (orig r7997):  dew | 2009-11-30 19:37:00 +0100
 Alter the docs for has_many relationships to make them a little easier to grok
 r8021 at Thesaurus (orig r8009):  castaway | 2009-12-02 14:19:40 +0100
 Added note about prefetch and has_many related objects
 
 r8029 at Thesaurus (orig r8017):  ribasushi | 2009-12-03 13:24:04 +0100
 Source sanity check on subqueried update/delete
 r8030 at Thesaurus (orig r8018):  ribasushi | 2009-12-03 14:39:37 +0100
 Sanify populate arg handling
 r8040 at Thesaurus (orig r8028):  ribasushi | 2009-12-04 02:46:20 +0100
  r7935 at Thesaurus (orig r7923):  ribasushi | 2009-11-19 11:05:04 +0100
  Branches for RTs
  r7965 at Thesaurus (orig r7953):  ribasushi | 2009-11-26 00:19:21 +0100
  Test and fix scalarref in an inflatable slot corner-case
  r7966 at Thesaurus (orig r7954):  ribasushi | 2009-11-26 00:24:23 +0100
  Looks like we nailed a todo
  r8038 at Thesaurus (orig r8026):  ribasushi | 2009-12-04 02:45:40 +0100
  Changes
  r8039 at Thesaurus (orig r8027):  ribasushi | 2009-12-04 02:46:08 +0100
  Changes(2)
 
 r8055 at Thesaurus (orig r8043):  ribasushi | 2009-12-07 15:11:25 +0100
 Forgotten auto-savepoint example patch
 r8057 at Thesaurus (orig r8045):  ribasushi | 2009-12-08 14:13:38 +0100
 Weird test case
 r8058 at Thesaurus (orig r8046):  ribasushi | 2009-12-08 14:23:31 +0100
 Fix the test - code is correct
 r8063 at Thesaurus (orig r8051):  ribasushi | 2009-12-09 02:33:30 +0100
 It's almost 2010 - load_components ('Core') is like ewwww
 r8067 at Thesaurus (orig r8055):  caelum | 2009-12-09 18:13:33 +0100
 workaround for evil ADO bug
 r8068 at Thesaurus (orig r8056):  ribasushi | 2009-12-09 23:13:59 +0100
  r8022 at Thesaurus (orig r8010):  frew | 2009-12-02 17:57:17 +0100
  branch for replacing TOP with RNO in MSSQL
  r8027 at Thesaurus (orig r8015):  frew | 2009-12-03 02:48:36 +0100
  Switch to RowNumberOver for MSSQL
  r8028 at Thesaurus (orig r8016):  ribasushi | 2009-12-03 10:03:18 +0100
  The correct top100 mssql solution and test
  r8031 at Thesaurus (orig r8019):  frew | 2009-12-03 15:56:35 +0100
  fix RNO for MSSQL to not use a kludgy regexp
  r8032 at Thesaurus (orig r8020):  frew | 2009-12-04 01:33:28 +0100
  initial (broken) version of 42rno.t
  r8033 at Thesaurus (orig r8021):  frew | 2009-12-04 01:37:06 +0100
  first shot at moving stuff around
  r8034 at Thesaurus (orig r8022):  frew | 2009-12-04 01:45:42 +0100
  rename files to get rid of numbers and use folders
  r8035 at Thesaurus (orig r8023):  frew | 2009-12-04 01:48:00 +0100
  missed toplimit
  r8036 at Thesaurus (orig r8024):  frew | 2009-12-04 01:52:44 +0100
  still broken rno test, but now it actually tests mssql
  r8042 at Thesaurus (orig r8030):  ribasushi | 2009-12-04 09:34:56 +0100
  Variable clash
  r8043 at Thesaurus (orig r8031):  ribasushi | 2009-12-04 11:44:47 +0100
  The complex prefetch rewrite actually takes care of this as cleanly as possible
  r8044 at Thesaurus (orig r8032):  ribasushi | 2009-12-04 11:47:22 +0100
  Smarter implementation of the select top 100pct subselect handling
  r8045 at Thesaurus (orig r8033):  ribasushi | 2009-12-04 12:07:05 +0100
  Add support for unordered limited resultsets
  Rename the limit helper to signify it is MS specific
  Make sure we don't lose group_by/having clauses
  r8046 at Thesaurus (orig r8034):  ribasushi | 2009-12-04 12:07:56 +0100
  Un-todoify mssql limit tests - no changes necessary (throw away the obsolete generated sql checks)
  r8047 at Thesaurus (orig r8035):  ribasushi | 2009-12-04 12:24:13 +0100
  Tests for bindvar propagation and Changes
  r8049 at Thesaurus (orig r8037):  ribasushi | 2009-12-04 15:01:32 +0100
  KISS - a select(1) makes perfect ordering criteria
  r8050 at Thesaurus (orig r8038):  ribasushi | 2009-12-04 15:06:11 +0100
  Unify the MSSQL and DB2 RNO implementations - they are the same
  r8051 at Thesaurus (orig r8039):  ribasushi | 2009-12-05 10:29:50 +0100
  Wrap mssql selects in yet another subquery to make limited right-ordered join resultsets possible
  r8052 at Thesaurus (orig r8040):  ribasushi | 2009-12-05 10:46:41 +0100
  Better not touch Top - it's too complex at this point
  r8053 at Thesaurus (orig r8041):  ribasushi | 2009-12-05 11:03:00 +0100
  Extend test just a bit more
  r8054 at Thesaurus (orig r8042):  ribasushi | 2009-12-05 11:44:25 +0100
  DB2 and MSSQL have different default order syntaxes
  r8056 at Thesaurus (orig r8044):  frew | 2009-12-08 02:10:06 +0100
  add version check for mssql 2005 and greater
  r8059 at Thesaurus (orig r8047):  frew | 2009-12-08 16:15:50 +0100
  real exception instead of die
  r8061 at Thesaurus (orig r8049):  ribasushi | 2009-12-09 00:19:49 +0100
  Test for immediate connection with known storage type
  r8062 at Thesaurus (orig r8050):  frew | 2009-12-09 01:24:45 +0100
  fix mssql version check so it's lazier
  r8064 at Thesaurus (orig r8052):  ribasushi | 2009-12-09 02:40:51 +0100
  Fix comment
  r8066 at Thesaurus (orig r8054):  caelum | 2009-12-09 16:12:56 +0100
  fix _get_mssql_version for ODBC
 
 r8071 at Thesaurus (orig r8059):  frew | 2009-12-10 00:32:55 +0100
 fail nicely if user doesn't have perms for xp_msver
 r8073 at Thesaurus (orig r8061):  ribasushi | 2009-12-10 09:36:21 +0100
 Changes
 r8074 at Thesaurus (orig r8062):  ribasushi | 2009-12-10 09:53:38 +0100
 First half of distinct cleanup
 r8075 at Thesaurus (orig r8063):  frew | 2009-12-10 16:04:37 +0100
 release 0.08115
 r8076 at Thesaurus (orig r8064):  ribasushi | 2009-12-12 12:31:12 +0100
 Even clearer unloaded FK exception
 r8078 at Thesaurus (orig r8066):  ribasushi | 2009-12-12 14:27:18 +0100
 As clear as it gets
 r8141 at Thesaurus (orig r8129):  ovid | 2009-12-16 17:40:50 +0100
 Have has_one/might_have warn if set on nullable columns.
 
 r8143 at Thesaurus (orig r8131):  caelum | 2009-12-17 13:30:10 +0100
 somewhat better fix for ADO
 r8144 at Thesaurus (orig r8132):  caelum | 2009-12-17 13:34:20 +0100
 minor changes
 r8146 at Thesaurus (orig r8134):  caelum | 2009-12-17 17:44:34 +0100
 cleanup source_bind_attributes for ADO
 r8147 at Thesaurus (orig r8135):  caelum | 2009-12-17 18:09:55 +0100
 more types for ADO fix, and documentation
 r8148 at Thesaurus (orig r8136):  abraxxa | 2009-12-17 19:54:55 +0100
 Cookbook POD fix for add_drop_table instead of add_drop_tables
 
 r8158 at Thesaurus (orig r8146):  ribasushi | 2009-12-18 14:55:53 +0100
  r8150 at Thesaurus (orig r8138):  abraxxa | 2009-12-17 23:22:07 +0100
  Views without a view_definition won't be added to the SQL::Translator::Schema by the parser + tests
  
  r8151 at Thesaurus (orig r8139):  abraxxa | 2009-12-17 23:23:33 +0100
  test cleanups
  
  r8153 at Thesaurus (orig r8141):  abraxxa | 2009-12-18 14:34:14 +0100
  throw_exception if view_definition is missing instead of silent skipping + test changes
  
  r8154 at Thesaurus (orig r8142):  abraxxa | 2009-12-18 14:40:32 +0100
  use Test::Exception
  
  r8155 at Thesaurus (orig r8143):  abraxxa | 2009-12-18 14:42:00 +0100
  fixed Changes
  
  r8156 at Thesaurus (orig r8144):  abraxxa | 2009-12-18 14:44:52 +0100
  test cleanups
  
  r8157 at Thesaurus (orig r8145):  ribasushi | 2009-12-18 14:46:26 +0100
  Another bitr
 
 r8160 at Thesaurus (orig r8148):  ribasushi | 2009-12-18 15:04:34 +0100
 Fix no_index entries
 r8162 at Thesaurus (orig r8150):  abraxxa | 2009-12-18 15:59:58 +0100
Schema POD improvement for dclone
 
 r8163 at Thesaurus (orig r8151):  abraxxa | 2009-12-18 16:07:27 +0100
 link to DBIx::Class::Row
 
 r8164 at Thesaurus (orig r8152):  abraxxa | 2009-12-18 16:08:56 +0100
 fixed typo in Changes
 
 r8165 at Thesaurus (orig r8153):  abraxxa | 2009-12-18 16:14:47 +0100
 dclone pod take #2
 
 r8169 at Thesaurus (orig r8157):  ribasushi | 2009-12-19 18:47:42 +0100
 detabify
 r8170 at Thesaurus (orig r8158):  ribasushi | 2009-12-19 19:41:42 +0100
 Fix RT52812
 r8171 at Thesaurus (orig r8159):  caelum | 2009-12-23 07:16:29 +0100
 minor POD fixes
 r8175 at Thesaurus (orig r8163):  ribasushi | 2009-12-24 09:59:52 +0100
 Fix deployment_statements context sensitivity regression
 r8176 at Thesaurus (orig r8164):  ribasushi | 2009-12-24 10:13:37 +0100
 Don't call the PK setter if no PK
 r8204 at Thesaurus (orig r8192):  caelum | 2009-12-30 22:58:47 +0100
 bump CAG dep
 r8231 at Thesaurus (orig r8219):  matthewt | 2010-01-02 01:41:12 +0100
 fix typo in variable name
 r8238 at Thesaurus (orig r8226):  rafl | 2010-01-02 18:46:40 +0100
 Merge branch 'native_traits'
 
 * native_traits:
   Port replicated storage from MXAH to native traits.
   Create branch native_traits
 r8244 at Thesaurus (orig r8232):  caelum | 2010-01-04 00:30:51 +0100
 fix _rebless into sybase/mssql/nobindvars
 r8247 at Thesaurus (orig r8235):  caelum | 2010-01-05 13:54:56 +0100
  r22328 at hlagh (orig r8201):  caelum | 2009-12-31 12:29:51 -0500
  new branch to fix table aliases in queries over the 30char limit
  r22329 at hlagh (orig r8202):  caelum | 2009-12-31 12:55:50 -0500
  failing test
  r22330 at hlagh (orig r8203):  caelum | 2009-12-31 13:00:35 -0500
  switch oracle tests to done_testing()
  r22331 at hlagh (orig r8204):  caelum | 2009-12-31 15:02:50 -0500
  got something working
  r22332 at hlagh (orig r8205):  caelum | 2009-12-31 15:08:30 -0500
  POD touchups
  r22343 at hlagh (orig r8216):  caelum | 2010-01-01 07:42:03 -0500
  fix uninitialized warning and a bug in ResultSet
  r22419 at hlagh (orig r8234):  caelum | 2010-01-05 07:53:18 -0500
  append half of a base64 MD5 to shortened table aliases for Oracle
 
 r8249 at Thesaurus (orig r8237):  caelum | 2010-01-05 15:27:40 +0100
 minor change: use more of the hash if possible for oracle table alias shortening
 r8251 at Thesaurus (orig r8239):  caelum | 2010-01-06 02:20:17 +0100
 bump perl_version to 5.8.1
 r8252 at Thesaurus (orig r8240):  caelum | 2010-01-06 02:21:41 +0100
 remove alignment mark on base64 md5
 r8260 at Thesaurus (orig r8248):  ribasushi | 2010-01-07 11:21:55 +0100
 5.8.1 is minimum required perl
 r8261 at Thesaurus (orig r8249):  ribasushi | 2010-01-07 11:22:42 +0100
 Minor optimization
 r8262 at Thesaurus (orig r8250):  ribasushi | 2010-01-07 11:23:35 +0100
 Wrong title
 r8265 at Thesaurus (orig r8253):  ribasushi | 2010-01-08 17:48:50 +0100
 Resolve problem reported by http://lists.scsys.co.uk/pipermail/dbix-class/2009-December/008699.html
 r8266 at Thesaurus (orig r8254):  ribasushi | 2010-01-08 17:52:01 +0100
 Put utf8columns in line with the store_column fix
 r8267 at Thesaurus (orig r8255):  ribasushi | 2010-01-08 19:03:26 +0100
 Tests while hunting for something else
 r8268 at Thesaurus (orig r8256):  ribasushi | 2010-01-08 19:14:42 +0100
 Make test look even more like http://lists.scsys.co.uk/pipermail/dbix-class/2009-November/008599.html
 r8277 at Thesaurus (orig r8265):  ribasushi | 2010-01-09 02:16:14 +0100
  r8263 at Thesaurus (orig r8251):  ribasushi | 2010-01-08 15:43:38 +0100
  New branch to find a leak
  r8264 at Thesaurus (orig r8252):  ribasushi | 2010-01-08 15:52:46 +0100
  Weird test failures
  r8272 at Thesaurus (orig r8260):  ribasushi | 2010-01-09 01:24:56 +0100
  Proper invocation
  r8273 at Thesaurus (orig r8261):  ribasushi | 2010-01-09 01:35:34 +0100
  Test for the real leak reason
  r8274 at Thesaurus (orig r8262):  ribasushi | 2010-01-09 01:37:33 +0100
  Void ctx as it should be
  r8275 at Thesaurus (orig r8263):  ribasushi | 2010-01-09 02:10:13 +0100
  A "fix" for sqlt-related schema leaks
  r8276 at Thesaurus (orig r8264):  ribasushi | 2010-01-09 02:15:53 +0100
  Changes
 
 r8287 at Thesaurus (orig r8275):  caelum | 2010-01-10 11:29:06 +0100
  r22483 at hlagh (orig r8272):  caelum | 2010-01-09 05:52:15 -0500
  new branch to add "normalize_connect_info" class method to Storage::DBI
  r22495 at hlagh (orig r8274):  caelum | 2010-01-10 05:27:42 -0500
  split connect_info parser out into private _normalize_connect_info
 
 r8289 at Thesaurus (orig r8277):  caelum | 2010-01-10 12:04:52 +0100
 fix connection details in ::DBI::Replicated docs
 r8291 at Thesaurus (orig r8279):  ribasushi | 2010-01-11 09:50:21 +0100
  r8077 at Thesaurus (orig r8065):  ribasushi | 2009-12-12 14:24:30 +0100
  Branch for yet another mssql ordered prefetch problem
  r8079 at Thesaurus (orig r8067):  ribasushi | 2009-12-12 14:37:48 +0100
  prefetch does not get disassembled properly
  r8112 at Thesaurus (orig r8100):  ribasushi | 2009-12-13 00:07:00 +0100
  Extra test to highlight search_related inefficiency
  r8113 at Thesaurus (orig r8101):  ribasushi | 2009-12-13 00:17:44 +0100
  Real test for search_related and prefetch
  r8114 at Thesaurus (orig r8102):  ribasushi | 2009-12-13 00:19:57 +0100
  Fix corner case regression on search_related on a prefetching rs
  r8115 at Thesaurus (orig r8103):  ribasushi | 2009-12-13 00:21:05 +0100
  Isolate prefetch heads using RNO with a subquery
  r8116 at Thesaurus (orig r8104):  ribasushi | 2009-12-13 00:23:46 +0100
  Changes
  r8125 at Thesaurus (orig r8113):  ribasushi | 2009-12-15 13:06:26 +0100
  Extend mssql limited prefetch tests
  r8126 at Thesaurus (orig r8114):  ribasushi | 2009-12-15 13:08:56 +0100
  Add extra test to prove Alan wrong :)
  r8132 at Thesaurus (orig r8120):  ribasushi | 2009-12-16 00:38:04 +0100
  Do not realias tables in the RNO subqueries
  r8133 at Thesaurus (orig r8121):  ribasushi | 2009-12-16 00:50:52 +0100
  Deliberately disturb alphabetical order
  r8134 at Thesaurus (orig r8122):  ribasushi | 2009-12-16 10:26:43 +0100
  Got a failing test
  r8135 at Thesaurus (orig r8123):  ribasushi | 2009-12-16 10:49:10 +0100
  Cleanup
  r8136 at Thesaurus (orig r8124):  ribasushi | 2009-12-16 10:51:58 +0100
  More moving around
  r8137 at Thesaurus (orig r8125):  ribasushi | 2009-12-16 11:25:37 +0100
  The real mssql problem - it's... bad
  r8138 at Thesaurus (orig r8126):  ribasushi | 2009-12-16 11:29:20 +0100
  Clearer debug
  r8139 at Thesaurus (orig r8127):  ribasushi | 2009-12-16 11:47:48 +0100
  This is horrific but the tests pass... maybe someone will figure out something better
  r8140 at Thesaurus (orig r8128):  ribasushi | 2009-12-16 16:45:47 +0100
  cleanup tests
  r8187 at Thesaurus (orig r8175):  ribasushi | 2009-12-24 16:22:30 +0100
  Ordered subqueries do not work in mssql after all
  r8271 at Thesaurus (orig r8259):  ribasushi | 2010-01-08 23:58:13 +0100
  Cleaner RNO sql
  r8279 at Thesaurus (orig r8267):  ribasushi | 2010-01-09 10:13:16 +0100
  Subqueries no longer experimental
  r8280 at Thesaurus (orig r8268):  ribasushi | 2010-01-09 11:26:46 +0100
  Close the book on mssql ordered subqueries
  r8281 at Thesaurus (orig r8269):  ribasushi | 2010-01-09 11:36:36 +0100
  Changes and typos
  r8283 at Thesaurus (orig r8271):  ribasushi | 2010-01-09 11:42:21 +0100
  Highlight the real problem
  r8285 at Thesaurus (orig r8273):  ribasushi | 2010-01-10 10:07:10 +0100
  Rename subquery to subselect and rewrite POD (per castaway)
  r8290 at Thesaurus (orig r8278):  ribasushi | 2010-01-10 17:01:24 +0100
  rename as per mst
 
 r8295 at Thesaurus (orig r8283):  caelum | 2010-01-11 23:42:30 +0100
 make a public ::Schema::unregister_source
 r8298 at Thesaurus (orig r8286):  abraxxa | 2010-01-12 18:04:18 +0100
 fixed a typo in Changes
 more detailed explanation for the warning about has_one/might_have rels on nullable columns
 
 r8307 at Thesaurus (orig r8295):  abraxxa | 2010-01-13 17:28:05 +0100
 added the sources parser arg to the example code
 
 r8327 at Thesaurus (orig r8315):  ribasushi | 2010-01-15 01:25:39 +0100
  r8167 at Thesaurus (orig r8155):  ribasushi | 2009-12-19 12:50:13 +0100
  New branch for null-only-result fix
  r8168 at Thesaurus (orig r8156):  ribasushi | 2009-12-19 12:51:21 +0100
  Failing test
  r8322 at Thesaurus (orig r8310):  ribasushi | 2010-01-15 00:48:09 +0100
  Correct test order
  r8323 at Thesaurus (orig r8311):  ribasushi | 2010-01-15 01:15:33 +0100
  Generalize the to-node inner-join-er to apply to all related_resultset calls, not just counts
  r8324 at Thesaurus (orig r8312):  ribasushi | 2010-01-15 01:16:05 +0100
  Adjust sql-emitter tests
  r8326 at Thesaurus (orig r8314):  ribasushi | 2010-01-15 01:25:10 +0100
  One more sql-test fix and changes
 
 r8328 at Thesaurus (orig r8316):  ribasushi | 2010-01-15 01:31:58 +0100
 Strict mysql bugfix
 r8329 at Thesaurus (orig r8317):  ribasushi | 2010-01-15 01:38:53 +0100
 Better description of mysql strict option
 r8331 at Thesaurus (orig r8319):  ribasushi | 2010-01-15 03:12:13 +0100
 Update troubleshooting doc
 r8337 at Thesaurus (orig r8325):  ribasushi | 2010-01-15 17:13:28 +0100
 RT52674
 r8346 at Thesaurus (orig r8334):  ribasushi | 2010-01-17 09:41:49 +0100
 No method aliasing in OO code, *ever*
 r8373 at Thesaurus (orig r8360):  ribasushi | 2010-01-18 11:54:51 +0100
 Adjust my email
 r8387 at Thesaurus (orig r8374):  ribasushi | 2010-01-19 13:07:07 +0100
  r8340 at Thesaurus (orig r8328):  abraxxa | 2010-01-15 19:21:20 +0100
  added branch no_duplicate_indexes_for_pk_cols with test and fix
  
  r8343 at Thesaurus (orig r8331):  abraxxa | 2010-01-15 19:32:16 +0100
  don't use eq_set in test
  
  r8344 at Thesaurus (orig r8332):  abraxxa | 2010-01-15 19:44:04 +0100
  don't sort the primary columns because order matters for indexes
  
  r8345 at Thesaurus (orig r8333):  abraxxa | 2010-01-15 19:56:46 +0100
  don't sort the key columns because the order of columns is important for indexes
  
  r8372 at Thesaurus (orig r8359):  abraxxa | 2010-01-18 10:22:09 +0100
  don't sort the columns in the tests either
  
  r8378 at Thesaurus (orig r8365):  abraxxa | 2010-01-18 15:39:28 +0100
  added pod section for parser args
  
  r8379 at Thesaurus (orig r8366):  abraxxa | 2010-01-18 15:53:08 +0100
  better pod thanks to ribasushi
  
  r8380 at Thesaurus (orig r8367):  abraxxa | 2010-01-18 16:04:34 +0100
  test and pod fixes
  
  r8383 at Thesaurus (orig r8370):  abraxxa | 2010-01-19 12:38:44 +0100
  fixed Authors section
  added License section
  fixed t/86sqlt.t tests
  
  r8384 at Thesaurus (orig r8371):  ribasushi | 2010-01-19 12:59:52 +0100
 Regenerated under new parser
  r8385 at Thesaurus (orig r8372):  ribasushi | 2010-01-19 13:03:51 +0100
  Minor style change and white space trim
  r8386 at Thesaurus (orig r8373):  ribasushi | 2010-01-19 13:06:54 +0100
  Changes abraxxa++
 
 r8390 at Thesaurus (orig r8377):  ribasushi | 2010-01-19 13:41:03 +0100
 Some minor test refactor and tab cleanups
 r8394 at Thesaurus (orig r8381):  frew | 2010-01-19 17:34:10 +0100
 add test to ensure no tabs in perl files
 
 r8397 at Thesaurus (orig r8384):  frew | 2010-01-19 18:00:12 +0100
 fix test to be an author dep
 r8398 at Thesaurus (orig r8385):  ribasushi | 2010-01-19 18:19:40 +0100
 First round of detabification
 r8399 at Thesaurus (orig r8386):  frew | 2010-01-19 23:42:50 +0100
 Add EOL test
 
 r8401 at Thesaurus (orig r8388):  ribasushi | 2010-01-20 08:32:39 +0100
 Fix minor RSC bug
 r8402 at Thesaurus (orig r8389):  roman | 2010-01-20 15:47:26 +0100
 Added a FAQ entry titled: How do I override a run time method (e.g. a relationship accessor)?
 r8403 at Thesaurus (orig r8390):  roman | 2010-01-20 16:31:41 +0100
 Added myself as a contributor.
 r8408 at Thesaurus (orig r8395):  jhannah | 2010-01-21 06:48:14 +0100
 Added FAQ: Custom methods in Result classes
 
 r8413 at Thesaurus (orig r8400):  frew | 2010-01-22 04:17:20 +0100
 add _is_numeric to ::Row
 r8418 at Thesaurus (orig r8405):  ribasushi | 2010-01-22 11:00:05 +0100
 Generalize autoinc/count test
 r8420 at Thesaurus (orig r8407):  ribasushi | 2010-01-22 11:11:49 +0100
 Final round of detabify
 r8421 at Thesaurus (orig r8408):  ribasushi | 2010-01-22 11:12:54 +0100
 Temporarily disable whitespace checkers
 r8426 at Thesaurus (orig r8413):  ribasushi | 2010-01-22 11:35:15 +0100
Move failing regression test away from trunk
 r8431 at Thesaurus (orig r8418):  frew | 2010-01-22 17:05:12 +0100
 fix name of _is_numeric to _is_column_numeric
 
 r8437 at Thesaurus (orig r8424):  ribasushi | 2010-01-26 09:33:42 +0100
 Switch to Test::Exception
 r8438 at Thesaurus (orig r8425):  ribasushi | 2010-01-26 09:48:30 +0100
 Test txn_scope_guard regression
 r8439 at Thesaurus (orig r8426):  ribasushi | 2010-01-26 10:10:11 +0100
 Fix txn_begin on external non-AC coderef regression
 r8443 at Thesaurus (orig r8430):  ribasushi | 2010-01-26 14:19:50 +0100
  r8304 at Thesaurus (orig r8292):  nigel | 2010-01-13 16:05:48 +0100
  Branch to extend ::Schema::Versioned to handle series of upgrades
  r8320 at Thesaurus (orig r8308):  nigel | 2010-01-14 16:52:50 +0100
  Changes to support multiple step schema version updates
  r8321 at Thesaurus (orig r8309):  nigel | 2010-01-14 17:05:21 +0100
  Changelog for Changes to support multiple step schema version updates
  r8393 at Thesaurus (orig r8380):  ribasushi | 2010-01-19 13:59:51 +0100
  Botched merge (tests still fail)
  r8395 at Thesaurus (orig r8382):  ribasushi | 2010-01-19 17:37:07 +0100
  More cleanup
  r8396 at Thesaurus (orig r8383):  ribasushi | 2010-01-19 17:48:09 +0100
  Fix last pieces of retardation and UNtodo the quick cycle
  r8442 at Thesaurus (orig r8429):  ribasushi | 2010-01-26 14:18:53 +0100
  No need for 2 statements to get the version
 
 r8445 at Thesaurus (orig r8432):  ribasushi | 2010-01-26 14:22:16 +0100
  r8161 at Thesaurus (orig r8149):  ovid | 2009-12-18 15:59:56 +0100
  Prefetch queries make inefficient SQL when combined with a pager.  This branch
  is to try to isolate some of the join conditions and figure out if we can fix
  this.
  
  r8166 at Thesaurus (orig r8154):  ovid | 2009-12-18 18:17:55 +0100
  Refactor internals to expose some join logic. Awful method and args :(
  
  r8319 at Thesaurus (orig r8307):  ovid | 2010-01-14 15:37:35 +0100
  Attempt to factor our alias handling has mostly failed.
  
  r8330 at Thesaurus (orig r8318):  ribasushi | 2010-01-15 03:02:21 +0100
  Better refactor
  r8332 at Thesaurus (orig r8320):  ribasushi | 2010-01-15 03:14:39 +0100
  Better varnames
  r8347 at Thesaurus (orig r8335):  ribasushi | 2010-01-17 11:33:55 +0100
  More mangling
  r8348 at Thesaurus (orig r8336):  ribasushi | 2010-01-17 13:44:00 +0100
  Getting warmer
  r8349 at Thesaurus (orig r8337):  ribasushi | 2010-01-17 14:00:20 +0100
  That was tricky :)
  r8352 at Thesaurus (orig r8340):  ribasushi | 2010-01-17 15:57:06 +0100
  Turned out to be much trickier
  r8354 at Thesaurus (orig r8342):  ribasushi | 2010-01-17 16:29:20 +0100
  This is made out of awesome
  r8355 at Thesaurus (orig r8343):  ribasushi | 2010-01-17 16:46:02 +0100
  Changes
  r8400 at Thesaurus (orig r8387):  ribasushi | 2010-01-20 08:17:44 +0100
 Whoops - need to disable quoting
 
 r8459 at Thesaurus (orig r8446):  ribasushi | 2010-01-27 11:56:15 +0100
 Clean up some stuff
 r8463 at Thesaurus (orig r8450):  ribasushi | 2010-01-27 12:08:04 +0100
 Merge some cleanups from the prefetch branch
 r8466 at Thesaurus (orig r8453):  ribasushi | 2010-01-27 12:33:33 +0100
 DSNs can not be empty
 r8471 at Thesaurus (orig r8458):  frew | 2010-01-27 21:38:42 +0100
 fix silly multipk bug
 r8472 at Thesaurus (orig r8459):  ribasushi | 2010-01-28 11:13:16 +0100
 Consolidate insert_bulk guards (and make them show up correctly in the trace)
 r8473 at Thesaurus (orig r8460):  ribasushi | 2010-01-28 11:28:30 +0100
 Fix bogus test DDL
 r8480 at Thesaurus (orig r8467):  ribasushi | 2010-01-28 22:11:59 +0100
  r8381 at Thesaurus (orig r8368):  moses | 2010-01-18 16:41:38 +0100
  Test commit
  r8425 at Thesaurus (orig r8412):  ribasushi | 2010-01-22 11:25:01 +0100
  Informix test + cleanups
  r8428 at Thesaurus (orig r8415):  ribasushi | 2010-01-22 11:59:25 +0100
  Initial informix support
 
 r8482 at Thesaurus (orig r8469):  ribasushi | 2010-01-28 22:19:23 +0100
 Informix changes
 r8483 at Thesaurus (orig r8470):  ribasushi | 2010-01-29 12:01:41 +0100
 Require non-warning-spewing MooseX::Types
 r8484 at Thesaurus (orig r8471):  ribasushi | 2010-01-29 12:15:15 +0100
 Enhance warning test a bit (seems to fail on 5.8)
 r8485 at Thesaurus (orig r8472):  ribasushi | 2010-01-29 13:00:54 +0100
 Fugly 5.8 workaround
 r8494 at Thesaurus (orig r8481):  frew | 2010-01-31 06:47:42 +0100
 cleanup (3 arg open, 1 grep instead of 3)
 r8496 at Thesaurus (orig r8483):  ribasushi | 2010-01-31 10:04:43 +0100
 better skip message
 r8510 at Thesaurus (orig r8497):  caelum | 2010-02-01 12:07:13 +0100
 throw exception on attempt to insert a blob with DBD::Oracle == 1.23
 r8511 at Thesaurus (orig r8498):  caelum | 2010-02-01 12:12:48 +0100
 add RT link for Oracle blob bug in DBD::Oracle == 1.23
 r8527 at Thesaurus (orig r8514):  caelum | 2010-02-02 23:20:17 +0100
  r22968 at hlagh (orig r8502):  caelum | 2010-02-02 05:30:47 -0500
  branch to support Sybase SQL Anywhere
  r22971 at hlagh (orig r8505):  caelum | 2010-02-02 07:21:13 -0500
  ASA last_insert_id and limit support, still needs BLOB support
  r22972 at hlagh (orig r8506):  caelum | 2010-02-02 08:33:57 -0500
  deref table name if needed, check all columns for identity column not just PK
  r22973 at hlagh (orig r8507):  caelum | 2010-02-02 08:48:11 -0500
  test blobs, they work, didn't have to do anything
  r22974 at hlagh (orig r8508):  caelum | 2010-02-02 09:15:44 -0500
  fix stupid identity bug, test empty insert (works), test DTs (not working yet)
  r22976 at hlagh (orig r8510):  caelum | 2010-02-02 14:31:00 -0500
  rename ::Sybase::ASA to ::SQLAnywhere, per mst
  r22978 at hlagh (orig r8512):  caelum | 2010-02-02 17:02:29 -0500
  DT inflation now works
  r22979 at hlagh (orig r8513):  caelum | 2010-02-02 17:18:06 -0500
  minor POD update
 
 r8528 at Thesaurus (orig r8515):  caelum | 2010-02-02 23:23:26 +0100
  r22895 at hlagh (orig r8473):  caelum | 2010-01-30 03:57:26 -0500
  branch to fix computed columns in Sybase ASE
  r22911 at hlagh (orig r8489):  caelum | 2010-01-31 07:18:33 -0500
  empty insert into a Sybase table with computed columns and either data_type => undef or default_value => SCALARREF works now
  r22912 at hlagh (orig r8490):  caelum | 2010-01-31 07:39:32 -0500
  add POD about computed columns and timestamps for Sybase
  r22918 at hlagh (orig r8496):  caelum | 2010-02-01 05:09:07 -0500
  update POD about Schema::Loader for Sybase
 
 r8531 at Thesaurus (orig r8518):  ribasushi | 2010-02-02 23:57:27 +0100
  r8512 at Thesaurus (orig r8499):  boghead | 2010-02-01 23:38:13 +0100
  - Creating a branch for adding _post_inflate_datetime and _pre_deflate_datetime to
    InflateColumn::DateTime
  
  r8513 at Thesaurus (orig r8500):  boghead | 2010-02-01 23:42:14 +0100
  - Add _post_inflate_datetime and _pre_deflate_datetime to InflateColumn::DateTime to allow
    for modifying DateTime objects after inflation or before deflation.
  
  r8524 at Thesaurus (orig r8511):  boghead | 2010-02-02 22:59:28 +0100
 - Simplify by allowing moving column_info deprecated {extra}{timezone} data to
    {timezone} (and the same with locale)
  
 
 r8533 at Thesaurus (orig r8520):  caelum | 2010-02-03 05:19:59 +0100
 support for Sybase SQL Anywhere through ODBC
 r8536 at Thesaurus (orig r8523):  ribasushi | 2010-02-03 08:27:54 +0100
 Changes
 r8537 at Thesaurus (orig r8524):  ribasushi | 2010-02-03 08:31:20 +0100
 Quote fail
 r8538 at Thesaurus (orig r8525):  caelum | 2010-02-03 13:21:37 +0100
 test DT inflation for Sybase SQL Anywhere over ODBC too
 r8539 at Thesaurus (orig r8526):  caelum | 2010-02-03 17:36:39 +0100
 minor code cleanup for SQL Anywhere last_insert_id
 r8540 at Thesaurus (orig r8527):  ribasushi | 2010-02-04 11:28:33 +0100
 Fix bug reported by tommyt
 r8548 at Thesaurus (orig r8535):  ribasushi | 2010-02-04 14:34:45 +0100
 Prepare for new SQLA release
 r8560 at Thesaurus (orig r8547):  ribasushi | 2010-02-05 08:59:04 +0100
 Refactor some evil code
 r8565 at Thesaurus (orig r8552):  ribasushi | 2010-02-05 17:00:12 +0100
 Looks like RSC is finally (halfway) fixed
 r8566 at Thesaurus (orig r8553):  ribasushi | 2010-02-05 17:07:13 +0100
 RSC subquery can not include the prefetch
 r8567 at Thesaurus (orig r8554):  ribasushi | 2010-02-05 17:10:29 +0100
 Fix typo and borked test
 r8569 at Thesaurus (orig r8556):  ribasushi | 2010-02-05 17:33:12 +0100
 Release 0.08116
 r8571 at Thesaurus (orig r8558):  ribasushi | 2010-02-05 18:01:33 +0100
 No idea how I missed all these fails...
 r8572 at Thesaurus (orig r8559):  ribasushi | 2010-02-05 18:13:34 +0100
 Release 0.08117
 r8574 at Thesaurus (orig r8561):  ribasushi | 2010-02-05 18:51:12 +0100
 Try to distinguish trunk from official versions
 r8580 at Thesaurus (orig r8567):  gshank | 2010-02-05 22:29:24 +0100
 add doc on 'where' attribute
 
 r8587 at Thesaurus (orig r8574):  frew | 2010-02-07 21:07:03 +0100
 add as_subselect_rs
 r8588 at Thesaurus (orig r8575):  frew | 2010-02-07 21:13:04 +0100
 fix longstanding unmentioned bug ("me")
 r8589 at Thesaurus (orig r8576):  frew | 2010-02-08 06:17:43 +0100
 another example of as_subselect_rs
 r8590 at Thesaurus (orig r8577):  frew | 2010-02-08 06:23:58 +0100
 fix bug in UTF8Columns
 r8591 at Thesaurus (orig r8578):  ribasushi | 2010-02-08 09:31:01 +0100
 Extend utf8columns test to trap fixed bug
 r8592 at Thesaurus (orig r8579):  ribasushi | 2010-02-08 12:03:23 +0100
 Cleanup rel accessor type handling
 r8593 at Thesaurus (orig r8580):  ribasushi | 2010-02-08 12:20:47 +0100
 Fix some fallout
 r8595 at Thesaurus (orig r8582):  ribasushi | 2010-02-08 12:38:19 +0100
 Merge some obsolete code cleanup from the prefetch branch
 r8596 at Thesaurus (orig r8583):  ribasushi | 2010-02-08 12:42:09 +0100
 Merge fix of RT54039 from prefetch branch
 r8598 at Thesaurus (orig r8585):  ribasushi | 2010-02-08 12:48:31 +0100
 Release 0.08118
 r8600 at Thesaurus (orig r8587):  ribasushi | 2010-02-08 12:52:33 +0100
 Bump trunk version
 r8606 at Thesaurus (orig r8593):  ribasushi | 2010-02-08 16:16:44 +0100
 cheaper lookup
 r8609 at Thesaurus (orig r8596):  ribasushi | 2010-02-10 12:40:37 +0100
 Consolidate last_insert_id handling with a fallback-attempt on DBI::last_insert_id
 r8614 at Thesaurus (orig r8601):  caelum | 2010-02-10 21:29:51 +0100
 workaround for Moose bug affecting Replicated storage
 r8615 at Thesaurus (orig r8602):  caelum | 2010-02-10 21:40:07 +0100
 revert Moose bug workaround, bump Moose dep for Replicated to 0.98
 r8616 at Thesaurus (orig r8603):  caelum | 2010-02-10 22:48:34 +0100
 add a couple proxy methods to Replicated so it can run
 r8628 at Thesaurus (orig r8615):  caelum | 2010-02-11 11:35:01 +0100
  r21090 at hlagh (orig r7836):  caelum | 2009-11-02 06:40:52 -0500
  new branch to fix unhandled methods in Storage::DBI::Replicated
  r21091 at hlagh (orig r7837):  caelum | 2009-11-02 06:42:00 -0500
  add test to display unhandled methods
  r21092 at hlagh (orig r7838):  caelum | 2009-11-02 06:55:34 -0500
  minor fix to last committed test
  r21093 at hlagh (orig r7839):  caelum | 2009-11-02 09:26:00 -0500
  minor test code cleanup
  r23125 at hlagh (orig r8607):  caelum | 2010-02-10 19:25:51 -0500
  add unimplemented Storage::DBI methods to ::DBI::Replicated
  r23130 at hlagh (orig r8612):  ribasushi | 2010-02-11 05:12:48 -0500
  Podtesting exclusion
 
 r8630 at Thesaurus (orig r8617):  frew | 2010-02-11 11:45:54 +0100
 Changes (from a while ago)
 r8631 at Thesaurus (orig r8618):  caelum | 2010-02-11 11:46:58 +0100
 savepoints for SQLAnywhere
 r8640 at Thesaurus (orig r8627):  ribasushi | 2010-02-11 12:33:19 +0100
  r8424 at Thesaurus (orig r8411):  ribasushi | 2010-01-22 11:19:40 +0100
  Chaining POC test
 
 r8641 at Thesaurus (orig r8628):  ribasushi | 2010-02-11 12:34:19 +0100
  r8426 at Thesaurus (orig r8413):  ribasushi | 2010-01-22 11:35:15 +0100
 Move failing regression test away from trunk
 
 r8642 at Thesaurus (orig r8629):  ribasushi | 2010-02-11 12:34:56 +0100
 
 r8643 at Thesaurus (orig r8630):  ribasushi | 2010-02-11 12:35:03 +0100
  r8507 at Thesaurus (orig r8494):  frew | 2010-02-01 04:33:08 +0100
  small refactor to put select/as/+select/+as etc merging in it's own function
 
 r8644 at Thesaurus (orig r8631):  ribasushi | 2010-02-11 12:35:11 +0100
  r8514 at Thesaurus (orig r8501):  frew | 2010-02-02 05:12:29 +0100
  revert actual changes from yesterday as per ribasushis advice
 
 r8645 at Thesaurus (orig r8632):  ribasushi | 2010-02-11 12:35:16 +0100
  r8522 at Thesaurus (orig r8509):  frew | 2010-02-02 19:39:33 +0100
  delete +stuff if stuff exists
 
 r8646 at Thesaurus (orig r8633):  ribasushi | 2010-02-11 12:35:23 +0100
  r8534 at Thesaurus (orig r8521):  frew | 2010-02-03 06:14:44 +0100
  change deletion/overriding to fix t/76
 
 r8647 at Thesaurus (orig r8634):  ribasushi | 2010-02-11 12:35:30 +0100
  r8535 at Thesaurus (orig r8522):  frew | 2010-02-03 06:57:15 +0100
  some basic readability factorings (aka, fewer nested ternaries and long maps)
 
 r8648 at Thesaurus (orig r8635):  ribasushi | 2010-02-11 12:36:01 +0100
  r8558 at Thesaurus (orig r8545):  frew | 2010-02-04 20:32:54 +0100
  fix incorrect test in t/76select.t and posit an incorrect solution
 
 r8649 at Thesaurus (orig r8636):  ribasushi | 2010-02-11 12:38:47 +0100
 
 r8650 at Thesaurus (orig r8637):  ribasushi | 2010-02-11 12:38:57 +0100
  r8578 at Thesaurus (orig r8565):  ribasushi | 2010-02-05 19:11:09 +0100
  Should not be needed
 
 r8651 at Thesaurus (orig r8638):  ribasushi | 2010-02-11 12:39:03 +0100
  r8579 at Thesaurus (orig r8566):  ribasushi | 2010-02-05 19:13:24 +0100
  SQLA now fixed
 
 r8652 at Thesaurus (orig r8639):  ribasushi | 2010-02-11 12:39:10 +0100
  r8624 at Thesaurus (orig r8611):  ribasushi | 2010-02-11 10:31:08 +0100
  MOAR testing
 
 r8653 at Thesaurus (orig r8640):  ribasushi | 2010-02-11 12:39:17 +0100
  r8626 at Thesaurus (orig r8613):  frew | 2010-02-11 11:16:30 +0100
  fix bad test
 
 r8654 at Thesaurus (orig r8641):  ribasushi | 2010-02-11 12:39:23 +0100
  r8627 at Thesaurus (orig r8614):  frew | 2010-02-11 11:21:52 +0100
  fix t/76, break rsc tests
 
 r8655 at Thesaurus (orig r8642):  ribasushi | 2010-02-11 12:39:30 +0100
  r8632 at Thesaurus (orig r8619):  frew | 2010-02-11 11:53:50 +0100
  fix incorrect test
 
 r8656 at Thesaurus (orig r8643):  ribasushi | 2010-02-11 12:39:35 +0100
  r8633 at Thesaurus (orig r8620):  frew | 2010-02-11 11:54:49 +0100
  make t/76s and t/88 pass by deleting from the correct attr hash
 
 r8657 at Thesaurus (orig r8644):  ribasushi | 2010-02-11 12:39:40 +0100
  r8634 at Thesaurus (orig r8621):  frew | 2010-02-11 11:55:41 +0100
  fix a test due to ordering issues
 
 r8658 at Thesaurus (orig r8645):  ribasushi | 2010-02-11 12:39:45 +0100
  r8635 at Thesaurus (orig r8622):  frew | 2010-02-11 11:58:23 +0100
  this is why you run tests before you commit them.
 
 r8659 at Thesaurus (orig r8646):  ribasushi | 2010-02-11 12:39:51 +0100
  r8636 at Thesaurus (orig r8623):  frew | 2010-02-11 12:00:59 +0100
  fix another ordering issue
 
 r8660 at Thesaurus (orig r8647):  ribasushi | 2010-02-11 12:39:57 +0100
  r8637 at Thesaurus (orig r8624):  frew | 2010-02-11 12:11:31 +0100
  fix for search/select_chains
 
 r8661 at Thesaurus (orig r8648):  ribasushi | 2010-02-11 12:40:03 +0100
 
 r8662 at Thesaurus (orig r8649):  caelum | 2010-02-11 12:40:07 +0100
 test nanosecond precision for SQLAnywhere
 r8663 at Thesaurus (orig r8650):  ribasushi | 2010-02-11 12:40:09 +0100
  r8639 at Thesaurus (orig r8626):  ribasushi | 2010-02-11 12:33:03 +0100
 Changes and small omission
 
 r8666 at Thesaurus (orig r8653):  ribasushi | 2010-02-11 18:16:45 +0100
 Changes
 r8674 at Thesaurus (orig r8661):  ribasushi | 2010-02-12 09:12:45 +0100
 Fix moose dep
 r8680 at Thesaurus (orig r8667):  dew | 2010-02-12 18:05:11 +0100
 Add is_ordered to DBIC::ResultSet
 r8688 at Thesaurus (orig r8675):  ribasushi | 2010-02-13 09:36:29 +0100
  r8667 at Thesaurus (orig r8654):  ribasushi | 2010-02-11 18:17:35 +0100
  Try a dep-handling idea
  r8675 at Thesaurus (orig r8662):  ribasushi | 2010-02-12 12:46:11 +0100
  Move optional deps out of the Makefile
  r8676 at Thesaurus (orig r8663):  ribasushi | 2010-02-12 13:40:53 +0100
  Support methods to verify group dependencies
  r8677 at Thesaurus (orig r8664):  ribasushi | 2010-02-12 13:45:18 +0100
  Move sqlt dephandling to Optional::Deps
  r8679 at Thesaurus (orig r8666):  ribasushi | 2010-02-12 14:03:17 +0100
  Move replicated to Opt::Deps
  r8684 at Thesaurus (orig r8671):  ribasushi | 2010-02-13 02:47:52 +0100
  Auto-POD for Optional Deps
  r8685 at Thesaurus (orig r8672):  ribasushi | 2010-02-13 02:53:20 +0100
  Privatize the full list method
  r8686 at Thesaurus (orig r8673):  ribasushi | 2010-02-13 02:59:51 +0100
  Scary warning
  r8687 at Thesaurus (orig r8674):  ribasushi | 2010-02-13 09:35:01 +0100
  Changes
 
 r8691 at Thesaurus (orig r8678):  ribasushi | 2010-02-13 10:07:15 +0100
 Autogen comment for Dependencies.pod
 r8692 at Thesaurus (orig r8679):  ribasushi | 2010-02-13 10:11:24 +0100
 Ask for newer M::I
 r8698 at Thesaurus (orig r8685):  ribasushi | 2010-02-13 11:11:10 +0100
 Add author/license to pod
 r8699 at Thesaurus (orig r8686):  arcanez | 2010-02-13 13:43:22 +0100
 fix typo per nuba on irc
 r8705 at Thesaurus (orig r8692):  ribasushi | 2010-02-13 15:15:33 +0100
  r8001 at Thesaurus (orig r7989):  goraxe | 2009-11-30 01:14:47 +0100
  Branch for dbicadmin script refactor
  
  r8003 at Thesaurus (orig r7991):  goraxe | 2009-11-30 01:26:39 +0100
  add DBIx::Class::Admin
  r8024 at Thesaurus (orig r8012):  goraxe | 2009-12-02 22:49:27 +0100
  get deployment tests to pass
  r8025 at Thesaurus (orig r8013):  goraxe | 2009-12-02 22:50:42 +0100
  get deployment tests to pass
  r8026 at Thesaurus (orig r8014):  goraxe | 2009-12-02 23:52:40 +0100
  all ddl tests now pass
  r8083 at Thesaurus (orig r8071):  goraxe | 2009-12-12 17:01:11 +0100
 add quiet attribute to DBIx::Class admin
  r8086 at Thesaurus (orig r8074):  goraxe | 2009-12-12 17:36:58 +0100
  add tests for data manipulation ported from 89dbicadmin.t
  r8088 at Thesaurus (orig r8076):  goraxe | 2009-12-12 17:38:07 +0100
  add sleep 1 to t/admin/02ddl.t so insert into upgrade table does not happen too quickly
  r8089 at Thesaurus (orig r8077):  goraxe | 2009-12-12 17:40:33 +0100
  update DBIx::Class::Admin data manip functions to pass the test
  r8095 at Thesaurus (orig r8083):  goraxe | 2009-12-12 19:36:22 +0100
  change passing of preversion to be a parameter
  r8096 at Thesaurus (orig r8084):  goraxe | 2009-12-12 19:38:26 +0100
  add some pod to DBIx::Class::Admin
  r8103 at Thesaurus (orig r8091):  goraxe | 2009-12-12 22:08:55 +0100
  some changes to make DBIx::Class::Admin more compatible with dbicadmin interface
  r8104 at Thesaurus (orig r8092):  goraxe | 2009-12-12 22:09:39 +0100
  commit refactored dbicadmin script and very minor changes to its existing test suite
  r8107 at Thesaurus (orig r8095):  goraxe | 2009-12-12 22:34:35 +0100
 add compatibility for --op for dbicadmin, revert test suite
  r8127 at Thesaurus (orig r8115):  goraxe | 2009-12-15 22:14:20 +0100
  dep check to end of module
  r8128 at Thesaurus (orig r8116):  goraxe | 2009-12-15 23:15:25 +0100
  add namespace::autoclean to DBIx::Class::Admin
  r8129 at Thesaurus (orig r8117):  goraxe | 2009-12-15 23:16:00 +0100
  update test suite to skip if cannot load DBIx::Class::Admin
  r8130 at Thesaurus (orig r8118):  goraxe | 2009-12-15 23:18:35 +0100
  add deps check for 89dbicadmin.t
  r8131 at Thesaurus (orig r8119):  goraxe | 2009-12-15 23:19:01 +0100
  include deps for dbicadmin DBIx::Class::Admin to Makefile.PL
  r8149 at Thesaurus (orig r8137):  goraxe | 2009-12-17 23:21:50 +0100
  use DBICTest::_database over creating a schema object to steal conn info
  r8338 at Thesaurus (orig r8326):  goraxe | 2010-01-15 19:00:17 +0100
  change white space to not be tabs
  r8339 at Thesaurus (orig r8327):  goraxe | 2010-01-15 19:10:42 +0100
  remove Module::Load from test suite
  r8358 at Thesaurus (orig r8346):  ribasushi | 2010-01-17 17:52:10 +0100
  Real detabify
  r8359 at Thesaurus (orig r8347):  ribasushi | 2010-01-17 18:01:53 +0100
  Fix POD (spacing matters)
  r8360 at Thesaurus (orig r8348):  ribasushi | 2010-01-17 21:57:53 +0100
  More detabification
  r8361 at Thesaurus (orig r8349):  ribasushi | 2010-01-17 22:33:12 +0100
  Test cleanup
  r8362 at Thesaurus (orig r8350):  ribasushi | 2010-01-17 22:41:11 +0100
 More test cleanup
  r8363 at Thesaurus (orig r8351):  ribasushi | 2010-01-17 22:43:57 +0100
  And more cleanup
  r8364 at Thesaurus (orig r8352):  ribasushi | 2010-01-17 22:51:21 +0100
  Disallow mucking with INC
  r8365 at Thesaurus (orig r8353):  ribasushi | 2010-01-17 23:23:15 +0100
  More cleanup
  r8366 at Thesaurus (orig r8354):  ribasushi | 2010-01-17 23:27:49 +0100
  Add lib path to ENV so that $^X can see it
  r8367 at Thesaurus (orig r8355):  ribasushi | 2010-01-17 23:33:10 +0100
  Move script-test
  r8368 at Thesaurus (orig r8356):  goraxe | 2010-01-17 23:35:03 +0100
  change warns/dies -> carp/throw_exception
  r8369 at Thesaurus (orig r8357):  goraxe | 2010-01-17 23:53:54 +0100
  add goraxe to contributors
  r8370 at Thesaurus (orig r8358):  goraxe | 2010-01-17 23:54:15 +0100
  remove comment headers 
  r8404 at Thesaurus (orig r8391):  caelum | 2010-01-20 20:54:29 +0100
  minor fixups
  r8405 at Thesaurus (orig r8392):  goraxe | 2010-01-20 21:13:24 +0100
  add private types to coerce
  r8406 at Thesaurus (orig r8393):  goraxe | 2010-01-20 21:17:19 +0100
  remove un-needed coerce from schema_class of type Str
  r8411 at Thesaurus (orig r8398):  caelum | 2010-01-21 23:36:25 +0100
  minor documentation updates
  r8436 at Thesaurus (orig r8423):  caelum | 2010-01-25 02:56:30 +0100
  this code never runs anyway
  r8440 at Thesaurus (orig r8427):  caelum | 2010-01-26 14:05:53 +0100
  prefer JSON::DWIW for barekey support
  r8693 at Thesaurus (orig r8680):  ribasushi | 2010-02-13 10:27:18 +0100
  dbicadmin dependencies
  r8694 at Thesaurus (orig r8681):  ribasushi | 2010-02-13 10:28:04 +0100
 Some cleanup, make use of Text::CSV
  r8695 at Thesaurus (orig r8682):  ribasushi | 2010-02-13 10:34:19 +0100
 We use Try::Tiny in a single spot, not grounds for inclusion in deps
  r8696 at Thesaurus (orig r8683):  ribasushi | 2010-02-13 10:37:30 +0100
  POD section
  r8697 at Thesaurus (orig r8684):  ribasushi | 2010-02-13 11:05:17 +0100
  Switch tests to Optional::Deps
  r8700 at Thesaurus (orig r8687):  ribasushi | 2010-02-13 14:32:50 +0100
  Switch Admin/dbicadmin to Opt::Deps
  r8702 at Thesaurus (orig r8689):  ribasushi | 2010-02-13 14:39:24 +0100
  JSON dep is needed for Admin.pm itself
  r8703 at Thesaurus (orig r8690):  ribasushi | 2010-02-13 15:06:28 +0100
  Test fixes
  r8704 at Thesaurus (orig r8691):  ribasushi | 2010-02-13 15:13:31 +0100
  Changes
 
 r8707 at Thesaurus (orig r8694):  ribasushi | 2010-02-13 16:37:57 +0100
 Test for optional deps manager
 r8710 at Thesaurus (orig r8697):  caelum | 2010-02-14 05:22:03 +0100
 add doc on maximum cursors for SQLAnywhere
 r8711 at Thesaurus (orig r8698):  ribasushi | 2010-02-14 09:23:09 +0100
 Cleanup dependencies / Admin inheritance
 r8712 at Thesaurus (orig r8699):  ribasushi | 2010-02-14 09:28:29 +0100
 Some formatting
 r8715 at Thesaurus (orig r8702):  ribasushi | 2010-02-14 10:46:51 +0100
 This is Moose, so use CMOP
 r8720 at Thesaurus (orig r8707):  ribasushi | 2010-02-15 10:28:22 +0100
 Final POD touches
 r8721 at Thesaurus (orig r8708):  ribasushi | 2010-02-15 10:31:38 +0100
 Spellcheck (jawnsy++)
 r8722 at Thesaurus (orig r8709):  ribasushi | 2010-02-15 10:32:24 +0100
 One more
 r8723 at Thesaurus (orig r8710):  ribasushi | 2010-02-15 14:49:26 +0100
 Release 0.08119
 r8725 at Thesaurus (orig r8712):  ribasushi | 2010-02-15 14:50:56 +0100
Bump trunk version
 r8726 at Thesaurus (orig r8713):  rafl | 2010-02-15 15:49:55 +0100
 Make sure we actually run all tests, given we're using done_testing.
 r8727 at Thesaurus (orig r8714):  rafl | 2010-02-15 15:50:01 +0100
 Make sure overriding deployment_statements is possible from within schemas.
 r8728 at Thesaurus (orig r8715):  rafl | 2010-02-15 15:56:06 +0100
 Changelogging.
 r8729 at Thesaurus (orig r8716):  rafl | 2010-02-15 15:58:09 +0100
 Make some cookbook code compile.
 r8730 at Thesaurus (orig r8717):  nuba | 2010-02-15 16:11:52 +0100
 spelling fixes in the documaentation, sholud be gud now ;)
 r8732 at Thesaurus (orig r8719):  caelum | 2010-02-16 11:09:58 +0100
 use OO interface of Hash::Merge for ::DBI::Replicated
 r8734 at Thesaurus (orig r8721):  ribasushi | 2010-02-16 11:41:06 +0100
 Augment did-author-run-makefile check to include OptDeps
 r8735 at Thesaurus (orig r8722):  ribasushi | 2010-02-16 12:16:06 +0100
 Reorg support section, add live-chat link
 r8739 at Thesaurus (orig r8726):  caelum | 2010-02-16 14:51:58 +0100
 set behavior for Hash::Merge in ::DBI::Replicated, otherwise it uses the global setting
 r8740 at Thesaurus (orig r8727):  caelum | 2010-02-16 15:43:25 +0100
 POD touchups
 r8759 at Thesaurus (orig r8746):  ribasushi | 2010-02-19 00:30:37 +0100
 Fix bogus test
 r8760 at Thesaurus (orig r8747):  ribasushi | 2010-02-19 00:34:22 +0100
 Retire useless abstraction (all rdbms need this anyway)
 r8761 at Thesaurus (orig r8748):  ribasushi | 2010-02-19 00:35:01 +0100
 Fix count of group_by over aliased function
 r8765 at Thesaurus (orig r8752):  ribasushi | 2010-02-19 10:11:20 +0100
  r8497 at Thesaurus (orig r8484):  ribasushi | 2010-01-31 10:06:29 +0100
  Branch to unify mandatory PK handling
  r8498 at Thesaurus (orig r8485):  ribasushi | 2010-01-31 10:20:36 +0100
  This is not really used for anything (same code in DBI)
  r8499 at Thesaurus (orig r8486):  ribasushi | 2010-01-31 10:25:55 +0100
  Helper primary_columns wrapper to throw if a PK is not defined
  r8500 at Thesaurus (orig r8487):  ribasushi | 2010-01-31 11:07:25 +0100
  Stupid errors
  r8501 at Thesaurus (orig r8488):  ribasushi | 2010-01-31 12:18:57 +0100
  Saner handling of nonexistent/partial conditions
  r8762 at Thesaurus (orig r8749):  ribasushi | 2010-02-19 10:07:40 +0100
  trap unresolvable conditions due to incomplete relationship specification
  r8764 at Thesaurus (orig r8751):  ribasushi | 2010-02-19 10:11:09 +0100
  Changes
 
 r8767 at Thesaurus (orig r8754):  ribasushi | 2010-02-19 11:14:30 +0100
 Fix for RT54697
 r8769 at Thesaurus (orig r8756):  caelum | 2010-02-19 12:21:53 +0100
 bump Test::Pod dep
 r8770 at Thesaurus (orig r8757):  caelum | 2010-02-19 12:23:07 +0100
 bump Test::Pod dep in Optional::Dependencies too
 r8773 at Thesaurus (orig r8760):  rabbit | 2010-02-19 16:41:24 +0100
 Fix stupid sqlt parser regression
 r8774 at Thesaurus (orig r8761):  rabbit | 2010-02-19 16:42:40 +0100
Port remaining tests to the Opt::Dep repository
 r8775 at Thesaurus (orig r8762):  rabbit | 2010-02-19 16:43:36 +0100
 Some test cleanups
 r8780 at Thesaurus (orig r8767):  rabbit | 2010-02-20 20:59:20 +0100
 Test::Deep actually isn't required
 r8786 at Thesaurus (orig r8773):  rabbit | 2010-02-20 22:21:41 +0100
 These are core for perl 5.8
 r8787 at Thesaurus (orig r8774):  rabbit | 2010-02-21 10:52:40 +0100
 Shuffle tests a bit
 r8788 at Thesaurus (orig r8775):  rabbit | 2010-02-21 12:09:25 +0100
 Bogus require
 r8789 at Thesaurus (orig r8776):  rabbit | 2010-02-21 12:09:48 +0100
 Bogus unnecessary dep
 r8800 at Thesaurus (orig r8787):  rabbit | 2010-02-21 13:39:21 +0100
  r8748 at Thesaurus (orig r8735):  goraxe | 2010-02-17 23:17:15 +0100
  branch for dbicadmin pod fixes
  
  r8778 at Thesaurus (orig r8765):  goraxe | 2010-02-20 20:35:00 +0100
  add G:L:D sub classes to generate pod
  r8779 at Thesaurus (orig r8766):  goraxe | 2010-02-20 20:56:16 +0100
  dbicadmin: use subclassed G:L:D to generate some pod
  r8782 at Thesaurus (orig r8769):  goraxe | 2010-02-20 21:48:29 +0100
  adjust Makefile.pl to generate dbicadmin.pod
  r8783 at Thesaurus (orig r8770):  goraxe | 2010-02-20 21:50:55 +0100
  add svn-ignore for dbicadmin.pod
  r8784 at Thesaurus (orig r8771):  goraxe | 2010-02-20 22:01:41 +0100
  change Options to Arguments
  r8785 at Thesaurus (orig r8772):  goraxe | 2010-02-20 22:10:29 +0100
  add DBIx::Class::Admin::{Descriptive,Usage} to podcover ignore list
  r8790 at Thesaurus (orig r8777):  rabbit | 2010-02-21 12:35:38 +0100
  Cleanup the makefile regen a bit
  r8792 at Thesaurus (orig r8779):  rabbit | 2010-02-21 12:53:01 +0100
  Bah humbug
  r8793 at Thesaurus (orig r8780):  rabbit | 2010-02-21 12:55:18 +0100
  And another one
  r8797 at Thesaurus (orig r8784):  rabbit | 2010-02-21 13:32:03 +0100
  The minimal pod seems to confuse the manpage generator, commenting out for now
  r8798 at Thesaurus (orig r8785):  rabbit | 2010-02-21 13:38:03 +0100
  Add license/author to dbicadmin autogen POD
  r8799 at Thesaurus (orig r8786):  rabbit | 2010-02-21 13:38:58 +0100
  Reorder makefile author actions to make output more readable
 
 r8803 at Thesaurus (orig r8790):  ribasushi | 2010-02-21 14:24:15 +0100
 Fix exception text
 r8804 at Thesaurus (orig r8791):  ribasushi | 2010-02-21 15:14:58 +0100
 Extra testdep
 r8808 at Thesaurus (orig r8795):  caelum | 2010-02-22 20:16:07 +0100
 with_deferred_fk_checks for Oracle
 r8809 at Thesaurus (orig r8796):  rabbit | 2010-02-22 21:26:20 +0100
 Add a hidden option to dbicadmin to self-inject autogenerated POD
 r8810 at Thesaurus (orig r8797):  caelum | 2010-02-22 21:48:43 +0100
 improve with_deferred_fk_checks for Oracle, add tests
 r8812 at Thesaurus (orig r8799):  rbuels | 2010-02-22 23:09:40 +0100
 added package name to DBD::Pg warning in Pg storage driver to make it explicit where the warning is coming from
 r8815 at Thesaurus (orig r8802):  rabbit | 2010-02-23 11:21:10 +0100
 Looks like the distdir wrapping is finally taken care of
 r8818 at Thesaurus (orig r8805):  rabbit | 2010-02-23 14:05:14 +0100
 remove POD
 r8819 at Thesaurus (orig r8806):  rabbit | 2010-02-23 14:05:32 +0100
 More index exclusions
 r8821 at Thesaurus (orig r8808):  goraxe | 2010-02-23 15:00:38 +0100
 remove short options from dbicadmin
 r8822 at Thesaurus (orig r8809):  rabbit | 2010-02-23 15:15:00 +0100
 Whitespace
 r8826 at Thesaurus (orig r8813):  rabbit | 2010-02-24 09:28:43 +0100
  r8585 at Thesaurus (orig r8572):  faxm0dem | 2010-02-06 23:01:04 +0100
  sqlt::producer::oracle is now able to handle quotes correctly. Now we need to take advantage of that as currently the oracle producer capitalises everything
  r8586 at Thesaurus (orig r8573):  faxm0dem | 2010-02-06 23:03:31 +0100
  the way I thought. ribasushi suggested to override deploy(ment_statements)
  r8607 at Thesaurus (orig r8594):  faxm0dem | 2010-02-09 21:53:48 +0100
  should work now
  r8714 at Thesaurus (orig r8701):  faxm0dem | 2010-02-14 09:49:44 +0100
  oracle_version
  r8747 at Thesaurus (orig r8734):  faxm0dem | 2010-02-17 18:54:45 +0100
  still need to uc source_name if quotes off
  r8817 at Thesaurus (orig r8804):  rabbit | 2010-02-23 12:03:23 +0100
  Cleanup code (hopefully no functional changes)
  r8820 at Thesaurus (orig r8807):  rabbit | 2010-02-23 14:14:19 +0100
  Proper error message
  r8823 at Thesaurus (orig r8810):  faxm0dem | 2010-02-23 15:46:11 +0100
  Schema Object Naming Rules :
  [...]
  However, database names, global database names, and database link names are always case insensitive and are stored as uppercase.
  
  # source: http://download.oracle.com/docs/cd/B19306_01/server.102/b14200/sql_elements008.htm
  
  r8824 at Thesaurus (orig r8811):  rabbit | 2010-02-23 16:09:36 +0100
  Changes and dep-bump
 
 r8828 at Thesaurus (orig r8815):  rabbit | 2010-02-24 09:32:53 +0100
 Changelogging
 r8829 at Thesaurus (orig r8816):  rabbit | 2010-02-24 09:37:14 +0100
 Protect dbicadmin from self-injection when not in make
 r8830 at Thesaurus (orig r8817):  rabbit | 2010-02-24 10:00:43 +0100
 Release 0.08120
 r8832 at Thesaurus (orig r8819):  rabbit | 2010-02-24 10:02:36 +0100
 Bump trunk version
 r8833 at Thesaurus (orig r8820):  goraxe | 2010-02-24 14:21:23 +0100
  do not include hidden opts in generated pod
 r8834 at Thesaurus (orig r8821):  rabbit | 2010-02-24 15:50:34 +0100
 small tool to query cpan deps
 r8835 at Thesaurus (orig r8822):  rabbit | 2010-02-26 00:22:51 +0100
 Typo
 r8849 at Thesaurus (orig r8836):  rabbit | 2010-03-01 01:32:03 +0100
 Cleanup logic in RSC
 r8850 at Thesaurus (orig r8837):  rabbit | 2010-03-01 01:36:24 +0100
 Fix incorrect placement of condition resolution failure trap
 r8851 at Thesaurus (orig r8838):  rabbit | 2010-03-01 01:37:53 +0100
 Changes
 r8855 at Thesaurus (orig r8842):  rabbit | 2010-03-01 18:04:23 +0100
 Add has_relationship proxy to row
 r8856 at Thesaurus (orig r8843):  rabbit | 2010-03-02 10:29:18 +0100
 Do not autoviv empty ENV vars
 r8857 at Thesaurus (orig r8844):  rabbit | 2010-03-02 11:09:06 +0100
 This test is identical to the one above it
 r8858 at Thesaurus (orig r8845):  rabbit | 2010-03-02 11:13:55 +0100
 Test belongs_to accessor in-memory tie
 r8859 at Thesaurus (orig r8846):  rabbit | 2010-03-02 11:35:19 +0100
 proving rafl wrong
 r8865 at Thesaurus (orig r8852):  mo | 2010-03-03 12:05:51 +0100
 Fix for SQLite to ignore the { for => ... } attribute
 r8866 at Thesaurus (orig r8853):  rabbit | 2010-03-03 12:15:22 +0100
 Fix whitespace
 r8869 at Thesaurus (orig r8856):  castaway | 2010-03-03 23:07:40 +0100
 Minor doc tweaks
 
 r8870 at Thesaurus (orig r8857):  castaway | 2010-03-03 23:33:07 +0100
 Added note+warning about how Ordered works, from steveo_aa
 
 r8900 at Thesaurus (orig r8887):  rabbit | 2010-03-04 19:11:32 +0100
 Fix identity fiasco
 r8901 at Thesaurus (orig r8888):  rjbs | 2010-03-04 19:39:54 +0100
 fix a typo in FAQ
 r8904 at Thesaurus (orig r8891):  acmoore | 2010-03-05 22:37:55 +0100
 Fix regression where SQL files with comments were not handled properly by ::Schema::Versioned.
 
 
 r8913 at Thesaurus (orig r8900):  ribasushi | 2010-03-06 12:26:10 +0100
 More checks for weird usage of _determine_driver (maint/gen-schema)
 r8916 at Thesaurus (orig r8903):  rabbit | 2010-03-06 12:30:56 +0100
  r8422 at Thesaurus (orig r8409):  ribasushi | 2010-01-22 11:14:57 +0100
  Branches for some stuff
  r8477 at Thesaurus (orig r8464):  ribasushi | 2010-01-28 12:26:40 +0100
  RT#52681
  r8478 at Thesaurus (orig r8465):  ribasushi | 2010-01-28 12:41:25 +0100
  Deprecate IC::File
  r8479 at Thesaurus (orig r8466):  ribasushi | 2010-01-28 12:41:56 +0100
  Deprecate IC::File(2)
  r8487 at Thesaurus (orig r8474):  ribasushi | 2010-01-30 13:11:18 +0100
  Draft PK explanation
  r8488 at Thesaurus (orig r8475):  frew | 2010-01-30 21:19:30 +0100
  clarify PK stuff in intro just a bit
  r8489 at Thesaurus (orig r8476):  frew | 2010-01-30 21:24:21 +0100
  no first in POD
  r8910 at Thesaurus (orig r8897):  rabbit | 2010-03-06 11:37:12 +0100
  Improve POD about PKs and why they matter
  r8912 at Thesaurus (orig r8899):  rabbit | 2010-03-06 11:42:41 +0100
  One more PODlink
  r8915 at Thesaurus (orig r8902):  rabbit | 2010-03-06 12:27:29 +0100
  Fully deprecate IC::File
 
 r8919 at Thesaurus (orig r8906):  rabbit | 2010-03-06 12:44:59 +0100
 Fix RT54063
 r8920 at Thesaurus (orig r8907):  rabbit | 2010-03-06 13:18:02 +0100
 me-- not thinking
 r8925 at Thesaurus (orig r8912):  wreis | 2010-03-06 18:51:59 +0100
improvements for HasOne relationship validation
 r8934 at Thesaurus (orig r8921):  rabbit | 2010-03-07 00:52:51 +0100
 Cascading delete needs a guard to remain atomic
 r8936 at Thesaurus (orig r8923):  rabbit | 2010-03-07 02:35:51 +0100
 Fix the docs for select/as
 r8937 at Thesaurus (orig r8924):  rabbit | 2010-03-07 02:58:09 +0100
 Unmark Opt::Deps experimental and add extra method as per RT55211
 r8938 at Thesaurus (orig r8925):  rabbit | 2010-03-07 03:22:08 +0100
 Switch NoTab/EOL checks to Opt::Deps
 Enable NoTab checks
 Disable EOL checks
 r8939 at Thesaurus (orig r8926):  rabbit | 2010-03-07 10:23:24 +0100
 Cleanup a bit
 r8941 at Thesaurus (orig r8928):  rabbit | 2010-03-07 11:38:35 +0100
 Fix MC bug reported by felix
 r8944 at Thesaurus (orig r8931):  caelum | 2010-03-07 11:55:06 +0100
  r23004 at hlagh (orig r8530):  moritz | 2010-02-04 07:41:29 -0500
  create branch for Storage::DBI::InterBase
  
  r23005 at hlagh (orig r8531):  moritz | 2010-02-04 07:44:02 -0500
  primitive, non-working and very specific Storage::DBI::InterBase
  r23006 at hlagh (orig r8532):  moritz | 2010-02-04 08:00:05 -0500
  [Storage::DBI::InterBase] remove cruft copied from MSSQL
  r23008 at hlagh (orig r8534):  moritz | 2010-02-04 08:34:22 -0500
  [Storage::DBI::InterBase] remove more cruft
  r23014 at hlagh (orig r8540):  caelum | 2010-02-04 10:08:27 -0500
  test file for firebird, not passing yet
  r23015 at hlagh (orig r8541):  caelum | 2010-02-04 11:24:51 -0500
  Firebird: fix test cleanup, add ODBC wrapper
  r23016 at hlagh (orig r8542):  caelum | 2010-02-04 13:18:48 -0500
  limit and better autoinc for Firebird
  r23018 at hlagh (orig r8544):  caelum | 2010-02-04 14:19:51 -0500
  override quoting columns for RETURNING in Firebird ODBC (where it doesn't work) and generate a RETURNING clause only when necessary
  r23022 at hlagh (orig r8548):  caelum | 2010-02-05 03:55:43 -0500
  fix up my Row code for non-pk autoincs, add pretty crappy DT inflation for Firebird
  r23023 at hlagh (orig r8549):  caelum | 2010-02-05 04:26:03 -0500
  rename a couple of variables
  r23024 at hlagh (orig r8550):  caelum | 2010-02-05 04:46:31 -0500
  check for both NULL and null, rename _fb_auto_incs to _auto_incs
  r23025 at hlagh (orig r8551):  caelum | 2010-02-05 05:07:14 -0500
  support autoinc PKs without is_auto_increment set
  r23047 at hlagh (orig r8570):  caelum | 2010-02-06 07:35:31 -0500
  move Firebird ODBC override for RETURNING to a SQLAHacks class
  r23048 at hlagh (orig r8571):  caelum | 2010-02-06 08:06:44 -0500
  Firebird: add POD, fix BLOB tests
  r23085 at hlagh (orig r8588):  caelum | 2010-02-08 08:26:41 -0500
  better DT inflation for Firebird and _ping
  r23087 at hlagh (orig r8590):  moritz | 2010-02-08 08:32:26 -0500
  test ->update({...}) for firebird
  r23088 at hlagh (orig r8591):  caelum | 2010-02-08 08:33:09 -0500
  test update
  r23089 at hlagh (orig r8592):  moritz | 2010-02-08 08:43:50 -0500
  use quoting in firebird tests
  r23115 at hlagh (orig r8597):  caelum | 2010-02-10 07:05:21 -0500
  default to sql dialect 3 unless overridden
  r23116 at hlagh (orig r8598):  caelum | 2010-02-10 07:42:17 -0500
  turn on ib_softcommit, savepoint tests now pass for DBD::InterBase
  r23123 at hlagh (orig r8605):  caelum | 2010-02-10 17:38:24 -0500
  fix savepoints for Firebird ODBC
  r23170 at hlagh (orig r8652):  caelum | 2010-02-11 07:27:19 -0500
  support the DATE data type for Firebird
  r23186 at hlagh (orig r8668):  caelum | 2010-02-12 14:43:20 -0500
  special bind_param_array move to make DBD::InterBase happy (RT#54561)
  r23213 at hlagh (orig r8695):  caelum | 2010-02-13 15:15:46 -0500
  fix fail in t/72pg.t related to new autoinc retrieval code in ::Row
  r23214 at hlagh (orig r8696):  caelum | 2010-02-13 15:18:27 -0500
  fix multiple cursor test
  r23246 at hlagh (orig r8728):  caelum | 2010-02-16 09:47:43 -0500
  POD fix
  r23358 at hlagh (orig r8758):  caelum | 2010-02-19 06:25:27 -0500
  s/primary_columns/_pri_cols/ for Firebird
  r23420 at hlagh (orig r8800):  rkitover | 2010-02-22 19:33:13 -0500
  don't use ib_softcommit by default
  r23496 at hlagh (orig r8841):  rkitover | 2010-03-01 04:22:19 -0500
  update POD
  r23545 at hlagh (orig r8855):  rkitover | 2010-03-03 12:59:41 -0500
  destroy cached statements in $storage->disconnect too
  r23582 at hlagh (orig r8892):  rkitover | 2010-03-05 18:06:33 -0500
  auto_nextval support for Firebird
  r23598 at hlagh (orig r8908):  rkitover | 2010-03-06 11:48:41 -0500
  remove that code for non-pk autoincs from Row, move to ::DBI::InterBase
  r23599 at hlagh (orig r8909):  rkitover | 2010-03-06 12:00:15 -0500
  remove BindType2 test class
  r23601 at hlagh (orig r8911):  rkitover | 2010-03-06 12:12:55 -0500
  cache autoinc sequence in column_info
  r23609 at hlagh (orig r8919):  rkitover | 2010-03-06 18:05:24 -0500
  remove connect_info from maint/gen-schema.pl
  r23610 at hlagh (orig r8920):  rkitover | 2010-03-06 18:15:13 -0500
  don't die on insert in firebird with no pk
  r23612 at hlagh (orig r8922):  ribasushi | 2010-03-06 19:18:46 -0500
  What I really meant
  r23619 at hlagh (orig r8929):  rkitover | 2010-03-07 05:46:04 -0500
  fix RETURNING for empty INSERT
 
 r8946 at Thesaurus (orig r8933):  caelum | 2010-03-07 12:08:00 +0100
 remove unnecessary transaction_depth check in DBI::insert_bulk
 r8963 at Thesaurus (orig r8950):  ilmari | 2010-03-09 15:06:48 +0100
 Fix POD link
 r8965 at Thesaurus (orig r8952):  hobbs | 2010-03-09 20:29:50 +0100
 Support add_columns('+colname'=>{...}) syntax to augment column definitions.
 
 r8966 at Thesaurus (orig r8953):  rabbit | 2010-03-10 09:34:38 +0100
 docpatch close RT52681
 r8974 at Thesaurus (orig r8961):  rabbit | 2010-03-11 08:08:57 +0100
 Where is my spellchecker (not that it would catch this)
 r9005 at Thesaurus (orig r8992):  caelum | 2010-03-13 00:47:40 +0100
 update Firebird docs
 r9006 at Thesaurus (orig r8993):  mo | 2010-03-13 10:03:24 +0100
 test the dynamic subclassing example
 r9008 at Thesaurus (orig r8995):  mo | 2010-03-13 13:09:59 +0100
 call inflate_result on new_result, but not from the CDBI compat layer
 r9009 at Thesaurus (orig r8996):  mo | 2010-03-13 13:37:40 +0100
 reverting 8995, was supposed to go to a branch
 r9010 at Thesaurus (orig r8997):  nigel | 2010-03-14 18:09:26 +0100
 Corrected a link to connect_info in Manual::Intro
 r9018 at Thesaurus (orig r9005):  rabbit | 2010-03-15 14:55:17 +0100
 Proper fix for RETURNING with default insert
 r9026 at Thesaurus (orig r9013):  nigel | 2010-03-15 18:36:44 +0100
 Documentation on Unicode use with DBIC
 r9027 at Thesaurus (orig r9014):  rabbit | 2010-03-16 02:55:27 +0100
 Horrible horrible rewrite of the aliastype scanner, but folks are starting to complain that their unqualified columns are making joins go away (this was the initial idea). Hopefully this code will silently die some day. /me can haz shame
 r9028 at Thesaurus (orig r9015):  rabbit | 2010-03-16 16:49:45 +0100
 Regenerate test DDL
 r9029 at Thesaurus (orig r9016):  caelum | 2010-03-16 22:01:21 +0100
 _ping for MSSQL
 r9030 at Thesaurus (orig r9017):  caelum | 2010-03-17 11:49:51 +0100
 add connect_call_use_foreign_keys for SQLite
 r9031 at Thesaurus (orig r9018):  abraxxa | 2010-03-17 16:36:13 +0100
 fixed Alexander Hartmaier's mail address
 
 r9039 at Thesaurus (orig r9026):  frew | 2010-03-18 15:59:55 +0100
 use update instead of set_columns in update_all
 r9040 at Thesaurus (orig r9027):  frew | 2010-03-18 20:53:28 +0100
 Ch Ch Ch Ch Changes!
 r9041 at Thesaurus (orig r9028):  caelum | 2010-03-19 16:03:41 +0100
 POD fixups
 r9042 at Thesaurus (orig r9029):  rabbit | 2010-03-19 18:39:02 +0100
 Fix UTF8Column out of order loading warning
 r9043 at Thesaurus (orig r9030):  rabbit | 2010-03-20 09:00:00 +0100
 Something is wrong with HRI inflation - too slow
 r9044 at Thesaurus (orig r9031):  rabbit | 2010-03-20 09:26:12 +0100
 Extend benchmark
 r9045 at Thesaurus (orig r9032):  rabbit | 2010-03-20 09:41:30 +0100
 MOAR bench
 r9048 at Thesaurus (orig r9035):  caelum | 2010-03-22 16:10:38 +0100
 redo Pg auto-columns using INSERT RETURNING
 r9049 at Thesaurus (orig r9036):  caelum | 2010-03-22 16:45:55 +0100
 move INSERT ... RETURNING code into ::DBI::InsertReturning component for Pg and Firebird
 r9050 at Thesaurus (orig r9037):  rabbit | 2010-03-22 18:03:13 +0100
 Even cleaner way of handling returning (no column interrogation in storage)
 r9051 at Thesaurus (orig r9038):  caelum | 2010-03-22 23:43:19 +0100
 update proxied methods for DBI::Replicated
 r9052 at Thesaurus (orig r9039):  caelum | 2010-03-23 06:56:12 +0100
 fix sort
 r9056 at Thesaurus (orig r9043):  rabbit | 2010-03-24 11:27:37 +0100
 A better illustration how to add relationships at runtime
 r9057 at Thesaurus (orig r9044):  rabbit | 2010-03-24 11:33:04 +0100
 Clearer 'no such rel' errors, correct exception on pkless prefetch
 r9058 at Thesaurus (orig r9045):  rabbit | 2010-03-24 11:44:50 +0100
 One missed step
 r9059 at Thesaurus (orig r9046):  ribasushi | 2010-03-24 12:11:12 +0100
 Straight_join support RT55579
 r9060 at Thesaurus (orig r9047):  rabbit | 2010-03-24 12:43:02 +0100
 bump SQLA dep
 r9061 at Thesaurus (orig r9048):  ribasushi | 2010-03-24 14:10:33 +0100
 Really fix INSERT RETURNING - simply make it a flag on the storage and keep the machinery in core
 r9062 at Thesaurus (orig r9049):  rabbit | 2010-03-24 14:30:17 +0100
 Cosmetics + changes
 r9063 at Thesaurus (orig r9050):  caelum | 2010-03-24 20:44:15 +0100
 Pg version check for can_insert_returning
 r9064 at Thesaurus (orig r9051):  caelum | 2010-03-24 21:25:24 +0100
 collect _server_info on connection
 r9065 at Thesaurus (orig r9052):  caelum | 2010-03-24 21:49:38 +0100
 s/_get_server_info/_populate_server_info/
 r9066 at Thesaurus (orig r9053):  caelum | 2010-03-25 01:24:09 +0100
 remove _get_mssql_version
 r9067 at Thesaurus (orig r9054):  caelum | 2010-03-25 06:32:51 +0100
 minor fix for SQLite version check
 r9068 at Thesaurus (orig r9055):  caelum | 2010-03-25 07:37:36 +0100
 add storage->_server_info->{dbms_ver_normalized}
 r9069 at Thesaurus (orig r9056):  caelum | 2010-03-26 09:55:46 +0100
 a couple minor Informix fixes
 r9070 at Thesaurus (orig r9057):  caelum | 2010-03-26 10:55:55 +0100
 savepoints for Informix
 r9071 at Thesaurus (orig r9058):  caelum | 2010-03-26 12:23:26 +0100
 InflateColumn::DateTime support for Informix
 r9072 at Thesaurus (orig r9059):  caelum | 2010-03-26 15:08:16 +0100
 with_deferred_fk_checks for Informix
 r9073 at Thesaurus (orig r9060):  caelum | 2010-03-26 15:28:24 +0100
 minor cleanups
 r9074 at Thesaurus (orig r9061):  castaway | 2010-03-26 21:16:44 +0100
 Added clarification of quoting to cookbook pod for sql funcs, from metaperl
 
 r9075 at Thesaurus (orig r9062):  caelum | 2010-03-27 00:12:37 +0100
 missing local
 r9076 at Thesaurus (orig r9063):  caelum | 2010-03-27 00:19:56 +0100
 move warning suppression into ::DBI::InterBase
 r9077 at Thesaurus (orig r9064):  caelum | 2010-03-27 00:30:02 +0100
 a bit cleaner warning suppression for DBD::InterBase only
 r9083 at Thesaurus (orig r9070):  rabbit | 2010-03-29 10:12:44 +0200
 pod error
 r9092 at Thesaurus (orig r9079):  boghead | 2010-04-02 22:44:32 +0200
 - Minor language cleanup in some of the Cookbook documentation
   (thanks metaperl and jester)
 - Fix the synopsis for DBIC::Storage::DBI.  ->datetime_parser returns a class,
   so you need to call a method on it in order to transform a DateTime object
 
 
 r9096 at Thesaurus (orig r9083):  ribasushi | 2010-04-05 21:53:13 +0200
 Minor test cleanups
 r9097 at Thesaurus (orig r9084):  caelum | 2010-04-05 22:08:48 +0200
 fix test count
 r9098 at Thesaurus (orig r9085):  ribasushi | 2010-04-06 05:36:04 +0200
Fix embarrassing join optimizer bug
 r9112 at Thesaurus (orig r9099):  caelum | 2010-04-07 02:13:38 +0200
 UUID support for SQL Anywhere
 r9114 at Thesaurus (orig r9101):  caelum | 2010-04-07 19:23:53 +0200
 clean up UUID stringification for SQL Anywhere
 r9115 at Thesaurus (orig r9102):  rabbit | 2010-04-08 11:36:35 +0200
 Fix utf8columns loading-order test/code (really just as POC at this point)
 r9116 at Thesaurus (orig r9103):  ribasushi | 2010-04-08 12:10:12 +0200
 Make the insert_returning capability private (and saner naming)
 r9117 at Thesaurus (orig r9104):  rabbit | 2010-04-08 12:36:06 +0200
 Refactor the version handling
 Clean up normalization wrt non-numeric version parts (i.e. mysql)
 r9118 at Thesaurus (orig r9105):  ribasushi | 2010-04-08 12:56:33 +0200
 Even safer version normalization
 r9119 at Thesaurus (orig r9106):  rabbit | 2010-04-08 13:16:19 +0200
 Changes
 r9121 at Thesaurus (orig r9108):  caelum | 2010-04-08 18:17:29 +0200
 syntax error
 r9122 at Thesaurus (orig r9109):  caelum | 2010-04-08 18:38:59 +0200
 use min dbms_version for ::Replicated
 r9123 at Thesaurus (orig r9110):  matthewt | 2010-04-08 19:19:58 +0200
 fix POD links
 r9126 at Thesaurus (orig r9113):  rabbit | 2010-04-09 13:29:38 +0200
 Test to show utf8columns being indeed broken (sqlite papers over it)
 r9127 at Thesaurus (orig r9114):  rabbit | 2010-04-09 14:16:23 +0200
 Use a sloppy but recommended fix for Test warnings
 r9128 at Thesaurus (orig r9115):  ribasushi | 2010-04-11 10:43:56 +0200
 RT 55865
 r9135 at Thesaurus (orig r9122):  frew | 2010-04-11 19:28:54 +0200
 bump SQLA dep
 r9136 at Thesaurus (orig r9123):  rabbit | 2010-04-11 19:32:20 +0200
 Warn about both UTF8Columns and ForceUTF8 when loaded improperly
 r9137 at Thesaurus (orig r9124):  rabbit | 2010-04-11 20:35:53 +0200
 Deprecate UTF8Columns with a lot of warning whistles
 r9138 at Thesaurus (orig r9125):  frew | 2010-04-11 20:51:23 +0200
 Release 0.08121
 r9139 at Thesaurus (orig r9126):  frew | 2010-04-11 20:54:43 +0200
 set version for dev users
 r9146 at Thesaurus (orig r9133):  caelum | 2010-04-12 20:23:11 +0200
 better way to find minimal dbms version in ::Replicated
 r9155 at Thesaurus (orig r9142):  rabbit | 2010-04-14 15:41:51 +0200
 Add forgotten changes
 r9156 at Thesaurus (orig r9143):  caelum | 2010-04-14 17:04:00 +0200
 support $ENV{DBI_DSN} and $ENV{DBI_DRIVER} (patch from Possum)
 r9157 at Thesaurus (orig r9144):  rabbit | 2010-04-14 17:50:58 +0200
 Fix exception message
 r9190 at Thesaurus (orig r9177):  caelum | 2010-04-15 01:41:26 +0200
 datetime millisecond precision for MSSQL
 r9200 at Thesaurus (orig r9187):  ribasushi | 2010-04-18 23:06:29 +0200
 Fix leftover tabs
 r9201 at Thesaurus (orig r9188):  castaway | 2010-04-20 08:06:26 +0200
 Warn if a class found in ResultSet/ is not a subclass of ::ResultSet
 
 r9203 at Thesaurus (orig r9190):  rbuels | 2010-04-20 21:12:22 +0200
 create_ddl_dir mkpaths its dir if necessary.  also, added storage/deploy.t as place to put deployment tests
 r9204 at Thesaurus (orig r9191):  rbuels | 2010-04-20 21:20:06 +0200
 do not croak, rbuels!  jeez.
 r9205 at Thesaurus (orig r9192):  castaway | 2010-04-21 08:03:08 +0200
 Added missing test file (oops)
 
 r9213 at Thesaurus (orig r9200):  rabbit | 2010-04-24 02:23:05 +0200
 10% speed up on quoted statement generation
 r9215 at Thesaurus (orig r9202):  rabbit | 2010-04-24 02:27:47 +0200
 Revert bogus commit
 r9216 at Thesaurus (orig r9203):  ribasushi | 2010-04-24 02:31:06 +0200
 _quote is now properly handled in SQLA
 r9217 at Thesaurus (orig r9204):  caelum | 2010-04-24 02:32:58 +0200
 add "IMPROVING PERFORMANCE" section to Cookbook
 r9231 at Thesaurus (orig r9218):  ribasushi | 2010-04-26 13:13:13 +0200
 Bump CAG and SQLA dependencies
 r9232 at Thesaurus (orig r9219):  ribasushi | 2010-04-26 15:27:38 +0200
 Bizarre fork failure
 r9233 at Thesaurus (orig r9220):  castaway | 2010-04-26 21:45:32 +0200
 Add tests using select/as to sqlahacks
 
 r9234 at Thesaurus (orig r9221):  castaway | 2010-04-26 21:49:10 +0200
 Add test for fetching related obj/col as well
 
 r9245 at Thesaurus (orig r9232):  abraxxa | 2010-04-27 15:58:56 +0200
 fixed missing ' in update_or_create with key attr example
 
 r9247 at Thesaurus (orig r9234):  ribasushi | 2010-04-27 16:53:06 +0200
 Better concurrency in test (parent blocks)
 r9248 at Thesaurus (orig r9235):  ribasushi | 2010-04-27 16:53:34 +0200
 Reformat tests/comments a bit
 r9249 at Thesaurus (orig r9236):  ribasushi | 2010-04-27 18:40:10 +0200
 Better comment
 r9250 at Thesaurus (orig r9237):  ribasushi | 2010-04-27 18:40:31 +0200
 Rename test
 r9251 at Thesaurus (orig r9238):  ribasushi | 2010-04-27 19:11:45 +0200
 Fix global destruction problems
 r9271 at Thesaurus (orig r9258):  ribasushi | 2010-04-28 11:10:00 +0200
 Refactor SQLA/select interaction (in reality just cleanup)
 r9272 at Thesaurus (orig r9259):  caelum | 2010-04-28 11:20:08 +0200
 update ::DBI::Replicated
 r9273 at Thesaurus (orig r9260):  caelum | 2010-04-28 12:20:01 +0200
 add _verify_pid and _verify_tid to methods that croak in ::Replicated
 r9274 at Thesaurus (orig r9261):  ribasushi | 2010-04-28 14:39:02 +0200
 Fix failing test and some warnings
 r9288 at Thesaurus (orig r9275):  rabbit | 2010-04-29 10:32:10 +0200
 Allow limit syntax change in-flight without digging into internals
 r9292 at Thesaurus (orig r9279):  castaway | 2010-04-30 12:26:52 +0200
 Argh.. committing missing test file for load_namespaces tests
 
 r9295 at Thesaurus (orig r9282):  rabbit | 2010-05-01 11:06:21 +0200
 The final version of the test
 r9309 at Thesaurus (orig r9296):  rabbit | 2010-05-04 09:44:51 +0200
 Test for RT#56257
 r9310 at Thesaurus (orig r9297):  rabbit | 2010-05-04 10:00:11 +0200
 Refactor count handling, make count-resultset attribute lists inclusive rather than exclusive (side effect - solves RT#56257
 r9318 at Thesaurus (orig r9305):  rabbit | 2010-05-05 11:49:51 +0200
  r9296 at Thesaurus (orig r9283):  ribasushi | 2010-05-01 11:51:15 +0200
  Branch to clean up various limit dialects
  r9297 at Thesaurus (orig r9284):  rabbit | 2010-05-01 11:55:04 +0200
  Preliminary version
  r9301 at Thesaurus (orig r9288):  rabbit | 2010-05-03 18:31:24 +0200
  Fix incorrect comparison
  r9302 at Thesaurus (orig r9289):  rabbit | 2010-05-03 18:32:36 +0200
  Do not add TOP prefixes to queries already containing it
  r9303 at Thesaurus (orig r9290):  rabbit | 2010-05-03 18:33:15 +0200
  Add an as selector to a prefetch subquery to aid the subselecting-limit analyzer
  r9304 at Thesaurus (orig r9291):  rabbit | 2010-05-03 18:34:49 +0200
  Rewrite mssql test to verify both types of limit dialects with and without quoting, rewrite the RNO, Top and RowNum dialects to rely on a factored out column re-aliaser
  r9305 at Thesaurus (orig r9292):  rabbit | 2010-05-03 21:06:01 +0200
  Fix Top tests, make extra col selector order consistent
  r9307 at Thesaurus (orig r9294):  ribasushi | 2010-05-04 00:50:35 +0200
  Fix test warning
  r9308 at Thesaurus (orig r9295):  ribasushi | 2010-05-04 01:04:32 +0200
  Some databases (db2) do not like leading __s - use a different weird identifier for extra selector names
  r9313 at Thesaurus (orig r9300):  rabbit | 2010-05-05 11:08:33 +0200
  Rename test
  r9314 at Thesaurus (orig r9301):  rabbit | 2010-05-05 11:11:32 +0200
  If there was no offset, there is no sense in reordering
  r9315 at Thesaurus (orig r9302):  rabbit | 2010-05-05 11:12:19 +0200
  Split and fix oracle tests
  r9317 at Thesaurus (orig r9304):  rabbit | 2010-05-05 11:49:33 +0200
  Changes
 
 r9321 at Thesaurus (orig r9308):  rabbit | 2010-05-05 13:01:35 +0200
 Changes
 r9322 at Thesaurus (orig r9309):  rabbit | 2010-05-05 13:02:39 +0200
Fix obscure bug with as_subselect_rs (gah wrong commit msg)
 r9323 at Thesaurus (orig r9310):  rabbit | 2010-05-05 14:56:38 +0200
 Forgotten pieces
 r9329 at Thesaurus (orig r9316):  rabbit | 2010-05-07 10:15:52 +0200
 Failure to determine dbms version is *not* a fatal error - trap exceptions
 r9330 at Thesaurus (orig r9317):  caelum | 2010-05-07 11:57:24 +0200
 detect row_number() over support in MSSQL if version detection fails
 r9331 at Thesaurus (orig r9318):  caelum | 2010-05-07 14:56:57 +0200
 minor change
 r9332 at Thesaurus (orig r9319):  nigel | 2010-05-07 15:03:00 +0200
 empty update OK even if row is not in database
 r9333 at Thesaurus (orig r9320):  nigel | 2010-05-07 15:28:06 +0200
 Added reference to cascade_* in relationship attributes
 r9334 at Thesaurus (orig r9321):  nigel | 2010-05-07 15:39:37 +0200
 empty update OK even if row is not in database (fixed)
 r9335 at Thesaurus (orig r9322):  nigel | 2010-05-07 15:48:19 +0200
 empty update OK even if row is not in database (fixed2)
 r9336 at Thesaurus (orig r9323):  nigel | 2010-05-07 15:54:36 +0200
 Clarification to cascade_update attribute documentation
 r9337 at Thesaurus (orig r9324):  nigel | 2010-05-07 16:08:17 +0200
 Clarification cascade_* attribute defaults documentation
 r9350 at Thesaurus (orig r9337):  rabbit | 2010-05-08 11:23:56 +0200
 Make sure missing author-deps do not kill makefile creation
 r9358 at Thesaurus (orig r9344):  rabbit | 2010-05-11 16:46:47 +0200
  r9147 at Thesaurus (orig r9134):  frew | 2010-04-13 16:54:24 +0200
  branch for FilterColumn
  r9148 at Thesaurus (orig r9135):  frew | 2010-04-13 18:09:57 +0200
  change names wrap accessors
  r9158 at Thesaurus (orig r9145):  frew | 2010-04-14 17:55:14 +0200
  basic tests and a tiny fix
  r9159 at Thesaurus (orig r9146):  frew | 2010-04-14 19:30:46 +0200
  working filter column impl
  r9160 at Thesaurus (orig r9147):  frew | 2010-04-14 19:31:18 +0200
  useless var
  r9161 at Thesaurus (orig r9148):  frew | 2010-04-14 20:10:57 +0200
  MultiCreate test
  r9163 at Thesaurus (orig r9150):  frew | 2010-04-14 20:22:10 +0200
  test db in MC
  r9178 at Thesaurus (orig r9165):  rabbit | 2010-04-14 23:35:00 +0200
  Not sure how this was never noticed, but it definitely doesn't seem right and all tests pass...
  r9191 at Thesaurus (orig r9178):  frew | 2010-04-15 06:34:16 +0200
  better namiology
  r9193 at Thesaurus (orig r9180):  frew | 2010-04-15 16:14:28 +0200
  method and arg rename
  r9194 at Thesaurus (orig r9181):  frew | 2010-04-15 16:35:25 +0200
  use result source for filtering instead of result
  r9195 at Thesaurus (orig r9182):  frew | 2010-04-15 17:04:38 +0200
  initial stab at incomplete docs
  r9278 at Thesaurus (orig r9265):  frew | 2010-04-28 22:05:36 +0200
  doc, removal of source stuff, and Changes
  r9324 at Thesaurus (orig r9311):  frew | 2010-05-06 01:49:25 +0200
  test caching
  r9327 at Thesaurus (orig r9314):  rabbit | 2010-05-06 16:30:36 +0200
  Play nicer with lower-level methods
  r9328 at Thesaurus (orig r9315):  frew | 2010-05-07 04:27:18 +0200
  no filter and inflate column
  r9352 at Thesaurus (orig r9339):  rabbit | 2010-05-10 13:40:00 +0200
  Maintain full coherence between filtered cache and unfiltered results, including store_column
  r9353 at Thesaurus (orig r9340):  rabbit | 2010-05-10 13:40:48 +0200
  Fix typo
  r9357 at Thesaurus (orig r9343):  rabbit | 2010-05-11 16:45:50 +0200
  Comment weird looking code
 
 r9360 at Thesaurus (orig r9346):  caelum | 2010-05-11 17:44:15 +0200
 clearer logic
 r9364 at Thesaurus (orig r9350):  wreis | 2010-05-12 03:44:39 +0200
 add failing test for order_by using a function
 r9378 at Thesaurus (orig r9364):  rabbit | 2010-05-14 11:57:45 +0200
 cleanup test by wreis
 r9396 at Thesaurus (orig r9382):  rabbit | 2010-05-15 17:50:58 +0200
 Fix stupid typo-bug
 r9397 at Thesaurus (orig r9383):  rabbit | 2010-05-15 18:04:59 +0200
 Revert erroneous commit (belongs in a branch)
 r9402 at Thesaurus (orig r9388):  ash | 2010-05-16 12:28:13 +0200
 Fix how Schema::Versioned gets connection attributes
 r9408 at Thesaurus (orig r9394):  caelum | 2010-05-16 19:29:14 +0200
 add sql_maker to @rdbms_specific_methods



Property changes on: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps
___________________________________________________________________
Modified: svk:merge
   - 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7237
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:5651
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
   + 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/cookbook_fixes:7657
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/prefetch_bug-unqualified_column_in_search_related_cond:7959
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/void_populate_resultset_cond:7935
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7982
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/ado_mssql:7886
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/autocast:7418
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/chaining_fixes:8626
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/connect_info_hash:7435
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/connected_schema_leak:8264
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cookbook_fixes:7479
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/create_scalarref_rt51559:8027
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/dbicadmin_pod:8786
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/dbicadmin_refactor:8691
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/dephandling:8674
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/filter_column:9343
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/get_inflated_columns_rt46953:7964
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_has_many_join:7382
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/handle_all_storage_methods_in_replicated:8612
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/ic_dt_post_inflate:8517
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/informix:8434
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/is_resultset_paginated:7769
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_limit_regression:8278
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_rno_pagination:8054
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multiple_version_upgrade:8429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/no_duplicate_indexes_for_pk_cols:8373
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/normalize_connect_info:8274
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/null_column_regression:8314
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_quotes:8812
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_shorten_aliases:8234
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7842
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pod_fixes:8902
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch-group_by:7917
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_bug-unqualified_column_in_search_related_cond:7900
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_pager:8431
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pri_key_refactor:8751
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqlt_parser_view:8145
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-interbase:8929
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subqueried_limit_fixes:9304
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:7682
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_asa:8513
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulk_insert:7679
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulkinsert_support:7796
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_computed_columns:8496
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_refactor:7940
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_support:7797
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/view_rels:7908
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/void_populate_resultset_cond:7944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:9394
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/.gitignore
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/.gitignore	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/.gitignore	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,7 @@
+META.yml
+Makefile
+README
+blib/
+inc/
+pm_to_blib
+t/var/

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/Changes
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/Changes	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/Changes	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,5 +1,243 @@
 Revision history for DBIx::Class
 
+        - Add a warning to load_namespaces if a class in ResultSet/
+          is not a subclass of DBIx::Class::ResultSet
+        - ::Storage::DBI now correctly preserves a parent $dbh from
+          terminating children, even during interpreter-global
+          out-of-order destruction
+        - Add DBIx::Class::FilterColumn for non-ref filtering
+        - InflateColumn::DateTime support for MSSQL via DBD::Sybase
+        - Millisecond precision support for MSSQL datetimes for
+          InflateColumn::DateTime
+        - Support connecting using $ENV{DBI_DSN} and $ENV{DBI_DRIVER}
+        - current_source_alias method on ResultSet objects to
+          determine the alias to use in programmatically assembled
+          search()es (originally added in 0.08100 but unmentioned)
+        - Rewrite/unification of all subselecting limit emulations
+          (RNO, Top, RowNum) to be much more robust wrt complex joined
+          resultsets
+        - MSSQL limits now don't require nearly as many applications of
+          the unsafe_subselect_ok attribute, due to optimized queries
+        - Fix as_subselect_rs to not inject resultset class-wide where
+          conditions outside of the resulting subquery
+        - Fix nasty potentially data-eating bug when deleting/updating
+          a limited resultset
+        - Depend on optimized SQL::Abstract (faster SQL generation)
+        - update() on row not in_storage no longer throws an exception
+          if there are no dirty columns to update (fixes cascaded update
+          annoyances)
+        - Update Schema::Versioned to respect hashref style of connection_info
+
+0.08121 2010-04-11 18:43:00 (UTC)
+        - Support for Firebird RDBMS with DBD::InterBase and ODBC
+        - Add core support for INSERT RETURNING (for storages that
+          supports this syntax, currently PostgreSQL and Firebird)
+        - Fix spurious warnings on multiple UTF8Columns component loads
+        - DBIx::Class::UTF8Columns entered deprecated state
+        - DBIx::Class::InflateColumn::File entered deprecated state
+        - DBIx::Class::Optional::Dependencies left experimental state
+        - Add req_group_list to Opt::Deps (RT#55211)
+        - Add support for mysql-specific STRAIGHT_JOIN (RT#55579)
+        - Cascading delete/update are now wrapped in a transaction
+          for atomicity
+        - Fix accidental autovivification of ENV vars
+        - Fix update_all and delete_all to be wrapped in a transaction
+        - Fix multiple deficiencies when using MultiCreate with
+          data-encoder components (e.g. ::EncodedColumn)
+        - Fix regression where SQL files with comments were not
+          handled properly by ::Schema::Versioned.
+        - Fix regression on not properly throwing when $obj->relationship
+          is unresolvable
+        - Fix the join-optimiser to consider unqualified column names
+          whenever possible
+        - Fix an issue with multiple same-table joins confusing the join
+          optimizer
+        - Add has_relationship method to row objects
+        - Fix regression in set_column on PK-less objects
+        - Better error text on malformed/missing relationships
+        - Add POD about the significance of PK columns
+        - Fix for SQLite to ignore the (unsupported) { for => ... }
+          attribute
+        - Fix ambiguity in default directory handling of create_ddl_dir
+          (RT#54063)
+        - Support add_columns('+colname' => { ... }) to augment column
+          definitions.
+
+0.08120 2010-02-24 08:58:00 (UTC)
+        - Make sure possibly overwritten deployment_statements methods in
+          schemas get called on $schema->deploy
+        - Fix count() with group_by aliased-function resultsets
+        - with_deferred_fk_checks() Oracle support
+        - Massive refactor and cleanup of primary key handling
+        - Fixed regression losing custom result_class (really this time)
+          (RT#54697)
+        - Fixed regression in DBIC SQLT::Parser failing with a classname
+          (as opposed to a schema object)
+        - Changes to Storage::DBI::Oracle to accommodate changes in latest
+          SQL::Translator (quote handling)
+        - Make sure deployment_statements is per-storage overridable
+        - Fix dbicadmin's (lack of) POD
+
+0.08119 2010-02-15 09:36:00 (UTC)
+        - Add $rs->is_ordered to test for existing order_by on a resultset
+        - Add as_subselect_rs to DBIC::ResultSet from
+          DBIC::Helper::ResultSet::VirtualView::as_virtual_view
+        - Refactor dbicadmin adding DDL manipulation capabilities
+        - New optional dependency manager to aid extension writers
+        - Depend on newest bugfixed Moose
+        - Make resultset chaining consistent wrt selection specification
+        - Storage::DBI::Replicated cleanup
+        - Fix autoinc PKs without an autoinc flag on Sybase ASA
+
+0.08118 2010-02-08 11:53:00 (UTC)
+        - Fix a bug causing UTF8 columns not to be decoded (RT#54395)
+        - Fix bug in One->Many->One prefetch-collapse handling (RT#54039)
+        - Cleanup handling of relationship accessor types
+
+0.08117 2010-02-05 17:10:00 (UTC)
+        - Perl 5.8.1 is now the minimum supported version
+        - Massive optimization of the join resolution code - now joins
+          will be removed from the resulting SQL if DBIC can prove they
+          are not referenced by anything
+        - Subqueries no longer marked experimental
+        - Support for Informix RDBMS (limit/offset and auto-inc columns)
+        - Support for Sybase SQLAnywhere, both native and via ODBC
+        - might_have/has_one now warn if applied calling class's column
+          has is_nullable set to true.
+        - Fixed regression in deploy() with a {sources} table limit applied
+          (RT#52812)
+        - Views without a view_definition will throw an exception when
+          parsed by SQL::Translator::Parser::DBIx::Class
+        - Stop the SQLT parser from auto-adding indexes identical to the
+          Primary Key
+        - InflateColumn::DateTime refactoring to allow fine grained method
+          overloads
+        - Fix ResultSetColumn improperly selecting more than the requested
+          column when +columns/+select is present
+        - Fix failure when update/delete of resultsets with complex WHERE
+          SQLA structures
+        - Fix regression in context sensitiveness of deployment_statements
+        - Fix regression resulting in overcomplicated query on
+          search_related from prefetching resultsets
+        - Fix regression on all-null returning searches (properly switch
+          LEFT JOIN to JOIN in order to distinguish between both cases)
+        - Fix regression in grouped resultset count() used on strict-mode
+          MySQL connections
+        - Better isolation of RNO-limited queries from the rest of a
+          prefetching resultset
+        - New MSSQL specific resultset attribute to allow hacky ordered
+          subquery support
+        - Fix nasty schema/dbhandle leak due to SQL::Translator
+        - Initial implementation of a mechanism for Schema::Version to
+          apply multiple step upgrades
+        - Fix regression on externally supplied $dbh with AutoCommit=0
+        - FAQ "Custom methods in Result classes"
+        - Cookbook POD fix for add_drop_table instead of add_drop_tables
+        - Schema POD improvement for dclone
+
+0.08115 2009-12-10 09:02:00 (CST)
+        - Real limit/offset support for MSSQL server (via Row_Number)
+        - Fix distinct => 1 with non-selecting order_by (the columns
+          in order_by also need to be added to the resulting group_by)
+        - Do not attempt to deploy FK constraints pointing to a View
+        - Fix count/objects from search_related on limited resultset
+        - Stop propagating distinct => 1 over search_related chains
+        - Make sure populate() inherits the resultset conditions just
+          like create() does
+        - Make get_inflated_columns behave identically to get_columns
+          wrt +select/+as (RT#46953)
+        - Fix problems with scalarrefs under InflateColumn (RT#51559)
+        - Throw exception on delete/update of PK-less resultsets
+        - Refactored Sybase storage driver into a central ::DBI::Sybase
+          dispatcher, and a sybase-specific ::DBI::Sybase::ASE
+        - Fixed an atrocious DBD::ADO bind-value bug
+        - Cookbook/Intro POD improvements
+
+0.08114 2009-11-14 17:45:00 (UTC)
+        - Preliminary support for MSSQL via DBD::ADO
+        - Fix botched 0.08113 release (invalid tarball)
+
+0.08113 2009-11-13 23:13:00 (UTC)
+        - Fix populate with has_many bug
+          (RT #50828)
+        - Fix Oracle autoincrement broken for Resultsets with scalar refs
+          (RT #50874)
+        - Complete Sybase RDBMS support including:
+          - Support for TEXT/IMAGE columns
+          - Support for the 'money' datatype
+          - Transaction savepoints support
+          - DateTime inflation support
+          - Support for bind variables when connecting to a newer Sybase with
+             OpenClient libraries
+          - Support for connections via FreeTDS with CASTs for bind variables
+             when needed
+          - Support for interpolated variables with proper quoting when
+             connecting to an older Sybase and/or via FreeTDS
+          - bulk API support for populate()
+        - Transaction support for MSSQL via DBD::Sybase
+        - Add is_paged method to DBIx::Class::ResultSet so that we can
+          check whether we want a pager
+        - Skip versioning test on really old perls lacking Time::HiRes
+          (RT #50209)
+        - Fixed on_connect_do/call regression when used with a coderef
+          connector (RT #50003)
+        - A couple of fixes to Ordered to remedy subclassing issues
+        - Fixed another lingering problem with PostgreSQL
+          auto-increment support and its interaction with multiple
+          schemas
+        - Remove some IN workarounds, and require a recent version of
+          SQLA instead
+        - Improvements to populate's handling of mixed scalarref values
+        - Fixed regression losing result_class after $rs->find (introduced
+          in 0.08108)
+        - Fix in_storage() to return 1|0 as per existing documentation
+        - Centralize handling of _determine_driver calls prior to certain
+          ::Storage::DBI methods
+        - Fix update/delete arbitrary condition handling (RT#51409)
+        - POD improvements
+
+0.08112 2009-09-21 10:57:00 (UTC)
+        - Remove the recommends from Makefile.PL, DBIx::Class is not
+          supposed to have optional dependencies. ever.
+        - Mangle the DBIx/Class.pm POD to be more clear about
+          copyright and license
+        - Put back PG's multiple autoinc per table support, accidentally
+          dropped during the serial-autodetection rewrite
+        - Make sure ResultSetColumn does not depend on the (undefined)
+          return value of ->cursor->reset()
+        - Add single() to ResultSetColumn (same semantics as ResultSet)
+        - Make sure to turn off IDENTITY_INSERT after insert() on MSSQL
+          tables that needed it
+        - More informative exception on failing _resolve_relationship
+        - Allow undef/NULL as the sole grouping value in Ordered
+        - Fix unreported rollback exceptions in TxnScopeGuard
+        - Fix overly-eager left-join chain enforcing code
+        - Warn about using distinct with an existing group_by
+        - Warn about attempting to $rs->get_column a non-unique column
+          when has_many joins are added to resultset
+        - Refactor of the exception handling system (now everything is a
+          DBIx::Class::Exception object)
+
+0.08111 2009-09-06 21:58:00 (UTC)
+        - The hashref to connection_info now accepts a 'dbh_maker'
+          coderef, allowing better integration with Catalyst
+        - Fixed a complex prefetch + regular join regression introduced
+          in 0.08108
+        - Fixed insert_bulk rebless handling
+        - Fixed Storable roundtrip regression, and general serialization
+          cleanup
+        - SQLT related fixes:
+          - sqlt_type is now called on the correct storage object
+          - hooks can now see the correct producer_type (RT#47891)
+          - optional SQLT requirements for e.g. deploy() bumped to 0.11002
+        - Really fixed (and greatly cleaned up) postgresql autoinc sequence
+          autodetection
+        - Automatically detect MySQL v3 and use INNER JOIN instead of JOIN
+        - POD improvements (including RT#48769)
+        - Test suite tweaks (including fixes for recent CPANTS fails)
+        - Better support for MSSQL IDENTITY_INSERT ON
+
+0.08109 2009-08-18 08:35:00 (UTC)
         - Replication updates:
           - Improved the replication tests so that they are more reliable
             and accurate, and hopefully solve some cross platform issues.
@@ -21,7 +259,7 @@
         - Support for MSSQL 'money' type
         - Support for 'smalldatetime' type used in MSSQL and Sybase for
           InflateColumn::DateTime
-        - support for Postgres 'timestamp without timezone' type in
+        - Support for Postgres 'timestamp without timezone' type in
           InflateColumn::DateTime (RT#48389)
         - Added new MySQL specific on_connect_call macro 'set_strict_mode'
           (also known as make_mysql_not_suck_as_much)
@@ -38,6 +276,8 @@
         - Some fixes of multi-create corner cases
         - Multiple POD improvements
         - Added exception when resultset is called without an argument
+        - Improved support for non-schema-qualified tables under
+          Postgres (fixed last_insert_id sequence name auto-detection)
 
 0.08108 2009-07-05 23:15:00 (UTC)
         - Fixed the has_many prefetch with limit/group deficiency -
@@ -58,7 +298,7 @@
           nonexisting prefetch
         - make_column_dirty() now overwrites the deflated value with an
           inflated one if such exists
-        - Fixed set_$rel with where restriction deleting rows outside 
+        - Fixed set_$rel with where restriction deleting rows outside
           the restriction
         - populate() returns the created objects or an arrayref of the
           created objects depending on scalar vs. list context
@@ -110,7 +350,7 @@
           side of the relation, to avoid duplicates
         - DBIC now properly handles empty inserts (invoking all default
           values from the DB, normally via INSERT INTO tbl DEFAULT VALUES
-        - Fix find_or_new/create to stop returning random rows when 
+        - Fix find_or_new/create to stop returning random rows when
           default value insert is requested (RT#28875)
         - Make IC::DT extra warning state the column name too
         - It is now possible to transparrently search() on columns
@@ -132,9 +372,9 @@
         - Change ->count code to work correctly with DISTINCT (distinct => 1)
           via GROUP BY
         - Removed interpolation of bind vars for as_query - placeholders
-          are preserved and nested query bind variables are properly 
+          are preserved and nested query bind variables are properly
           merged in the correct order
-        - Refactor DBIx::Class::Storage::DBI::Sybase to automatically 
+        - Refactor DBIx::Class::Storage::DBI::Sybase to automatically
           load a subclass, namely Microsoft_SQL_Server.pm
           (similar to DBIx::Class::Storage::DBI::ODBC)
         - Refactor InflateColumn::DateTime to allow components to
@@ -197,7 +437,7 @@
           - not try and insert things tagged on via new_related unless required
         - Possible to set locale in IC::DateTime extra => {} config
         - Calling the accessor of a belongs_to when the foreign_key
-          was NULL and the row was not stored would unexpectedly fail 
+          was NULL and the row was not stored would unexpectedly fail
         - Split sql statements for deploy only if SQLT::Producer returned a scalar
           containing all statements to be executed
         - Add as_query() for ResultSet and ResultSetColumn. This makes subqueries
@@ -225,8 +465,8 @@
         - new order_by => { -desc => 'colname' } syntax supported
         - PG array datatype supported
         - insert should use store_column, not set_column to avoid marking
-          clean just-stored values as dirty. New test for this 
-        - regression test for source_name 
+          clean just-stored values as dirty. New test for this
+        - regression test for source_name
 
 0.08099_05 2008-10-30 21:30:00 (UTC)
         - Rewrite of Storage::DBI::connect_info(), extended with an
@@ -240,7 +480,7 @@
         - Fixed up related resultsets and multi-create
         - Fixed superfluous connection in ODBC::_rebless
         - Fixed undef PK for first insert in ODBC::Microsoft_SQL_Server
-        - Added virtual method to Versioned so a user can create upgrade 
+        - Added virtual method to Versioned so a user can create upgrade
           path across multiple versions (jgoulah)
         - Better (and marginally faster) implementation of the HashRefInflator
           hash construction algorithm
@@ -249,7 +489,7 @@
 
 0.08099_04 2008-07-24 01:00:00
         - Functionality to storage to enable a sub to be run without FK checks
-        - Fixed $schema->clone bug which caused clone and source to share 
+        - Fixed $schema->clone bug which caused clone and source to share
           internal hash refs
         - Added register_extra_source methods for additional sources
         - Added datetime_undef_if_invalid for InflateColumn::DateTime to
@@ -275,11 +515,11 @@
         - Add warnings for non-unique ResultSet::find queries
         - Changed Storage::DBI::Replication to Storage::DBI::Replicated and
           refactored support.
-        - By default now deploy/diff et al. will ignore constraint and index 
+        - By default now deploy/diff et al. will ignore constraint and index
           names
         - Add ResultSet::_is_deterministic_value, make new_result filter the
           values passed to new to drop values that would generate invalid SQL.
-        - Use Sub::Name to name closures before installing them. Fixes 
+        - Use Sub::Name to name closures before installing them. Fixes
           incompatibility with Moose method modifiers on generated methods.
 
 0.08010 2008-03-01 10:30
@@ -288,7 +528,7 @@
 0.08009 2008-01-20 13:30
         - Made search_rs smarter about when to preserve the cache to fix
           mm prefetch usage
-        - Added Storage::DBI subclass for MSSQL over ODBC. 
+        - Added Storage::DBI subclass for MSSQL over ODBC.
         - Added freeze, thaw and dclone methods to Schema so that thawed
           objects will get re-attached to the schema.
         - Moved dbicadmin to JSON::Any wrapped JSON.pm for a sane API
@@ -302,20 +542,20 @@
           foreign and self parts the wrong way round in the condition
         - ResultSetColumn::func() now returns all results if called in list
           context; this makes things like func('DISTINCT') work as expected
-        - Many-to-many relationships now warn if the utility methods would 
+        - Many-to-many relationships now warn if the utility methods would
           clash
         - InflateColumn::DateTime now accepts an extra parameter of timezone
           to set timezone on the DT object (thanks Sergio Salvi)
-        - Added sqlt_deploy_hook to result classes so that indexes can be 
+        - Added sqlt_deploy_hook to result classes so that indexes can be
           added.
-        - Added startup checks to warn loudly if we appear to be running on 
+        - Added startup checks to warn loudly if we appear to be running on
           RedHat systems from perl-5.8.8-10 and up that have the bless/overload
           patch applied (badly) which causes 2x -> 100x performance penalty.
           (Jon Schutz)
-        - ResultSource::reverse_relationship_info can distinguish between 
+        - ResultSource::reverse_relationship_info can distinguish between
           sources using the same table
         - Row::insert will now not fall over if passed duplicate related objects
-        - Row::copy will not fall over if you have two relationships to the 
+        - Row::copy will not fall over if you have two relationships to the
           same source with a unique constraint on it
 
 0.08007 2007-09-04 19:36:00
@@ -327,7 +567,7 @@
         - Move to using Class::C3::Componentised
         - Remove warn statement from DBIx::Class::Row
 
-0.08005 2007-08-06 
+0.08005 2007-08-06
         - add timestamp fix re rt.cpan 26978 - no test yet but change
           clearly should cause no regressions
         - provide alias for related_resultset via local() so it's set
@@ -342,7 +582,7 @@
           (original fix from diz)
 
 0.08004 2007-08-06 19:00:00
-        - fix storage connect code to not trigger bug via auto-viv 
+        - fix storage connect code to not trigger bug via auto-viv
           (test from aherzog)
         - fixup cursor_class to be an 'inherited' attr for per-package defaults
         - add default_resultset_attributes entry to Schema

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/MANIFEST.SKIP
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/MANIFEST.SKIP	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/MANIFEST.SKIP	2010-05-17 14:31:46 UTC (rev 9401)
@@ -6,6 +6,9 @@
 \bCVS\b
 ,v$
 \B\.svn\b
+\B\.git\b
+\B\.gitignore\b
+\b_darcs\b
 
 # Avoid Makemaker generated and utility files.
 \bMakefile$

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/Makefile.PL
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/Makefile.PL	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/Makefile.PL	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,163 +1,192 @@
-use inc::Module::Install 0.89;
+use inc::Module::Install 0.93;
 use strict;
 use warnings;
 use POSIX ();
 
-use 5.006001; # delete this line if you want to send patches for earlier.
+use 5.008001;
 
+use FindBin;
+use lib "$FindBin::Bin/lib";
+
+# adjust ENV for $AUTHOR system() calls
+use Config;
+$ENV{PERL5LIB} = join ($Config{path_sep}, @INC);
+
+
+###
+### DO NOT ADD OPTIONAL DEPENDENCIES HERE, EVEN AS recommends()
+### All of them should go to DBIx::Class::Optional::Dependencies
+###
+
 name     'DBIx-Class';
-perl_version '5.006001';
+perl_version '5.008001';
 all_from 'lib/DBIx/Class.pm';
 
+my $build_requires = {
+  'DBD::SQLite'              => '1.25',
+};
 
-test_requires 'Test::Builder'       => 0.33;
-test_requires 'Test::Deep'          => 0;
-test_requires 'Test::Exception'     => 0;
-test_requires 'Test::More'          => 0.92;
-test_requires 'Test::Warn'          => 0.11;
+my $test_requires = {
+  'File::Temp'               => '0.22',
+  'Test::Builder'            => '0.33',
+  'Test::Exception'          => '0',
+  'Test::More'               => '0.92',
+  'Test::Warn'               => '0.21',
+};
 
-test_requires 'File::Temp'          => 0.22;
+my $runtime_requires = {
+  'Carp::Clan'               => '6.0',
+  'Class::Accessor::Grouped' => '0.09003',
+  'Class::C3::Componentised' => '1.0005',
+  'Class::Inspector'         => '1.24',
+  'Data::Page'               => '2.00',
+  'DBI'                      => '1.609',
+  'MRO::Compat'              => '0.09',
+  'Module::Find'             => '0.06',
+  'Path::Class'              => '0.18',
+  'SQL::Abstract'            => '1.66',
+  'SQL::Abstract::Limit'     => '0.13',
+  'Sub::Name'                => '0.04',
+  'Data::Dumper::Concise'    => '1.000',
+  'Scope::Guard'             => '0.03',
+  'Context::Preserve'        => '0.01',
+};
 
+# this is so we can order requires alphabetically
+# copies are needed for author requires injection
+my $reqs = {
+  build_requires => { %$build_requires },
+  requires => { %$runtime_requires },
+  test_requires => { %$test_requires },
+};
 
-# Core
-requires 'List::Util'               => 0;
-requires 'Scalar::Util'             => 0;
-requires 'Storable'                 => 0;
 
-# Perl 5.8.0 doesn't have utf8::is_utf8()
-requires 'Encode'                   => 0 if ($] <= 5.008000);
+# require extra modules for testing if we're in a checkout
+if ($Module::Install::AUTHOR) {
+  warn <<'EOW';
+******************************************************************************
+******************************************************************************
+***                                                                        ***
+*** AUTHOR MODE: all optional test dependencies converted to hard requires ***
+***                                                                        ***
+******************************************************************************
+******************************************************************************
 
-# Dependencies (keep in alphabetical order)
-requires 'Carp::Clan'               => 6.0;
-requires 'Class::Accessor::Grouped' => 0.08003;
-requires 'Class::C3::Componentised' => 1.0005;
-requires 'Class::Inspector'         => 1.24;
-requires 'Data::Page'               => 2.00;
-requires 'DBD::SQLite'              => 1.25;
-requires 'DBI'                      => 1.605;
-requires 'JSON::Any'                => 1.18;
-requires 'MRO::Compat'              => 0.09;
-requires 'Module::Find'             => 0.06;
-requires 'Path::Class'              => 0.16;
-requires 'Scope::Guard'             => 0.03;
-requires 'SQL::Abstract'            => 1.56;
-requires 'SQL::Abstract::Limit'     => 0.13;
-requires 'Sub::Name'                => 0.04;
+EOW
 
-recommends 'SQL::Translator'        => 0.09004;
+  require DBIx::Class::Optional::Dependencies;
+  $reqs->{test_requires} = {
+    %{$reqs->{test_requires}},
+    map { %$_ } (values %{DBIx::Class::Optional::Dependencies->req_group_list}),
+  };
+}
 
-my %replication_requires = (
-  'Moose',                    => 0.87,
-  'MooseX::AttributeHelpers'  => 0.21,
-  'MooseX::Types',            => 0.16,
-  'namespace::clean'          => 0.11,
-  'Hash::Merge',              => 0.11,
-);
+# compose final req list, for alphabetical ordering
+my %final_req;
+for my $rtype (keys %$reqs) {
+  for my $mod (keys %{$reqs->{$rtype}} ) {
 
-my %force_requires_if_author = (
-  %replication_requires,
+    # sanity check req duplications
+    if ($final_req{$mod}) {
+      die "$mod specified as both a '$rtype' and a '$final_req{$mod}[0]'\n";
+    }
 
-#  'Module::Install::Pod::Inherit' => 0.01,
-  'Test::Pod::Coverage'       => 1.04,
-  'SQL::Translator'           => 0.09007,
+    $final_req{$mod} = [ $rtype, $reqs->{$rtype}{$mod}||0 ],
+  }
+}
 
-  # CDBI-compat related
-  'DBIx::ContextualFetch'     => 0,
-  'Class::DBI::Plugin::DeepAbstractSearch' => 0,
-  'Class::Trigger'            => 0,
-  'Time::Piece::MySQL'        => 0,
-  'Clone'                     => 0,
-  'Date::Simple'              => 3.03,
+# actual require
+for my $mod (sort keys %final_req) {
+  my ($rtype, $ver) = @{$final_req{$mod}};
+  no strict 'refs';
+  $rtype->($mod, $ver);
+}
 
-  # t/52cycle.t
-  'Test::Memory::Cycle'       => 0,
-  'Devel::Cycle'              => 1.10,
+auto_install();
 
-  # t/36datetime.t
-  # t/60core.t
-  'DateTime::Format::SQLite'  => 0,
+# re-create various autogenerated documentation bits
+if ($Module::Install::AUTHOR) {
 
-  # t/96_is_deteministic_value.t
-  'DateTime::Format::Strptime'=> 0,
+  print "Regenerating README\n";
+  system('pod2text lib/DBIx/Class.pm > README');
 
-  # database-dependent reqs
-  #
-  $ENV{DBICTEST_PG_DSN}
-    ? (
-      'Sys::SigAction' => 0,
-      'DBD::Pg' => 2.009002,
-      'DateTime::Format::Pg' => 0,
-    ) : ()
-  ,
+  if (-f 'MANIFEST') {
+    print "Removing MANIFEST\n";
+    unlink 'MANIFEST';
+  }
 
-  $ENV{DBICTEST_MYSQL_DSN}
-    ? (
-      'DateTime::Format::MySQL' => 0,
-    ) : ()
-  ,
+  print "Regenerating Optional/Dependencies.pod\n";
+  require DBIx::Class::Optional::Dependencies;
+  DBIx::Class::Optional::Dependencies->_gen_pod;
 
-  $ENV{DBICTEST_ORACLE_DSN}
-    ? (
-      'DateTime::Format::Oracle' => 0,
-    ) : ()
-  ,
-);
+  # FIXME Disabled due to unsolved issues, ask theorbtwo
+  #  require Module::Install::Pod::Inherit;
+  #  PodInherit();
+}
 
+tests_recursive (qw|
+    t
+|);
 
 install_script (qw|
     script/dbicadmin
 |);
 
-tests_recursive (qw|
-    t
-|);
 
-resources 'IRC'         => 'irc://irc.perl.org/#dbix-class';
-resources 'license'     => 'http://dev.perl.org/licenses/';
-resources 'repository'  => 'http://dev.catalyst.perl.org/svnweb/bast/browse/DBIx-Class/';
-resources 'MailingList' => 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class';
+### Mangle makefile - read the comments for more info
+#
+postamble <<"EOP";
 
+# This will add an extra dep-spec for the distdir target,
+# which `make` will fold together in a first-come first-serve
+# fashion. What we do here is essentially adding extra
+# commands to execute once the distdir is assembled (via
+# create_distdir), but before control is returned to a higher
+# calling rule.
+distdir : dbicadmin_pod_inject
 
-# re-build README and require extra modules for testing if we're in a checkout
+# The pod self-injection code is in fact a hidden option in
+# dbicadmin itself
+dbicadmin_pod_inject :
+\tcd \$(DISTVNAME) && \$(ABSPERL) -Ilib script/dbicadmin --selfinject-pod
 
-if ($Module::Install::AUTHOR) {
-  warn <<'EOW';
-******************************************************************************
-******************************************************************************
-***                                                                        ***
-*** AUTHOR MODE: all optional test dependencies converted to hard requires ***
-***                                                                        ***
-******************************************************************************
-******************************************************************************
+# Regenerate manifest before running create_distdir.
+create_distdir : manifest
 
-EOW
+EOP
 
-  foreach my $module (sort keys %force_requires_if_author) {
-    build_requires ($module => $force_requires_if_author{$module});
-  }
 
-  print "Regenerating README\n";
-  system('pod2text lib/DBIx/Class.pm > README');
 
-  if (-f 'MANIFEST') {
-    print "Removing MANIFEST\n";
-    unlink 'MANIFEST';
-  }
+resources 'IRC'         => 'irc://irc.perl.org/#dbix-class';
+resources 'license'     => 'http://dev.perl.org/licenses/';
+resources 'repository'  => 'http://dev.catalyst.perl.org/repos/bast/DBIx-Class/';
+resources 'MailingList' => 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class';
 
-#  require Module::Install::Pod::Inherit;
-#  PodInherit();
-}
+# Deprecated/internal modules need no exposure
+no_index directory => $_ for (qw|
+  lib/DBIx/Class/Admin
+  lib/DBIx/Class/SQLAHacks
+  lib/DBIx/Class/PK/Auto
+  lib/DBIx/Class/CDBICompat
+|);
+no_index package => $_ for (qw/
+  DBIx::Class::SQLAHacks DBIx::Class::Storage::DBIHacks
+/);
 
-auto_install();
 
 WriteAll();
 
+
 # Re-write META.yml to _exclude_ all forced requires (we do not want to ship this)
 if ($Module::Install::AUTHOR) {
 
-  Meta->{values}{build_requires} = [ grep 
-    { not exists $force_requires_if_author{$_->[0]} }
-    ( @{Meta->{values}{build_requires}} )
+  # FIXME test_requires is not yet part of META
+  my %original_build_requires = ( %$build_requires, %$test_requires );
+
+  print "Regenerating META with author requires excluded\n";
+  Meta->{values}{build_requires} = [ grep
+    { exists $original_build_requires{$_->[0]} }
+   ( @{Meta->{values}{build_requires}} )
   ];
 
   Meta->write;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/TODO
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/TODO	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/TODO	2010-05-17 14:31:46 UTC (rev 9401)
@@ -25,13 +25,6 @@
    __PACKAGE__->table(__PACKAGE__->table()); for the result set to 
    return the correct object type.
 
-2006-03-27 by mst
- Add the ability for deploy to be given a directory and grab <dbname>.sql 
- out of there if available. Try SQL::Translator if not. If none of the above, 
- cry (and die()).  Then you can have a script that pre-gens for all available 
- SQLT modules so an app can do its own deploy without SQLT on the target 
- system
-
 2006-05-25 by mst (TODOed by bluefeet)
  Add the search attributes "limit" and "rows_per_page".
  limit: work as expected just like offset does

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/MyDatabase/Main/Result/Artist.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/MyDatabase/Main/Result/Artist.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/MyDatabase/Main/Result/Artist.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,9 +1,16 @@
 package MyDatabase::Main::Result::Artist;
-use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
+
+use warnings;
+use strict;
+
+use base qw/DBIx::Class::Core/;
+
 __PACKAGE__->table('artist');
+
 __PACKAGE__->add_columns(qw/ artistid name /);
+
 __PACKAGE__->set_primary_key('artistid');
+
 __PACKAGE__->has_many('cds' => 'MyDatabase::Main::Result::Cd');
 
 1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/MyDatabase/Main/Result/Cd.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/MyDatabase/Main/Result/Cd.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/MyDatabase/Main/Result/Cd.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,9 +1,16 @@
 package MyDatabase::Main::Result::Cd;
-use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
+
+use warnings;
+use strict;
+
+use base qw/DBIx::Class::Core/;
+
 __PACKAGE__->table('cd');
+
 __PACKAGE__->add_columns(qw/ cdid artist title/);
+
 __PACKAGE__->set_primary_key('cdid');
+
 __PACKAGE__->belongs_to('artist' => 'MyDatabase::Main::Result::Artist');
 __PACKAGE__->has_many('tracks' => 'MyDatabase::Main::Result::Track');
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/MyDatabase/Main/Result/Track.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/MyDatabase/Main/Result/Track.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/MyDatabase/Main/Result/Track.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,9 +1,16 @@
 package MyDatabase::Main::Result::Track;
-use base qw/DBIx::Class/;
-__PACKAGE__->load_components(qw/PK::Auto Core/);
+
+use warnings;
+use strict;
+
+use base qw/DBIx::Class::Core/;
+
 __PACKAGE__->table('track');
+
 __PACKAGE__->add_columns(qw/ trackid cd title/);
+
 __PACKAGE__->set_primary_key('trackid');
+
 __PACKAGE__->belongs_to('cd' => 'MyDatabase::Main::Result::Cd');
 
 1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/insertdb.pl
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/insertdb.pl	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/examples/Schema/insertdb.pl	2010-05-17 14:31:46 UTC (rev 9401)
@@ -23,10 +23,10 @@
 
 my @cds;
 foreach my $lp (keys %albums) {
-    my $artist = $schema->resultset('Artist')->search({
+    my $artist = $schema->resultset('Artist')->find({
         name => $albums{$lp}
     });
-    push @cds, [$lp, $artist->first];
+    push @cds, [$lp, $artist->id];
 }
 
 $schema->populate('Cd', [
@@ -47,10 +47,10 @@
 
 my @tracks;
 foreach my $track (keys %tracks) {
-    my $cdname = $schema->resultset('Cd')->search({
+    my $cd = $schema->resultset('Cd')->find({
         title => $tracks{$track},
     });
-    push @tracks, [$cdname->first, $track];
+    push @tracks, [$cd->id, $track];
 }
 
 $schema->populate('Track',[

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/AccessorGroup.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/AccessorGroup.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/AccessorGroup.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -17,8 +17,6 @@
 
 This class now exists in its own right on CPAN as Class::Accessor::Grouped
 
-1;
-
 =head1 AUTHORS
 
 Matt S. Trout <mst at shadowcatsystems.co.uk>

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin/Descriptive.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin/Descriptive.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin/Descriptive.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,10 @@
+package     # hide from PAUSE
+    DBIx::Class::Admin::Descriptive;
+
+use DBIx::Class::Admin::Usage;
+
+use base 'Getopt::Long::Descriptive';
+
+sub usage_class { 'DBIx::Class::Admin::Usage'; }
+
+1;

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin/Types.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin/Types.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin/Types.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,48 @@
+package # hide from PAUSE
+    DBIx::Class::Admin::Types;
+
+use MooseX::Types -declare => [qw(
+    DBICConnectInfo
+    DBICArrayRef
+    DBICHashRef
+)];
+use MooseX::Types::Moose qw/Int HashRef ArrayRef Str Any Bool/;
+use MooseX::Types::JSON qw(JSON);
+
+subtype DBICArrayRef,
+    as ArrayRef;
+
+subtype DBICHashRef,
+    as HashRef;
+
+coerce DBICArrayRef,
+  from JSON,
+  via { _json_to_data ($_) };
+
+coerce DBICHashRef,
+  from JSON,
+  via { _json_to_data($_) };
+
+subtype DBICConnectInfo,
+  as ArrayRef;
+
+coerce DBICConnectInfo,
+  from JSON,
+   via { return _json_to_data($_) } ;
+
+coerce DBICConnectInfo,
+  from Str,
+    via { return _json_to_data($_) };
+
+coerce DBICConnectInfo,
+  from HashRef,
+   via { [ $_ ] };
+
+sub _json_to_data {
+  my ($json_str) = @_;
+  my $json = JSON::Any->new(allow_barekey => 1, allow_singlequote => 1, relaxed=>1);
+  my $ret = $json->jsonToObj($json_str);
+  return $ret;
+}
+
+1;

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin/Usage.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin/Usage.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin/Usage.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,79 @@
+package     # hide from PAUSE
+    DBIx::Class::Admin::Usage;
+
+
+use base 'Getopt::Long::Descriptive::Usage';
+
+use base 'Class::Accessor::Grouped';
+
+use Class::C3;
+
+__PACKAGE__->mk_group_accessors('simple', 'synopsis', 'short_description');
+
+sub prog_name {
+    Getopt::Long::Descriptive::prog_name();
+}
+
+sub set_simple {
+    my ($self,$field, $value) = @_;
+    my $prog_name = prog_name();
+    $value =~ s/%c/$prog_name/g;
+    $self->next::method($field, $value);
+}
+
+
+
+# This returns the usage formatted as a POD document
+sub pod {
+  my ($self) = @_;
+  return join qq{\n}, $self->pod_leader_text, $self->pod_option_text, $self->pod_authorlic_text;
+}
+
+sub pod_leader_text {
+  my ($self) = @_;
+
+  return qq{=head1 NAME\n\n}.prog_name()." - ".$self->short_description().qq{\n\n}.
+         qq{=head1 SYNOPSIS\n\n}.$self->leader_text().qq{\n}.$self->synopsis().qq{\n\n};
+
+}
+
+sub pod_authorlic_text {
+
+  return join ("\n\n",
+    '=head1 AUTHORS',
+    'See L<DBIx::Class/CONTRIBUTORS>',
+    '=head1 LICENSE',
+    'You may distribute this code under the same terms as Perl itself',
+    '=cut',
+  );
+}
+
+
+sub pod_option_text {
+  my ($self) = @_;
+  my @options = @{ $self->{options} || [] };
+  my $string = q{};
+  return $string unless @options;
+
+  $string .= "=head1 OPTIONS\n\n=over\n\n";
+
+  foreach my $opt (@options) {
+    my $spec = $opt->{spec};
+    my $desc = $opt->{desc};
+    next if ($desc eq 'hidden');
+    if ($desc eq 'spacer') {
+        $string .= "=back\n\n=head2 $spec\n\n=cut\n\n=over\n\n";
+        next;
+    }
+
+    $spec = Getopt::Long::Descriptive->_strip_assignment($spec);
+    $string .= "=item " . join " or ", map { length > 1 ? "B<--$_>" : "B<-$_>" }
+                             split /\|/, $spec; 
+    $string .= "\n\n$desc\n\n=cut\n\n";
+
+  }
+  $string .= "=back\n\n";
+  return $string;
+}
+
+1;

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Admin.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,568 @@
+package DBIx::Class::Admin;
+
+# check deps
+BEGIN {
+  use Carp::Clan qw/^DBIx::Class/;
+  use DBIx::Class;
+  croak('The following modules are required for DBIx::Class::Admin ' . DBIx::Class::Optional::Dependencies->req_missing_for ('admin') )
+    unless DBIx::Class::Optional::Dependencies->req_ok_for ('admin');
+}
+
+use Moose;
+use MooseX::Types::Moose qw/Int Str Any Bool/;
+use DBIx::Class::Admin::Types qw/DBICConnectInfo DBICHashRef/;
+use MooseX::Types::JSON qw(JSON);
+use MooseX::Types::Path::Class qw(Dir File);
+use Try::Tiny;
+use JSON::Any qw(DWIW XS JSON);
+use namespace::autoclean;
+
+=head1 NAME
+
+DBIx::Class::Admin - Administration object for schemas
+
+=head1 SYNOPSIS
+
+  $ dbicadmin --help
+
+  $ dbicadmin --schema=MyApp::Schema \
+    --connect='["dbi:SQLite:my.db", "", ""]' \
+    --deploy
+
+  $ dbicadmin --schema=MyApp::Schema --class=Employee \
+    --connect='["dbi:SQLite:my.db", "", ""]' \
+    --op=update --set='{ "name": "New_Employee" }'
+
+  use DBIx::Class::Admin;
+
+  # ddl manipulation
+  my $admin = DBIx::Class::Admin->new(
+    schema_class=> 'MY::Schema',
+    sql_dir=> $sql_dir,
+    connect_info => { dsn => $dsn, user => $user, password => $pass },
+  );
+
+  # create SQLite sql
+  $admin->create('SQLite');
+
+  # create SQL diff for an upgrade
+  $admin->create('SQLite', {} , "1.0");
+
+  # upgrade a database
+  $admin->upgrade();
+
+  # install a version for an unversioned schema
+  $admin->install("3.0");
+
+=head1 REQUIREMENTS
+
+The Admin interface has additional requirements not currently part of
+L<DBIx::Class>. See L<DBIx::Class::Optional::Dependencies> for more details.
+
+=head1 ATTRIBUTES
+
+=head2 schema_class
+
+the class of the schema to load
+
+=cut
+
+has 'schema_class' => (
+  is  => 'ro',
+  isa => Str,
+);
+
+
+=head2 schema
+
+A pre-connected schema object can be provided for manipulation
+
+=cut
+
+has 'schema' => (
+  is          => 'ro',
+  isa         => 'DBIx::Class::Schema',
+  lazy_build  => 1,
+);
+
+sub _build_schema {
+  my ($self)  = @_;
+  require Class::MOP;
+  Class::MOP::load_class($self->schema_class);
+
+  $self->connect_info->[3]->{ignore_version} =1;
+  return $self->schema_class->connect(@{$self->connect_info()} ); # ,  $self->connect_info->[3], { ignore_version => 1} );
+}
+
+
+=head2 resultset
+
+a resultset from the schema to operate on
+
+=cut
+
+has 'resultset' => (
+  is  => 'rw',
+  isa => Str,
+);
+
+
+=head2 where
+
+a hash ref or json string to be used for identifying data to manipulate
+
+=cut
+
+has 'where' => (
+  is      => 'rw',
+  isa     => DBICHashRef,
+  coerce  => 1,
+);
+
+
+=head2 set
+
+a hash ref or json string to be used for inserting or updating data
+
+=cut
+
+has 'set' => (
+  is      => 'rw',
+  isa     => DBICHashRef,
+  coerce  => 1,
+);
+
+
+=head2 attrs
+
+a hash ref or json string to be used for passing additional info to the ->search call
+
+=cut
+
+has 'attrs' => (
+  is      => 'rw',
+  isa     => DBICHashRef,
+  coerce  => 1,
+);
+
+
+=head2 connect_info
+
+connect_info the arguments to provide to the connect call of the schema_class
+
+=cut
+
+has 'connect_info' => (
+  is          => 'ro',
+  isa         => DBICConnectInfo,
+  lazy_build  => 1,
+  coerce      => 1,
+);
+
+sub _build_connect_info {
+  my ($self) = @_;
+  return $self->_find_stanza($self->config, $self->config_stanza);
+}
+
+
+=head2 config_file
+
+config_file provide a config_file to read connect_info from, if this is provided
+config_stanza should also be provided to locate where the connect_info is in the config
+The config file should be in a format readable by Config::General
+
+=cut
+
+has config_file => (
+  is      => 'ro',
+  isa     => File,
+  coerce  => 1,
+);
+
+
+=head2 config_stanza
+
+config_stanza for use with config_file should be a '::' deliminated 'path' to the connection information
+designed for use with catalyst config files
+
+=cut
+
+has 'config_stanza' => (
+  is  => 'ro',
+  isa => Str,
+);
+
+
+=head2 config
+
+Instead of loading from a file the configuration can be provided directly as a hash ref.  Please note 
+config_stanza will still be required.
+
+=cut
+
+has config => (
+  is          => 'ro',
+  isa         => DBICHashRef,
+  lazy_build  => 1,
+);
+
+sub _build_config {
+  my ($self) = @_;
+
+  eval { require Config::Any }
+    or die ("Config::Any is required to parse the config file.\n");
+
+  my $cfg = Config::Any->load_files ( {files => [$self->config_file], use_ext =>1, flatten_to_hash=>1});
+
+  # just grab the config from the config file
+  $cfg = $cfg->{$self->config_file};
+  return $cfg;
+}
+
+
+=head2 sql_dir
+
+The location where sql ddl files should be created or found for an upgrade.
+
+=cut
+
+has 'sql_dir' => (
+  is      => 'ro',
+  isa     => Dir,
+  coerce  => 1,
+);
+
+
+=head2 version
+
+Used for install, the version which will be 'installed' in the schema
+
+=cut
+
+has version => (
+  is  => 'rw',
+  isa => Str,
+);
+
+
+=head2 preversion
+
+Previous version of the schema to create an upgrade diff for; the full sql for that version must be in the sql_dir
+
+=cut
+
+has preversion => (
+  is  => 'rw',
+  isa => Str,
+);
+
+
+=head2 force
+
+Try and force certain operations.
+
+=cut
+
+has force => (
+  is  => 'rw',
+  isa => Bool,
+);
+
+
+=head2 quiet
+
+Be less verbose about actions
+
+=cut
+
+has quiet => (
+  is  => 'rw',
+  isa => Bool,
+);
+
+has '_confirm' => (
+  is  => 'bare',
+  isa => Bool,
+);
+
+
+=head1 METHODS
+
+=head2 create
+
+=over 4
+
+=item Arguments: $sqlt_type, \%sqlt_args, $preversion
+
+=back
+
+L<create> will generate sql for the supplied schema_class in sql_dir.  The flavour of sql to 
+generate can be controlled by supplying a sqlt_type which should be a L<SQL::Translator> name.  
+
+Arguments for L<SQL::Translator> can be supplied in the sqlt_args hashref.
+
+Optional preversion can be supplied to generate a diff to be used by upgrade.
+
+=cut
+
+sub create {
+  my ($self, $sqlt_type, $sqlt_args, $preversion) = @_;
+
+  $preversion ||= $self->preversion();
+
+  my $schema = $self->schema();
+  # create the dir if does not exist
+  $self->sql_dir->mkpath() if ( ! -d $self->sql_dir);
+
+  $schema->create_ddl_dir( $sqlt_type, (defined $schema->schema_version ? $schema->schema_version : ""), $self->sql_dir->stringify, $preversion, $sqlt_args );
+}
+
+
+=head2 upgrade
+
+=over 4
+
+=item Arguments: <none>
+
+=back
+
+upgrade will attempt to upgrade the connected database to the same version as the schema_class.
+B<MAKE SURE YOU BACKUP YOUR DB FIRST>
+
+=cut
+
+sub upgrade {
+  my ($self) = @_;
+  my $schema = $self->schema();
+  if (!$schema->get_db_version()) {
+    # schema is unversioned
+    $schema->throw_exception ("Could not determin current schema version, please either install() or deploy().\n");
+  } else {
+    my $ret = $schema->upgrade();
+    return $ret;
+  }
+}
+
+
+=head2 install
+
+=over 4
+
+=item Arguments: $version
+
+=back
+
+install is here to help when you want to move to L<DBIx::Class::Schema::Versioned> and have an existing 
+database.  install will take a version and add the version tracking tables and 'install' the version.  No 
+further ddl modification takes place.  Setting the force attribute to a true value will allow overriding of 
+already versioned databases.
+
+=cut
+
+sub install {
+  my ($self, $version) = @_;
+
+  my $schema = $self->schema();
+  $version ||= $self->version();
+  if (!$schema->get_db_version() ) {
+    # schema is unversioned
+    print "Going to install schema version\n";
+    my $ret = $schema->install($version);
+    print "retun is $ret\n";
+  }
+  elsif ($schema->get_db_version() and $self->force ) {
+    carp "Forcing install may not be a good idea";
+    if($self->_confirm() ) {
+      $self->schema->_set_db_version({ version => $version});
+    }
+  }
+  else {
+    $schema->throw_exception ("Schema already has a version. Try upgrade instead.\n");
+  }
+
+}
+
+
+=head2 deploy
+
+=over 4
+
+=item Arguments: $args
+
+=back
+
+deploy will create the schema at the connected database.  C<$args> are passed straight to 
+L<DBIx::Class::Schema/deploy>.
+
+=cut
+
+sub deploy {
+  my ($self, $args) = @_;
+  my $schema = $self->schema();
+  if (!$schema->get_db_version() ) {
+    # schema is unversioned
+    $schema->deploy( $args, $self->sql_dir)
+      or $schema->throw_exception ("Could not deploy schema.\n"); # FIXME deploy() does not return 1/0 on success/fail
+  } else {
+    $schema->throw_exception("A versioned schema has already been deployed, try upgrade instead.\n");
+  }
+}
+
+=head2 insert
+
+=over 4
+
+=item Arguments: $rs, $set
+
+=back
+
+insert takes the name of a resultset from the schema_class and a hashref of data to insert
+into that resultset
+
+=cut
+
+sub insert {
+  my ($self, $rs, $set) = @_;
+
+  $rs ||= $self->resultset();
+  $set ||= $self->set();
+  my $resultset = $self->schema->resultset($rs);
+  my $obj = $resultset->create( $set );
+  print ''.ref($resultset).' ID: '.join(',',$obj->id())."\n" if (!$self->quiet);
+}
+
+
+=head2 update
+
+=over 4
+
+=item Arguments: $rs, $set, $where
+
+=back
+
+update takes the name of a resultset from the schema_class, a hashref of data to update and
+a where hash used to form the search for the rows to update.
+
+=cut
+
+sub update {
+  my ($self, $rs, $set, $where) = @_;
+
+  $rs ||= $self->resultset();
+  $where ||= $self->where();
+  $set ||= $self->set();
+  my $resultset = $self->schema->resultset($rs);
+  $resultset = $resultset->search( ($where||{}) );
+
+  my $count = $resultset->count();
+  print "This action will modify $count ".ref($resultset)." records.\n" if (!$self->quiet);
+
+  if ( $self->force || $self->_confirm() ) {
+    $resultset->update_all( $set );
+  }
+}
+
+
+=head2 delete
+
+=over 4
+
+=item Arguments: $rs, $where, $attrs
+
+=back
+
+delete takes the name of a resultset from the schema_class, a where hashref and an attrs hashref to pass to ->search.
+The found data is deleted and cannot be recovered.
+
+=cut
+
+sub delete {
+  my ($self, $rs, $where, $attrs) = @_;
+
+  $rs ||= $self->resultset();
+  $where ||= $self->where();
+  $attrs ||= $self->attrs();
+  my $resultset = $self->schema->resultset($rs);
+  $resultset = $resultset->search( ($where||{}), ($attrs||()) );
+
+  my $count = $resultset->count();
+  print "This action will delete $count ".ref($resultset)." records.\n" if (!$self->quiet);
+
+  if ( $self->force || $self->_confirm() ) {
+    $resultset->delete_all();
+  }
+}
+
+
+=head2 select
+
+=over 4
+
+=item Arguments: $rs, $where, $attrs
+
+=back
+
+select takes the name of a resultset from the schema_class, a where hashref and an attrs hashref to pass to ->search. 
+The found data is returned in an array ref where the first row will be the columns list.
+
+=cut
+
+sub select {
+  my ($self, $rs, $where, $attrs) = @_;
+
+  $rs ||= $self->resultset();
+  $where ||= $self->where();
+  $attrs ||= $self->attrs();
+  my $resultset = $self->schema->resultset($rs);
+  $resultset = $resultset->search( ($where||{}), ($attrs||()) );
+
+  my @data;
+  my @columns = $resultset->result_source->columns();
+  push @data, [@columns];# 
+
+  while (my $row = $resultset->next()) {
+    my @fields;
+    foreach my $column (@columns) {
+      push( @fields, $row->get_column($column) );
+    }
+    push @data, [@fields];
+  }
+
+  return \@data;
+}
+
+sub _confirm {
+  my ($self) = @_;
+  print "Are you sure you want to do this? (type YES to confirm) \n";
+  # mainly here for testing
+  return 1 if ($self->meta->get_attribute('_confirm')->get_value($self));
+  my $response = <STDIN>;
+  return 1 if ($response=~/^YES/);
+  return;
+}
+
+sub _find_stanza {
+  my ($self, $cfg, $stanza) = @_;
+  my @path = split /::/, $stanza;
+  while (my $path = shift @path) {
+    if (exists $cfg->{$path}) {
+      $cfg = $cfg->{$path};
+    }
+    else {
+      die ("Could not find $stanza in config, $path does not seem to exist.\n");
+    }
+  }
+  return $cfg;
+}
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself
+
+=cut
+
+1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/AbstractSearch.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/AbstractSearch.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/AbstractSearch.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -10,7 +10,7 @@
 
 =head1 SYNOPSIS
 
-See DBIx::Class::CDBICompat for directions for use.
+See DBIx::Class::CDBICompat for usage directions.
 
 =head1 DESCRIPTION
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/ColumnCase.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/ColumnCase.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/ColumnCase.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -25,9 +25,15 @@
 
 sub has_many {
   my ($class, $rel, $f_class, $f_key, @rest) = @_;
-  return $class->next::method($rel, $f_class, ( ref($f_key) ?
-                                                          $f_key :
-                                                          lc($f_key) ), @rest);
+  return $class->next::method(
+    $rel,
+    $f_class,
+    (ref($f_key) ?
+      $f_key :
+      lc($f_key||'')
+    ),
+    @rest
+  );
 }
 
 sub get_inflated_column {

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -11,7 +11,7 @@
 
 =head1 SYNOPSIS
 
-See DBIx::Class::CDBICompat for directions for use.
+See DBIx::Class::CDBICompat for usage directions.
 
 =head1 DESCRIPTION
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/Constructor.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/Constructor.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/Constructor.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -3,6 +3,8 @@
 
 use base qw(DBIx::Class::CDBICompat::ImaDBI);
 
+use Sub::Name();
+
 use strict;
 use warnings;
 
@@ -22,7 +24,7 @@
     return carp("$method already exists in $class")
             if *$meth{CODE};
 
-    *$meth = sub {
+    *$meth = Sub::Name::subname $meth => sub {
             my $self = shift;
             $self->sth_to_objects($self->sql_Retrieve($fragment), \@_);
     };

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/Copy.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/Copy.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/Copy.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -12,7 +12,7 @@
 
 =head1 SYNOPSIS
 
-See DBIx::Class::CDBICompat for directions for use.
+See DBIx::Class::CDBICompat for usage directions.
 
 =head1 DESCRIPTION
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/Iterator.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/Iterator.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat/Iterator.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -10,7 +10,7 @@
 
 =head1 SYNOPSIS
 
-See DBIx::Class::CDBICompat for directions for use.
+See DBIx::Class::CDBICompat for usage directions.
 
 =head1 DESCRIPTION
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/CDBICompat.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -91,7 +91,7 @@
 
 =head2 Choosing Features
 
-In fact, this class is just a receipe containing all the features emulated.
+In fact, this class is just a recipe containing all the features emulated.
 If you like, you can choose which features to emulate by building your 
 own class and loading it like this:
 
@@ -145,7 +145,7 @@
 
 =item Relationships
 
-Relationships between tables (has_a, has_many...) must be delcared after all tables in the relationship have been declared.  Thus the usual CDBI idiom of declaring columns and relationships for each class together will not work.  They must instead be done like so:
+Relationships between tables (has_a, has_many...) must be declared after all tables in the relationship have been declared.  Thus the usual CDBI idiom of declaring columns and relationships for each class together will not work.  They must instead be done like so:
 
     package Foo;
     use base qw(Class::DBI);

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Componentised.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Componentised.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Componentised.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -5,30 +5,74 @@
 use warnings;
 
 use base 'Class::C3::Componentised';
-use Carp::Clan qw/^DBIx::Class/;
+use Carp::Clan qw/^DBIx::Class|^Class::C3::Componentised/;
+use mro 'c3';
 
+my $warned;
+
+# this warns of subtle bugs introduced by UTF8Columns hacky handling of store_column
+# if and only if it is placed before something overriding store_column
 sub inject_base {
-  my ($class, $target, @to_inject) = @_;
-  {
-    no strict 'refs';
-    foreach my $to (reverse @to_inject) {
-      my @comps = qw(DigestColumns ResultSetManager Ordered UTF8Columns);
-           # Add components here that need to be loaded before Core
-      foreach my $first_comp (@comps) {
-        if ($to eq 'DBIx::Class::Core' &&
-            $target->isa("DBIx::Class::${first_comp}")) {
-          carp "Possible incorrect order of components in ".
-               "${target}::load_components($first_comp) call: Core loaded ".
-               "before $first_comp. See the documentation for ".
-               "DBIx::Class::$first_comp for more information";
+  my $class = shift;
+  my ($target, @complist) = @_;
+
+  # we already did load the component
+  my $keep_checking = ! (
+    $target->isa ('DBIx::Class::UTF8Columns')
+      ||
+    $target->isa ('DBIx::Class::ForceUTF8')
+  );
+
+  my @target_isa;
+
+  while ($keep_checking && @complist) {
+
+    @target_isa = do { no strict 'refs'; @{"$target\::ISA"} }
+      unless @target_isa;
+
+    my $comp = pop @complist;
+
+    # warn here on use of either component, as we have no access to ForceUTF8,
+    # the author does not respond, and the Catalyst wiki used to recommend it
+    for (qw/DBIx::Class::UTF8Columns DBIx::Class::ForceUTF8/) {
+      if ($comp->isa ($_) ) {
+        $keep_checking = 0; # no use to check from this point on
+        carp "Use of $_ is strongly discouraged. See documentation of DBIx::Class::UTF8Columns for more info\n"
+          unless ($warned->{UTF8Columns}++ || $ENV{DBIC_UTF8COLUMNS_OK});
+        last;
+      }
+    }
+
+    # something unset $keep_checking - we got a unicode mangler
+    if (! $keep_checking) {
+
+      my $base_store_column = do { require DBIx::Class::Row; DBIx::Class::Row->can ('store_column') };
+
+      my @broken;
+      for my $existing_comp (@target_isa) {
+        my $sc = $existing_comp->can ('store_column')
+          or next;
+
+        if ($sc ne $base_store_column) {
+          require B;
+          my $definer = B::svref_2object($sc)->STASH->NAME;
+          push @broken, ($definer eq $existing_comp)
+            ? $existing_comp
+            : "$existing_comp (via $definer)"
+          ;
         }
       }
-      unshift( @{"${target}::ISA"}, $to )
-        unless ($target eq $to || $target->isa($to));
+
+      carp "Incorrect loading order of $comp by $target will affect other components overriding 'store_column' ("
+          . join (', ', @broken)
+          .'). Refer to the documentation of DBIx::Class::UTF8Columns for more info'
+        if @broken;
     }
+
+    unshift @target_isa, $comp;
   }
 
-  $class->next::method($target, @to_inject);
+  $class->next::method(@_);
 }
 
 1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Core.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Core.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Core.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -2,7 +2,6 @@
 
 use strict;
 use warnings;
-no warnings 'qw';
 
 use base qw/DBIx::Class/;
 
@@ -12,7 +11,8 @@
   PK::Auto
   PK
   Row
-  ResultSourceProxy::Table/);
+  ResultSourceProxy::Table
+/);
 
 1;
 
@@ -22,8 +22,8 @@
 
 =head1 SYNOPSIS
 
-  # In your table classes
-  __PACKAGE__->load_components(qw/Core/);
+  # In your result (table) classes
+  use base 'DBIx::Class::Core';
 
 =head1 DESCRIPTION
 
@@ -34,8 +34,6 @@
 
 =over 4
 
-=item L<DBIx::Class::Serialize::Storable>
-
 =item L<DBIx::Class::InflateColumn>
 
 =item L<DBIx::Class::Relationship>

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Cursor.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Cursor.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Cursor.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -3,6 +3,8 @@
 use strict;
 use warnings;
 
+use base qw/DBIx::Class/;
+
 =head1 NAME
 
 DBIx::Class::Cursor - Abstract object representing a query cursor on a

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/FilterColumn.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/FilterColumn.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/FilterColumn.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,205 @@
+package DBIx::Class::FilterColumn;
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Row/;
+
+sub filter_column {
+  my ($self, $col, $attrs) = @_;
+
+  $self->throw_exception("FilterColumn does not work with InflateColumn")
+    if $self->isa('DBIx::Class::InflateColumn') &&
+      defined $self->column_info($col)->{_inflate_info};
+
+  $self->throw_exception("No such column $col to filter")
+    unless $self->has_column($col);
+
+  $self->throw_exception("filter_column needs attr hashref")
+    unless ref $attrs eq 'HASH';
+
+  $self->column_info($col)->{_filter_info} = $attrs;
+  my $acc = $self->column_info($col)->{accessor};
+  $self->mk_group_accessors(filtered_column => [ (defined $acc ? $acc : $col), $col]);
+  return 1;
+}
+
+sub _column_from_storage {
+  my ($self, $col, $value) = @_;
+
+  return $value unless defined $value;
+
+  my $info = $self->column_info($col)
+    or $self->throw_exception("No column info for $col");
+
+  return $value unless exists $info->{_filter_info};
+
+  my $filter = $info->{_filter_info}{filter_from_storage};
+  $self->throw_exception("No filter for $col") unless defined $filter;
+
+  return $self->$filter($value);
+}
+
+sub _column_to_storage {
+  my ($self, $col, $value) = @_;
+
+  my $info = $self->column_info($col) or
+    $self->throw_exception("No column info for $col");
+
+  return $value unless exists $info->{_filter_info};
+
+  my $unfilter = $info->{_filter_info}{filter_to_storage};
+  $self->throw_exception("No unfilter for $col") unless defined $unfilter;
+  return $self->$unfilter($value);
+}
+
+sub get_filtered_column {
+  my ($self, $col) = @_;
+
+  $self->throw_exception("$col is not a filtered column")
+    unless exists $self->column_info($col)->{_filter_info};
+
+  return $self->{_filtered_column}{$col}
+    if exists $self->{_filtered_column}{$col};
+
+  my $val = $self->get_column($col);
+
+  return $self->{_filtered_column}{$col} = $self->_column_from_storage($col, $val);
+}
+
+sub get_column {
+  my ($self, $col) = @_;
+  if (exists $self->{_filtered_column}{$col}) {
+    return $self->{_column_data}{$col} ||= $self->_column_to_storage ($col, $self->{_filtered_column}{$col});
+  }
+
+  return $self->next::method ($col);
+}
+
+# sadly a separate codepath in Row.pm ( used by insert() )
+sub get_columns {
+  my $self = shift;
+
+  foreach my $col (keys %{$self->{_filtered_column}||{}}) {
+    $self->{_column_data}{$col} ||= $self->_column_to_storage ($col, $self->{_filtered_column}{$col})
+      if exists $self->{_filtered_column}{$col};
+  }
+
+  $self->next::method (@_);
+}
+
+sub store_column {
+  my ($self, $col) = (shift, @_);
+
+  # blow cache
+  delete $self->{_filtered_column}{$col};
+
+  $self->next::method(@_);
+}
+
+sub set_filtered_column {
+  my ($self, $col, $filtered) = @_;
+
+  # do not blow up the cache via set_column unless necessary
+  # (filtering may be expensive!)
+  if (exists $self->{_filtered_column}{$col}) {
+    return $filtered
+      if ($self->_eq_column_values ($col, $filtered, $self->{_filtered_column}{$col} ) );
+
+    $self->make_column_dirty ($col); # so the comparison won't run again
+  }
+
+  $self->set_column($col, $self->_column_to_storage($col, $filtered));
+
+  return $self->{_filtered_column}{$col} = $filtered;
+}
+
+sub update {
+  my ($self, $attrs, @rest) = @_;
+
+  foreach my $key (keys %{$attrs||{}}) {
+    if (
+      $self->has_column($key)
+        &&
+      exists $self->column_info($key)->{_filter_info}
+    ) {
+      $self->set_filtered_column($key, delete $attrs->{$key});
+
+      # FIXME update() reaches directly into the object-hash
+      # and we may *not* have a filtered value there - thus
+      # the void-ctx filter-trigger
+      $self->get_column($key) unless exists $self->{_column_data}{$key};
+    }
+  }
+
+  return $self->next::method($attrs, @rest);
+}
+
+sub new {
+  my ($class, $attrs, @rest) = @_;
+  my $source = $attrs->{-result_source}
+    or $class->throw_exception('Sourceless rows are not supported with DBIx::Class::FilterColumn');
+
+  my $obj = $class->next::method($attrs, @rest);
+  foreach my $key (keys %{$attrs||{}}) {
+    if ($obj->has_column($key) &&
+          exists $obj->column_info($key)->{_filter_info} ) {
+      $obj->set_filtered_column($key, $attrs->{$key});
+    }
+  }
+
+  return $obj;
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::FilterColumn - Automatically convert column data
+
+=head1 SYNOPSIS
+
+ # In your result classes
+ __PACKAGE__->filter_column( money => {
+     filter_to_storage => 'to_pennies',
+     filter_from_storage => 'from_pennies',
+ });
+
+ sub to_pennies   { $_[1] * 100 }
+
+ sub from_pennies { $_[1] / 100 }
+
+ 1;
+
+=head1 DESCRIPTION
+
+This component is meant to be a more powerful, but less DWIM-y,
+L<DBIx::Class::InflateColumn>.  One of the major issues with said component is
+that it B<only> works with references.  Generally speaking anything that can
+be done with L<DBIx::Class::InflateColumn> can be done with this component.
+
+=head1 METHODS
+
+=head2 filter_column
+
+ __PACKAGE__->filter_column( colname => {
+     filter_from_storage => 'method',
+     filter_to_storage   => 'method',
+ })
+
+This is the method that you need to call to set up a filtered column.  It takes
+exactly two arguments; the first being the column name, the second being a
+C<HashRef> with C<filter_from_storage> and C<filter_to_storage> having
+something that can be called as a method.  The method will be called with
+the value of the column as the first non-C<$self> argument.
+
+=head2 get_filtered_column
+
+ $obj->get_filtered_column('colname')
+
+Returns the filtered value of the column
+
+=head2 set_filtered_column
+
+ $obj->set_filtered_column(colname => 'new_value')
+
+Sets the filtered value of the column

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/InflateColumn/DateTime.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/InflateColumn/DateTime.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/InflateColumn/DateTime.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -15,14 +15,14 @@
 columns to be of the datetime, timestamp or date datatype.
 
   package Event;
-  __PACKAGE__->load_components(qw/InflateColumn::DateTime Core/);
+  use base 'DBIx::Class::Core';
+
+  __PACKAGE__->load_components(qw/InflateColumn::DateTime/);
   __PACKAGE__->add_columns(
     starts_when => { data_type => 'datetime' }
+    create_date => { data_type => 'date' }
   );
 
-NOTE: You B<must> load C<InflateColumn::DateTime> B<before> C<Core>. See
-L<DBIx::Class::Manual::Component> for details.
-
 Then you can treat the specified column as a L<DateTime> object.
 
   print "This event starts the month of ".
@@ -69,14 +69,22 @@
 that this feature is new as of 0.07, so it may not be perfect yet - bug
 reports to the list very much welcome).
 
+If the data_type of a field is C<date>, C<datetime> or C<timestamp> (or
+a derivative of these datatypes, e.g. C<timestamp with timezone>), this
+module will automatically call the appropriate parse/format method for
+deflation/inflation as defined in the storage class. For instance, for
+a C<datetime> field the methods C<parse_datetime> and C<format_datetime>
+would be called on deflation/inflation. If the storage class does not
+provide a specialized inflator/deflator, C<[parse|format]_datetime> will
+be used as a fallback. See L<DateTime::Format> for more information on
+date formatting.
+
 For more help with using components, see L<DBIx::Class::Manual::Component/USING>.
 
 =cut
 
 __PACKAGE__->load_components(qw/InflateColumn/);
 
-__PACKAGE__->mk_group_accessors('simple' => '__datetime_parser');
-
 =head2 register_column
 
 Chains with the L<DBIx::Class::Row/register_column> method, and sets
@@ -124,27 +132,22 @@
       $info->{_ic_dt_method} ||= "timestamp_without_timezone";
     } elsif ($type eq "smalldatetime") {
       $type = "datetime";
-      $info->{_ic_dt_method} ||= "datetime";
+      $info->{_ic_dt_method} ||= "smalldatetime";
     }
   }
 
-  my $timezone;
   if ( defined $info->{extra}{timezone} ) {
     carp "Putting timezone into extra => { timezone => '...' } has been deprecated, ".
          "please put it directly into the '$column' column definition.";
-    $timezone = $info->{extra}{timezone};
+    $info->{timezone} = $info->{extra}{timezone} unless defined $info->{timezone};
   }
 
-  my $locale;
   if ( defined $info->{extra}{locale} ) {
     carp "Putting locale into extra => { locale => '...' } has been deprecated, ".
          "please put it directly into the '$column' column definition.";
-    $locale = $info->{extra}{locale};
+    $info->{locale} = $info->{extra}{locale} unless defined $info->{locale};
   }
 
-  $locale   = $info->{locale}   if defined $info->{locale};
-  $timezone = $info->{timezone} if defined $info->{timezone};
-
   my $undef_if_invalid = $info->{datetime_undef_if_invalid};
 
   if ($type eq 'datetime' || $type eq 'date' || $type eq 'timestamp') {
@@ -170,21 +173,12 @@
               $self->throw_exception ("Error while inflating ${value} for ${column} on ${self}: $err");
             }
 
-            $dt->set_time_zone($timezone) if $timezone;
-            $dt->set_locale($locale) if $locale;
-            return $dt;
+            return $obj->_post_inflate_datetime( $dt, \%info );
           },
           deflate => sub {
             my ($value, $obj) = @_;
-            if ($timezone) {
-                carp "You're using a floating timezone, please see the documentation of"
-                  . " DBIx::Class::InflateColumn::DateTime for an explanation"
-                  if ref( $value->time_zone ) eq 'DateTime::TimeZone::Floating'
-                      and not $info{floating_tz_ok}
-                      and not $ENV{DBIC_FLOATING_TZ_OK};
-                $value->set_time_zone($timezone);
-                $value->set_locale($locale) if $locale;
-            }
+
+            $value = $obj->_pre_deflate_datetime( $value, \%info );
             $obj->_deflate_from_datetime( $value, \%info );
           },
         }
@@ -213,12 +207,34 @@
 }
 
 sub _datetime_parser {
-  my $self = shift;
-  if (my $parser = $self->__datetime_parser) {
-    return $parser;
+  shift->result_source->storage->datetime_parser (@_);
+}
+
+sub _post_inflate_datetime {
+  my( $self, $dt, $info ) = @_;
+
+  $dt->set_time_zone($info->{timezone}) if defined $info->{timezone};
+  $dt->set_locale($info->{locale}) if defined $info->{locale};
+
+  return $dt;
+}
+
+sub _pre_deflate_datetime {
+  my( $self, $dt, $info ) = @_;
+
+  if (defined $info->{timezone}) {
+    carp "You're using a floating timezone, please see the documentation of"
+      . " DBIx::Class::InflateColumn::DateTime for an explanation"
+      if ref( $dt->time_zone ) eq 'DateTime::TimeZone::Floating'
+          and not $info->{floating_tz_ok}
+          and not $ENV{DBIC_FLOATING_TZ_OK};
+
+    $dt->set_time_zone($info->{timezone});
   }
-  my $parser = $self->result_source->storage->datetime_parser(@_);
-  return $self->__datetime_parser($parser);
+
+  $dt->set_locale($info->{locale}) if defined $info->{locale};
+
+  return $dt;
 }
 
 1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/InflateColumn/File.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/InflateColumn/File.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/InflateColumn/File.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -7,6 +7,17 @@
 use File::Copy;
 use Path::Class;
 
+use Carp::Clan qw/^DBIx::Class/;
+carp 'InflateColumn::File has entered a deprecation cycle. This component '
+    .'has a number of architectural deficiencies that can quickly drive '
+    .'your filesystem and database out of sync and is not recommended '
+    .'for further use. It will be retained for backwards '
+    .'compatibility, but no new functionality patches will be accepted. '
+    .'Please consider using the much more mature and actively maintained '
+    .'DBIx::Class::InflateColumn::FS. You can set the environment variable '
+    .'DBIC_IC_FILE_NOWARN to a true value to disable this warning.'
+unless $ENV{DBIC_IC_FILE_NOWARN};
+
 __PACKAGE__->load_components(qw/InflateColumn/);
 
 sub register_column {
@@ -107,14 +118,26 @@
 
 =head1 NAME
 
-DBIx::Class::InflateColumn::File -  map files from the Database to the filesystem.
+DBIx::Class::InflateColumn::File -  DEPRECATED (superseded by DBIx::Class::InflateColumn::FS)
 
+=head2 Deprecation Notice
+
+ This component has a number of architectural deficiencies that can quickly
+ drive your filesystem and database out of sync and is not recommended for
+ further use. It will be retained for backwards compatibility, but no new
+ functionality patches will be accepted. Please consider using the much more
+ mature and actively supported DBIx::Class::InflateColumn::FS. You can set
+ the environment variable DBIC_IC_FILE_NOWARN to a true value to disable
+ this warning.
+
 =head1 SYNOPSIS
 
 In your L<DBIx::Class> table class:
 
-    __PACKAGE__->load_components( "PK::Auto", "InflateColumn::File", "Core" );
+    use base 'DBIx::Class::Core';
 
+    __PACKAGE__->load_components(qw/InflateColumn::File/);
+
     # define your columns
     __PACKAGE__->add_columns(
         "id",
@@ -174,7 +197,7 @@
 
 =head2 _file_column_callback ($file,$ret,$target)
 
-method made to be overridden for callback purposes.
+Method made to be overridden for callback purposes.
 
 =cut
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/InflateColumn.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/InflateColumn.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/InflateColumn.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -26,7 +26,7 @@
 
 It can be used, for example, to automatically convert to and from
 L<DateTime> objects for your date and time fields. There's a
-conveniece component to actually do that though, try
+convenience component to actually do that though, try
 L<DBIx::Class::InflateColumn::DateTime>.
 
 It will handle all types of references except scalar references. It
@@ -37,7 +37,7 @@
 to work.
 
 If you want to filter plain scalar values and replace them with
-something else, contribute a filtering component.
+something else, see L<DBIx::Class::FilterColumn>.
 
 =head1 METHODS
 
@@ -74,12 +74,18 @@
 
 sub inflate_column {
   my ($self, $col, $attrs) = @_;
+
+  $self->throw_exception("InflateColumn does not work with FilterColumn")
+    if $self->isa('DBIx::Class::FilterColumn') &&
+      defined $self->column_info($col)->{_filter_info};
+
   $self->throw_exception("No such column $col to inflate")
     unless $self->has_column($col);
   $self->throw_exception("inflate_column needs attr hashref")
     unless ref $attrs eq 'HASH';
   $self->column_info($col)->{_inflate_info} = $attrs;
-  $self->mk_group_accessors('inflated_column' => [$self->column_info($col)->{accessor} || $col, $col]);
+  my $acc = $self->column_info($col)->{accessor};
+  $self->mk_group_accessors('inflated_column' => [ (defined $acc ? $acc : $col), $col]);
   return 1;
 }
 
@@ -113,7 +119,7 @@
 
 Fetch a column value in its inflated state.  This is directly
 analogous to L<DBIx::Class::Row/get_column> in that it only fetches a
-column already retreived from the database, and then inflates it.
+column already retrieved from the database, and then inflates it.
 Throws an exception if the column requested is not an inflated column.
 
 =cut
@@ -124,8 +130,11 @@
     unless exists $self->column_info($col)->{_inflate_info};
   return $self->{_inflated_column}{$col}
     if exists $self->{_inflated_column}{$col};
-  return $self->{_inflated_column}{$col} =
-           $self->_inflated_column($col, $self->get_column($col));
+
+  my $val = $self->get_column($col);
+  return $val if ref $val eq 'SCALAR';  # that would be a not-yet-reloaded scalarref update
+
+  return $self->{_inflated_column}{$col} = $self->_inflated_column($col, $val);
 }
 
 =head2 set_inflated_column
@@ -142,9 +151,9 @@
   $self->set_column($col, $self->_deflated_column($col, $inflated));
 #  if (blessed $inflated) {
   if (ref $inflated && ref($inflated) ne 'SCALAR') {
-    $self->{_inflated_column}{$col} = $inflated; 
+    $self->{_inflated_column}{$col} = $inflated;
   } else {
-    delete $self->{_inflated_column}{$col};      
+    delete $self->{_inflated_column}{$col};
   }
   return $inflated;
 }
@@ -175,7 +184,7 @@
 =over 4
 
 =item L<DBIx::Class::Core> - This component is loaded as part of the
-      "core" L<DBIx::Class> components; generally there is no need to
+      C<core> L<DBIx::Class> components; generally there is no need to
       load it directly
 
 =back

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Component.pod
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Component.pod	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Component.pod	2010-05-17 14:31:46 UTC (rev 9401)
@@ -12,31 +12,29 @@
 
 =head1 USING
 
-Components are loaded using the load_components() method within your 
+Components are loaded using the load_components() method within your
 DBIx::Class classes.
 
   package My::Thing;
-  use base qw( DBIx::Class );
-  __PACKAGE__->load_components(qw/ PK::Auto Core /);
+  use base qw( DBIx::Class::Core );
+  __PACKAGE__->load_components(qw/InflateColumn::DateTime TimeStamp/);
 
-Generally you do not want to specify the full package name 
-of a component, instead take off the DBIx::Class:: part of 
-it and just include the rest.  If you do want to load a 
-component outside of the normal namespace you can do so 
+Generally you do not want to specify the full package name
+of a component, instead take off the DBIx::Class:: part of
+it and just include the rest.  If you do want to load a
+component outside of the normal namespace you can do so
 by prepending the component name with a +.
 
   __PACKAGE__->load_components(qw/ +My::Component /);
 
-Once a component is loaded all of it's methods, or otherwise, 
+Once a component is loaded all of it's methods, or otherwise,
 that it provides will be available in your class.
 
-The order in which is you load the components may be 
-very important, depending on the component.  The general 
-rule of thumb is to first load extra components and then 
-load core ones last.  If you are not sure, then read the 
-docs for the components you are using and see if they 
-mention anything about the order in which you should load 
-them.
+The order in which you load the components may be very
+important, depending on the component. If you are not sure,
+then read the docs for the components you are using and see
+if they mention anything about the order in which you should
+load them.
 
 =head1 CREATING COMPONENTS
 
@@ -47,11 +45,11 @@
   # Create methods, accessors, load other components, etc.
   1;
 
-When a component is loaded it is included in the calling 
-class' inheritance chain using L<Class::C3>.  As well as 
-providing custom utility methods, a component may also 
-override methods provided by other core components, like 
-L<DBIx::Class::Row> and others.  For example, you 
+When a component is loaded it is included in the calling
+class' inheritance chain using L<Class::C3>.  As well as
+providing custom utility methods, a component may also
+override methods provided by other core components, like
+L<DBIx::Class::Row> and others.  For example, you
 could override the insert and delete methods.
 
   sub insert {
@@ -84,6 +82,8 @@
 These components provide extra functionality beyond 
 basic functionality that you can't live without.
 
+L<DBIx::Class::Serialize::Storable> - Hooks for Storable freeze/thaw.
+
 L<DBIx::Class::CDBICompat> - Class::DBI Compatibility layer.
 
 L<DBIx::Class::FormTools> - Build forms with multiple interconnected objects.
@@ -106,26 +106,22 @@
 
 =head2 Experimental
 
-These components are under development, there interfaces may 
-change, they may not work, etc.  So, use them if you want, but 
+These components are under development, their interfaces may
+change, they may not work, etc.  So, use them if you want, but
 be warned.
 
-L<DBIx::Class::Serialize> - Hooks for Storable freeze/thaw.
-
-L<DBIx::Class::Serialize::Storable> - Hooks for Storable freeze/thaw.
-
 L<DBIx::Class::Validation> - Validate all data before submitting to your database.
 
 =head2 Core
 
-These are the components that all, or nearly all, people will use 
-without even knowing it.  These components provide most of 
+These are the components that all, or nearly all, people will use
+without even knowing it.  These components provide most of
 DBIx::Class' functionality.
 
-L<DBIx::Class::AccessorGroup> - Lets you build groups of accessors.
-
 L<DBIx::Class::Core> - Loads various components that "most people" would want.
 
+L<DBIx::Class::AccessorGroup> - Lets you build groups of accessors.
+
 L<DBIx::Class::DB> - Non-recommended classdata schema component.
 
 L<DBIx::Class::InflateColumn> - Automatically create objects from column data.
@@ -145,4 +141,3 @@
 =head1 AUTHOR
 
 Aran Clary Deltac <bluefeet at cpan.org>
-

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Cookbook.pod
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Cookbook.pod	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Cookbook.pod	2010-05-17 14:31:46 UTC (rev 9401)
@@ -37,8 +37,11 @@
 
 This results in something like the following C<WHERE> clause:
 
-  WHERE artist LIKE '%Lamb%' AND title LIKE '%Fear of Fours%'
+  WHERE artist LIKE ? AND title LIKE ?
 
+And the following bind values for the placeholders: C<'%Lamb%'>, C<'%Fear of
+Fours%'>.
+
 Other queries might require slightly more complex logic:
 
   my @albums = $schema->resultset('Album')->search({
@@ -110,9 +113,8 @@
   package My::Schema::Result::UserFriendsComplex;
   use strict;
   use warnings;
-  use base qw/DBIx::Class/;
+  use base qw/DBIx::Class::Core/;
 
-  __PACKAGE__->load_components('Core');
   __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
 
   # ->table, ->add_columns, etc.
@@ -139,7 +141,7 @@
   );
 
 ... and you'll get back a perfect L<DBIx::Class::ResultSet> (except, of course,
-that you cannot modify the rows it contains, ie. cannot call L</update>,
+that you cannot modify the rows it contains, e.g. cannot call L</update>,
 L</delete>, ...  on it).
 
 Note that you cannot have bind parameters unless is_virtual is set to true.
@@ -199,7 +201,7 @@
   # SELECT name name, LENGTH( name )
   # FROM artist
 
-Note that the C<as> attribute B<has absolutely nothing to do> with the sql
+Note that the C<as> attribute B<has absolutely nothing to do> with the SQL
 syntax C< SELECT foo AS bar > (see the documentation in 
 L<DBIx::Class::ResultSet/ATTRIBUTES>). You can control the C<AS> part of the
 generated SQL via the C<-as> field attribute as follows:
@@ -244,6 +246,8 @@
   # Or use DBIx::Class::AccessorGroup:
   __PACKAGE__->mk_group_accessors('column' => 'name_length');
 
+See also L</Using SQL functions on the left hand side of a comparison>.
+
 =head2 SELECT DISTINCT with multiple columns
 
   my $rs = $schema->resultset('Artist')->search(
@@ -288,7 +292,7 @@
   my $count = $rs->count;
 
   # Equivalent SQL:
-  # SELECT COUNT( * ) FROM (SELECT me.name FROM artist me GROUP BY me.name) count_subq:
+  # SELECT COUNT( * ) FROM (SELECT me.name FROM artist me GROUP BY me.name) me:
 
 =head2 Grouping results
 
@@ -313,7 +317,7 @@
 are in any way unsure about the use of the attributes above (C< join
 >, C< select >, C< as > and C< group_by >).
 
-=head2 Subqueries (EXPERIMENTAL)
+=head2 Subqueries
 
 You can write subqueries relatively easily in DBIC.
 
@@ -325,13 +329,13 @@
     artist_id => { 'IN' => $inside_rs->get_column('id')->as_query },
   });
 
-The usual operators ( =, !=, IN, NOT IN, etc) are supported.
+The usual operators ( =, !=, IN, NOT IN, etc.) are supported.
 
 B<NOTE>: You have to explicitly use '=' when doing an equality comparison.
 The following will B<not> work:
 
   my $rs = $schema->resultset('CD')->search({
-    artist_id => $inside_rs->get_column('id')->as_query,
+    artist_id => $inside_rs->get_column('id')->as_query,  # does NOT work
   });
 
 =head3 Support
@@ -361,14 +365,10 @@
        WHERE artist_id = me.artist_id
       )
 
-=head3 EXPERIMENTAL
-
-Please note that subqueries are considered an experimental feature.
-
 =head2 Predefined searches
 
-You can write your own L<DBIx::Class::ResultSet> class by inheriting from it
-and defining often used searches as methods:
+You can define frequently used searches as methods by subclassing
+L<DBIx::Class::ResultSet>:
 
   package My::DBIC::ResultSet::CD;
   use strict;
@@ -386,11 +386,16 @@
 
   1;
 
-To use your resultset, first tell DBIx::Class to create an instance of it
-for you, in your My::DBIC::Schema::CD class:
+If you're using L<DBIx::Class::Schema/load_namespaces>, simply place the file
+into the C<ResultSet> directory next to your C<Result> directory, and it will
+be automatically loaded.
 
+If however you are still using L<DBIx::Class::Schema/load_classes>, first tell
+DBIx::Class to create an instance of the ResultSet class for you, in your
+My::DBIC::Schema::CD class:
+
   # class definition as normal
-  __PACKAGE__->load_components(qw/ Core /);
+  use base 'DBIx::Class::Core';
   __PACKAGE__->table('cd');
 
   # tell DBIC to use the custom ResultSet class
@@ -404,34 +409,47 @@
 
 =head2 Using SQL functions on the left hand side of a comparison
 
-Using SQL functions on the left hand side of a comparison is generally
-not a good idea since it requires a scan of the entire table.  However,
+Using SQL functions on the left hand side of a comparison is generally not a
+good idea since it requires a scan of the entire table. (Unless your RDBMS
+supports indexes on expressions - including return values of functions - and
+you create an index on the return value of the function in question.) However,
 it can be accomplished with C<DBIx::Class> when necessary.
 
+Your approach for doing so will depend on whether you have turned
+quoting on via the C<quote_char> and C<name_sep> attributes. If you
+explicitly defined C<quote_char> and C<name_sep> in your
+C<connect_info> (see L<DBIx::Class::Storage::DBI/"connect_info">) then
+you are using quoting, otherwise not.
+
 If you do not have quoting on, simply include the function in your search
 specification as you would any column:
 
   $rs->search({ 'YEAR(date_of_birth)' => 1979 });
 
-With quoting on, or for a more portable solution, use the C<where>
-attribute:
+With quoting on, or for a more portable solution, use literal SQL values with
+placeholders:
 
-  $rs->search({}, { where => \'YEAR(date_of_birth) = 1979' });
+  $rs->search(\[ 'YEAR(date_of_birth) = ?', [ plain_value => 1979 ] ]);
 
-=begin hidden
+  # Equivalent SQL:
+  # SELECT * FROM employee WHERE YEAR(date_of_birth) = ?
 
-(When the bind args ordering bug is fixed, this technique will be better
-and can replace the one above.)
+  $rs->search({
+    name => 'Bob',
+    -nest => \[ 'YEAR(date_of_birth) = ?', [ plain_value => 1979 ] ],
+  });
 
-With quoting on, or for a more portable solution, use the C<where> and
-C<bind> attributes:
+  # Equivalent SQL:
+  # SELECT * FROM employee WHERE name = ? AND YEAR(date_of_birth) = ?
 
-  $rs->search({}, {
-      where => \'YEAR(date_of_birth) = ?',
-      bind  => [ 1979 ]
-  });
+Note: the C<plain_value> string in the C<< [ plain_value => 1979 ] >> part
+should be either the same as the name of the column (do this if the type of the
+return value of the function is the same as the type of the column) or
+otherwise it's essentially a dummy string currently (use C<plain_value> as a
+habit). It is used by L<DBIx::Class> to handle special column types.
 
-=end hidden
+See also L<SQL::Abstract/Literal SQL with placeholders and bind values
+(subqueries)>.
 
 =head1 JOINS AND PREFETCHING
 
@@ -759,7 +777,7 @@
 
     package My::App::Schema;
 
-    use base DBIx::Class::Schema;
+    use base 'DBIx::Class::Schema';
 
     # load subclassed classes from My::App::Schema::Result/ResultSet
     __PACKAGE__->load_namespaces;
@@ -779,7 +797,7 @@
 
     use strict;
     use warnings;
-    use base My::Shared::Model::Result::Baz;
+    use base 'My::Shared::Model::Result::Baz';
 
     # WARNING: Make sure you call table() again in your subclass,
     # otherwise DBIx::Class::ResultSourceProxy::Table will not be called
@@ -802,7 +820,7 @@
 for admin.  We would like like to give the admin users
 objects (L<DBIx::Class::Row>) the same methods as a regular user but
 also special admin only methods.  It doesn't make sense to create two
-seperate proxy-class files for this.  We would be copying all the user
+separate proxy-class files for this.  We would be copying all the user
 methods into the Admin class.  There is a cleaner way to accomplish
 this.
 
@@ -830,13 +848,11 @@
 
     use strict;
     use warnings;
-    use base qw/DBIx::Class/;
+    use base qw/DBIx::Class::Core/;
 
     ### Define what our admin class is, for ensure_class_loaded()
     my $admin_class = __PACKAGE__ . '::Admin';
 
-    __PACKAGE__->load_components(qw/Core/);
-
     __PACKAGE__->table('users');
 
     __PACKAGE__->add_columns(qw/user_id   email    password
@@ -922,6 +938,9 @@
     ### The statement below will print
     print "I can do admin stuff\n" if $admin->can('do_admin_stuff');
 
+Alternatively you can use L<DBIx::Class::DynamicSubclass> that implements
+exactly the above functionality.
+
 =head2 Skip row object creation for faster results
 
 DBIx::Class is not built for speed, it's built for convenience and
@@ -1062,7 +1081,7 @@
 To order C<< $book->pages >> by descending page_number, create the relation
 as follows:
 
-  __PACKAGE__->has_many('pages' => 'Page', 'book', { order_by => \'page_number DESC'} );
+  __PACKAGE__->has_many('pages' => 'Page', 'book', { order_by => { -desc => 'page_number'} } );
 
 =head2 Filtering a relationship result set
 
@@ -1075,8 +1094,7 @@
 This is straightforward using L<ManyToMany|DBIx::Class::Relationship/many_to_many>:
 
   package My::User;
-  use base 'DBIx::Class';
-  __PACKAGE__->load_components('Core');
+  use base 'DBIx::Class::Core';
   __PACKAGE__->table('user');
   __PACKAGE__->add_columns(qw/id name/);
   __PACKAGE__->set_primary_key('id');
@@ -1084,8 +1102,7 @@
   __PACKAGE__->many_to_many('addresses' => 'user_address', 'address');
 
   package My::UserAddress;
-  use base 'DBIx::Class';
-  __PACKAGE__->load_components('Core');
+  use base 'DBIx::Class::Core';
   __PACKAGE__->table('user_address');
   __PACKAGE__->add_columns(qw/user address/);
   __PACKAGE__->set_primary_key(qw/user address/);
@@ -1093,8 +1110,7 @@
   __PACKAGE__->belongs_to('address' => 'My::Address');
 
   package My::Address;
-  use base 'DBIx::Class';
-  __PACKAGE__->load_components('Core');
+  use base 'DBIx::Class::Core';
   __PACKAGE__->table('address');
   __PACKAGE__->add_columns(qw/id street town area_code country/);
   __PACKAGE__->set_primary_key('id');
@@ -1104,6 +1120,16 @@
   $rs = $user->addresses(); # get all addresses for a user
   $rs = $address->users(); # get all users for an address
 
+  my $address = $user->add_to_addresses(    # returns a My::Address instance,
+                                            # NOT a My::UserAddress instance!
+    {
+      country => 'United Kingdom',
+      area_code => 'XYZ',
+      town => 'London',
+      street => 'Sesame',
+    }
+  );
+
 =head2 Relationships across DB schemas
 
 Mapping relationships across L<DB schemas|DBIx::Class::Manual::Glossary/DB schema>
@@ -1115,8 +1141,7 @@
 declaration, like so...
 
   package MyDatabase::Main::Artist;
-  use base qw/DBIx::Class/;
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  use base qw/DBIx::Class::Core/;
 
   __PACKAGE__->table('database1.artist'); # will use "database1.artist" in FROM clause
 
@@ -1198,6 +1223,8 @@
 
 =head1 TRANSACTIONS
 
+=head2 Transactions with txn_do
+
 As of version 0.04001, there is improved transaction support in
 L<DBIx::Class::Storage> and L<DBIx::Class::Schema>.  Here is an
 example of the recommended way to use it:
@@ -1229,13 +1256,120 @@
     deal_with_failed_transaction();
   }
 
+Note: by default C<txn_do> will re-run the coderef one more time if an
+error occurs due to client disconnection (e.g. the server is bounced).
+You need to make sure that your coderef can be invoked multiple times
+without terrible side effects.
+
 Nested transactions will work as expected. That is, only the outermost
 transaction will actually issue a commit to the $dbh, and a rollback
 at any level of any transaction will cause the entire nested
-transaction to fail. Support for savepoints and for true nested
-transactions (for databases that support them) will hopefully be added
-in the future.
+transaction to fail.
 
+=head2 Nested transactions and auto-savepoints
+
+If savepoints are supported by your RDBMS, it is possible to achieve true
+nested transactions with minimal effort. To enable auto-savepoints via nested
+transactions, supply the C<< auto_savepoint = 1 >> connection attribute.
+
+Here is an example of true nested transactions. In the example, we start a big
+task which will create several rows. Generation of data for each row is a
+fragile operation and might fail. If we fail creating something, depending on
+the type of failure, we want to abort the whole task, or only skip the failed
+row.
+
+  my $schema = MySchema->connect("dbi:Pg:dbname=my_db");
+
+  # Start a transaction. Every database change from here on will only be 
+  # committed into the database if the eval block succeeds.
+  eval {
+    $schema->txn_do(sub {
+      # SQL: BEGIN WORK;
+
+      my $job = $schema->resultset('Job')->create({ name=> 'big job' });
+      # SQL: INSERT INTO job ( name) VALUES ( 'big job' );
+
+      for (1..10) {
+
+        # Start a nested transaction, which in fact sets a savepoint.
+        eval {
+          $schema->txn_do(sub {
+            # SQL: SAVEPOINT savepoint_0;
+
+            my $thing = $schema->resultset('Thing')->create({ job=>$job->id });
+            # SQL: INSERT INTO thing ( job) VALUES ( 1 );
+
+            if (rand > 0.8) {
+              # This will generate an error, thus setting $@
+
+              $thing->update({force_fail=>'foo'});
+              # SQL: UPDATE thing SET force_fail = 'foo'
+              #      WHERE ( id = 42 );
+            }
+          });
+        };
+        if ($@) {
+          # SQL: ROLLBACK TO SAVEPOINT savepoint_0;
+
+          # There was an error while creating a $thing. Depending on the error
+          # we want to abort the whole transaction, or only rollback the
+          # changes related to the creation of this $thing
+
+          # Abort the whole job
+          if ($@ =~ /horrible_problem/) {
+            print "something horrible happened, aborting job!";
+            die $@;                # rethrow error
+          }
+
+          # Ignore this $thing, report the error, and continue with the
+          # next $thing
+          print "Cannot create thing: $@";
+        }
+        # There was no error, so save all changes since the last 
+        # savepoint.
+
+        # SQL: RELEASE SAVEPOINT savepoint_0;
+      }
+    });
+  };
+  if ($@) {
+    # There was an error while handling the $job. Rollback all changes
+    # since the transaction started, including the already committed
+    # ('released') savepoints. There will be neither a new $job nor any
+    # $thing entry in the database.
+
+    # SQL: ROLLBACK;
+
+    print "ERROR: $@\n";
+  }
+  else {
+    # There was no error while handling the $job. Commit all changes.
+    # Only now other connections can see the newly created $job and
+    # @things.
+
+    # SQL: COMMIT;
+
+    print "Ok\n";
+  }
+
+In this example it might be hard to see where the rollbacks, releases and
+commits are happening, but it works just the same as for plain C<txn_do>: If
+the C<eval>-block around C<txn_do> fails, a rollback is issued. If the C<eval>
+succeeds, the transaction is committed (or the savepoint released).
+
+While you can get more fine-grained control using C<svp_begin>, C<svp_release>
+and C<svp_rollback>, it is strongly recommended to use C<txn_do> with coderefs.
+
+=head2 Simple Transactions with DBIx::Class::Storage::TxnScopeGuard
+
+An easy way to use transactions is with
+L<DBIx::Class::Storage::TxnScopeGuard>. See L</Automatically creating
+related objects> for an example.
+
+Note that unlike txn_do, TxnScopeGuard will only make sure the connection is
+alive when issuing the C<BEGIN> statement. It will not (and really can not)
+retry if the server goes away mid-operations, unlike C<txn_do>.
+
 =head1 SQL
 
 =head2 Creating Schemas From An Existing Database
@@ -1271,7 +1405,7 @@
 To create a new database using the schema:
 
  my $schema = My::Schema->connect($dsn);
- $schema->deploy({ add_drop_tables => 1});
+ $schema->deploy({ add_drop_table => 1});
 
 To import created .sql files using the mysql client:
 
@@ -1309,8 +1443,7 @@
   package MyAppDB::Dual;
   use strict;
   use warnings;
-  use base 'DBIx::Class';
-  __PACKAGE__->load_components("Core");
+  use base 'DBIx::Class::Core';
   __PACKAGE__->table("Dual");
   __PACKAGE__->add_columns(
     "dummy",
@@ -1511,24 +1644,24 @@
 Add the L<DBIx::Class::Schema::Versioned> schema component to your
 Schema class. This will add a new table to your database called
C<dbix_class_schema_version> which will keep track of which version is installed
-and warn if the user trys to run a newer schema version than the
+and warn if the user tries to run a newer schema version than the
 database thinks it has.
 
-Alternatively, you can send the conversion sql scripts to your
+Alternatively, you can send the conversion SQL scripts to your
 customers as above.
 
-=head2 Setting quoting for the generated SQL.
+=head2 Setting quoting for the generated SQL
 
 If the database contains column names with spaces and/or reserved words, they
 need to be quoted in the SQL queries. This is done using:
 
- __PACKAGE__->storage->sql_maker->quote_char([ qw/[ ]/] );
- __PACKAGE__->storage->sql_maker->name_sep('.');
+ $schema->storage->sql_maker->quote_char([ qw/[ ]/] );
+ $schema->storage->sql_maker->name_sep('.');
 
 The first sets the quote characters. Either a pair of matching
 brackets, or a C<"> or C<'>:
 
- __PACKAGE__->storage->sql_maker->quote_char('"');
+ $schema->storage->sql_maker->quote_char('"');
 
 Check the documentation of your database for the correct quote
 characters to use. C<name_sep> needs to be set to allow the SQL
@@ -1547,6 +1680,17 @@
   }
  )
 
+In some cases, quoting will be required for all users of a schema. To enforce
+this, you can also overload the C<connection> method for your schema class:
+
+ sub connection {
+     my $self = shift;
+     my $rv = $self->next::method( @_ );
+     $rv->storage->sql_maker->quote_char([ qw/[ ]/ ]);
+     $rv->storage->sql_maker->name_sep('.');
+     return $rv;
+ }
+
 =head2 Setting limit dialect for SQL::Abstract::Limit
 
 In some cases, SQL::Abstract::Limit cannot determine the dialect of
@@ -1585,7 +1729,7 @@
     }
   );
 
-In conditions (eg. C<\%cond> in the L<DBIx::Class::ResultSet/search> family of
+In conditions (e.g. C<\%cond> in the L<DBIx::Class::ResultSet/search> family of
 methods) you cannot directly use array references (since this is interpreted as
 a list of values to be C<OR>ed), but you can use the following syntax to force
 passing them as bind values:
@@ -1603,6 +1747,75 @@
 arrayrefs together with the column name, like this: C<< [column_name => value]
 >>.
 
+=head2 Using Unicode
+
+When using unicode character data there are two alternatives -
+either your database supports unicode characters (including setting
+the utf8 flag on the returned string), or you need to encode/decode
+data appropriately each time a string field is inserted into or
+retrieved from the database. It is better to avoid
+encoding/decoding data and to use your database's own unicode
+capabilities if at all possible.
+
+The L<DBIx::Class::UTF8Columns> component handles storing selected
+unicode columns in a database that does not directly support
+unicode. If used with a database that does correctly handle unicode
+then strange and unexpected data corruption B<will> occur.
+
+The Catalyst Wiki Unicode page at
+L<http://wiki.catalystframework.org/wiki/tutorialsandhowtos/using_unicode>
+has additional information on the use of Unicode with Catalyst and
+DBIx::Class.
+
+The following databases do correctly handle unicode data:-
+
+=head3 MySQL
+
+MySQL supports unicode, and will correctly flag utf8 data from the
+database if the C<mysql_enable_utf8> is set in the connect options.
+
+  my $schema = My::Schema->connection('dbi:mysql:dbname=test',
+                                      $user, $pass,
+                                      { mysql_enable_utf8 => 1} );
+  
+
+When set, data retrieved from a textual column type (char,
+varchar, etc) will have the UTF-8 flag turned on if necessary. This
+enables character semantics on that string. You will also need to
+ensure that your database / table / column is configured to use
+UTF8. See Chapter 10 of the mysql manual for details.
+
+See L<DBD::mysql> for further details.
+
+=head3 Oracle
+
+Information about Oracle support for unicode can be found in
+L<DBD::Oracle/Unicode>.
+
+=head3 PostgreSQL
+
+PostgreSQL supports unicode if the character set is correctly set
+at database creation time. Additionally the C<pg_enable_utf8>
+should be set to ensure unicode data is correctly marked.
+
+  my $schema = My::Schema->connection('dbi:Pg:dbname=test',
+                                      $user, $pass,
+                                      { pg_enable_utf8 => 1} );
+
+Further information can be found in L<DBD::Pg>.
+
+=head3 SQLite
+
+SQLite version 3 and above natively use unicode internally. To
+correctly mark unicode strings taken from the database, the
+C<sqlite_unicode> flag should be set at connect time (in versions
+of L<DBD::SQLite> prior to 1.27 this attribute was named
+C<unicode>).
+
+  my $schema = My::Schema->connection('dbi:SQLite:/tmp/test.db',
+                                      '', '',
+                                      { sqlite_unicode => 1} );
+
 =head1 BOOTSTRAPPING/MIGRATING
 
 =head2 Easy migration from class-based to schema-based setup
@@ -1700,13 +1913,28 @@
   sub insert {
     my ( $self, @args ) = @_;
     $self->next::method(@args);
-    $self->cds->new({})->fill_from_artist($self)->insert;
+    $self->create_related ('cds', \%initial_cd_data );
     return $self;
   }
 
-where C<fill_from_artist> is a method you specify in C<CD> which sets
-values in C<CD> based on the data in the C<Artist> object you pass in.
+If you want to wrap the two inserts in a transaction (for consistency,
+an excellent idea), you can use the awesome
+L<DBIx::Class::Storage::TxnScopeGuard>:
 
+  sub insert {
+    my ( $self, @args ) = @_;
+
+    my $guard = $self->result_source->schema->txn_scope_guard;
+
+    $self->next::method(@args);
+    $self->create_related ('cds', \%initial_cd_data );
+
+    $guard->commit;
+
+    return $self
+  }
+
+
 =head2 Wrapping/overloading a column accessor
 
 B<Problem:>
@@ -1856,6 +2084,47 @@
 statement and dig down to see if certain parameters cause aberrant behavior.
 You might want to check out L<DBIx::Class::QueryLog> as well.
 
+=head1 IMPROVING PERFORMANCE
+
+=over
+
+=item *
+
+Install L<Class::XSAccessor> to speed up L<Class::Accessor::Grouped>.
+
+=item *
+
+On Perl 5.8 install L<Class::C3::XS>.
+
+=item *
+
+L<prefetch|DBIx::Class::ResultSet/prefetch> relationships, where possible. See
+L</Using joins and prefetch>.
+
+=item *
+
+Use L<populate|DBIx::Class::ResultSet/populate> in void context to insert data
+when you don't need the resulting L<DBIx::Class::Row> objects, if possible, but
+see the caveats.
+
+When inserting many rows, for best results, populate a large number of rows at a
+time, but not so large that the table is locked for an unacceptably long time.
+
+If using L<create|DBIx::Class::ResultSet/create> instead, use a transaction and
+commit every C<X> rows; where C<X> gives you the best performance without
+locking the table for too long. 
+
+=item *
+
+When selecting many rows, if you don't need full-blown L<DBIx::Class::Row>
+objects, consider using L<DBIx::Class::ResultClass::HashRefInflator>.
+
+=item *
+
+See also L</STARTUP SPEED> and L</MEMORY USAGE> in this document.
+
+=back
+
 =head1 STARTUP SPEED
 
 L<DBIx::Class|DBIx::Class> programs can have a significant startup delay
@@ -1884,15 +2153,15 @@
 
 Typically L<DBIx::Class> result classes start off with
 
-    use base qw/DBIx::Class/;
-    __PACKAGE__->load_components(qw/InflateColumn::DateTime Core/);
+    use base qw/DBIx::Class::Core/;
+    __PACKAGE__->load_components(qw/InflateColumn::DateTime/);
 
 If this preamble is moved into a common base class:-
 
     package MyDBICbase;
 
-    use base qw/DBIx::Class/;
-    __PACKAGE__->load_components(qw/InflateColumn::DateTime Core/);
+    use base qw/DBIx::Class::Core/;
+    __PACKAGE__->load_components(qw/InflateColumn::DateTime/);
     1;
 
 and each result class then uses this as a base:-

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/DocMap.pod
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/DocMap.pod	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/DocMap.pod	2010-05-17 14:31:46 UTC (rev 9401)
@@ -40,8 +40,6 @@
 
 =item L<DBIx::Class::Core> - Set of standard components to load.
 
-=item L<DBIx::Class::Serialize::Storable> - ?
-
 =item L<DBIx::Class::InflateColumn> - Making objects out of your columns.
 
 =item L<DBIx::Class::InflateColumn::DateTime> - Magically turn your datetime or timestamp columns into DateTime objects.

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Example.pod
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Example.pod	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Example.pod	2010-05-17 14:31:46 UTC (rev 9401)
@@ -27,7 +27,7 @@
 
 Install DBIx::Class via CPAN should be sufficient.
 
-=head3 Create the database/tables.
+=head3 Create the database/tables
 
 First make and change the directory:
 
@@ -58,7 +58,7 @@
     title TEXT NOT NULL
   );
 
-and create the sqlite database file:
+and create the SQLite database file:
 
   sqlite3 example.db < example.sql
 
@@ -89,8 +89,7 @@
 MyDatabase/Main/Result/Artist.pm:
 
   package MyDatabase::Main::Result::Artist;
-  use base qw/DBIx::Class/;
-  __PACKAGE__->load_components(qw/Core/);
+  use base qw/DBIx::Class::Core/;
   __PACKAGE__->table('artist');
   __PACKAGE__->add_columns(qw/ artistid name /);
   __PACKAGE__->set_primary_key('artistid');
@@ -102,8 +101,8 @@
 MyDatabase/Main/Result/Cd.pm:
 
   package MyDatabase::Main::Result::Cd;
-  use base qw/DBIx::Class/;
-  __PACKAGE__->load_components(qw/Core/);
+  use base qw/DBIx::Class::Core/;
+  __PACKAGE__->load_components(qw/InflateColumn::DateTime/);
   __PACKAGE__->table('cd');
   __PACKAGE__->add_columns(qw/ cdid artist title/);
   __PACKAGE__->set_primary_key('cdid');
@@ -116,17 +115,16 @@
 MyDatabase/Main/Result/Track.pm:
 
   package MyDatabase::Main::Result::Track;
-  use base qw/DBIx::Class/;
-  __PACKAGE__->load_components(qw/Core/);
+  use base qw/DBIx::Class::Core/;
   __PACKAGE__->table('track');
-  __PACKAGE__->add_columns(qw/ trackid cd title/);
+  __PACKAGE__->add_columns(qw/ trackid cd title /);
   __PACKAGE__->set_primary_key('trackid');
   __PACKAGE__->belongs_to('cd' => 'MyDatabase::Main::Result::Cd');
 
   1;
 
 
-=head3 Write a script to insert some records.
+=head3 Write a script to insert some records
 
 insertdb.pl
 
@@ -155,10 +153,10 @@
 
   my @cds;
   foreach my $lp (keys %albums) {
-    my $artist = $schema->resultset('Artist')->search({
+    my $artist = $schema->resultset('Artist')->find({
       name => $albums{$lp}
     });
-    push @cds, [$lp, $artist->first];
+    push @cds, [$lp, $artist->id];
   }
 
   $schema->populate('Cd', [
@@ -179,10 +177,10 @@
 
   my @tracks;
   foreach my $track (keys %tracks) {
-    my $cdname = $schema->resultset('Cd')->search({
+    my $cdname = $schema->resultset('Cd')->find({
       title => $tracks{$track},
     });
-    push @tracks, [$cdname->first, $track];
+    push @tracks, [$cdname->id, $track];
   }
 
   $schema->populate('Track',[
@@ -200,7 +198,7 @@
   use strict;
 
   my $schema = MyDatabase::Main->connect('dbi:SQLite:db/example.db');
-  # for other DSNs, e.g. MySql, see the perldoc for the relevant dbd
+  # for other DSNs, e.g. MySQL, see the perldoc for the relevant dbd
   # driver, e.g perldoc L<DBD::mysql>.
 
   get_tracks_by_cd('Bad');
@@ -347,7 +345,7 @@
 
 =head1 Notes
 
-A reference implentation of the database and scripts in this example
+A reference implementation of the database and scripts in this example
 are available in the main distribution for DBIx::Class under the
 directory F<t/examples/Schema>.
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/FAQ.pod
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/FAQ.pod	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/FAQ.pod	2010-05-17 14:31:46 UTC (rev 9401)
@@ -26,8 +26,7 @@
 
 Next, spend some time defining which data you need to store, and how
 it relates to the other data you have. For some help on normalisation,
-go to L<http://b62.tripod.com/doc/dbbase.htm> or
-L<http://209.197.234.36/db/simple.html>.
+go to L<http://b62.tripod.com/doc/dbbase.htm>.
 
 Now, decide whether you want to have the database itself be the
 definitive source of information about the data layout, or your
@@ -57,6 +56,12 @@
 L<DBIx::Class::Schema/deploy>. See there for details, or the
 L<DBIx::Class::Manual::Cookbook>.
 
+=item .. store/retrieve Unicode data in my database?
+
+Make sure your database supports Unicode and set the connect
+attributes appropriately - see
+L<DBIx::Class::Manual::Cookbook/Using Unicode>
+
 =item .. connect to my database?
 
 Once you have created all the appropriate table/source classes, and an
@@ -127,7 +132,7 @@
 the tables are to be joined. The condition may contain as many fields
 as you like. See L<DBIx::Class::Relationship::Base>.
 
-=item .. define a relatiopnship across an intermediate table? (many-to-many)
+=item .. define a relationship across an intermediate table? (many-to-many)
 
 Read the documentation on L<DBIx::Class::Relationship/many_to_many>.
 
@@ -183,16 +188,10 @@
 
 =item .. sort my results based on fields I've aliased using C<as>?
 
-You don't. You'll need to supply the same functions/expressions to
-C<order_by>, as you did to C<select>.
+You didn't alias anything, since L<as|DBIx::Class::ResultSet/as>
+B<has nothing to do> with the produced SQL. See
+L<DBIx::Class::ResultSet/select> for details.
 
-To get "fieldname AS alias" in your SQL, you'll need to supply a
-literal chunk of SQL in your C<select> attribute, such as:
-
- ->search({}, { select => [ \'now() AS currenttime'] })
-
-Then you can use the alias in your C<order_by> attribute.
-
 =item .. group the results of my search?
 
 Supply a list of columns you want to group on, to the C<group_by>
@@ -200,16 +199,8 @@
 
 =item .. group my results based on fields I've aliased using C<as>?
 
-You don't. You'll need to supply the same functions/expressions to
-C<group_by>, as you did to C<select>.
+You don't. See the explanation on ordering by an alias above.
 
-To get "fieldname AS alias" in your SQL, you'll need to supply a
-literal chunk of SQL in your C<select> attribute, such as:
-
- ->search({}, { select => [ \'now() AS currenttime'] })
-
-Then you can use the alias in your C<group_by> attribute.
-
 =item .. filter the results of my search?
 
 The first argument to C<search> is a hashref of accessor names and
@@ -217,10 +208,10 @@
 
  ->search({'created_time' => { '>=', '2006-06-01 00:00:00' } })
 
-Note that to use a function here you need to make the whole value into
-a scalar reference:
+Note that to use a function here you need to make it a scalar
+reference:
 
- ->search({'created_time' => \'>= yesterday()' })
+ ->search({'created_time' => { '>=', \'yesterday()' } })
 
 =item .. search in several tables simultaneously?
 
@@ -244,34 +235,18 @@
 query, which can be accessed similarly to a table, see your database
 documentation for details.
 
-=item .. search using greater-than or less-than and database functions?
-
-To use functions or literal SQL with conditions other than equality
-you need to supply the entire condition, for example:
-
- my $interval = "< now() - interval '12 hours'";
- ->search({last_attempt => \$interval})
-
-and not:
-
- my $interval = "now() - interval '12 hours'";
- ->search({last_attempt => { '<' => \$interval } })
-
 =item .. search with an SQL function on the left hand side?
 
 To use an SQL function on the left hand side of a comparison:
 
- ->search({}, { where => \'YEAR(date_of_birth)=1979' });
+ ->search({ -nest => \[ 'YEAR(date_of_birth) = ?', [ plain_value => 1979 ] ] });
 
-=begin hidden
+Note: the C<plain_value> string in the C<< [ plain_value => 1979 ] >> part
+should be either the same as the name of the column (do this if the type of the
+return value of the function is the same as the type of the column) or
+otherwise it's essentially a dummy string currently (use C<plain_value> as a
+habit). It is used by L<DBIx::Class> to handle special column types.
 
-(When the bind arg ordering bug is fixed, the previous example can be
-replaced with the following.)
-
- ->search({}, { where => \'YEAR(date_of_birth)=?', bind => [ 1979 ] });
-
-=end hidden
-
 Or, if you have quoting off:
 
  ->search({ 'YEAR(date_of_birth)' => 1979 });
@@ -388,6 +363,9 @@
 
 =item .. insert many rows of data efficiently?
 
+The C<populate> method in L<DBIx::Class::ResultSet> provides
+efficient bulk inserts.
+
 =item .. update a collection of rows at the same time?
 
 Create a resultset using a search, to filter the rows of data you
@@ -447,6 +425,38 @@
 
 =back
 
+=head2 Custom methods in Result classes
+
+You can add custom methods that do arbitrary things, even to unrelated tables. 
+For example, to provide a C<< $book->foo() >> method which searches the 
+cd table, you could add this to Book.pm:
+
+  sub foo {
+    my ($self, $col_data) = @_;
+    return $self->result_source->schema->resultset('cd')->search($col_data);
+  }
+
+And invoke that on any Book Result object like so:
+
+  my $rs = $book->foo({ title => 'Down to Earth' });
+
+When two tables ARE related, L<DBIx::Class::Relationship::Base> provides many
+methods to find or create data in related tables for you. But if you want to
+write your own methods, you can.
+
+For example, to provide a C<< $book->foo() >> method to manually implement
+what create_related() from L<DBIx::Class::Relationship::Base> does, you could 
+add this to Book.pm:
+
+  sub foo {
+    my ($self, $relname, $col_data) = @_;
+    return $self->related_resultset($relname)->create($col_data);
+  }
+
+Invoked like this:
+
+  my $author = $book->foo('author', { name => 'Fred' });
+
 =head2 Misc
 
 =over 4
@@ -534,6 +544,65 @@
 using the tips in L<DBIx::Class::Manual::Cookbook/"Skip row object creation for faster results">
 and L<DBIx::Class::Manual::Cookbook/"Get raw data for blindingly fast results">
 
+=item How do I override a run time method (e.g. a relationship accessor)?
+
+If you need access to the original accessor, then you must "wrap around" the original method.
+You can do that either with L<Moose::Manual::MethodModifiers> or L<Class::Method::Modifiers>.
+The code example works for both modules:
+
+    package Your::Schema::Group;
+    use Class::Method::Modifiers;
+    
+    # ... declare columns ...
+    
+    __PACKAGE__->has_many('group_servers', 'Your::Schema::GroupServer', 'group_id');
+    __PACKAGE__->many_to_many('servers', 'group_servers', 'server');
+    
+    # if the server group is a "super group", then return all servers
+    # otherwise return only servers that belongs to the given group
+    around 'servers' => sub {
+        my $orig = shift;
+        my $self = shift;
+
+        return $self->$orig(@_) unless $self->is_super_group;
+        return $self->result_source->schema->resultset('Server')->all;
+    };
+
+If you just want to override the original method, and don't care about the data
+from the original accessor, then you have two options. Either use
+L<Method::Signatures::Simple> that does most of the work for you, or do
+it the "dirty way".
+
+L<Method::Signatures::Simple> way:
+
+    package Your::Schema::Group;
+    use Method::Signatures::Simple;
+    
+    # ... declare columns ...
+    
+    __PACKAGE__->has_many('group_servers', 'Your::Schema::GroupServer', 'group_id');
+    __PACKAGE__->many_to_many('servers', 'group_servers', 'server');
+    
+    # The method keyword automatically injects the annoying my $self = shift; for you.
+    method servers {
+        return $self->result_source->schema->resultset('Server')->search({ ... });
+    }
+
+The dirty way:
+
+    package Your::Schema::Group;
+    use Sub::Name;
+    
+    # ... declare columns ...
+    
+    __PACKAGE__->has_many('group_servers', 'Your::Schema::GroupServer', 'group_id');
+    __PACKAGE__->many_to_many('servers', 'group_servers', 'server');
+    
+    *servers = subname servers => sub {
+        my $self = shift;
+        return $self->result_source->schema->resultset('Server')->search({ ... });
+    };
+    
 =back
 
 =head2 Notes for CDBI users
@@ -564,7 +633,7 @@
 second one will use a default port of 5433, while L<DBD::Pg> is compiled with a
 default port of 5432.
 
-You can chance the port setting in C<postgresql.conf>.
+You can change the port setting in C<postgresql.conf>.
 
 =item I've lost or forgotten my mysql password
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Intro.pod
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Intro.pod	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Intro.pod	2010-05-17 14:31:46 UTC (rev 9401)
@@ -105,21 +105,26 @@
 Next, create each of the classes you want to load as specified above:
 
   package My::Schema::Result::Album;
-  use base qw/DBIx::Class/;
+  use base qw/DBIx::Class::Core/;
 
-Load any components required by each class with the load_components() method.
-This should consist of "Core" plus any additional components you want to use.
-For example, if you want to force columns to use UTF-8 encoding:
+Load any additional components you may need with the load_components() method,
+and provide component configuration if required. For example, if you want
+automatic row ordering:
 
-  __PACKAGE__->load_components(qw/ ForceUTF8 Core /);
+  __PACKAGE__->load_components(qw/ Ordered /);
+  __PACKAGE__->position_column('rank');
 
+Ordered will refer to a field called 'position' unless otherwise directed.  Here you are defining
+the ordering field to be named 'rank'.  (NOTE: Insert errors may occur if you use the Ordered 
+component, but have not defined a position column or have a 'position' field in your row.)
+
 Set the table for your class:
 
   __PACKAGE__->table('album');
 
 Add columns to your class:
 
-  __PACKAGE__->add_columns(qw/ albumid artist title /);
+  __PACKAGE__->add_columns(qw/ albumid artist title rank /);
 
 Each column can also be set up with its own accessor, data_type and other pieces
 of information that it may be useful to have -- just pass C<add_columns> a hash:
@@ -145,13 +150,20 @@
                               is_nullable => 0,
                               is_auto_increment => 0,
                               default_value => '',
+                            },
+                          rank =>
+                            { data_type => 'integer',
+                              size      => 16,
+                              is_nullable => 0,
+                              is_auto_increment => 0,
+                              default_value => '',
                             }
                          );
 
 DBIx::Class doesn't directly use most of this data yet, but various related
 modules such as L<DBIx::Class::WebForm> make use of it. Also it allows you to
 create your database tables from your Schema, instead of the other way around.
-See L<SQL::Translator> for details.
+See L<DBIx::Class::Schema/deploy> for details.
 
 See L<DBIx::Class::ResultSource> for more details of the possible column
 attributes.
@@ -217,7 +229,7 @@
 Note that L<DBIx::Class::Schema> does not cache connections for you. If you use
 multiple connections, you need to do this manually.
 
-To execute some sql statements on every connect you can add them as an option in
+To execute some SQL statements on every connect you can add them as an option in
 a special fifth argument to connect:
 
   my $another_schema = My::Schema->connect(
@@ -228,7 +240,7 @@
       { on_connect_do => \@on_connect_sql_statments }
   );
 
-See L<DBIx::Class::Schema::Storage::DBI/connect_info> for more information about
+See L<DBIx::Class::Storage::DBI/connect_info> for more information about
 this and other special C<connect>-time options.
 
 =head3 Via a database handle
@@ -267,8 +279,8 @@
   $album->set_column('title', 'Presence');
   $title = $album->get_column('title');
 
-Just like with L<Class::DBI>, you call C<update> to commit your changes to the
-database:
+Just like with L<Class::DBI>, you call C<update> to save your changes to the
+database (by executing the actual C<UPDATE> statement):
 
   $album->update;
 
@@ -389,6 +401,53 @@
 
 =head1 NOTES
 
+=head2 The Significance and Importance of Primary Keys
+
+The concept of a L<primary key|DBIx::Class::ResultSource/set_primary_key> in
+DBIx::Class warrants special discussion. The formal definition (which somewhat
+resembles that of a classic RDBMS) is I<a unique constraint that is least
+likely to change after initial row creation>. However this is where the
+similarity ends. Any time you call a CRUD operation on a row (e.g.
+L<delete|DBIx::Class::Row/delete>,
+L<update|DBIx::Class::Row/update>,
+L<discard_changes|DBIx::Class::Row/discard_changes>,
+etc.) DBIx::Class will use the values of the
+L<primary key|DBIx::Class::ResultSource/set_primary_key> columns to populate
+the C<WHERE> clause necessary to accomplish the operation. This is why it is
+important to declare a L<primary key|DBIx::Class::ResultSource/set_primary_key>
+on all your result sources B<even if the underlying RDBMS does not have one>.
+In a pinch one can always declare each row identifiable by all its columns:
+
+ __PACKAGE__->set_primary_key (__PACKAGE__->columns);
+
+Note that DBIx::Class is smart enough to store a copy of the PK values before
+any row-object changes take place, so even if you change the values of PK
+columns the C<WHERE> clause will remain correct.
+
+If you elect not to declare a C<primary key>, DBIx::Class will behave correctly
+by throwing exceptions on any row operation that relies on unique identifiable
+rows. If you inherited datasets with multiple identical rows in them, you can
+still operate with such sets provided you only utilize
+L<DBIx::Class::ResultSet> CRUD methods:
+L<search|DBIx::Class::ResultSet/search>,
+L<update|DBIx::Class::ResultSet/update>,
+L<delete|DBIx::Class::ResultSet/delete>
+
+For example, the following would not work (assuming C<People> does not have
+a declared PK):
+
+ my $row = $schema->resultset('People')
+                   ->search({ last_name => 'Dantes' })
+                    ->next;
+ $row->update({ children => 2 }); # <-- exception thrown because $row isn't
+                                  # necessarily unique
+
+So instead the following should be done:
+
+ $schema->resultset('People')
+         ->search({ last_name => 'Dantes' })
+          ->update({ children => 2 }); # <-- updates ALL Dantes to have children of 2
+
 =head2 Problems on RHEL5/CentOS5
 
 There used to be an issue with the system perl on Red Hat Enterprise

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Joining.pod
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Joining.pod	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Joining.pod	2010-05-17 14:31:46 UTC (rev 9401)
@@ -17,7 +17,7 @@
 But I'll explain anyway. Assuming you have created your database in a
 more or less sensible way, you will end up with several tables that
 contain C<related> information. For example, you may have a table
-containing information about C<CDs>, containing the CD title and it's
+containing information about C<CD>s, containing the CD title and its
 year of publication, and another table containing all the C<Track>s
 for the CDs, one track per row.
 
@@ -34,7 +34,8 @@
 So, joins are a way of extending simple select statements to include
 fields from other, related, tables. There are various types of joins,
 depending on which combination of the data you wish to retrieve, see
-MySQL's doc on JOINs: L<http://dev.mysql.com/doc/refman/5.0/en/join.html>.
+MySQL's doc on JOINs:
+L<http://dev.mysql.com/doc/refman/5.0/en/join.html>.
 
 =head1 DEFINING JOINS AND RELATIONSHIPS
 
@@ -42,7 +43,7 @@
 be defined in the L<ResultSource|DBIx::Class::Manual::Glossary/ResultSource> for the
 table. If the relationship needs to be accessed in both directions
 (i.e. Fetch all tracks of a CD, and fetch the CD data for a Track),
-then it needs to be defined in both tables.
+then it needs to be defined for both tables.
 
 For the CDs/Tracks example, that means writing, in C<MySchema::CD>:
 
@@ -68,14 +69,15 @@
 
 When performing either a L<search|DBIx::Class::ResultSet/search> or a
 L<find|DBIx::Class::ResultSet/find> operation, you can specify which
-C<relations> to also fetch data from (or sort by), using the
+C<relations> to also refine your results based on, using the
 L<join|DBIx::Class::ResultSet/join> attribute, like this:
 
   $schema->resultset('CD')->search(
-    { 'Title' => 'Funky CD' },
+    { 'Title' => 'Funky CD',
+      'tracks.Name' => { like => 'T%' }
+    },
     { join      => 'tracks',
-      '+select' => [ 'tracks.Name', 'tracks.Artist' ],
-      '+as'     => [ 'TrackName', 'ArtistName' ]
+      order_by  => ['tracks.id'],
     }
   );
 
@@ -84,18 +86,125 @@
 L<DBIx::Class::ResultSet/ATTRIBUTES>, but here's a quick break down:
 
 The first argument to search is a hashref of the WHERE attributes, in
-this case a simple restriction on the Title column. The second
-argument is a hashref of attributes to the search, '+select' adds
-extra columns to the select (from the joined table(s) or from
-calculations), and '+as' gives aliases to those fields.
+this case a restriction on the Title column in the CD table, and a
+restriction on the name of the track in the Tracks table, but ONLY for
+tracks actually related to the chosen CD(s). The second argument is a
+hashref of attributes to the search, the results will be returned
+sorted by the C<id> of the related tracks.
 
-'join' specifies which C<relationships> to include in the query. The
-distinction between C<relationships> and C<tables> is important here,
-only the C<relationship> names are valid.
+The special 'join' attribute specifies which C<relationships> to
+include in the query. The distinction between C<relationships> and
+C<tables> is important here, only the C<relationship> names are valid.
 
-This example should magically produce SQL like the second select in
-L</WHAT ARE JOINS> above.
+This slightly nonsense example will produce SQL similar to:
 
+  SELECT cd.ID, cd.Title, cd.Year FROM CD cd JOIN Tracks tracks ON cd.ID = tracks.CDID WHERE cd.Title = 'Funky CD' AND tracks.Name LIKE 'T%' ORDER BY 'tracks.id';
+
+=head1 FETCHING RELATED DATA
+
+Another common use for joining to related tables, is to fetch the data
+from both tables in one query, preventing extra round-trips to the
+database. See the example above in L</WHAT ARE JOINS>.
+
+Three techniques are described here. Of the three, only the
+C<prefetch> technique will deal sanely with fetching related objects
+over a C<has_many> relation. The others work fine for 1 to 1 type
+relationships.
+
+=head2 Whole related objects
+
+To fetch entire related objects, e.g. CDs and all Track data, use the
+'prefetch' attribute:
+
+  $schema->resultset('CD')->search(
+    { 'Title' => 'Funky CD',
+    },
+    { prefetch      => 'tracks',
+      order_by  => ['tracks.id'],
+    }
+  );
+
+This will produce SQL similar to the following:
+
+  SELECT cd.ID, cd.Title, cd.Year, tracks.id, tracks.Name, tracks.Artist FROM CD JOIN Tracks ON CD.ID = tracks.CDID WHERE cd.Title = 'Funky CD' ORDER BY 'tracks.id';
+
+The syntax of 'prefetch' is the same as 'join' and implies the
+joining, so there is no need to use both together.
+
+=head2 Subset of related fields
+
+To fetch a subset or the related fields, the '+select' and '+as'
+attributes can be used. For example, if the CD data is required and
+just the track name from the Tracks table:
+
+  $schema->resultset('CD')->search(
+    { 'Title' => 'Funky CD',
+    },
+    { join      => 'tracks',
+      '+select' => ['tracks.Name'],
+      '+as'     => ['track_name'],
+      order_by  => ['tracks.id'],
+    }
+  );
+
+Which will produce the query:
+
+  SELECT cd.ID, cd.Title, cd.Year, tracks.Name FROM CD JOIN Tracks ON CD.ID = tracks.CDID WHERE cd.Title = 'Funky CD' ORDER BY 'tracks.id';
+
+Note that the '+as' does not produce an SQL 'AS' keyword in the
+output, see the L<DBIx::Class::Manual::FAQ> for an explanation.
+
+This type of column restriction has a downside, the resulting $row
+object will have no 'track_name' accessor:
+
+  while(my $row = $search_rs->next) {
+     print $row->track_name; ## ERROR
+  }
+
+Instead C<get_column> must be used:
+
+  while(my $row = $search_rs->next) {
+     print $row->get_column('track_name'); ## WORKS
+  }
+
+=head2 Incomplete related objects
+
+In rare circumstances, you may also wish to fetch related data as
+incomplete objects. The usual reason to do this is when the related table
+has a very large field you don't need for the current data
+output. This is better solved by storing that field in a separate
+table which you only join to when needed.
+
+To fetch an incomplete related object, supply the dotted notation to the '+as' attribute: 
+
+  $schema->resultset('CD')->search(
+    { 'Title' => 'Funky CD',
+    },
+    { join      => 'tracks',
+      '+select' => ['tracks.Name'],
+      '+as'     => ['tracks.Name'], 
+      order_by  => ['tracks.id'],
+    }
+  );
+
+Which will produce the same query as above:
+
+  SELECT cd.ID, cd.Title, cd.Year, tracks.Name FROM CD JOIN Tracks ON CD.ID = tracks.CDID WHERE cd.Title = 'Funky CD' ORDER BY 'tracks.id';
+
+Now you can access the result using the relationship accessor:
+
+  while(my $row = $search_rs->next) {
+     print $row->tracks->name; ## WORKS
+  }
+
+However, this will produce broken objects. If the tracks id column is
+not fetched, the object will not be usable for any operation other
+than reading its data. Use the L</Whole related objects> method as
+much as possible to avoid confusion in your code later.
+
+Broken means: Update will not work. Fetching other related objects
+will not work. Deleting the object will not work.
+
 =head1 COMPLEX JOINS AND STUFF
 
 =head2 Across multiple relations
@@ -114,18 +223,16 @@
   $schema->resultset('CD')->search(
     { 'Title' => 'Funky CD' },
     { join      => { 'tracks' => 'artist' },
-      '+select' => [ 'tracks.Name', 'artist.Artist' ],
-      '+as'     => [ 'TrackName', 'ArtistName' ]
     }
   );
 
 Which is:
 
-  SELECT me.ID, me.Title, me.Year, tracks.Name, artist.Artist FROM CD me JOIN Tracks tracks ON CD.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD';
+  SELECT me.ID, me.Title, me.Year FROM CD me JOIN Tracks tracks ON CD.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD';
 
 To perform joins using relations of the tables you are joining to, use
 a hashref to indicate the join depth. This can theoretically go as
-deep as you like (warning, contrived examples!): 
+deep as you like (warning: contrived examples!): 
 
   join => { room => { table => 'leg' } }
 
@@ -147,12 +254,10 @@
     { 'Title' => 'Funky CD' },
     { join      => { 'tracks' => 'artist' },
       order_by  => [ 'tracks.Name', 'artist.Artist' ],
-      '+select' => [ 'tracks.Name', 'artist.Artist' ],
-      '+as'     => [ 'TrackName', 'ArtistName' ]
     }
   );
 
-  SELECT me.ID, me.Title, me.Year, tracks.Name, artist.Artist FROM CD me JOIN Tracks tracks ON CD.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD' ORDER BY tracks.Name, artist.Artist;
+  SELECT me.ID, me.Title, me.Year FROM CD me JOIN Tracks tracks ON CD.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD' ORDER BY tracks.Name, artist.Artist;
 
 This is essential if any of your tables have columns with the same names.
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Reading.pod
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Reading.pod	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Reading.pod	2010-05-17 14:31:46 UTC (rev 9401)
@@ -17,14 +17,14 @@
 Methods should be documented in the files which also contain the code
 for the method, or that file should be hidden from PAUSE completely,
 in which case the methods are documented in the file which loads
-it. Methods may also be documented and refered to in files
+it. Methods may also be documented and referred to in files
 representing the major objects or components on which they can be
 called.
 
 For example, L<DBIx::Class::Relationship> documents the methods
 actually coded in the helper relationship classes like
 DBIx::Class::Relationship::BelongsTo. The BelongsTo file itself is
-hidden from pause as it has no documentation. The accessors created by
+hidden from PAUSE as it has no documentation. The accessors created by
 relationships should be mentioned in L<DBIx::Class::Row>, the major
 object that they will be called on.
 
@@ -46,7 +46,7 @@
 what the method returns.
 
 The first item provides a list of all possible values for the
-arguments of the method in order, separated by C<, >, preceeded by the
+arguments of the method in order, separated by C<, >, preceded by the
 text "Arguments: "
 
 Example (for the belongs_to relationship):
@@ -145,10 +145,10 @@
 =item *
 
 The argument list is followed by some examples of how to use the
-method, using it's various types of arguments.
+method, using its various types of arguments.
 
 The examples can also include ways to use the results if
-applicable. For instance if the documentation is for a relationship
+applicable. For instance, if the documentation is for a relationship
 type, the examples can include how to call the resulting relation
 accessor, how to use the relation name in a search and so on.
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Troubleshooting.pod
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Troubleshooting.pod	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Manual/Troubleshooting.pod	2010-05-17 14:31:46 UTC (rev 9401)
@@ -17,13 +17,13 @@
 
 Alternatively use the C<< storage->debug >> class method:-
 
-  $class->storage->debug(1);
+  $schema->storage->debug(1);
 
 To send the output somewhere else set debugfh:-
 
-  $class->storage->debugfh(IO::File->new('/tmp/trace.out', 'w');
+  $schema->storage->debugfh(IO::File->new('/tmp/trace.out', 'w');
 
-Alternatively you can do this with the environment variable too:-
+Alternatively you can do this with the environment variable, too:-
 
   export DBIC_TRACE="1=/tmp/trace.out"
 
@@ -51,9 +51,8 @@
 
 There's likely a syntax error in the table class referred to elsewhere
 in this error message.  In particular make sure that the package
-declaration is correct, so for a schema C< MySchema > you need to
-specify a fully qualified namespace: C< package MySchema::MyTable; >
-for example.
+declaration is correct. For example, for a schema C< MySchema > 
+you need to specify a fully qualified namespace: C< package MySchema::MyTable; >.
 
 =head2 syntax error at or near "<something>" ...
 
@@ -100,29 +99,21 @@
 L<DBIx::Class::Manual::Cookbook/Setting_quoting_for_the_generated_SQL> for
 details.
 
-Note that quoting may lead to problems with C<order_by> clauses, see
-L<... column "foo DESC" does not exist ...> for info on avoiding those.
-
 =head2 column "foo DESC" does not exist ...
 
-This can happen if you've turned on quoting and then done something like
-this:
+This can happen if you are still using the obsolete order hack, and also
+happen to turn on SQL-quoting.
 
   $rs->search( {}, { order_by => [ 'name DESC' ] } );
 
-This results in SQL like this:
+Since L<DBIx::Class> >= 0.08100 and L<SQL::Abstract> >= 1.50 the above
+should be written as:
 
-  ... ORDER BY "name DESC"
+  $rs->search( {}, { order_by => { -desc => 'name' } } );
 
-The solution is to pass your order_by items as scalar references to avoid
-quoting:
+For more ways to express order clauses refer to
+L<SQL::Abstract/ORDER_BY_CLAUSES>
 
-  $rs->search( {}, { order_by => [ \'name DESC' ] } );
-
-Now you'll get SQL like this:
-
-  ... ORDER BY name DESC
-
 =head2 Perl Performance Issues on Red Hat Systems
 
 There is a problem with slow performance of certain DBIx::Class
@@ -141,20 +132,39 @@
   Fedora 8     - perl-5.8.8-41.fc8
   RHEL5        - perl-5.8.8-15.el5_2.1
 
-The issue is due to perl doing an exhaustive search of blessed objects
+This issue is due to perl doing an exhaustive search of blessed objects
 under certain circumstances.  The problem shows up as performance
-degredation exponential to the number of L<DBIx::Class> row objects in
-memory, so can be unoticeable with certain data sets, but with huge
+degradation exponential to the number of L<DBIx::Class> row objects in
+memory, so can be unnoticeable with certain data sets, but with huge
 performance impacts on other datasets.
 
-A pair of tests for susceptability to the issue, and performance effects
+A pair of tests for susceptibility to the issue and performance effects
 of the bless/overload problem can be found in the L<DBIx::Class> test
-suite in the file C<t/99rh_perl_perf_bug.t>
+suite, in the C<t/99rh_perl_perf_bug.t> file.
 
 Further information on this issue can be found in
 L<https://bugzilla.redhat.com/show_bug.cgi?id=379791>,
 L<https://bugzilla.redhat.com/show_bug.cgi?id=460308> and
 L<http://rhn.redhat.com/errata/RHBA-2008-0876.html>
 
+=head2 Excessive Memory Allocation with TEXT/BLOB/etc. Columns and Large LongReadLen
+
+It has been observed, using L<DBD::ODBC>, that creating a L<DBIx::Class::Row> 
+object which includes a column of data type TEXT/BLOB/etc. will allocate 
+LongReadLen bytes.  This allocation does not leak, but if LongReadLen 
+is large in size, and many such row objects are created, e.g. as the 
+output of a ResultSet query, the memory footprint of the Perl interpreter 
+can grow very large.
+
+The solution is to use the smallest practical value for LongReadLen.
+
+=head2 create_ddl_dir does not produce DDL for MySQL views
+
+L<SQL::Translator> does not create DDL for MySQL views if it doesn't know you
+are using mysql version 5.000001 or higher.  To explicitly set this version, add
+C<mysql_version> to the C<producer_args> in the C<%sqlt> options.
+
+  $schema->create_ddl_dir(['MySQL'], '1.0', './sql/', undef, { producer_args => { mysql_version => 5.000058 } })
+
 =cut
 


Property changes on: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Optional
___________________________________________________________________
Added: svn:ignore
   + Dependencies.pod


Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Optional/Dependencies.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Optional/Dependencies.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Optional/Dependencies.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,456 @@
+package DBIx::Class::Optional::Dependencies;
+
+use warnings;
+use strict;
+
+use Carp;
+
+# NO EXTERNAL NON-5.8.1 CORE DEPENDENCIES EVER (e.g. C::A::G)
+# This module is to be loaded by Makefile.PM on a pristine system
+
+# POD is generated automatically by calling _gen_pod from the
+# Makefile.PL in $AUTHOR mode
+
+my $moose_basic = {
+  'Moose'                      => '0.98',
+  'MooseX::Types'              => '0.21',
+};
+
+my $admin_basic = {
+  %$moose_basic,
+  'MooseX::Types::Path::Class' => '0.05',
+  'MooseX::Types::JSON'        => '0.02',
+  'JSON::Any'                  => '1.22',
+  'namespace::autoclean'       => '0.09',
+};
+
+my $reqs = {
+  dist => {
+    #'Module::Install::Pod::Inherit' => '0.01',
+  },
+
+  replicated => {
+    req => {
+      %$moose_basic,
+      'namespace::clean'          => '0.11',
+      'Hash::Merge'               => '0.12',
+    },
+    pod => {
+      title => 'Storage::Replicated',
+      desc => 'Modules required for L<DBIx::Class::Storage::DBI::Replicated>',
+    },
+  },
+
+  admin => {
+    req => {
+      %$admin_basic,
+    },
+    pod => {
+      title => 'DBIx::Class::Admin',
+      desc => 'Modules required for the DBIx::Class administrative library',
+    },
+  },
+
+  admin_script => {
+    req => {
+      %$moose_basic,
+      %$admin_basic,
+      'Getopt::Long::Descriptive' => '0.081',
+      'Text::CSV'                 => '1.16',
+    },
+    pod => {
+      title => 'dbicadmin',
+      desc => 'Modules required for the CLI DBIx::Class interface dbicadmin',
+    },
+  },
+
+  deploy => {
+    req => {
+      'SQL::Translator'           => '0.11005',
+    },
+    pod => {
+      title => 'Storage::DBI::deploy()',
+      desc => 'Modules required for L<DBIx::Class::Storage::DBI/deploy> and L<DBIx::Class::Storage::DBI/deployment_statements>',
+    },
+  },
+
+
+  test_pod => {
+    req => {
+      'Test::Pod'                 => '1.41',
+    },
+  },
+
+  test_podcoverage => {
+    req => {
+      'Test::Pod::Coverage'       => '1.08',
+      'Pod::Coverage'             => '0.20',
+    },
+  },
+
+  test_notabs => {
+    req => {
+      'Test::NoTabs'              => '0.9',
+    },
+  },
+
+  test_eol => {
+    req => {
+      'Test::EOL'                 => '0.6',
+    },
+  },
+
+  test_cycle => {
+    req => {
+      'Test::Memory::Cycle'       => '0',
+      'Devel::Cycle'              => '1.10',
+    },
+  },
+
+  test_dtrelated => {
+    req => {
+      # t/36datetime.t
+      # t/60core.t
+      'DateTime::Format::SQLite'  => '0',
+
+      # t/96_is_deteministic_value.t
+      'DateTime::Format::Strptime'=> '0',
+
+      # t/inflate/datetime_mysql.t
+      # (doesn't need Mysql itself)
+      'DateTime::Format::MySQL' => '0',
+
+      # t/inflate/datetime_pg.t
+      # (doesn't need PG itself)
+      'DateTime::Format::Pg'  => '0',
+    },
+  },
+
+  cdbicompat => {
+    req => {
+      'DBIx::ContextualFetch'     => '0',
+      'Class::DBI::Plugin::DeepAbstractSearch' => '0',
+      'Class::Trigger'            => '0',
+      'Time::Piece::MySQL'        => '0',
+      'Clone'                     => '0',
+      'Date::Simple'              => '3.03',
+    },
+  },
+
+  rdbms_pg => {
+    req => {
+      $ENV{DBICTEST_PG_DSN}
+        ? (
+          'Sys::SigAction'        => '0',
+          'DBD::Pg'               => '2.009002',
+        ) : ()
+    },
+  },
+
+  rdbms_mysql => {
+    req => {
+      $ENV{DBICTEST_MYSQL_DSN}
+        ? (
+          'DBD::mysql'              => '0',
+        ) : ()
+    },
+  },
+
+  rdbms_oracle => {
+    req => {
+      $ENV{DBICTEST_ORA_DSN}
+        ? (
+          'DateTime::Format::Oracle' => '0',
+        ) : ()
+    },
+  },
+
+  rdbms_ase => {
+    req => {
+      $ENV{DBICTEST_SYBASE_DSN}
+        ? (
+          'DateTime::Format::Sybase' => 0,
+        ) : ()
+    },
+  },
+
+  rdbms_asa => {
+    req => {
+      (scalar grep { $ENV{$_} } (qw/DBICTEST_SYBASE_ASA_DSN DBICTEST_SYBASE_ASA_ODBC_DSN/) )
+        ? (
+          'DateTime::Format::Strptime' => 0,
+        ) : ()
+    },
+  },
+
+  rdbms_db2 => {
+    req => {
+      $ENV{DBICTEST_DB2_DSN}
+        ? (
+          'DBD::DB2' => 0,
+        ) : ()
+    },
+  },
+
+};
+
+
+sub req_list_for {
+  my ($class, $group) = @_;
+
+  croak "req_list_for() expects a requirement group name"
+    unless $group;
+
+  my $deps = $reqs->{$group}{req}
+    or croak "Requirement group '$group' does not exist";
+
+  return { %$deps };
+}
+
+
+our %req_availability_cache;
+sub req_ok_for {
+  my ($class, $group) = @_;
+
+  croak "req_ok_for() expects a requirement group name"
+    unless $group;
+
+  $class->_check_deps ($group) unless $req_availability_cache{$group};
+
+  return $req_availability_cache{$group}{status};
+}
+
+sub req_missing_for {
+  my ($class, $group) = @_;
+
+  croak "req_missing_for() expects a requirement group name"
+    unless $group;
+
+  $class->_check_deps ($group) unless $req_availability_cache{$group};
+
+  return $req_availability_cache{$group}{missing};
+}
+
+sub req_errorlist_for {
+  my ($class, $group) = @_;
+
+  croak "req_errorlist_for() expects a requirement group name"
+    unless $group;
+
+  $class->_check_deps ($group) unless $req_availability_cache{$group};
+
+  return $req_availability_cache{$group}{errorlist};
+}
+
+sub _check_deps {
+  my ($class, $group) = @_;
+
+  my $deps = $class->req_list_for ($group);
+
+  my %errors;
+  for my $mod (keys %$deps) {
+    if (my $ver = $deps->{$mod}) {
+      eval "use $mod $ver ()";
+    }
+    else {
+      eval "require $mod";
+    }
+
+    $errors{$mod} = $@ if $@;
+  }
+
+  if (keys %errors) {
+    my $missing = join (', ', map { $deps->{$_} ? "$_ >= $deps->{$_}" : $_ } (sort keys %errors) );
+    $missing .= " (see $class for details)" if $reqs->{$group}{pod};
+    $req_availability_cache{$group} = {
+      status => 0,
+      errorlist => { %errors },
+      missing => $missing,
+    };
+  }
+  else {
+    $req_availability_cache{$group} = {
+      status => 1,
+      errorlist => {},
+      missing => '',
+    };
+  }
+}
+
+sub req_group_list {
+  return { map { $_ => { %{ $reqs->{$_}{req} || {} } } } (keys %$reqs) };
+}
+
+# This is to be called by the author only (automatically in Makefile.PL)
+sub _gen_pod {
+
+  my $class = shift;
+  my $modfn = __PACKAGE__ . '.pm';
+  $modfn =~ s/\:\:/\//g;
+
+  my $podfn = __FILE__;
+  $podfn =~ s/\.pm$/\.pod/;
+
+  my $distver =
+    eval { require DBIx::Class; DBIx::Class->VERSION; }
+      ||
+    do {
+      warn
+"\n\n---------------------------------------------------------------------\n" .
+'Unable to load core DBIx::Class module to determine current version, '.
+'possibly due to missing dependencies. Author-mode autodocumentation ' .
+"halted\n\n" . $@ .
+"\n\n---------------------------------------------------------------------\n"
+      ;
+      '*UNKNOWN*';  # rv
+    }
+  ;
+
+  my $sqltver = $class->req_list_for ('deploy')->{'SQL::Translator'}
+    or die "Hrmm? No sqlt dep?";
+
+  my @chunks = (
+    <<"EOC",
+#########################################################################
+#####################  A U T O G E N E R A T E D ########################
+#########################################################################
+#
+# The contents of this POD file are auto-generated.  Any changes you make
+# will be lost. If you need to change the generated text edit _gen_pod()
+# at the end of $modfn
+#
+EOC
+    '=head1 NAME',
+    "$class - Optional module dependency specifications (for module authors)",
+    '=head1 SYNOPSIS',
+    <<EOS,
+Somewhere in your build-file (e.g. L<Module::Install>'s Makefile.PL):
+
+  ...
+
+  configure_requires 'DBIx::Class' => '$distver';
+
+  require $class;
+
+  my \$deploy_deps = $class->req_list_for ('deploy');
+
+  for (keys %\$deploy_deps) {
+    requires \$_ => \$deploy_deps->{\$_};
+  }
+
+  ...
+
+Note that there are some caveats regarding C<configure_requires()>, more info
+can be found at L<Module::Install/configure_requires>
+EOS
+    '=head1 DESCRIPTION',
+    <<'EOD',
+Some of the less-frequently used features of L<DBIx::Class> have external
+module dependencies on their own. In order not to burden the average user
+with modules he will never use, these optional dependencies are not included
+in the base Makefile.PL. Instead an exception with a descriptive message is
+thrown when a specific feature is missing one or several modules required for
+its operation. This module is the central holding place for the current list
+of such dependencies, for DBIx::Class core authors, and DBIx::Class extension
+authors alike.
+EOD
+    '=head1 CURRENT REQUIREMENT GROUPS',
+    <<'EOD',
+Dependencies are organized in C<groups> and each group can list one or more
+required modules, with an optional minimum version (or 0 for any version).
+The group name can be used in the methods described below.
+EOD
+  );
+
+  for my $group (sort keys %$reqs) {
+    my $p = $reqs->{$group}{pod}
+      or next;
+
+    my $modlist = $reqs->{$group}{req}
+      or next;
+
+    next unless keys %$modlist;
+
+    push @chunks, (
+      "=head2 $p->{title}",
+      "$p->{desc}",
+      '=over',
+      ( map { "=item * $_" . ($modlist->{$_} ? " >= $modlist->{$_}" : '') } (sort keys %$modlist) ),
+      '=back',
+      "Requirement group: B<$group>",
+    );
+  }
+
+  push @chunks, (
+    '=head1 METHODS',
+    '=head2 req_group_list',
+    '=over',
+    '=item Arguments: $none',
+    '=item Returns: \%list_of_requirement_groups',
+    '=back',
+    <<EOD,
+This method should be used by DBIx::Class packagers, to get a hashref of all
+dependencies keyed by dependency group. Each key (group name) can be supplied
+to one of the group-specific methods below.
+EOD
+
+    '=head2 req_list_for',
+    '=over',
+    '=item Arguments: $group_name',
+    '=item Returns: \%list_of_module_version_pairs',
+    '=back',
+    <<EOD,
+This method should be used by DBIx::Class extension authors, to determine the
+version of modules a specific feature requires in the B<current> version of
+DBIx::Class. See the L</SYNOPSIS> for a real-world
+example.
+EOD
+
+    '=head2 req_ok_for',
+    '=over',
+    '=item Arguments: $group_name',
+    '=item Returns: 1|0',
+    '=back',
+    'Returns true or false depending on whether all modules required by C<$group_name> are present on the system and loadable',
+
+    '=head2 req_missing_for',
+    '=over',
+    '=item Arguments: $group_name',
+    '=item Returns: $error_message_string',
+    '=back',
+    <<EOD,
+Returns a single line string suitable for inclusion in larger error messages.
+This method would normally be used by DBIx::Class core-module author, to
+indicate to the user that he needs to install specific modules before he will
+be able to use a specific feature.
+
+For example if some of the requirements for C<deploy> are not available,
+the returned string could look like:
+
+ SQL::Translator >= $sqltver (see $class for details)
+
+The author is expected to prepend the necessary text to this message before
+returning the actual error seen by the user.
+EOD
+
+    '=head2 req_errorlist_for',
+    '=over',
+    '=item Arguments: $group_name',
+    '=item Returns: \%list_of_loaderrors_per_module',
+    '=back',
+    <<'EOD',
+Returns a hashref containing the actual errors that occurred while attempting
+to load each module in the requirement group.
+EOD
+    '=head1 AUTHOR',
+    'See L<DBIx::Class/CONTRIBUTORS>.',
+    '=head1 LICENSE',
+    'You may distribute this code under the same terms as Perl itself',
+  );
+
+  open (my $fh, '>', $podfn) or croak "Unable to write to $podfn: $!";
+  print $fh join ("\n\n", @chunks);
+  close ($fh);
+}
+
+1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Ordered.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Ordered.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Ordered.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -127,7 +127,7 @@
 This method specifies a value of L</position_column> which B<would
 never be assigned to a row> during normal operation. When
 a row is moved, its position is set to this value temporarily, so
-that any unique constrainst can not be violated. This value defaults
+that any unique constraints can not be violated. This value defaults
 to 0, which should work for all cases except when your positions do
 indeed start from 0.
 
@@ -434,10 +434,7 @@
 sub move_to_group {
     my( $self, $to_group, $to_position ) = @_;
 
-    $self->throw_exception ('move_to_group() expects a group specification')
-        unless defined $to_group;
-
-    # if we're given a string, turn it into a hashref
+    # if we're given a single value, turn it into a hashref
     unless (ref $to_group eq 'HASH') {
         my @gcols = $self->_grouping_columns;
 
@@ -504,7 +501,7 @@
     }
     else {
       my $bumped_pos_val = $self->_position_value ($to_position);
-      my @between = ($to_position, $new_group_last_position);
+      my @between = map { $self->_position_value ($_) } ($to_position, $new_group_last_position);
       $self->_shift_siblings (1, @between);   #shift right
       $self->set_column( $position_column => $bumped_pos_val );
     }
@@ -685,27 +682,9 @@
 if you are working with preexisting non-normalised position data,
 or if you need to work with materialized path columns.
 
-=head2 _position
-
-  my $num_pos = $item->_position;
-
-Returns the B<absolute numeric position> of the current object, with the
-first object being at position 1, its sibling at position 2 and so on.
-By default simply returns the value of L</position_column>.
-
-=cut
-sub _position {
-    my $self = shift;
-
-#    #the right way to do this
-#    return $self->previous_siblings->count + 1;
-
-    return $self->get_column ($self->position_column);
-}
-
 =head2 _position_from_value
 
-  my $num_pos = $item->_position_of_value ( $pos_value )
+  my $num_pos = $item->_position_from_value ( $pos_value )
 
 Returns the B<absolute numeric position> of an object with a B<position
 value> set to C<$pos_value>. By default simply returns C<$pos_value>.
@@ -818,15 +797,15 @@
 
     if (grep { $_ eq $position_column } ( map { @$_ } (values %{{ $rsrc->unique_constraints }} ) ) ) {
 
-        my @pcols = $rsrc->primary_columns;
+        my @pcols = $rsrc->_pri_cols;
         my $cursor = $shift_rs->search ({}, { order_by => { "-$ord", $position_column }, columns => \@pcols } )->cursor;
         my $rs = $self->result_source->resultset;
 
-        while (my @pks = $cursor->next ) {
-
+        my @all_pks = $cursor->all;
+        while (my $pks = shift @all_pks) {
           my $cond;
           for my $i (0.. $#pcols) {
-            $cond->{$pcols[$i]} = $pks[$i];
+            $cond->{$pcols[$i]} = $pks->[$i];
           }
 
           $rs->search($cond)->update ({ $position_column => \ "$position_column $op 1" } );
@@ -867,6 +846,19 @@
     );
 }
 
+=head2 _position
+
+  my $num_pos = $item->_position;
+
+Returns the B<absolute numeric position> of the current object, with the
+first object being at position 1, its sibling at position 2 and so on.
+
+=cut
+sub _position {
+    my $self = shift;
+    return $self->_position_from_value ($self->get_column ($self->position_column) );
+}
+
 =head2 _grouping_clause
 
 This method returns one or more name=>value pairs for limiting a search
@@ -929,7 +921,7 @@
 triggering any of the positioning integrity code).
 
 Some day you might get confronted by datasets that have ambiguous
-positioning data (i.e. duplicate position values within the same group,
+positioning data (e.g. duplicate position values within the same group,
 in a table without unique constraints). When manually fixing such data
 keep in mind that you can not invoke L<DBIx::Class::Row/update> like
 you normally would, as it will get confused by the wrong data before
@@ -964,14 +956,14 @@
 
 =head2 Multiple Moves
 
-Be careful when issueing move_* methods to multiple objects.  If 
+Be careful when issuing move_* methods to multiple objects.  If 
 you've pre-loaded the objects then when you move one of the objects 
 the position of the other object will not reflect their new value 
 until you reload them from the database - see
 L<DBIx::Class::Row/discard_changes>.
 
 There are times when you will want to move objects as groups, such 
-as changeing the parent of several objects at once - this directly 
+as changing the parent of several objects at once - this directly 
 conflicts with this problem.  One solution is for us to write a 
 ResultSet class that supports a parent() method, for example.  Another 
 solution is to somehow automagically modify the objects that exist 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/PK/Auto.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/PK/Auto.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/PK/Auto.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -11,7 +11,7 @@
 
 =head1 SYNOPSIS
 
-__PACKAGE__->load_components(qw/Core/);
+use base 'DBIx::Class::Core';
 __PACKAGE__->set_primary_key('id');
 
 =head1 DESCRIPTION
@@ -19,8 +19,6 @@
 This class overrides the insert method to get automatically incremented primary
 keys.
 
-  __PACKAGE__->load_components(qw/Core/);
-
 PK::Auto is now part of Core.
 
 See L<DBIx::Class::Manual::Component> for details of component interactions.

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/PK.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/PK.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/PK.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -31,13 +31,28 @@
   my ($self) = @_;
   $self->throw_exception( "Can't call id() as a class method" )
     unless ref $self;
-  my @pk = $self->_ident_values;
-  return (wantarray ? @pk : $pk[0]);
+  my @id_vals = $self->_ident_values;
+  return (wantarray ? @id_vals : $id_vals[0]);
 }
 
 sub _ident_values {
   my ($self) = @_;
-  return (map { $self->{_column_data}{$_} } $self->primary_columns);
+
+  my (@ids, @missing);
+
+  for ($self->_pri_cols) {
+    push @ids, $self->get_column($_);
+    push @missing, $_ if (! defined $ids[-1] and ! $self->has_column_loaded ($_) );
+  }
+
+  if (@missing && $self->in_storage) {
+    $self->throw_exception (
+      'Unable to uniquely identify row object with missing PK columns: '
+      . join (', ', @missing )
+    );
+  }
+
+  return @ids;
 }
 
 =head2 ID
@@ -64,12 +79,11 @@
   $self->throw_exception( "Can't call ID() as a class method" )
     unless ref $self;
   return undef unless $self->in_storage;
-  return $self->_create_ID(map { $_ => $self->{_column_data}{$_} }
-                             $self->primary_columns);
+  return $self->_create_ID(%{$self->ident_condition});
 }
 
 sub _create_ID {
-  my ($self,%vals) = @_;
+  my ($self, %vals) = @_;
   return undef unless 0 == grep { !defined } values %vals;
   return join '|', ref $self || $self, $self->result_source->name,
     map { $_ . '=' . $vals{$_} } sort keys %vals;
@@ -87,9 +101,25 @@
 
 sub ident_condition {
   my ($self, $alias) = @_;
-  my %cond;
+
+  my @pks = $self->_pri_cols;
+  my @vals = $self->_ident_values;
+
+  my (%cond, @undef);
   my $prefix = defined $alias ? $alias.'.' : '';
-  $cond{$prefix.$_} = $self->get_column($_) for $self->primary_columns;
+  for my $col (@pks) {
+    if (! defined ($cond{$prefix.$col} = shift @vals) ) {
+      push @undef, $col;
+    }
+  }
+
+  if (@undef && $self->in_storage) {
+    $self->throw_exception (
+      'Unable to construct row object identity condition due to NULL PK columns: '
+      . join (', ', @undef)
+    );
+  }
+
   return \%cond;
 }
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/Accessor.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/Accessor.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/Accessor.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -4,7 +4,6 @@
 use strict;
 use warnings;
 use Sub::Name ();
-use Class::Inspector ();
 
 our %_pod_inherit_config = 
   (

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/Base.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/Base.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/Base.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -30,6 +30,8 @@
 
   __PACKAGE__->add_relationship('relname', 'Foreign::Class', $cond, $attrs);
 
+=head3 condition
+
 The condition needs to be an L<SQL::Abstract>-style representation of the
 join between the tables. When resolving the condition for use in a C<JOIN>,
 keys using the pseudo-table C<foreign> are resolved to mean "the Table on the
@@ -67,10 +69,19 @@
 To add an C<OR>ed condition, use an arrayref of hashrefs. See the
 L<SQL::Abstract> documentation for more details.
 
-In addition to the
-L<standard ResultSet attributes|DBIx::Class::ResultSet/ATTRIBUTES>,
-the following attributes are also valid:
+=head3 attributes
 
+The L<standard ResultSet attributes|DBIx::Class::ResultSet/ATTRIBUTES> may
+be used as relationship attributes. In particular, the 'where' attribute is
+useful for filtering relationships:
+
+     __PACKAGE__->has_many( 'valid_users', 'MyApp::Schema::User',
+        { 'foreign.user_id' => 'self.user_id' },
+        { where => { valid => 1 } }
+    );
+
+The following attributes are also valid:
+
 =over 4
 
 =item join_type
@@ -111,6 +122,40 @@
 should, set this attribute to a true or false value to override the detection
 of when to create constraints.
 
+=item cascade_copy
+
+If C<cascade_copy> is true on a C<has_many> relationship for an
+object, then when you copy the object all the related objects will
+be copied too. To turn this behaviour off, pass C<< cascade_copy => 0 >> 
+in the C<$attr> hashref. 
+
+The behaviour defaults to C<< cascade_copy => 1 >> for C<has_many>
+relationships.
+
+=item cascade_delete
+
+By default, DBIx::Class cascades deletes across C<has_many>,
+C<has_one> and C<might_have> relationships. You can disable this
+behaviour on a per-relationship basis by supplying 
+C<< cascade_delete => 0 >> in the relationship attributes.
+
+The cascaded operations are performed after the requested delete,
+so if your database has a constraint on the relationship, it will
+have deleted/updated the related records or raised an exception
+before DBIx::Class gets to perform the cascaded operation.
+
+=item cascade_update
+
+By default, DBIx::Class cascades updates across C<has_one> and
+C<might_have> relationships. You can disable this behaviour on a
+per-relationship basis by supplying C<< cascade_update => 0 >> in
+the relationship attributes.
+
+This is not an RDBMS-style cascade update - it purely means that when
+an object has update called on it, all the related objects also
+have update called. It will not change foreign keys automatically -
+you must arrange to do this yourself.
+
 =item on_delete / on_update
 
 If you are using L<SQL::Translator> to create SQL for you, you can use these
@@ -189,13 +234,23 @@
     my $query = ((@_ > 1) ? {@_} : shift);
 
     my $source = $self->result_source;
-    my $cond = $source->_resolve_condition(
-      $rel_info->{cond}, $rel, $self
-    );
+
+    # condition resolution may fail if an incomplete master-object prefetch
+    # is encountered - that is ok during prefetch construction (not yet in_storage)
+    my $cond = eval { $source->_resolve_condition( $rel_info->{cond}, $rel, $self ) };
+    if (my $err = $@) {
+      if ($self->in_storage) {
+        $self->throw_exception ($err);
+      }
+      else {
+        $cond = $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION;
+      }
+    }
+
     if ($cond eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION) {
       my $reverse = $source->reverse_relationship_info($rel);
       foreach my $rev_rel (keys %$reverse) {
-        if ($reverse->{$rev_rel}{attrs}{accessor} eq 'multi') {
+        if ($reverse->{$rev_rel}{attrs}{accessor} && $reverse->{$rev_rel}{attrs}{accessor} eq 'multi') {
           $attrs->{related_objects}{$rev_rel} = [ $self ];
           Scalar::Util::weaken($attrs->{related_object}{$rev_rel}[0]);
         } else {
@@ -249,7 +304,7 @@
   ( $objects_rs ) = $rs->search_related_rs('relname', $cond, $attrs);
 
 This method works exactly the same as search_related, except that 
-it guarantees a restultset, even in list context.
+it guarantees a resultset, even in list context.
 
 =cut
 
@@ -381,7 +436,7 @@
 call set_from_related on the book.
 
 This is called internally when you pass existing objects as values to
-L<DBIx::Class::ResultSet/create>, or pass an object to a belongs_to acessor.
+L<DBIx::Class::ResultSet/create>, or pass an object to a belongs_to accessor.
 
 The columns are only set in the local copy of the object, call L</update> to
 set them in the storage.

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/BelongsTo.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/BelongsTo.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/BelongsTo.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -24,19 +24,14 @@
   # no join condition or just a column name
   if (!ref $cond) {
     $class->ensure_class_loaded($f_class);
-    my %f_primaries = map { $_ => 1 } eval { $f_class->primary_columns };
+    my %f_primaries = map { $_ => 1 } eval { $f_class->_pri_cols };
     $class->throw_exception(
-      "Can't infer join condition for ${rel} on ${class}; ".
-      "unable to load ${f_class}: $@"
+      "Can't infer join condition for ${rel} on ${class}: $@"
     ) if $@;
 
     my ($pri, $too_many) = keys %f_primaries;
     $class->throw_exception(
       "Can't infer join condition for ${rel} on ${class}; ".
-      "${f_class} has no primary keys"
-    ) unless defined $pri;
-    $class->throw_exception(
-      "Can't infer join condition for ${rel} on ${class}; ".
       "${f_class} has multiple primary keys"
     ) if $too_many;
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/CascadeActions.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/CascadeActions.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/CascadeActions.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -16,15 +16,24 @@
     # be handling this anyway. Assuming we have joins we probably actually
     # *could* do them, but I'd rather not.
 
-  my $ret = $self->next::method(@rest);
-
   my $source = $self->result_source;
   my %rels = map { $_ => $source->relationship_info($_) } $source->relationships;
   my @cascade = grep { $rels{$_}{attrs}{cascade_delete} } keys %rels;
-  foreach my $rel (@cascade) {
-    $self->search_related($rel)->delete_all;
+
+  if (@cascade) {
+    my $guard = $source->schema->txn_scope_guard;
+
+    my $ret = $self->next::method(@rest);
+
+    foreach my $rel (@cascade) {
+      $self->search_related($rel)->delete_all;
+    }
+
+    $guard->commit;
+    return $ret;
   }
-  return $ret;
+
+  $self->next::method(@rest);
 }
 
 sub update {
@@ -32,19 +41,31 @@
   return $self->next::method(@rest) unless ref $self;
     # Because update cascades on a class *really* don't make sense!
 
-  my $ret = $self->next::method(@rest);
-
   my $source = $self->result_source;
   my %rels = map { $_ => $source->relationship_info($_) } $source->relationships;
   my @cascade = grep { $rels{$_}{attrs}{cascade_update} } keys %rels;
-  foreach my $rel (@cascade) {
-    next if (
-      $rels{$rel}{attrs}{accessor} eq 'single'
-      && !exists($self->{_relationship_data}{$rel})
-    );
-    $_->update for grep defined, $self->$rel;
+
+  if (@cascade) {
+    my $guard = $source->schema->txn_scope_guard;
+
+    my $ret = $self->next::method(@rest);
+
+    foreach my $rel (@cascade) {
+      next if (
+        $rels{$rel}{attrs}{accessor}
+          &&
+        $rels{$rel}{attrs}{accessor} eq 'single'
+          &&
+        !exists($self->{_relationship_data}{$rel})
+      );
+      $_->update for grep defined, $self->$rel;
+    }
+
+    $guard->commit;
+    return $ret;
   }
-  return $ret;
+
+  $self->next::method(@rest);
 }
 
 1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/HasMany.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/HasMany.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/HasMany.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -14,7 +14,10 @@
 
   unless (ref $cond) {
     $class->ensure_class_loaded($f_class);
-    my ($pri, $too_many) = $class->primary_columns;
+    my ($pri, $too_many) = eval { $class->_pri_cols };
+    $class->throw_exception(
+      "Can't infer join condition for ${rel} on ${class}: $@"
+    ) if $@;
 
     $class->throw_exception(
       "has_many can only infer join for a single primary key; ".

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/HasOne.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/HasOne.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/HasOne.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -3,6 +3,7 @@
 
 use strict;
 use warnings;
+use Carp::Clan qw/^DBIx::Class/;
 
 our %_pod_inherit_config = 
   (
@@ -21,12 +22,8 @@
   my ($class, $join_type, $rel, $f_class, $cond, $attrs) = @_;
   unless (ref $cond) {
     $class->ensure_class_loaded($f_class);
-    my ($pri, $too_many) = $class->primary_columns;
 
-    $class->throw_exception(
-      "might_have/has_one can only infer join for a single primary key; ".
-      "${class} has more"
-    ) if $too_many;
+    my $pri = $class->_get_primary_key;
 
     $class->throw_exception(
       "might_have/has_one needs a primary key  to infer a join; ".
@@ -34,7 +31,7 @@
     ) if !defined $pri && (!defined $cond || !length $cond);
 
     my $f_class_loaded = eval { $f_class->columns };
-    my ($f_key,$guess);
+    my ($f_key,$too_many,$guess);
     if (defined $cond && length $cond) {
       $f_key = $cond;
       $guess = "caller specified foreign key '$f_key'";
@@ -42,11 +39,7 @@
       $f_key = $rel;
       $guess = "using given relationship '$rel' for foreign key";
     } else {
-      ($f_key, $too_many) = $f_class->primary_columns;
-      $class->throw_exception(
-        "might_have/has_one can only infer join for a single primary key; ".
-        "${f_class} has more"
-      ) if $too_many;
+      $f_key = $class->_get_primary_key($f_class);
       $guess = "using primary key of foreign class for foreign key";
     }
     $class->throw_exception(
@@ -54,6 +47,7 @@
     ) if $f_class_loaded && !$f_class->has_column($f_key);
     $cond = { "foreign.${f_key}" => "self.${pri}" };
   }
+  $class->_validate_has_one_condition($cond);
   $class->add_relationship($rel, $f_class,
    $cond,
    { accessor => 'single',
@@ -63,4 +57,40 @@
   1;
 }
 
+sub _get_primary_key {
+  my ( $class, $target_class ) = @_;
+  $target_class ||= $class;
+  my ($pri, $too_many) = eval { $target_class->_pri_cols };
+  $class->throw_exception(
+    "Can't infer join condition on ${target_class}: $@"
+  ) if $@;
+
+  $class->throw_exception(
+    "might_have/has_one can only infer join for a single primary key; ".
+    "${class} has more"
+  ) if $too_many;
+  return $pri;
+}
+
+sub _validate_has_one_condition {
+  my ($class, $cond )  = @_;
+
+  return if $ENV{DBIC_DONT_VALIDATE_RELS};
+  return unless 'HASH' eq ref $cond;
+  foreach my $foreign_id ( keys %$cond ) {
+    my $self_id = $cond->{$foreign_id};
+
+    # we can ignore a bad $self_id because add_relationship handles this
+    # warning
+    return unless $self_id =~ /^self\.(.*)$/;
+    my $key = $1;
+    $class->throw_exception("Defining rel on ${class} that includes ${key} but no such column defined here yet")
+        unless $class->has_column($key);
+    my $column_info = $class->column_info($key);
+    if ( $column_info->{is_nullable} ) {
+      carp(qq'"might_have/has_one" must not be on columns with is_nullable set to true ($class/$key). This might indicate an incorrect use of those relationship helpers instead of belongs_to.');
+    }
+  }
+}
+
 1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/ManyToMany.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/ManyToMany.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship/ManyToMany.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -64,15 +64,15 @@
       my $rs = $self->search_related($rel)->search_related(
         $f_rel, @_ > 0 ? @_ : undef, { %{$rel_attrs||{}}, %$attrs }
       );
-	  return $rs;
+      return $rs;
     };
 
     my $meth_name = join '::', $class, $meth;
     *$meth_name = Sub::Name::subname $meth_name, sub {
-		my $self = shift;
-		my $rs = $self->$rs_meth( @_ );
-  		return (wantarray ? $rs->all : $rs);
-	};
+      my $self = shift;
+      my $rs = $self->$rs_meth( @_ );
+      return (wantarray ? $rs->all : $rs);
+    };
 
     my $add_meth_name = join '::', $class, $add_meth;
     *$add_meth_name = Sub::Name::subname $add_meth_name, sub {
@@ -102,7 +102,7 @@
       my $link = $self->search_related($rel)->new_result($link_vals);
       $link->set_from_related($f_rel, $obj);
       $link->insert();
-	  return $obj;
+      return $obj;
     };
 
     my $set_meth_name = join '::', $class, $set_meth;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Relationship.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -111,7 +111,7 @@
 you want to use the default value for it, but still want to set C<\%attrs>.
 
 See L<DBIx::Class::Relationship::Base> for documentation on the
-attrubutes that are allowed in the C<\%attrs> argument.
+attributes that are allowed in the C<\%attrs> argument.
 
 
 =head2 belongs_to
@@ -232,13 +232,13 @@
 
 =back
 
-Creates a one-to-many relationship, where the corresponding elements
-of the foreign class store the calling class's primary key in one (or
-more) of the foreign class columns. This relationship defaults to using
-the end of this classes namespace as the foreign key in C<$related_class>
-to resolve the join, unless C<$their_fk_column> specifies the foreign
-key column in C<$related_class> or C<cond> specifies a reference to a
-join condition hash.
+Creates a one-to-many relationship where the foreign class refers to
+this class's primary key. This relationship refers to zero or more
+records in the foreign table (e.g. a C<LEFT JOIN>). This relationship 
+defaults to using the end of this class's namespace as the foreign key
+in C<$related_class> to resolve the join, unless C<$their_fk_column>
+specifies the foreign key column in C<$related_class> or C<cond>
+specifies a reference to a join condition hash.
 
 =over
 
@@ -441,6 +441,17 @@
 for a L<list of standard resultset attributes|DBIx::Class::ResultSet/ATTRIBUTES>
 which can be assigned to relationships as well.
 
+Note that if you supply a condition on which to join, if the column in the
+current table allows nulls (i.e., has the C<is_nullable> attribute set to a
+true value), then C<might_have> will warn about this because it's naughty and
+you shouldn't do that.  
+
+ "might_have/has_one" must not be on columns with is_nullable set to true (MySchema::SomeClass/key)
+
+If you must be naughty, you can suppress the warning by setting the
+C<DBIC_DONT_VALIDATE_RELS> environment variable to a true value.  Otherwise,
+you probably just want to use L<DBIx::Class::Relationship/belongs_to>.
+
 =head2 has_one
 
 =over 4
@@ -528,6 +539,11 @@
 for a L<list of standard resultset attributes|DBIx::Class::ResultSet/ATTRIBUTES>
 which can be assigned to relationships as well.
 
+Note that if you supply a condition on which to join, if the column in the
+current table allows nulls (i.e., has the C<is_nullable> attribute set to a
+true value), then warnings might apply just as with
+L<DBIx::Class::Relationship/might_have>.
+
 =head2 many_to_many
 
 =over 4

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSet.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSet.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSet.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -7,6 +7,7 @@
         'bool'   => "_bool",
         fallback => 1;
 use Carp::Clan qw/^DBIx::Class/;
+use DBIx::Class::Exception;
 use Data::Page;
 use Storable;
 use DBIx::Class::ResultSetColumn;
@@ -24,6 +25,10 @@
 =head1 SYNOPSIS
 
   my $users_rs   = $schema->resultset('User');
+  while( $user = $users_rs->next) {
+    print $user->username;
+  }
+
   my $registered_users_rs   = $schema->resultset('User')->search({ registered => 1 });
   my @cds_in_2005 = $schema->resultset('CD')->search({ year => 2005 })->all();
 
@@ -140,7 +145,7 @@
 =head1 OVERLOADING
 
 If a resultset is used in a numeric context it returns the L</count>.
-However, if it is used in a booleand context it is always true.  So if
+However, if it is used in a boolean context it is always true.  So if
 you want to check if a resultset has any results use C<if $rs != 0>.
 C<if $rs> will always be true.
 
@@ -290,10 +295,15 @@
     $rows = $self->get_cache;
   }
 
+  # reset the selector list
+  if (List::Util::first { exists $attrs->{$_} } qw{columns select as}) {
+     delete @{$our_attrs}{qw{select as columns +select +as +columns include_columns}};
+  }
+
   my $new_attrs = { %{$our_attrs}, %{$attrs} };
 
   # merge new attrs into inherited
-  foreach my $key (qw/join prefetch +select +as bind/) {
+  foreach my $key (qw/join prefetch +select +as +columns include_columns bind/) {
     next unless exists $attrs->{$key};
     $new_attrs->{$key} = $self->_merge_attr($our_attrs->{$key}, $attrs->{$key});
   }
@@ -356,9 +366,9 @@
   }
 
   my $rs = (ref $self)->new($self->result_source, $new_attrs);
-  if ($rows) {
-    $rs->set_cache($rows);
-  }
+
+  $rs->set_cache($rows) if ($rows);
+
   return $rs;
 }
 
@@ -518,7 +528,7 @@
     # in ::Relationship::Base::search_related (the row method), and furthermore
     # the relationship is of the 'single' type. This means that the condition
     # provided by the relationship (already attached to $self) is sufficient,
-    # as there can be only one row in the databse that would satisfy the 
+    # as there can be only one row in the database that would satisfy the
     # relationship
   }
   else {
@@ -570,12 +580,16 @@
   my $where = $self->_collapse_cond($self->{attrs}{where} || {});
   my $num_where = scalar keys %$where;
 
-  my @unique_queries;
+  my (@unique_queries, %seen_column_combinations);
   foreach my $name (@constraint_names) {
-    my @unique_cols = $self->result_source->unique_constraint_columns($name);
-    my $unique_query = $self->_build_unique_query($query, \@unique_cols);
+    my @constraint_cols = $self->result_source->unique_constraint_columns($name);
 
-    my $num_cols = scalar @unique_cols;
+    my $constraint_sig = join "\x00", sort @constraint_cols;
+    next if $seen_column_combinations{$constraint_sig}++;
+
+    my $unique_query = $self->_build_unique_query($query, \@constraint_cols);
+
+    my $num_cols = scalar @constraint_cols;
     my $num_query = scalar keys %$unique_query;
 
     my $total = $num_query + $num_where;
@@ -629,7 +643,7 @@
 =head2 search_related_rs
 
 This method works exactly the same as search_related, except that
-it guarantees a restultset, even in list context.
+it guarantees a resultset, even in list context.
 
 =cut
 
@@ -687,7 +701,7 @@
 
 =item B<Note>
 
-As of 0.08100, this method enforces the assumption that the preceeding
+As of 0.08100, this method enforces the assumption that the preceding
 query returns only one row. If more than one row is returned, you will receive
 a warning:
 
@@ -969,19 +983,6 @@
 sub _collapse_result {
   my ($self, $as_proto, $row) = @_;
 
-  # if the first row that ever came in is totally empty - this means we got
-  # hit by a smooth^Wempty left-joined resultset. Just noop in that case
-  # instead of producing a {}
-  #
-  my $has_def;
-  for (@$row) {
-    if (defined $_) {
-      $has_def++;
-      last;
-    }
-  }
-  return undef unless $has_def;
-
   my @copy = @$row;
 
   # 'foo'         => [ undef, 'foo' ]
@@ -1006,7 +1007,7 @@
   # without having to contruct the full hash
 
   if (keys %collapse) {
-    my %pri = map { ($_ => 1) } $self->result_source->primary_columns;
+    my %pri = map { ($_ => 1) } $self->result_source->_pri_cols;
     foreach my $i (0 .. $#construct_as) {
       next if defined($construct_as[$i][0]); # only self table
       if (delete $pri{$construct_as[$i][1]}) {
@@ -1139,6 +1140,7 @@
   if ($result_class) {
     $self->ensure_class_loaded($result_class);
     $self->_result_class($result_class);
+    $self->{attrs}{result_class} = $result_class if ref $self;
   }
   $self->_result_class;
 }
@@ -1233,20 +1235,16 @@
   my $rsrc = $self->result_source;
   $attrs ||= $self->_resolved_attrs;
 
-  my $tmp_attrs = { %$attrs };
+  # only take pieces we need for a simple count
+  my $tmp_attrs = { map
+    { $_ => $attrs->{$_} }
+    qw/ alias from where bind join /
+  };
 
-  # take off any limits, record_filter is cdbi, and no point of ordering a count 
-  delete $tmp_attrs->{$_} for (qw/select as rows offset order_by record_filter/);
-
   # overwrite the selector (supplied by the storage)
   $tmp_attrs->{select} = $rsrc->storage->_count_select ($rsrc, $tmp_attrs);
   $tmp_attrs->{as} = 'count';
 
-  # read the comment on top of the actual function to see what this does
-  $tmp_attrs->{from} = $self->_switch_to_inner_join_if_needed (
-    $tmp_attrs->{from}, $tmp_attrs->{alias}
-  );
-
   my $tmp_rs = $rsrc->resultset_class->new($rsrc, $tmp_attrs)->get_column ('count');
 
   return $tmp_rs;
@@ -1259,114 +1257,45 @@
   my ($self, $attrs) = @_;
 
   my $rsrc = $self->result_source;
-  $attrs ||= $self->_resolved_attrs_copy;
+  $attrs ||= $self->_resolved_attrs;
 
-  my $sub_attrs = { %$attrs };
+  my $sub_attrs = { map
+    { $_ => $attrs->{$_} }
+    qw/ alias from where bind join group_by having rows offset /
+  };
 
-  # extra selectors do not go in the subquery and there is no point of ordering it
-  delete $sub_attrs->{$_} for qw/collapse select _prefetch_select as order_by/;
-
-  # if we prefetch, we group_by primary keys only as this is what we would get out
-  # of the rs via ->next/->all. We DO WANT to clobber old group_by regardless
-  if ( keys %{$attrs->{collapse}} ) {
-    $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->primary_columns) ]
+  # if we multi-prefetch we group_by primary keys only as this is what we would
+  # get out of the rs via ->next/->all. We *DO WANT* to clobber old group_by regardless
+  if ( keys %{$attrs->{collapse}}  ) {
+    $sub_attrs->{group_by} = [ map { "$attrs->{alias}.$_" } ($rsrc->_pri_cols) ]
   }
 
-  $sub_attrs->{select} = $rsrc->storage->_subq_count_select ($rsrc, $sub_attrs);
+  # Calculate subquery selector
+  if (my $g = $sub_attrs->{group_by}) {
 
-  # read the comment on top of the actual function to see what this does
-  $sub_attrs->{from} = $self->_switch_to_inner_join_if_needed (
-    $sub_attrs->{from}, $sub_attrs->{alias}
-  );
-
-  # this is so that ordering can be thrown away in things like Top limit
-  $sub_attrs->{-for_count_only} = 1;
-
-  my $sub_rs = $rsrc->resultset_class->new ($rsrc, $sub_attrs);
-
-  $attrs->{from} = [{
-    -alias => 'count_subq',
-    -source_handle => $rsrc->handle,
-    count_subq => $sub_rs->as_query,
-  }];
-
-  # the subquery replaces this
-  delete $attrs->{$_} for qw/where bind collapse group_by having having_bind rows offset/;
-
-  return $self->_count_rs ($attrs);
-}
-
-
-# The DBIC relationship chaining implementation is pretty simple - every
-# new related_relationship is pushed onto the {from} stack, and the {select}
-# window simply slides further in. This means that when we count somewhere
-# in the middle, we got to make sure that everything in the join chain is an
-# actual inner join, otherwise the count will come back with unpredictable
-# results (a resultset may be generated with _some_ rows regardless of if
-# the relation which the $rs currently selects has rows or not). E.g.
-# $artist_rs->cds->count - normally generates:
-# SELECT COUNT( * ) FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid
-# which actually returns the number of artists * (number of cds || 1)
-#
-# So what we do here is crawl {from}, determine if the current alias is at
-# the top of the stack, and if not - make sure the chain is inner-joined down
-# to the root.
-#
-sub _switch_to_inner_join_if_needed {
-  my ($self, $from, $alias) = @_;
-
-  # subqueries and other oddness is naturally not supported
-  return $from if (
-    ref $from ne 'ARRAY'
-      ||
-    @$from <= 1
-      ||
-    ref $from->[0] ne 'HASH'
-      ||
-    ! $from->[0]{-alias}
-      ||
-    $from->[0]{-alias} eq $alias
-  );
-
-  my $switch_branch;
-  JOINSCAN:
-  for my $j (@{$from}[1 .. $#$from]) {
-    if ($j->[0]{-alias} eq $alias) {
-      $switch_branch = $j->[0]{-join_path};
-      last JOINSCAN;
+    # necessary as the group_by may refer to aliased functions
+    my $sel_index;
+    for my $sel (@{$attrs->{select}}) {
+      $sel_index->{$sel->{-as}} = $sel
+        if (ref $sel eq 'HASH' and $sel->{-as});
     }
-  }
 
-  # something else went wrong
-  return $from unless $switch_branch;
-
-  # So it looks like we will have to switch some stuff around.
-  # local() is useless here as we will be leaving the scope
-  # anyway, and deep cloning is just too fucking expensive
-  # So replace the inner hashref manually
-  my @new_from = ($from->[0]);
-  my $sw_idx = { map { $_ => 1 } @$switch_branch };
-
-  for my $j (@{$from}[1 .. $#$from]) {
-    my $jalias = $j->[0]{-alias};
-
-    if ($sw_idx->{$jalias}) {
-      my %attrs = %{$j->[0]};
-      delete $attrs{-join_type};
-      push @new_from, [
-        \%attrs,
-        @{$j}[ 1 .. $#$j ],
-      ];
+    for my $g_part (@$g) {
+      push @{$sub_attrs->{select}}, $sel_index->{$g_part} || $g_part;
     }
-    else {
-      push @new_from, $j;
-    }
   }
+  else {
+    my @pcols = map { "$attrs->{alias}.$_" } ($rsrc->primary_columns);
+    $sub_attrs->{select} = @pcols ? \@pcols : [ 1 ];
+  }
 
-  return \@new_from;
+  return $rsrc->resultset_class
+               ->new ($rsrc, $sub_attrs)
+                ->as_subselect_rs
+                 ->search ({}, { columns => { count => $rsrc->storage->_count_select ($rsrc, $attrs) } })
+                  -> get_column ('count');
 }
 
-
 sub _bool {
   return 1;
 }
@@ -1490,8 +1419,12 @@
 
   my $rsrc = $self->result_source;
 
+  # if a condition exists we need to strip all table qualifiers
+  # if this is not possible we'll force a subquery below
+  my $cond = $rsrc->schema->storage->_strip_cond_qualifiers ($self->{cond});
+
   my $needs_group_by_subq = $self->_has_resolved_attr (qw/collapse group_by -join/);
-  my $needs_subq = $self->_has_resolved_attr (qw/row offset/);
+  my $needs_subq = $needs_group_by_subq || (not defined $cond) || $self->_has_resolved_attr(qw/rows offset/);
 
   if ($needs_group_by_subq or $needs_subq) {
 
@@ -1499,7 +1432,7 @@
     my $attrs = $self->_resolved_attrs_copy;
 
     delete $attrs->{$_} for qw/collapse select as/;
-    $attrs->{columns} = [ map { "$attrs->{alias}.$_" } ($self->result_source->primary_columns) ];
+    $attrs->{columns} = [ map { "$attrs->{alias}.$_" } ($self->result_source->_pri_cols) ];
 
     if ($needs_group_by_subq) {
       # make sure no group_by was supplied, or if there is one - make sure it matches
@@ -1539,70 +1472,11 @@
     return $rsrc->storage->$op(
       $rsrc,
       $op eq 'update' ? $values : (),
-      $self->_cond_for_update_delete,
+      $cond,
     );
   }
 }
 
-
-# _cond_for_update_delete
-#
-# update/delete require the condition to be modified to handle
-# the differing SQL syntax available.  This transforms the $self->{cond}
-# appropriately, returning the new condition.
-
-sub _cond_for_update_delete {
-  my ($self, $full_cond) = @_;
-  my $cond = {};
-
-  $full_cond ||= $self->{cond};
-  # No-op. No condition, we're updating/deleting everything
-  return $cond unless ref $full_cond;
-
-  if (ref $full_cond eq 'ARRAY') {
-    $cond = [
-      map {
-        my %hash;
-        foreach my $key (keys %{$_}) {
-          $key =~ /([^.]+)$/;
-          $hash{$1} = $_->{$key};
-        }
-        \%hash;
-      } @{$full_cond}
-    ];
-  }
-  elsif (ref $full_cond eq 'HASH') {
-    if ((keys %{$full_cond})[0] eq '-and') {
-      $cond->{-and} = [];
-      my @cond = @{$full_cond->{-and}};
-       for (my $i = 0; $i < @cond; $i++) {
-        my $entry = $cond[$i];
-        my $hash;
-        if (ref $entry eq 'HASH') {
-          $hash = $self->_cond_for_update_delete($entry);
-        }
-        else {
-          $entry =~ /([^.]+)$/;
-          $hash->{$1} = $cond[++$i];
-        }
-        push @{$cond->{-and}}, $hash;
-      }
-    }
-    else {
-      foreach my $key (keys %{$full_cond}) {
-        $key =~ /([^.]+)$/;
-        $cond->{$1} = $full_cond->{$key};
-      }
-    }
-  }
-  else {
-    $self->throw_exception("Can't update/delete on resultset with condition unless hash or array");
-  }
-
-  return $cond;
-}
-
-
 =head2 update
 
 =over 4
@@ -1646,9 +1520,10 @@
   my ($self, $values) = @_;
   $self->throw_exception('Values for update_all must be a hash')
     unless ref $values eq 'HASH';
-  foreach my $obj ($self->all) {
-    $obj->set_columns($values)->update;
-  }
+
+  my $guard = $self->result_source->schema->txn_scope_guard;
+  $_->update($values) for $self->all;
+  $guard->commit;
   return 1;
 }
 
@@ -1666,7 +1541,7 @@
 will not run DBIC cascade triggers. See L</delete_all> if you need triggers
 to run. See also L<DBIx::Class::Row/delete>.
 
-Return value will be the amount of rows deleted; exact type of return value
+Return value will be the number of rows deleted; exact type of return value
 is storage-dependent.
 
 =cut
@@ -1699,7 +1574,9 @@
   $self->throw_exception('delete_all does not accept any arguments')
     if @_;
 
+  my $guard = $self->result_source->schema->txn_scope_guard;
   $_->delete for $self->all;
+  $guard->commit;
   return 1;
 }
 
@@ -1735,7 +1612,7 @@
       ],
      },
      { artistid => 5, name => 'Angsty-Whiny Girl', cds => [
-        { title => 'My parents sold me to a record company' ,year => 2005 },
+        { title => 'My parents sold me to a record company', year => 2005 },
         { title => 'Why Am I So Ugly?', year => 2006 },
         { title => 'I Got Surgery and am now Popular', year => 2007 }
       ],
@@ -1763,7 +1640,7 @@
     [qw/artistid name/],
     [100, 'A Formally Unknown Singer'],
     [101, 'A singer that jumped the shark two albums ago'],
-    [102, 'An actually cool singer.'],
+    [102, 'An actually cool singer'],
   ]);
 
 Please note an important effect on your data when choosing between void and
@@ -1777,11 +1654,11 @@
 =cut
 
 sub populate {
-  my $self = shift @_;
-  my $data = ref $_[0][0] eq 'HASH'
-    ? $_[0] : ref $_[0][0] eq 'ARRAY' ? $self->_normalize_populate_args($_[0]) :
-    $self->throw_exception('Populate expects an arrayref of hashes or arrayref of arrayrefs');
+  my $self = shift;
 
+  # cruft placed in standalone method
+  my $data = $self->_normalize_populate_args(@_);
+
   if(defined wantarray) {
     my @created;
     foreach my $item (@$data) {
@@ -1789,10 +1666,19 @@
     }
     return wantarray ? @created : \@created;
   } else {
-    my ($first, @rest) = @$data;
+    my $first = $data->[0];
 
-    my @names = grep {!ref $first->{$_}} keys %$first;
-    my @rels = grep { $self->result_source->has_relationship($_) } keys %$first;
+    # if a column is a registered relationship, and is a non-blessed hash/array, consider
+    # it relationship data
+    my (@rels, @columns);
+    for (keys %$first) {
+      my $ref = ref $first->{$_};
+      $self->result_source->has_relationship($_) && ($ref eq 'ARRAY' or $ref eq 'HASH')
+        ? push @rels, $_
+        : push @columns, $_
+      ;
+    }
+
     my @pks = $self->result_source->primary_columns;
 
     ## do the belongs_to relationships
@@ -1821,17 +1707,21 @@
         delete $data->[$index]->{$rel};
         $data->[$index] = {%{$data->[$index]}, %$related};
 
-        push @names, keys %$related if $index == 0;
+        push @columns, keys %$related if $index == 0;
       }
     }
 
-    ## do bulk insert on current row
-    my @values = map { [ @$_{@names} ] } @$data;
+    ## inherit the data locked in the conditions of the resultset
+    my ($rs_data) = $self->_merge_cond_with_data({});
+    delete @{$rs_data}{@columns};
+    my @inherit_cols = keys %$rs_data;
+    my @inherit_data = values %$rs_data;
 
+    ## do bulk insert on current row
     $self->result_source->storage->insert_bulk(
       $self->result_source,
-      \@names,
-      \@values,
+      [@columns, @inherit_cols],
+      [ map { [ @$_{@columns}, @inherit_data ] } @$data ],
     );
 
     ## do the has_many relationships
@@ -1840,7 +1730,7 @@
       foreach my $rel (@rels) {
         next unless $item->{$rel} && ref $item->{$rel} eq "ARRAY";
 
-        my $parent = $self->find(map {{$_=>$item->{$_}} } @pks)
+        my $parent = $self->find({map { $_ => $item->{$_} } @pks})
      || $self->throw_exception('Cannot find the relating object.');
 
         my $child = $parent->$rel;
@@ -1860,26 +1750,27 @@
   }
 }
 
-=head2 _normalize_populate_args ($args)
 
-Private method used by L</populate> to normalize its incoming arguments.  Factored
-out in case you want to subclass and accept new argument structures to the
-L</populate> method.
-
-=cut
-
+# populate() arguments went over several incarnations
+# What we ultimately support is AoH
 sub _normalize_populate_args {
-  my ($self, $data) = @_;
-  my @names = @{shift(@$data)};
-  my @results_to_create;
-  foreach my $datum (@$data) {
-    my %result_to_create;
-    foreach my $index (0..$#names) {
-      $result_to_create{$names[$index]} = $$datum[$index];
+  my ($self, $arg) = @_;
+
+  if (ref $arg eq 'ARRAY') {
+    if (ref $arg->[0] eq 'HASH') {
+      return $arg;
     }
-    push @results_to_create, \%result_to_create;
+    elsif (ref $arg->[0] eq 'ARRAY') {
+      my @ret;
+      my @colnames = @{$arg->[0]};
+      foreach my $values (@{$arg}[1 .. $#$arg]) {
+        push @ret, { map { $colnames[$_] => $values->[$_] } (0 .. $#colnames) };
+      }
+      return \@ret;
+    }
   }
-  return \@results_to_create;
+
+  $self->throw_exception('Populate expects an arrayref of hashrefs or arrayref of arrayrefs');
 }
 
 =head2 pager
@@ -1968,46 +1859,66 @@
   $self->throw_exception( "new_result needs a hash" )
     unless (ref $values eq 'HASH');
 
-  my %new;
+  my ($merged_cond, $cols_from_relations) = $self->_merge_cond_with_data($values);
+
+  my %new = (
+    %$merged_cond,
+    @$cols_from_relations
+      ? (-cols_from_relations => $cols_from_relations)
+      : (),
+    -source_handle => $self->_source_handle,
+    -result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED
+  );
+
+  return $self->result_class->new(\%new);
+}
+
+# _merge_cond_with_data
+#
+# Takes a simple hash of K/V data and returns its copy merged with the
+# condition already present on the resultset. Additionally returns an
+# arrayref of value/condition names, which were inferred from related
+# objects (this is needed for in-memory related objects)
+sub _merge_cond_with_data {
+  my ($self, $data) = @_;
+
+  my (%new_data, @cols_from_relations);
+
   my $alias = $self->{attrs}{alias};
 
-  if (
-    defined $self->{cond}
-    && $self->{cond} eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION
-  ) {
-    %new = %{ $self->{attrs}{related_objects} || {} };  # nothing might have been inserted yet
-    $new{-from_resultset} = [ keys %new ] if keys %new;
-  } else {
+  if (! defined $self->{cond}) {
+    # just massage $data below
+  }
+  elsif ($self->{cond} eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION) {
+    %new_data = %{ $self->{attrs}{related_objects} || {} };  # nothing might have been inserted yet
+    @cols_from_relations = keys %new_data;
+  }
+  elsif (ref $self->{cond} ne 'HASH') {
     $self->throw_exception(
-      "Can't abstract implicit construct, condition not a hash"
-    ) if ($self->{cond} && !(ref $self->{cond} eq 'HASH'));
-
-    my $collapsed_cond = (
-      $self->{cond}
-        ? $self->_collapse_cond($self->{cond})
-        : {}
+      "Can't abstract implicit construct, resultset condition not a hash"
     );
-
+  }
+  else {
     # precendence must be given to passed values over values inherited from
     # the cond, so the order here is important.
-    my %implied =  %{$self->_remove_alias($collapsed_cond, $alias)};
-    while( my($col,$value) = each %implied ){
-      if(ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '='){
-        $new{$col} = $value->{'='};
+    my $collapsed_cond = $self->_collapse_cond($self->{cond});
+    my %implied = %{$self->_remove_alias($collapsed_cond, $alias)};
+
+    while ( my($col, $value) = each %implied ) {
+      if (ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '=') {
+        $new_data{$col} = $value->{'='};
         next;
       }
-      $new{$col} = $value if $self->_is_deterministic_value($value);
+      $new_data{$col} = $value if $self->_is_deterministic_value($value);
     }
   }
 
-  %new = (
-    %new,
-    %{ $self->_remove_alias($values, $alias) },
-    -source_handle => $self->_source_handle,
-    -result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED
+  %new_data = (
+    %new_data,
+    %{ $self->_remove_alias($data, $alias) },
   );
 
-  return $self->result_class->new(\%new);
+  return (\%new_data, \@cols_from_relations);
 }
 
 # _is_deterministic_value
@@ -2132,7 +2043,7 @@
   return \%unaliased;
 }
 
-=head2 as_query (EXPERIMENTAL)
+=head2 as_query
 
 =over 4
 
@@ -2146,8 +2057,6 @@
 
 This is generally used as the RHS for a subquery.
 
-B<NOTE>: This feature is still experimental.
-
 =cut
 
 sub as_query {
@@ -2192,13 +2101,14 @@
 a unique constraint that is not the primary key, or looking for
 related rows.
 
-If you want objects to be saved immediately, use L</find_or_create> instead.
+If you want objects to be saved immediately, use L</find_or_create>
+instead.
 
-B<Note>: C<find_or_new> is probably not what you want when creating a
-new row in a table that uses primary keys supplied by the
-database. Passing in a primary key column with a value of I<undef>
-will cause L</find> to attempt to search for a row with a value of
-I<NULL>.
+B<Note>: Take care when using C<find_or_new> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<find_or_new>, even when set to C<undef>.
 
 =cut
 
@@ -2237,7 +2147,7 @@
 B<keyed on the relationship name>. If the relationship is of type C<multi>
 (L<DBIx::Class::Relationship/has_many>) - pass an arrayref of hashrefs.
 The process will correctly identify columns holding foreign keys, and will
-transparrently populate them from the keys of the corresponding relation.
+transparently populate them from the keys of the corresponding relation.
 This can be applied recursively, and will work correctly for a structure
 with an arbitrary depth and width, as long as the relationships actually
 exists and the correct column data has been supplied.
@@ -2278,6 +2188,19 @@
     }
   });
 
+=over
+
+=item WARNING
+
+When subclassing ResultSet never attempt to override this method. Since
+it is a simple shortcut for C<< $self->new_result($attrs)->insert >>, a
+lot of the internals simply never call it, so your override will be
+bypassed more often than not. Override either L<new|DBIx::Class::Row/new>
+or L<insert|DBIx::Class::Row/insert> depending on how early in the
+L</create> process you need to intervene.
+
+=back
+
 =cut
 
 sub create {
@@ -2327,11 +2250,11 @@
 the find has completed and before the create has started. To avoid
 this problem, use find_or_create() inside a transaction.
 
-B<Note>: C<find_or_create> is probably not what you want when creating
-a new row in a table that uses primary keys supplied by the
-database. Passing in a primary key column with a value of I<undef>
-will cause L</find> to attempt to search for a row with a value of
-I<NULL>.
+B<Note>: Take care when using C<find_or_create> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<find_or_create>, even when set to C<undef>.
 
 See also L</find> and L</update_or_create>. For information on how to declare
 unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
@@ -2382,7 +2305,7 @@
     producer => $producer,
     name => 'harry',
   }, {
-    key => 'primary,
+    key => 'primary',
   });
 
 
@@ -2394,11 +2317,11 @@
 See also L</find> and L</find_or_create>. For information on how to declare
 unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
 
-B<Note>: C<update_or_create> is probably not what you want when
-looking for a row in a table that uses primary keys supplied by the
-database, unless you actually have a key value. Passing in a primary
-key column with a value of I<undef> will cause L</find> to attempt to
-search for a row with a value of I<NULL>.
+B<Note>: Take care when using C<update_or_create> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<update_or_create>, even when set to C<undef>.
 
 =cut
 
@@ -2455,8 +2378,14 @@
       $cd->insert;
   }
 
-See also L</find>, L</find_or_create> and L<find_or_new>.
+B<Note>: Take care when using C<update_or_new> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<update_or_new>, even when set to C<undef>.
 
+See also L</find>, L</find_or_create> and L</find_or_new>.
+
 =cut
 
 sub update_or_new {
@@ -2539,6 +2468,40 @@
   shift->set_cache(undef);
 }
 
+=head2 is_paged
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: true, if the resultset has been paginated
+
+=back
+
+=cut
+
+sub is_paged {
+  my ($self) = @_;
+  return !!$self->{attrs}{page};
+}
+
+=head2 is_ordered
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: true, if the resultset has been ordered with C<order_by>.
+
+=back
+
+=cut
+
+sub is_ordered {
+  my ($self) = @_;
+  return scalar $self->result_source->storage->_parse_order_by($self->{attrs}{order_by});
+}
+
 =head2 related_resultset
 
 =over 4
@@ -2560,21 +2523,30 @@
 
   $self->{related_resultsets} ||= {};
   return $self->{related_resultsets}{$rel} ||= do {
-    my $rel_info = $self->result_source->relationship_info($rel);
+    my $rsrc = $self->result_source;
+    my $rel_info = $rsrc->relationship_info($rel);
 
     $self->throw_exception(
-      "search_related: result source '" . $self->result_source->source_name .
+      "search_related: result source '" . $rsrc->source_name .
         "' has no such relationship $rel")
       unless $rel_info;
 
-    my ($from,$seen) = $self->_chain_relationship($rel);
+    my $attrs = $self->_chain_relationship($rel);
 
-    my $join_count = $seen->{$rel};
-    my $alias = ($join_count > 1 ? join('_', $rel, $join_count) : $rel);
+    my $join_count = $attrs->{seen_join}{$rel};
 
+    my $alias = $self->result_source->storage
+        ->relname_to_table_alias($rel, $join_count);
+
+    # since this is search_related, and we already slid the select window inwards
+    # (the select/as attrs were deleted in the beginning), we need to flip all
+    # left joins to inner, so we get the expected results
+    # read the comment on top of the actual function to see what this does
+    $attrs->{from} = $rsrc->schema->storage->_straight_join_to_node ($attrs->{from}, $alias);
+
+
     #XXX - temp fix for result_class bug. There likely is a more elegant fix -groditi
-    my %attrs = %{$self->{attrs}||{}};
-    delete @attrs{qw(result_class alias)};
+    delete @{$attrs}{qw(result_class alias)};
 
     my $new_cache;
 
@@ -2585,7 +2557,7 @@
       }
     }
 
-    my $rel_source = $self->result_source->related_source($rel);
+    my $rel_source = $rsrc->related_source($rel);
 
     my $new = do {
 
@@ -2595,20 +2567,14 @@
       # to work sanely (e.g. RestrictWithObject wants to be able to add
       # extra query restrictions, and these may need to be $alias.)
 
-      my $attrs = $rel_source->resultset_attributes;
-      local $attrs->{alias} = $alias;
+      my $rel_attrs = $rel_source->resultset_attributes;
+      local $rel_attrs->{alias} = $alias;
 
       $rel_source->resultset
                  ->search_rs(
                      undef, {
-                       %attrs,
-                       join => undef,
-                       prefetch => undef,
-                       select => undef,
-                       as => undef,
-                       where => $self->{cond},
-                       seen_join => $seen,
-                       from => $from,
+                       %$attrs,
+                       where => $attrs->{where},
                    });
     };
     $new->set_cache($new_cache) if $new_cache;
@@ -2659,6 +2625,78 @@
   return ($self->{attrs} || {})->{alias} || 'me';
 }
 
+=head2 as_subselect_rs
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $resultset
+
+=back
+
+Act as a barrier to SQL symbols.  The resultset provided will be made into a
+"virtual view" by including it as a subquery within the from clause.  From this
+point on, any joined tables are inaccessible to ->search on the resultset (as if
+it were simply where-filtered without joins).  For example:
+
+ my $rs = $schema->resultset('Bar')->search({'x.name' => 'abc'},{ join => 'x' });
+
+ # 'x' now pollutes the query namespace
+
+ # So the following works as expected
+ my $ok_rs = $rs->search({'x.other' => 1});
+
+ # But this doesn't: instead of finding a 'Bar' related to two x rows (abc and
+ # def) we look for one row with contradictory terms and join in another table
+ # (aliased 'x_2') which we never use
+ my $broken_rs = $rs->search({'x.name' => 'def'});
+
+ my $rs2 = $rs->as_subselect_rs;
+
+ # doesn't work - 'x' is no longer accessible in $rs2, having been sealed away
+ my $not_joined_rs = $rs2->search({'x.other' => 1});
+
+ # works as expected: finds a 'table' row related to two x rows (abc and def)
+ my $correctly_joined_rs = $rs2->search({'x.name' => 'def'});
+
+Another example of when one might use this would be to select a subset of
+columns in a group by clause:
+
+ my $rs = $schema->resultset('Bar')->search(undef, {
+   group_by => [qw{ id foo_id baz_id }],
+ })->as_subselect_rs->search(undef, {
+   columns => [qw{ id foo_id }]
+ });
+
+In the above example normally columns would have to be equal to the group by,
+but because we isolated the group by into a subselect the above works.
+
+=cut
+
+sub as_subselect_rs {
+  my $self = shift;
+
+  my $attrs = $self->_resolved_attrs;
+
+  my $fresh_rs = (ref $self)->new (
+    $self->result_source
+  );
+
+  # these pieces will be locked in the subquery
+  delete $fresh_rs->{cond};
+  delete @{$fresh_rs->{attrs}}{qw/where bind/};
+
+  return $fresh_rs->search( {}, {
+    from => [{
+      $attrs->{alias} => $self->as_query,
+      -alias         => $attrs->{alias},
+      -source_handle => $self->result_source->handle,
+    }],
+    alias => $attrs->{alias},
+  });
+}
+
 # This code is called by search_related, and makes sure there
 # is clear separation between the joins before, during, and
 # after the relationship. This information is needed later
@@ -2666,37 +2704,67 @@
 # with a relation_chain_depth less than the depth of the
 # current prefetch is not considered)
 #
-# The increments happen in 1/2s to make it easier to correlate the
-# join depth with the join path. An integer means a relationship
-# specified via a search_related, whereas a fraction means an added
-# join/prefetch via attributes
+# The increments happen twice per join. An even number means a
+# relationship specified via a search_related, whereas an odd
+# number indicates a join/prefetch added via attributes
+#
+# Also this code will wrap the current resultset (the one we
+# chain to) in a subselect IFF it contains limiting attributes
 sub _chain_relationship {
   my ($self, $rel) = @_;
   my $source = $self->result_source;
-  my $attrs = $self->{attrs};
+  my $attrs = { %{$self->{attrs}||{}} };
 
-  my $from = [ @{
-      $attrs->{from}
-        ||
-      [{
-        -source_handle => $source->handle,
-        -alias => $attrs->{alias},
-        $attrs->{alias} => $source->from,
-      }]
-  }];
+  # we need to take the prefetch attrs into account before we
+  # ->_resolve_join as otherwise they get lost - captainL
+  my $join = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} );
 
-  my $seen = { %{$attrs->{seen_join} || {} } };
-  my $jpath = ($attrs->{seen_join} && keys %{$attrs->{seen_join}}) 
-    ? $from->[-1][0]{-join_path} 
-    : [];
+  delete @{$attrs}{qw/join prefetch collapse group_by distinct select as columns +select +as +columns/};
 
+  my $seen = { %{ (delete $attrs->{seen_join}) || {} } };
 
-  # we need to take the prefetch the attrs into account before we
-  # ->_resolve_join as otherwise they get lost - captainL
-  my $merged = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} );
+  my $from;
+  my @force_subq_attrs = qw/offset rows group_by having/;
 
+  if (
+    ($attrs->{from} && ref $attrs->{from} ne 'ARRAY')
+      ||
+    $self->_has_resolved_attr (@force_subq_attrs)
+  ) {
+    # Nuke the prefetch (if any) before the new $rs attrs
+    # are resolved (prefetch is useless - we are wrapping
+    # a subquery anyway).
+    my $rs_copy = $self->search;
+    $rs_copy->{attrs}{join} = $self->_merge_attr (
+      $rs_copy->{attrs}{join},
+      delete $rs_copy->{attrs}{prefetch},
+    );
+
+    $from = [{
+      -source_handle => $source->handle,
+      -alias => $attrs->{alias},
+      $attrs->{alias} => $rs_copy->as_query,
+    }];
+    delete @{$attrs}{@force_subq_attrs, qw/where bind/};
+    $seen->{-relation_chain_depth} = 0;
+  }
+  elsif ($attrs->{from}) {  #shallow copy suffices
+    $from = [ @{$attrs->{from}} ];
+  }
+  else {
+    $from = [{
+      -source_handle => $source->handle,
+      -alias => $attrs->{alias},
+      $attrs->{alias} => $source->from,
+    }];
+  }
+
+  my $jpath = ($seen->{-relation_chain_depth})
+    ? $from->[-1][0]{-join_path}
+    : [];
+
   my @requested_joins = $source->_resolve_join(
-    $merged,
+    $join,
     $attrs->{alias},
     $seen,
     $jpath,
@@ -2704,7 +2772,7 @@
 
   push @$from, @requested_joins;
 
-  $seen->{-relation_chain_depth} += 0.5;
+  $seen->{-relation_chain_depth}++;
 
   # if $self already had a join/prefetch specified on it, the requested
   # $rel might very well be already included. What we do in this case
@@ -2712,26 +2780,16 @@
   # the join in question so we could tell it *is* the search_related)
   my $already_joined;
 
-
   # we consider the last one thus reverse
   for my $j (reverse @requested_joins) {
-    if ($rel eq $j->[0]{-join_path}[-1]) {
-      $j->[0]{-relation_chain_depth} += 0.5;
+    my ($last_j) = keys %{$j->[0]{-join_path}[-1]};
+    if ($rel eq $last_j) {
+      $j->[0]{-relation_chain_depth}++;
       $already_joined++;
       last;
     }
   }
 
-# alternative way to scan the entire chain - not backwards compatible
-#  for my $j (reverse @$from) {
-#    next unless ref $j eq 'ARRAY';
-#    if ($j->[0]{-join_path} && $j->[0]{-join_path}[-1] eq $rel) {
-#      $j->[0]{-relation_chain_depth} += 0.5;
-#      $already_joined++;
-#      last;
-#    }
-#  }
-
   unless ($already_joined) {
     push @$from, $source->_resolve_join(
       $rel,
@@ -2741,9 +2799,9 @@
     );
   }
 
-  $seen->{-relation_chain_depth} += 0.5;
+  $seen->{-relation_chain_depth}++;
 
-  return ($from,$seen);
+  return {%$attrs, from => $from, seen_join => $seen};
 }
 
 # too many times we have to do $attrs = { %{$self->_resolved_attrs} }
@@ -2765,31 +2823,47 @@
 
   # build columns (as long as select isn't set) into a set of as/select hashes
   unless ( $attrs->{select} ) {
-      @colbits = map {
-          ( ref($_) eq 'HASH' )
-              ? $_
-              : {
-                  (
-                    /^\Q${alias}.\E(.+)$/
-                      ? "$1"
-                      : "$_"
-                  )
-                =>
-                  (
-                    /\./
-                      ? "$_"
-                      : "${alias}.$_"
-                  )
-            }
-      } ( ref($attrs->{columns}) eq 'ARRAY' ) ? @{ delete $attrs->{columns}} : (delete $attrs->{columns} || $source->columns );
+
+    my @cols;
+    if ( ref $attrs->{columns} eq 'ARRAY' ) {
+      @cols = @{ delete $attrs->{columns}}
+    } elsif ( defined $attrs->{columns} ) {
+      @cols = delete $attrs->{columns}
+    } else {
+      @cols = $source->columns
+    }
+
+    for (@cols) {
+      if ( ref $_ eq 'HASH' ) {
+        push @colbits, $_
+      } else {
+        my $key = /^\Q${alias}.\E(.+)$/
+          ? "$1"
+          : "$_";
+        my $value = /\./
+          ? "$_"
+          : "${alias}.$_";
+        push @colbits, { $key => $value };
+      }
+    }
   }
+
   # add the additional columns on
-  foreach ( 'include_columns', '+columns' ) {
-      push @colbits, map {
-          ( ref($_) eq 'HASH' )
-            ? $_
-            : { ( split( /\./, $_ ) )[-1] => ( /\./ ? $_ : "${alias}.$_" ) }
-      } ( ref($attrs->{$_}) eq 'ARRAY' ) ? @{ delete $attrs->{$_} } : delete $attrs->{$_} if ( $attrs->{$_} );
+  foreach (qw{include_columns +columns}) {
+    if ( $attrs->{$_} ) {
+      my @list = ( ref($attrs->{$_}) eq 'ARRAY' )
+        ? @{ delete $attrs->{$_} }
+        : delete $attrs->{$_};
+      for (@list) {
+        if ( ref($_) eq 'HASH' ) {
+          push @colbits, $_
+        } else {
+          my $key = ( split /\./, $_ )[-1];
+          my $value = ( /\./ ? $_ : "$alias.$_" );
+          push @colbits, { $key => $value };
+        }
+      }
+    }
   }
 
   # start with initial select items
@@ -2798,15 +2872,22 @@
         ( ref $attrs->{select} eq 'ARRAY' )
       ? [ @{ $attrs->{select} } ]
       : [ $attrs->{select} ];
-    $attrs->{as} = (
-      $attrs->{as}
-      ? (
-        ref $attrs->{as} eq 'ARRAY'
-        ? [ @{ $attrs->{as} } ]
-        : [ $attrs->{as} ]
+
+    if ( $attrs->{as} ) {
+      $attrs->{as} =
+        (
+          ref $attrs->{as} eq 'ARRAY'
+            ? [ @{ $attrs->{as} } ]
+            : [ $attrs->{as} ]
         )
-      : [ map { m/^\Q${alias}.\E(.+)$/ ? $1 : $_ } @{ $attrs->{select} } ]
-    );
+    } else {
+      $attrs->{as} = [ map {
+         m/^\Q${alias}.\E(.+)$/
+           ? $1
+           : $_
+         } @{ $attrs->{select} }
+      ]
+    }
   }
   else {
 
@@ -2816,31 +2897,28 @@
   }
 
   # now add colbits to select/as
-  push( @{ $attrs->{select} }, map { values( %{$_} ) } @colbits );
-  push( @{ $attrs->{as} },     map { keys( %{$_} ) } @colbits );
+  push @{ $attrs->{select} }, map values %{$_}, @colbits;
+  push @{ $attrs->{as}     }, map keys   %{$_}, @colbits;
 
-  my $adds;
-  if ( $adds = delete $attrs->{'+select'} ) {
+  if ( my $adds = delete $attrs->{'+select'} ) {
     $adds = [$adds] unless ref $adds eq 'ARRAY';
-    push(
-      @{ $attrs->{select} },
-      map { /\./ || ref $_ ? $_ : "${alias}.$_" } @$adds
-    );
+    push @{ $attrs->{select} },
+      map { /\./ || ref $_ ? $_ : "$alias.$_" } @$adds;
   }
-  if ( $adds = delete $attrs->{'+as'} ) {
+  if ( my $adds = delete $attrs->{'+as'} ) {
     $adds = [$adds] unless ref $adds eq 'ARRAY';
-    push( @{ $attrs->{as} }, @$adds );
+    push @{ $attrs->{as} }, @$adds;
   }
 
-  $attrs->{from} ||= [ {
+  $attrs->{from} ||= [{
     -source_handle => $source->handle,
     -alias => $self->{attrs}{alias},
     $self->{attrs}{alias} => $source->from,
-  } ];
+  }];
 
   if ( $attrs->{join} || $attrs->{prefetch} ) {
 
-    $self->throw_exception ('join/prefetch can not be used with a literal scalarref {from}')
+    $self->throw_exception ('join/prefetch can not be used with a custom {from}')
       if ref $attrs->{from} ne 'ARRAY';
 
     my $join = delete $attrs->{join} || {};
@@ -2856,7 +2934,7 @@
           $join,
           $alias,
           { %{ $attrs->{seen_join} || {} } },
-          ($attrs->{seen_join} && keys %{$attrs->{seen_join}})
+          ( $attrs->{seen_join} && keys %{$attrs->{seen_join}})
             ? $attrs->{from}[-1][0]{-join_path}
             : []
           ,
@@ -2879,7 +2957,27 @@
   # generate the distinct induced group_by early, as prefetch will be carried via a
   # subquery (since a group_by is present)
   if (delete $attrs->{distinct}) {
-    $attrs->{group_by} ||= [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ];
+    if ($attrs->{group_by}) {
+      carp ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
+    }
+    else {
+      $attrs->{group_by} = [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ];
+
+      # add any order_by parts that are not already present in the group_by
+      # we need to be careful not to add any named functions/aggregates
+      # i.e. select => [ ... { count => 'foo', -as => 'foocount' } ... ]
+      my %already_grouped = map { $_ => 1 } (@{$attrs->{group_by}});
+
+      my $storage = $self->result_source->schema->storage;
+
+      my $rs_column_list = $storage->_resolve_column_info ($attrs->{from});
+
+      for my $chunk ($storage->_parse_order_by($attrs->{order_by})) {
+        if ($rs_column_list->{$chunk} && not $already_grouped{$chunk}++) {
+          push @{$attrs->{group_by}}, $chunk;
+        }
+      }
+    }
   }
 
   $attrs->{collapse} ||= {};
@@ -2888,8 +2986,27 @@
 
     my $prefetch_ordering = [];
 
-    my $join_map = $self->_joinpath_aliases ($attrs->{from}, $attrs->{seen_join});
+    # this is a separate structure (we don't look in {from} directly)
+    # as the resolver needs to shift things off the lists to work
+    # properly (identical-prefetches on different branches)
+    my $join_map = {};
+    if (ref $attrs->{from} eq 'ARRAY') {
 
+      my $start_depth = $attrs->{seen_join}{-relation_chain_depth} || 0;
+
+      for my $j ( @{$attrs->{from}}[1 .. $#{$attrs->{from}} ] ) {
+        next unless $j->[0]{-alias};
+        next unless $j->[0]{-join_path};
+        next if ($j->[0]{-relation_chain_depth} || 0) < $start_depth;
+
+        my @jpath = map { keys %$_ } @{$j->[0]{-join_path}};
+
+        my $p = $join_map;
+        $p = $p->{$_} ||= {} for @jpath[ ($start_depth/2) .. $#jpath]; # only even depths are actual jpath boundaries
+        push @{$p->{-join_aliases} }, $j->[0]{-alias};
+      }
+    }
+
     my @prefetch =
       $source->_resolve_prefetch( $prefetch, $alias, $join_map, $prefetch_ordering, $attrs->{collapse} );
 
@@ -2907,7 +3024,7 @@
   # even though it doesn't make much sense, this is what pre 081xx has
   # been doing
   if (my $page = delete $attrs->{page}) {
-    $attrs->{offset} = 
+    $attrs->{offset} =
       ($attrs->{rows} * ($page - 1))
             +
       ($attrs->{offset} || 0)
@@ -2917,33 +3034,6 @@
   return $self->{_attrs} = $attrs;
 }
 
-sub _joinpath_aliases {
-  my ($self, $fromspec, $seen) = @_;
-
-  my $paths = {};
-  return $paths unless ref $fromspec eq 'ARRAY';
-
-  my $cur_depth = $seen->{-relation_chain_depth} || 0;
-
-  if (int ($cur_depth) != $cur_depth) {
-    $self->throw_exception ("-relation_chain_depth is not an integer, something went horribly wrong ($cur_depth)");
-  }
-
-  for my $j (@$fromspec) {
-
-    next if ref $j ne 'ARRAY';
-    next if ($j->[0]{-relation_chain_depth} || 0) < $cur_depth;
-
-    my $jpath = $j->[0]{-join_path};
-
-    my $p = $paths;
-    $p = $p->{$_} ||= {} for @{$jpath}[$cur_depth .. $#$jpath];
-    push @{$p->{-join_aliases} }, $j->[0]{-alias};
-  }
-
-  return $paths;
-}
-
 sub _rollout_attr {
   my ($self, $attr) = @_;
 
@@ -2986,6 +3076,13 @@
 sub _calculate_score {
   my ($self, $a, $b) = @_;
 
+  if (defined $a xor defined $b) {
+    return 0;
+  }
+  elsif (not defined $a) {
+    return 1;
+  }
+
   if (ref $b eq 'HASH') {
     my ($b_key) = keys %{$b};
     if (ref $a eq 'HASH') {
@@ -3067,12 +3164,13 @@
 
 sub throw_exception {
   my $self=shift;
+
   if (ref $self && $self->_source_handle->schema) {
     $self->_source_handle->schema->throw_exception(@_)
-  } else {
-    croak(@_);
   }
-
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 # XXX: FIXME: Attributes docs need clearing up
@@ -3094,7 +3192,7 @@
 
 =back
 
-Which column(s) to order the results by. 
+Which column(s) to order the results by.
 
 [The full list of suitable values is documented in
 L<SQL::Abstract/"ORDER BY CLAUSES">; the following is a summary of
@@ -3181,20 +3279,27 @@
     select => [
       'name',
       { count => 'employeeid' },
-      { sum => 'salary' }
+      { max => { length => 'name' }, -as => 'longest_name' }
     ]
   });
 
-When you use function/stored procedure names and do not supply an C<as>
-attribute, the column names returned are storage-dependent. E.g. MySQL would
-return a column named C<count(employeeid)> in the above example.
+  # Equivalent SQL
+  SELECT name, COUNT( employeeid ), MAX( LENGTH( name ) ) AS longest_name FROM employee
 
+B<NOTE:> You will almost always need a corresponding L</as> attribute when you
+use L</select>, to instruct DBIx::Class how to store the result of the column.
+Also note that the L</as> attribute has nothing to do with the SQL-side 'AS'
+identifier aliasing. You can however alias a function, so you can use it in
+e.g. an C<ORDER BY> clause. This is done via the C<-as> B<select function
+attribute> supplied as shown in the example above.
+
 =head2 +select
 
 =over 4
 
 Indicates additional columns to be selected from storage.  Works the same as
-L</select> but adds columns to the selection.
+L</select> but adds columns to the default selection, instead of specifying
+an explicit list.
 
 =back
 
@@ -3214,25 +3319,26 @@
 
 =back
 
-Indicates column names for object inflation. That is, C<as>
-indicates the name that the column can be accessed as via the
-C<get_column> method (or via the object accessor, B<if one already
-exists>).  It has nothing to do with the SQL code C<SELECT foo AS bar>.
+Indicates column names for object inflation. That is L</as> indicates the
+slot name in which the column value will be stored within the
+L<Row|DBIx::Class::Row> object. The value will then be accessible via this
+identifier by the C<get_column> method (or via the object accessor B<if one
+with the same name already exists>) as shown below. The L</as> attribute has
+B<nothing to do> with the SQL-side C<AS>. See L</select> for details.
 
-The C<as> attribute is used in conjunction with C<select>,
-usually when C<select> contains one or more function or stored
-procedure names:
-
   $rs = $schema->resultset('Employee')->search(undef, {
     select => [
       'name',
-      { count => 'employeeid' }
+      { count => 'employeeid' },
+      { max => { length => 'name' }, -as => 'longest_name' }
     ],
-    as => ['name', 'employee_count'],
+    as => [qw/
+      name
+      employee_count
+      max_name_length
+    /],
   });
 
-  my $employee = $rs->first(); # get the first Employee
-
 If the object against which the search is performed already has an accessor
 matching a column name specified in C<as>, the value can be retrieved using
 the accessor as normal:
@@ -3247,16 +3353,6 @@
 You can create your own accessors if required - see
 L<DBIx::Class::Manual::Cookbook> for details.
 
-Please note: This will NOT insert an C<AS employee_count> into the SQL
-statement produced, it is used for internal access only. Thus
-attempting to use the accessor in an C<order_by> clause or similar
-will fail miserably.
-
-To get around this limitation, you can supply literal SQL to your
-C<select> attibute that contains the C<AS alias> text, eg:
-
-  select => [\'myfield AS alias']
-
 =head2 join
 
 =over 4
@@ -3364,7 +3460,7 @@
 C<prefetch> can be used with the following relationship types: C<belongs_to>,
 C<has_one> (or if you're using C<add_relationship>, any relationship declared
 with an accessor type of 'single' or 'filter'). A more complex example that
-prefetches an artists cds, the tracks on those cds, and the tags associted
+prefetches an artists cds, the tracks on those cds, and the tags associated
 with that artist is given below (assuming many-to-many from artists to tags):
 
  my $rs = $schema->resultset('Artist')->search(
@@ -3386,12 +3482,12 @@
 
 =over 4
 
-=item * 
+=item *
 
 Prefetch uses the L</cache> to populate the prefetched relationships. This
 may or may not be what you want.
 
-=item * 
+=item *
 
 If you specify a condition on a prefetched relationship, ONLY those
 rows that match the prefetched condition will be fetched into that relationship.
@@ -3443,7 +3539,7 @@
 
 =back
 
-Specifes the maximum number of rows for direct retrieval or the number of
+Specifies the maximum number of rows for direct retrieval or the number of
 rows per page if the page attribute or method is used.
 
 =head2 offset
@@ -3491,7 +3587,8 @@
 
 =back
 
-Set to 1 to group by all columns.
+Set to 1 to group by all columns. If the resultset already has a group_by
+attribute, this setting is ignored and an appropriate warning is issued.
 
 =head2 where
 
@@ -3502,8 +3599,8 @@
   # only return rows WHERE deleted IS NULL for all searches
   __PACKAGE__->resultset_attributes({ where => { deleted => undef } }); )
 
-Can be overridden by passing C<{ where => undef }> as an attribute
-to a resulset.
+Can be overridden by passing C<< { where => undef } >> as an attribute
+to a resultset.
 
 =back
 
@@ -3525,177 +3622,6 @@
 For more examples of using these attributes, see
 L<DBIx::Class::Manual::Cookbook>.
 
-=head2 from
-
-=over 4
-
-=item Value: \@from_clause
-
-=back
-
-The C<from> attribute gives you manual control over the C<FROM> clause of SQL
-statements generated by L<DBIx::Class>, allowing you to express custom C<JOIN>
-clauses.
-
-NOTE: Use this on your own risk.  This allows you to shoot off your foot!
-
-C<join> will usually do what you need and it is strongly recommended that you
-avoid using C<from> unless you cannot achieve the desired result using C<join>.
-And we really do mean "cannot", not just tried and failed. Attempting to use
-this because you're having problems with C<join> is like trying to use x86
-ASM because you've got a syntax error in your C. Trust us on this.
-
-Now, if you're still really, really sure you need to use this (and if you're
-not 100% sure, ask the mailing list first), here's an explanation of how this
-works.
-
-The syntax is as follows -
-
-  [
-    { <alias1> => <table1> },
-    [
-      { <alias2> => <table2>, -join_type => 'inner|left|right' },
-      [], # nested JOIN (optional)
-      { <table1.column1> => <table2.column2>, ... (more conditions) },
-    ],
-    # More of the above [ ] may follow for additional joins
-  ]
-
-  <table1> <alias1>
-  JOIN
-    <table2> <alias2>
-    [JOIN ...]
-  ON <table1.column1> = <table2.column2>
-  <more joins may follow>
-
-An easy way to follow the examples below is to remember the following:
-
-    Anything inside "[]" is a JOIN
-    Anything inside "{}" is a condition for the enclosing JOIN
-
-The following examples utilize a "person" table in a family tree application.
-In order to express parent->child relationships, this table is self-joined:
-
-    # Person->belongs_to('father' => 'Person');
-    # Person->belongs_to('mother' => 'Person');
-
-C<from> can be used to nest joins. Here we return all children with a father,
-then search against all mothers of those children:
-
-  $rs = $schema->resultset('Person')->search(
-      undef,
-      {
-          alias => 'mother', # alias columns in accordance with "from"
-          from => [
-              { mother => 'person' },
-              [
-                  [
-                      { child => 'person' },
-                      [
-                          { father => 'person' },
-                          { 'father.person_id' => 'child.father_id' }
-                      ]
-                  ],
-                  { 'mother.person_id' => 'child.mother_id' }
-              ],
-          ]
-      },
-  );
-
-  # Equivalent SQL:
-  # SELECT mother.* FROM person mother
-  # JOIN (
-  #   person child
-  #   JOIN person father
-  #   ON ( father.person_id = child.father_id )
-  # )
-  # ON ( mother.person_id = child.mother_id )
-
-The type of any join can be controlled manually. To search against only people
-with a father in the person table, we could explicitly use C<INNER JOIN>:
-
-    $rs = $schema->resultset('Person')->search(
-        undef,
-        {
-            alias => 'child', # alias columns in accordance with "from"
-            from => [
-                { child => 'person' },
-                [
-                    { father => 'person', -join_type => 'inner' },
-                    { 'father.id' => 'child.father_id' }
-                ],
-            ]
-        },
-    );
-
-    # Equivalent SQL:
-    # SELECT child.* FROM person child
-    # INNER JOIN person father ON child.father_id = father.id
-
-You can select from a subquery by passing a resultset to from as follows.
-
-    $schema->resultset('Artist')->search( 
-        undef, 
-        {   alias => 'artist2',
-            from  => [ { artist2 => $artist_rs->as_query } ],
-        } );
-
-    # and you'll get sql like this..
-    # SELECT artist2.artistid, artist2.name, artist2.rank, artist2.charfield FROM 
-    #   ( SELECT me.artistid, me.name, me.rank, me.charfield FROM artists me ) artist2
-
-If you need to express really complex joins, you
-can supply literal SQL to C<from> via a scalar reference. In this case
-the contents of the scalar will replace the table name associated with the
-resultsource.
-
-WARNING: This technique might very well not work as expected on chained
-searches - you have been warned.
-
-    # Assuming the Event resultsource is defined as:
-
-        MySchema::Event->add_columns (
-            sequence => {
-                data_type => 'INT',
-                is_auto_increment => 1,
-            },
-            location => {
-                data_type => 'INT',
-            },
-            type => {
-                data_type => 'INT',
-            },
-        );
-        MySchema::Event->set_primary_key ('sequence');
-
-    # This will get back the latest event for every location. The column
-    # selector is still provided by DBIC, all we do is add a JOIN/WHERE
-    # combo to limit the resultset
-
-    $rs = $schema->resultset('Event');
-    $table = $rs->result_source->name;
-    $latest = $rs->search (
-        undef,
-        { from => \ "
-            (SELECT e1.* FROM $table e1
-                JOIN $table e2
-                    ON e1.location = e2.location
-                    AND e1.sequence < e2.sequence
-                WHERE e2.sequence is NULL
-            ) me",
-        },
-    );
-
-    # Equivalent SQL (with the DBIC chunks added):
-
-    SELECT me.sequence, me.location, me.type FROM
-       (SELECT e1.* FROM events e1
-           JOIN events e2
-               ON e1.location = e2.location
-               AND e1.sequence < e2.sequence
-           WHERE e2.sequence is NULL
-       ) me;
-
 =head2 for
 
 =over 4

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSetColumn.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSetColumn.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSetColumn.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,7 +1,12 @@
 package DBIx::Class::ResultSetColumn;
+
 use strict;
 use warnings;
+
 use base 'DBIx::Class';
+
+use Carp::Clan qw/^DBIx::Class/;
+use DBIx::Class::Exception;
 use List::Util;
 
 =head1 NAME
@@ -37,32 +42,51 @@
   my ($class, $rs, $column) = @_;
   $class = ref $class if ref $class;
 
-  $rs->throw_exception("column must be supplied") unless $column;
+  $rs->throw_exception('column must be supplied') unless $column;
 
   my $orig_attrs = $rs->_resolved_attrs;
-  my $new_parent_rs = $rs->search_rs;
+  my $alias = $rs->current_source_alias;
 
-  # prefetch causes additional columns to be fetched, but we can not just make a new
-  # rs via the _resolved_attrs trick - we need to retain the separation between
-  # +select/+as and select/as. At the same time we want to preserve any joins that the
-  # prefetch would otherwise generate.
-
-  my $new_attrs = $new_parent_rs->{attrs} ||= {};
-  $new_attrs->{join} = $rs->_merge_attr( delete $new_attrs->{join}, delete $new_attrs->{prefetch} );
-
   # If $column can be found in the 'as' list of the parent resultset, use the
   # corresponding element of its 'select' list (to keep any custom column
   # definition set up with 'select' or '+select' attrs), otherwise use $column
   # (to create a new column definition on-the-fly).
-
   my $as_list = $orig_attrs->{as} || [];
   my $select_list = $orig_attrs->{select} || [];
   my $as_index = List::Util::first { ($as_list->[$_] || "") eq $column } 0..$#$as_list;
   my $select = defined $as_index ? $select_list->[$as_index] : $column;
 
+  my $new_parent_rs;
+  # analyze the order_by, and see if it is done over a function/nonexistent column
+  # if this is the case we will need to wrap a subquery since the result of RSC
+  # *must* be a single column select
+  my %collist = map 
+    { $_ => 1, ($_ =~ /\./) ? () : ( "$alias.$_" => 1 ) }
+    ($rs->result_source->columns, $column)
+  ;
+  if (
+    scalar grep
+      { ! $collist{$_} }
+      ( $rs->result_source->schema->storage->_parse_order_by ($orig_attrs->{order_by} ) ) 
+  ) {
+    # nuke the prefetch before collapsing to sql
+    my $subq_rs = $rs->search;
+    $subq_rs->{attrs}{join} = $subq_rs->_merge_attr( $subq_rs->{attrs}{join}, delete $subq_rs->{attrs}{prefetch} );
+    $new_parent_rs = $subq_rs->as_subselect_rs;
+  }
+
+  $new_parent_rs ||= $rs->search_rs;
+  my $new_attrs = $new_parent_rs->{attrs} ||= {};
+
+  # prefetch causes additional columns to be fetched, but we can not just make a new
+  # rs via the _resolved_attrs trick - we need to retain the separation between
+  # +select/+as and select/as. At the same time we want to preserve any joins that the
+  # prefetch would otherwise generate.
+  $new_attrs->{join} = $rs->_merge_attr( $new_attrs->{join}, delete $new_attrs->{prefetch} );
+
   # {collapse} would mean a has_many join was injected, which in turn means
-  # we need to group IF WE CAN (only if the column in question is unique)
-  if (!$new_attrs->{group_by} && keys %{$orig_attrs->{collapse}}) {
+  # we need to group *IF WE CAN* (only if the column in question is unique)
+  if (!$orig_attrs->{group_by} && keys %{$orig_attrs->{collapse}}) {
 
     # scan for a constraint that would contain our column only - that'd be proof
     # enough it is unique
@@ -76,16 +100,24 @@
 
       if ($col eq $select or $fqcol eq $select) {
         $new_attrs->{group_by} = [ $select ];
+        delete $new_attrs->{distinct}; # it is ignored when group_by is present
         last;
       }
     }
+
+    if (!$new_attrs->{group_by}) {
+      carp (
+          "Attempting to retrieve non-unique column '$column' on a resultset containing "
+        . 'one-to-many joins will return duplicate results.'
+      );
+    }
   }
 
   my $new = bless { _select => $select, _as => $column, _parent_resultset => $new_parent_rs }, $class;
   return $new;
 }
 
-=head2 as_query (EXPERIMENTAL)
+=head2 as_query
 
 =over 4
 
@@ -99,8 +131,6 @@
 
 This is generally used as the RHS for a subquery.
 
-B<NOTE>: This feature is still experimental.
-
 =cut
 
 sub as_query { return shift->_resultset->as_query(@_) }
@@ -125,7 +155,10 @@
 
 sub next {
   my $self = shift;
+
+  # using cursor so we don't inflate anything
   my ($row) = $self->_resultset->cursor->next;
+
   return $row;
 }
 
@@ -149,6 +182,8 @@
 
 sub all {
   my $self = shift;
+
+  # using cursor so we don't inflate anything
   return map { $_->[0] } $self->_resultset->cursor->all;
 }
 
@@ -194,10 +229,41 @@
 
 sub first {
   my $self = shift;
-  my ($row) = $self->_resultset->cursor->reset->next;
+
+  # using cursor so we don't inflate anything
+  $self->_resultset->cursor->reset;
+  my ($row) = $self->_resultset->cursor->next;
+
   return $row;
 }
 
+=head2 single
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $value
+
+=back
+
+Much like L<DBIx::Class::ResultSet/single>, fetches one and only one column
+value using the cursor directly. If additional rows are present a warning
+is issued before discarding the cursor.
+
+=cut
+
+sub single {
+  my $self = shift;
+
+  my $attrs = $self->_resultset->_resolved_attrs;
+  my ($row) = $self->_resultset->result_source->storage->select_single(
+    $attrs->{from}, $attrs->{select}, $attrs->{where}, $attrs
+  );
+
+  return $row;
+}
+
 =head2 min
 
 =over 4
@@ -378,11 +444,13 @@
 
 sub throw_exception {
   my $self=shift;
+
   if (ref $self && $self->{_parent_resultset}) {
-    $self->{_parent_resultset}->throw_exception(@_)
-  } else {
-    croak(@_);
+    $self->{_parent_resultset}->throw_exception(@_);
   }
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 # _resultset
@@ -395,7 +463,7 @@
 #
 # Returns the underlying resultset. Creates it from the parent resultset if
 # necessary.
-# 
+#
 sub _resultset {
   my $self = shift;
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSource/Table.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSource/Table.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSource/Table.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -16,7 +16,7 @@
 
 =head1 DESCRIPTION
 
-Table object that inherits from L<DBIx::Class::ResultSource>
+Table object that inherits from L<DBIx::Class::ResultSource>.
 
 =head1 METHODS
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSource/View.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSource/View.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSource/View.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -25,9 +25,8 @@
 
   package MyDB::Schema::Result::Year2000CDs;
 
-  use base qw/DBIx::Class/;
+  use base qw/DBIx::Class::Core/;
 
-  __PACKAGE__->load_components('Core');
   __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
 
   __PACKAGE__->table('year2000cds');

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSource.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSource.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSource.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -5,8 +5,9 @@
 
 use DBIx::Class::ResultSet;
 use DBIx::Class::ResultSourceHandle;
+
+use DBIx::Class::Exception;
 use Carp::Clan qw/^DBIx::Class/;
-use Storable;
 
 use base qw/DBIx::Class/;
 
@@ -27,9 +28,8 @@
   # Create a table based result source, in a result class.
 
   package MyDB::Schema::Result::Artist;
-  use base qw/DBIx::Class/;
+  use base qw/DBIx::Class::Core/;
 
-  __PACKAGE__->load_components(qw/Core/);
   __PACKAGE__->table('artist');
   __PACKAGE__->add_columns(qw/ artistid name /);
   __PACKAGE__->set_primary_key('artistid');
@@ -39,8 +39,9 @@
 
   # Create a query (view) based result source, in a result class
   package MyDB::Schema::Result::Year2000CDs;
+  use base qw/DBIx::Class::Core/;
 
-  __PACKAGE__->load_components('Core');
+  __PACKAGE__->load_components('InflateColumn::DateTime');
   __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
 
   __PACKAGE__->table('year2000cds');
@@ -59,10 +60,10 @@
 default result source type, so one is created for you when defining a
 result class as described in the synopsis above.
 
-More specifically, the L<DBIx::Class::Core> component pulls in the
-L<DBIx::Class::ResultSourceProxy::Table> as a base class, which
-defines the L<table|DBIx::Class::ResultSourceProxy::Table/table>
-method. When called, C<table> creates and stores an instance of
+More specifically, the L<DBIx::Class::Core> base class pulls in the
+L<DBIx::Class::ResultSourceProxy::Table> component, which defines
+the L<table|DBIx::Class::ResultSourceProxy::Table/table> method.
+When called, C<table> creates and stores an instance of
 L<DBIx::Class::ResultSoure::Table>. Luckily, to use tables as result
 sources, you don't need to remember any of this.
 
@@ -138,6 +139,13 @@
 L<DBIx::Class::Row> objects. You can change the name of the accessor
 by supplying an L</accessor> in the column_info hash.
 
+If a column name beginning with a plus sign ('+col1') is provided, the
+attributes provided will be merged with any existing attributes for the
+column, with the new attributes taking precedence in the case that an
+attribute already exists. Using this without a hashref 
+(C<< $source->add_columns(qw/+col1 +col2/) >>) is legal, but useless --
+it does the same thing it would do without the plus.
+
 The contents of the column_info are not set in stone. The following
 keys are currently recognised/used by DBIx::Class:
 
@@ -249,9 +257,9 @@
 L</sequence> value as well.
 
 Also set this for MSSQL columns with the 'uniqueidentifier'
-L<DBIx::Class::ResultSource/data_type> whose values you want to automatically
-generate using C<NEWID()>, unless they are a primary key in which case this will
-be done anyway.
+L<data_type|DBIx::Class::ResultSource/data_type> whose values you want to
+automatically generate using C<NEWID()>, unless they are a primary key in which
+case this will be done anyway.
 
 =item extra
 
@@ -287,9 +295,17 @@
   my @added;
   my $columns = $self->_columns;
   while (my $col = shift @cols) {
+    my $column_info = {};
+    if ($col =~ s/^\+//) {
+      $column_info = $self->column_info($col);
+    }
+
     # If next entry is { ... } use that for the column info, if not
     # use an empty hashref
-    my $column_info = ref $cols[0] ? shift(@cols) : {};
+    if (ref $cols[0]) {
+      my $new_info = shift(@cols);
+      %$column_info = (%$column_info, %$new_info);
+    }
     push(@added, $col) unless exists $columns->{$col};
     $columns->{$col} = $column_info;
   }
@@ -388,7 +404,7 @@
   my $self = shift;
   $self->throw_exception(
     "columns() is a read-only accessor, did you mean add_columns()?"
-  ) if (@_ > 1);
+  ) if @_;
   return @{$self->{_ordered_columns}||[]};
 }
 
@@ -464,10 +480,11 @@
 Additionally, defines a L<unique constraint|add_unique_constraint>
 named C<primary>.
 
-The primary key columns are used by L<DBIx::Class::PK::Auto> to
-retrieve automatically created values from the database. They are also
-used as default joining columns when specifying relationships, see
-L<DBIx::Class::Relationship>.
+Note: you normally do want to define a primary key on your sources
+B<even if the underlying database table does not have a primary key>.
+See
+L<DBIx::Class::Intro/The Significance and Importance of Primary Keys>
+for more info.
 
 =cut
 
@@ -502,6 +519,19 @@
   return @{shift->_primaries||[]};
 }
 
+# a helper method that will automatically die with a descriptive message if
+# no pk is defined on the source in question. For internal use to save
+# on if @pks... boilerplate
+sub _pri_cols {
+  my $self = shift;
+  my @pcols = $self->primary_columns
+    or $self->throw_exception (sprintf(
+      "Operation requires a primary key to be declared on '%s' via set_primary_key",
+      $self->source_name,
+    ));
+  return @pcols;
+}
+
 =head2 add_unique_constraint
 
 =over 4
@@ -1187,15 +1217,9 @@
   return $found;
 }
 
-sub resolve_join {
-  carp 'resolve_join is a private method, stop calling it';
-  my $self = shift;
-  $self->_resolve_join (@_);
-}
-
 # Returns the {from} structure used to express JOIN conditions
 sub _resolve_join {
-  my ($self, $join, $alias, $seen, $jpath, $force_left) = @_;
+  my ($self, $join, $alias, $seen, $jpath, $parent_force_left) = @_;
 
   # we need a supplied one, because we do in-place modifications, no returns
   $self->throw_exception ('You must supply a seen hashref as the 3rd argument to _resolve_join')
@@ -1204,49 +1228,68 @@
   $self->throw_exception ('You must supply a joinpath arrayref as the 4th argument to _resolve_join')
     unless ref $jpath eq 'ARRAY';
 
-  $jpath = [@$jpath];
+  $jpath = [@$jpath]; # copy
 
-  if (ref $join eq 'ARRAY') {
+  if (not defined $join) {
+    return ();
+  }
+  elsif (ref $join eq 'ARRAY') {
     return
       map {
-        $self->_resolve_join($_, $alias, $seen, $jpath, $force_left);
+        $self->_resolve_join($_, $alias, $seen, $jpath, $parent_force_left);
       } @$join;
-  } elsif (ref $join eq 'HASH') {
-    return
-      map {
-        my $as = ($seen->{$_} ? join ('_', $_, $seen->{$_} + 1) : $_);  # the actual seen value will be incremented below
-        local $force_left->{force} = $force_left->{force};
-        (
-          $self->_resolve_join($_, $alias, $seen, [@$jpath], $force_left),
-          $self->related_source($_)->_resolve_join(
-            $join->{$_}, $as, $seen, [@$jpath, $_], $force_left
-          )
-        );
-      } keys %$join;
-  } elsif (ref $join) {
-    $self->throw_exception("No idea how to resolve join reftype ".ref $join);
-  } else {
+  }
+  elsif (ref $join eq 'HASH') {
 
-    return() unless defined $join;
+    my @ret;
+    for my $rel (keys %$join) {
 
-    my $count = ++$seen->{$join};
-    my $as = ($count > 1 ? "${join}_${count}" : $join);
+      my $rel_info = $self->relationship_info($rel)
+        or $self->throw_exception("No such relationship '$rel' on " . $self->source_name);
 
-    my $rel_info = $self->relationship_info($join);
-    $self->throw_exception("No such relationship ${join}") unless $rel_info;
-    my $type;
-    if ($force_left) {
-      $type = 'left';
-    } else {
-      $type = $rel_info->{attrs}{join_type} || '';
-      $force_left = 1 if lc($type) eq 'left';
+      my $force_left = $parent_force_left;
+      $force_left ||= lc($rel_info->{attrs}{join_type}||'') eq 'left';
+
+      # the actual seen value will be incremented by the recursion
+      my $as = $self->storage->relname_to_table_alias(
+        $rel, ($seen->{$rel} && $seen->{$rel} + 1)
+      );
+
+      push @ret, (
+        $self->_resolve_join($rel, $alias, $seen, [@$jpath], $force_left),
+        $self->related_source($rel)->_resolve_join(
+          $join->{$rel}, $as, $seen, [@$jpath, { $rel => $as }], $force_left
+        )
+      );
     }
+    return @ret;
 
+  }
+  elsif (ref $join) {
+    $self->throw_exception("No idea how to resolve join reftype ".ref $join);
+  }
+  else {
+    my $count = ++$seen->{$join};
+    my $as = $self->storage->relname_to_table_alias(
+      $join, ($count > 1 && $count)
+    );
+
+    my $rel_info = $self->relationship_info($join)
+      or $self->throw_exception("No such relationship $join on " . $self->source_name);
+
     my $rel_src = $self->related_source($join);
     return [ { $as => $rel_src->from,
                -source_handle => $rel_src->handle,
-               -join_type => $type,
-               -join_path => [@$jpath, $join],
+               -join_type => $parent_force_left
+                  ? 'left'
+                  : $rel_info->{attrs}{join_type}
+                ,
+               -join_path => [@$jpath, { $join => $as } ],
+               -is_single => (
+                  $rel_info->{attrs}{accessor}
+                    &&
+                  List::Util::first { $rel_info->{attrs}{accessor} eq $_ } (qw/single filter/)
+                ),
                -alias => $as,
                -relation_chain_depth => $seen->{-relation_chain_depth} || 0,
              },
@@ -1322,10 +1365,14 @@
         #warn "$self $k $for $v";
         unless ($for->has_column_loaded($v)) {
           if ($for->in_storage) {
-            $self->throw_exception(
-              "Column ${v} not loaded or not passed to new() prior to insert()"
-                ." on ${for} trying to resolve relationship (maybe you forgot "
-                  ."to call ->discard_changes to get defaults from the db)"
+            $self->throw_exception(sprintf
+              "Unable to resolve relationship '%s' from object %s: column '%s' not "
+            . 'loaded from storage (or not passed to new() prior to insert()). You '
+            . 'probably need to call ->discard_changes to get the server-side defaults '
+            . 'from the database.',
+              $as,
+              $for,
+              $v,
             );
           }
           return $UNRESOLVABLE_CONDITION;
@@ -1353,89 +1400,20 @@
   }
 }
 
-# Legacy code, needs to go entirely away (fully replaced by _resolve_prefetch)
-sub resolve_prefetch {
-  carp 'resolve_prefetch is a private method, stop calling it';
 
-  my ($self, $pre, $alias, $seen, $order, $collapse) = @_;
-  $seen ||= {};
-  if( ref $pre eq 'ARRAY' ) {
-    return
-      map { $self->resolve_prefetch( $_, $alias, $seen, $order, $collapse ) }
-        @$pre;
-  }
-  elsif( ref $pre eq 'HASH' ) {
-    my @ret =
-    map {
-      $self->resolve_prefetch($_, $alias, $seen, $order, $collapse),
-      $self->related_source($_)->resolve_prefetch(
-               $pre->{$_}, "${alias}.$_", $seen, $order, $collapse)
-    } keys %$pre;
-    return @ret;
-  }
-  elsif( ref $pre ) {
-    $self->throw_exception(
-      "don't know how to resolve prefetch reftype ".ref($pre));
-  }
-  else {
-    my $count = ++$seen->{$pre};
-    my $as = ($count > 1 ? "${pre}_${count}" : $pre);
-    my $rel_info = $self->relationship_info( $pre );
-    $self->throw_exception( $self->name . " has no such relationship '$pre'" )
-      unless $rel_info;
-    my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
-    my $rel_source = $self->related_source($pre);
-
-    if (exists $rel_info->{attrs}{accessor}
-         && $rel_info->{attrs}{accessor} eq 'multi') {
-      $self->throw_exception(
-        "Can't prefetch has_many ${pre} (join cond too complex)")
-        unless ref($rel_info->{cond}) eq 'HASH';
-      my $dots = @{[$as_prefix =~ m/\./g]} + 1; # +1 to match the ".${as_prefix}"
-      if (my ($fail) = grep { @{[$_ =~ m/\./g]} == $dots }
-                         keys %{$collapse}) {
-        my ($last) = ($fail =~ /([^\.]+)$/);
-        carp (
-          "Prefetching multiple has_many rels ${last} and ${pre} "
-          .(length($as_prefix)
-            ? "at the same level (${as_prefix}) "
-            : "at top level "
-          )
-          . 'will explode the number of row objects retrievable via ->next or ->all. '
-          . 'Use at your own risk.'
-        );
-      }
-      #my @col = map { (/^self\.(.+)$/ ? ("${as_prefix}.$1") : ()); }
-      #              values %{$rel_info->{cond}};
-      $collapse->{".${as_prefix}${pre}"} = [ $rel_source->primary_columns ];
-        # action at a distance. prepending the '.' allows simpler code
-        # in ResultSet->_collapse_result
-      my @key = map { (/^foreign\.(.+)$/ ? ($1) : ()); }
-                    keys %{$rel_info->{cond}};
-      my @ord = (ref($rel_info->{attrs}{order_by}) eq 'ARRAY'
-                   ? @{$rel_info->{attrs}{order_by}}
-                   : (defined $rel_info->{attrs}{order_by}
-                       ? ($rel_info->{attrs}{order_by})
-                       : ()));
-      push(@$order, map { "${as}.$_" } (@key, @ord));
-    }
-
-    return map { [ "${as}.$_", "${as_prefix}${pre}.$_", ] }
-      $rel_source->columns;
-  }
-}
-
 # Accepts one or more relationships for the current source and returns an
 # array of column names for each of those relationships. Column names are
 # prefixed relative to the current source, in accordance with where they appear
-# in the supplied relationships. Needs an alias_map generated by
-# $rs->_joinpath_aliases
+# in the supplied relationships.
 
 sub _resolve_prefetch {
   my ($self, $pre, $alias, $alias_map, $order, $collapse, $pref_path) = @_;
   $pref_path ||= [];
 
-  if( ref $pre eq 'ARRAY' ) {
+  if (not defined $pre) {
+    return ();
+  }
+  elsif( ref $pre eq 'ARRAY' ) {
     return
       map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ) }
         @$pre;
@@ -1458,20 +1436,19 @@
     $p = $p->{$_} for (@$pref_path, $pre);
 
     $self->throw_exception (
-      "Unable to resolve prefetch $pre - join alias map does not contain an entry for path: "
+      "Unable to resolve prefetch '$pre' - join alias map does not contain an entry for path: "
       . join (' -> ', @$pref_path, $pre)
     ) if (ref $p->{-join_aliases} ne 'ARRAY' or not @{$p->{-join_aliases}} );
 
     my $as = shift @{$p->{-join_aliases}};
 
     my $rel_info = $self->relationship_info( $pre );
-    $self->throw_exception( $self->name . " has no such relationship '$pre'" )
+    $self->throw_exception( $self->source_name . " has no such relationship '$pre'" )
       unless $rel_info;
     my $as_prefix = ($alias =~ /^.*?\.(.+)$/ ? $1.'.' : '');
     my $rel_source = $self->related_source($pre);
 
-    if (exists $rel_info->{attrs}{accessor}
-         && $rel_info->{attrs}{accessor} eq 'multi') {
+    if ($rel_info->{attrs}{accessor} && $rel_info->{attrs}{accessor} eq 'multi') {
       $self->throw_exception(
         "Can't prefetch has_many ${pre} (join cond too complex)")
         unless ref($rel_info->{cond}) eq 'HASH';
@@ -1491,14 +1468,15 @@
       }
       #my @col = map { (/^self\.(.+)$/ ? ("${as_prefix}.$1") : ()); }
       #              values %{$rel_info->{cond}};
-      $collapse->{".${as_prefix}${pre}"} = [ $rel_source->primary_columns ];
+      $collapse->{".${as_prefix}${pre}"} = [ $rel_source->_pri_cols ];
         # action at a distance. prepending the '.' allows simpler code
         # in ResultSet->_collapse_result
       my @key = map { (/^foreign\.(.+)$/ ? ($1) : ()); }
                     keys %{$rel_info->{cond}};
       my @ord = (ref($rel_info->{attrs}{order_by}) eq 'ARRAY'
                    ? @{$rel_info->{attrs}{order_by}}
-                   : (defined $rel_info->{attrs}{order_by}
+   
+                : (defined $rel_info->{attrs}{order_by}
                        ? ($rel_info->{attrs}{order_by})
                        : ()));
       push(@$order, map { "${as}.$_" } (@key, @ord));
@@ -1526,7 +1504,7 @@
 sub related_source {
   my ($self, $rel) = @_;
   if( !$self->has_relationship( $rel ) ) {
-    $self->throw_exception("No such relationship '$rel'");
+    $self->throw_exception("No such relationship '$rel' on " . $self->source_name);
   }
   return $self->schema->source($self->relationship_info($rel)->{source});
 }
@@ -1548,7 +1526,7 @@
 sub related_class {
   my ($self, $rel) = @_;
   if( !$self->has_relationship( $rel ) ) {
-    $self->throw_exception("No such relationship '$rel'");
+    $self->throw_exception("No such relationship '$rel' on " . $self->source_name);
   }
   return $self->schema->class($self->relationship_info($rel)->{source});
 }
@@ -1561,7 +1539,7 @@
 =cut
 
 sub handle {
-    return new DBIx::Class::ResultSourceHandle({
+    return DBIx::Class::ResultSourceHandle->new({
         schema         => $_[0]->schema,
         source_moniker => $_[0]->source_name
     });
@@ -1575,11 +1553,13 @@
 
 sub throw_exception {
   my $self = shift;
+
   if (defined $self->schema) {
     $self->schema->throw_exception(@_);
-  } else {
-    croak(@_);
   }
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 =head2 source_info
@@ -1614,7 +1594,7 @@
   __PACKAGE__->column_info_from_storage(1);
 
 Enables the on-demand automatic loading of the above column
-metadata from storage as neccesary.  This is *deprecated*, and
+metadata from storage as necessary.  This is *deprecated*, and
 should not be used.  It will be removed before 1.0.
 
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSourceHandle.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSourceHandle.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSourceHandle.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -78,25 +78,26 @@
 
     my $to_serialize = { %$self };
 
-    my $class = $self->schema->class($self->source_moniker);
-    $to_serialize->{schema} = $class;
+    delete $to_serialize->{schema};
+    $to_serialize->{_frozen_from_class} = $self->schema->class($self->source_moniker);
+
     return (Storable::freeze($to_serialize));
 }
 
 =head2 STORABLE_thaw
 
 Thaws frozen handle. Resets the internal schema reference to the package
-variable C<$thaw_schema>. The recomened way of setting this is to use 
+variable C<$thaw_schema>. The recommended way of setting this is to use 
 C<< $schema->thaw($ice) >> which handles this for you.
 
 =cut
 
 
 sub STORABLE_thaw {
-    my ($self, $cloning,$ice) = @_;
+    my ($self, $cloning, $ice) = @_;
     %$self = %{ Storable::thaw($ice) };
 
-    my $class = delete $self->{schema};
+    my $class = delete $self->{_frozen_from_class};
     if( $thaw_schema ) {
         $self->{schema} = $thaw_schema;
     }
@@ -105,7 +106,8 @@
         $self->{schema} = $rs->schema if $rs;
     }
 
-    carp "Unable to restore schema" unless $self->{schema};
+    carp "Unable to restore schema. Look at 'freeze' and 'thaw' methods in DBIx::Class::Schema."
+        unless $self->{schema};
 }
 
 =head1 AUTHOR

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSourceProxy.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSourceProxy.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/ResultSourceProxy.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -37,11 +37,16 @@
   my $source = $class->result_source_instance;
   $source->add_columns(@cols);
   foreach my $c (grep { !ref } @cols) {
+    # If this is an augment definition get the real colname.
+    $c =~ s/^\+//;
+
     $class->register_column($c => $source->column_info($c));
   }
 }
 
-*add_column = \&add_columns;
+sub add_column {
+  shift->add_columns(@_);
+}
 
 sub has_column {
   shift->result_source_instance->has_column(@_);
@@ -73,6 +78,10 @@
   shift->result_source_instance->primary_columns(@_);
 }
 
+sub _pri_cols {
+  shift->result_source_instance->_pri_cols(@_);
+}
+
 sub add_unique_constraint {
   shift->result_source_instance->add_unique_constraint(@_);
 }
@@ -104,4 +113,7 @@
   shift->result_source_instance->relationship_info(@_);
 }
 
+sub has_relationship {
+  shift->result_source_instance->has_relationship(@_);
+}
 1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Row.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Row.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Row.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -4,9 +4,9 @@
 use warnings;
 
 use base qw/DBIx::Class/;
-use Carp::Clan qw/^DBIx::Class/;
+
+use DBIx::Class::Exception;
 use Scalar::Util ();
-use Scope::Guard;
 
 ###
 ### Internal method
@@ -105,26 +105,40 @@
 
 sub __new_related_find_or_new_helper {
   my ($self, $relname, $data) = @_;
-  if ($self->__their_pk_needs_us($relname, $data)) {
+
+  my $rsrc = $self->result_source;
+
+  # create a mock-object so all new/set_column component overrides will run:
+  my $rel_rs = $rsrc->related_source($relname)->resultset;
+  my $new_rel_obj = $rel_rs->new_result($data);
+  my $proc_data = { $new_rel_obj->get_columns };
+
+  if ($self->__their_pk_needs_us($relname)) {
     MULTICREATE_DEBUG and warn "MC $self constructing $relname via new_result";
-    return $self->result_source
-                ->related_source($relname)
-                ->resultset
-                ->new_result($data);
+    return $new_rel_obj;
   }
-  if ($self->result_source->_pk_depends_on($relname, $data)) {
-    MULTICREATE_DEBUG and warn "MC $self constructing $relname via find_or_new";
-    return $self->result_source
-                ->related_source($relname)
-                ->resultset
-                ->find_or_new($data);
+  elsif ($rsrc->_pk_depends_on($relname, $proc_data )) {
+    if (! keys %$proc_data) {
+      # there is nothing to search for - blind create
+      MULTICREATE_DEBUG and warn "MC $self constructing default-insert $relname";
+    }
+    else {
+      MULTICREATE_DEBUG and warn "MC $self constructing $relname via find_or_new";
+      # this is not *really* find or new, as we don't want to double-new the
+      # data (thus potentially double encoding or whatever)
+      my $exists = $rel_rs->find ($proc_data);
+      return $exists if $exists;
+    }
+    return $new_rel_obj;
   }
-  MULTICREATE_DEBUG and warn "MC $self constructing $relname via find_or_new_related";
-  return $self->find_or_new_related($relname, $data);
+  else {
+    my $us = $rsrc->source_name;
+    $self->throw_exception ("'$us' neither depends nor is depended on by '$relname', something is wrong...");
+  }
 }
 
 sub __their_pk_needs_us { # this should maybe be in resultsource.
-  my ($self, $relname, $data) = @_;
+  my ($self, $relname) = @_;
   my $source = $self->result_source;
   my $reverse = $source->reverse_relationship_info($relname);
   my $rel_source = $source->related_source($relname);
@@ -155,7 +169,7 @@
     $new->result_source($source);
   }
 
-  if (my $related = delete $attrs->{-from_resultset}) {
+  if (my $related = delete $attrs->{-cols_from_relations}) {
     @{$new->{_ignore_at_insert}={}}{@$related} = ();
   }
 
@@ -168,11 +182,11 @@
     foreach my $key (keys %$attrs) {
       if (ref $attrs->{$key}) {
         ## Can we extract this lot to use with update(_or .. ) ?
-        confess "Can't do multi-create without result source" unless $source;
+        $new->throw_exception("Can't do multi-create without result source")
+          unless $source;
         my $info = $source->relationship_info($key);
-        if ($info && $info->{attrs}{accessor}
-          && $info->{attrs}{accessor} eq 'single')
-        {
+        my $acc_type = $info->{attrs}{accessor} || '';
+        if ($acc_type eq 'single') {
           my $rel_obj = delete $attrs->{$key};
           if(!Scalar::Util::blessed($rel_obj)) {
             $rel_obj = $new->__new_related_find_or_new_helper($key, $rel_obj);
@@ -187,9 +201,8 @@
 
           $related->{$key} = $rel_obj;
           next;
-        } elsif ($info && $info->{attrs}{accessor}
-            && $info->{attrs}{accessor} eq 'multi'
-            && ref $attrs->{$key} eq 'ARRAY') {
+        }
+        elsif ($acc_type eq 'multi' && ref $attrs->{$key} eq 'ARRAY' ) {
           my $others = delete $attrs->{$key};
           my $total = @$others;
           my @objects;
@@ -209,9 +222,8 @@
           }
           $related->{$key} = \@objects;
           next;
-        } elsif ($info && $info->{attrs}{accessor}
-          && $info->{attrs}{accessor} eq 'filter')
-        {
+        }
+        elsif ($acc_type eq 'filter') {
           ## 'filter' should disappear and get merged in with 'single' above!
           my $rel_obj = delete $attrs->{$key};
           if(!Scalar::Util::blessed($rel_obj)) {
@@ -302,13 +314,21 @@
 
       MULTICREATE_DEBUG and warn "MC $self pre-reconstructing $relname $rel_obj\n";
 
-      my $them = { %{$rel_obj->{_relationship_data} || {} }, $rel_obj->get_inflated_columns };
-      my $re = $self->result_source
-                    ->related_source($relname)
-                    ->resultset
-                    ->find_or_create($them);
+      my $them = { %{$rel_obj->{_relationship_data} || {} }, $rel_obj->get_columns };
+      my $existing;
 
-      %{$rel_obj} = %{$re};
+      # if there are no keys - nothing to search for
+      if (keys %$them and $existing = $self->result_source
+                                           ->related_source($relname)
+                                           ->resultset
+                                           ->find($them)
+      ) {
+        %{$rel_obj} = %{$existing};
+      }
+      else {
+        $rel_obj->insert;
+      }
+
       $self->{_rel_in_storage}{$relname} = 1;
     }
 
@@ -322,34 +342,50 @@
     $rollback_guard ||= $source->storage->txn_scope_guard
   }
 
+  ## PK::Auto
+  my %auto_pri;
+  my $auto_idx = 0;
+  for ($self->primary_columns) {
+    if (
+      not defined $self->get_column($_)
+        ||
+      (ref($self->get_column($_)) eq 'SCALAR')
+    ) {
+      my $col_info = $source->column_info($_);
+      $auto_pri{$_} = $auto_idx++ unless $col_info->{auto_nextval};   # auto_nextval's are pre-fetched in the storage
+    }
+  }
+
   MULTICREATE_DEBUG and do {
     no warnings 'uninitialized';
     warn "MC $self inserting (".join(', ', $self->get_columns).")\n";
   };
-  my $updated_cols = $source->storage->insert($source, { $self->get_columns });
+  my $updated_cols = $source->storage->insert(
+    $source,
+    { $self->get_columns },
+    (keys %auto_pri) && $source->storage->_supports_insert_returning
+      ? { returning => [ sort { $auto_pri{$a} <=> $auto_pri{$b} } keys %auto_pri ] }
+      : ()
+    ,
+  );
+
   foreach my $col (keys %$updated_cols) {
     $self->store_column($col, $updated_cols->{$col});
+    delete $auto_pri{$col};
   }
 
-  ## PK::Auto
-  my @auto_pri = grep {
-                  (not defined $self->get_column($_))
-                    ||
-                  (ref($self->get_column($_)) eq 'SCALAR')
-                 } $self->primary_columns;
-
-  if (@auto_pri) {
-    MULTICREATE_DEBUG and warn "MC $self fetching missing PKs ".join(', ', @auto_pri)."\n";
+  if (keys %auto_pri) {
+    my @missing = sort { $auto_pri{$a} <=> $auto_pri{$b} } keys %auto_pri;
+    MULTICREATE_DEBUG and warn "MC $self fetching missing PKs ".join(', ', @missing )."\n";
     my $storage = $self->result_source->storage;
     $self->throw_exception( "Missing primary key but Storage doesn't support last_insert_id" )
       unless $storage->can('last_insert_id');
-    my @ids = $storage->last_insert_id($self->result_source, at auto_pri);
+    my @ids = $storage->last_insert_id($self->result_source, @missing);
     $self->throw_exception( "Can't get last insert id" )
-      unless (@ids == @auto_pri);
-    $self->store_column($auto_pri[$_] => $ids[$_]) for 0 .. $#ids;
+      unless (@ids == @missing);
+    $self->store_column($missing[$_] => $ids[$_]) for 0 .. $#missing;
   }
 
-
   $self->{_dirty_columns} = {};
   $self->{related_resultsets} = {};
 
@@ -369,7 +405,7 @@
       foreach my $obj (@cands) {
         $obj->set_from_related($_, $self) for keys %$reverse;
         my $them = { %{$obj->{_relationship_data} || {} }, $obj->get_inflated_columns };
-        if ($self->__their_pk_needs_us($relname, $them)) {
+        if ($self->__their_pk_needs_us($relname)) {
           if (exists $self->{_ignore_at_insert}{$relname}) {
             MULTICREATE_DEBUG and warn "MC $self skipping post-insert on $relname";
           } else {
@@ -423,7 +459,7 @@
 sub in_storage {
   my ($self, $val) = @_;
   $self->{_in_storage} = $val if @_ > 1;
-  return $self->{_in_storage};
+  return $self->{_in_storage} ? 1 : 0;
 }
 
 =head2 update
@@ -442,9 +478,13 @@
 according to L</in_storage>.
 
 This method issues an SQL UPDATE query to commit any changes to the
-object to the database if required.
+object to the database if required (see L</get_dirty_columns>).
+It throws an exception if a proper WHERE clause uniquely identifying
+the database row can not be constructed (see
+L<significance of primary keys|DBIx::Class::Manual::Intro/The Significance and Importance of Primary Keys>
+for more details).
 
-Also takes an optional hashref of C<< column_name => value> >> pairs
+Also takes an optional hashref of C<< column_name => value >> pairs
 to update on the object first. Be aware that the hashref will be
 passed to C<set_inflated_columns>, which might edit it in place, so
 don't rely on it being the same after a call to C<update>.  If you
@@ -452,7 +492,7 @@
 to C<update>, e.g. ( { %{ $href } } )
 
 If the values passed or any of the column values set on the object
-contain scalar references, eg:
+contain scalar references, e.g.:
 
   $row->last_modified(\'NOW()');
   # OR
@@ -478,18 +518,21 @@
 
 sub update {
   my ($self, $upd) = @_;
-  $self->throw_exception( "Not in database" ) unless $self->in_storage;
-  my $ident_cond = $self->ident_condition;
-  $self->throw_exception("Cannot safely update a row in a PK-less table")
-    if ! keys %$ident_cond;
 
+  my $ident_cond = $self->{_orig_ident} || $self->ident_condition;
+
   $self->set_inflated_columns($upd) if $upd;
   my %to_update = $self->get_dirty_columns;
   return $self unless keys %to_update;
+
+  $self->throw_exception( "Not in database" ) unless $self->in_storage;
+
+  $self->throw_exception('Unable to update a row with incomplete or no identity')
+    if ! keys %$ident_cond;
+
   my $rows = $self->result_source->storage->update(
-               $self->result_source, \%to_update,
-               $self->{_orig_ident} || $ident_cond
-             );
+    $self->result_source, \%to_update, $ident_cond
+  );
   if ($rows == 0) {
     $self->throw_exception( "Can't update ${self}: row not found" );
   } elsif ($rows > 1) {
@@ -497,7 +540,7 @@
   }
   $self->{_dirty_columns} = {};
   $self->{related_resultsets} = {};
-  undef $self->{_orig_ident};
+  delete $self->{_orig_ident};
   return $self;
 }
 
@@ -514,8 +557,10 @@
 =back
 
 Throws an exception if the object is not in the database according to
-L</in_storage>. Runs an SQL DELETE statement using the primary key
-values to locate the row.
+L</in_storage>. Also throws an exception if a proper WHERE clause
+uniquely identifying the database row can not be constructed (see
+L<significance of primary keys|DBIx::Class::Manual::Intro/The Significance and Importance of Primary Keys>
+for more details).
 
 The object is still perfectly usable, but L</in_storage> will
 now return 0 and the object must be reinserted using L</insert>
@@ -526,7 +571,9 @@
 this behaviour off, pass C<< cascade_delete => 0 >> in the C<$attr>
 hashref of the relationship, see L<DBIx::Class::Relationship>. Any
 database-level cascade or restrict will take precedence over a
-DBIx-Class-based cascading delete.
+DBIx-Class-based cascading delete, since DBIx-Class B<deletes the
+main row first> and only then attempts to delete any remaining related
+rows.
 
 If you delete an object within a txn_do() (see L<DBIx::Class::Storage/txn_do>)
 and the transaction subsequently fails, the row object will remain marked as
@@ -544,17 +591,19 @@
   my $self = shift;
   if (ref $self) {
     $self->throw_exception( "Not in database" ) unless $self->in_storage;
+
     my $ident_cond = $self->{_orig_ident} || $self->ident_condition;
-    $self->throw_exception("Cannot safely delete a row in a PK-less table")
+    $self->throw_exception('Unable to delete a row with incomplete or no identity')
       if ! keys %$ident_cond;
-    foreach my $column (keys %$ident_cond) {
-            $self->throw_exception("Can't delete the object unless it has loaded the primary keys")
-              unless exists $self->{_column_data}{$column};
-    }
+
     $self->result_source->storage->delete(
-      $self->result_source, $ident_cond);
+      $self->result_source, $ident_cond
+    );
+
+    delete $self->{_orig_ident};
     $self->in_storage(undef);
-  } else {
+  }
+  else {
     $self->throw_exception("Can't do class delete without a ResultSource instance")
       unless $self->can('result_source_instance');
     my $attrs = @_ > 1 && ref $_[$#_] eq 'HASH' ? { %{pop(@_)} } : {};
@@ -750,12 +799,43 @@
 
 sub get_inflated_columns {
   my $self = shift;
-  return map {
-    my $accessor = $self->column_info($_)->{'accessor'} || $_;
-    ($_ => $self->$accessor);
-  } grep $self->has_column_loaded($_), $self->columns;
+
+  my %loaded_colinfo = (map
+    { $_ => $self->column_info($_) }
+    (grep { $self->has_column_loaded($_) } $self->columns)
+  );
+
+  my %inflated;
+  for my $col (keys %loaded_colinfo) {
+    if (exists $loaded_colinfo{$col}{accessor}) {
+      my $acc = $loaded_colinfo{$col}{accessor};
+      $inflated{$col} = $self->$acc if defined $acc;
+    }
+    else {
+      $inflated{$col} = $self->$col;
+    }
+  }
+
+  # return all loaded columns with the inflations overlayed on top
+  return ($self->get_columns, %inflated);
 }
 
+sub _is_column_numeric {
+   my ($self, $column) = @_;
+    my $colinfo = $self->column_info ($column);
+
+    # cache for speed (the object may *not* have a resultsource instance)
+    if (not defined $colinfo->{is_numeric} && $self->_source_handle) {
+      $colinfo->{is_numeric} =
+        $self->result_source->schema->storage->is_datatype_numeric ($colinfo->{data_type})
+          ? 1
+          : 0
+        ;
+    }
+
+    return $colinfo->{is_numeric};
+}
+
 =head2 set_column
 
   $row->set_column($col => $val);
@@ -781,45 +861,21 @@
 sub set_column {
   my ($self, $column, $new_value) = @_;
 
-  $self->{_orig_ident} ||= $self->ident_condition;
+  # if we can't get an ident condition on first try - mark the object as unidentifiable
+  $self->{_orig_ident} ||= (eval { $self->ident_condition }) || {};
+
   my $old_value = $self->get_column($column);
+  $new_value = $self->store_column($column, $new_value);
 
-  $self->store_column($column, $new_value);
+  my $dirty =
+    $self->{_dirty_columns}{$column}
+      ||
+    $self->in_storage # no point tracking dirtyness on uninserted data
+      ? ! $self->_eq_column_values ($column, $old_value, $new_value)
+      : 1
+  ;
 
-  my $dirty;
-  if (!$self->in_storage) { # no point tracking dirtyness on uninserted data
-    $dirty = 1;
-  }
-  elsif (defined $old_value xor defined $new_value) {
-    $dirty = 1;
-  }
-  elsif (not defined $old_value) {  # both undef
-    $dirty = 0;
-  }
-  elsif ($old_value eq $new_value) {
-    $dirty = 0;
-  }
-  else {  # do a numeric comparison if datatype allows it
-    my $colinfo = $self->column_info ($column);
-
-    # cache for speed (the object may *not* have a resultsource instance)
-    if (not defined $colinfo->{is_numeric} && $self->_source_handle) {
-      $colinfo->{is_numeric} =
-        $self->result_source->schema->storage->is_datatype_numeric ($colinfo->{data_type})
-          ? 1
-          : 0
-        ;
-    }
-
-    if ($colinfo->{is_numeric}) {
-      $dirty = $old_value != $new_value;
-    }
-    else {
-      $dirty = 1;
-    }
-  }
-
-  # sadly the update code just checks for keys, not for their value
+  # FIXME sadly the update code just checks for keys, not for their value
   $self->{_dirty_columns}{$column} = 1 if $dirty;
 
   # XXX clear out the relation cache for this column
@@ -828,6 +884,26 @@
   return $new_value;
 }
 
+sub _eq_column_values {
+  my ($self, $col, $old, $new) = @_;
+
+  if (defined $old xor defined $new) {
+    return 0;
+  }
+  elsif (not defined $old) {  # both undef
+    return 1;
+  }
+  elsif ($old eq $new) {
+    return 1;
+  }
+  elsif ($self->_is_column_numeric($col)) {  # do a numeric comparison if datatype allows it
+    return $old == $new;
+  }
+  else {
+    return 0;
+  }
+}
+
 =head2 set_columns
 
   $row->set_columns({ $col => $val, ... });
@@ -878,7 +954,7 @@
 L<DBIx::Class::Relationship/has_many> key, and create the related
 objects if necessary.
 
-Be aware that the input hashref might be edited in place, so dont rely
+Be aware that the input hashref might be edited in place, so don't rely
 on it being the same after a call to C<set_inflated_columns>. If you
 need to preserve the hashref, it is sufficient to pass a shallow copy
 to C<set_inflated_columns>, e.g. ( { %{ $href } } )
@@ -892,21 +968,18 @@
   foreach my $key (keys %$upd) {
     if (ref $upd->{$key}) {
       my $info = $self->relationship_info($key);
-      if ($info && $info->{attrs}{accessor}
-        && $info->{attrs}{accessor} eq 'single')
-      {
+      my $acc_type = $info->{attrs}{accessor} || '';
+      if ($acc_type eq 'single') {
         my $rel = delete $upd->{$key};
         $self->set_from_related($key => $rel);
         $self->{_relationship_data}{$key} = $rel;
-      } elsif ($info && $info->{attrs}{accessor}
-        && $info->{attrs}{accessor} eq 'multi') {
-          $self->throw_exception(
-            "Recursive update is not supported over relationships of type multi ($key)"
-          );
       }
-      elsif ($self->has_column($key)
-        && exists $self->column_info($key)->{_inflate_info})
-      {
+      elsif ($acc_type eq 'multi') {
+        $self->throw_exception(
+          "Recursive update is not supported over relationships of type '$acc_type' ($key)"
+        );
+      }
+      elsif ($self->has_column($key) && exists $self->column_info($key)->{_inflate_info}) {
         $self->set_inflated_column($key, delete $upd->{$key});
       }
     }
@@ -935,7 +1008,7 @@
 the new object.
 
 Relationships will be followed by the copy procedure B<only> if the
-relationship specifes a true value for its
+relationship specifies a true value for its
 L<cascade_copy|DBIx::Class::Relationship::Base> attribute. C<cascade_copy>
 is set by default on C<has_many> relationships and unset on all others.
 
@@ -958,7 +1031,7 @@
   $new->insert;
 
   # Its possible we'll have 2 relations to the same Source. We need to make
-  # sure we don't try to insert the same row twice esle we'll violate unique
+  # sure we don't try to insert the same row twice else we'll violate unique
   # constraints
   my $rels_copied = {};
 
@@ -1045,9 +1118,10 @@
   my ($source_handle) = $source;
 
   if ($source->isa('DBIx::Class::ResultSourceHandle')) {
-      $source = $source_handle->resolve
-  } else {
-      $source_handle = $source->handle
+    $source = $source_handle->resolve
+  } 
+  else {
+    $source_handle = $source->handle
   }
 
   my $new = {
@@ -1056,17 +1130,29 @@
   };
   bless $new, (ref $class || $class);
 
-  my $schema;
   foreach my $pre (keys %{$prefetch||{}}) {
-    my $pre_val = $prefetch->{$pre};
-    my $pre_source = $source->related_source($pre);
-    $class->throw_exception("Can't prefetch non-existent relationship ${pre}")
-      unless $pre_source;
-    if (ref($pre_val->[0]) eq 'ARRAY') { # multi
-      my @pre_objects;
 
-      for my $me_pref (@$pre_val) {
+    my $pre_source = $source->related_source($pre)
+      or $class->throw_exception("Can't prefetch non-existent relationship ${pre}");
 
+    my $accessor = $source->relationship_info($pre)->{attrs}{accessor}
+      or $class->throw_exception("No accessor for prefetched $pre");
+
+    my @pre_vals;
+    if (ref $prefetch->{$pre}[0] eq 'ARRAY') {
+      @pre_vals = @{$prefetch->{$pre}};
+    }
+    elsif ($accessor eq 'multi') {
+      $class->throw_exception("Implicit prefetch (via select/columns) not supported with accessor 'multi'");
+    }
+    else {
+      @pre_vals = $prefetch->{$pre};
+    }
+
+    my @pre_objects;
+    for my $me_pref (@pre_vals) {
+
+        # FIXME - this should not be necessary
         # the collapser currently *could* return bogus elements with all
         # columns set to undef
         my $has_def;
@@ -1081,29 +1167,16 @@
         push @pre_objects, $pre_source->result_class->inflate_result(
           $pre_source, @$me_pref
         );
-      }
+    }
 
-      $new->related_resultset($pre)->set_cache(\@pre_objects);
-    } elsif (defined $pre_val->[0]) {
-      my $fetched;
-      unless ($pre_source->primary_columns == grep { exists $pre_val->[0]{$_}
-         and !defined $pre_val->[0]{$_} } $pre_source->primary_columns)
-      {
-        $fetched = $pre_source->result_class->inflate_result(
-                      $pre_source, @{$pre_val});
-      }
-      my $accessor = $source->relationship_info($pre)->{attrs}{accessor};
-      $class->throw_exception("No accessor for prefetched $pre")
-       unless defined $accessor;
-      if ($accessor eq 'single') {
-        $new->{_relationship_data}{$pre} = $fetched;
-      } elsif ($accessor eq 'filter') {
-        $new->{_inflated_column}{$pre} = $fetched;
-      } else {
-       $class->throw_exception("Implicit prefetch (via select/columns) not supported with accessor '$accessor'");
-      }
-      $new->related_resultset($pre)->set_cache([ $fetched ]);
+    if ($accessor eq 'single') {
+      $new->{_relationship_data}{$pre} = $pre_objects[0];
     }
+    elsif ($accessor eq 'filter') {
+      $new->{_inflated_column}{$pre} = $pre_objects[0];
+    }
+
+    $new->related_resultset($pre)->set_cache(\@pre_objects);
   }
 
   $new->in_storage (1);
@@ -1255,8 +1328,11 @@
 =back
 
 Fetches a fresh copy of the Row object from the database and returns it.
-
-If passed the \%attrs argument, will first apply these attributes to
+Throws an exception if a proper WHERE clause identifying the database row
+can not be constructed (i.e. if the original object does not contain its
+entire
+ L<primary key|DBIx::Class::Manual::Intro/The Significance and Importance of Primary Keys>
+). If passed the \%attrs argument, will first apply these attributes to
 the resultset used to find the row.
 
 This copy can then be used to compare to an existing row object, to
@@ -1280,13 +1356,22 @@
       $resultset = $resultset->search(undef, $attrs);
     }
 
-    return $resultset->find($self->{_orig_ident} || $self->ident_condition);
+    my $ident_cond = $self->{_orig_ident} || $self->ident_condition;
+
+    $self->throw_exception('Unable to requery a row with incomplete or no identity')
+      if ! keys %$ident_cond;
+
+    return $resultset->find($ident_cond);
 }
 
 =head2 discard_changes ($attrs)
 
 Re-selects the row from the database, losing any changes that had
-been made.
+been made. Throws an exception if a proper WHERE clause identifying
+the database row can not be constructed (i.e. if the original object
+does not contain its entire
+L<primary key|DBIx::Class::Manual::Intro/The Significance and Importance of Primary Keys>
+).
 
 This method can also be used to refresh from storage, retrieving any
 changes made since the row was last read from storage.
@@ -1298,7 +1383,6 @@
 
 sub discard_changes {
   my ($self, $attrs) = @_;
-  delete $self->{_dirty_columns};
   return unless $self->in_storage; # Don't reload if we aren't real!
 
   # add a replication default to read from the master only
@@ -1330,11 +1414,13 @@
 
 sub throw_exception {
   my $self=shift;
+
   if (ref $self && ref $self->result_source && $self->result_source->schema) {
-    $self->result_source->schema->throw_exception(@_);
-  } else {
-    croak(@_);
+    $self->result_source->schema->throw_exception(@_)
   }
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 =head2 id

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/MSSQL.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/MSSQL.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/MSSQL.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -5,29 +5,10 @@
 use Carp::Clan qw/^DBIx::Class|^SQL::Abstract/;
 
 #
-# MSSQL is retarded wrt TOP (crappy limit) and ordering.
-# One needs to add a TOP to *all* ordered subqueries, if
-# TOP has been used in the statement at least once.
-# Do it here.
+# MSSQL does not support ... OVER() ... RNO limits
 #
-sub select {
-  my $self = shift;
-
-  my ($sql, @bind) = $self->SUPER::select (@_);
-
-  # ordering was requested and there are at least 2 SELECT/FROM pairs
-  # (thus subquery), and there is no TOP specified
-  if (
-    $sql =~ /\bSELECT\b .+? \bFROM\b .+? \bSELECT\b .+? \bFROM\b/isx
-      &&
-    $sql !~ /^ \s* SELECT \s+ TOP \s+ \d+ /xi
-      &&
-    scalar $self->_order_by_chunks ($_[3]->{order_by})
-  ) {
-    $sql =~ s/^ \s* SELECT \s/SELECT TOP 100 PERCENT /xi;
-  }
-
-  return wantarray ? ($sql, @bind) : $sql;
+sub _rno_default_order {
+  return \ '(SELECT(1))';
 }
 
 1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/MySQL.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/MySQL.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/MySQL.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -21,4 +21,14 @@
   return $self->SUPER::insert (@_);
 }
 
+# Allow STRAIGHT_JOIN's
+sub _generate_join_clause {
+    my ($self, $join_type) = @_;
+
+    if( $join_type && $join_type =~ /^STRAIGHT\z/i ) {
+        return ' STRAIGHT_JOIN '
+    }
+
+    return $self->SUPER::_generate_join_clause( $join_type );
+}
 1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/OracleJoins.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/OracleJoins.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/OracleJoins.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -5,13 +5,13 @@
 use Carp::Clan qw/^DBIx::Class|^SQL::Abstract/;
 
 sub select {
-  my ($self, $table, $fields, $where, $order, @rest) = @_;
+  my ($self, $table, $fields, $where, $rs_attrs, @rest) = @_;
 
   if (ref($table) eq 'ARRAY') {
     $where = $self->_oracle_joins($where, @{ $table });
   }
 
-  return $self->SUPER::select($table, $fields, $where, $order, @rest);
+  return $self->SUPER::select($table, $fields, $where, $rs_attrs, @rest);
 }
 
 sub _recurse_from {
@@ -96,8 +96,7 @@
 
 This module was originally written to support Oracle < 9i where ANSI joins
 weren't supported at all, but became the module for Oracle >= 8 because
-Oracle's optimising of ANSI joins is horrible.  (See:
-http://scsys.co.uk:8001/7495)
+Oracle's optimising of ANSI joins is horrible.
 
 =head1 SYNOPSIS
 

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/SQLite.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/SQLite.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks/SQLite.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,21 @@
+package # Hide from PAUSE
+  DBIx::Class::SQLAHacks::SQLite;
+
+use base qw( DBIx::Class::SQLAHacks );
+use Carp::Clan qw/^DBIx::Class|^SQL::Abstract/;
+
+#
+# SQLite does not understand SELECT ... FOR UPDATE
+# Disable it here
+#
+sub _parse_rs_attrs {
+  my ($self, $attrs) = @_;
+
+  return $self->SUPER::_parse_rs_attrs ($attrs)
+    if ref $attrs ne 'HASH';
+
+  local $attrs->{for};
+  return $self->SUPER::_parse_rs_attrs ($attrs);
+}
+
+1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/SQLAHacks.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -9,6 +9,7 @@
 use strict;
 use warnings;
 use Carp::Clan qw/^DBIx::Class|^SQL::Abstract/;
+use Sub::Name();
 
 BEGIN {
   # reinstall the carp()/croak() functions imported into SQL::Abstract
@@ -18,17 +19,15 @@
   for my $f (qw/carp croak/) {
 
     my $orig = \&{"SQL::Abstract::$f"};
-    *{"SQL::Abstract::$f"} = sub {
-
-      local $Carp::CarpLevel = 1;   # even though Carp::Clan ignores this, $orig will not
-
-      if (Carp::longmess() =~ /DBIx::Class::SQLAHacks::[\w]+ .+? called \s at/x) {
-        __PACKAGE__->can($f)->(@_);
-      }
-      else {
-        $orig->(@_);
-      }
-    }
+    *{"SQL::Abstract::$f"} = Sub::Name::subname "SQL::Abstract::$f" =>
+      sub {
+        if (Carp::longmess() =~ /DBIx::Class::SQLAHacks::[\w]+ .+? called \s at/x) {
+          __PACKAGE__->can($f)->(@_);
+        }
+        else {
+          goto $orig;
+        }
+      };
   }
 }
 
@@ -47,270 +46,347 @@
   $self;
 }
 
-# Some databases (sqlite) do not handle multiple parenthesis
-# around in/between arguments. A tentative x IN ( (1, 2 ,3) )
-# is interpreted as x IN 1 or something similar.
+# !!! THIS IS ALSO HORRIFIC !!! /me ashamed
 #
-# Since we currently do not have access to the SQLA AST, resort
-# to barbaric mutilation of any SQL supplied in literal form
-sub _strip_outer_paren {
-  my ($self, $arg) = @_;
+# generate inner/outer select lists for various limit dialects
+# which result in one or more subqueries (e.g. RNO, Top, RowNum)
+# Any non-root-table columns need to have their table qualifier
+# turned into a column alias (otherwise names in subqueries clash
+# and/or lose their source table)
+#
+# returns inner/outer strings of SQL QUOTED selectors with aliases
+# (to be used in whatever select statement), and an alias index hashref
+# of QUOTED SEL => QUOTED ALIAS pairs (to maybe be used for string-subst
+# higher up)
+#
+# If the $scan_order option is supplied, it signals that the limit dialect
+# needs to order the outer side of the query, which in turn means that the
+# inner select needs to bring out columns used in implicit (non-selected)
+# orders, and the order condition itself needs to be realiased to the proper
+# names in the outer query.
+#
+# In this case ($scan_order os true) we also return a hashref (order doesn't
+# matter) of QUOTED EXTRA-SEL => QUOTED ALIAS pairs, which is a list of extra
+# selectors that do *not* exist in the original select list
 
-  return $self->_SWITCH_refkind ($arg, {
-    ARRAYREFREF => sub {
-      $$arg->[0] = __strip_outer_paren ($$arg->[0]);
-      return $arg;
-    },
-    SCALARREF => sub {
-      return \__strip_outer_paren( $$arg );
-    },
-    FALLBACK => sub {
-      return $arg
-    },
-  });
-}
+sub _subqueried_limit_attrs {
+  my ($self, $rs_attrs, $scan_order) = @_;
 
-sub __strip_outer_paren {
-  my $sql = shift;
+  croak 'Limit dialect implementation usable only in the context of DBIC (missing $rs_attrs)'
+    unless ref ($rs_attrs) eq 'HASH';
 
-  if ($sql and not ref $sql) {
-    while ($sql =~ /^ \s* \( (.*) \) \s* $/x ) {
-      $sql = $1;
+  my ($re_sep, $re_alias) = map { quotemeta $_ } (
+    $self->name_sep || '.',
+    $rs_attrs->{alias},
+  );
+
+  # correlate select and as, build selection index
+  my (@sel, $in_sel_index);
+  for my $i (0 .. $#{$rs_attrs->{select}}) {
+
+    my $s = $rs_attrs->{select}[$i];
+    my $sql_sel = $self->_recurse_fields ($s);
+    my $sql_alias = (ref $s) eq 'HASH' ? $s->{-as} : undef;
+
+
+    push @sel, {
+      sql => $sql_sel,
+      unquoted_sql => do { local $self->{quote_char}; $self->_recurse_fields ($s) },
+      as =>
+        $sql_alias
+          ||
+        $rs_attrs->{as}[$i]
+          ||
+        croak "Select argument $i ($s) without corresponding 'as'"
+      ,
+    };
+
+    $in_sel_index->{$sql_sel}++;
+    $in_sel_index->{$self->_quote ($sql_alias)}++ if $sql_alias;
+
+# this *may* turn out to be necessary, not sure yet
+#    my ($sql_unqualified_sel) = $sql_sel =~ / $re_sep (.+) $/x
+#      if ! ref $s;
+#    $in_sel_index->{$sql_unqualified_sel}++;
+  }
+
+
+  # re-alias and remove any name separators from aliases,
+  # unless we are dealing with the current source alias
+  # (which will transcend the subqueries as it is necessary
+  # for possible further chaining)
+  my (@in_sel, @out_sel, %renamed);
+  for my $node (@sel) {
+    if (List::Util::first { $_ =~ / (?<! $re_alias ) $re_sep /x } ($node->{as}, $node->{unquoted_sql}) )  {
+      $node->{as} =~ s/ $re_sep /__/xg;
+      my $quoted_as = $self->_quote($node->{as});
+      push @in_sel, sprintf '%s AS %s', $node->{sql}, $quoted_as;
+      push @out_sel, $quoted_as;
+      $renamed{$node->{sql}} = $quoted_as;
     }
+    else {
+      push @in_sel, $node->{sql};
+      push @out_sel, $self->_quote ($node->{as});
+    }
   }
 
-  return $sql;
-}
+  my %extra_order_sel;
+  if ($scan_order) {
+    for my $chunk ($self->_order_by_chunks ($rs_attrs->{order_by})) {
+      # order with bind
+      $chunk = $chunk->[0] if (ref $chunk) eq 'ARRAY';
+      $chunk =~ s/\s+ (?: ASC|DESC ) \s* $//ix;
 
-sub _where_field_IN {
-  my ($self, $lhs, $op, $rhs) = @_;
-  $rhs = $self->_strip_outer_paren ($rhs);
-  return $self->SUPER::_where_field_IN ($lhs, $op, $rhs);
-}
+      next if $in_sel_index->{$chunk};
 
-sub _where_field_BETWEEN {
-  my ($self, $lhs, $op, $rhs) = @_;
-  $rhs = $self->_strip_outer_paren ($rhs);
-  return $self->SUPER::_where_field_BETWEEN ($lhs, $op, $rhs);
+      $extra_order_sel{$chunk} ||= $self->_quote (
+        'ORDER__BY__' . scalar keys %extra_order_sel
+      );
+    }
+  }
+  return (
+    (map { join (', ', @$_ ) } (
+      \@in_sel,
+      \@out_sel)
+    ),
+    \%renamed,
+    keys %extra_order_sel ? \%extra_order_sel : (),
+  );
 }
 
-# Slow but ANSI standard Limit/Offset support. DB2 uses this
+# ANSI standard Limit/Offset implementation. DB2 and MSSQL >= 2005 use this
 sub _RowNumberOver {
-  my ($self, $sql, $order, $rows, $offset ) = @_;
+  my ($self, $sql, $rs_attrs, $rows, $offset ) = @_;
 
-  $offset += 1;
-  my $last = $rows + $offset - 1;
-  my ( $order_by ) = $self->_order_by( $order );
+  # mangle the input sql as we will be replacing the selector
+  $sql =~ s/^ \s* SELECT \s+ .+? \s+ (?= \b FROM \b )//ix
+    or croak "Unrecognizable SELECT: $sql";
 
-  $sql = <<"SQL";
-SELECT * FROM
-(
-   SELECT Q1.*, ROW_NUMBER() OVER( ) AS ROW_NUM FROM (
-      $sql
-      $order_by
-   ) Q1
-) Q2
-WHERE ROW_NUM BETWEEN $offset AND $last
+  # get selectors, and scan the order_by (if any)
+  my ($in_sel, $out_sel, $alias_map, $extra_order_sel) = $self->_subqueried_limit_attrs (
+    $rs_attrs, 'scan_order_by',
+  );
 
-SQL
+  # make up an order if none exists
+  my $requested_order = (delete $rs_attrs->{order_by}) || $self->_rno_default_order;
+  my $rno_ord = $self->_order_by ($requested_order);
 
-  return $sql;
-}
+  # this is the order supplement magic
+  my $mid_sel = $out_sel;
+  if ($extra_order_sel) {
+    for my $extra_col (sort
+      { $extra_order_sel->{$a} cmp $extra_order_sel->{$b} }
+      keys %$extra_order_sel
+    ) {
+      $in_sel .= sprintf (', %s AS %s',
+        $extra_col,
+        $extra_order_sel->{$extra_col},
+      );
 
-# Crappy Top based Limit/Offset support. MSSQL uses this currently,
-# but may have to switch to RowNumberOver one day
-sub _Top {
-  my ( $self, $sql, $order, $rows, $offset ) = @_;
+      $mid_sel .= ', ' . $extra_order_sel->{$extra_col};
+    }
+  }
 
-  # mangle the input sql so it can be properly aliased in the outer queries
-  $sql =~ s/^ \s* SELECT \s+ (.+?) \s+ (?=FROM)//ix
-    or croak "Unrecognizable SELECT: $sql";
-  my $sql_select = $1;
-  my @sql_select = split (/\s*,\s*/, $sql_select);
-
-  # we can't support subqueries (in fact MSSQL can't) - croak
-  if (@sql_select != @{$self->{_dbic_rs_attrs}{select}}) {
-    croak (sprintf (
-      'SQL SELECT did not parse cleanly - retrieved %d comma separated elements, while '
-    . 'the resultset select attribure contains %d elements: %s',
-      scalar @sql_select,
-      scalar @{$self->{_dbic_rs_attrs}{select}},
-      $sql_select,
-    ));
+  # and this is order re-alias magic
+  for ($extra_order_sel, $alias_map) {
+    for my $col (keys %$_) {
+      my $re_col = quotemeta ($col);
+      $rno_ord =~ s/$re_col/$_->{$col}/;
+    }
   }
 
-  my $name_sep = $self->name_sep || '.';
-  my $esc_name_sep = "\Q$name_sep\E";
-  my $col_re = qr/ ^ (?: (.+) $esc_name_sep )? ([^$esc_name_sep]+) $ /x;
+  # whatever is left of the order_by (only where is processed at this point)
+  my $group_having = $self->_parse_rs_attrs($rs_attrs);
 
-  my $rs_alias = $self->{_dbic_rs_attrs}{alias};
-  my $quoted_rs_alias = $self->_quote ($rs_alias);
+  my $qalias = $self->_quote ($rs_attrs->{alias});
+  my $idx_name = $self->_quote ('rno__row__index');
 
-  # construct the new select lists, rename(alias) some columns if necessary
-  my (@outer_select, @inner_select, %seen_names, %col_aliases, %outer_col_aliases);
+  $sql = sprintf (<<EOS, $offset + 1, $offset + $rows, );
 
-  for (@{$self->{_dbic_rs_attrs}{select}}) {
-    next if ref $_;
-    my ($table, $orig_colname) = ( $_ =~ $col_re );
-    next unless $table;
-    $seen_names{$orig_colname}++;
-  }
+SELECT $out_sel FROM (
+  SELECT $mid_sel, ROW_NUMBER() OVER( $rno_ord ) AS $idx_name FROM (
+    SELECT $in_sel ${sql}${group_having}
+  ) $qalias
+) $qalias WHERE $idx_name BETWEEN %d AND %d
 
-  for my $i (0 .. $#sql_select) {
+EOS
 
-    my $colsel_arg = $self->{_dbic_rs_attrs}{select}[$i];
-    my $colsel_sql = $sql_select[$i];
+  $sql =~ s/\s*\n\s*/ /g;   # easier to read in the debugger
+  return $sql;
+}
 
-    # this may or may not work (in case of a scalarref or something)
-    my ($table, $orig_colname) = ( $colsel_arg =~ $col_re );
+# some databases are happy with OVER (), some need OVER (ORDER BY (SELECT (1)) )
+sub _rno_default_order {
+  return undef;
+}
 
-    my $quoted_alias;
-    # do not attempt to understand non-scalar selects - alias numerically
-    if (ref $colsel_arg) {
-      $quoted_alias = $self->_quote ('column_' . (@inner_select + 1) );
-    }
-    # column name seen more than once - alias it
-    elsif ($orig_colname &&
-          ($seen_names{$orig_colname} && $seen_names{$orig_colname} > 1) ) {
-      $quoted_alias = $self->_quote ("${table}__${orig_colname}");
-    }
+# Informix specific limit, almost like LIMIT/OFFSET
+sub _SkipFirst {
+  my ($self, $sql, $rs_attrs, $rows, $offset) = @_;
 
-    # we did rename - make a record and adjust
-    if ($quoted_alias) {
-      # alias inner
-      push @inner_select, "$colsel_sql AS $quoted_alias";
+  $sql =~ s/^ \s* SELECT \s+ //ix
+    or croak "Unrecognizable SELECT: $sql";
 
-      # push alias to outer
-      push @outer_select, $quoted_alias;
+  return sprintf ('SELECT %s%s%s%s',
+    $offset
+      ? sprintf ('SKIP %d ', $offset)
+      : ''
+    ,
+    sprintf ('FIRST %d ', $rows),
+    $sql,
+    $self->_parse_rs_attrs ($rs_attrs),
+  );
+}
 
-      # Any aliasing accumulated here will be considered
-      # both for inner and outer adjustments of ORDER BY
-      $self->__record_alias (
-        \%col_aliases,
-        $quoted_alias,
-        $colsel_arg,
-        $table ? $orig_colname : undef,
-      );
-    }
+# Firebird specific limit, reverse of _SkipFirst for Informix
+sub _FirstSkip {
+  my ($self, $sql, $rs_attrs, $rows, $offset) = @_;
 
-    # otherwise just leave things intact inside, and use the abbreviated one outside
-    # (as we do not have table names anymore)
-    else {
-      push @inner_select, $colsel_sql;
+  $sql =~ s/^ \s* SELECT \s+ //ix
+    or croak "Unrecognizable SELECT: $sql";
 
-      my $outer_quoted = $self->_quote ($orig_colname);  # it was not a duplicate so should just work
-      push @outer_select, $outer_quoted;
-      $self->__record_alias (
-        \%outer_col_aliases,
-        $outer_quoted,
-        $colsel_arg,
-        $table ? $orig_colname : undef,
-      );
-    }
-  }
+  return sprintf ('SELECT %s%s%s%s',
+    sprintf ('FIRST %d ', $rows),
+    $offset
+      ? sprintf ('SKIP %d ', $offset)
+      : ''
+    ,
+    $sql,
+    $self->_parse_rs_attrs ($rs_attrs),
+  );
+}
 
-  my $outer_select = join (', ', @outer_select );
-  my $inner_select = join (', ', @inner_select );
+# WhOracle limits
+sub _RowNum {
+  my ( $self, $sql, $rs_attrs, $rows, $offset ) = @_;
 
-  %outer_col_aliases = (%outer_col_aliases, %col_aliases);
+  # mangle the input sql as we will be replacing the selector
+  $sql =~ s/^ \s* SELECT \s+ .+? \s+ (?= \b FROM \b )//ix
+    or croak "Unrecognizable SELECT: $sql";
 
-  # deal with order
-  croak '$order supplied to SQLAHacks limit emulators must be a hash'
-    if (ref $order ne 'HASH');
+  my ($insel, $outsel) = $self->_subqueried_limit_attrs ($rs_attrs);
 
-  $order = { %$order }; #copy
+  my $qalias = $self->_quote ($rs_attrs->{alias});
+  my $idx_name = $self->_quote ('rownum__index');
+  my $order_group_having = $self->_parse_rs_attrs($rs_attrs);
 
-  my $req_order = $order->{order_by};
+  $sql = sprintf (<<EOS, $offset + 1, $offset + $rows, );
 
-  # examine normalized version, collapses nesting
-  my $limit_order;
-  if (scalar $self->_order_by_chunks ($req_order)) {
-    $limit_order = $req_order;
-  }
-  else {
-    $limit_order = [ map
-      { join ('', $rs_alias, $name_sep, $_ ) }
-      ( $self->{_dbic_rs_attrs}{_source_handle}->resolve->primary_columns )
-    ];
-  }
+SELECT $outsel FROM (
+  SELECT $outsel, ROWNUM $idx_name FROM (
+    SELECT $insel ${sql}${order_group_having}
+  ) $qalias
+) $qalias WHERE $idx_name BETWEEN %d AND %d
 
-  my ( $order_by_inner, $order_by_outer ) = $self->_order_directions($limit_order);
-  my $order_by_requested = $self->_order_by ($req_order);
+EOS
 
-  # generate the rest
-  delete $order->{order_by};
-  my $grpby_having = $self->_order_by ($order);
+  $sql =~ s/\s*\n\s*/ /g;   # easier to read in the debugger
+  return $sql;
+}
 
-  # short circuit for counts - the ordering complexity is needless
-  if ($self->{_dbic_rs_attrs}{-for_count_only}) {
-    return "SELECT TOP $rows $inner_select $sql $grpby_having $order_by_outer";
-  }
+# Crappy Top based Limit/Offset support. Legacy for MSSQL < 2005
+sub _Top {
+  my ( $self, $sql, $rs_attrs, $rows, $offset ) = @_;
 
-  # we can't really adjust the order_by columns, as introspection is lacking
-  # resort to simple substitution
-  for my $col (keys %outer_col_aliases) {
-    for ($order_by_requested, $order_by_outer) {
-      $_ =~ s/\s+$col\s+/ $outer_col_aliases{$col} /g;
-    }
-  }
-  for my $col (keys %col_aliases) {
-    $order_by_inner =~ s/\s+$col\s+/ $col_aliases{$col} /g;
-  }
+  # mangle the input sql as we will be replacing the selector
+  $sql =~ s/^ \s* SELECT \s+ .+? \s+ (?= \b FROM \b )//ix
+    or croak "Unrecognizable SELECT: $sql";
 
+  # get selectors
+  my ($in_sel, $out_sel, $alias_map, $extra_order_sel)
+    = $self->_subqueried_limit_attrs ($rs_attrs, 'outer_order_by');
 
-  my $inner_lim = $rows + $offset;
+  my $requested_order = delete $rs_attrs->{order_by};
 
-  $sql = "SELECT TOP $inner_lim $inner_select $sql $grpby_having $order_by_inner";
+  my $order_by_requested = $self->_order_by ($requested_order);
 
-  if ($offset) {
-    $sql = <<"SQL";
+  # make up an order unless supplied
+  my $inner_order = ($order_by_requested
+    ? $requested_order
+    : [ map
+      { join ('', $rs_attrs->{alias}, $self->{name_sep}||'.', $_ ) }
+      ( $rs_attrs->{_rsroot_source_handle}->resolve->_pri_cols )
+    ]
+  );
 
-    SELECT TOP $rows $outer_select FROM
-    (
-      $sql
-    ) $quoted_rs_alias
-    $order_by_outer
-SQL
+  my ($order_by_inner, $order_by_reversed);
 
+  # localise as we already have all the bind values we need
+  {
+    local $self->{order_bind};
+    $order_by_inner = $self->_order_by ($inner_order);
+
+    my @out_chunks;
+    for my $ch ($self->_order_by_chunks ($inner_order)) {
+      $ch = $ch->[0] if ref $ch eq 'ARRAY';
+
+      $ch =~ s/\s+ ( ASC|DESC ) \s* $//ix;
+      my $dir = uc ($1||'ASC');
+
+      push @out_chunks, \join (' ', $ch, $dir eq 'ASC' ? 'DESC' : 'ASC' );
+    }
+
+    $order_by_reversed = $self->_order_by (\@out_chunks);
   }
 
-  if ($order_by_requested) {
-    $sql = <<"SQL";
+  # this is the order supplement magic
+  my $mid_sel = $out_sel;
+  if ($extra_order_sel) {
+    for my $extra_col (sort
+      { $extra_order_sel->{$a} cmp $extra_order_sel->{$b} }
+      keys %$extra_order_sel
+    ) {
+      $in_sel .= sprintf (', %s AS %s',
+        $extra_col,
+        $extra_order_sel->{$extra_col},
+      );
 
-    SELECT $outer_select FROM
-      ( $sql ) $quoted_rs_alias
-    $order_by_requested
-SQL
+      $mid_sel .= ', ' . $extra_order_sel->{$extra_col};
+    }
+  }
 
+  # and this is order re-alias magic
+  for my $map ($extra_order_sel, $alias_map) {
+    for my $col (keys %$map) {
+      my $re_col = quotemeta ($col);
+      $_ =~ s/$re_col/$map->{$col}/
+        for ($order_by_reversed, $order_by_requested);
+    }
   }
 
-  $sql =~ s/\s*\n\s*/ /g; # parsing out multiline statements is harder than a single line
-  return $sql;
-}
+  # generate the rest of the sql
+  my $grpby_having = $self->_parse_rs_attrs ($rs_attrs);
 
-# action at a distance to shorten Top code above
-sub __record_alias {
-  my ($self, $register, $alias, $fqcol, $col) = @_;
+  my $quoted_rs_alias = $self->_quote ($rs_attrs->{alias});
 
-  # record qualified name
-  $register->{$fqcol} = $alias;
-  $register->{$self->_quote($fqcol)} = $alias;
+  $sql = sprintf ('SELECT TOP %d %s %s %s %s',
+    $rows + ($offset||0),
+    $in_sel,
+    $sql,
+    $grpby_having,
+    $order_by_inner,
+  );
 
-  return unless $col;
+  $sql = sprintf ('SELECT TOP %d %s FROM ( %s ) %s %s',
+    $rows,
+    $mid_sel,
+    $sql,
+    $quoted_rs_alias,
+    $order_by_reversed,
+  ) if $offset;
 
-  # record unqualified name, undef (no adjustment) if a duplicate is found
-  if (exists $register->{$col}) {
-    $register->{$col} = undef;
-  }
-  else {
-    $register->{$col} = $alias;
-  }
+  $sql = sprintf ('SELECT TOP %d %s FROM ( %s ) %s %s',
+    $rows,
+    $out_sel,
+    $sql,
+    $quoted_rs_alias,
+    $order_by_requested,
+  ) if ( ($offset && $order_by_requested) || ($mid_sel ne $out_sel) );
 
-  $register->{$self->_quote($col)} = $register->{$col};
+  return $sql;
 }
 
 
-
 # While we're at it, this should make LIMIT queries more efficient,
 #  without digging into things too deeply
 sub _find_syntax {
@@ -318,14 +394,10 @@
   return $self->{_cached_syntax} ||= $self->SUPER::_find_syntax($syntax);
 }
 
-my $for_syntax = {
-  update => 'FOR UPDATE',
-  shared => 'FOR SHARE',
-};
 # Quotes table names, handles "limit" dialects (e.g. where rownum between x and
-# y), supports SELECT ... FOR UPDATE and SELECT ... FOR SHARE.
+# y)
 sub select {
-  my ($self, $table, $fields, $where, $order, @rest) = @_;
+  my ($self, $table, $fields, $where, $rs_attrs, @rest) = @_;
 
   $self->{"${_}_bind"} = [] for (qw/having from order/);
 
@@ -333,18 +405,13 @@
     $table = $self->_quote($table);
   }
 
-  local $self->{rownum_hack_count} = 1
-    if (defined $rest[0] && $self->{limit_dialect} eq 'RowNum');
   @rest = (-1) unless defined $rest[0];
   croak "LIMIT 0 Does Not Compute" if $rest[0] == 0;
     # and anyway, SQL::Abstract::Limit will cause a barf if we don't first
+
   my ($sql, @where_bind) = $self->SUPER::select(
-    $table, $self->_recurse_fields($fields), $where, $order, @rest
+    $table, $self->_recurse_fields($fields), $where, $rs_attrs, @rest
   );
-  if (my $for = delete $self->{_dbic_rs_attrs}{for}) {
-    $sql .= " $for_syntax->{$for}" if $for_syntax->{$for};
-  }
-
   return wantarray ? ($sql, @{$self->{from_bind}}, @where_bind, @{$self->{having_bind}}, @{$self->{order_bind}} ) : $sql;
 }
 
@@ -358,7 +425,13 @@
   # which is sadly understood only by MySQL. Change default behavior here,
   # until SQLA2 comes with proper dialect support
   if (! $_[0] or (ref $_[0] eq 'HASH' and !keys %{$_[0]} ) ) {
-    return "INSERT INTO ${table} DEFAULT VALUES"
+    my $sql = "INSERT INTO ${table} DEFAULT VALUES";
+
+    if (my $ret = ($_[1]||{})->{returning} ) {
+      $sql .= $self->_insert_returning ($ret);
+    }
+
+    return $sql;
   }
 
   $self->SUPER::insert($table, @_);
@@ -382,35 +455,36 @@
 
 sub _emulate_limit {
   my $self = shift;
+  # my ( $syntax, $sql, $order, $rows, $offset ) = @_;
+
   if ($_[3] == -1) {
-    return $_[1].$self->_order_by($_[2]);
+    return $_[1] . $self->_parse_rs_attrs($_[2]);
   } else {
     return $self->SUPER::_emulate_limit(@_);
   }
 }
 
 sub _recurse_fields {
-  my ($self, $fields, $params) = @_;
+  my ($self, $fields) = @_;
   my $ref = ref $fields;
   return $self->_quote($fields) unless $ref;
   return $$fields if $ref eq 'SCALAR';
 
   if ($ref eq 'ARRAY') {
-    return join(', ', map {
-      $self->_recurse_fields($_)
-        .(exists $self->{rownum_hack_count} && !($params && $params->{no_rownum_hack})
-          ? ' AS col'.$self->{rownum_hack_count}++
-          : '')
-      } @$fields);
+    return join(', ', map { $self->_recurse_fields($_) } @$fields);
   }
   elsif ($ref eq 'HASH') {
-    my %hash = %$fields;
+    my %hash = %$fields;  # shallow copy
 
     my $as = delete $hash{-as};   # if supplied
 
-    my ($func, $args) = each %hash;
-    delete $hash{$func};
+    my ($func, $args, @toomany) = %hash;
 
+    # there should be only one pair
+    if (@toomany) {
+      croak "Malformed select argument - too many keys in hash: " . join (',', keys %$fields );
+    }
+
     if (lc ($func) eq 'distinct' && ref $args eq 'ARRAY' && @$args > 1) {
       croak (
         'The select => { distinct => ... } syntax is not supported for multiple columns.'
@@ -423,15 +497,10 @@
       $self->_sqlcase($func),
       $self->_recurse_fields($args),
       $as
-        ? sprintf (' %s %s', $self->_sqlcase('as'), $as)
+        ? sprintf (' %s %s', $self->_sqlcase('as'), $self->_quote ($as) )
         : ''
     );
 
-    # there should be nothing left
-    if (keys %hash) {
-      croak "Malformed select argument - too many keys in hash: " . join (',', keys %$fields );
-    }
-
     return $select;
   }
   # Is the second check absolutely necessary?
@@ -443,34 +512,55 @@
   }
 }
 
-sub _order_by {
+my $for_syntax = {
+  update => 'FOR UPDATE',
+  shared => 'FOR SHARE',
+};
+
+# this used to be a part of _order_by but is broken out for clarity.
+# What we have been doing forever is hijacking the $order arg of
+# SQLA::select to pass in arbitrary pieces of data (first the group_by,
+# then pretty much the entire resultset attr-hash, as more and more
+# things in the SQLA space need to have more info about the $rs they
+# create SQL for. The alternative would be to keep expanding the
+# signature of _select with more and more positional parameters, which
+# is just gross. All hail SQLA2!
+sub _parse_rs_attrs {
   my ($self, $arg) = @_;
 
-  if (ref $arg eq 'HASH' and keys %$arg and not grep { $_ =~ /^-(?:desc|asc)/i } keys %$arg ) {
+  my $sql = '';
 
-    my $ret = '';
+  if (my $g = $self->_recurse_fields($arg->{group_by}) ) {
+    $sql .= $self->_sqlcase(' group by ') . $g;
+  }
 
-    if (my $g = $self->_recurse_fields($arg->{group_by}, { no_rownum_hack => 1 }) ) {
-      $ret = $self->_sqlcase(' group by ') . $g;
-    }
+  if (defined $arg->{having}) {
+    my ($frag, @bind) = $self->_recurse_where($arg->{having});
+    push(@{$self->{having_bind}}, @bind);
+    $sql .= $self->_sqlcase(' having ') . $frag;
+  }
 
-    if (defined $arg->{having}) {
-      my ($frag, @bind) = $self->_recurse_where($arg->{having});
-      push(@{$self->{having_bind}}, @bind);
-      $ret .= $self->_sqlcase(' having ').$frag;
-    }
+  if (defined $arg->{order_by}) {
+    $sql .= $self->_order_by ($arg->{order_by});
+  }
 
-    if (defined $arg->{order_by}) {
-      my ($frag, @bind) = $self->SUPER::_order_by($arg->{order_by});
-      push(@{$self->{order_bind}}, @bind);
-      $ret .= $frag;
-    }
+  if (my $for = $arg->{for}) {
+    $sql .= " $for_syntax->{$for}" if $for_syntax->{$for};
+  }
 
-    return $ret;
+  return $sql;
+}
+
+sub _order_by {
+  my ($self, $arg) = @_;
+
+  # check that we are not called in legacy mode (order_by as 4th argument)
+  if (ref $arg eq 'HASH' and not grep { $_ =~ /^-(?:desc|asc)/i } keys %$arg ) {
+    return $self->_parse_rs_attrs ($arg);
   }
   else {
     my ($sql, @bind) = $self->SUPER::_order_by ($arg);
-    push(@{$self->{order_bind}}, @bind);
+    push @{$self->{order_bind}}, @bind;
     return $sql;
   }
 }
@@ -501,6 +591,14 @@
   }
 }
 
+sub _generate_join_clause {
+    my ($self, $join_type) = @_;
+
+    return sprintf ('%s JOIN ',
+      $join_type ?  ' ' . uc($join_type) : ''
+    );
+}
+
 sub _recurse_from {
   my ($self, $from, @join) = @_;
   my @sqlf;
@@ -508,16 +606,19 @@
   foreach my $j (@join) {
     my ($to, $on) = @$j;
 
+
     # check whether a join type exists
-    my $join_clause = '';
     my $to_jt = ref($to) eq 'ARRAY' ? $to->[0] : $to;
-    if (ref($to_jt) eq 'HASH' and exists($to_jt->{-join_type})) {
-      $join_clause = ' '.uc($to_jt->{-join_type}).' JOIN ';
-    } else {
-      $join_clause = ' JOIN ';
+    my $join_type;
+    if (ref($to_jt) eq 'HASH' and defined($to_jt->{-join_type})) {
+      $join_type = $to_jt->{-join_type};
+      $join_type =~ s/^\s+ | \s+$//xg;
     }
-    push(@sqlf, $join_clause);
 
+    $join_type = $self->{_default_jointype} if not defined $join_type;
+
+    push @sqlf, $self->_generate_join_clause( $join_type );
+
     if (ref $to eq 'ARRAY') {
       push(@sqlf, '(', $self->_recurse_from(@$to), ')');
     } else {
@@ -577,26 +678,12 @@
   }
 }
 
-sub _quote {
-  my ($self, $label) = @_;
-  return '' unless defined $label;
-  return $$label if ref($label) eq 'SCALAR';
-  return "*" if $label eq '*';
-  return $label unless $self->{quote_char};
-  if(ref $self->{quote_char} eq "ARRAY"){
-    return $self->{quote_char}->[0] . $label . $self->{quote_char}->[1]
-      if !defined $self->{name_sep};
-    my $sep = $self->{name_sep};
-    return join($self->{name_sep},
-        map { $self->{quote_char}->[0] . $_ . $self->{quote_char}->[1]  }
-       split(/\Q$sep\E/,$label));
-  }
-  return $self->SUPER::_quote($label);
-}
-
 sub limit_dialect {
     my $self = shift;
-    $self->{limit_dialect} = shift if @_;
+    if (@_) {
+      $self->{limit_dialect} = shift;
+      undef $self->{_cached_syntax};
+    }
     return $self->{limit_dialect};
 }
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Schema/Versioned.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Schema/Versioned.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Schema/Versioned.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,10 +1,9 @@
 package # Hide from PAUSE
   DBIx::Class::Version::Table;
-use base 'DBIx::Class';
+use base 'DBIx::Class::Core';
 use strict;
 use warnings;
 
-__PACKAGE__->load_components(qw/ Core/);
 __PACKAGE__->table('dbix_class_schema_versions');
 
 __PACKAGE__->add_columns
@@ -31,8 +30,7 @@
 
 package # Hide from PAUSE
   DBIx::Class::Version::TableCompat;
-use base 'DBIx::Class';
-__PACKAGE__->load_components(qw/ Core/);
+use base 'DBIx::Class::Core';
 __PACKAGE__->table('SchemaVersions');
 
 __PACKAGE__->add_columns
@@ -116,7 +114,7 @@
   use Getopt::Long;
   use MyApp::Schema;
 
-  my ( $preversion, $help ); 
+  my ( $preversion, $help );
   GetOptions(
     'p|preversion:s'  => \$preversion,
   ) or die pod2usage;
@@ -152,13 +150,13 @@
 and we can safely deploy the DDL to it. However things are not always so simple.
 
 if you want to initialise a pre-existing database where the DDL is not the same
-as the DDL for your current schema version then you will need a diff which 
+as the DDL for your current schema version then you will need a diff which
 converts the database's DDL to the current DDL. The best way to do this is
 to get a dump of the database schema (without data) and save that in your
 SQL directory as version 0.000 (the filename must be as with
-L<DBIx::Class::Schema/ddl_filename>) then create a diff using your create DDL 
+L<DBIx::Class::Schema/ddl_filename>) then create a diff using your create DDL
 script given above from version 0.000 to the current version. Then hand check
-and if necessary edit the resulting diff to ensure that it will apply. Once you have 
+and if necessary edit the resulting diff to ensure that it will apply. Once you have
 done all that you can do this:
 
   if (!$schema->get_db_version()) {
@@ -170,7 +168,7 @@
   $schema->upgrade();
 
 In the case of an unversioned database the above code will create the
-dbix_class_schema_versions table and write version 0.000 to it, then 
+dbix_class_schema_versions table and write version 0.000 to it, then
 upgrade will then apply the diff we talked about creating in the previous paragraph
 and then you're good to go.
 
@@ -180,10 +178,10 @@
 
 use strict;
 use warnings;
-use base 'DBIx::Class';
+use base 'DBIx::Class::Schema';
 
 use Carp::Clan qw/^DBIx::Class/;
-use POSIX 'strftime';
+use Time::HiRes qw/gettimeofday/;
 
 __PACKAGE__->mk_classdata('_filedata');
 __PACKAGE__->mk_classdata('upgrade_directory');
@@ -260,58 +258,155 @@
 
 =back
 
-Virtual method that should be overriden to create an upgrade file. 
-This is useful in the case of upgrading across multiple versions 
+Virtual method that should be overridden to create an upgrade file.
+This is useful in the case of upgrading across multiple versions
 to concatenate several files to create one upgrade file.
 
 You'll probably want the db_version retrieved via $self->get_db_version
-and the schema_version which is retrieved via $self->schema_version 
+and the schema_version which is retrieved via $self->schema_version
 
 =cut
 
 sub create_upgrade_path {
-	## override this method
+  ## override this method
 }
 
+=head2 ordered_schema_versions
+
+=over 4
+
+=item Returns: a list of version numbers, ordered from lowest to highest
+
+=back
+
+Virtual method that should be overridden to return an ordered list
+of schema versions. This is then used to produce a set of steps to
+upgrade through to achieve the required schema version.
+
+You may want the db_version retrieved via $self->get_db_version
+and the schema_version which is retrieved via $self->schema_version
+
+=cut
+
+sub ordered_schema_versions {
+  ## override this method
+}
+
 =head2 upgrade
 
-Call this to attempt to upgrade your database from the version it is at to the version
-this DBIC schema is at. If they are the same it does nothing.
+Call this to attempt to upgrade your database from the version it
+is at to the version this DBIC schema is at. If they are the same
+it does nothing.
 
-It requires an SQL diff file to exist in you I<upgrade_directory>, normally you will
-have created this using L<DBIx::Class::Schema/create_ddl_dir>.
+It will call L</ordered_schema_versions> to retrieve an ordered
+list of schema versions (if ordered_schema_versions returns nothing
+then it is assumed you can do the upgrade as a single step). It
+then iterates through the list of versions between the current db
+version and the schema version applying one update at a time until
+all relevant updates are applied.
 
-If successful the dbix_class_schema_versions table is updated with the current
-DBIC schema version.
+The individual update steps are performed by using
+L</upgrade_single_step>, which will apply the update and also
+update the dbix_class_schema_versions table.
 
 =cut
 
-sub upgrade
+sub upgrade {
+    my ($self) = @_;
+    my $db_version = $self->get_db_version();
+
+    # db unversioned
+    unless ($db_version) {
+        carp 'Upgrade not possible as database is unversioned. Please call install first.';
+        return;
+    }
+
+    # db and schema at same version. do nothing
+    if ( $db_version eq $self->schema_version ) {
+        carp "Upgrade not necessary\n";
+        return;
+    }
+
+    my @version_list = $self->ordered_schema_versions;
+
+    # if nothing returned then we preload with min/max
+    @version_list = ( $db_version, $self->schema_version )
+      unless ( scalar(@version_list) );
+
+    # catch the case of someone returning an arrayref
+    @version_list = @{ $version_list[0] }
+      if ( ref( $version_list[0] ) eq 'ARRAY' );
+
+    # remove all versions in list above the required version
+    while ( scalar(@version_list)
+        && ( $version_list[-1] ne $self->schema_version ) )
+    {
+        pop @version_list;
+    }
+
+    # remove all versions in list below the current version
+    while ( scalar(@version_list) && ( $version_list[0] ne $db_version ) ) {
+        shift @version_list;
+    }
+
+    # check we have an appropriate list of versions
+    if ( scalar(@version_list) < 2 ) {
+        die;
+    }
+
+    # do sets of upgrade
+    while ( scalar(@version_list) >= 2 ) {
+        $self->upgrade_single_step( $version_list[0], $version_list[1] );
+        shift @version_list;
+    }
+}
+
+=head2 upgrade_single_step
+
+=over 4
+
+=item Arguments: db_version - the version currently within the db
+
+=item Arguments: target_version - the version to upgrade to
+
+=back
+
+Call this to attempt to upgrade your database from the
+I<db_version> to the I<target_version>. If they are the same it
+does nothing.
+
+It requires an SQL diff file to exist in your I<upgrade_directory>,
+normally you will have created this using L<DBIx::Class::Schema/create_ddl_dir>.
+
+If successful the dbix_class_schema_versions table is updated with
+the I<target_version>.
+
+This method may be called repeatedly by the upgrade method to
+upgrade through a series of updates.
+
+=cut
+
+sub upgrade_single_step
 {
-  my ($self) = @_;
-  my $db_version = $self->get_db_version();
+  my ($self,
+      $db_version,
+      $target_version) = @_;
 
-  # db unversioned
-  unless ($db_version) {
-    carp 'Upgrade not possible as database is unversioned. Please call install first.';
-    return;
-  }
-
   # db and schema at same version. do nothing
-  if ($db_version eq $self->schema_version) {
+  if ($db_version eq $target_version) {
     carp "Upgrade not necessary\n";
     return;
   }
 
   # strangely the first time this is called can
-  # differ to subsequent times. so we call it 
+  # differ to subsequent times. so we call it
   # here to be sure.
   # XXX - just fix it
   $self->storage->sqlt_type;
 
   my $upgrade_file = $self->ddl_filename(
                                          $self->storage->sqlt_type,
-                                         $self->schema_version,
+                                         $target_version,
                                          $self->upgrade_directory,
                                          $db_version,
                                         );
@@ -323,7 +418,7 @@
     return;
   }
 
-  carp "\nDB version ($db_version) is lower than the schema version (".$self->schema_version."). Attempting upgrade.\n";
+  carp "DB version ($db_version) is lower than the schema version (".$self->schema_version."). Attempting upgrade.\n";
 
   # backup if necessary then apply upgrade
   $self->_filedata($self->_read_sql_file($upgrade_file));
@@ -331,7 +426,7 @@
   $self->txn_do(sub { $self->do_upgrade() });
 
   # set row in dbix_class_schema_versions table
-  $self->_set_db_version;
+  $self->_set_db_version({version => $target_version});
 }
 
 =head2 do_upgrade
@@ -340,7 +435,7 @@
 allows you to run your upgrade any way you please, you can call C<run_upgrade>
 any number of times to run the actual SQL commands, and in between you can
 sandwich your data upgrading. For example, first run all the B<CREATE>
-commands, then migrate your data from old to new tables/formats, then 
+commands, then migrate your data from old to new tables/formats, then
 issue the DROP commands when you are finished. Will run the whole file as it is by default.
 
 =cut
@@ -349,7 +444,7 @@
 {
   my ($self) = @_;
 
-  # just run all the commands (including inserts) in order                                                        
+  # just run all the commands (including inserts) in order
   $self->run_upgrade(qr/.*?/);
 }
 
@@ -374,7 +469,7 @@
     $self->_filedata([ grep { $_ !~ /$stm/i } @{$self->_filedata} ]);
 
     for (@statements)
-    {      
+    {
         $self->storage->debugobj->query_start($_) if $self->storage->debug;
         $self->apply_statement($_);
         $self->storage->debugobj->query_end($_) if $self->storage->debug;
@@ -393,7 +488,7 @@
 sub apply_statement {
     my ($self, $statement) = @_;
 
-    $self->storage->dbh->do($_) or carp "SQL was:\n $_";
+    $self->storage->dbh->do($_) or carp "SQL was: $_";
 }
 
 =head2 get_db_version
@@ -408,12 +503,12 @@
     my ($self, $rs) = @_;
 
     my $vtable = $self->{vschema}->resultset('Table');
-    my $version = 0;
-    eval {
-      my $stamp = $vtable->get_column('installed')->max;
-      $version = $vtable->search({ installed => $stamp })->first->version;
+    my $version = eval {
+      $vtable->search({}, { order_by => { -desc => 'installed' }, rows => 1 } )
+              ->get_column ('version')
+               ->next;
     };
-    return $version;
+    return $version || 0;
 }
 
 =head2 schema_version
@@ -427,7 +522,7 @@
 This is an overwritable method which is called just before the upgrade, to
 allow you to make a backup of the database. Per default this method attempts
 to call C<< $self->storage->backup >>, to run the standard backup on each
-database type. 
+database type.
 
 This method should return the name of the backup file, if appropriate..
 
@@ -449,7 +544,7 @@
 compatibility between the old versions table (SchemaVersions) and the new one
 (dbix_class_schema_versions).
 
-To avoid the checks on connect, set the env var DBIC_NO_VERSION_CHECK or alternatively you can set the ignore_version attr in the forth argument like so:
+To avoid the checks on connect, set the environment var DBIC_NO_VERSION_CHECK or alternatively you can set the ignore_version attr in the fourth argument like so:
 
   my $schema = MyApp::Schema->connect(
     $dsn,
@@ -463,20 +558,25 @@
 sub connection {
   my $self = shift;
   $self->next::method(@_);
-  $self->_on_connect($_[3]);
+  $self->_on_connect();
   return $self;
 }
 
 sub _on_connect
 {
-  my ($self, $args) = @_;
+  my ($self) = @_;
 
-  $args = {} unless $args;
-  $self->{vschema} = DBIx::Class::Version->connect(@{$self->storage->connect_info()});
+  my $info = $self->storage->connect_info;
+  my $args = $info->[-1];
+
+  $self->{vschema} = DBIx::Class::Version->connect(@$info);
   my $vtable = $self->{vschema}->resultset('Table');
 
+  # useful when connecting from scripts etc
+  return if ($args->{ignore_version} || ($ENV{DBIC_NO_VERSION_CHECK} && !exists $args->{ignore_version}));
+
   # check for legacy versions table and move to new if exists
-  my $vschema_compat = DBIx::Class::VersionCompat->connect(@{$self->storage->connect_info()});
+  my $vschema_compat = DBIx::Class::VersionCompat->connect(@$info);
   unless ($self->_source_exists($vtable)) {
     my $vtable_compat = $vschema_compat->resultset('TableCompat');
     if ($self->_source_exists($vtable_compat)) {
@@ -486,8 +586,6 @@
     }
   }
 
-  # useful when connecting from scripts etc
-  return if ($args->{ignore_version} || ($ENV{DBIC_NO_VERSION_CHECK} && !exists $args->{ignore_version}));
   my $pversion = $self->get_db_version();
 
   if($pversion eq $self->schema_version)
@@ -502,7 +600,7 @@
         return 1;
     }
 
-  carp "Versions out of sync. This is " . $self->schema_version . 
+  carp "Versions out of sync. This is " . $self->schema_version .
     ", your database contains version $pversion, please call upgrade on your Schema.\n";
 }
 
@@ -520,13 +618,12 @@
     return;
   }
 
-  eval 'require SQL::Translator "0.09003"';
-  if ($@) {
-    $self->throw_exception("SQL::Translator 0.09003 required");
+  unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
+    $self->throw_exception("Unable to proceed without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
   }
 
-  my $db_tr = SQL::Translator->new({ 
-                                    add_drop_table => 1, 
+  my $db_tr = SQL::Translator->new({
+                                    add_drop_table => 1,
                                     parser => 'DBI',
                                     parser_args => { dbh => $self->storage->dbh }
                                    });
@@ -546,7 +643,7 @@
     $tr->parser->($tr, $$data);
   }
 
-  my $diff = SQL::Translator::Diff::schema_diff($db_tr->schema, $db, 
+  my $diff = SQL::Translator::Diff::schema_diff($db_tr->schema, $db,
                                                 $dbic_tr->schema, $db,
                                                 { ignore_constraint_names => 1, ignore_index_names => 1, caseopt => 1 });
 
@@ -576,24 +673,50 @@
 
   my $version = $params->{version} ? $params->{version} : $self->schema_version;
   my $vtable = $self->{vschema}->resultset('Table');
-  $vtable->create({ version => $version,
-                      installed => strftime("%Y-%m-%d %H:%M:%S", gmtime())
-                      });
 
+  ##############################################################################
+  #                             !!! NOTE !!!
+  ##############################################################################
+  #
+  # The travesty below replaces the old nice timestamp format of %Y-%m-%d %H:%M:%S
+  # This is necessary since there are legitimate cases when upgrades can happen
+  # back to back within the same second. This breaks things since we rely on the
+  # ability to sort by the 'installed' value. The logical choice of an autoinc
+  # is not possible, as it will break multiple legacy installations. Also it is 
+  # not possible to format the string sanely, as the column is a varchar(20).
+  # The 'v' character is added to the front of the string, so that any version
+  # formatted by this new function will sort _after_ any existing 200... strings.
+  my @tm = gettimeofday();
+  my @dt = gmtime ($tm[0]);
+  my $o = $vtable->create({ 
+    version => $version,
+    installed => sprintf("v%04d%02d%02d_%02d%02d%02d.%03.0f",
+      $dt[5] + 1900,
+      $dt[4] + 1,
+      $dt[3],
+      $dt[2],
+      $dt[1],
+      $dt[0],
+      $tm[1] / 1000, # convert to millisecs, format as up/down rounded int above
+    ),
+  });
 }
 
 sub _read_sql_file {
   my $self = shift;
   my $file = shift || return;
 
-  my $fh;
-  open $fh, "<$file" or carp("Can't open upgrade file, $file ($!)");
-  my @data = split(/\n/, join('', <$fh>));
-  @data = grep(!/^--/, @data);
-  @data = split(/;/, join('', @data));
-  close($fh);
-  @data = grep { $_ && $_ !~ /^-- / } @data;
-  @data = grep { $_ !~ /^(BEGIN|BEGIN TRANSACTION|COMMIT)/m } @data;
+  open my $fh, '<', $file or carp("Can't open upgrade file, $file ($!)");
+  my @data = split /\n/, join '', <$fh>;
+  close $fh;
+
+  @data = split /;/,
+     join '',
+       grep { $_ &&
+              !/^--/  &&
+              !/^(BEGIN|BEGIN TRANSACTION|COMMIT)/mi }
+         @data;
+
   return \@data;
 }
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Schema.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Schema.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Schema.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -5,7 +5,7 @@
 
 use DBIx::Class::Exception;
 use Carp::Clan qw/^DBIx::Class/;
-use Scalar::Util qw/weaken/;
+use Scalar::Util ();
 use File::Spec;
 use Sub::Name ();
 use Module::Find();
@@ -33,8 +33,9 @@
   __PACKAGE__->load_namespaces();
 
   package Library::Schema::Result::CD;
-  use base qw/DBIx::Class/;
-  __PACKAGE__->load_components(qw/Core/); # for example
+  use base qw/DBIx::Class::Core/;
+
+  __PACKAGE__->load_components(qw/InflateColumn::DateTime/); # for example
   __PACKAGE__->table('cd');
 
   # Elsewhere in your code:
@@ -81,7 +82,7 @@
 
 With no arguments, this method uses L<Module::Find> to load all your
 Result classes from a sub-namespace F<Result> under your Schema class'
-namespace. Eg. With a Schema of I<MyDB::Schema> all files in
+namespace, i.e. with a Schema of I<MyDB::Schema> all files in
 I<MyDB::Schema::Result> are assumed to be Result classes.
 
 It also finds all ResultSet classes in the namespace F<ResultSet> and
@@ -270,6 +271,10 @@
       }
       elsif($rs_class ||= $default_resultset_class) {
         $class->ensure_class_loaded($rs_class);
+        if(!$rs_class->isa("DBIx::Class::ResultSet")) {
+            carp "load_namespaces found ResultSet class $rs_class that does not subclass DBIx::Class::ResultSet";
+        }
+
         $class->_ns_get_rsrc_instance ($result_class)->resultset_class($rs_class);
       }
 
@@ -406,12 +411,10 @@
 
 Set the storage class that will be instantiated when L</connect> is called.
 If the classname starts with C<::>, the prefix C<DBIx::Class::Storage> is
-assumed by L</connect>.  
+assumed by L</connect>.
 
 You want to use this to set subclasses of L<DBIx::Class::Storage::DBI>
-in cases where the appropriate subclass is not autodetected, such as
-when dealing with MSSQL via L<DBD::Sybase>, in which case you'd set it
-to C<::DBI::Sybase::MSSQL>.
+in cases where the appropriate subclass is not autodetected.
 
 If your storage type requires instantiation arguments, those are
 defined as a second argument in the form of a hashref and the entire
@@ -631,13 +634,13 @@
 This interface is preferred over using the individual methods L</txn_begin>,
 L</txn_commit>, and L</txn_rollback> below.
 
-WARNING: If you are connected with C<AutoCommit => 0> the transaction is
+WARNING: If you are connected with C<< AutoCommit => 0 >> the transaction is
 considered nested, and you will still need to call L</txn_commit> to write your
-changes when appropriate. You will also want to connect with C<auto_savepoint =>
-1> to get partial rollback to work, if the storage driver for your database
+changes when appropriate. You will also want to connect with C<< auto_savepoint =>
+1 >> to get partial rollback to work, if the storage driver for your database
 supports it.
 
-Connecting with C<AutoCommit => 1> is recommended.
+Connecting with C<< AutoCommit => 1 >> is recommended.
 
 =cut
 
@@ -670,7 +673,7 @@
 
 Begins a transaction (does nothing if AutoCommit is off). Equivalent to
 calling $schema->storage->txn_begin. See
-L<DBIx::Class::Storage::DBI/"txn_begin"> for more information.
+L<DBIx::Class::Storage/"txn_begin"> for more information.
 
 =cut
 
@@ -686,7 +689,7 @@
 =head2 txn_commit
 
 Commits the current transaction. Equivalent to calling
-$schema->storage->txn_commit. See L<DBIx::Class::Storage::DBI/"txn_commit">
+$schema->storage->txn_commit. See L<DBIx::Class::Storage/"txn_commit">
 for more information.
 
 =cut
@@ -704,7 +707,7 @@
 
 Rolls back the current transaction. Equivalent to calling
 $schema->storage->txn_rollback. See
-L<DBIx::Class::Storage::DBI/"txn_rollback"> for more information.
+L<DBIx::Class::Storage/"txn_rollback"> for more information.
 
 =cut
 
@@ -749,7 +752,7 @@
 L<DBIx::Class::ResultSet/create>, and a arrayref of the resulting row
 objects is returned.
 
-i.e.,
+e.g.
 
   $schema->populate('Artist', [
     [ qw/artistid name/ ],
@@ -814,7 +817,7 @@
 
   $storage_class = 'DBIx::Class::Storage'.$storage_class
     if $storage_class =~ m/^::/;
-  eval "require ${storage_class};";
+  eval { $self->ensure_class_loaded ($storage_class) };
   $self->throw_exception(
     "No arguments to load_classes and couldn't load ${storage_class} ($@)"
   ) if $@;
@@ -852,7 +855,7 @@
 
 It also attaches a corresponding L<DBIx::Class::ResultSource> object to the
 new $schema object. If C<$additional_base_class> is given, the new composed
-classes will inherit from first the corresponding classe from the current
+classes will inherit from first the corresponding class from the current
 schema then the base class.
 
 For example, for a schema with My::Schema::CD and My::Schema::Artist classes,
@@ -910,7 +913,7 @@
     no strict 'refs';
     no warnings 'redefine';
     foreach my $meth (qw/class source resultset/) {
-      *{"${target}::${meth}"} =
+      *{"${target}::${meth}"} = Sub::Name::subname "${target}::${meth}" =>
         sub { shift->schema->$meth(@_) };
     }
   }
@@ -928,7 +931,7 @@
 
 Creates a new savepoint (does nothing outside a transaction). 
 Equivalent to calling $schema->storage->svp_begin.  See
-L<DBIx::Class::Storage::DBI/"svp_begin"> for more information.
+L<DBIx::Class::Storage/"svp_begin"> for more information.
 
 =cut
 
@@ -945,7 +948,7 @@
 
 Releases a savepoint (does nothing outside a transaction). 
 Equivalent to calling $schema->storage->svp_release.  See
-L<DBIx::Class::Storage::DBI/"svp_release"> for more information.
+L<DBIx::Class::Storage/"svp_release"> for more information.
 
 =cut
 
@@ -962,7 +965,7 @@
 
 Rollback to a savepoint (does nothing outside a transaction). 
 Equivalent to calling $schema->storage->svp_rollback.  See
-L<DBIx::Class::Storage::DBI/"svp_rollback"> for more information.
+L<DBIx::Class::Storage/"svp_rollback"> for more information.
 
 =cut
 
@@ -1084,7 +1087,7 @@
   $self->storage->deployment_statements($self, @_);
 }
 
-=head2 create_ddl_dir (EXPERIMENTAL)
+=head2 create_ddl_dir
 
 =over 4
 
@@ -1156,7 +1159,7 @@
 
 Provided as the recommended way of thawing schema objects. You can call 
 C<Storable::thaw> directly if you wish, but the thawed objects will not have a
-reference to any schema, so are rather useless
+reference to any schema, so are rather useless.
 
 =cut
 
@@ -1168,8 +1171,8 @@
 
 =head2 freeze
 
-This doesn't actualy do anything more than call L<Storable/freeze>, it is just
-provided here for symetry.
+This doesn't actually do anything more than call L<Storable/freeze>, it is just
+provided here for symmetry.
 
 =cut
 
@@ -1179,9 +1182,18 @@
 
 =head2 dclone
 
-Recommeneded way of dcloning objects. This is needed to properly maintain
-references to the schema object (which itself is B<not> cloned.)
+=over 4
 
+=item Arguments: $object
+
+=item Return Value: dcloned $object
+
+=back
+
+Recommended way of dcloning L<DBIx::Class::Row> and L<DBIx::Class::ResultSet>
+objects so their references to the schema object
+(which itself is B<not> cloned) are properly maintained.
+
 =cut
 
 sub dclone {
@@ -1261,6 +1273,24 @@
   $self->_register_source(@_);
 }
 
+=head2 unregister_source
+
+=over 4
+
+=item Arguments: $moniker
+
+=back
+
+Removes the L<DBIx::Class::ResultSource> from the schema for the given moniker.
+
+=cut
+
+sub unregister_source {
+  my $self = shift;
+
+  $self->_unregister_source(@_);
+}
+
 =head2 register_extra_source
 
 =over 4
@@ -1287,7 +1317,7 @@
 
   $source = $source->new({ %$source, source_name => $moniker });
   $source->schema($self);
-  weaken($source->{schema}) if ref($self);
+  Scalar::Util::weaken($source->{schema}) if ref($self);
 
   my $rs_class = $source->result_class;
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Serialize/Storable.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Serialize/Storable.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Serialize/Storable.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -7,10 +7,13 @@
     my ($self, $cloning) = @_;
     my $to_serialize = { %$self };
 
+    # The source is either derived from _source_handle or is
+    # reattached in the thaw handler below
     delete $to_serialize->{result_source};
-    delete $to_serialize->{related_resultsets};
-    delete $to_serialize->{_inflated_column};
 
+    # Dynamic values, easy to recalculate
+    delete $to_serialize->{$_} for qw/related_resultsets _inflated_column/;
+
     return (Storable::freeze($to_serialize));
 }
 
@@ -18,8 +21,10 @@
     my ($self, $cloning, $serialized) = @_;
 
     %$self = %{ Storable::thaw($serialized) };
+
+    # if the handle went missing somehow, reattach
     $self->result_source($self->result_source_instance)
-      if $self->can('result_source_instance');
+      if !$self->_source_handle && $self->can('result_source_instance');
 }
 
 1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/StartupCheck.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/StartupCheck.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/StartupCheck.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -17,7 +17,7 @@
 triggers, incorrectly flagging those versions of perl to be buggy. A
 more comprehensive check has been moved into the test suite in
 C<t/99rh_perl_perf_bug.t> and further information about the bug has been
-put in L<DBIx::Class::Manual::Troubleshooting>
+put in L<DBIx::Class::Manual::Troubleshooting>.
 
 Other checks may be added from time to time.
 

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,144 @@
+package DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server;
+
+use strict;
+use warnings;
+
+use base qw/
+  DBIx::Class::Storage::DBI::ADO
+  DBIx::Class::Storage::DBI::MSSQL
+/;
+use mro 'c3';
+
+sub _rebless {
+  my $self = shift;
+  $self->_identity_method('@@identity');
+}
+
+sub source_bind_attributes {
+  my $self = shift;
+  my ($source) = @_;
+
+  my $bind_attributes = $self->next::method(@_);
+
+  foreach my $column ($source->columns) {
+    $bind_attributes->{$column}{ado_size} ||= 8000; # max VARCHAR
+  }
+
+  return $bind_attributes;
+}
+
+sub bind_attribute_by_data_type {
+  my ($self, $data_type) = @_;
+
+  ($data_type = lc($data_type)) =~ s/\s+.*//;
+
+  my $max_size =
+    $self->_mssql_max_data_type_representation_size_in_bytes->{$data_type};
+
+  my $res = {};
+  $res->{ado_size} = $max_size if $max_size;
+
+  return $res;
+}
+
+# approximate
+# XXX needs to support varchar(max) and varbinary(max)
+sub _mssql_max_data_type_representation_size_in_bytes {
+  my $self = shift;
+
+  my $blob_max = $self->_get_dbh->{LongReadLen} || 32768;
+
+  return +{
+# MSSQL types
+    char => 8000,
+    varchar => 8000,
+    binary => 8000,
+    varbinary => 8000,
+    nchar => 8000,
+    nvarchar => 8000,
+    numeric => 100,
+    smallint => 100,
+    tinyint => 100,
+    smallmoney => 100,
+    bigint => 100,
+    bit => 100,
+    decimal => 100,
+    integer => 100,
+    int => 100,
+    money => 100,
+    float => 100,
+    real => 100,
+    uniqueidentifier => 100,
+    ntext => $blob_max,
+    text => $blob_max,
+    image => $blob_max,
+    date => 100,
+    datetime => 100,
+    datetime2 => 100,
+    datetimeoffset => 100,
+    smalldatetime => 100,
+    time => 100,
+    timestamp => 100,
+    cursor => 100,
+    hierarchyid => 100,
+    sql_variant => 100,
+    table => 100,
+    xml => $blob_max, # ???
+
+# some non-MSSQL types
+    serial => 100,
+    bigserial => 100,
+    varchar2 => 8000,
+    blob => $blob_max,
+    clob => $blob_max,
+  }
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server - Support for Microsoft
+SQL Server via DBD::ADO
+
+=head1 SYNOPSIS
+
+This subclass supports MSSQL server connections via L<DBD::ADO>.
+
+=head1 DESCRIPTION
+
+The MSSQL specific functionality is provided by
+L<DBIx::Class::Storage::DBI::MSSQL>.
+
+=head2 CAVEATS
+
+=head3 identities
+
+C<_identity_method> is set to C<@@identity>, as C<SCOPE_IDENTITY()> doesn't work
+with L<DBD::ADO>. See L<DBIx::Class::Storage::DBI::MSSQL/IMPLEMENTATION NOTES>
+for caveats regarding this.
+
+=head3 truncation bug
+
+There is a bug with MSSQL ADO providers where data gets truncated based on the
+size of the bind sizes in the first prepare call:
+
+L<https://rt.cpan.org/Ticket/Display.html?id=52048>
+
+The C<ado_size> workaround is used (see L<DBD::ADO/"ADO Providers">) with the
+approximate maximum size of the data_type of the bound column, or 8000 (maximum
+VARCHAR size) if the data_type is not available.
+
+This code is incomplete and may be buggy. Particularly, C<VARCHAR(MAX)> is not
+supported yet. The data_type list for other DBs is also incomplete. Please
+report problems (and send patches.)
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ADO.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ADO.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ADO.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,43 @@
+package # hide from PAUSE
+    DBIx::Class::Storage::DBI::ADO;
+
+use base 'DBIx::Class::Storage::DBI';
+
+sub _rebless {
+  my $self = shift;
+
+# check for MSSQL
+# XXX This should be using an OpenSchema method of some sort, but I don't know
+# how.
+# Current version is stolen from Sybase.pm
+  my $dbtype = eval {
+    @{$self->_get_dbh
+      ->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})
+    }[2]
+  };
+
+  unless ($@) {
+    $dbtype =~ s/\W/_/gi;
+    my $subclass = "DBIx::Class::Storage::DBI::ADO::${dbtype}";
+    if ($self->load_optional_class($subclass) && !$self->isa($subclass)) {
+      bless $self, $subclass;
+      $self->_rebless;
+    }
+  }
+}
+
+# Here I was just experimenting with ADO cursor types, left in as a comment in
+# case you want to as well. See the DBD::ADO docs.
+#sub _dbh_sth {
+#  my ($self, $dbh, $sql) = @_;
+#
+#  my $sth = $self->disable_sth_caching
+#    ? $dbh->prepare($sql, { CursorType => 'adOpenStatic' })
+#    : $dbh->prepare_cached($sql, { CursorType => 'adOpenStatic' }, 3);
+#
+#  $self->throw_exception($dbh->errstr) if !$sth;
+#
+#  $sth;
+#}
+
+1;

Deleted: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/AmbiguousGlob.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/AmbiguousGlob.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/AmbiguousGlob.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,44 +0,0 @@
-package DBIx::Class::Storage::DBI::AmbiguousGlob;
-
-use strict;
-use warnings;
-
-use base 'DBIx::Class::Storage::DBI';
-use mro 'c3';
-
-=head1 NAME
-
-DBIx::Class::Storage::DBI::AmbiguousGlob - Storage component for RDBMS supporting multicolumn in clauses
-
-=head1 DESCRIPTION
-
-Some servers choke on things like:
-
-  COUNT(*) FROM (SELECT tab1.col, tab2.col FROM tab1 JOIN tab2 ... )
-
-claiming that col is a duplicate column (it loses the table specifiers by
-the time it gets to the *). Thus for any subquery count we select only the
-primary keys of the main table in the inner query. This hopefully still
-hits the indexes and keeps the server happy.
-
-At this point the only overriden method is C<_subq_count_select()>
-
-=cut
-
-sub _subq_count_select {
-  my ($self, $source, $rs_attrs) = @_;
-  my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns);
-  return @pcols ? \@pcols : [ 1 ];
-}
-
-=head1 AUTHORS
-
-See L<DBIx::Class/CONTRIBUTORS>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut
-
-1;

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/AutoCast.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/AutoCast.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/AutoCast.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,99 @@
+package DBIx::Class::Storage::DBI::AutoCast;
+
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+use mro 'c3';
+
+__PACKAGE__->mk_group_accessors('simple' => 'auto_cast' );
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::AutoCast - Storage component for RDBMS requiring explicit placeholder typing
+
+=head1 SYNOPSIS
+
+  $schema->storage->auto_cast(1);
+
+=head1 DESCRIPTION
+
+In some combinations of RDBMS and DBD drivers (e.g. FreeTDS and Sybase)
+statements with values bound to columns or conditions that are not strings will
+throw implicit type conversion errors.
+
+As long as a column L<data_type|DBIx::Class::ResultSource/add_columns> is
+defined and resolves to a base RDBMS native type via L</_native_data_type> as
+defined in your Storage driver, the placeholder for this column will be
+converted to:
+
+  CAST(? as $mapped_type)
+
+This option can also be enabled in
+L<connect_info|DBIx::Class::Storage::DBI/connect_info> as:
+
+  on_connect_call => ['set_auto_cast']
+
+=cut
+
+sub _prep_for_execute {
+  my $self = shift;
+  my ($op, $extra_bind, $ident, $args) = @_;
+
+  my ($sql, $bind) = $self->next::method (@_);
+
+# If we're using ::NoBindVars, there are no binds by this point so this code
+# gets skipped.
+  if ($self->auto_cast && @$bind) {
+    my $new_sql;
+    my @sql_part = split /\?/, $sql;
+    my $col_info = $self->_resolve_column_info($ident,[ map $_->[0], @$bind ]);
+
+    foreach my $bound (@$bind) {
+      my $col = $bound->[0];
+      my $type = $self->_native_data_type($col_info->{$col}{data_type});
+
+      foreach my $data (@{$bound}[1..$#$bound]) {
+        $new_sql .= shift(@sql_part) .
+          ($type ? "CAST(? AS $type)" : '?');
+      }
+    }
+    $new_sql .= join '', @sql_part;
+    $sql = $new_sql;
+  }
+
+  return ($sql, $bind);
+}
+
+=head2 connect_call_set_auto_cast
+
+Executes:
+
+  $schema->storage->auto_cast(1);
+
+on connection.
+
+Used as:
+
+    on_connect_call => ['set_auto_cast']
+
+in L<connect_info|DBIx::Class::Storage::DBI/connect_info>.
+
+=cut
+
+sub connect_call_set_auto_cast {
+  my $self = shift;
+  $self->auto_cast(1);
+}
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Cursor.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Cursor.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Cursor.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -5,6 +5,10 @@
 
 use base qw/DBIx::Class::Cursor/;
 
+__PACKAGE__->mk_group_accessors('simple' =>
+    qw/sth/
+);
+
 =head1 NAME
 
 DBIx::Class::Storage::DBI::Cursor - Object representing a query cursor on a
@@ -73,24 +77,24 @@
       && $self->{attrs}{rows}
         && $self->{pos} >= $self->{attrs}{rows}
   ) {
-    $self->{sth}->finish if $self->{sth}->{Active};
-    delete $self->{sth};
+    $self->sth->finish if $self->sth->{Active};
+    $self->sth(undef);
     $self->{done} = 1;
   }
   return if $self->{done};
-  unless ($self->{sth}) {
-    $self->{sth} = ($storage->_select(@{$self->{args}}))[1];
+  unless ($self->sth) {
+    $self->sth(($storage->_select(@{$self->{args}}))[1]);
     if ($self->{attrs}{software_limit}) {
       if (my $offset = $self->{attrs}{offset}) {
-        $self->{sth}->fetch for 1 .. $offset;
+        $self->sth->fetch for 1 .. $offset;
       }
     }
   }
-  my @row = $self->{sth}->fetchrow_array;
+  my @row = $self->sth->fetchrow_array;
   if (@row) {
     $self->{pos}++;
   } else {
-    delete $self->{sth};
+    $self->sth(undef);
     $self->{done} = 1;
   }
   return @row;
@@ -120,8 +124,8 @@
   my ($storage, $dbh, $self) = @_;
 
   $self->_check_dbh_gen;
-  $self->{sth}->finish if $self->{sth}->{Active};
-  delete $self->{sth};
+  $self->sth->finish if $self->sth && $self->sth->{Active};
+  $self->sth(undef);
   my ($rv, $sth) = $storage->_select(@{$self->{args}});
   return @{$sth->fetchall_arrayref};
 }
@@ -146,17 +150,17 @@
   my ($self) = @_;
 
   # No need to care about failures here
-  eval { $self->{sth}->finish if $self->{sth} && $self->{sth}->{Active} };
+  eval { $self->sth->finish if $self->sth && $self->sth->{Active} };
   $self->_soft_reset;
+  return undef;
 }
 
 sub _soft_reset {
   my ($self) = @_;
 
-  delete $self->{sth};
+  $self->sth(undef);
   delete $self->{done};
   $self->{pos} = 0;
-  return $self;
 }
 
 sub _check_dbh_gen {
@@ -173,7 +177,7 @@
 
   # None of the reasons this would die matter if we're in DESTROY anyways
   local $@;
-  eval { $self->{sth}->finish if $self->{sth} && $self->{sth}->{Active} };
+  eval { $self->sth->finish if $self->sth && $self->sth->{Active} };
 }
 
 1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/DB2.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/DB2.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/DB2.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -38,7 +38,7 @@
 =head1 SYNOPSIS
 
   # In your table classes
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  use base 'DBIx::Class::Core';
   __PACKAGE__->set_primary_key('id');
 
 =head1 DESCRIPTION

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Informix.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Informix.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Informix.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,189 @@
+package DBIx::Class::Storage::DBI::Informix;
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+use mro 'c3';
+
+use Scope::Guard ();
+use Context::Preserve ();
+
+__PACKAGE__->mk_group_accessors('simple' => '__last_insert_id');
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Informix - Base Storage Class for Informix Support
+
+=head1 DESCRIPTION
+
+This class implements storage-specific support for the Informix RDBMS
+
+=head1 METHODS
+
+=cut
+
+sub _execute {
+  my $self = shift;
+  my ($op) = @_;
+  my ($rv, $sth, @rest) = $self->next::method(@_);
+  if ($op eq 'insert') {
+    $self->__last_insert_id($sth->{ix_sqlerrd}[1]);
+  }
+  return (wantarray ? ($rv, $sth, @rest) : $rv);
+}
+
+sub last_insert_id {
+  shift->__last_insert_id;
+}
+
+sub _sql_maker_opts {
+  my ( $self, $opts ) = @_;
+
+  if ( $opts ) {
+    $self->{_sql_maker_opts} = { %$opts };
+  }
+
+  return { limit_dialect => 'SkipFirst', %{$self->{_sql_maker_opts}||{}} };
+}
+
+sub _svp_begin {
+    my ($self, $name) = @_;
+
+    $self->_get_dbh->do("SAVEPOINT $name");
+}
+
+# can't release savepoints
+sub _svp_release { 1 }
+
+sub _svp_rollback {
+    my ($self, $name) = @_;
+
+    $self->_get_dbh->do("ROLLBACK TO SAVEPOINT $name")
+}
+
+sub with_deferred_fk_checks {
+  my ($self, $sub) = @_;
+
+  my $txn_scope_guard = $self->txn_scope_guard;
+
+  $self->_do_query('SET CONSTRAINTS ALL DEFERRED');
+
+  my $sg = Scope::Guard->new(sub {
+    $self->_do_query('SET CONSTRAINTS ALL IMMEDIATE');
+  });
+
+  return Context::Preserve::preserve_context(sub { $sub->() },
+    after => sub { $txn_scope_guard->commit });
+}
+
+=head2 connect_call_datetime_setup
+
+Used as:
+
+  on_connect_call => 'datetime_setup'
+
+In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set the C<DATE> and
+C<DATETIME> formats.
+
+Sets the following environment variables:
+
+    GL_DATE="%m/%d/%Y"
+    GL_DATETIME="%Y-%m-%d %H:%M:%S%F5"
+
+The C<DBDATE> and C<DBCENTURY> environment variables are cleared.
+
+B<NOTE:> setting the C<GL_DATE> environment variable seems to have no effect
+after the process has started, so the default format is used. The C<GL_DATETIME>
+setting does take effect however.
+
+The C<DATETIME> data type supports up to 5 digits after the decimal point for
+second precision, depending on how you have declared your column. The full
+possible precision is used.
+
+The column declaration for a C<DATETIME> with maximum precision is:
+
+  column_name DATETIME YEAR TO FRACTION(5)
+
+The C<DATE> data type stores the date portion only, and it B<MUST> be declared
+with:
+
+  data_type => 'date'
+
+in your Result class.
+
+You will need the L<DateTime::Format::Strptime> module for inflation to work.
+
+=cut
+
+sub connect_call_datetime_setup {
+  my $self = shift;
+
+  delete @ENV{qw/DBDATE DBCENTURY/};
+
+  $ENV{GL_DATE}     = "%m/%d/%Y";
+  $ENV{GL_DATETIME} = "%Y-%m-%d %H:%M:%S%F5";
+}
+
+sub datetime_parser_type {
+  'DBIx::Class::Storage::DBI::Informix::DateTime::Format'
+}
+
+package # hide from PAUSE
+  DBIx::Class::Storage::DBI::Informix::DateTime::Format;
+
+my $timestamp_format = '%Y-%m-%d %H:%M:%S.%5N'; # %F %T
+my $date_format      = '%m/%d/%Y';
+
+my ($timestamp_parser, $date_parser);
+
+sub parse_datetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $timestamp_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $timestamp_format,
+    on_error => 'croak',
+  );
+  return $timestamp_parser->parse_datetime(shift);
+}
+
+sub format_datetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $timestamp_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $timestamp_format,
+    on_error => 'croak',
+  );
+  return $timestamp_parser->format_datetime(shift);
+}
+
+sub parse_date {
+  shift;
+  require DateTime::Format::Strptime;
+  $date_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $date_format,
+    on_error => 'croak',
+  );
+  return $date_parser->parse_datetime(shift);
+}
+
+sub format_date {
+  shift;
+  require DateTime::Format::Strptime;
+  $date_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $date_format,
+    on_error => 'croak',
+  );
+  return $date_parser->format_datetime(shift);
+}
+
+1;
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/InterBase.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/InterBase.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/InterBase.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,320 @@
+package DBIx::Class::Storage::DBI::InterBase;
+
+use strict;
+use warnings;
+use base qw/DBIx::Class::Storage::DBI/;
+use mro 'c3';
+use List::Util();
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::InterBase - Driver for the Firebird RDBMS
+
+=head1 DESCRIPTION
+
+This class implements autoincrements for Firebird using C<RETURNING> as well as
+L<auto_nextval|DBIx::Class::ResultSource/auto_nextval>, sets the limit dialect to
+C<FIRST X SKIP X>, and provides L<DBIx::Class::InflateColumn::DateTime> support.
+
+You need to use either the
+L<disable_sth_caching|DBIx::Class::Storage::DBI/disable_sth_caching> option or
+L</connect_call_use_softcommit> (see L</CAVEATS>) for your code to function
+correctly with this driver. Otherwise you will likely get bizarre error messages
+such as C<no statement executing>. The alternative is to use the
+L<ODBC|DBIx::Class::Storage::DBI::ODBC::Firebird> driver, which is more suitable
+for long running processes such as under L<Catalyst>.
+
+To turn on L<DBIx::Class::InflateColumn::DateTime> support, see
+L</connect_call_datetime_setup>.
+
+=cut
+
+sub _supports_insert_returning { 1 }
+
+sub _sequence_fetch {
+  my ($self, $nextval, $sequence) = @_;
+
+  if ($nextval ne 'nextval') {
+    $self->throw_exception("Can only fetch 'nextval' for a sequence");
+  }
+
+  $self->throw_exception('No sequence to fetch') unless $sequence;
+  
+  my ($val) = $self->_get_dbh->selectrow_array(
+'SELECT GEN_ID(' . $self->sql_maker->_quote($sequence) .
+', 1) FROM rdb$database');
+
+  return $val;
+} 
+
+sub _dbh_get_autoinc_seq {
+  my ($self, $dbh, $source, $col) = @_;
+
+  my $table_name = $source->from;
+  $table_name    = $$table_name if ref $table_name;
+  $table_name    = $self->sql_maker->quote_char ? $table_name : uc($table_name);
+
+  local $dbh->{LongReadLen} = 100000;
+  local $dbh->{LongTruncOk} = 1;
+
+  my $sth = $dbh->prepare(<<'EOF');
+SELECT t.rdb$trigger_source
+FROM rdb$triggers t
+WHERE t.rdb$relation_name = ?
+AND t.rdb$system_flag = 0 -- user defined
+AND t.rdb$trigger_type = 1 -- BEFORE INSERT
+EOF
+  $sth->execute($table_name);
+
+  while (my ($trigger) = $sth->fetchrow_array) {
+    my @trig_cols = map {
+      /^"([^"]+)/ ? $1 : uc($1)
+    } $trigger =~ /new\.("?\w+"?)/ig;
+
+    my ($quoted, $generator) = $trigger =~
+/(?:gen_id\s* \( \s* |next \s* value \s* for \s*)(")?(\w+)/ix;
+
+    if ($generator) {
+      $generator = uc $generator unless $quoted;
+
+      return $generator
+        if List::Util::first {
+          $self->sql_maker->quote_char ? ($_ eq $col) : (uc($_) eq uc($col))
+        } @trig_cols;
+    }
+  }
+
+  return undef;
+}
+
+# this sub stolen from DB2
+
+sub _sql_maker_opts {
+  my ( $self, $opts ) = @_;
+
+  if ( $opts ) {
+    $self->{_sql_maker_opts} = { %$opts };
+  }
+
+  return { limit_dialect => 'FirstSkip', %{$self->{_sql_maker_opts}||{}} };
+}
+
+sub _svp_begin {
+    my ($self, $name) = @_;
+
+    $self->_get_dbh->do("SAVEPOINT $name");
+}
+
+sub _svp_release {
+    my ($self, $name) = @_;
+
+    $self->_get_dbh->do("RELEASE SAVEPOINT $name");
+}
+
+sub _svp_rollback {
+    my ($self, $name) = @_;
+
+    $self->_get_dbh->do("ROLLBACK TO SAVEPOINT $name")
+}
+
+sub _ping {
+  my $self = shift;
+
+  my $dbh = $self->_dbh or return 0;
+
+  local $dbh->{RaiseError} = 1;
+  local $dbh->{PrintError} = 0;
+
+  eval {
+    $dbh->do('select 1 from rdb$database');
+  };
+
+  return $@ ? 0 : 1;
+}
+
+# We want dialect 3 for new features and quoting to work, DBD::InterBase uses
+# dialect 1 (interbase compat) by default.
+sub _init {
+  my $self = shift;
+  $self->_set_sql_dialect(3);
+}
+
+sub _set_sql_dialect {
+  my $self = shift;
+  my $val  = shift || 3;
+
+  my $dsn = $self->_dbi_connect_info->[0];
+
+  return if ref($dsn) eq 'CODE';
+
+  if ($dsn !~ /ib_dialect=/) {
+    $self->_dbi_connect_info->[0] = "$dsn;ib_dialect=$val";
+    my $connected = defined $self->_dbh;
+    $self->disconnect;
+    $self->ensure_connected if $connected;
+  }
+}
+
+sub _get_server_version {
+  my $self = shift;
+
+  return $self->next::method(@_) if ref $self ne __PACKAGE__;
+
+  local $SIG{__WARN__} = sub {}; # silence warning due to bug in DBD::InterBase
+
+  return $self->next::method(@_);
+}
+
+=head2 connect_call_use_softcommit
+
+Used as:
+
+  on_connect_call => 'use_softcommit'
+
+In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set the
+L<DBD::InterBase> C<ib_softcommit> option.
+
+You need either this option or C<< disable_sth_caching => 1 >> for
+L<DBIx::Class> code to function correctly (otherwise you may get C<no statement
+executing> errors.) Or use the L<ODBC|DBIx::Class::Storage::DBI::ODBC::Firebird>
+driver.
+
+The downside of using this option is that your process will B<NOT> see UPDATEs,
+INSERTs and DELETEs from other processes for already open statements.
+
+=cut
+
+sub connect_call_use_softcommit {
+  my $self = shift;
+
+  $self->_dbh->{ib_softcommit} = 1;
+}
+
+=head2 connect_call_datetime_setup
+
+Used as:
+
+  on_connect_call => 'datetime_setup'
+
+In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set the date and
+timestamp formats using:
+
+  $dbh->{ib_time_all} = 'ISO';
+
+See L<DBD::InterBase> for more details.
+
+The C<TIMESTAMP> data type supports up to 4 digits after the decimal point for
+second precision. The full precision is used.
+
+The C<DATE> data type stores the date portion only, and it B<MUST> be declared
+with:
+
+  data_type => 'date'
+
+in your Result class.
+
+Timestamp columns can be declared with either C<datetime> or C<timestamp>.
+
+You will need the L<DateTime::Format::Strptime> module for inflation to work.
+
+For L<DBIx::Class::Storage::DBI::ODBC::Firebird>, this is a noop and sub-second
+precision is not currently available.
+
+=cut
+
+sub connect_call_datetime_setup {
+  my $self = shift;
+
+  $self->_get_dbh->{ib_time_all} = 'ISO';
+}
+
+sub datetime_parser_type {
+  'DBIx::Class::Storage::DBI::InterBase::DateTime::Format'
+}
+
+package # hide from PAUSE
+  DBIx::Class::Storage::DBI::InterBase::DateTime::Format;
+
+my $timestamp_format = '%Y-%m-%d %H:%M:%S.%4N'; # %F %T
+my $date_format      = '%Y-%m-%d';
+
+my ($timestamp_parser, $date_parser);
+
+sub parse_datetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $timestamp_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $timestamp_format,
+    on_error => 'croak',
+  );
+  return $timestamp_parser->parse_datetime(shift);
+}
+
+sub format_datetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $timestamp_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $timestamp_format,
+    on_error => 'croak',
+  );
+  return $timestamp_parser->format_datetime(shift);
+}
+
+sub parse_date {
+  shift;
+  require DateTime::Format::Strptime;
+  $date_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $date_format,
+    on_error => 'croak',
+  );
+  return $date_parser->parse_datetime(shift);
+}
+
+sub format_date {
+  shift;
+  require DateTime::Format::Strptime;
+  $date_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $date_format,
+    on_error => 'croak',
+  );
+  return $date_parser->format_datetime(shift);
+}
+
+1;
+
+=head1 CAVEATS
+
+=over 4
+
+=item *
+
+with L</connect_call_use_softcommit>, you will not be able to see changes made
+to data in other processes. If this is an issue, use
+L<disable_sth_caching|DBIx::Class::Storage::DBI/disable_sth_caching> as a
+workaround for the C<no statement executing> errors, this of course adversely
+affects performance.
+
+Alternately, use the L<ODBC|DBIx::Class::Storage::DBI::ODBC::Firebird> driver.
+
+=item *
+
+C<last_insert_id> support by default only works for Firebird versions 2 or
+greater, L<auto_nextval|DBIx::Class::ResultSource/auto_nextval> however should
+work with earlier versions.
+
+=item *
+
+Sub-second precision for TIMESTAMPs is not currently available when using the
+L<ODBC|DBIx::Class::Storage::DBI::ODBC::Firebird> driver.
+
+=back
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/MSSQL.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/MSSQL.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/MSSQL.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -3,7 +3,7 @@
 use strict;
 use warnings;
 
-use base qw/DBIx::Class::Storage::DBI::AmbiguousGlob DBIx::Class::Storage::DBI/;
+use base qw/DBIx::Class::Storage::DBI::UniqueIdentifier/;
 use mro 'c3';
 
 use List::Util();
@@ -14,66 +14,78 @@
 
 __PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks::MSSQL');
 
+sub _set_identity_insert {
+  my ($self, $table) = @_;
+
+  my $sql = sprintf (
+    'SET IDENTITY_INSERT %s ON',
+    $self->sql_maker->_quote ($table),
+  );
+
+  my $dbh = $self->_get_dbh;
+  eval { $dbh->do ($sql) };
+  if ($@) {
+    $self->throw_exception (sprintf "Error executing '%s': %s",
+      $sql,
+      $dbh->errstr,
+    );
+  }
+}
+
+sub _unset_identity_insert {
+  my ($self, $table) = @_;
+
+  my $sql = sprintf (
+    'SET IDENTITY_INSERT %s OFF',
+    $self->sql_maker->_quote ($table),
+  );
+
+  my $dbh = $self->_get_dbh;
+  $dbh->do ($sql);
+}
+
 sub insert_bulk {
   my $self = shift;
   my ($source, $cols, $data) = @_;
 
-  my $identity_insert = 0;
+  my $is_identity_insert = (List::Util::first
+      { $source->column_info ($_)->{is_auto_increment} }
+      (@{$cols})
+  )
+     ? 1
+     : 0;
 
-  COLUMNS:
-  foreach my $col (@{$cols}) {
-    if ($source->column_info($col)->{is_auto_increment}) {
-      $identity_insert = 1;
-      last COLUMNS;
-    }
+  if ($is_identity_insert) {
+     $self->_set_identity_insert ($source->name);
   }
 
-  if ($identity_insert) {
-    my $table = $source->from;
-    $self->_get_dbh->do("SET IDENTITY_INSERT $table ON");
-  }
-
   $self->next::method(@_);
 
-  if ($identity_insert) {
-    my $table = $source->from;
-    $self->_get_dbh->do("SET IDENTITY_INSERT $table OFF");
+  if ($is_identity_insert) {
+     $self->_unset_identity_insert ($source->name);
   }
 }
 
-# support MSSQL GUID column types
-
 sub insert {
   my $self = shift;
   my ($source, $to_insert) = @_;
 
-  my $updated_cols = {};
+  my $supplied_col_info = $self->_resolve_column_info($source, [keys %$to_insert] );
 
-  my %guid_cols;
-  my @pk_cols = $source->primary_columns;
-  my %pk_cols;
-  @pk_cols{@pk_cols} = ();
+  my $is_identity_insert = (List::Util::first { $_->{is_auto_increment} } (values %$supplied_col_info) )
+     ? 1
+     : 0;
 
-  my @pk_guids = grep {
-    $source->column_info($_)->{data_type} =~ /^uniqueidentifier/i
-  } @pk_cols;
+  if ($is_identity_insert) {
+     $self->_set_identity_insert ($source->name);
+  }
 
-  my @auto_guids = grep {
-    $source->column_info($_)->{data_type} =~ /^uniqueidentifier/i
-    &&
-    $source->column_info($_)->{auto_nextval}
-  } grep { not exists $pk_cols{$_} } $source->columns;
+  my $updated_cols = $self->next::method(@_);
 
-  my @get_guids_for =
-    grep { not exists $to_insert->{$_} } (@pk_guids, @auto_guids);
-
-  for my $guid_col (@get_guids_for) {
-    my ($new_guid) = $self->_get_dbh->selectrow_array('SELECT NEWID()');
-    $updated_cols->{$guid_col} = $to_insert->{$guid_col} = $new_guid;
+  if ($is_identity_insert) {
+     $self->_unset_identity_insert ($source->name);
   }
 
-  $updated_cols = { %$updated_cols, %{ $self->next::method(@_) } };
-
   return $updated_cols;
 }
 
@@ -87,7 +99,9 @@
 
     for my $col (keys %$fields) {
       # $ident is a result source object with INSERT/UPDATE ops
-      if ($ident->column_info ($col)->{data_type} =~ /^money\z/i) {
+      if ($ident->column_info ($col)->{data_type}
+         &&
+         $ident->column_info ($col)->{data_type} =~ /^money\z/i) {
         my $val = $fields->{$col};
         $fields->{$col} = \['CAST(? AS MONEY)', [ $col => $val ]];
       }
@@ -99,14 +113,6 @@
   if ($op eq 'insert') {
     $sql .= ';SELECT SCOPE_IDENTITY()';
 
-    my $col_info = $self->_resolve_column_info($ident, [map $_->[0], @{$bind}]);
-    if (List::Util::first { $_->{is_auto_increment} } (values %$col_info) ) {
-
-      my $table = $ident->from;
-      my $identity_insert_on = "SET IDENTITY_INSERT $table ON";
-      my $identity_insert_off = "SET IDENTITY_INSERT $table OFF";
-      $sql = "$identity_insert_on; $sql; $identity_insert_off";
-    }
   }
 
   return ($sql, $bind);
@@ -122,7 +128,7 @@
 
     # this should bring back the result of SELECT SCOPE_IDENTITY() we tacked
     # on in _prep_for_execute above
-    my ($identity) = $sth->fetchrow_array;
+    my ($identity) = eval { $sth->fetchrow_array };
 
     # SCOPE_IDENTITY failed, but we can do something else
     if ( (! $identity) && $self->_identity_method) {
@@ -140,6 +146,37 @@
 
 sub last_insert_id { shift->_identity }
 
+#
+# MSSQL is retarded wrt ordered subselects. One needs to add a TOP
+# to *all* subqueries, but one also can't use TOP 100 PERCENT
+# http://sqladvice.com/forums/permalink/18496/22931/ShowThread.aspx#22931
+#
+sub _select_args_to_query {
+  my $self = shift;
+
+  my ($sql, $prep_bind, @rest) = $self->next::method (@_);
+
+  # see if this is an ordered subquery
+  my $attrs = $_[3];
+  if (
+    $sql !~ /^ \s* SELECT \s+ TOP \s+ \d+ \s+ /xi
+      &&
+    scalar $self->_parse_order_by ($attrs->{order_by}) 
+  ) {
+    $self->throw_exception(
+      'An ordered subselect encountered - this is not safe! Please see "Ordered Subselects" in DBIx::Class::Storage::DBI::MSSQL
+    ') unless $attrs->{unsafe_subselect_ok};
+    my $max = 2 ** 32;
+    $sql =~ s/^ \s* SELECT \s/SELECT TOP $max /xi;
+  }
+
+  return wantarray
+    ? ($sql, $prep_bind, @rest)
+    : \[ "($sql)", @$prep_bind ]
+  ;
+}
+
+
 # savepoint syntax is the same as in Sybase ASE
 
 sub _svp_begin {
@@ -157,26 +194,107 @@
   $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
 }
 
-sub build_datetime_parser {
-  my $self = shift;
-  my $type = "DateTime::Format::Strptime";
-  eval "use ${type}";
-  $self->throw_exception("Couldn't load ${type}: $@") if $@;
-  return $type->new( pattern => '%Y-%m-%d %H:%M:%S' );  # %F %T
-}
+sub datetime_parser_type {
+  'DBIx::Class::Storage::DBI::MSSQL::DateTime::Format'
+} 
 
 sub sqlt_type { 'SQLServer' }
 
-sub _sql_maker_opts {
-  my ( $self, $opts ) = @_;
+sub sql_maker {
+  my $self = shift;
 
-  if ( $opts ) {
-    $self->{_sql_maker_opts} = { %$opts };
+  unless ($self->_sql_maker) {
+    unless ($self->{_sql_maker_opts}{limit_dialect}) {
+      my $have_rno = 0;
+
+      if (exists $self->_server_info->{normalized_dbms_version}) {
+        $have_rno = 1 if $self->_server_info->{normalized_dbms_version} >= 9;
+      }
+      else {
+        # User is connecting via DBD::Sybase and has no permission to run
+        # stored procedures like xp_msver, or version detection failed for some
+        # other reason.
+        # So, we use a query to check if RNO is implemented.
+        $have_rno = 1 if (eval { local $@; ($self->_get_dbh
+          ->selectrow_array('SELECT row_number() OVER (ORDER BY rand())')
+          )[0] });
+      }
+
+      $self->{_sql_maker_opts} = {
+        limit_dialect => ($have_rno ? 'RowNumberOver' : 'Top'),
+        %{$self->{_sql_maker_opts}||{}}
+      };
+    }
+
+    my $maker = $self->next::method (@_);
   }
 
-  return { limit_dialect => 'Top', %{$self->{_sql_maker_opts}||{}} };
+  return $self->_sql_maker;
 }
 
+sub _ping {
+  my $self = shift;
+
+  my $dbh = $self->_dbh or return 0;
+
+  local $dbh->{RaiseError} = 1;
+  local $dbh->{PrintError} = 0;
+
+  eval {
+    $dbh->do('select 1');
+  };
+
+  return $@ ? 0 : 1;
+}
+
+package # hide from PAUSE
+  DBIx::Class::Storage::DBI::MSSQL::DateTime::Format;
+
+my $datetime_format      = '%Y-%m-%d %H:%M:%S.%3N'; # %F %T 
+my $smalldatetime_format = '%Y-%m-%d %H:%M:%S';
+
+my ($datetime_parser, $smalldatetime_parser);
+
+sub parse_datetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $datetime_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $datetime_format,
+    on_error => 'croak',
+  );
+  return $datetime_parser->parse_datetime(shift);
+}
+
+sub format_datetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $datetime_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $datetime_format,
+    on_error => 'croak',
+  );
+  return $datetime_parser->format_datetime(shift);
+}
+
+sub parse_smalldatetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $smalldatetime_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $smalldatetime_format,
+    on_error => 'croak',
+  );
+  return $smalldatetime_parser->parse_datetime(shift);
+}
+
+sub format_smalldatetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $smalldatetime_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $smalldatetime_format,
+    on_error => 'croak',
+  );
+  return $smalldatetime_parser->format_datetime(shift);
+}
+
 1;
 
 =head1 NAME
@@ -192,6 +310,8 @@
 
 =head1 IMPLEMENTATION NOTES
 
+=head2 IDENTITY information
+
 Microsoft SQL Server supports three methods of retrieving the IDENTITY
 value for inserted row: IDENT_CURRENT, @@IDENTITY, and SCOPE_IDENTITY().
 SCOPE_IDENTITY is used here because it is the safest.  However, it must
@@ -210,9 +330,67 @@
 inserts into another table with an identity will give erroneous results on
 recent versions of SQL Server.
 
+=head2 identity insert
+
+Be aware that we have tried to make things as simple as possible for our users.
+For MSSQL that means that when a user tries to create a row, while supplying an
+explicit value for an autoincrementing column, we will try to issue the
+appropriate database call to make this possible, namely C<SET IDENTITY_INSERT
+$table_name ON>. Unfortunately this operation in MSSQL requires the
+C<db_ddladmin> privilege, which is normally not included in the standard
+write-permissions.
+
+=head2 Ordered Subselects
+
+If you attempted the following query (among many others) in Microsoft SQL
+Server
+
+ $rs->search ({}, {
+  prefetch => 'relation',
+  rows => 2,
+  offset => 3,
+ });
+
+You may be surprised to receive an exception. The reason for this is a quirk
+in the MSSQL engine itself, and sadly doesn't have a sensible workaround due
+to the way DBIC is built. DBIC can do truly wonderful things with the aid of
+subselects, and does so automatically when necessary. The list of situations
+when a subselect is necessary is long and still changes often, so it can not
+be exhaustively enumerated here. The general rule of thumb is a joined
+L<has_many|DBIx::Class::Relationship/has_many> relationship with limit/group
+applied to the left part of the join.
+
+In its "pursuit of standards" Microsoft SQL Server goes to great lengths to
+forbid the use of ordered subselects. This breaks a very useful group of
+searches like "Give me things number 4 to 6 (ordered by name), and prefetch
+all their relations, no matter how many". While there is a hack which fools
+the syntax checker, the optimizer may B<still elect to break the subselect>.
+Testing has determined that while such breakage does occur (the test suite
+contains an explicit test which demonstrates the problem), it is relatively
+rare. The benefits of ordered subselects are on the other hand too great to be
+outright disabled for MSSQL.
+
+Thus a compromise between usability and perfection is the MSSQL-specific
+L<resultset attribute|DBIx::Class::ResultSet/ATTRIBUTES> C<unsafe_subselect_ok>.
+It is deliberately not possible to set this on the Storage level, as the user
+should inspect (and preferably regression-test) the return of every such
+ResultSet individually. The example above would work if written like:
+
+ $rs->search ({}, {
+  unsafe_subselect_ok => 1,
+  prefetch => 'relation',
+  rows => 2,
+  offset => 3,
+ });
+
+If it is possible to rewrite the search() in a way that will avoid the need
+for this flag - you are urged to do so. If DBIC internals insist that an
+ordered subselect is necessary for an operation, and you believe there is a
+different/better way to get the same result - please file a bug report.
+
 =head1 AUTHOR
 
-See L<DBIx::Class/CONTRIBUTORS>.
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
 
 =head1 LICENSE
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/MultiColumnIn.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/MultiColumnIn.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/MultiColumnIn.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -17,7 +17,7 @@
 The storage class for any such RDBMS should inherit from this class, in order
 to dramatically speed up update/delete operations on joined multipk resultsets.
 
-At this point the only overriden method is C<_multipk_update_delete()>
+At this point the only overridden method is C<_multipk_update_delete()>
 
 =cut
 
@@ -26,7 +26,7 @@
   my ($rs, $op, $values) = @_;
 
   my $rsrc = $rs->result_source;
-  my @pcols = $rsrc->primary_columns;
+  my @pcols = $rsrc->_pri_cols;
   my $attrs = $rs->_resolved_attrs;
 
   # naive check - this is an internal method after all, we should know what we are doing 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/NoBindVars.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/NoBindVars.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -40,24 +40,32 @@
 sub _prep_for_execute {
   my $self = shift;
 
-  my ($op, $extra_bind, $ident) = @_;
-
   my ($sql, $bind) = $self->next::method(@_);
 
-  # stringify args, quote via $dbh, and manually insert
+  # stringify bind args, quote via $dbh, and manually insert
+  #my ($op, $extra_bind, $ident, $args) = @_;
+  my $ident = $_[2];
 
   my @sql_part = split /\?/, $sql;
   my $new_sql;
 
+  my $col_info = $self->_resolve_column_info($ident, [ map $_->[0], @$bind ]);
+
   foreach my $bound (@$bind) {
     my $col = shift @$bound;
-    my $datatype = 'FIXME!!!';
+
+    my $datatype = $col_info->{$col}{data_type};
+
     foreach my $data (@$bound) {
-        if(ref $data) {
-            $data = ''.$data;
-        }
-        $data = $self->_dbh->quote($data);
-        $new_sql .= shift(@sql_part) . $data;
+      $data = ''.$data if ref $data;
+
+      $data = $self->_prep_interpolated_value($datatype, $data)
+        if $datatype;
+
+      $data = $self->_dbh->quote($data)
+        unless $self->interpolate_unquoted($datatype, $data);
+
+      $new_sql .= shift(@sql_part) . $data;
     }
   }
   $new_sql .= join '', @sql_part;
@@ -65,12 +73,44 @@
   return ($new_sql, []);
 }
 
+=head2 interpolate_unquoted
+
+This method is called by L</_prep_for_execute> for every column in
+order to determine if its value should be quoted or not. The arguments
+are the current column data type and the actual bind value. The return
+value is interpreted as: true - do not quote, false - do quote. You should
+override this in your Storage::DBI::<database> subclass, if your RDBMS
+does not like quotes around certain datatypes (e.g. Sybase and integer
+columns). The default method always returns false (do quote).
+
+ WARNING!!!
+
+ Always validate that the bind-value is valid for the current datatype.
+ Otherwise you may very well open the door to SQL injection attacks.
+
+=cut
+
+sub interpolate_unquoted {
+  #my ($self, $datatype, $value) = @_;
+  return 0;
+}
+
+=head2 _prep_interpolated_value
+
+Given a datatype and the value to be inserted directly into a SQL query, returns
+the necessary string to represent that value (by e.g. adding a '$' sign)
+
+=cut
+
+sub _prep_interpolated_value {
+  #my ($self, $datatype, $value) = @_;
+  return $_[2];
+}
+
 =head1 AUTHORS
 
-Brandon Black <blblack at gmail.com>
+See L<DBIx::Class/CONTRIBUTORS>
 
-Trym Skaar <trym at tryms.no>
-
 =head1 LICENSE
 
 You may distribute this code under the same terms as Perl itself.

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -79,7 +79,7 @@
 
 =head1 IMPLEMENTATION NOTES
 
-MS Access supports the @@IDENTITY function for retriving the id of the latest inserted row.
+MS Access supports the @@IDENTITY function for retrieving the id of the latest inserted row.
 @@IDENTITY is global to the connection, so to support the possibility of getting the last inserted
 id for different tables, the insert() function stores the inserted id on a per table basis.
 last_insert_id() then just returns the stored value.

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -43,8 +43,8 @@
 
 =head1 SYNOPSIS
 
-  # In your table classes
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  # In your result (table) classes
+  use base 'DBIx::Class::Core';
   __PACKAGE__->set_primary_key('id');
 
 

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/Firebird.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/Firebird.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/Firebird.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,95 @@
+package DBIx::Class::Storage::DBI::ODBC::Firebird;
+
+use strict;
+use warnings;
+use base qw/DBIx::Class::Storage::DBI::InterBase/;
+use mro 'c3';
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ODBC::Firebird - Driver for using the Firebird RDBMS
+through ODBC
+
+=head1 DESCRIPTION
+
+Most functionality is provided by L<DBIx::Class::Storage::DBI::InterBase>, see
+that module for details.
+
+To build the ODBC driver for Firebird on Linux for unixODBC, see:
+
+L<http://www.firebirdnews.org/?p=1324>
+
+This driver does not suffer from the nested statement handles across commits
+issue that the L<DBD::InterBase|DBIx::Class::Storage::DBI::InterBase> based
+driver does. This makes it more suitable for long running processes such as
+under L<Catalyst>.
+
+=cut
+
+# XXX seemingly no equivalent to ib_time_all from DBD::InterBase via ODBC
+sub connect_call_datetime_setup { 1 }
+
+# we don't need DBD::InterBase-specific initialization
+sub _init { 1 }
+
+# ODBC uses dialect 3 by default, good
+sub _set_sql_dialect { 1 }
+
+# releasing savepoints doesn't work, but that shouldn't matter
+sub _svp_release { 1 }
+
+sub datetime_parser_type {
+  'DBIx::Class::Storage::DBI::ODBC::Firebird::DateTime::Format'
+}
+
+package # hide from PAUSE
+  DBIx::Class::Storage::DBI::ODBC::Firebird::DateTime::Format;
+
+# inherit parse/format date
+our @ISA = 'DBIx::Class::Storage::DBI::InterBase::DateTime::Format';
+
+my $timestamp_format = '%Y-%m-%d %H:%M:%S'; # %F %T, no fractional part
+my $timestamp_parser;
+
+sub parse_datetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $timestamp_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $timestamp_format,
+    on_error => 'croak',
+  );
+  return $timestamp_parser->parse_datetime(shift);
+}
+
+sub format_datetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $timestamp_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $timestamp_format,
+    on_error => 'croak',
+  );
+  return $timestamp_parser->format_datetime(shift);
+}
+
+1;
+
+=head1 CAVEATS
+
+=over 4
+
+=item *
+
+This driver (unlike L<DBD::InterBase>) does not currently support reading or
+writing C<TIMESTAMP> values with sub-second precision.
+
+=back
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -5,7 +5,6 @@
 use base qw/DBIx::Class::Storage::DBI::MSSQL/;
 use mro 'c3';
 
-use Carp::Clan qw/^DBIx::Class/;
 use List::Util();
 use Scalar::Util ();
 
@@ -38,7 +37,7 @@
 
   on_connect_call => 'use_dynamic_cursors'
 
-in your L<DBIx::Class::Storage::DBI/connect_info> as one way to enable multiple
+in your L<connect_info|DBIx::Class::Storage::DBI/connect_info> as one way to enable multiple
 concurrent statements.
 
 Will add C<< odbc_cursortype => 2 >> to your DBI connection attributes. See
@@ -62,7 +61,7 @@
   my $self = shift;
 
   if (ref($self->_dbi_connect_info->[0]) eq 'CODE') {
-    croak 'cannot set DBI attributes on a CODE ref connect_info';
+    $self->throw_exception ('Cannot set DBI attributes on a CODE ref connect_info');
   }
 
   my $dbi_attrs = $self->_dbi_connect_info->[-1];
@@ -75,16 +74,15 @@
   if (not exists $dbi_attrs->{odbc_cursortype}) {
     # turn on support for multiple concurrent statements, unless overridden
     $dbi_attrs->{odbc_cursortype} = 2;
-    my $connected = defined $self->_dbh;
-    $self->disconnect;
-    $self->ensure_connected if $connected;
+    $self->disconnect; # resetting dbi attrs, so have to reconnect
+    $self->ensure_connected;
     $self->_set_dynamic_cursors;
   }
 }
 
 sub _set_dynamic_cursors {
   my $self = shift;
-  my $dbh  = $self->_dbh;
+  my $dbh  = $self->_get_dbh;
 
   eval {
     local $dbh->{RaiseError} = 1;
@@ -92,7 +90,7 @@
     $dbh->do('SELECT @@IDENTITY');
   };
   if ($@) {
-    croak <<'EOF';
+    $self->throw_exception (<<'EOF');
 
 Your drivers do not seem to support dynamic cursors (odbc_cursortype => 2),
 if you're using FreeTDS, make sure to set tds_version to 8.0 or greater.
@@ -103,12 +101,18 @@
   $self->_identity_method('@@identity');
 }
 
-sub _rebless {
-  no warnings 'uninitialized';
+sub _init {
   my $self = shift;
 
-  if (ref($self->_dbi_connect_info->[0]) ne 'CODE' &&
-      eval { $self->_dbi_connect_info->[-1]{odbc_cursortype} } == 2) {
+  no warnings qw/uninitialized/;
+
+  if (
+    ref($self->_dbi_connect_info->[0]) ne 'CODE'
+      &&
+    ref ($self->_dbi_connect_info->[-1]) eq 'HASH'
+      &&
+    $self->_dbi_connect_info->[-1]{odbc_cursortype} == 2
+  ) {
     $self->_set_dynamic_cursors;
     return;
   }
@@ -160,7 +164,7 @@
   my $dsn = $self->_dbi_connect_info->[0];
 
   if (ref($dsn) eq 'CODE') {
-    croak 'cannot change the DBI DSN on a CODE ref connect_info';
+    $self->throw_exception('cannot change the DBI DSN on a CODE ref connect_info');
   }
 
   if ($dsn !~ /MARS_Connection=/) {

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/SQL_Anywhere.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/SQL_Anywhere.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC/SQL_Anywhere.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,28 @@
+package DBIx::Class::Storage::DBI::ODBC::SQL_Anywhere;
+
+use strict;
+use warnings;
+use base qw/DBIx::Class::Storage::DBI::SQLAnywhere/;
+use mro 'c3';
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::ODBC::SQL_Anywhere - Driver for using Sybase SQL
+Anywhere through ODBC
+
+=head1 SYNOPSIS
+
+All functionality is provided by L<DBIx::Class::Storage::DBI::SQLAnywhere>, see
+that module for details.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/ODBC.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -21,33 +21,17 @@
     }
 }
 
-sub _dbh_last_insert_id {
-    my ($self, $dbh, $source, $col) = @_;
-
-    # punt: if there is no derived class for the specific backend, attempt
-    # to use the DBI->last_insert_id, which may not be sufficient (see the
-    # discussion of last_insert_id in perldoc DBI)
-    return $dbh->last_insert_id(undef, undef, $source->from, $col);
-}
-
 1;
 
 =head1 NAME
 
 DBIx::Class::Storage::DBI::ODBC - Base class for ODBC drivers
 
-=head1 SYNOPSIS
-
-  # In your table classes
-  __PACKAGE__->load_components(qw/Core/);
-
-
 =head1 DESCRIPTION
 
 This class simply provides a mechanism for discovering and loading a sub-class
 for a specific ODBC backend.  It should be transparent to the user.
 
-
 =head1 AUTHORS
 
 Marc Mims C<< <marc at questright.com> >>

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -2,6 +2,8 @@
 
 use strict;
 use warnings;
+use Scope::Guard ();
+use Context::Preserve ();
 
 =head1 NAME
 
@@ -9,15 +11,17 @@
 
 =head1 SYNOPSIS
 
-  # In your table classes
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  # In your result (table) classes
+  use base 'DBIx::Class::Core';
   __PACKAGE__->add_columns({ id => { sequence => 'mysequence', auto_nextval => 1 } });
   __PACKAGE__->set_primary_key('id');
   __PACKAGE__->sequence('mysequence');
 
 =head1 DESCRIPTION
 
-This class implements autoincrements for Oracle.
+This class implements base Oracle support. The subclass
+L<DBIx::Class::Storage::DBI::Oracle::WhereJoins> is for C<(+)> joins in Oracle
+versions before 9.
 
 =head1 METHODS
 
@@ -26,6 +30,22 @@
 use base qw/DBIx::Class::Storage::DBI/;
 use mro 'c3';
 
+sub deployment_statements {
+  my $self = shift;;
+  my ($schema, $type, $version, $dir, $sqltargs, @rest) = @_;
+
+  $sqltargs ||= {};
+  my $quote_char = $self->schema->storage->sql_maker->quote_char;
+  $sqltargs->{quote_table_names} = $quote_char ? 1 : 0;
+  $sqltargs->{quote_field_names} = $quote_char ? 1 : 0;
+
+  my $oracle_version = eval { $self->_get_dbh->get_info(18) };
+
+  $sqltargs->{producer_args}{oracle_version} = $oracle_version;
+
+  $self->next::method($schema, $type, $version, $dir, $sqltargs, @rest);
+}
+
 sub _dbh_last_insert_id {
   my ($self, $dbh, $source, @columns) = @_;
   my @ids = ();
@@ -40,38 +60,42 @@
 sub _dbh_get_autoinc_seq {
   my ($self, $dbh, $source, $col) = @_;
 
-  # look up the correct sequence automatically
-  my $sql = q{
-    SELECT trigger_body FROM ALL_TRIGGERS t
-    WHERE t.table_name = ?
-    AND t.triggering_event = 'INSERT'
-    AND t.status = 'ENABLED'
-  };
+  my $sql_maker = $self->sql_maker;
 
+  my $source_name;
+  if ( ref $source->name eq 'SCALAR' ) {
+    $source_name = ${$source->name};
+  }
+  else {
+    $source_name = $source->name;
+  }
+  $source_name = uc($source_name) unless $sql_maker->quote_char;
+
   # trigger_body is a LONG
   local $dbh->{LongReadLen} = 64 * 1024 if ($dbh->{LongReadLen} < 64 * 1024);
 
-  my $sth;
+  # disable default bindtype
+  local $sql_maker->{bindtype} = 'normal';
 
-  # check for fully-qualified name (eg. SCHEMA.TABLENAME)
-  if ( my ( $schema, $table ) = $source->name =~ /(\w+)\.(\w+)/ ) {
-    $sql = q{
-      SELECT trigger_body FROM ALL_TRIGGERS t
-      WHERE t.owner = ? AND t.table_name = ?
-      AND t.triggering_event = 'INSERT'
-      AND t.status = 'ENABLED'
-    };
-    $sth = $dbh->prepare($sql);
-    $sth->execute( uc($schema), uc($table) );
-  }
-  else {
-    $sth = $dbh->prepare($sql);
-    $sth->execute( uc( $source->name ) );
-  }
+  # look up the correct sequence automatically
+  my ( $schema, $table ) = $source_name =~ /(\w+)\.(\w+)/;
+  my ($sql, @bind) = $sql_maker->select (
+    'ALL_TRIGGERS',
+    ['trigger_body'],
+    {
+      $schema ? (owner => $schema) : (),
+      table_name => $table || $source_name,
+      triggering_event => 'INSERT',
+      status => 'ENABLED',
+     },
+  );
+  my $sth = $dbh->prepare($sql);
+  $sth->execute (@bind);
+
   while (my ($insert_trigger) = $sth->fetchrow_array) {
-    return uc($1) if $insert_trigger =~ m!(\w+)\.nextval!i; # col name goes here???
+    return $1 if $insert_trigger =~ m!("?\w+"?)\.nextval!i; # col name goes here???
   }
-  $self->throw_exception("Unable to find a sequence INSERT trigger on table '" . $source->name . "'.");
+  $self->throw_exception("Unable to find a sequence INSERT trigger on table '$source_name'.");
 }
 
 sub _sequence_fetch {
@@ -86,9 +110,10 @@
   my $dbh = $self->_dbh or return 0;
 
   local $dbh->{RaiseError} = 1;
+  local $dbh->{PrintError} = 0;
 
   eval {
-    $dbh->do("select 1 from dual");
+    $dbh->do('select 1 from dual');
   };
 
   return $@ ? 0 : 1;
@@ -125,7 +150,7 @@
 
   $self->throw_exception($exception) if $exception;
 
-  wantarray ? @res : $res[0]
+  $wantarray ? @res : $res[0]
 }
 
 =head2 get_autoinc_seq
@@ -150,7 +175,7 @@
 sub columns_info_for {
   my ($self, $table) = @_;
 
-  $self->next::method(uc($table));
+  $self->next::method($table);
 }
 
 =head2 datetime_parser_type
@@ -168,10 +193,10 @@
 
     on_connect_call => 'datetime_setup'
 
-In L<DBIx::Class::Storage::DBI/connect_info> to set the session nls date, and
-timestamp values for use with L<DBIx::Class::InflateColumn::DateTime> and the
-necessary environment variables for L<DateTime::Format::Oracle>, which is used
-by it.
+In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set the session nls
+date, and timestamp values for use with L<DBIx::Class::InflateColumn::DateTime>
+and the necessary environment variables for L<DateTime::Format::Oracle>, which
+is used by it.
 
 Maximum allowable precision is used, unless the environment variables have
 already been set.
@@ -199,19 +224,17 @@
   my $timestamp_tz_format = $ENV{NLS_TIMESTAMP_TZ_FORMAT} ||=
     'YYYY-MM-DD HH24:MI:SS.FF TZHTZM';
 
-  $self->_do_query("alter session set nls_date_format = '$date_format'");
   $self->_do_query(
-"alter session set nls_timestamp_format = '$timestamp_format'");
+    "alter session set nls_date_format = '$date_format'"
+  );
   $self->_do_query(
-"alter session set nls_timestamp_tz_format='$timestamp_tz_format'");
+    "alter session set nls_timestamp_format = '$timestamp_format'"
+  );
+  $self->_do_query(
+    "alter session set nls_timestamp_tz_format='$timestamp_tz_format'"
+  );
 }
 
-sub _svp_begin {
-    my ($self, $name) = @_;
-
-    $self->_get_dbh->do("SAVEPOINT $name");
-}
-
 =head2 source_bind_attributes
 
 Handle LOB types in Oracle.  Under a certain size (4k?), you can get away
@@ -229,43 +252,124 @@
 
 =cut
 
-sub source_bind_attributes 
+sub source_bind_attributes
 {
-	require DBD::Oracle;
-	my $self = shift;
-	my($source) = @_;
+  require DBD::Oracle;
+  my $self = shift;
+  my($source) = @_;
 
-	my %bind_attributes;
+  my %bind_attributes;
 
-	foreach my $column ($source->columns) {
-		my $data_type = $source->column_info($column)->{data_type} || '';
-		next unless $data_type;
+  foreach my $column ($source->columns) {
+    my $data_type = $source->column_info($column)->{data_type} || '';
+    next unless $data_type;
 
-		my %column_bind_attrs = $self->bind_attribute_by_data_type($data_type);
+    my %column_bind_attrs = $self->bind_attribute_by_data_type($data_type);
 
-		if ($data_type =~ /^[BC]LOB$/i) {
-			$column_bind_attrs{'ora_type'} = uc($data_type) eq 'CLOB' ?
-				DBD::Oracle::ORA_CLOB() :
-				DBD::Oracle::ORA_BLOB();
-			$column_bind_attrs{'ora_field'} = $column;
-		}
+    if ($data_type =~ /^[BC]LOB$/i) {
+      if ($DBD::Oracle::VERSION eq '1.23') {
+        $self->throw_exception(
+"BLOB/CLOB support in DBD::Oracle == 1.23 is broken, use an earlier or later ".
+"version.\n\nSee: https://rt.cpan.org/Public/Bug/Display.html?id=46016\n"
+        );
+      }
 
-		$bind_attributes{$column} = \%column_bind_attrs;
-	}
+      $column_bind_attrs{'ora_type'} = uc($data_type) eq 'CLOB'
+        ? DBD::Oracle::ORA_CLOB()
+        : DBD::Oracle::ORA_BLOB()
+      ;
+      $column_bind_attrs{'ora_field'} = $column;
+    }
 
-	return \%bind_attributes;
+    $bind_attributes{$column} = \%column_bind_attrs;
+  }
+
+  return \%bind_attributes;
 }
 
+sub _svp_begin {
+  my ($self, $name) = @_;
+  $self->_get_dbh->do("SAVEPOINT $name");
+}
+
 # Oracle automatically releases a savepoint when you start another one with the
 # same name.
 sub _svp_release { 1 }
 
 sub _svp_rollback {
-    my ($self, $name) = @_;
+  my ($self, $name) = @_;
+  $self->_get_dbh->do("ROLLBACK TO SAVEPOINT $name")
+}
 
-    $self->_get_dbh->do("ROLLBACK TO SAVEPOINT $name")
+=head2 relname_to_table_alias
+
+L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in
+queries.
+
+Unfortunately, Oracle doesn't support identifiers over 30 chars in length, so
+the L<DBIx::Class::Relationship> name is shortened and appended with half of an
+MD5 hash.
+
+See L<DBIx::Class::Storage/"relname_to_table_alias">.
+
+=cut
+
+sub relname_to_table_alias {
+  my $self = shift;
+  my ($relname, $join_count) = @_;
+
+  my $alias = $self->next::method(@_);
+
+  return $alias if length($alias) <= 30;
+
+  # get a base64 md5 of the alias with join_count
+  require Digest::MD5;
+  my $ctx = Digest::MD5->new;
+  $ctx->add($alias);
+  my $md5 = $ctx->b64digest;
+
+  # remove alignment mark just in case
+  $md5 =~ s/=*\z//;
+
+  # truncate and prepend to truncated relname without vowels
+  (my $devoweled = $relname) =~ s/[aeiou]//g;
+  my $shortened = substr($devoweled, 0, 18);
+
+  my $new_alias =
+    $shortened . '_' . substr($md5, 0, 30 - length($shortened) - 1);
+
+  return $new_alias;
 }
 
+=head2 with_deferred_fk_checks
+
+Runs a coderef between:
+
+  alter session set constraints = deferred
+  ...
+  alter session set constraints = immediate
+
+to defer foreign key checks.
+
+Constraints must be declared C<DEFERRABLE> for this to work.
+
+=cut
+
+sub with_deferred_fk_checks {
+  my ($self, $sub) = @_;
+
+  my $txn_scope_guard = $self->txn_scope_guard;
+
+  $self->_do_query('alter session set constraints = deferred');
+  
+  my $sg = Scope::Guard->new(sub {
+    $self->_do_query('alter session set constraints = immediate');
+  });
+
+  return Context::Preserve::preserve_context(sub { $sub->() },
+    after => sub { $txn_scope_guard->commit });
+}
+
 =head1 AUTHOR
 
 See L<DBIx::Class/CONTRIBUTORS>.

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Oracle/WhereJoins.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Oracle/WhereJoins.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Oracle/WhereJoins.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -23,8 +23,7 @@
 
 This module was originally written to support Oracle < 9i where ANSI joins
 weren't supported at all, but became the module for Oracle >= 8 because
-Oracle's optimising of ANSI joins is horrible.  (See:
-http://scsys.co.uk:8001/7495)
+Oracle's optimising of ANSI joins is horrible.
 
 =head1 SYNOPSIS
 
@@ -44,7 +43,7 @@
 It should properly support left joins, and right joins.  Full outer joins are
 not possible due to the fact that Oracle requires the entire query be written
 to union the results of a left and right join, and by the time this module is
-called to create the where query and table definition part of the sql query,
+called to create the where query and table definition part of the SQL query,
 it's already too late.
 
 =head1 METHODS

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Oracle.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Oracle.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Oracle.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -19,10 +19,8 @@
           ? 'DBIx::Class::Storage::DBI::Oracle::WhereJoins'
           : 'DBIx::Class::Storage::DBI::Oracle::Generic';
 
-        # Load and rebless
-        eval "require $class";
-
-        bless $self, $class unless $@;
+        $self->ensure_class_loaded ($class);
+        bless $self, $class;
     }
 }
 
@@ -32,11 +30,6 @@
 
 DBIx::Class::Storage::DBI::Oracle - Base class for Oracle driver
 
-=head1 SYNOPSIS
-
-  # In your table classes
-  __PACKAGE__->load_components(qw/Core/);
-
 =head1 DESCRIPTION
 
 This class simply provides a mechanism for discovering and loading a sub-class

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Pg.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Pg.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Pg.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -3,69 +3,162 @@
 use strict;
 use warnings;
 
-use base qw/DBIx::Class::Storage::DBI::MultiColumnIn/;
+use base qw/
+    DBIx::Class::Storage::DBI::MultiColumnIn
+/;
 use mro 'c3';
 
 use DBD::Pg qw(:pg_types);
+use Scope::Guard ();
+use Context::Preserve ();
 
 # Ask for a DBD::Pg with array support
-warn "DBD::Pg 2.9.2 or greater is strongly recommended\n"
+warn __PACKAGE__.": DBD::Pg 2.9.2 or greater is strongly recommended\n"
   if ($DBD::Pg::VERSION < 2.009002);  # pg uses (used?) version::qv()
 
+sub _supports_insert_returning {
+  my $self = shift;
+
+  return 1
+    if $self->_server_info->{normalized_dbms_version} >= 8.002;
+
+  return 0;
+}
+
 sub with_deferred_fk_checks {
   my ($self, $sub) = @_;
 
-  $self->_get_dbh->do('SET CONSTRAINTS ALL DEFERRED');
-  $sub->();
-}
+  my $txn_scope_guard = $self->txn_scope_guard;
 
-sub _dbh_last_insert_id {
-  my ($self, $dbh, $seq) = @_;
-  $dbh->last_insert_id(undef, undef, undef, undef, {sequence => $seq});
+  $self->_do_query('SET CONSTRAINTS ALL DEFERRED');
+
+  my $sg = Scope::Guard->new(sub {
+    $self->_do_query('SET CONSTRAINTS ALL IMMEDIATE');
+  });
+
+  return Context::Preserve::preserve_context(sub { $sub->() },
+    after => sub { $txn_scope_guard->commit });
 }
 
+# only used when INSERT ... RETURNING is disabled
 sub last_insert_id {
-  my ($self,$source,$col) = @_;
-  my $seq = ($source->column_info($col)->{sequence} ||= $self->get_autoinc_seq($source,$col));
-  $self->throw_exception("could not fetch primary key for " . $source->name . ", could not "
-    . "get autoinc sequence for $col (check that table and column specifications are correct "
-    . "and in the correct case)") unless defined $seq;
-  $self->dbh_do('_dbh_last_insert_id', $seq);
-}
+  my ($self,$source,@cols) = @_;
 
-sub _dbh_get_autoinc_seq {
-  my ($self, $dbh, $schema, $table, @pri) = @_;
+  my @values;
 
-  while (my $col = shift @pri) {
-    my $info = $dbh->column_info(undef,$schema,$table,$col)->fetchrow_hashref;
-    if(defined $info->{COLUMN_DEF} and
-       $info->{COLUMN_DEF} =~ /^nextval\(+'([^']+)'::(?:text|regclass)\)/) {
-      my $seq = $1;
-      # may need to strip quotes -- see if this works
-      return $seq =~ /\./ ? $seq : $info->{TABLE_SCHEM} . "." . $seq;
-    }
+  for my $col (@cols) {
+    my $seq = ( $source->column_info($col)->{sequence} ||= $self->dbh_do('_dbh_get_autoinc_seq', $source, $col) )
+      or $self->throw_exception( sprintf(
+        'could not determine sequence for column %s.%s, please consider adding a schema-qualified sequence to its column info',
+          $source->name,
+          $col,
+      ));
+
+    push @values, $self->_dbh->last_insert_id(undef, undef, undef, undef, {sequence => $seq});
   }
-  return;
+
+  return @values;
 }
 
-sub get_autoinc_seq {
-  my ($self,$source,$col) = @_;
+sub _sequence_fetch {
+  my ($self, $function, $sequence) = @_;
 
-  my @pri = $source->primary_columns;
+  $self->throw_exception('No sequence to fetch') unless $sequence;
 
+  my ($val) = $self->_get_dbh->selectrow_array(
+    sprintf ("select %s('%s')", $function, $sequence)
+  );
+
+  return $val;
+}
+
+sub _dbh_get_autoinc_seq {
+  my ($self, $dbh, $source, $col) = @_;
+
   my $schema;
   my $table = $source->name;
 
-  if (ref $table eq 'SCALAR') {
-    $table = $$table;
+  # deref table name if it needs it
+  $table = $$table
+      if ref $table eq 'SCALAR';
+
+  # parse out schema name if present
+  if( $table =~ /^(.+)\.(.+)$/ ) {
+    ( $schema, $table ) = ( $1, $2 );
   }
-  elsif ($table =~ /^(.+)\.(.+)$/) {
-    ($schema, $table) = ($1, $2);
+
+  # get the column default using a Postgres-specific pg_catalog query
+  my $seq_expr = $self->_dbh_get_column_default( $dbh, $schema, $table, $col );
+
+  # if no default value is set on the column, or if we can't parse the
+  # default value as a sequence, throw.
+  unless ( defined $seq_expr and $seq_expr =~ /^nextval\(+'([^']+)'::(?:text|regclass)\)/i ) {
+    $seq_expr = '' unless defined $seq_expr;
+    $schema = "$schema." if defined $schema && length $schema;
+    $self->throw_exception( sprintf (
+      'no sequence found for %s%s.%s, check the RDBMS table definition or explicitly set the '.
+      "'sequence' for this column in %s",
+        $schema ? "$schema." : '',
+        $table,
+        $col,
+        $source->source_name,
+    ));
   }
 
-  $self->dbh_do('_dbh_get_autoinc_seq', $schema, $table, @pri);
+  return $1;
 }
 
+# custom method for fetching column default, since column_info has a
+# bug with older versions of DBD::Pg
+sub _dbh_get_column_default {
+  my ( $self, $dbh, $schema, $table, $col ) = @_;
+
+  # Build and execute a query into the pg_catalog to find the Pg
+  # expression for the default value for this column in this table.
+  # If the table name is schema-qualified, query using that specific
+  # schema name.
+
+  # Otherwise, find the table in the standard Postgres way, using the
+  # search path.  This is done with the pg_catalog.pg_table_is_visible
+  # function, which returns true if a given table is 'visible',
+  # meaning the first table of that name to be found in the search
+  # path.
+
+  # I *think* we can be assured that this query will always find the
+  # correct column according to standard Postgres semantics.
+  #
+  # -- rbuels
+
+  my $sqlmaker = $self->sql_maker;
+  local $sqlmaker->{bindtype} = 'normal';
+
+  my ($where, @bind) = $sqlmaker->where ({
+    'a.attnum' => {'>', 0},
+    'c.relname' => $table,
+    'a.attname' => $col,
+    -not_bool => 'a.attisdropped',
+    (defined $schema && length $schema)
+      ? ( 'n.nspname' => $schema )
+      : ( -bool => \'pg_catalog.pg_table_is_visible(c.oid)' )
+  });
+
+  my ($seq_expr) = $dbh->selectrow_array(<<EOS,undef, at bind);
+
+SELECT
+  (SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)
+   FROM pg_catalog.pg_attrdef d
+   WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef)
+FROM pg_catalog.pg_class c
+     LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+     JOIN pg_catalog.pg_attribute a ON a.attrelid = c.oid
+$where
+
+EOS
+
+  return $seq_expr;
+}
+
+
 sub sqlt_type {
   return 'PostgreSQL';
 }
@@ -88,12 +181,6 @@
   }
 }
 
-sub _sequence_fetch {
-  my ( $self, $type, $seq ) = @_;
-  my ($id) = $self->_get_dbh->selectrow_array("SELECT nextval('${seq}')");
-  return $id;
-}
-
 sub _svp_begin {
     my ($self, $name) = @_;
 
@@ -114,14 +201,16 @@
 
 1;
 
+__END__
+
 =head1 NAME
 
 DBIx::Class::Storage::DBI::Pg - Automatic primary key class for PostgreSQL
 
 =head1 SYNOPSIS
 
-  # In your table classes
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  # In your result (table) classes
+  use base 'DBIx::Class::Core';
   __PACKAGE__->set_primary_key('id');
   __PACKAGE__->sequence('mysequence');
 
@@ -129,9 +218,30 @@
 
 This class implements autoincrements for PostgreSQL.
 
+=head1 POSTGRESQL SCHEMA SUPPORT
+
+This driver supports multiple PostgreSQL schemas, with one caveat: for
+performance reasons, data about the search path, sequence names, and
+so forth is queried as needed and CACHED for subsequent uses.
+
+For this reason, once your schema is instantiated, you should not
+change the PostgreSQL schema search path for that schema's database
+connection. If you do, Bad Things may happen.
+
+You should do any necessary manipulation of the search path BEFORE
+instantiating your schema object, or as part of the on_connect_do
+option to connect(), for example:
+
+   my $schema = My::Schema->connect
+                  ( $dsn,$user,$pass,
+                    { on_connect_do =>
+                        [ 'SET search_path TO myschema, foo, public' ],
+                    },
+                  );
+
 =head1 AUTHORS
 
-Marcus Ramberg <m.ramberg at cpan.org>
+See L<DBIx::Class/CONTRIBUTORS>
 
 =head1 LICENSE
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -19,7 +19,7 @@
 database's (L<DBIx::Class::Storage::DBI::Replicated::Replicant>), defines a
 method by which query load can be spread out across each replicant in the pool.
 
-This Balancer just get's whatever is the first replicant in the pool
+This Balancer just gets whichever is the first replicant in the pool.
 
 =head1 ATTRIBUTES
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -110,7 +110,7 @@
 This method should be defined in the class which consumes this role.
 
 Given a pool object, return the next replicant that will serve queries.  The
-default behavior is to grap the first replicant it finds but you can write 
+default behavior is to grab the first replicant it finds but you can write 
 your own subclasses of L<DBIx::Class::Storage::DBI::Replicated::Balancer> to 
 support other balance systems.
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod	2010-05-17 14:31:46 UTC (rev 9401)
@@ -9,8 +9,8 @@
 This is an introductory document for L<DBIx::Class::Storage::Replication>.
 
 This document is not an overview of what replication is or why you should be
-using it.  It is not a document explaing how to setup MySQL native replication
-either.  Copious external resources are avialable for both.  This document
+using it.  It is not a document explaining how to setup MySQL native replication
+either.  Copious external resources are available for both.  This document
 presumes you have the basics down.
   
 =head1 DESCRIPTION
@@ -33,7 +33,7 @@
 For an easy way to start playing with MySQL native replication, see:
 L<MySQL::Sandbox>.
 
-If you are using this with a L<Catalyst> based appplication, you may also wish
+If you are using this with a L<Catalyst> based application, you may also want
 to see more recent updates to L<Catalyst::Model::DBIC::Schema>, which has 
 support for replication configuration options as well.
 
@@ -41,15 +41,15 @@
 
 By default, when you start L<DBIx::Class>, your Schema (L<DBIx::Class::Schema>)
 is assigned a storage_type, which when fully connected will reflect your
-underlying storage engine as defined by your choosen database driver.  For
+underlying storage engine as defined by your chosen database driver.  For
 example, if you connect to a MySQL database, your storage_type will be
 L<DBIx::Class::Storage::DBI::mysql>  Your storage type class will contain 
 database specific code to help smooth over the differences between databases
 and let L<DBIx::Class> do its thing.
 
 If you want to use replication, you will override this setting so that the
-replicated storage engine will 'wrap' your underlying storages and present to
-the end programmer a unified interface.  This wrapper storage class will
+replicated storage engine will 'wrap' your underlying storages and present 
+a unified interface to the end programmer.  This wrapper storage class will
 delegate method calls to either a master database or one or more replicated
 databases based on if they are read only (by default sent to the replicants)
 or write (reserved for the master).  Additionally, the Replicated storage 
@@ -72,8 +72,8 @@
 storage itself (L<DBIx::Class::Storage::DBI::Replicated>).  A replicated storage
 takes a pool of replicants (L<DBIx::Class::Storage::DBI::Replicated::Pool>)
 and a software balancer (L<DBIx::Class::Storage::DBI::Replicated::Pool>).  The
-balancer does the job of splitting up all the read traffic amongst each
-replicant in the Pool. Currently there are two types of balancers, a Random one
+balancer does the job of splitting up all the read traffic amongst the
+replicants in the Pool. Currently there are two types of balancers, a Random one
 which chooses a Replicant in the Pool using a naive randomizer algorithm, and a
 First replicant, which just uses the first one in the Pool (and obviously is
 only of value when you have a single replicant).
@@ -89,26 +89,25 @@
 you use (or upgrade to) the latest L<Catalyst::Model::DBIC::Schema>, which makes
 this job even easier.
 
-First, you need to connect your L<DBIx::Class::Schema>.  Let's assume you have
-such a schema called, "MyApp::Schema".
+First, you need to get a C<$schema> object and set the storage_type:
 
-	use MyApp::Schema;
-	my $schema = MyApp::Schema->connect($dsn, $user, $pass);
+  my $schema = MyApp::Schema->clone;
+  $schema->storage_type([
+    '::DBI::Replicated' => {
+      balancer_type => '::Random',
+      balancer_args => {
+        auto_validate_every => 5,
+        master_read_weight => 1
+      },
+      pool_args => {
+        maximum_lag =>2,
+      },
+    }
+  ]);
 
-Next, you need to set the storage_type.
+Then, you need to connect your L<DBIx::Class::Schema>.
 
-	$schema->storage_type(
-		::DBI::Replicated' => {
-			balancer_type => '::Random',
-            balancer_args => {
-				auto_validate_every => 5,
-				master_read_weight => 1
-			},
-			pool_args => {
-				maximum_lag =>2,
-			},
-		}
-	);
+  $schema->connection($dsn, $user, $pass);
 
 Let's break down the settings.  The method L<DBIx::Class::Schema/storage_type>
 takes one mandatory parameter, a scalar value, and an option second value which
@@ -133,7 +132,7 @@
 balancers have the 'auto_validate_every' option.  This is the number of seconds
 we allow to pass between validation checks on a load balanced replicant. So
 the higher the number, the more possibility that your reads to the replicant 
-may be inconsistant with what's on the master.  Setting this number too low
+may be inconsistent with what's on the master.  Setting this number too low
 will result in increased database loads, so choose a number with care.  Our
 experience is that setting the number around 5 seconds results in a good
 performance / integrity balance.
@@ -146,25 +145,25 @@
 This object (L<DBIx::Class::Storage::DBI::Replicated::Pool>) manages all the
 declared replicants.  'maximum_lag' is the number of seconds a replicant is
 allowed to lag behind the master before being temporarily removed from the pool.
-Keep in mind that the Balancer option 'auto_validate_every' determins how often
+Keep in mind that the Balancer option 'auto_validate_every' determines how often
 a replicant is tested against this condition, so the true possible lag can be
 higher than the number you set.  The default is zero.
 
 No matter how low you set the maximum_lag or the auto_validate_every settings,
 there is always the chance that your replicants will lag a bit behind the
 master for the supported replication system built into MySQL.  You can ensure
-reliabily reads by using a transaction, which will force both read and write
+reliable reads by using a transaction, which will force both read and write
 activity to the master, however this will increase the load on your master
 database.
 
 After you've configured the replicated storage, you need to add the connection
 information for the replicants:
 
-	$schema->storage->connect_replicants(
-		[$dsn1, $user, $pass, \%opts],
- 		[$dsn2, $user, $pass, \%opts],
- 		[$dsn3, $user, $pass, \%opts],
- 	);
+  $schema->storage->connect_replicants(
+    [$dsn1, $user, $pass, \%opts],
+    [$dsn2, $user, $pass, \%opts],
+    [$dsn3, $user, $pass, \%opts],
+  );
 
 These replicants should be configured as slaves to the master using the
 instructions for MySQL native replication, or if you are just learning, you

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,12 +1,13 @@
 package DBIx::Class::Storage::DBI::Replicated::Pool;
 
 use Moose;
-use MooseX::AttributeHelpers;
 use DBIx::Class::Storage::DBI::Replicated::Replicant;
 use List::Util 'sum';
 use Scalar::Util 'reftype';
+use DBI ();
 use Carp::Clan qw/^DBIx::Class/;
 use MooseX::Types::Moose qw/Num Int ClassName HashRef/;
+use DBIx::Class::Storage::DBI::Replicated::Types 'DBICStorageDBI';
 
 use namespace::clean -except => 'meta';
 
@@ -22,7 +23,7 @@
 =head1 DESCRIPTION
 
 In a replicated storage type, there is at least one replicant to handle the
-read only traffic.  The Pool class manages this replicant, or list of 
+read-only traffic.  The Pool class manages this replicant, or list of 
 replicants, and gives some methods for querying information about their status.
 
 =head1 ATTRIBUTES
@@ -52,7 +53,7 @@
 
 This is an integer representing a time since the last time the replicants were
 validated. It's nothing fancy, just an integer provided via the perl L<time|perlfunc/time>
-builtin.
+built-in.
 
 =cut
 
@@ -86,7 +87,7 @@
 =head2 replicants
 
 A hashref of replicant, with the key being the dsn and the value returning the
-actual replicant storage.  For example if the $dsn element is something like:
+actual replicant storage.  For example, if the $dsn element is something like:
 
   "dbi:SQLite:dbname=dbfile"
 
@@ -116,7 +117,7 @@
 
 =item delete_replicant ($key)
 
-removes the replicant under $key from the pool
+Removes the replicant under $key from the pool
 
 =back
 
@@ -124,19 +125,42 @@
 
 has 'replicants' => (
   is=>'rw',
-  metaclass => 'Collection::Hash',
+  traits => ['Hash'],
   isa=>HashRef['Object'],
   default=>sub {{}},
-  provides  => {
-    'set' => 'set_replicant',
-    'get' => 'get_replicant',
-    'empty' => 'has_replicants',
-    'count' => 'num_replicants',
-    'delete' => 'delete_replicant',
-    'values' => 'all_replicant_storages',
+  handles  => {
+    'set_replicant' => 'set',
+    'get_replicant' => 'get',
+    'has_replicants' => 'is_empty',
+    'num_replicants' => 'count',
+    'delete_replicant' => 'delete',
+    'all_replicant_storages' => 'values',
   },
 );
 
+around has_replicants => sub {
+    my ($orig, $self) = @_;
+    return !$self->$orig;
+};
+
+has next_unknown_replicant_id => (
+  is => 'rw',
+  traits => ['Counter'],
+  isa => Int,
+  default => 1,
+  handles => {
+    'inc_unknown_replicant_id' => 'inc',
+  },
+);
+
+=head2 master
+
+Reference to the master Storage.
+
+=cut
+
+has master => (is => 'rw', isa => DBICStorageDBI, weak_ref => 1);
+
 =head1 METHODS
 
 This class defines the following methods.
@@ -158,16 +182,45 @@
     $connect_info = [ $connect_info ]
       if reftype $connect_info ne 'ARRAY';
 
-    croak "coderef replicant connect_info not supported"
-      if ref $connect_info->[0] && reftype $connect_info->[0] eq 'CODE';
+    my $connect_coderef =
+      (reftype($connect_info->[0])||'') eq 'CODE' ? $connect_info->[0]
+        : (reftype($connect_info->[0])||'') eq 'HASH' &&
+          $connect_info->[0]->{dbh_maker};
 
-    my $replicant = $self->connect_replicant($schema, $connect_info);
+    my $dsn;
+    my $replicant = do {
+# yes this is evil, but it only usually happens once (for coderefs)
+# this will fail if the coderef does not actually DBI::connect
+      no warnings 'redefine';
+      my $connect = \&DBI::connect;
+      local *DBI::connect = sub {
+        $dsn = $_[1];
+        goto $connect;
+      };
+      $self->connect_replicant($schema, $connect_info);
+    };
 
-    my $key = $connect_info->[0];
-    $key = $key->{dsn} if ref $key && reftype $key eq 'HASH';
-    ($key) = ($key =~ m/^dbi\:.+\:(.+)$/);
+    my $key;
 
-    $self->set_replicant( $key => $replicant);  
+    if (!$dsn) {
+      if (!$connect_coderef) {
+        $dsn = $connect_info->[0];
+        $dsn = $dsn->{dsn} if (reftype($dsn)||'') eq 'HASH';
+      }
+      else {
+        # all attempts to get the DSN failed
+        $key = "UNKNOWN_" . $self->next_unknown_replicant_id;
+        $self->inc_unknown_replicant_id;
+      }
+    }
+    if ($dsn) {
+      $replicant->dsn($dsn);
+      ($key) = ($dsn =~ m/^dbi\:.+\:(.+)$/i);
+    }
+
+    $replicant->id($key);
+    $self->set_replicant($key => $replicant);  
+
     push @newly_created, $replicant;
   }
 
@@ -199,7 +252,13 @@
     $replicant->_determine_driver
   });
 
-  DBIx::Class::Storage::DBI::Replicated::Replicant->meta->apply($replicant);  
+  Moose::Meta::Class->initialize(ref $replicant);
+
+  DBIx::Class::Storage::DBI::Replicated::Replicant->meta->apply($replicant);
+
+  # link back to master
+  $replicant->master($self->master);
+
   return $replicant;
 }
 
@@ -209,7 +268,7 @@
 connect.  For the master database this is desirable, but since replicants are
 allowed to fail, this behavior is not desirable.  This method wraps the call
 to ensure_connected in an eval in order to catch any generated errors.  That
-way a slave can go completely offline (ie, the box itself can die) without
+way a slave can go completely offline (e.g. the box itself can die) without
 bringing down your entire pool of databases.
 
 =cut
@@ -236,16 +295,15 @@
 
   eval {
     $code->()
-  }; 
+  };
   if ($@) {
-    $replicant
-      ->debugobj
-      ->print(
-        sprintf( "Exception trying to $name for replicant %s, error is %s",
-          $replicant->_dbi_connect_info->[0], $@)
-        );
-  	return;
+    $replicant->debugobj->print(sprintf(
+      "Exception trying to $name for replicant %s, error is %s",
+      $replicant->_dbi_connect_info->[0], $@)
+    );
+    return undef;
   }
+
   return 1;
 }
 
@@ -307,7 +365,7 @@
 inactive, and thus removed from the replication pool.
 
 This tests L<all_replicants>, since a replicant that has been previous marked
-as inactive can be reactived should it start to pass the validation tests again.
+as inactive can be reactivated should it start to pass the validation tests again.
 
 See L<DBIx::Class::Storage::DBI> for more about checking if a replicating
 connection is not following a master or is lagging.

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -3,7 +3,8 @@
 use Moose::Role;
 requires qw/_query_start/;
 with 'DBIx::Class::Storage::DBI::Replicated::WithDSN';
-use MooseX::Types::Moose 'Bool';
+use MooseX::Types::Moose qw/Bool Str/;
+use DBIx::Class::Storage::DBI::Replicated::Types 'DBICStorageDBI';
 
 use namespace::clean -except => 'meta';
 
@@ -32,14 +33,14 @@
 =head2 active
 
 This is a boolean which allows you to programmatically activate or deactivate a
-replicant from the pool.  This way to you do stuff like disallow a replicant
-when it get's too far behind the master, if it stops replicating, etc.
+replicant from the pool.  This way you can do stuff like disallow a replicant
+when it gets too far behind the master, if it stops replicating, etc.
 
 This attribute DOES NOT reflect a replicant's internal status, i.e. if it is
 properly replicating from a master and has not fallen too many seconds behind a
 reliability threshold.  For that, use L</is_replicating>  and L</lag_behind_master>.
 Since the implementation of those functions database specific (and not all DBIC
-supported DB's support replication) you should refer your database specific
+supported DBs support replication) you should refer your database-specific
 storage driver for more information.
 
 =cut
@@ -52,6 +53,17 @@
   default=>1,
 );
 
+has dsn => (is => 'rw', isa => Str);
+has id  => (is => 'rw', isa => Str);
+
+=head2 master
+
+Reference to the master Storage.
+
+=cut
+
+has master => (is => 'rw', isa => DBICStorageDBI, weak_ref => 1);
+
 =head1 METHODS
 
 This class defines the following methods.
@@ -63,7 +75,9 @@
 =cut
 
 sub debugobj {
-    return shift->schema->storage->debugobj;
+  my $self = shift;
+
+  return $self->master->debugobj;
 }
 
 =head1 ALSO SEE

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,6 +1,7 @@
 package DBIx::Class::Storage::DBI::Replicated::WithDSN;
 
 use Moose::Role;
+use Scalar::Util 'reftype';
 requires qw/_query_start/;
 
 use namespace::clean -except => 'meta';
@@ -30,11 +31,25 @@
 
 around '_query_start' => sub {
   my ($method, $self, $sql, @bind) = @_;
-  my $dsn = $self->_dbi_connect_info->[0];
+
+  my $dsn = eval { $self->dsn } || $self->_dbi_connect_info->[0];
+
   my($op, $rest) = (($sql=~m/^(\w+)(.+)$/),'NOP', 'NO SQL');
   my $storage_type = $self->can('active') ? 'REPLICANT' : 'MASTER';
 
-  $self->$method("$op [DSN_$storage_type=$dsn]$rest", @bind);
+  my $query = do {
+    if ((reftype($dsn)||'') ne 'CODE') {
+      "$op [DSN_$storage_type=$dsn]$rest";
+    }
+    elsif (my $id = eval { $self->id }) {
+      "$op [$storage_type=$id]$rest";
+    }
+    else {
+      "$op [$storage_type]$rest";
+    }
+  };
+
+  $self->$method($query, @bind);
 };
 
 =head1 ALSO SEE

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Replicated.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -2,28 +2,9 @@
 
 BEGIN {
   use Carp::Clan qw/^DBIx::Class/;
-
-  ## Modules required for Replication support not required for general DBIC
-  ## use, so we explicitly test for these.
-
-  my %replication_required = (
-    'Moose' => '0.87',
-    'MooseX::AttributeHelpers' => '0.21',
-    'MooseX::Types' => '0.16',
-    'namespace::clean' => '0.11',
-    'Hash::Merge' => '0.11'
-  );
-
-  my @didnt_load;
-
-  for my $module (keys %replication_required) {
-	eval "use $module $replication_required{$module}";
-	push @didnt_load, "$module $replication_required{$module}"
-	 if $@;
-  }
-
-  croak("@{[ join ', ', @didnt_load ]} are missing and are required for Replication")
-    if @didnt_load;
+  use DBIx::Class;
+  croak('The following modules are required for Replication ' . DBIx::Class::Optional::Dependencies->req_missing_for ('replicated') )
+    unless DBIx::Class::Optional::Dependencies->req_ok_for ('replicated');
 }
 
 use Moose;
@@ -33,8 +14,8 @@
 use DBIx::Class::Storage::DBI::Replicated::Types qw/BalancerClassNamePart DBICSchema DBICStorageDBI/;
 use MooseX::Types::Moose qw/ClassName HashRef Object/;
 use Scalar::Util 'reftype';
-use Carp::Clan qw/^DBIx::Class/;
-use Hash::Merge 'merge';
+use Hash::Merge;
+use List::Util qw/min max reduce/;
 
 use namespace::clean -except => 'meta';
 
@@ -45,14 +26,16 @@
 =head1 SYNOPSIS
 
 The Following example shows how to change an existing $schema to a replicated
-storage type, add some replicated (readonly) databases, and perform reporting
+storage type, add some replicated (read-only) databases, and perform reporting
 tasks.
 
 You should set the 'storage_type attribute to a replicated type.  You should
 also define your arguments, such as which balancer you want and any arguments
 that the Pool object should get.
 
+  my $schema = Schema::Class->clone;
   $schema->storage_type( ['::DBI::Replicated', {balancer=>'::Random'}] );
+  $schema->connection(...);
 
 Next, you need to add in the Replicants.  Basically this is an array of 
 arrayrefs, where each arrayref is database connect information.  Think of these
@@ -92,7 +75,7 @@
 Warning: This class is marked BETA.  This has been running a production
 website using MySQL native replication as its backend and we have some decent
 test coverage but the code hasn't yet been stressed by a variety of databases.
-Individual DB's may have quirks we are not aware of.  Please use this in first
+Individual DBs may have quirks we are not aware of.  Please use this in first
 development and pass along your experiences/bug fixes.
 
 This class implements replicated data store for DBI. Currently you can define
@@ -106,29 +89,21 @@
 to all existing storages.  This way our storage class is a drop in replacement
 for L<DBIx::Class::Storage::DBI>.
 
-Read traffic is spread across the replicants (slaves) occuring to a user
+Read traffic is spread across the replicants (slaves) occurring to a user
 selected algorithm.  The default algorithm is random weighted.
 
 =head1 NOTES
 
-The consistancy betweeen master and replicants is database specific.  The Pool
+The consistency between master and replicants is database specific.  The Pool
 gives you a method to validate its replicants, removing and replacing them
 when they fail/pass predefined criteria.  Please make careful use of the ways
 to force a query to run against Master when needed.
 
 =head1 REQUIREMENTS
 
-Replicated Storage has additional requirements not currently part of L<DBIx::Class>
+Replicated Storage has additional requirements not currently part of
+L<DBIx::Class>. See L<DBIx::Class::Optional::Dependencies> for more details.
 
-  Moose => '0.87',
-  MooseX::AttributeHelpers => '0.20',
-  MooseX::Types => '0.16',
-  namespace::clean => '0.11',
-  Hash::Merge => '0.11'
-
-You will need to install these modules manually via CPAN or make them part of the
-Makefile for your distribution.
-
 =head1 ATTRIBUTES
 
 This class defines the following attributes.
@@ -222,7 +197,7 @@
   isa=>'DBIx::Class::Storage::DBI::Replicated::Pool',
   lazy_build=>1,
   handles=>[qw/
-    connect_replicants    
+    connect_replicants
     replicants
     has_replicants
   /],
@@ -277,12 +252,17 @@
     select
     select_single
     columns_info_for
-  /],    
+    _dbh_columns_info_for 
+    _select
+  /],
 );
 
 =head2 write_handler
 
-Defines an object that implements the write side of L<BIx::Class::Storage::DBI>.
+Defines an object that implements the write side of L<BIx::Class::Storage::DBI>,
+as well as methods that don't write or read that can be called on only one
+storage, methods that return a C<$dbh>, and any methods that don't make sense to
+run on a replicant.
 
 =cut
 
@@ -290,18 +270,21 @@
   is=>'ro',
   isa=>Object,
   lazy_build=>1,
-  handles=>[qw/   
+  handles=>[qw/
     on_connect_do
-    on_disconnect_do       
+    on_disconnect_do
+    on_connect_call
+    on_disconnect_call
     connect_info
+    _connect_info
     throw_exception
     sql_maker
     sqlt_type
     create_ddl_dir
     deployment_statements
     datetime_parser
-    datetime_parser_type  
-    build_datetime_parser      
+    datetime_parser_type
+    build_datetime_parser
     last_insert_id
     insert
     insert_bulk
@@ -316,29 +299,96 @@
     sth
     deploy
     with_deferred_fk_checks
-	dbh_do
+    dbh_do
     reload_row
-	with_deferred_fk_checks
+    with_deferred_fk_checks
     _prep_for_execute
 
-	backup
-	is_datatype_numeric
-	_count_select
-	_subq_count_select
-	_subq_update_delete 
-	svp_rollback
-	svp_begin
-	svp_release
+    backup
+    is_datatype_numeric
+    _supports_insert_returning
+    _count_select
+    _subq_update_delete
+    svp_rollback
+    svp_begin
+    svp_release
+    relname_to_table_alias
+    _straight_join_to_node
+    _dbh_last_insert_id
+    _fix_bind_params
+    _default_dbi_connect_attributes
+    _dbi_connect_info
+    auto_savepoint
+    _sqlt_version_ok
+    _query_end
+    bind_attribute_by_data_type
+    transaction_depth
+    _dbh
+    _select_args
+    _dbh_execute_array
+    _sql_maker_args
+    _sql_maker
+    _query_start
+    _sqlt_version_error
+    _per_row_update_delete
+    _dbh_begin_work
+    _dbh_execute_inserts_with_no_binds
+    _select_args_to_query
+    _svp_generate_name
+    _multipk_update_delete
+    source_bind_attributes
+    _normalize_connect_info
+    _parse_connect_do
+    _dbh_commit
+    _execute_array
+    _placeholders_supported
+    savepoints
+    _sqlt_minimum_version
+    _sql_maker_opts
+    _conn_pid
+    _typeless_placeholders_supported
+    _conn_tid
+    _dbh_autocommit
+    _native_data_type
+    _get_dbh
+    sql_maker_class
+    _dbh_rollback
+    _adjust_select_args_for_complex_prefetch
+    _resolve_ident_sources
+    _resolve_column_info
+    _prune_unused_joins
+    _strip_cond_qualifiers
+    _parse_order_by
+    _resolve_aliastypes_from_select_args
+    _execute
+    _do_query
+    _dbh_sth
+    _dbh_execute
+    _prefetch_insert_auto_nextvals
+    _server_info_hash
   /],
 );
 
+my @unimplemented = qw(
+  _arm_global_destructor
+  _preserve_foreign_dbh
+  _verify_pid
+  _verify_tid
+);
+
+for my $method (@unimplemented) {
+  __PACKAGE__->meta->add_method($method, sub {
+    croak "$method must not be called on ".(blessed shift).' objects';
+  });
+}
+
 has _master_connect_info_opts =>
   (is => 'rw', isa => HashRef, default => sub { {} });
 
 =head2 around: connect_info
 
-Preserve master's C<connect_info> options (for merging with replicants.)
-Also set any Replicated related options from connect_info, such as
+Preserves master's C<connect_info> options (for merging with replicants.)
+Also sets any Replicated-related options from connect_info, such as
 C<pool_type>, C<pool_args>, C<balancer_type> and C<balancer_args>.
 
 =cut
@@ -348,10 +398,12 @@
 
   my $wantarray = wantarray;
 
+  my $merge = Hash::Merge->new('LEFT_PRECEDENT');
+
   my %opts;
   for my $arg (@$info) {
     next unless (reftype($arg)||'') eq 'HASH';
-    %opts = %{ merge($arg, \%opts) };
+    %opts = %{ $merge->merge($arg, \%opts) };
   }
   delete $opts{dsn};
 
@@ -360,11 +412,11 @@
       if $opts{pool_type};
 
     $self->pool_args(
-      merge((delete $opts{pool_args} || {}), $self->pool_args)
+      $merge->merge((delete $opts{pool_args} || {}), $self->pool_args)
     );
 
     $self->pool($self->_build_pool)
-	if $self->pool;
+      if $self->pool;
   }
 
   if (@opts{qw/balancer_type balancer_args/}) {
@@ -372,11 +424,11 @@
       if $opts{balancer_type};
 
     $self->balancer_args(
-      merge((delete $opts{balancer_args} || {}), $self->balancer_args)
+      $merge->merge((delete $opts{balancer_args} || {}), $self->balancer_args)
     );
 
     $self->balancer($self->_build_balancer)
-	if $self->balancer;
+      if $self->balancer;
   }
 
   $self->_master_connect_info_opts(\%opts);
@@ -392,8 +444,12 @@
   my $master = $self->master;
   $master->_determine_driver;
   Moose::Meta::Class->initialize(ref $master);
+
   DBIx::Class::Storage::DBI::Replicated::WithDSN->meta->apply($master);
 
+  # link pool back to master
+  $self->pool->master($master);
+
   $wantarray ? @res : $res;
 };
 
@@ -410,12 +466,12 @@
 =cut
 
 sub BUILDARGS {
-  my ($class, $schema, $storage_type_args, @args) = @_;	
+  my ($class, $schema, $storage_type_args, @args) = @_;  
 
   return {
-  	schema=>$schema, 
-  	%$storage_type_args,
-  	@args
+    schema=>$schema,
+    %$storage_type_args,
+    @args
   }
 }
 
@@ -452,7 +508,7 @@
 sub _build_balancer {
   my $self = shift @_;
   $self->create_balancer(
-    pool=>$self->pool, 
+    pool=>$self->pool,
     master=>$self->master,
     %{$self->balancer_args},
   );
@@ -494,32 +550,40 @@
   for my $r (@args) {
     $r = [ $r ] unless reftype $r eq 'ARRAY';
 
-    croak "coderef replicant connect_info not supported"
+    $self->throw_exception('coderef replicant connect_info not supported')
       if ref $r->[0] && reftype $r->[0] eq 'CODE';
 
 # any connect_info options?
     my $i = 0;
     $i++ while $i < @$r && (reftype($r->[$i])||'') ne 'HASH';
 
-# make one if none    
+# make one if none
     $r->[$i] = {} unless $r->[$i];
 
 # merge if two hashes
     my @hashes = @$r[$i .. $#{$r}];
 
-    croak "invalid connect_info options"
+    $self->throw_exception('invalid connect_info options')
       if (grep { reftype($_) eq 'HASH' } @hashes) != @hashes;
 
-    croak "too many hashrefs in connect_info"
+    $self->throw_exception('too many hashrefs in connect_info')
       if @hashes > 2;
 
-    my %opts = %{ merge(reverse @hashes) };
+    my $merge = Hash::Merge->new('LEFT_PRECEDENT');
+    my %opts = %{ $merge->merge(reverse @hashes) };
 
 # delete them
     splice @$r, $i+1, ($#{$r} - $i), ();
 
+# make sure master/replicants opts don't clash
+    my %master_opts = %{ $self->_master_connect_info_opts };
+    if (exists $opts{dbh_maker}) {
+        delete @master_opts{qw/dsn user password/};
+    }
+    delete $master_opts{dbh_maker};
+
 # merge with master
-    %opts = %{ merge(\%opts, $self->_master_connect_info_opts) };
+    %opts = %{ $merge->merge(\%opts, \%master_opts) };
 
 # update
     $r->[$i] = \%opts;
@@ -547,7 +611,7 @@
 =head2 execute_reliably ($coderef, ?@args)
 
 Given a coderef, saves the current state of the L</read_handler>, forces it to
-use reliable storage (ie sets it to the master), executes a coderef and then
+use reliable storage (e.g. sets it to the master), executes a coderef and then
 restores the original state.
 
 Example:
@@ -593,11 +657,11 @@
       ($result[0]) = ($coderef->(@args));
     } else {
       $coderef->(@args);
-    }       
+    }
   };
 
   ##Reset to the original state
-  $self->read_handler($current); 
+  $self->read_handler($current);
 
   ##Exception testing has to come last, otherwise you might leave the 
   ##read_handler set to master.
@@ -627,7 +691,7 @@
 =head2 set_balanced_storage
 
 Sets the current $schema to be use the </balancer> for all reads, while all
-writea are sent to the master only
+writes are sent to the master only
 
 =cut
 
@@ -731,57 +795,42 @@
   if(@_) {
     foreach my $source ($self->all_storages) {
       $source->debug(@_);
-    }   
+    }
   }
   return $self->master->debug;
 }
 
 =head2 debugobj
 
-set a debug object across all storages
+set a debug object
 
 =cut
 
 sub debugobj {
   my $self = shift @_;
-  if(@_) {
-    foreach my $source ($self->all_storages) {
-      $source->debugobj(@_);
-    } 	
-  }
-  return $self->master->debugobj;
+  return $self->master->debugobj(@_);
 }
 
 =head2 debugfh
 
-set a debugfh object across all storages
+set a debugfh object
 
 =cut
 
 sub debugfh {
   my $self = shift @_;
-  if(@_) {
-    foreach my $source ($self->all_storages) {
-      $source->debugfh(@_);
-    }   
-  }
-  return $self->master->debugfh;
+  return $self->master->debugfh(@_);
 }
 
 =head2 debugcb
 
-set a debug callback across all storages
+set a debug callback
 
 =cut
 
 sub debugcb {
   my $self = shift @_;
-  if(@_) {
-    foreach my $source ($self->all_storages) {
-      $source->debugcb(@_);
-    }   
-  }
-  return $self->master->debugcb;
+  return $self->master->debugcb(@_);
 }
 
 =head2 disconnect
@@ -812,6 +861,195 @@
   $self->master->cursor_class;
 }
 
+=head2 cursor
+
+set cursor class on all storages, or return master's, alias for L</cursor_class>
+above.
+
+=cut
+
+sub cursor {
+  my ($self, $cursor_class) = @_;
+
+  if ($cursor_class) {
+    $_->cursor($cursor_class) for $self->all_storages;
+  }
+  $self->master->cursor;
+}
+
+=head2 unsafe
+
+sets the L<DBIx::Class::Storage::DBI/unsafe> option on all storages or returns
+master's current setting
+
+=cut
+
+sub unsafe {
+  my $self = shift;
+
+  if (@_) {
+    $_->unsafe(@_) for $self->all_storages;
+  }
+
+  return $self->master->unsafe;
+}
+
+=head2 disable_sth_caching
+
+sets the L<DBIx::Class::Storage::DBI/disable_sth_caching> option on all storages
+or returns master's current setting
+
+=cut
+
+sub disable_sth_caching {
+  my $self = shift;
+
+  if (@_) {
+    $_->disable_sth_caching(@_) for $self->all_storages;
+  }
+
+  return $self->master->disable_sth_caching;
+}
+
+=head2 lag_behind_master
+
+returns the highest Replicant L<DBIx::Class::Storage::DBI/lag_behind_master>
+setting
+
+=cut
+
+sub lag_behind_master {
+  my $self = shift;
+
+  return max map $_->lag_behind_master, $self->replicants;
+} 
+
+=head2 is_replicating
+
+returns true if all replicants return true for
+L<DBIx::Class::Storage::DBI/is_replicating>
+
+=cut
+
+sub is_replicating {
+  my $self = shift;
+
+  return (grep $_->is_replicating, $self->replicants) == ($self->replicants);
+}
+
+=head2 connect_call_datetime_setup
+
+calls L<DBIx::Class::Storage::DBI/connect_call_datetime_setup> for all storages
+
+=cut
+
+sub connect_call_datetime_setup {
+  my $self = shift;
+  $_->connect_call_datetime_setup for $self->all_storages;
+}
+
+sub _populate_dbh {
+  my $self = shift;
+  $_->_populate_dbh for $self->all_storages;
+}
+
+sub _connect {
+  my $self = shift;
+  $_->_connect for $self->all_storages;
+}
+
+sub _rebless {
+  my $self = shift;
+  $_->_rebless for $self->all_storages;
+}
+
+sub _determine_driver {
+  my $self = shift;
+  $_->_determine_driver for $self->all_storages;
+}
+
+sub _driver_determined {
+  my $self = shift;
+  
+  if (@_) {
+    $_->_driver_determined(@_) for $self->all_storages;
+  }
+
+  return $self->master->_driver_determined;
+}
+
+sub _init {
+  my $self = shift;
+  
+  $_->_init for $self->all_storages;
+}
+
+sub _run_connection_actions {
+  my $self = shift;
+  
+  $_->_run_connection_actions for $self->all_storages;
+}
+
+sub _do_connection_actions {
+  my $self = shift;
+  
+  if (@_) {
+    $_->_do_connection_actions(@_) for $self->all_storages;
+  }
+}
+
+sub connect_call_do_sql {
+  my $self = shift;
+  $_->connect_call_do_sql(@_) for $self->all_storages;
+}
+
+sub disconnect_call_do_sql {
+  my $self = shift;
+  $_->disconnect_call_do_sql(@_) for $self->all_storages;
+}
+
+sub _seems_connected {
+  my $self = shift;
+
+  return min map $_->_seems_connected, $self->all_storages;
+}
+
+sub _ping {
+  my $self = shift;
+
+  return min map $_->_ping, $self->all_storages;
+}
+
+my $numify_ver = sub {
+  my $ver = shift;
+  my @numparts = split /\D+/, $ver;
+  my $format = '%d.' . (join '', ('%05d') x (@numparts - 1));
+
+  return sprintf $format, @numparts;
+};
+
+sub _server_info {
+  my $self = shift;
+
+  if (not $self->_server_info_hash) {
+    my $min_version_info = (
+      reduce { $a->[0] < $b->[0] ? $a : $b } 
+      map [ $numify_ver->($_->{dbms_version}), $_ ],
+      map $_->_server_info, $self->all_storages
+    )->[1];
+
+    $self->_server_info_hash($min_version_info); # on master
+  }
+
+  return $self->_server_info_hash;
+}
+
+sub _get_server_version {
+  my $self = shift;
+
+  return $self->_server_info->{dbms_version};
+}
+
 =head1 GOTCHAS
 
 Due to the fact that replicants can lag behind a master, you must take care to

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Role/QueryCounter.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Role/QueryCounter.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Role/QueryCounter.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -19,7 +19,7 @@
 
 This package defines the following attributes.
 
-head2 _query_count
+=head2 _query_count
 
 Is the attribute holding the current query count.  It defines a public reader
 called 'query_count' which you can use to access the total number of queries
@@ -42,7 +42,7 @@
 
 =head2 _query_start
 
-override on the method so that we count the queries.
+Override on the method so that we count the queries.
 
 =cut
 

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/SQLAnywhere.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/SQLAnywhere.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/SQLAnywhere.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,192 @@
+package DBIx::Class::Storage::DBI::SQLAnywhere;
+
+use strict;
+use warnings;
+use base qw/DBIx::Class::Storage::DBI::UniqueIdentifier/;
+use mro 'c3';
+use List::Util ();
+
+__PACKAGE__->mk_group_accessors(simple => qw/
+  _identity
+/);
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::SQLAnywhere - Driver for Sybase SQL Anywhere
+
+=head1 DESCRIPTION
+
+This class implements autoincrements for Sybase SQL Anywhere, selects the
+RowNumberOver limit implementation and provides
+L<DBIx::Class::InflateColumn::DateTime> support.
+
+You need the C<DBD::SQLAnywhere> driver that comes with the SQL Anywhere
+distribution, B<NOT> the one on CPAN. It is usually under a path such as:
+
+  /opt/sqlanywhere11/sdk/perl
+
+Recommended L<connect_info|DBIx::Class::Storage::DBI/connect_info> settings:
+
+  on_connect_call => 'datetime_setup'
+
+=head1 METHODS
+
+=cut
+
+sub last_insert_id { shift->_identity }
+
+sub _new_uuid { 'UUIDTOSTR(NEWID())' }
+
+sub insert {
+  my $self = shift;
+  my ($source, $to_insert) = @_;
+
+  my $identity_col = List::Util::first {
+      $source->column_info($_)->{is_auto_increment} 
+  } $source->columns;
+
+# user might have an identity PK without is_auto_increment
+  if (not $identity_col) {
+    foreach my $pk_col ($source->primary_columns) {
+      if (not exists $to_insert->{$pk_col} &&
+          $source->column_info($pk_col)->{data_type} !~ /^uniqueidentifier/i)
+      {
+        $identity_col = $pk_col;
+        last;
+      }
+    }
+  }
+
+  if ($identity_col && (not exists $to_insert->{$identity_col})) {
+    my $dbh = $self->_get_dbh;
+    my $table_name = $source->from;
+    $table_name    = $$table_name if ref $table_name;
+
+    my ($identity) = eval {
+      local $@; $dbh->selectrow_array("SELECT GET_IDENTITY('$table_name')")
+    };
+
+    if (defined $identity) {
+      $to_insert->{$identity_col} = $identity;
+      $self->_identity($identity);
+    }
+  }
+
+  return $self->next::method(@_);
+}
+
+# convert UUIDs to strings in selects
+sub _select_args {
+  my $self = shift;
+  my ($ident, $select) = @_;
+
+  my $col_info = $self->_resolve_column_info($ident);
+
+  for my $select_idx (0..$#$select) {
+    my $selected = $select->[$select_idx];
+
+    next if ref $selected;
+
+    my $data_type = $col_info->{$selected}{data_type};
+
+    if ($data_type && lc($data_type) eq 'uniqueidentifier') {
+      $select->[$select_idx] = { UUIDTOSTR => $selected };
+    }
+  }
+
+  return $self->next::method(@_);
+}
+
+# this sub stolen from DB2
+
+sub _sql_maker_opts {
+  my ( $self, $opts ) = @_;
+
+  if ( $opts ) {
+    $self->{_sql_maker_opts} = { %$opts };
+  }
+
+  return { limit_dialect => 'RowNumberOver', %{$self->{_sql_maker_opts}||{}} };
+}
+
+# this sub stolen from MSSQL
+
+sub build_datetime_parser {
+  my $self = shift;
+  my $type = "DateTime::Format::Strptime";
+  eval "use ${type}";
+  $self->throw_exception("Couldn't load ${type}: $@") if $@;
+  return $type->new( pattern => '%Y-%m-%d %H:%M:%S.%6N' );
+}
+
+=head2 connect_call_datetime_setup
+
+Used as:
+
+    on_connect_call => 'datetime_setup'
+
+In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set the date and
+timestamp formats (as temporary options for the session) for use with
+L<DBIx::Class::InflateColumn::DateTime>.
+
+The C<TIMESTAMP> data type supports up to 6 digits after the decimal point for
+second precision. The full precision is used.
+
+The C<DATE> data type supposedly stores hours and minutes too, according to the
+documentation, but I could not get that to work. It seems to only store the
+date.
+
+You will need the L<DateTime::Format::Strptime> module for inflation to work.
+
+=cut
+
+sub connect_call_datetime_setup {
+  my $self = shift;
+
+  $self->_do_query(
+    "set temporary option timestamp_format = 'yyyy-mm-dd hh:mm:ss.ssssss'"
+  );
+  $self->_do_query(
+    "set temporary option date_format      = 'yyyy-mm-dd hh:mm:ss.ssssss'"
+  );
+}
+
+sub _svp_begin {
+    my ($self, $name) = @_;
+
+    $self->_get_dbh->do("SAVEPOINT $name");
+}
+
+# can't release savepoints that have been rolled back
+sub _svp_release { 1 }
+
+sub _svp_rollback {
+    my ($self, $name) = @_;
+
+    $self->_get_dbh->do("ROLLBACK TO SAVEPOINT $name")
+}
+
+1;
+
+=head1 MAXIMUM CURSORS
+
+A L<DBIx::Class> application can use a lot of cursors, due to the usage of
+L<prepare_cached|DBI/prepare_cached>.
+
+The default cursor maximum is C<50>, which can be a bit too low. This limit can
+be turned off (or increased) by the DBA by executing:
+
+  set option max_statement_count = 0
+  set option max_cursor_count    = 0
+
+Highly recommended.
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/SQLite.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/SQLite.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/SQLite.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -10,10 +10,7 @@
 use File::Copy;
 use File::Spec;
 
-sub _dbh_last_insert_id {
-  my ($self, $dbh, $source, $col) = @_;
-  $dbh->func('last_insert_rowid');
-}
+__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks::SQLite');
 
 sub backup
 {
@@ -47,8 +44,50 @@
   return $backupfile;
 }
 
+sub deployment_statements {
+  my $self = shift;;
+  my ($schema, $type, $version, $dir, $sqltargs, @rest) = @_;
+
+  $sqltargs ||= {};
+
+  # it'd be cool to use the normalized perl-style version but this needs sqlt hacking as well
+  if (my $sqlite_version = $self->_server_info->{dbms_version}) {
+    # numify, SQLT does a numeric comparison
+    $sqlite_version =~ s/^(\d+) \. (\d+) (?: \. (\d+))? .*/${1}.${2}/x;
+
+    $sqltargs->{producer_args}{sqlite_version} = $sqlite_version if $sqlite_version;
+  }
+
+  $self->next::method($schema, $type, $version, $dir, $sqltargs, @rest);
+}
+
 sub datetime_parser_type { return "DateTime::Format::SQLite"; } 
 
+=head2 connect_call_use_foreign_keys
+
+Used as:
+
+    on_connect_call => 'use_foreign_keys'
+
+In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to turn on foreign key
+(including cascading) support for recent versions of SQLite and L<DBD::SQLite>.
+
+Executes:
+
+  PRAGMA foreign_keys = ON 
+
+See L<http://www.sqlite.org/foreignkeys.html> for more information.
+
+=cut
+
+sub connect_call_use_foreign_keys {
+  my $self = shift;
+
+  $self->_do_query(
+    'PRAGMA foreign_keys = ON'
+  );
+}
+
 1;
 
 =head1 NAME
@@ -58,7 +97,7 @@
 =head1 SYNOPSIS
 
   # In your table classes
-  __PACKAGE__->load_components(qw/PK::Auto Core/);
+  use base 'DBIx::Class::Core';
   __PACKAGE__->set_primary_key('id');
 
 =head1 DESCRIPTION

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,102 @@
+package DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars;
+
+use base qw/
+  DBIx::Class::Storage::DBI::NoBindVars
+  DBIx::Class::Storage::DBI::Sybase::ASE
+/;
+use mro 'c3';
+use List::Util ();
+use Scalar::Util ();
+
+sub _init {
+  my $self = shift;
+  $self->disable_sth_caching(1);
+  $self->_identity_method('@@IDENTITY');
+  $self->next::method (@_);
+}
+
+sub _fetch_identity_sql { 'SELECT ' . $_[0]->_identity_method }
+
+my $number = sub { Scalar::Util::looks_like_number($_[0]) };
+
+my $decimal = sub { $_[0] =~ /^ [-+]? \d+ (?:\.\d*)? \z/x };
+
+my %noquote = (
+    int => sub { $_[0] =~ /^ [-+]? \d+ \z/x },
+    bit => => sub { $_[0] =~ /^[01]\z/ },
+    money => sub { $_[0] =~ /^\$ \d+ (?:\.\d*)? \z/x },
+    float => $number,
+    real => $number,
+    double => $number,
+    decimal => $decimal,
+    numeric => $decimal,
+);
+
+sub interpolate_unquoted {
+  my $self = shift;
+  my ($type, $value) = @_;
+
+  return $self->next::method(@_) if not defined $value or not defined $type;
+
+  if (my $key = List::Util::first { $type =~ /$_/i } keys %noquote) {
+    return 1 if $noquote{$key}->($value);
+  }
+  elsif ($self->is_datatype_numeric($type) && $number->($value)) {
+    return 1;
+  }
+
+  return $self->next::method(@_);
+}
+
+sub _prep_interpolated_value {
+  my ($self, $type, $value) = @_;
+
+  if ($type =~ /money/i && defined $value) {
+    # change a ^ not followed by \$ to a \$
+    $value =~ s/^ (?! \$) /\$/x;
+  }
+
+  return $value;
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars - Storage::DBI subclass for
+Sybase ASE without placeholder support
+
+=head1 DESCRIPTION
+
+If you're using this driver then your version of Sybase or the libraries you
+use to connect to it do not support placeholders.
+
+You can also enable this driver explicitly using:
+
+  my $schema = SchemaClass->clone;
+  $schema->storage_type('::DBI::Sybase::ASE::NoBindVars');
+  $schema->connect($dsn, $user, $pass, \%opts);
+
+See the discussion in L<< DBD::Sybase/Using ? Placeholders & bind parameters to
+$sth->execute >> for details on the pros and cons of using placeholders.
+
+One advantage of not using placeholders is that C<select @@identity> will work
+for obtaining the last insert id of an C<IDENTITY> column, instead of having to
+do C<select max(col)> in a transaction as the base Sybase driver does.
+
+When using this driver, bind variables will be interpolated (properly quoted of
+course) into the SQL query itself, without using placeholders.
+
+The caching of prepared statements is also explicitly disabled, as the
+interpolation renders it useless.
+
+=head1 AUTHORS
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+# vim:sts=2 sw=2:

Copied: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm (from rev 8895, DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase.pm)
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,1172 @@
+package DBIx::Class::Storage::DBI::Sybase::ASE;
+
+use strict;
+use warnings;
+
+use base qw/
+    DBIx::Class::Storage::DBI::Sybase
+    DBIx::Class::Storage::DBI::AutoCast
+/;
+use mro 'c3';
+use Carp::Clan qw/^DBIx::Class/;
+use Scalar::Util();
+use List::Util();
+use Sub::Name();
+use Data::Dumper::Concise();
+
+__PACKAGE__->mk_group_accessors('simple' =>
+    qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
+       _bulk_storage _is_bulk_storage _began_bulk_work
+       _bulk_disabled_due_to_coderef_connect_info_warned
+       _identity_method/
+);
+
+my @also_proxy_to_extra_storages = qw/
+  connect_call_set_auto_cast auto_cast connect_call_blob_setup
+  connect_call_datetime_setup
+
+  disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
+  auto_savepoint unsafe cursor_class debug debugobj schema
+/;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase::ASE - Sybase ASE SQL Server support for
+DBIx::Class
+
+=head1 SYNOPSIS
+
+This subclass supports L<DBD::Sybase> for real (non-Microsoft) Sybase databases.
+
+=head1 DESCRIPTION
+
+If your version of Sybase does not support placeholders, then your storage will
+be reblessed to L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
+You can also enable that driver explicitly, see the documentation for more
+details.
+
+With this driver there is unfortunately no way to get the C<last_insert_id>
+without doing a C<SELECT MAX(col)>. This is done safely in a transaction
+(locking the table.) See L</INSERTS WITH PLACEHOLDERS>.
+
+A recommended L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting:
+
+  on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]]
+
+=head1 METHODS
+
+=cut
+
+sub _rebless {
+  my $self = shift;
+
+  my $no_bind_vars = __PACKAGE__ . '::NoBindVars';
+
+  if ($self->using_freetds) {
+    carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
+
+You are using FreeTDS with Sybase.
+
+We will do our best to support this configuration, but please consider this
+support experimental.
+
+TEXT/IMAGE columns will definitely not work.
+
+You are encouraged to recompile DBD::Sybase with the Sybase Open Client libraries
+instead.
+
+See perldoc DBIx::Class::Storage::DBI::Sybase::ASE for more details.
+
+To turn off this warning set the DBIC_SYBASE_FREETDS_NOWARN environment
+variable.
+EOF
+
+    if (not $self->_typeless_placeholders_supported) {
+      if ($self->_placeholders_supported) {
+        $self->auto_cast(1);
+      }
+      else {
+        $self->ensure_class_loaded($no_bind_vars);
+        bless $self, $no_bind_vars;
+        $self->_rebless;
+      }
+    }
+  }
+
+  elsif (not $self->_get_dbh->{syb_dynamic_supported}) {
+    # not necessarily FreeTDS, but no placeholders nevertheless
+    $self->ensure_class_loaded($no_bind_vars);
+    bless $self, $no_bind_vars;
+    $self->_rebless;
+  }
+  # this is highly unlikely, but we check just in case
+  elsif (not $self->_typeless_placeholders_supported) {
+    $self->auto_cast(1);
+  }
+}
+
+sub _init {
+  my $self = shift;
+  $self->_set_max_connect(256);
+
+# create storage for insert/(update blob) transactions,
+# unless this is that storage
+  return if $self->_is_extra_storage;
+
+  my $writer_storage = (ref $self)->new;
+
+  $writer_storage->_is_extra_storage(1);
+  $writer_storage->connect_info($self->connect_info);
+  $writer_storage->auto_cast($self->auto_cast);
+
+  $self->_writer_storage($writer_storage);
+
+# create a bulk storage unless connect_info is a coderef
+  return if ref($self->_dbi_connect_info->[0]) eq 'CODE';
+
+  my $bulk_storage = (ref $self)->new;
+
+  $bulk_storage->_is_extra_storage(1);
+  $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
+  $bulk_storage->connect_info($self->connect_info);
+
+# this is why
+  $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
+
+  $self->_bulk_storage($bulk_storage);
+}
+
+for my $method (@also_proxy_to_extra_storages) {
+  no strict 'refs';
+  no warnings 'redefine';
+
+  my $replaced = __PACKAGE__->can($method);
+
+  *{$method} = Sub::Name::subname $method => sub {
+    my $self = shift;
+    $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
+    $self->_bulk_storage->$replaced(@_)   if $self->_bulk_storage;
+    return $self->$replaced(@_);
+  };
+}
+
+sub disconnect {
+  my $self = shift;
+
+# Even though we call $sth->finish for uses off the bulk API, there's still an
+# "active statement" warning on disconnect, which we throw away here.
+# This is due to the bug described in insert_bulk.
+# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
+  local $SIG{__WARN__} = sub {
+    warn $_[0] unless $_[0] =~ /active statement/i;
+  } if $self->_is_bulk_storage;
+
+# so that next transaction gets a dbh
+  $self->_began_bulk_work(0) if $self->_is_bulk_storage;
+
+  $self->next::method;
+}
+
+# Set up session settings for Sybase databases for the connection.
+#
+# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
+# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
+# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
+# only want when AutoCommit is off.
+#
+# Also SET TEXTSIZE for FreeTDS because LongReadLen doesn't work.
+sub _run_connection_actions {
+  my $self = shift;
+
+  if ($self->_is_bulk_storage) {
+# this should be cleared on every reconnect
+    $self->_began_bulk_work(0);
+    return;
+  }
+
+  if (not $self->using_freetds) {
+    $self->_dbh->{syb_chained_txn} = 1;
+  } else {
+    # based on LongReadLen in connect_info
+    $self->set_textsize;
+
+    if ($self->_dbh_autocommit) {
+      $self->_dbh->do('SET CHAINED OFF');
+    } else {
+      $self->_dbh->do('SET CHAINED ON');
+    }
+  }
+
+  $self->next::method(@_);
+}
+
+=head2 connect_call_blob_setup
+
+Used as:
+
+  on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ]
+
+Does C<< $dbh->{syb_binary_images} = 1; >> to return C<IMAGE> data as raw binary
+instead of as a hex string.
+
+Recommended.
+
+Also sets the C<log_on_update> value for blob write operations. The default is
+C<1>, but C<0> is better if your database is configured for it.
+
+See
+L<DBD::Sybase/Handling_IMAGE/TEXT_data_with_syb_ct_get_data()/syb_ct_send_data()>.
+
+=cut
+
+sub connect_call_blob_setup {
+  my $self = shift;
+  my %args = @_;
+  my $dbh = $self->_dbh;
+  $dbh->{syb_binary_images} = 1;
+
+  $self->_blob_log_on_update($args{log_on_update})
+    if exists $args{log_on_update};
+}
+
+sub _is_lob_type {
+  my $self = shift;
+  my $type = shift;
+  $type && $type =~ /(?:text|image|lob|bytea|binary|memo)/i;
+}
+
+sub _is_lob_column {
+  my ($self, $source, $column) = @_;
+
+  return $self->_is_lob_type($source->column_info($column)->{data_type});
+}
+
+sub _prep_for_execute {
+  my $self = shift;
+  my ($op, $extra_bind, $ident, $args) = @_;
+
+  my ($sql, $bind) = $self->next::method (@_);
+
+  my $table = Scalar::Util::blessed($ident) ? $ident->from : $ident;
+
+  my $bind_info = $self->_resolve_column_info(
+    $ident, [map $_->[0], @{$bind}]
+  );
+  my $bound_identity_col = List::Util::first
+    { $bind_info->{$_}{is_auto_increment} }
+    (keys %$bind_info)
+  ;
+  my $identity_col = Scalar::Util::blessed($ident) &&
+    List::Util::first
+    { $ident->column_info($_)->{is_auto_increment} }
+    $ident->columns
+  ;
+
+  if (($op eq 'insert' && $bound_identity_col) ||
+      ($op eq 'update' && exists $args->[0]{$identity_col})) {
+    $sql = join ("\n",
+      $self->_set_table_identity_sql($op => $table, 'on'),
+      $sql,
+      $self->_set_table_identity_sql($op => $table, 'off'),
+    );
+  }
+
+  if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
+      (not $self->{insert_bulk})) {
+    $sql =
+      "$sql\n" .
+      $self->_fetch_identity_sql($ident, $identity_col);
+  }
+
+  return ($sql, $bind);
+}
+
+sub _set_table_identity_sql {
+  my ($self, $op, $table, $on_off) = @_;
+
+  return sprintf 'SET IDENTITY_%s %s %s',
+    uc($op), $self->sql_maker->_quote($table), uc($on_off);
+}
+
+# Stolen from SQLT, with some modifications. This is a makeshift
+# solution before a sane type-mapping library is available, thus
+# the 'our' for easy overrides.
+our %TYPE_MAPPING  = (
+    number    => 'numeric',
+    money     => 'money',
+    varchar   => 'varchar',
+    varchar2  => 'varchar',
+    timestamp => 'datetime',
+    text      => 'varchar',
+    real      => 'double precision',
+    comment   => 'text',
+    bit       => 'bit',
+    tinyint   => 'smallint',
+    float     => 'double precision',
+    serial    => 'numeric',
+    bigserial => 'numeric',
+    boolean   => 'varchar',
+    long      => 'varchar',
+);
+
+sub _native_data_type {
+  my ($self, $type) = @_;
+
+  $type = lc $type;
+  $type =~ s/\s* identity//x;
+
+  return uc($TYPE_MAPPING{$type} || $type);
+}
+
+sub _fetch_identity_sql {
+  my ($self, $source, $col) = @_;
+
+  return sprintf ("SELECT MAX(%s) FROM %s",
+    map { $self->sql_maker->_quote ($_) } ($col, $source->from)
+  );
+}
+
+sub _execute {
+  my $self = shift;
+  my ($op) = @_;
+
+  my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);
+
+  if ($op eq 'insert') {
+    $self->_identity($sth->fetchrow_array);
+    $sth->finish;
+  }
+
+  return wantarray ? ($rv, $sth, @bind) : $rv;
+}
+
+sub last_insert_id { shift->_identity }
+
+# handles TEXT/IMAGE and transaction for last_insert_id
+sub insert {
+  my $self = shift;
+  my ($source, $to_insert) = @_;
+
+  my $identity_col = (List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns) || '';
+
+  # check for empty insert
+  # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
+  # try to insert explicit 'DEFAULT's instead (except for identity, timestamp
+  # and computed columns)
+  if (not %$to_insert) {
+    for my $col ($source->columns) {
+      next if $col eq $identity_col;
+
+      my $info = $source->column_info($col);
+
+      next if ref $info->{default_value} eq 'SCALAR'
+        || (exists $info->{data_type} && (not defined $info->{data_type}));
+
+      next if $info->{data_type} && $info->{data_type} =~ /^timestamp\z/i;
+
+      $to_insert->{$col} = \'DEFAULT';
+    }
+  }
+
+  my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
+
+  # do we need the horrific SELECT MAX(COL) hack?
+  my $dumb_last_insert_id =
+       $identity_col
+    && (not exists $to_insert->{$identity_col})
+    && ($self->_identity_method||'') ne '@@IDENTITY';
+
+  my $next = $self->next::can;
+
+  # we are already in a transaction, or there are no blobs
+  # and we don't need the PK - just (try to) do it
+  if ($self->{transaction_depth}
+        || (!$blob_cols && !$dumb_last_insert_id)
+  ) {
+    return $self->_insert (
+      $next, $source, $to_insert, $blob_cols, $identity_col
+    );
+  }
+
+  # otherwise use the _writer_storage to do the insert+transaction on another
+  # connection
+  my $guard = $self->_writer_storage->txn_scope_guard;
+
+  my $updated_cols = $self->_writer_storage->_insert (
+    $next, $source, $to_insert, $blob_cols, $identity_col
+  );
+
+  $self->_identity($self->_writer_storage->_identity);
+
+  $guard->commit;
+
+  return $updated_cols;
+}
+
+sub _insert {
+  my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;
+
+  my $updated_cols = $self->$next ($source, $to_insert);
+
+  my $final_row = {
+    ($identity_col ?
+      ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
+    %$to_insert,
+    %$updated_cols,
+  };
+
+  $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;
+
+  return $updated_cols;
+}
+
+sub update {
+  my $self = shift;
+  my ($source, $fields, $where, @rest) = @_;
+
+  my $wantarray = wantarray;
+
+  my $blob_cols = $self->_remove_blob_cols($source, $fields);
+
+  my $table = $source->name;
+
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
+
+  my $is_identity_update = $identity_col && defined $fields->{$identity_col};
+
+  return $self->next::method(@_) unless $blob_cols;
+
+# If there are any blobs in $where, Sybase will return a descriptive error
+# message.
+# XXX blobs can still be used with a LIKE query, and this should be handled.
+
+# update+blob update(s) done atomically on separate connection
+  $self = $self->_writer_storage;
+
+  my $guard = $self->txn_scope_guard;
+
+# First update the blob columns to be updated to '' (taken from $fields, where
+# it is originally put by _remove_blob_cols .)
+  my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
+
+# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
+
+  $self->next::method($source, \%blobs_to_empty, $where, @rest);
+
+# Now update the blobs before the other columns in case the update of other
+# columns makes the search condition invalid.
+  $self->_update_blobs($source, $blob_cols, $where);
+
+  my @res;
+  if (%$fields) {
+    if ($wantarray) {
+      @res    = $self->next::method(@_);
+    }
+    elsif (defined $wantarray) {
+      $res[0] = $self->next::method(@_);
+    }
+    else {
+      $self->next::method(@_);
+    }
+  }
+
+  $guard->commit;
+
+  return $wantarray ? @res : $res[0];
+}
+
+sub insert_bulk {
+  my $self = shift;
+  my ($source, $cols, $data) = @_;
+
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
+
+  my $is_identity_insert = (List::Util::first
+    { $_ eq $identity_col }
+    @{$cols}
+  ) ? 1 : 0;
+
+  my @source_columns = $source->columns;
+
+  my $use_bulk_api =
+    $self->_bulk_storage &&
+    $self->_get_dbh->{syb_has_blk};
+
+  if ((not $use_bulk_api)
+        &&
+      (ref($self->_dbi_connect_info->[0]) eq 'CODE')
+        &&
+      (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
+    carp <<'EOF';
+Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
+regular array inserts.
+EOF
+    $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
+  }
+
+  if (not $use_bulk_api) {
+    my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
+
+# _execute_array uses a txn anyway, but it ends too early in case we need to
+# select max(col) to get the identity for inserting blobs.
+    ($self, my $guard) = $self->{transaction_depth} == 0 ?
+      ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
+      :
+      ($self, undef);
+
+    local $self->{insert_bulk} = 1;
+
+    $self->next::method(@_);
+
+    if ($blob_cols) {
+      if ($is_identity_insert) {
+        $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
+      }
+      else {
+        my @cols_with_identities = (@$cols, $identity_col);
+
+        ## calculate identities
+        # XXX This assumes identities always increase by 1, which may or may not
+        # be true.
+        my ($last_identity) =
+          $self->_dbh->selectrow_array (
+            $self->_fetch_identity_sql($source, $identity_col)
+          );
+        my @identities = (($last_identity - @$data + 1) .. $last_identity);
+
+        my @data_with_identities = map [@$_, shift @identities], @$data;
+
+        $self->_insert_blobs_array (
+          $source, $blob_cols, \@cols_with_identities, \@data_with_identities
+        );
+      }
+    }
+
+    $guard->commit if $guard;
+
+    return;
+  }
+
+# otherwise, use the bulk API
+
+# rearrange @$data so that columns are in database order
+  my %orig_idx;
+  @orig_idx{@$cols} = 0..$#$cols;
+
+  my %new_idx;
+  @new_idx{@source_columns} = 0..$#source_columns;
+
+  my @new_data;
+  for my $datum (@$data) {
+    my $new_datum = [];
+    for my $col (@source_columns) {
+# identity data will be 'undef' if not $is_identity_insert
+# columns with defaults will also be 'undef'
+      $new_datum->[ $new_idx{$col} ] =
+        exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
+    }
+    push @new_data, $new_datum;
+  }
+
+# bcp identity index is 1-based
+  my $identity_idx = exists $new_idx{$identity_col} ?
+    $new_idx{$identity_col} + 1 : 0;
+
+## Set a client-side conversion error handler, straight from DBD::Sybase docs.
+# This ignores any data conversion errors detected by the client side libs, as
+# they are usually harmless.
+  my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
+    Sub::Name::subname insert_bulk => sub {
+      my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
+
+      return 1 if $errno == 36;
+
+      carp
+        "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
+        ($errmsg ? "\n$errmsg" : '') .
+        ($osmsg  ? "\n$osmsg"  : '')  .
+        ($blkmsg ? "\n$blkmsg" : '');
+
+      return 0;
+  });
+
+  eval {
+    my $bulk = $self->_bulk_storage;
+
+    my $guard = $bulk->txn_scope_guard;
+
+## XXX get this to work instead of our own $sth
+## will require SQLA or *Hacks changes for ordered columns
+#    $bulk->next::method($source, \@source_columns, \@new_data, {
+#      syb_bcp_attribs => {
+#        identity_flag   => $is_identity_insert,
+#        identity_column => $identity_idx,
+#      }
+#    });
+    my $sql = 'INSERT INTO ' .
+      $bulk->sql_maker->_quote($source->name) . ' (' .
+# colname list is ignored for BCP, but does no harm
+      (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
+      ' VALUES ('.  (join ', ', ('?') x @source_columns) . ')';
+
+## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
+## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
+## better yet the version above. Should be fixed in DBD::Sybase .
+    my $sth = $bulk->_get_dbh->prepare($sql,
+#      'insert', # op
+      {
+        syb_bcp_attribs => {
+          identity_flag   => $is_identity_insert,
+          identity_column => $identity_idx,
+        }
+      }
+    );
+
+    my @bind = do {
+      my $idx = 0;
+      map [ $_, $idx++ ], @source_columns;
+    };
+
+    $self->_execute_array(
+      $source, $sth, \@bind, \@source_columns, \@new_data, sub {
+        $guard->commit
+      }
+    );
+
+    $bulk->_query_end($sql);
+  };
+
+  my $exception = $@;
+  DBD::Sybase::set_cslib_cb($orig_cslib_cb);
+
+  if ($exception =~ /-Y option/) {
+    carp <<"EOF";
+
+Sybase bulk API operation failed due to character set incompatibility, reverting
+to regular array inserts:
+
+*** Try unsetting the LANG environment variable.
+
+$exception
+EOF
+    $self->_bulk_storage(undef);
+    unshift @_, $self;
+    goto \&insert_bulk;
+  }
+  elsif ($exception) {
+# rollback makes the bulkLogin connection unusable
+    $self->_bulk_storage->disconnect;
+    $self->throw_exception($exception);
+  }
+}
+
+sub _dbh_execute_array {
+  my ($self, $sth, $tuple_status, $cb) = @_;
+
+  my $rv = $self->next::method($sth, $tuple_status);
+  $cb->() if $cb;
+
+  return $rv;
+}
+
+# Make sure blobs are not bound as placeholders, and return any non-empty ones
+# as a hash.
+sub _remove_blob_cols {
+  my ($self, $source, $fields) = @_;
+
+  my %blob_cols;
+
+  for my $col (keys %$fields) {
+    if ($self->_is_lob_column($source, $col)) {
+      my $blob_val = delete $fields->{$col};
+      if (not defined $blob_val) {
+        $fields->{$col} = \'NULL';
+      }
+      else {
+        $fields->{$col} = \"''";
+        $blob_cols{$col} = $blob_val unless $blob_val eq '';
+      }
+    }
+  }
+
+  return %blob_cols ? \%blob_cols : undef;
+}
+
+# same for insert_bulk
+sub _remove_blob_cols_array {
+  my ($self, $source, $cols, $data) = @_;
+
+  my @blob_cols;
+
+  for my $i (0..$#$cols) {
+    my $col = $cols->[$i];
+
+    if ($self->_is_lob_column($source, $col)) {
+      for my $j (0..$#$data) {
+        my $blob_val = delete $data->[$j][$i];
+        if (not defined $blob_val) {
+          $data->[$j][$i] = \'NULL';
+        }
+        else {
+          $data->[$j][$i] = \"''";
+          $blob_cols[$j][$i] = $blob_val
+            unless $blob_val eq '';
+        }
+      }
+    }
+  }
+
+  return @blob_cols ? \@blob_cols : undef;
+}
+
+sub _update_blobs {
+  my ($self, $source, $blob_cols, $where) = @_;
+
+  my @primary_cols = eval { $source->_pri_cols };
+  $self->throw_exception("Cannot update TEXT/IMAGE column(s): $@")
+    if $@;
+
+# check if we're updating a single row by PK
+  my $pk_cols_in_where = 0;
+  for my $col (@primary_cols) {
+    $pk_cols_in_where++ if defined $where->{$col};
+  }
+  my @rows;
+
+  if ($pk_cols_in_where == @primary_cols) {
+    my %row_to_update;
+    @row_to_update{@primary_cols} = @{$where}{@primary_cols};
+    @rows = \%row_to_update;
+  } else {
+    my $cursor = $self->select ($source, \@primary_cols, $where, {});
+    @rows = map {
+      my %row; @row{@primary_cols} = @$_; \%row
+    } $cursor->all;
+  }
+
+  for my $row (@rows) {
+    $self->_insert_blobs($source, $blob_cols, $row);
+  }
+}
+
+sub _insert_blobs {
+  my ($self, $source, $blob_cols, $row) = @_;
+  my $dbh = $self->_get_dbh;
+
+  my $table = $source->name;
+
+  my %row = %$row;
+  my @primary_cols = eval { $source->_pri_cols} ;
+  $self->throw_exception("Cannot update TEXT/IMAGE column(s): $@")
+    if $@;
+
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
+    if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
+
+  for my $col (keys %$blob_cols) {
+    my $blob = $blob_cols->{$col};
+
+    my %where = map { ($_, $row{$_}) } @primary_cols;
+
+    my $cursor = $self->select ($source, [$col], \%where, {});
+    $cursor->next;
+    my $sth = $cursor->sth;
+
+    if (not $sth) {
+
+      $self->throw_exception(
+          "Could not find row in table '$table' for blob update:\n"
+        . Data::Dumper::Concise::Dumper (\%where)
+      );
+    }
+
+    eval {
+      do {
+        $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
+      } while $sth->fetch;
+
+      $sth->func('ct_prepare_send') or die $sth->errstr;
+
+      my $log_on_update = $self->_blob_log_on_update;
+      $log_on_update    = 1 if not defined $log_on_update;
+
+      $sth->func('CS_SET', 1, {
+        total_txtlen => length($blob),
+        log_on_update => $log_on_update
+      }, 'ct_data_info') or die $sth->errstr;
+
+      $sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr;
+
+      $sth->func('ct_finish_send') or die $sth->errstr;
+    };
+    my $exception = $@;
+    $sth->finish if $sth;
+    if ($exception) {
+      if ($self->using_freetds) {
+        $self->throw_exception (
+          'TEXT/IMAGE operation failed, probably because you are using FreeTDS: '
+          . $exception
+        );
+      } else {
+        $self->throw_exception($exception);
+      }
+    }
+  }
+}
+
+sub _insert_blobs_array {
+  my ($self, $source, $blob_cols, $cols, $data) = @_;
+
+  for my $i (0..$#$data) {
+    my $datum = $data->[$i];
+
+    my %row;
+    @row{ @$cols } = @$datum;
+
+    my %blob_vals;
+    for my $j (0..$#$cols) {
+      if (exists $blob_cols->[$i][$j]) {
+        $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
+      }
+    }
+
+    $self->_insert_blobs ($source, \%blob_vals, \%row);
+  }
+}
+
+=head2 connect_call_datetime_setup
+
+Used as:
+
+  on_connect_call => 'datetime_setup'
+
+In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set:
+
+  $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
+  $dbh->do('set dateformat mdy');   # input fmt:  08/13/1979 18:08:55.080
+
+On connection for use with L<DBIx::Class::InflateColumn::DateTime>, using
+L<DateTime::Format::Sybase>, which you will need to install.
+
+This works for both C<DATETIME> and C<SMALLDATETIME> columns, although
+C<SMALLDATETIME> columns only have minute precision.
+
+=cut
+
+{
+  my $old_dbd_warned = 0;
+
+  sub connect_call_datetime_setup {
+    my $self = shift;
+    my $dbh = $self->_get_dbh;
+
+    if ($dbh->can('syb_date_fmt')) {
+      # amazingly, this works with FreeTDS
+      $dbh->syb_date_fmt('ISO_strict');
+    } elsif (not $old_dbd_warned) {
+      carp "Your DBD::Sybase is too old to support ".
+      "DBIx::Class::InflateColumn::DateTime, please upgrade!";
+      $old_dbd_warned = 1;
+    }
+
+    $dbh->do('SET DATEFORMAT mdy');
+
+    1;
+  }
+}
+
+sub datetime_parser_type { "DateTime::Format::Sybase" }
+
+# ->begin_work and such have no effect with FreeTDS but we run them anyway to
+# let the DBD keep any state it needs to.
+#
+# If they ever do start working, the extra statements will do no harm (because
+# Sybase supports nested transactions.)
+
+sub _dbh_begin_work {
+  my $self = shift;
+
+# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
+# TRAN once. However, we need to make sure there's a $dbh.
+  return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
+
+  $self->next::method(@_);
+
+  if ($self->using_freetds) {
+    $self->_get_dbh->do('BEGIN TRAN');
+  }
+
+  $self->_began_bulk_work(1) if $self->_is_bulk_storage;
+}
+
+sub _dbh_commit {
+  my $self = shift;
+  if ($self->using_freetds) {
+    $self->_dbh->do('COMMIT');
+  }
+  return $self->next::method(@_);
+}
+
+sub _dbh_rollback {
+  my $self = shift;
+  if ($self->using_freetds) {
+    $self->_dbh->do('ROLLBACK');
+  }
+  return $self->next::method(@_);
+}
+
+# savepoint support using ASE syntax
+
+sub _svp_begin {
+  my ($self, $name) = @_;
+
+  $self->_get_dbh->do("SAVE TRANSACTION $name");
+}
+
+# A new SAVE TRANSACTION with the same name releases the previous one.
+sub _svp_release { 1 }
+
+sub _svp_rollback {
+  my ($self, $name) = @_;
+
+  $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
+}
+
+1;
+
+=head1 Schema::Loader Support
+
+As of version C<0.05000>, L<DBIx::Class::Schema::Loader> should work well with
+most (if not all) versions of Sybase ASE.
+
+=head1 FreeTDS
+
+This driver supports L<DBD::Sybase> compiled against FreeTDS
+(L<http://www.freetds.org/>) to the best of our ability, however it is
+recommended that you recompile L<DBD::Sybase> against the Sybase Open Client
+libraries. They are a part of the Sybase ASE distribution:
+
+The Open Client FAQ is here:
+L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.
+
+Sybase ASE for Linux (which comes with the Open Client libraries) may be
+downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
+
+To see if you're using FreeTDS check C<< $schema->storage->using_freetds >>, or run:
+
+  perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
+
+Some versions of the libraries involved will not support placeholders, in which
+case the storage will be reblessed to
+L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
+
+In some configurations, placeholders will work but will throw implicit type
+conversion errors for anything that's not expecting a string. In such a case,
+the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
+automatically set, which you may enable on connection with
+L<DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>. The type info
+for the C<CAST>s is taken from the L<DBIx::Class::ResultSource/data_type>
+definitions in your Result classes, and are mapped to a Sybase type (if it isn't
+already) using a mapping based on L<SQL::Translator>.
+
+In other configurations, placeholders will work just as they do with the Sybase
+Open Client libraries.
+
+Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
+
+=head1 INSERTS WITH PLACEHOLDERS
+
+With placeholders enabled, inserts are done in a transaction so that there are
+no concurrency issues with getting the inserted identity value using
+C<SELECT MAX(col)>, which is the only way to get the C<IDENTITY> value in this
+mode.
+
+In addition, they are done on a separate connection so that it's possible to
+have active cursors when doing an insert.
+
+When using C<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars> transactions
+are disabled, as there are no concurrency issues with C<SELECT @@IDENTITY> as
+it's a session variable.
+
+=head1 TRANSACTIONS
+
+Due to limitations of the TDS protocol, L<DBD::Sybase>, or both, you cannot
+begin a transaction while there are active cursors, nor can you use multiple
+active cursors within a transaction. An active cursor is, for example, a
+L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
+C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
+
+For example, this will not work:
+
+  $schema->txn_do(sub {
+    my $rs = $schema->resultset('Book');
+    while (my $row = $rs->next) {
+      $schema->resultset('MetaData')->create({
+        book_id => $row->id,
+        ...
+      });
+    }
+  });
+
+This won't either:
+
+  my $first_row = $large_rs->first;
+  $schema->txn_do(sub { ... });
+
+Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
+are not affected, as they are done on an extra database handle.
+
+Some workarounds:
+
+=over 4
+
+=item * use L<DBIx::Class::Storage::DBI::Replicated>
+
+=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>
+
+=item * load the data from your cursor with L<DBIx::Class::ResultSet/all>
+
+=back
+
+=head1 MAXIMUM CONNECTIONS
+
+The TDS protocol makes separate connections to the server for active statements
+in the background. By default the number of such connections is limited to 25,
+on both the client side and the server side.
+
+This is a bit too low for a complex L<DBIx::Class> application, so on connection
+the client side setting is set to C<256> (see L<DBD::Sybase/maxConnect>.) You
+can override it to whatever setting you like in the DSN.
+
+See
+L<http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.ase_15.0.sag1/html/sag1/sag1272.htm>
+for information on changing the setting on the server side.
+
+=head1 DATES
+
+See L</connect_call_datetime_setup> to setup date formats
+for L<DBIx::Class::InflateColumn::DateTime>.
+
+=head1 TEXT/IMAGE COLUMNS
+
+L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
+C<TEXT/IMAGE> columns.
+
+Setting C<< $dbh->{LongReadLen} >> will also not work with FreeTDS; use either:
+
+  $schema->storage->dbh->do("SET TEXTSIZE $bytes");
+
+or
+
+  $schema->storage->set_textsize($bytes);
+
+instead.
+
+However, the C<LongReadLen> you pass in
+L<connect_info|DBIx::Class::Storage::DBI/connect_info> is used to execute the
+equivalent C<SET TEXTSIZE> command on connection.
+
+See L</connect_call_blob_setup> for a
+L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting you need to work
+with C<IMAGE> columns.
+
+=head1 BULK API
+
+The experimental L<DBD::Sybase> Bulk API support is used for
+L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
+on a separate connection.
+
+To use this feature effectively, use a large number of rows for each
+L<populate|DBIx::Class::ResultSet/populate> call, eg.:
+
+  while (my $rows = $data_source->get_100_rows()) {
+    $rs->populate($rows);
+  }
+
+B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
+calls in your C<Result> classes B<must> list columns in database order for this
+to work. Also, you may have to unset the C<LANG> environment variable before
+loading your app, if it doesn't match the character set of your database.
+
+When inserting IMAGE columns using this method, you'll need to use
+L</connect_call_blob_setup> as well.
+
+=head1 COMPUTED COLUMNS
+
+If you have columns such as:
+
+  created_dtm AS getdate()
+
+represent them in your Result classes as:
+
+  created_dtm => {
+    data_type => undef,
+    default_value => \'getdate()',
+    is_nullable => 0,
+  }
+
+The C<data_type> must exist and must be C<undef>. Then empty inserts will work
+on tables with such columns.
+
+=head1 TIMESTAMP COLUMNS
+
+C<timestamp> columns in Sybase ASE are not really timestamps, see:
+L<http://dba.fyicenter.com/Interview-Questions/SYBASE/The_timestamp_datatype_in_Sybase_.html>.
+
+They should be defined in your Result classes as:
+
+  ts => {
+    data_type => 'timestamp',
+    is_nullable => 0,
+    inflate_datetime => 0,
+  }
+
+The C<< inflate_datetime => 0 >> is necessary if you use
+L<DBIx::Class::InflateColumn::DateTime>, and most people do, and still want to
+be able to read these values.
+
+The values will come back as hexadecimal.
+
+=head1 TODO
+
+=over
+
+=item *
+
+Transitions to AutoCommit=0 (starting a transaction) mode by exhausting
+any active cursors, using eager cursors.
+
+=item *
+
+Real limits and limited counts using stored procedures deployed on startup.
+
+=item *
+
+Adaptive Server Anywhere (ASA) support, with possible SQLA::Limit support.
+
+=item *
+
+Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
+
+=item *
+
+bulk_insert using prepare_cached (see comments.)
+
+=back
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+# vim:sts=2 sw=2:

Deleted: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/Base.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,54 +0,0 @@
-package # hide from PAUSE
-    DBIx::Class::Storage::DBI::Sybase::Base;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::Storage::DBI/;
-use mro 'c3';
-
-=head1 NAME
-
-DBIx::Class::Storage::DBI::Sybase::Base - Common functionality for drivers using
-DBD::Sybase
-
-=cut
-
-sub _ping {
-  my $self = shift;
-
-  my $dbh = $self->_dbh or return 0;
-
-  local $dbh->{RaiseError} = 1;
-  eval {
-    $dbh->do('select 1');
-  };
-
-  return $@ ? 0 : 1;
-}
-
-sub _placeholders_supported {
-  my $self = shift;
-  my $dbh  = $self->_get_dbh;
-
-  return eval {
-# There's also $dbh->{syb_dynamic_supported} but it can be inaccurate for this
-# purpose.
-    local $dbh->{PrintError} = 0;
-    local $dbh->{RaiseError} = 1;
-# this specifically tests a bind that is NOT a string
-    $dbh->selectrow_array('select 1 where 1 = ?', {}, 1);
-  };
-}
-
-1;
-
-=head1 AUTHORS
-
-See L<DBIx::Class/CONTRIBUTORS>.
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -9,9 +9,8 @@
 /;
 use mro 'c3';
 
-sub _rebless {
+sub _init {
   my $self = shift;
-
   $self->disable_sth_caching(1);
 }
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -4,30 +4,140 @@
 use warnings;
 
 use base qw/
-  DBIx::Class::Storage::DBI::Sybase::Base
+  DBIx::Class::Storage::DBI::Sybase
   DBIx::Class::Storage::DBI::MSSQL
 /;
 use mro 'c3';
+use Carp::Clan qw/^DBIx::Class/;
 
 sub _rebless {
   my $self = shift;
   my $dbh  = $self->_get_dbh;
 
-  if (not $self->_placeholders_supported) {
+  return if ref $self ne __PACKAGE__;
+
+  if (not $self->_typeless_placeholders_supported) {
+    require
+      DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars;
     bless $self,
       'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars';
     $self->_rebless;
   }
+}
 
-# LongReadLen doesn't work with MSSQL through DBD::Sybase, and the default is
-# huge on some versions of SQL server and can cause memory problems, so we
-# fix it up here.
-  my $text_size = eval { $self->_dbi_connect_info->[-1]->{LongReadLen} } ||
-    32768; # the DBD::Sybase default
+sub _run_connection_actions {
+  my $self = shift;
 
-  $dbh->do("set textsize $text_size");
+  # LongReadLen doesn't work with MSSQL through DBD::Sybase, and the default is
+  # huge on some versions of SQL server and can cause memory problems, so we
+  # fix it up here (see ::DBI::Sybase.pm)
+  $self->set_textsize;
+
+  $self->next::method(@_);
 }
 
+sub _dbh_begin_work {
+  my $self = shift;
+
+  $self->_get_dbh->do('BEGIN TRAN');
+}
+
+sub _dbh_commit {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot COMMIT on a disconnected handle');
+  $dbh->do('COMMIT');
+}
+
+sub _dbh_rollback {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
+  $dbh->do('ROLLBACK');
+}
+
+sub _get_server_version {
+  my $self = shift;
+
+  my $product_version = $self->_get_dbh->selectrow_hashref('xp_msver ProductVersion');
+
+  if ((my $version = $product_version->{Character_Value}) =~ /^(\d+)\./) {
+    return $version;
+  }
+  else {
+    $self->throw_exception(
+      "MSSQL Version Retrieval Failed, Your ProductVersion's Character_Value is missing or malformed!"
+    );
+  }
+}
+
+=head2 connect_call_datetime_setup
+
+Used as:
+
+  on_connect_call => 'datetime_setup'
+
+In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set:
+
+  $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
+
+On connection for use with L<DBIx::Class::InflateColumn::DateTime>
+
+This works for both C<DATETIME> and C<SMALLDATETIME> columns, although
+C<SMALLDATETIME> columns only have minute precision.
+
+=cut
+
+{
+  my $old_dbd_warned = 0;
+
+  sub connect_call_datetime_setup {
+    my $self = shift;
+    my $dbh = $self->_get_dbh;
+
+    if ($dbh->can('syb_date_fmt')) {
+      # amazingly, this works with FreeTDS
+      $dbh->syb_date_fmt('ISO_strict');
+    } elsif (not $old_dbd_warned) {
+      carp "Your DBD::Sybase is too old to support ".
+      "DBIx::Class::InflateColumn::DateTime, please upgrade!";
+      $old_dbd_warned = 1;
+    }
+  }
+}
+
+sub datetime_parser_type {
+  'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::DateTime::Format'
+} 
+
+package # hide from PAUSE
+  DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::DateTime::Format;
+
+my $datetime_parse_format  = '%Y-%m-%dT%H:%M:%S.%3NZ';
+my $datetime_format_format = '%Y-%m-%d %H:%M:%S.%3N'; # %F %T 
+
+my ($datetime_parser, $datetime_formatter);
+
+sub parse_datetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $datetime_parser ||= DateTime::Format::Strptime->new(
+    pattern  => $datetime_parse_format,
+    on_error => 'croak',
+  );
+  return $datetime_parser->parse_datetime(shift);
+}
+
+sub format_datetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $datetime_formatter ||= DateTime::Format::Strptime->new(
+    pattern  => $datetime_format_format,
+    on_error => 'croak',
+  );
+  return $datetime_formatter->format_datetime(shift);
+}
+
 1;
 
 =head1 NAME

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -3,62 +3,128 @@
 use strict;
 use warnings;
 
-use base qw/
-    DBIx::Class::Storage::DBI::Sybase::Base
-    DBIx::Class::Storage::DBI::NoBindVars
-/;
-use mro 'c3';
+use base qw/DBIx::Class::Storage::DBI/;
 
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase - Base class for drivers using
+L<DBD::Sybase>
+
+=head1 DESCRIPTION
+
+This is the base class/dispatcher for Storages designed to work with
+L<DBD::Sybase>
+
+=head1 METHODS
+
+=cut
+
 sub _rebless {
-    my $self = shift;
+  my $self = shift;
 
-    my $dbtype = eval {
-      @{$self->_get_dbh
-        ->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})
-      }[2]
-    };
-    unless ( $@ ) {
-        $dbtype =~ s/\W/_/gi;
-        my $subclass = "DBIx::Class::Storage::DBI::Sybase::${dbtype}";
-        if ($self->load_optional_class($subclass) && !$self->isa($subclass)) {
-            bless $self, $subclass;
-            $self->_rebless;
-        }
+  my $dbtype = eval {
+    @{$self->_get_dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2]
+  };
+
+  $self->throw_exception("Unable to establish a connection to determine database type: $@")
+    if $@;
+
+  if ($dbtype) {
+    $dbtype =~ s/\W/_/gi;
+
+    # saner class name
+    $dbtype = 'ASE' if $dbtype eq 'SQL_Server';
+
+    my $subclass = __PACKAGE__ . "::$dbtype";
+    if ($self->load_optional_class($subclass)) {
+      bless $self, $subclass;
+      $self->_rebless;
     }
+  }
 }
 
-sub _dbh_last_insert_id {
-    my ($self, $dbh, $source, $col) = @_;
-    return ($dbh->selectrow_array('select @@identity'))[0];
+sub _ping {
+  my $self = shift;
+
+  my $dbh = $self->_dbh or return 0;
+
+  local $dbh->{RaiseError} = 1;
+  local $dbh->{PrintError} = 0;
+
+  if ($dbh->{syb_no_child_con}) {
+# if extra connections are not allowed, then ->ping is reliable
+    my $ping = eval { $dbh->ping };
+    return $@ ? 0 : $ping;
+  }
+
+  eval {
+# XXX if the main connection goes stale, does opening another for this statement
+# really determine anything?
+    $dbh->do('select 1');
+  };
+
+  return $@ ? 0 : 1;
 }
 
-1;
+sub _set_max_connect {
+  my $self = shift;
+  my $val  = shift || 256;
 
-=head1 NAME
+  my $dsn = $self->_dbi_connect_info->[0];
 
-DBIx::Class::Storage::DBI::Sybase - Storage::DBI subclass for Sybase
+  return if ref($dsn) eq 'CODE';
 
-=head1 SYNOPSIS
+  if ($dsn !~ /maxConnect=/) {
+    $self->_dbi_connect_info->[0] = "$dsn;maxConnect=$val";
+    my $connected = defined $self->_dbh;
+    $self->disconnect;
+    $self->ensure_connected if $connected;
+  }
+}
 
-This subclass supports L<DBD::Sybase> for real Sybase databases.  If
-you are using an MSSQL database via L<DBD::Sybase>, see
-L<DBIx::Class::Storage::DBI::Sybase::MSSQL>.
+=head2 using_freetds
 
-=head1 CAVEATS
+Whether or not L<DBD::Sybase> was compiled against FreeTDS. If false, it means
+the Sybase OpenClient libraries were used.
 
-This storage driver uses L<DBIx::Class::Storage::DBI::NoBindVars> as a base.
-This means that bind variables will be interpolated (properly quoted of course)
-into the SQL query itself, without using bind placeholders.
+=cut
 
-More importantly this means that caching of prepared statements is explicitly
-disabled, as the interpolation renders it useless.
+sub using_freetds {
+  my $self = shift;
 
+  return $self->_get_dbh->{syb_oc_version} =~ /freetds/i;
+}
+
+=head2 set_textsize
+
+When using FreeTDS and/or MSSQL, C<< $dbh->{LongReadLen} >> is not available,
+use this function instead. It does:
+
+  $dbh->do("SET TEXTSIZE $bytes");
+
+Takes the number of bytes, or uses the C<LongReadLen> value from your
+L<connect_info|DBIx::Class::Storage::DBI/connect_info> if omitted, lastly falls
+back to the C<32768> which is the L<DBD::Sybase> default.
+
+=cut
+
+sub set_textsize {
+  my $self = shift;
+  my $text_size = shift ||
+    eval { $self->_dbi_connect_info->[-1]->{LongReadLen} } ||
+    32768; # the DBD::Sybase default
+
+  return unless defined $text_size;
+
+  $self->_dbh->do("SET TEXTSIZE $text_size");
+}
+
+1;
+
 =head1 AUTHORS
 
-Brandon L Black <blblack at gmail.com>
+See L<DBIx::Class/CONTRIBUTORS>.
 
-Justin Hunter <justin.d.hunter at gmail.com>
-
 =head1 LICENSE
 
 You may distribute this code under the same terms as Perl itself.


Property changes on: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/Sybase.pm
___________________________________________________________________
Deleted: svn:eol-style
   - native

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/UniqueIdentifier.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/UniqueIdentifier.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/UniqueIdentifier.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,83 @@
+package DBIx::Class::Storage::DBI::UniqueIdentifier;
+
+use strict;
+use warnings;
+use base 'DBIx::Class::Storage::DBI';
+use mro 'c3';
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::UniqueIdentifier - Storage component for RDBMSes
+supporting the 'uniqueidentifier' type
+
+=head1 DESCRIPTION
+
+This is a storage component for databases that support the C<uniqueidentifier>
+type and the C<NEWID()> function for generating UUIDs.
+
+UUIDs are generated automatically for PK columns with the C<uniqueidentifier>
+L<data_type|DBIx::Class::ResultSource/data_type>, as well as non-PK with this
+L<data_type|DBIx::Class::ResultSource/data_type> and
+L<auto_nextval|DBIx::Class::ResultSource/auto_nextval>.
+
+Currently used by L<DBIx::Class::Storage::DBI::MSSQL> and
+L<DBIx::Class::Storage::DBI::SQLAnywhere>.
+
+The composing class can define a C<_new_uuid> method to override the function
+used to generate a new UUID.
+
+=cut
+
+sub _new_uuid { 'NEWID()' }
+
+sub insert {
+  my $self = shift;
+  my ($source, $to_insert) = @_;
+
+  my $supplied_col_info = $self->_resolve_column_info($source, [keys %$to_insert] );
+
+  my %guid_cols;
+  my @pk_cols = $source->primary_columns;
+  my %pk_cols;
+  @pk_cols{@pk_cols} = ();
+
+  my @pk_guids = grep {
+    $source->column_info($_)->{data_type}
+    &&
+    $source->column_info($_)->{data_type} =~ /^uniqueidentifier/i
+  } @pk_cols;
+
+  my @auto_guids = grep {
+    $source->column_info($_)->{data_type}
+    &&
+    $source->column_info($_)->{data_type} =~ /^uniqueidentifier/i
+    &&
+    $source->column_info($_)->{auto_nextval}
+  } grep { not exists $pk_cols{$_} } $source->columns;
+
+  my @get_guids_for =
+    grep { not exists $to_insert->{$_} } (@pk_guids, @auto_guids);
+
+  my $updated_cols = {};
+
+  for my $guid_col (@get_guids_for) {
+    my ($new_guid) = $self->_get_dbh->selectrow_array('SELECT '.$self->_new_uuid);
+    $updated_cols->{$guid_col} = $to_insert->{$guid_col} = $new_guid;
+  }
+
+  $updated_cols = { %$updated_cols, %{ $self->next::method(@_) } };
+
+  return $updated_cols;
+}
+
+=head1 AUTHOR
+
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/mysql.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/mysql.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI/mysql.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -5,7 +5,6 @@
 
 use base qw/
   DBIx::Class::Storage::DBI::MultiColumnIn
-  DBIx::Class::Storage::DBI::AmbiguousGlob
   DBIx::Class::Storage::DBI
 /;
 use mro 'c3';
@@ -33,6 +32,21 @@
   $dbh->{mysql_insertid};
 }
 
+# we need to figure out what mysql version we're running
+sub sql_maker {
+  my $self = shift;
+
+  unless ($self->_sql_maker) {
+    my $maker = $self->next::method (@_);
+
+    # mysql 3 does not understand a bare JOIN
+    my $mysql_ver = $self->_get_dbh->get_info(18);
+    $maker->{_default_jointype} = 'INNER' if $mysql_ver =~ /^3/;
+  }
+
+  return $self->_sql_maker;
+}
+
 sub sqlt_type {
   return 'MySQL';
 }
@@ -85,12 +99,30 @@
 
 =head1 DESCRIPTION
 
-This class implements MySQL specific bits of L<DBIx::Class::Storage::DBI>.
+This class implements MySQL specific bits of L<DBIx::Class::Storage::DBI>,
+like AutoIncrement column support and savepoints. Also it augments the
+SQL maker to support the MySQL-specific C<STRAIGHT_JOIN> join type, which
+you can use by specifying C<< join_type => 'straight' >> in the
+L<relationship attributes|DBIx::Class::Relationship::Base/join_type>
 
+
 It also provides a one-stop on-connect macro C<set_strict_mode> which sets
 session variables such that MySQL behaves more predictably as far as the
 SQL standard is concerned.
 
+=head1 STORAGE OPTIONS
+
+=head2 set_strict_mode
+
+Enables session-wide strict options upon connecting. Equivalent to:
+
+  ->connect ( ... , {
+    on_connect_do => [
+      q|SET SQL_MODE = CONCAT('ANSI,TRADITIONAL,ONLY_FULL_GROUP_BY,', @@sql_mode)|,
+      q|SET SQL_AUTO_IS_NULL = 0|,
+    ]
+  });
+
 =head1 AUTHORS
 
 See L<DBIx::Class/CONTRIBUTORS>

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBI.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -4,7 +4,7 @@
 use strict;
 use warnings;
 
-use base 'DBIx::Class::Storage';
+use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/;
 use mro 'c3';
 
 use Carp::Clan qw/^DBIx::Class/;
@@ -13,10 +13,15 @@
 use DBIx::Class::Storage::Statistics;
 use Scalar::Util();
 use List::Util();
+use Data::Dumper::Concise();
+use Sub::Name ();
 
+use File::Path ();
+
 __PACKAGE__->mk_group_accessors('simple' =>
   qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid
-     _conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/
+     _conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints
+     _server_info_hash/
 );
 
 # the values for these accessors are picked out (and deleted) from
@@ -31,10 +36,46 @@
 # default cursor class, overridable in connect_info attributes
 __PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');
 
-__PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class/);
+__PACKAGE__->mk_group_accessors('inherited' => qw/
+  sql_maker_class
+  _supports_insert_returning
+/);
 __PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks');
 
+# Each of these methods need _determine_driver called before itself
+# in order to function reliably. This is a purely DRY optimization
+my @rdbms_specific_methods = qw/
+  deployment_statements
+  sqlt_type
+  sql_maker
+  build_datetime_parser
+  datetime_parser_type
 
+  insert
+  insert_bulk
+  update
+  delete
+  select
+  select_single
+/;
+
+for my $meth (@rdbms_specific_methods) {
+
+  my $orig = __PACKAGE__->can ($meth)
+    or next;
+
+  no strict qw/refs/;
+  no warnings qw/redefine/;
+  *{__PACKAGE__ ."::$meth"} = Sub::Name::subname $meth => sub {
+    if (not $_[0]->_driver_determined) {
+      $_[0]->_determine_driver;
+      goto $_[0]->can($meth);
+    }
+    $orig->(@_);
+  };
+}
+
+
 =head1 NAME
 
 DBIx::Class::Storage::DBI - DBI storage handler
@@ -44,10 +85,17 @@
   my $schema = MySchema->connect('dbi:SQLite:my.db');
 
   $schema->storage->debug(1);
-  $schema->dbh_do("DROP TABLE authors");
 
+  my @stuff = $schema->storage->dbh_do(
+    sub {
+      my ($storage, $dbh, @args) = @_;
+      $dbh->do("DROP TABLE authors");
+    },
+    @column_list
+  );
+
   $schema->resultset('Book')->search({
-     written_on => $schema->storage->datetime_parser(DateTime->now)
+     written_on => $schema->storage->datetime_parser->format_datetime(DateTime->now)
   });
 
 =head1 DESCRIPTION
@@ -69,9 +117,102 @@
   $new->{_in_dbh_do} = 0;
   $new->{_dbh_gen} = 0;
 
+  # read below to see what this does
+  $new->_arm_global_destructor;
+
   $new;
 }
 
+# This is hack to work around perl shooting stuff in random
+# order on exit(). If we do not walk the remaining storage
+# objects in an END block, there is a *small but real* chance
+# of a fork()ed child to kill the parent's shared DBI handle,
+# *before perl reaches the DESTROY in this package*
+# Yes, it is ugly and effective.
+{
+  my %seek_and_destroy;
+
+  sub _arm_global_destructor {
+    my $self = shift;
+    my $key = Scalar::Util::refaddr ($self);
+    $seek_and_destroy{$key} = $self;
+    Scalar::Util::weaken ($seek_and_destroy{$key});
+  }
+
+  END {
+    local $?; # just in case the DBI destructor changes it somehow
+
+    # destroy just the object if not native to this process/thread
+    $_->_preserve_foreign_dbh for (grep
+      { defined $_ }
+      values %seek_and_destroy
+    );
+  }
+}
+
+sub DESTROY {
+  my $self = shift;
+
+  # destroy just the object if not native to this process/thread
+  $self->_preserve_foreign_dbh;
+
+  # some databases need this to stop spewing warnings
+  if (my $dbh = $self->_dbh) {
+    local $@;
+    eval {
+      %{ $dbh->{CachedKids} } = ();
+      $dbh->disconnect;
+    };
+  }
+
+  $self->_dbh(undef);
+}
+
+sub _preserve_foreign_dbh {
+  my $self = shift;
+
+  return unless $self->_dbh;
+
+  $self->_verify_tid;
+
+  return unless $self->_dbh;
+
+  $self->_verify_pid;
+
+}
+
+# handle pid changes correctly - do not destroy parent's connection
+sub _verify_pid {
+  my $self = shift;
+
+  return if ( defined $self->_conn_pid and $self->_conn_pid == $$ );
+
+  $self->_dbh->{InactiveDestroy} = 1;
+  $self->_dbh(undef);
+  $self->{_dbh_gen}++;
+
+  return;
+}
+
+# very similar to above, but seems to FAIL if I set InactiveDestroy
+sub _verify_tid {
+  my $self = shift;
+
+  if ( ! defined $self->_conn_tid ) {
+    return; # no threads
+  }
+  elsif ( $self->_conn_tid == threads->tid ) {
+    return; # same thread
+  }
+
+  #$self->_dbh->{InactiveDestroy} = 1;  # why does t/51threads.t fail...?
+  $self->_dbh(undef);
+  $self->{_dbh_gen}++;
+
+  return;
+}
+
+
 =head2 connect_info
 
 This method is normally called by L<DBIx::Class::Schema/connection>, which
@@ -112,6 +253,12 @@
     %extra_attributes,
   }];
 
+  $connect_info_args = [{
+    dbh_maker => sub { DBI->connect (...) },
+    %dbi_attributes,
+    %extra_attributes,
+  }];
+
 This is particularly useful for L<Catalyst> based applications, allowing the
 following config (L<Config::General> style):
 
@@ -125,6 +272,10 @@
     </connect_info>
   </Model::DB>
 
+The C<dsn>/C<user>/C<password> combination can be substituted by the
+C<dbh_maker> key whose value is a coderef that returns a connected
+L<DBI database handle|DBI/connect>
+
 =back
 
 Please note that the L<DBI> docs recommend that you always explicitly
@@ -139,7 +290,7 @@
 In addition to the standard L<DBI|DBI/ATTRIBUTES_COMMON_TO_ALL_HANDLES>
 L<connection|DBI/Database_Handle_Attributes> attributes, DBIx::Class recognizes
 the following connection options. These options can be mixed in with your other
-L<DBI> connection attributes, or placed in a seperate hashref
+L<DBI> connection attributes, or placed in a separate hashref
 (C<\%extra_attributes>) as shown above.
 
 Every time C<connect_info> is invoked, any previous settings for
@@ -291,7 +442,7 @@
 =item name_sep
 
 This only needs to be used in conjunction with C<quote_char>, and is used to
-specify the charecter that seperates elements (schemas, tables, columns) from
+specify the character that separates elements (schemas, tables, columns) from
 each other. In most cases this is simply a C<.>.
 
 The consequences of not supplying this value is that L<SQL::Abstract>
@@ -337,6 +488,12 @@
   # Connect via subref
   ->connect_info([ sub { DBI->connect(...) } ]);
 
+  # Connect via subref in hashref
+  ->connect_info([{
+    dbh_maker => sub { DBI->connect(...) },
+    on_connect_do => 'alter session ...',
+  }]);
+
   # A bit more complicated
   ->connect_info(
     [
@@ -389,14 +546,51 @@
 =cut
 
 sub connect_info {
+  my ($self, $info) = @_;
+
+  return $self->_connect_info if !$info;
+
+  $self->_connect_info($info); # copy for _connect_info
+
+  $info = $self->_normalize_connect_info($info)
+    if ref $info eq 'ARRAY';
+
+  for my $storage_opt (keys %{ $info->{storage_options} }) {
+    my $value = $info->{storage_options}{$storage_opt};
+
+    $self->$storage_opt($value);
+  }
+
+  # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
+  #  the new set of options
+  $self->_sql_maker(undef);
+  $self->_sql_maker_opts({});
+
+  for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) {
+    my $value = $info->{sql_maker_options}{$sql_maker_opt};
+
+    $self->_sql_maker_opts->{$sql_maker_opt} = $value;
+  }
+
+  my %attrs = (
+    %{ $self->_default_dbi_connect_attributes || {} },
+    %{ $info->{attributes} || {} },
+  );
+
+  my @args = @{ $info->{arguments} };
+
+  $self->_dbi_connect_info([@args,
+    %attrs && !(ref $args[0] eq 'CODE') ? \%attrs : ()]);
+
+  return $self->_connect_info;
+}
+
+sub _normalize_connect_info {
   my ($self, $info_arg) = @_;
+  my %info;
 
-  return $self->_connect_info if !$info_arg;
-
   my @args = @$info_arg;  # take a shallow copy for further mutilation
-  $self->_connect_info([@args]); # copy for _connect_info
 
-
   # combine/pre-parse arguments depending on invocation style
 
   my %attrs;
@@ -407,9 +601,22 @@
   elsif (ref $args[0] eq 'HASH') { # single hashref (i.e. Catalyst config)
     %attrs = %{$args[0]};
     @args = ();
-    for (qw/password user dsn/) {
-      unshift @args, delete $attrs{$_};
+    if (my $code = delete $attrs{dbh_maker}) {
+      @args = $code;
+
+      my @ignored = grep { delete $attrs{$_} } (qw/dsn user password/);
+      if (@ignored) {
+        carp sprintf (
+            'Attribute(s) %s in connect_info were ignored, as they can not be applied '
+          . "to the result of 'dbh_maker'",
+
+          join (', ', map { "'$_'" } (@ignored) ),
+        );
+      }
     }
+    else {
+      @args = delete @attrs{qw/dsn user password/};
+    }
   }
   else {                # otherwise assume dsn/user/password + \%attrs + \%extra_attrs
     %attrs = (
@@ -419,36 +626,23 @@
     @args = @args[0,1,2];
   }
 
-  # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only
-  #  the new set of options
-  $self->_sql_maker(undef);
-  $self->_sql_maker_opts({});
+  $info{arguments} = \@args;
 
-  if(keys %attrs) {
-    for my $storage_opt (@storage_options, 'cursor_class') {    # @storage_options is declared at the top of the module
-      if(my $value = delete $attrs{$storage_opt}) {
-        $self->$storage_opt($value);
-      }
-    }
-    for my $sql_maker_opt (qw/limit_dialect quote_char name_sep/) {
-      if(my $opt_val = delete $attrs{$sql_maker_opt}) {
-        $self->_sql_maker_opts->{$sql_maker_opt} = $opt_val;
-      }
-    }
-  }
+  my @storage_opts = grep exists $attrs{$_},
+    @storage_options, 'cursor_class';
 
-  if (ref $args[0] eq 'CODE') {
-    # _connect() never looks past $args[0] in this case
-    %attrs = ()
-  } else {
-    %attrs = (
-      %{ $self->_default_dbi_connect_attributes || {} },
-      %attrs,
-    );
-  }
+  @{ $info{storage_options} }{@storage_opts} =
+    delete @attrs{@storage_opts} if @storage_opts;
 
-  $self->_dbi_connect_info([@args, keys %attrs ? \%attrs : ()]);
-  $self->_connect_info;
+  my @sql_maker_opts = grep exists $attrs{$_},
+    qw/limit_dialect quote_char name_sep/;
+
+  @{ $info{sql_maker_options} }{@sql_maker_opts} =
+    delete @attrs{@sql_maker_opts} if @sql_maker_opts;
+
+  $info{attributes} = \%attrs if %attrs;
+
+  return \%info;
 }
 
 sub _default_dbi_connect_attributes {
@@ -527,7 +721,7 @@
   my $self = shift;
   my $code = shift;
 
-  my $dbh = $self->_dbh;
+  my $dbh = $self->_get_dbh;
 
   return $self->$code($dbh, @_) if $self->{_in_dbh_do}
       || $self->{transaction_depth};
@@ -538,11 +732,6 @@
   my $want_array = wantarray;
 
   eval {
-    $self->_verify_pid if $dbh;
-    if(!$self->_dbh) {
-        $self->_populate_dbh;
-        $dbh = $self->_dbh;
-    }
 
     if($want_array) {
         @result = $self->$code($dbh, @_);
@@ -589,8 +778,7 @@
   my $tried = 0;
   while(1) {
     eval {
-      $self->_verify_pid if $self->_dbh;
-      $self->_populate_dbh if !$self->_dbh;
+      $self->_get_dbh;
 
       $self->txn_begin;
       if($want_array) {
@@ -651,7 +839,9 @@
 
     $self->_do_connection_actions(disconnect_call_ => $_) for @actions;
 
-    $self->_dbh->rollback unless $self->_dbh_autocommit;
+    $self->_dbh_rollback unless $self->_dbh_autocommit;
+
+    %{ $self->_dbh->{CachedKids} } = ();
     $self->_dbh->disconnect;
     $self->_dbh(undef);
     $self->{_dbh_gen}++;
@@ -676,7 +866,6 @@
 # Storage subclasses should override this
 sub with_deferred_fk_checks {
   my ($self, $sub) = @_;
-
   $sub->();
 }
 
@@ -690,8 +879,8 @@
 
 =back
 
-Verifies that the the current database handle is active and ready to execute
-an SQL statement (i.e. the connection did not get stale, server is still
+Verifies that the current database handle is active and ready to execute
+an SQL statement (e.g. the connection did not get stale, server is still
 answering, etc.) This method is used internally by L</dbh>.
 
 =cut
@@ -709,19 +898,11 @@
 sub _seems_connected {
   my $self = shift;
 
+  $self->_preserve_foreign_dbh;
+
   my $dbh = $self->_dbh
     or return 0;
 
-  if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
-    $self->_dbh(undef);
-    $self->{_dbh_gen}++;
-    return 0;
-  }
-  else {
-    $self->_verify_pid;
-    return 0 if !$self->_dbh;
-  }
-
   return $dbh->FETCH('Active');
 }
 
@@ -733,20 +914,6 @@
   return $dbh->ping;
 }
 
-# handle pid changes correctly
-#  NOTE: assumes $self->_dbh is a valid $dbh
-sub _verify_pid {
-  my ($self) = @_;
-
-  return if defined $self->_conn_pid && $self->_conn_pid == $$;
-
-  $self->_dbh->{InactiveDestroy} = 1;
-  $self->_dbh(undef);
-  $self->{_dbh_gen}++;
-
-  return;
-}
-
 sub ensure_connected {
   my ($self) = @_;
 
@@ -760,7 +927,7 @@
 Returns a C<$dbh> - a data base handle of class L<DBI>. The returned handle
 is guaranteed to be healthy by implicitly calling L</connected>, and if
 necessary performing a reconnection before returning. Keep in mind that this
-is very B<expensive> on some database engines. Consider using L<dbh_do>
+is very B<expensive> on some database engines. Consider using L</dbh_do>
 instead.
 
 =cut
@@ -779,6 +946,7 @@
 # this is the internal "get dbh or connect (don't check)" method
 sub _get_dbh {
   my $self = shift;
+  $self->_preserve_foreign_dbh;
   $self->_populate_dbh unless $self->_dbh;
   return $self->_dbh;
 }
@@ -804,13 +972,16 @@
   return $self->_sql_maker;
 }
 
+# nothing to do by default
 sub _rebless {}
+sub _init {}
 
 sub _populate_dbh {
   my ($self) = @_;
 
   my @info = @{$self->_dbi_connect_info || []};
   $self->_dbh(undef); # in case ->connected failed we might get sent here
+  $self->_server_info_hash (undef);
   $self->_dbh($self->_connect(@info));
 
   $self->_conn_pid($$);
@@ -835,36 +1006,96 @@
   $self->_do_connection_actions(connect_call_ => $_) for @actions;
 }
 
+sub _server_info {
+  my $self = shift;
+
+  unless ($self->_server_info_hash) {
+
+    my %info;
+
+    my $server_version = do {
+      local $@; # might be happening in some sort of destructor
+      eval { $self->_get_server_version };
+    };
+
+    if (defined $server_version) {
+      $info{dbms_version} = $server_version;
+
+      my ($numeric_version) = $server_version =~ /^([\d\.]+)/;
+      my @verparts = split (/\./, $numeric_version);
+      if (
+        @verparts
+          &&
+        $verparts[0] <= 999
+      ) {
+        # consider only up to 3 version parts, iff not more than 3 digits
+        my @use_parts;
+        while (@verparts && @use_parts < 3) {
+          my $p = shift @verparts;
+          last if $p > 999;
+          push @use_parts, $p;
+        }
+        push @use_parts, 0 while @use_parts < 3;
+
+        $info{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts;
+      }
+    }
+
+    $self->_server_info_hash(\%info);
+  }
+
+  return $self->_server_info_hash
+}
+
+sub _get_server_version {
+  shift->_get_dbh->get_info(18);
+}
+
 sub _determine_driver {
   my ($self) = @_;
 
   if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
-    my $started_unconnected = 0;
+    my $started_connected = 0;
     local $self->{_in_determine_driver} = 1;
 
     if (ref($self) eq __PACKAGE__) {
       my $driver;
       if ($self->_dbh) { # we are connected
         $driver = $self->_dbh->{Driver}{Name};
+        $started_connected = 1;
       } else {
-        # try to use dsn to not require being connected, the driver may still
-        # force a connection in _rebless to determine version
-        ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
-        $started_unconnected = 1;
+        # if connect_info is a CODEREF, we have no choice but to connect
+        if (ref $self->_dbi_connect_info->[0] &&
+            Scalar::Util::reftype($self->_dbi_connect_info->[0]) eq 'CODE') {
+          $self->_populate_dbh;
+          $driver = $self->_dbh->{Driver}{Name};
+        }
+        else {
+          # try to use dsn to not require being connected, the driver may still
+          # force a connection in _rebless to determine version
+          # (dsn may not be supplied at all if all we do is make a mock-schema)
+          my $dsn = $self->_dbi_connect_info->[0] || $ENV{DBI_DSN} || '';
+          ($driver) = $dsn =~ /dbi:([^:]+):/i;
+          $driver ||= $ENV{DBI_DRIVER};
+        }
       }
 
-      my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
-      if ($self->load_optional_class($storage_class)) {
-        mro::set_mro($storage_class, 'c3');
-        bless $self, $storage_class;
-        $self->_rebless();
+      if ($driver) {
+        my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
+        if ($self->load_optional_class($storage_class)) {
+          mro::set_mro($storage_class, 'c3');
+          bless $self, $storage_class;
+          $self->_rebless();
+        }
       }
     }
 
     $self->_driver_determined(1);
 
+    $self->_init; # run driver-specific initializations
+
     $self->_run_connection_actions
-        if $started_unconnected && defined $self->_dbh;
+        if !$started_connected && defined $self->_dbh;
   }
 }
 
@@ -922,7 +1153,7 @@
     my @bind = map { [ undef, $_ ] } @do_args;
 
     $self->_query_start($sql, @bind);
-    $self->_dbh->do($sql, $attrs, @do_args);
+    $self->_get_dbh->do($sql, $attrs, @do_args);
     $self->_query_end($sql, @bind);
   }
 
@@ -944,7 +1175,7 @@
 
   eval {
     if(ref $info[0] eq 'CODE') {
-       $dbh = &{$info[0]}
+       $dbh = $info[0]->();
     }
     else {
        $dbh = DBI->connect(@info);
@@ -958,6 +1189,8 @@
             $weak_self->throw_exception("DBI Exception: $_[0]");
           }
           else {
+            # the handler may be invoked by something totally out of
+            # the scope of DBIC
             croak ("DBI Exception: $_[0]");
           }
       };
@@ -1064,30 +1297,43 @@
 
 sub txn_begin {
   my $self = shift;
+
+  # this means we have not yet connected and do not know the AC status
+  # (e.g. coderef $dbh)
+  $self->ensure_connected if (! defined $self->_dbh_autocommit);
+
   if($self->{transaction_depth} == 0) {
     $self->debugobj->txn_begin()
       if $self->debug;
-
-    # being here implies we have AutoCommit => 1
-    # if the user is utilizing txn_do - good for
-    # him, otherwise we need to ensure that the
-    # $dbh is healthy on BEGIN
-    my $dbh_method = $self->{_in_dbh_do} ? '_dbh' : 'dbh';
-    $self->$dbh_method->begin_work;
-
-  } elsif ($self->auto_savepoint) {
+    $self->_dbh_begin_work;
+  }
+  elsif ($self->auto_savepoint) {
     $self->svp_begin;
   }
   $self->{transaction_depth}++;
 }
 
+sub _dbh_begin_work {
+  my $self = shift;
+
+  # if the user is utilizing txn_do - good for him, otherwise we need to
+  # ensure that the $dbh is healthy on BEGIN.
+  # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
+  # will be replaced by a failure of begin_work itself (which will be
+  # then retried on reconnect)
+  if ($self->{_in_dbh_do}) {
+    $self->_dbh->begin_work;
+  } else {
+    $self->dbh_do(sub { $_[1]->begin_work });
+  }
+}
+
 sub txn_commit {
   my $self = shift;
   if ($self->{transaction_depth} == 1) {
-    my $dbh = $self->_dbh;
     $self->debugobj->txn_commit()
       if ($self->debug);
-    $dbh->commit;
+    $self->_dbh_commit;
     $self->{transaction_depth} = 0
       if $self->_dbh_autocommit;
   }
@@ -1098,6 +1344,13 @@
   }
 }
 
+sub _dbh_commit {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot COMMIT on a disconnected handle');
+  $dbh->commit;
+}
+
 sub txn_rollback {
   my $self = shift;
   my $dbh = $self->_dbh;
@@ -1107,7 +1360,7 @@
         if ($self->debug);
       $self->{transaction_depth} = 0
         if $self->_dbh_autocommit;
-      $dbh->rollback;
+      $self->_dbh_rollback;
     }
     elsif($self->{transaction_depth} > 1) {
       $self->{transaction_depth}--;
@@ -1130,6 +1383,13 @@
   }
 }
 
+sub _dbh_rollback {
+  my $self = shift;
+  my $dbh  = $self->_dbh
+    or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
+  $dbh->rollback;
+}
+
 # This used to be the top-half of _execute.  It was split out to make it
 #  easier to override in NoBindVars without duping the rest.  It takes up
 #  all of _execute's args, and emits $sql, @bind.
@@ -1224,60 +1484,170 @@
 
 sub _execute {
     my $self = shift;
-    $self->dbh_do('_dbh_execute', @_)
+    $self->dbh_do('_dbh_execute', @_);  # retry over disconnects
 }
 
-sub insert {
+sub _prefetch_insert_auto_nextvals {
   my ($self, $source, $to_insert) = @_;
 
-# redispatch to insert method of storage we reblessed into, if necessary
-  if (not $self->_driver_determined) {
-    $self->_determine_driver;
-    goto $self->can('insert');
-  }
+  my $upd = {};
 
-  my $ident = $source->from;
-  my $bind_attributes = $self->source_bind_attributes($source);
-
-  my $updated_cols = {};
-
   foreach my $col ( $source->columns ) {
     if ( !defined $to_insert->{$col} ) {
       my $col_info = $source->column_info($col);
 
       if ( $col_info->{auto_nextval} ) {
-        $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch(
+        $upd->{$col} = $to_insert->{$col} = $self->_sequence_fetch(
           'nextval',
-          $col_info->{sequence} ||
-            $self->_dbh_get_autoinc_seq($self->_get_dbh, $source)
+          $col_info->{sequence} ||=
+            $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col)
         );
       }
     }
   }
 
-  $self->_execute('insert' => [], $source, $bind_attributes, $to_insert);
+  return $upd;
+}
 
+sub insert {
+  my $self = shift;
+  my ($source, $to_insert, $opts) = @_;
+
+  my $updated_cols = $self->_prefetch_insert_auto_nextvals (@_);
+
+  my $bind_attributes = $self->source_bind_attributes($source);
+
+  my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $opts);
+
+  if ($opts->{returning}) {
+    my @ret_cols = @{$opts->{returning}};
+
+    my @ret_vals = eval {
+      local $SIG{__WARN__} = sub {};
+      my @r = $sth->fetchrow_array;
+      $sth->finish;
+      @r;
+    };
+
+    my %ret;
+    @ret{@ret_cols} = @ret_vals if (@ret_vals);
+
+    $updated_cols = {
+      %$updated_cols,
+      %ret,
+    };
+  }
+
   return $updated_cols;
 }
 
-## Still not quite perfect, and EXPERIMENTAL
 ## Currently it is assumed that all values passed will be "normal", i.e. not
 ## scalar refs, or at least, all the same type as the first set, the statement is
 ## only prepped once.
 sub insert_bulk {
   my ($self, $source, $cols, $data) = @_;
+
   my %colvalues;
-  my $table = $source->from;
   @colvalues{@$cols} = (0..$#$cols);
-  my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues);
 
-  $self->_determine_driver;
+  for my $i (0..$#$cols) {
+    my $first_val = $data->[0][$i];
+    next unless ref $first_val eq 'SCALAR';
 
-  $self->_query_start( $sql, @bind );
+    $colvalues{ $cols->[$i] } = $first_val;
+  }
+
+  # check for bad data and stringify stringifiable objects
+  my $bad_slice = sub {
+    my ($msg, $col_idx, $slice_idx) = @_;
+    $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
+      $msg,
+      $cols->[$col_idx],
+      do {
+        local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any
+        Data::Dumper::Concise::Dumper({
+          map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
+        }),
+      }
+    );
+  };
+
+  for my $datum_idx (0..$#$data) {
+    my $datum = $data->[$datum_idx];
+
+    for my $col_idx (0..$#$cols) {
+      my $val            = $datum->[$col_idx];
+      my $sqla_bind      = $colvalues{ $cols->[$col_idx] };
+      my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
+
+      if ($is_literal_sql) {
+        if (not ref $val) {
+          $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx);
+        }
+        elsif ((my $reftype = ref $val) ne 'SCALAR') {
+          $bad_slice->("$reftype reference found where literal SQL expected",
+            $col_idx, $datum_idx);
+        }
+        elsif ($$val ne $$sqla_bind){
+          $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
+            $col_idx, $datum_idx);
+        }
+      }
+      elsif (my $reftype = ref $val) {
+        require overload;
+        if (overload::Method($val, '""')) {
+          $datum->[$col_idx] = "".$val;
+        }
+        else {
+          $bad_slice->("$reftype reference found where bind expected",
+            $col_idx, $datum_idx);
+        }
+      }
+    }
+  }
+
+  my ($sql, $bind) = $self->_prep_for_execute (
+    'insert', undef, $source, [\%colvalues]
+  );
+  my @bind = @$bind;
+
+  my $empty_bind = 1 if (not @bind) &&
+    (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols;
+
+  if ((not @bind) && (not $empty_bind)) {
+    $self->throw_exception(
+      'Cannot insert_bulk without support for placeholders'
+    );
+  }
+
+  # neither _execute_array, nor _execute_inserts_with_no_binds are
+  # atomic (even if _execute_array is a single call). Thus a safety
+  # scope guard
+  my $guard = $self->txn_scope_guard;
+
+  $self->_query_start( $sql, ['__BULK__'] );
   my $sth = $self->sth($sql);
+  my $rv = do {
+    if ($empty_bind) {
+      # bind_param_array doesn't work if there are no binds
+      $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
+    }
+    else {
+#      @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+      $self->_execute_array( $source, $sth, \@bind, $cols, $data );
+    }
+  };
 
-#  @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+  $self->_query_end( $sql, ['__BULK__'] );
 
+  $guard->commit;
+
+  return (wantarray ? ($rv, $sth, @bind) : $rv);
+}
+
+sub _execute_array {
+  my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
+
   ## This must be an arrayref, else nothing works!
   my $tuple_status = [];
 
@@ -1287,7 +1657,7 @@
   ## Bind the values and execute
   my $placeholder_index = 1;
 
-  foreach my $bound (@bind) {
+  foreach my $bound (@$bind) {
 
     my $attributes = {};
     my ($column_name, $data_index) = @$bound;
@@ -1299,60 +1669,89 @@
 
     my @data = map { $_->[$data_index] } @$data;
 
-    $sth->bind_param_array( $placeholder_index, [@data], $attributes );
+    $sth->bind_param_array(
+      $placeholder_index,
+      [@data],
+      (%$attributes ?  $attributes : ()),
+    );
     $placeholder_index++;
   }
-  my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) };
-  if (my $err = $@) {
+
+  my $rv = eval {
+    $self->_dbh_execute_array($sth, $tuple_status, @extra);
+  };
+  my $err = $@ || $sth->errstr;
+
+# Statement must finish even if there was an exception.
+  eval { $sth->finish };
+  $err = $@ unless $err;
+
+  if ($err) {
     my $i = 0;
     ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
 
-    $self->throw_exception($sth->errstr || "Unexpected populate error: $err")
+    $self->throw_exception("Unexpected populate error: $err")
       if ($i > $#$tuple_status);
 
-    require Data::Dumper;
-    local $Data::Dumper::Terse = 1;
-    local $Data::Dumper::Indent = 1;
-    local $Data::Dumper::Useqq = 1;
-    local $Data::Dumper::Quotekeys = 0;
-
     $self->throw_exception(sprintf "%s for populate slice:\n%s",
-      $tuple_status->[$i][1],
-      Data::Dumper::Dumper(
-        { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) }
-      ),
+      ($tuple_status->[$i][1] || $err),
+      Data::Dumper::Concise::Dumper({
+        map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols)
+      }),
     );
   }
-  $self->throw_exception($sth->errstr) if !$rv;
+  return $rv;
+}
 
-  $self->_query_end( $sql, @bind );
-  return (wantarray ? ($rv, $sth, @bind) : $rv);
+sub _dbh_execute_array {
+    my ($self, $sth, $tuple_status, @extra) = @_;
+
+    return $sth->execute_array({ArrayTupleStatus => $tuple_status});
 }
 
+sub _dbh_execute_inserts_with_no_binds {
+  my ($self, $sth, $count) = @_;
+
+  eval {
+    my $dbh = $self->_get_dbh;
+    local $dbh->{RaiseError} = 1;
+    local $dbh->{PrintError} = 0;
+
+    $sth->execute foreach 1..$count;
+  };
+  my $exception = $@;
+
+# Make sure statement is finished even if there was an exception.
+  eval { $sth->finish };
+  $exception = $@ unless $exception;
+
+  $self->throw_exception($exception) if $exception;
+
+  return $count;
+}
+
 sub update {
-  my $self = shift @_;
-  my $source = shift @_;
-  $self->_determine_driver;
-  my $bind_attributes = $self->source_bind_attributes($source);
+  my ($self, $source, @args) = @_;
 
-  return $self->_execute('update' => [], $source, $bind_attributes, @_);
+  my $bind_attrs = $self->source_bind_attributes($source);
+
+  return $self->_execute('update' => [], $source, $bind_attrs, @args);
 }
 
 
 sub delete {
-  my $self = shift @_;
-  my $source = shift @_;
-  $self->_determine_driver;
+  my ($self, $source, @args) = @_;
+
   my $bind_attrs = $self->source_bind_attributes($source);
 
-  return $self->_execute('delete' => [], $source, $bind_attrs, @_);
+  return $self->_execute('delete' => [], $source, $bind_attrs, @args);
 }
 
 # We were sent here because the $rs contains a complex search
 # which will require a subquery to select the correct rows
-# (i.e. joined or limited resultsets)
+# (i.e. joined or limited resultsets, or non-introspectable conditions)
 #
-# Genarating a single PK column subquery is trivial and supported
+# Generating a single PK column subquery is trivial and supported
 # by all RDBMS. However if we have a multicolumn PK, things get ugly.
 # Look at _multipk_update_delete()
 sub _subq_update_delete {
@@ -1361,14 +1760,19 @@
 
   my $rsrc = $rs->result_source;
 
-  # we already check this, but double check naively just in case. Should be removed soon
+  # quick check if we got a sane rs on our hands
+  my @pcols = $rsrc->_pri_cols;
+
   my $sel = $rs->_resolved_attrs->{select};
   $sel = [ $sel ] unless ref $sel eq 'ARRAY';
-  my @pcols = $rsrc->primary_columns;
-  if (@$sel != @pcols) {
+
+  if (
+      join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
+        ne
+      join ("\x00", sort @$sel )
+  ) {
     $self->throw_exception (
-      'Subquery update/delete can not be called on resultsets selecting a'
-     .' number of columns different than the number of primary keys'
+      '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
     );
   }
 
@@ -1410,7 +1814,7 @@
   my ($rs, $op, $values) = @_;
 
   my $rsrc = $rs->result_source;
-  my @pcols = $rsrc->primary_columns;
+  my @pcols = $rsrc->_pri_cols;
 
   my $guard = $self->txn_scope_guard;
 
@@ -1418,11 +1822,12 @@
   my $row_cnt = '0E0';
 
   my $subrs_cur = $rs->cursor;
-  while (my @pks = $subrs_cur->next) {
+  my @all_pk = $subrs_cur->all;
+  for my $pks ( @all_pk) {
 
     my $cond;
     for my $i (0.. $#pcols) {
-      $cond->{$pcols[$i]} = $pks[$i];
+      $cond->{$pcols[$i]} = $pks->[$i];
     }
 
     $self->$op (
@@ -1441,31 +1846,18 @@
 
 sub _select {
   my $self = shift;
-
-  # localization is neccessary as
-  # 1) there is no infrastructure to pass this around before SQLA2
-  # 2) _select_args sets it and _prep_for_execute consumes it
-  my $sql_maker = $self->sql_maker;
-  local $sql_maker->{_dbic_rs_attrs};
-
-  return $self->_execute($self->_select_args(@_));
+  $self->_execute($self->_select_args(@_));
 }
 
 sub _select_args_to_query {
   my $self = shift;
 
-  # localization is neccessary as
-  # 1) there is no infrastructure to pass this around before SQLA2
-  # 2) _select_args sets it and _prep_for_execute consumes it
-  my $sql_maker = $self->sql_maker;
-  local $sql_maker->{_dbic_rs_attrs};
-
-  # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $order, $rows, $offset)
+  # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $rs_attrs, $rows, $offset)
   #  = $self->_select_args($ident, $select, $cond, $attrs);
   my ($op, $bind, $ident, $bind_attrs, @args) =
     $self->_select_args(@_);
 
-  # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $order, $rows, $offset ]);
+  # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
   my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args);
   $prepared_bind ||= [];
 
@@ -1478,16 +1870,16 @@
 sub _select_args {
   my ($self, $ident, $select, $where, $attrs) = @_;
 
+  my $sql_maker = $self->sql_maker;
   my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
 
-  my $sql_maker = $self->sql_maker;
-  $sql_maker->{_dbic_rs_attrs} = {
+  $attrs = {
     %$attrs,
     select => $select,
     from => $ident,
     where => $where,
-    $rs_alias
-      ? ( _source_handle => $alias2source->{$rs_alias}->handle )
+    $rs_alias && $alias2source->{$rs_alias}
+      ? ( _rsroot_source_handle => $alias2source->{$rs_alias}->handle )
       : ()
     ,
   };
@@ -1537,13 +1929,21 @@
 
   my @limit;
 
-  # see if we need to tear the prefetch apart (either limited has_many or grouped prefetch)
-  # otherwise delegate the limiting to the storage, unless software limit was requested
+  # see if we need to tear the prefetch apart otherwise delegate the limiting to the
+  # storage, unless software limit was requested
   if (
+    #limited has_many
     ( $attrs->{rows} && keys %{$attrs->{collapse}} )
        ||
-    ( $attrs->{group_by} && @{$attrs->{group_by}} &&
-      $attrs->{_prefetch_select} && @{$attrs->{_prefetch_select}} )
+    # grouped prefetch (to satisfy group_by == select)
+    ( $attrs->{group_by}
+        &&
+      @{$attrs->{group_by}}
+        &&
+      $attrs->{_prefetch_select}
+        &&
+      @{$attrs->{_prefetch_select}}
+    )
   ) {
     ($ident, $select, $where, $attrs)
       = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
@@ -1552,6 +1952,9 @@
     push @limit, $attrs->{rows}, $attrs->{offset};
   }
 
+  # try to simplify the joinmap further (prune unreferenced type-single joins)
+  $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
+
 ###
   # This would be the point to deflate anything found in $where
   # (and leave $attrs->{bind} intact). Problem is - inflators historically
@@ -1562,287 +1965,9 @@
   # invoked, and that's just bad...
 ###
 
-  my $order = { map
-    { $attrs->{$_} ? ( $_ => $attrs->{$_} ) : ()  }
-    (qw/order_by group_by having/ )
-  };
-
-  return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit);
+  return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $attrs, @limit);
 }
 
-#
-# This is the code producing joined subqueries like:
-# SELECT me.*, other.* FROM ( SELECT me.* FROM ... ) JOIN other ON ... 
-#
-sub _adjust_select_args_for_complex_prefetch {
-  my ($self, $from, $select, $where, $attrs) = @_;
-
-  $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
-    if (ref $from ne 'ARRAY');
-
-  # copies for mangling
-  $from = [ @$from ];
-  $select = [ @$select ];
-  $attrs = { %$attrs };
-
-  # separate attributes
-  my $sub_attrs = { %$attrs };
-  delete $attrs->{$_} for qw/where bind rows offset group_by having/;
-  delete $sub_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
-
-  my $select_root_alias = $attrs->{alias};
-  my $sql_maker = $self->sql_maker;
-
-  # create subquery select list - consider only stuff *not* brought in by the prefetch
-  my $sub_select = [];
-  my $sub_group_by;
-  for my $i (0 .. @{$attrs->{select}} - @{$attrs->{_prefetch_select}} - 1) {
-    my $sel = $attrs->{select}[$i];
-
-    # alias any functions to the dbic-side 'as' label
-    # adjust the outer select accordingly
-    if (ref $sel eq 'HASH' ) {
-      $sel->{-as} ||= $attrs->{as}[$i];
-      $select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "select_$i") );
-    }
-
-    push @$sub_select, $sel;
-  }
-
-  # bring over all non-collapse-induced order_by into the inner query (if any)
-  # the outer one will have to keep them all
-  delete $sub_attrs->{order_by};
-  if (my $ord_cnt = @{$attrs->{order_by}} - @{$attrs->{_collapse_order_by}} ) {
-    $sub_attrs->{order_by} = [
-      @{$attrs->{order_by}}[ 0 .. $ord_cnt - 1]
-    ];
-  }
-
-  # mangle {from}, keep in mind that $from is "headless" from here on
-  my $join_root = shift @$from;
-
-  my %inner_joins;
-  my %join_info = map { $_->[0]{-alias} => $_->[0] } (@$from);
-
-  # in complex search_related chains $select_root_alias may *not* be
-  # 'me' so always include it in the inner join
-  $inner_joins{$select_root_alias} = 1 if ($join_root->{-alias} ne $select_root_alias);
-
-
-  # decide which parts of the join will remain on the inside
-  #
-  # this is not a very viable optimisation, but it was written
-  # before I realised this, so might as well remain. We can throw
-  # away _any_ branches of the join tree that are:
-  # 1) not mentioned in the condition/order
-  # 2) left-join leaves (or left-join leaf chains)
-  # Most of the join conditions will not satisfy this, but for real
-  # complex queries some might, and we might make some RDBMS happy.
-  #
-  #
-  # since we do not have introspectable SQLA, we fall back to ugly
-  # scanning of raw SQL for WHERE, and for pieces of ORDER BY
-  # in order to determine what goes into %inner_joins
-  # It may not be very efficient, but it's a reasonable stop-gap
-  {
-    # produce stuff unquoted, so it can be scanned
-    local $sql_maker->{quote_char};
-    my $sep = $self->_sql_maker_opts->{name_sep} || '.';
-    $sep = "\Q$sep\E";
-
-    my @order_by = (map
-      { ref $_ ? $_->[0] : $_ }
-      $sql_maker->_order_by_chunks ($sub_attrs->{order_by})
-    );
-
-    my $where_sql = $sql_maker->where ($where);
-    my $select_sql = $sql_maker->_recurse_fields ($sub_select);
-
-    # sort needed joins
-    for my $alias (keys %join_info) {
-
-      # any table alias found on a column name in where or order_by
-      # gets included in %inner_joins
-      # Also any parent joins that are needed to reach this particular alias
-      for my $piece ($select_sql, $where_sql, @order_by ) {
-        if ($piece =~ /\b $alias $sep/x) {
-          $inner_joins{$alias} = 1;
-        }
-      }
-    }
-  }
-
-  # scan for non-leaf/non-left joins and mark as needed
-  # also mark all ancestor joins that are needed to reach this particular alias
-  # (e.g.  join => { cds => 'tracks' } - tracks will bring cds too )
-  #
-  # traverse by the size of the -join_path i.e. reverse depth first
-  for my $alias (sort { @{$join_info{$b}{-join_path}} <=> @{$join_info{$a}{-join_path}} } (keys %join_info) ) {
-
-    my $j = $join_info{$alias};
-    $inner_joins{$alias} = 1 if (! $j->{-join_type} || ($j->{-join_type} !~ /^left$/i) );
-
-    if ($inner_joins{$alias}) {
-      $inner_joins{$_} = 1 for (@{$j->{-join_path}});
-    }
-  }
-
-  # construct the inner $from for the subquery
-  my $inner_from = [ $join_root ];
-  for my $j (@$from) {
-    push @$inner_from, $j if $inner_joins{$j->[0]{-alias}};
-  }
-
-  # if a multi-type join was needed in the subquery ("multi" is indicated by
-  # presence in {collapse}) - add a group_by to simulate the collapse in the subq
-  unless ($sub_attrs->{group_by}) {
-    for my $alias (keys %inner_joins) {
-
-      # the dot comes from some weirdness in collapse
-      # remove after the rewrite
-      if ($attrs->{collapse}{".$alias"}) {
-        $sub_attrs->{group_by} ||= $sub_select;
-        last;
-      }
-    }
-  }
-
-  # generate the subquery
-  my $subq = $self->_select_args_to_query (
-    $inner_from,
-    $sub_select,
-    $where,
-    $sub_attrs
-  );
-  my $subq_joinspec = {
-    -alias => $select_root_alias,
-    -source_handle => $join_root->{-source_handle},
-    $select_root_alias => $subq,
-  };
-
-  # Generate a new from (really just replace the join slot with the subquery)
-  # Before we would start the outer chain from the subquery itself (i.e.
-  # SELECT ... FROM (SELECT ... ) alias JOIN ..., but this turned out to be
-  # a bad idea for search_related, as the root of the chain was effectively
-  # lost (i.e. $artist_rs->search_related ('cds'... ) would result in alias
-  # of 'cds', which would prevent from doing things like order_by artist.*)
-  # See t/prefetch/via_search_related.t for a better idea
-  my @outer_from;
-  if ($join_root->{-alias} eq $select_root_alias) { # just swap the root part and we're done
-    @outer_from = (
-      $subq_joinspec,
-      @$from,
-    )
-  }
-  else {  # this is trickier
-    @outer_from = ($join_root);
-
-    for my $j (@$from) {
-      if ($j->[0]{-alias} eq $select_root_alias) {
-        push @outer_from, [
-          $subq_joinspec,
-          @{$j}[1 .. $#$j],
-        ];
-      }
-      else {
-        push @outer_from, $j;
-      }
-    }
-  }
-
-  # This is totally horrific - the $where ends up in both the inner and outer query
-  # Unfortunately not much can be done until SQLA2 introspection arrives, and even
-  # then if where conditions apply to the *right* side of the prefetch, you may have
-  # to both filter the inner select (e.g. to apply a limit) and then have to re-filter
-  # the outer select to exclude joins you didin't want in the first place
-  #
-  # OTOH it can be seen as a plus: <ash> (notes that this query would make a DBA cry ;)
-  return (\@outer_from, $select, $where, $attrs);
-}
-
-sub _resolve_ident_sources {
-  my ($self, $ident) = @_;
-
-  my $alias2source = {};
-  my $rs_alias;
-
-  # the reason this is so contrived is that $ident may be a {from}
-  # structure, specifying multiple tables to join
-  if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
-    # this is compat mode for insert/update/delete which do not deal with aliases
-    $alias2source->{me} = $ident;
-    $rs_alias = 'me';
-  }
-  elsif (ref $ident eq 'ARRAY') {
-
-    for (@$ident) {
-      my $tabinfo;
-      if (ref $_ eq 'HASH') {
-        $tabinfo = $_;
-        $rs_alias = $tabinfo->{-alias};
-      }
-      if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') {
-        $tabinfo = $_->[0];
-      }
-
-      $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-source_handle}->resolve
-        if ($tabinfo->{-source_handle});
-    }
-  }
-
-  return ($alias2source, $rs_alias);
-}
-
-# Takes $ident, \@column_names
-#
-# returns { $column_name => \%column_info, ... }
-# also note: this adds -result_source => $rsrc to the column info
-#
-# usage:
-#   my $col_sources = $self->_resolve_column_info($ident, @column_names);
-sub _resolve_column_info {
-  my ($self, $ident, $colnames) = @_;
-  my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident);
-
-  my $sep = $self->_sql_maker_opts->{name_sep} || '.';
-  $sep = "\Q$sep\E";
-
-  my (%return, %seen_cols);
-
-  # compile a global list of column names, to be able to properly
-  # disambiguate unqualified column names (if at all possible)
-  for my $alias (keys %$alias2src) {
-    my $rsrc = $alias2src->{$alias};
-    for my $colname ($rsrc->columns) {
-      push @{$seen_cols{$colname}}, $alias;
-    }
-  }
-
-  COLUMN:
-  foreach my $col (@$colnames) {
-    my ($alias, $colname) = $col =~ m/^ (?: ([^$sep]+) $sep)? (.+) $/x;
-
-    unless ($alias) {
-      # see if the column was seen exactly once (so we know which rsrc it came from)
-      if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1) {
-        $alias = $seen_cols{$colname}[0];
-      }
-      else {
-        next COLUMN;
-      }
-    }
-
-    my $rsrc = $alias2src->{$alias};
-    $return{$col} = $rsrc && {
-      %{$rsrc->column_info($colname)},
-      -result_source => $rsrc,
-      -source_alias => $alias,
-    };
-  }
-
-  return \%return;
-}
-
 # Returns a counting SELECT for a simple count
 # query. Abstracted so that a storage could override
 # this to { count => 'firstcol' } or whatever makes
@@ -1852,22 +1977,7 @@
   return { count => '*' };
 }
 
-# Returns a SELECT which will end up in the subselect
-# There may or may not be a group_by, as the subquery
-# might have been called to accomodate a limit
-#
-# Most databases would be happy with whatever ends up
-# here, but some choke in various ways.
-#
-sub _subq_count_select {
-  my ($self, $source, $rs_attrs) = @_;
-  return $rs_attrs->{group_by} if $rs_attrs->{group_by};
 
-  my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns);
-  return @pcols ? \@pcols : [ 1 ];
-}
-
-
 sub source_bind_attributes {
   my ($self, $source) = @_;
 
@@ -1942,7 +2052,7 @@
 
 sub sth {
   my ($self, $sql) = @_;
-  $self->dbh_do('_dbh_sth', $sql);
+  $self->dbh_do('_dbh_sth', $sql);  # retry over disconnects
 }
 
 sub _dbh_columns_info_for {
@@ -2004,7 +2114,7 @@
 
 sub columns_info_for {
   my ($self, $table) = @_;
-  $self->dbh_do('_dbh_columns_info_for', $table);
+  $self->_dbh_columns_info_for ($self->_get_dbh, $table);
 }
 
 =head2 last_insert_id
@@ -2014,32 +2124,91 @@
 =cut
 
 sub _dbh_last_insert_id {
-    # All Storage's need to register their own _dbh_last_insert_id
-    # the old SQLite-based method was highly inappropriate
+    my ($self, $dbh, $source, $col) = @_;
 
-    my $self = shift;
+    my $id = eval { $dbh->last_insert_id (undef, undef, $source->name, $col) };
+
+    return $id if defined $id;
+
     my $class = ref $self;
-    $self->throw_exception (<<EOE);
-
-No _dbh_last_insert_id() method found in $class.
-Since the method of obtaining the autoincrement id of the last insert
-operation varies greatly between different databases, this method must be
-individually implemented for every storage class.
-EOE
+    $self->throw_exception ("No storage specific _dbh_last_insert_id() method implemented in $class, and the generic DBI::last_insert_id() failed");
 }
 
 sub last_insert_id {
   my $self = shift;
-  $self->dbh_do('_dbh_last_insert_id', @_);
+  $self->_dbh_last_insert_id ($self->_dbh, @_);
 }
 
+=head2 _native_data_type
+
+=over 4
+
+=item Arguments: $type_name
+
+=back
+
+This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
+currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
+L<::Sybase::ASE|DBIx::Class::Storage::DBI::Sybase::ASE>.
+
+The default implementation returns C<undef>, implement in your Storage driver if
+you need this functionality.
+
+Should map types from other databases to the native RDBMS type, for example
+C<VARCHAR2> to C<VARCHAR>.
+
+Types with modifiers should map to the underlying data type. For example,
+C<INTEGER AUTO_INCREMENT> should become C<INTEGER>.
+
+Composite types should map to the container type, for example
+C<ENUM(foo,bar,baz)> becomes C<ENUM>.
+
+=cut
+
+sub _native_data_type {
+  #my ($self, $data_type) = @_;
+  return undef
+}
+
+# Check if placeholders are supported at all
+sub _placeholders_supported {
+  my $self = shift;
+  my $dbh  = $self->_get_dbh;
+
+  # some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported})
+  # but it is inaccurate more often than not
+  eval {
+    local $dbh->{PrintError} = 0;
+    local $dbh->{RaiseError} = 1;
+    $dbh->do('select ?', {}, 1);
+  };
+  return $@ ? 0 : 1;
+}
+
+# Check if placeholders bound to non-string types throw exceptions
+#
+sub _typeless_placeholders_supported {
+  my $self = shift;
+  my $dbh  = $self->_get_dbh;
+
+  eval {
+    local $dbh->{PrintError} = 0;
+    local $dbh->{RaiseError} = 1;
+    # this specifically tests a bind that is NOT a string
+    $dbh->do('select 1 where 1 = ?', {}, 1);
+  };
+  return $@ ? 0 : 1;
+}
+
 =head2 sqlt_type
 
 Returns the database driver name.
 
 =cut
 
-sub sqlt_type { shift->_get_dbh->{Driver}->{Name} }
+sub sqlt_type {
+  shift->_get_dbh->{Driver}->{Name};
+}
 
 =head2 bind_attribute_by_data_type
 
@@ -2076,7 +2245,7 @@
 }
 
 
-=head2 create_ddl_dir (EXPERIMENTAL)
+=head2 create_ddl_dir
 
 =over 4
 
@@ -2128,20 +2297,24 @@
  { ignore_constraint_names => 0, # ... other options }
 
 
-Note that this feature is currently EXPERIMENTAL and may not work correctly
-across all databases, or fully handle complex relationships.
+WARNING: You are strongly advised to check all SQL files created, before applying
+them.
 
-WARNING: Please check all SQL files created, before applying them.
-
 =cut
 
 sub create_ddl_dir {
   my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
 
-  if(!$dir || !-d $dir) {
+  unless ($dir) {
     carp "No directory given, using ./\n";
-    $dir = "./";
+    $dir = './';
+  } else {
+      -d $dir or File::Path::mkpath($dir)
+          or $self->throw_exception("create_ddl_dir: $! creating dir '$dir'");
   }
+
+  $self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir);
+
   $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
   $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
 
@@ -2155,9 +2328,9 @@
     %{$sqltargs || {}}
   };
 
-  $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09003: '}
-      . $self->_check_sqlt_message . q{'})
-          if !$self->_check_sqlt_version;
+  unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
+    $self->throw_exception("Can't create a ddl file without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
+  }
 
   my $sqlt = SQL::Translator->new( $sqltargs );
 
@@ -2299,22 +2472,35 @@
       return join('', @rows);
   }
 
-  $self->throw_exception(q{Can't deploy without SQL::Translator 0.09003: '}
-      . $self->_check_sqlt_message . q{'})
-          if !$self->_check_sqlt_version;
+  unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
+    $self->throw_exception("Can't deploy without a ddl_dir or " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
+  }
 
-  require SQL::Translator::Parser::DBIx::Class;
-  eval qq{use SQL::Translator::Producer::${type}};
-  $self->throw_exception($@) if $@;
-
   # sources needs to be a parser arg, but for simplicty allow at top level
   # coming in
   $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
       if exists $sqltargs->{sources};
 
-  my $tr = SQL::Translator->new(%$sqltargs);
-  SQL::Translator::Parser::DBIx::Class::parse( $tr, $schema );
-  return "SQL::Translator::Producer::${type}"->can('produce')->($tr);
+  my $tr = SQL::Translator->new(
+    producer => "SQL::Translator::Producer::${type}",
+    %$sqltargs,
+    parser => 'SQL::Translator::Parser::DBIx::Class',
+    data => $schema,
+  );
+
+  my @ret;
+  my $wa = wantarray;
+  if ($wa) {
+    @ret = $tr->translate;
+  }
+  else {
+    $ret[0] = $tr->translate;
+  }
+
+  $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
+    unless (@ret && defined $ret[0]);
+
+  return $wa ? @ret : $ret[0];
 }
 
 sub deploy {
@@ -2338,7 +2524,7 @@
     }
     $self->_query_end($line);
   };
-  my @statements = $self->deployment_statements($schema, $type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
+  my @statements = $schema->deployment_statements($type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
   if (@statements > 1) {
     foreach my $statement (@statements) {
       $deploy->( $statement );
@@ -2360,7 +2546,6 @@
 sub datetime_parser {
   my $self = shift;
   return $self->{datetime_parser} ||= do {
-    $self->_populate_dbh unless $self->_dbh;
     $self->build_datetime_parser(@_);
   };
 }
@@ -2383,27 +2568,11 @@
 sub build_datetime_parser {
   my $self = shift;
   my $type = $self->datetime_parser_type(@_);
-  eval "use ${type}";
-  $self->throw_exception("Couldn't load ${type}: $@") if $@;
+  $self->ensure_class_loaded ($type);
   return $type;
 }
 
-{
-    my $_check_sqlt_version; # private
-    my $_check_sqlt_message; # private
-    sub _check_sqlt_version {
-        return $_check_sqlt_version if defined $_check_sqlt_version;
-        eval 'use SQL::Translator "0.09003"';
-        $_check_sqlt_message = $@ || '';
-        $_check_sqlt_version = !$@;
-    }
 
-    sub _check_sqlt_message {
-        _check_sqlt_version if !defined $_check_sqlt_message;
-        $_check_sqlt_message;
-    }
-}
-
 =head2 is_replicating
 
 A boolean that reports if a particular L<DBIx::Class::Storage::DBI> is set to
@@ -2429,16 +2598,32 @@
     return;
 }
 
-sub DESTROY {
-  my $self = shift;
-  $self->_verify_pid if $self->_dbh;
+=head2 relname_to_table_alias
 
-  # some databases need this to stop spewing warnings
-  if (my $dbh = $self->_dbh) {
-    eval { $dbh->disconnect };
-  }
+=over 4
 
-  $self->_dbh(undef);
+=item Arguments: $relname, $join_count
+
+=back
+
+L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in
+queries.
+
+This hook is to allow specific L<DBIx::Class::Storage> drivers to change the
+way these aliases are named.
+
+The default behavior is C<< "${relname}_${join_count}" if $join_count > 1 >>,
+otherwise C<"$relname">.
+
+=cut
+
+sub relname_to_table_alias {
+  my ($self, $relname, $join_count) = @_;
+
+  my $alias = ($join_count && $join_count > 1 ?
+    join('_', $relname, $join_count) : $relname);
+
+  return $alias;
 }
 
 1;

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBIHacks.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBIHacks.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/DBIHacks.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,566 @@
+package   #hide from PAUSE
+  DBIx::Class::Storage::DBIHacks;
+
+#
+# This module contains code that should never have seen the light of day,
+# does not belong in the Storage, or is otherwise unfit for public
+# display. The arrival of SQLA2 should immediately obsolete 90% of this
+#
+
+use strict;
+use warnings;
+
+use base 'DBIx::Class::Storage';
+use mro 'c3';
+
+use Carp::Clan qw/^DBIx::Class/;
+
+#
+# This code will remove non-selecting/non-restricting joins from
+# {from} specs, aiding the RDBMS query optimizer
+#
+sub _prune_unused_joins {
+  my ($self) = shift;
+
+  my ($from, $select, $where, $attrs) = @_;
+
+  if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY') {
+    return $from;   # only standard {from} specs are supported
+  }
+
+  my $aliastypes = $self->_resolve_aliastypes_from_select_args(@_);
+
+  # a grouped set will not be affected by amount of rows. Thus any
+  # {multiplying} joins can go
+  delete $aliastypes->{multiplying} if $attrs->{group_by};
+
+
+  my @newfrom = $from->[0]; # FROM head is always present
+
+  my %need_joins = (map { %{$_||{}} } (values %$aliastypes) );
+  for my $j (@{$from}[1..$#$from]) {
+    push @newfrom, $j if (
+      (! $j->[0]{-alias}) # legacy crap
+        ||
+      $need_joins{$j->[0]{-alias}}
+    );
+  }
+
+  return \@newfrom;
+}
+
+#
+# This is the code producing joined subqueries like:
+# SELECT me.*, other.* FROM ( SELECT me.* FROM ... ) JOIN other ON ... 
+#
+sub _adjust_select_args_for_complex_prefetch {
+  my ($self, $from, $select, $where, $attrs) = @_;
+
+  $self->throw_exception ('Nothing to prefetch... how did we get here?!')
+    if not @{$attrs->{_prefetch_select}};
+
+  $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
+    if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY');
+
+
+  # generate inner/outer attribute lists, remove stuff that doesn't apply
+  my $outer_attrs = { %$attrs };
+  delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
+
+  my $inner_attrs = { %$attrs };
+  delete $inner_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
+
+
+  # bring over all non-collapse-induced order_by into the inner query (if any)
+  # the outer one will have to keep them all
+  delete $inner_attrs->{order_by};
+  if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
+    $inner_attrs->{order_by} = [
+      @{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
+    ];
+  }
+
+  # generate the inner/outer select lists
+  # for inside we consider only stuff *not* brought in by the prefetch
+  # on the outside we substitute any function for its alias
+  my $outer_select = [ @$select ];
+  my $inner_select = [];
+  for my $i (0 .. ( @$outer_select - @{$outer_attrs->{_prefetch_select}} - 1) ) {
+    my $sel = $outer_select->[$i];
+
+    if (ref $sel eq 'HASH' ) {
+      $sel->{-as} ||= $attrs->{as}[$i];
+      $outer_select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "inner_column_$i") );
+    }
+
+    push @$inner_select, $sel;
+
+    push @{$inner_attrs->{as}}, $attrs->{as}[$i];
+  }
+
+  # construct the inner $from for the subquery
+  # we need to prune first, because this will determine if we need a group_by below
+  my $inner_from = $self->_prune_unused_joins ($from, $inner_select, $where, $inner_attrs);
+
+  # if a multi-type join was needed in the subquery - add a group_by to simulate the
+  # collapse in the subq
+  $inner_attrs->{group_by} ||= $inner_select
+    if List::Util::first
+      { ! $_->[0]{-is_single} }
+      (@{$inner_from}[1 .. $#$inner_from])
+  ;
+
+  # generate the subquery
+  my $subq = $self->_select_args_to_query (
+    $inner_from,
+    $inner_select,
+    $where,
+    $inner_attrs,
+  );
+
+  my $subq_joinspec = {
+    -alias => $attrs->{alias},
+    -source_handle => $inner_from->[0]{-source_handle},
+    $attrs->{alias} => $subq,
+  };
+
+  # Generate the outer from - this is relatively easy (really just replace
+  # the join slot with the subquery), with a major caveat - we can not
+  # join anything that is non-selecting (not part of the prefetch), but at
+  # the same time is a multi-type relationship, as it will explode the result.
+  #
+  # There are two possibilities here
+  # - either the join is non-restricting, in which case we simply throw it away
+  # - it is part of the restrictions, in which case we need to collapse the outer
+#   result by tacking yet another group_by to the outside of the query
+
+  # normalize a copy of $from, so it will be easier to work with further
+  # down (i.e. promote the initial hashref to an AoH)
+  $from = [ @$from ];
+  $from->[0] = [ $from->[0] ];
+
+  # so first generate the outer_from, up to the substitution point
+  my @outer_from;
+  while (my $j = shift @$from) {
+    if ($j->[0]{-alias} eq $attrs->{alias}) { # time to swap
+      push @outer_from, [
+        $subq_joinspec,
+        @{$j}[1 .. $#$j],
+      ];
+      last; # we'll take care of what's left in $from below
+    }
+    else {
+      push @outer_from, $j;
+    }
+  }
+
+  # scan the from spec against different attributes, and see which joins are needed
+  # in what role
+  my $outer_aliastypes =
+    $self->_resolve_aliastypes_from_select_args( $from, $outer_select, $where, $outer_attrs );
+
+  # see what's left - throw away if not selecting/restricting
+  # also throw in a group_by if restricting to guard against
+  # cross-join explosions
+  #
+  while (my $j = shift @$from) {
+    my $alias = $j->[0]{-alias};
+
+    if ($outer_aliastypes->{selecting}{$alias}) {
+      push @outer_from, $j;
+    }
+    elsif ($outer_aliastypes->{restricting}{$alias}) {
+      push @outer_from, $j;
+      $outer_attrs->{group_by} ||= $outer_select unless $j->[0]{-is_single};
+    }
+  }
+
+  # demote the outer_from head
+  $outer_from[0] = $outer_from[0][0];
+
+  # This is totally horrific - the $where ends up in both the inner and outer query
+  # Unfortunately not much can be done until SQLA2 introspection arrives, and even
+  # then if where conditions apply to the *right* side of the prefetch, you may have
+  # to both filter the inner select (e.g. to apply a limit) and then have to re-filter
+# the outer select to exclude joins you didn't want in the first place
+  #
+  # OTOH it can be seen as a plus: <ash> (notes that this query would make a DBA cry ;)
+  return (\@outer_from, $outer_select, $where, $outer_attrs);
+}
+
+#
+# I KNOW THIS SUCKS! GET SQLA2 OUT THE DOOR SO THIS CAN DIE!
+#
+# Due to a lack of SQLA2 we fall back to crude scans of all the
+# select/where/order/group attributes, in order to determine what
+# aliases are needed to fulfill the query. This information is used
+# throughout the code to prune unnecessary JOINs from the queries
+# in an attempt to reduce the execution time.
+# Although the method is pretty horrific, the worst thing that can
+# happen is for it to fail due to some scalar SQL, which in turn will
+# result in a vocal exception.
+sub _resolve_aliastypes_from_select_args {
+  my ( $self, $from, $select, $where, $attrs ) = @_;
+
+  $self->throw_exception ('Unable to analyze custom {from}')
+    if ref $from ne 'ARRAY';
+
+  # what we will return
+  my $aliases_by_type;
+
+  # see what aliases are there to work with
+  my $alias_list;
+  for (@$from) {
+    my $j = $_;
+    $j = $j->[0] if ref $j eq 'ARRAY';
+    my $al = $j->{-alias}
+      or next;
+
+    $alias_list->{$al} = $j;
+    $aliases_by_type->{multiplying}{$al} = 1
+      unless $j->{-is_single};
+  }
+
+  # get a column to source/alias map (including unqualified ones)
+  my $colinfo = $self->_resolve_column_info ($from);
+
+  # set up a botched SQLA
+  my $sql_maker = $self->sql_maker;
+  my $sep = quotemeta ($self->_sql_maker_opts->{name_sep} || '.');
+
+  my ($orig_lquote, $orig_rquote) = map { quotemeta $_ } (do {
+    if (ref $sql_maker->{quote_char} eq 'ARRAY') {
+      @{$sql_maker->{quote_char}}
+    }
+    else {
+      ($sql_maker->{quote_char} || '') x 2;
+    }
+  });
+
+  local $sql_maker->{quote_char} = "\x00"; # so that we can regex away
+
+  # generate sql chunks
+  my $to_scan = {
+    restricting => [
+      $sql_maker->_recurse_where ($where),
+      $sql_maker->_order_by({
+        map { $_ => $attrs->{$_} } (qw/group_by having/)
+      }),
+    ],
+    selecting => [
+      $self->_parse_order_by ($attrs->{order_by}, $sql_maker),
+      $sql_maker->_recurse_fields ($select),
+    ],
+  };
+
+  # throw away empty chunks
+  $_ = [ map { $_ || () } @$_ ] for values %$to_scan;
+
+  # first loop through all fully qualified columns and get the corresponding
+  # alias (should work even if they are in scalarrefs)
+  for my $alias (keys %$alias_list) {
+    my $al_re = qr/
+      \x00 $alias \x00 $sep
+        |
+      \b $alias $sep
+    /x;
+
+    # add matching for possible quoted literal sql
+    $al_re = qr/ $al_re | $orig_lquote $alias $orig_rquote /x
+      if ($orig_lquote && $orig_rquote);
+
+
+    for my $type (keys %$to_scan) {
+      for my $piece (@{$to_scan->{$type}}) {
+        $aliases_by_type->{$type}{$alias} = 1 if ($piece =~ $al_re);
+      }
+    }
+  }
+
+  # now loop through unqualified column names, and try to locate them within
+  # the chunks
+  for my $col (keys %$colinfo) {
+    next if $col =~ $sep;   # if column is qualified it was caught by the above
+
+    my $col_re = qr/ \x00 $col \x00 /x;
+
+    $col_re = qr/ $col_re | $orig_lquote $col $orig_rquote /x
+      if ($orig_lquote && $orig_rquote);
+
+    for my $type (keys %$to_scan) {
+      for my $piece (@{$to_scan->{$type}}) {
+        $aliases_by_type->{$type}{$colinfo->{$col}{-source_alias}} = 1 if ($piece =~ $col_re);
+      }
+    }
+  }
+
+  # Add any non-left joins to the restriction list (such joins are indeed restrictions)
+  for my $j (values %$alias_list) {
+    my $alias = $j->{-alias} or next;
+    $aliases_by_type->{restricting}{$alias} = 1 if (
+      (not $j->{-join_type})
+        or
+      ($j->{-join_type} !~ /^left (?: \s+ outer)? $/xi)
+    );
+  }
+
+  # mark all join parents as mentioned
+  # (e.g.  join => { cds => 'tracks' } - tracks will need to bring cds too )
+  for my $type (keys %$aliases_by_type) {
+    for my $alias (keys %{$aliases_by_type->{$type}}) {
+      $aliases_by_type->{$type}{$_} = 1
+        for (map { values %$_ } @{ $alias_list->{$alias}{-join_path} || [] });
+    }
+  }
+
+  return $aliases_by_type;
+}
+
+sub _resolve_ident_sources {
+  my ($self, $ident) = @_;
+
+  my $alias2source = {};
+  my $rs_alias;
+
+  # the reason this is so contrived is that $ident may be a {from}
+  # structure, specifying multiple tables to join
+  if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
+    # this is compat mode for insert/update/delete which do not deal with aliases
+    $alias2source->{me} = $ident;
+    $rs_alias = 'me';
+  }
+  elsif (ref $ident eq 'ARRAY') {
+
+    for (@$ident) {
+      my $tabinfo;
+      if (ref $_ eq 'HASH') {
+        $tabinfo = $_;
+        $rs_alias = $tabinfo->{-alias};
+      }
+      if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') {
+        $tabinfo = $_->[0];
+      }
+
+      $alias2source->{$tabinfo->{-alias}} = $tabinfo->{-source_handle}->resolve
+        if ($tabinfo->{-source_handle});
+    }
+  }
+
+  return ($alias2source, $rs_alias);
+}
+
+# Takes $ident, \@column_names
+#
+# returns { $column_name => \%column_info, ... }
+# also note: this adds -result_source => $rsrc to the column info
+#
+# If no columns_names are supplied returns info about *all* columns
+# for all sources
+sub _resolve_column_info {
+  my ($self, $ident, $colnames) = @_;
+  my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident);
+
+  my $sep = $self->_sql_maker_opts->{name_sep} || '.';
+  my $qsep = quotemeta $sep;
+
+  my (%return, %seen_cols, @auto_colnames);
+
+  # compile a global list of column names, to be able to properly
+  # disambiguate unqualified column names (if at all possible)
+  for my $alias (keys %$alias2src) {
+    my $rsrc = $alias2src->{$alias};
+    for my $colname ($rsrc->columns) {
+      push @{$seen_cols{$colname}}, $alias;
+      push @auto_colnames, "$alias$sep$colname" unless $colnames;
+    }
+  }
+
+  $colnames ||= [
+    @auto_colnames,
+    grep { @{$seen_cols{$_}} == 1 } (keys %seen_cols),
+  ];
+
+  COLUMN:
+  foreach my $col (@$colnames) {
+    my ($alias, $colname) = $col =~ m/^ (?: ([^$qsep]+) $qsep)? (.+) $/x;
+
+    unless ($alias) {
+      # see if the column was seen exactly once (so we know which rsrc it came from)
+      if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1) {
+        $alias = $seen_cols{$colname}[0];
+      }
+      else {
+        next COLUMN;
+      }
+    }
+
+    my $rsrc = $alias2src->{$alias};
+    $return{$col} = $rsrc && {
+      %{$rsrc->column_info($colname)},
+      -result_source => $rsrc,
+      -source_alias => $alias,
+    };
+  }
+
+  return \%return;
+}
+
+# The DBIC relationship chaining implementation is pretty simple - every
+# new related_relationship is pushed onto the {from} stack, and the {select}
+# window simply slides further in. This means that when we count somewhere
+# in the middle, we got to make sure that everything in the join chain is an
+# actual inner join, otherwise the count will come back with unpredictable
+# results (a resultset may be generated with _some_ rows regardless of if
+# the relation which the $rs currently selects has rows or not). E.g.
+# $artist_rs->cds->count - normally generates:
+# SELECT COUNT( * ) FROM artist me LEFT JOIN cd cds ON cds.artist = me.artistid
+# which actually returns the number of artists * (number of cds || 1)
+#
+# So what we do here is crawl {from}, determine if the current alias is at
+# the top of the stack, and if not - make sure the chain is inner-joined down
+# to the root.
+#
+sub _straight_join_to_node {
+  my ($self, $from, $alias) = @_;
+
+  # subqueries and other oddness are naturally not supported
+  return $from if (
+    ref $from ne 'ARRAY'
+      ||
+    @$from <= 1
+      ||
+    ref $from->[0] ne 'HASH'
+      ||
+    ! $from->[0]{-alias}
+      ||
+    $from->[0]{-alias} eq $alias  # this last bit means $alias is the head of $from - nothing to do
+  );
+
+  # find the current $alias in the $from structure
+  my $switch_branch;
+  JOINSCAN:
+  for my $j (@{$from}[1 .. $#$from]) {
+    if ($j->[0]{-alias} eq $alias) {
+      $switch_branch = $j->[0]{-join_path};
+      last JOINSCAN;
+    }
+  }
+
+  # something else went quite wrong
+  return $from unless $switch_branch;
+
+  # So it looks like we will have to switch some stuff around.
+  # local() is useless here as we will be leaving the scope
+  # anyway, and deep cloning is just too fucking expensive
+  # So replace the first hashref in the node arrayref manually 
+  my @new_from = ($from->[0]);
+  my $sw_idx = { map { (values %$_), 1 } @$switch_branch }; #there's one k/v per join-path
+
+  for my $j (@{$from}[1 .. $#$from]) {
+    my $jalias = $j->[0]{-alias};
+
+    if ($sw_idx->{$jalias}) {
+      my %attrs = %{$j->[0]};
+      delete $attrs{-join_type};
+      push @new_from, [
+        \%attrs,
+        @{$j}[ 1 .. $#$j ],
+      ];
+    }
+    else {
+      push @new_from, $j;
+    }
+  }
+
+  return \@new_from;
+}
+
+# Most databases do not allow aliasing of tables in UPDATE/DELETE. Thus
+# a condition containing 'me' or other table prefixes will not work
+# at all. What this code tries to do (badly) is introspect the condition
+# and remove all column qualifiers. If it bails out early (returns undef)
+# the calling code should try another approach (e.g. a subquery)
+sub _strip_cond_qualifiers {
+  my ($self, $where) = @_;
+
+  my $cond = {};
+
+  # No-op. No condition, we're updating/deleting everything
+  return $cond unless $where;
+
+  if (ref $where eq 'ARRAY') {
+    $cond = [
+      map {
+        my %hash;
+        foreach my $key (keys %{$_}) {
+          $key =~ /([^.]+)$/;
+          $hash{$1} = $_->{$key};
+        }
+        \%hash;
+      } @$where
+    ];
+  }
+  elsif (ref $where eq 'HASH') {
+    if ( (keys %$where) == 1 && ( (keys %{$where})[0] eq '-and' )) {
+      $cond->{-and} = [];
+      my @cond = @{$where->{-and}};
+       for (my $i = 0; $i < @cond; $i++) {
+        my $entry = $cond[$i];
+        my $hash;
+        my $ref = ref $entry;
+        if ($ref eq 'HASH' or $ref eq 'ARRAY') {
+          $hash = $self->_strip_cond_qualifiers($entry);
+        }
+        elsif (! $ref) {
+          $entry =~ /([^.]+)$/;
+          $hash->{$1} = $cond[++$i];
+        }
+        else {
+          $self->throw_exception ("_strip_cond_qualifiers() is unable to handle a condition reftype $ref");
+        }
+        push @{$cond->{-and}}, $hash;
+      }
+    }
+    else {
+      foreach my $key (keys %$where) {
+        $key =~ /([^.]+)$/;
+        $cond->{$1} = $where->{$key};
+      }
+    }
+  }
+  else {
+    return undef;
+  }
+
+  return $cond;
+}
+
+sub _parse_order_by {
+  my ($self, $order_by, $sql_maker) = @_;
+
+  my $parser = sub {
+    my ($sql_maker, $order_by) = @_;
+
+    return scalar $sql_maker->_order_by_chunks ($order_by)
+      unless wantarray;
+
+    my @chunks;
+    for my $chunk (map { ref $_ ? @$_ : $_ } ($sql_maker->_order_by_chunks ($order_by) ) ) {
+      $chunk =~ s/\s+ (?: ASC|DESC ) \s* $//ix;
+      push @chunks, $chunk;
+    }
+
+    return @chunks;
+  };
+
+  if ($sql_maker) {
+    return $parser->($sql_maker, $order_by);
+  }
+  else {
+    $sql_maker = $self->sql_maker;
+    local $sql_maker->{quote_char};
+    return $parser->($sql_maker, $order_by);
+  }
+}
+
+1;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/Statistics.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/Statistics.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/Statistics.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -2,7 +2,7 @@
 use strict;
 use warnings;
 
-use base qw/Class::Accessor::Grouped/;
+use base qw/DBIx::Class/;
 use IO::File;
 
 __PACKAGE__->mk_group_accessors(simple => qw/callback debugfh silence/);

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/TxnScopeGuard.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/TxnScopeGuard.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage/TxnScopeGuard.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -2,7 +2,7 @@
 
 use strict;
 use warnings;
-use Carp ();
+use Carp::Clan qw/^DBIx::Class/;
 
 sub new {
   my ($class, $storage) = @_;
@@ -24,21 +24,33 @@
   return if $dismiss;
 
   my $exception = $@;
-  Carp::cluck("A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or an error - bad")
-    unless $exception; 
+
   {
     local $@;
+
+    carp 'A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or error. Rolling back.'
+      unless $exception;
+
     eval { $storage->txn_rollback };
     my $rollback_exception = $@;
-    if($rollback_exception) {
-      my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
 
-      $storage->throw_exception(
-        "Transaction aborted: ${exception}. "
-        . "Rollback failed: ${rollback_exception}"
-      ) unless $rollback_exception =~ /$exception_class/;
+    if ($rollback_exception && $rollback_exception !~ /DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION/) {
+      if ($exception) {
+        $exception = "Transaction aborted: ${exception} "
+          ."Rollback failed: ${rollback_exception}";
+      }
+      else {
+        carp (join ' ',
+          "********************* ROLLBACK FAILED!!! ********************",
+          "\nA rollback operation failed after the guard went out of scope.",
+          'This is potentially a disastrous situation, check your data for',
+          "consistency: $rollback_exception"
+        );
+      }
     }
   }
+
+  $@ = $exception;
 }
 
 1;
@@ -77,7 +89,7 @@
 =head2 commit
 
 Commit the transaction, and stop guarding the scope. If this method is not
-called and this object goes out of scope (i.e. an exception is thrown) then
+called and this object goes out of scope (e.g. an exception is thrown) then
 the transaction is rolled back, via L<DBIx::Class::Storage/txn_rollback>
 
 =cut
@@ -90,7 +102,7 @@
 
 Ash Berlin, 2008.
 
-Insipred by L<Scope::Guard> by chocolateboy.
+Inspired by L<Scope::Guard> by chocolateboy.
 
 This module is free software. It may be used, redistributed and/or modified
 under the same terms as Perl itself.

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/Storage.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -6,8 +6,8 @@
 use base qw/DBIx::Class/;
 use mro 'c3';
 
-use Scalar::Util qw/weaken/;
-use Carp::Clan qw/^DBIx::Class/;
+use DBIx::Class::Exception;
+use Scalar::Util();
 use IO::File;
 use DBIx::Class::Storage::TxnScopeGuard;
 
@@ -83,7 +83,7 @@
 sub set_schema {
   my ($self, $schema) = @_;
   $self->schema($schema);
-  weaken($self->{schema}) if ref $self->{schema};
+  Scalar::Util::weaken($self->{schema}) if ref $self->{schema};
 }
 
 =head2 connected
@@ -120,8 +120,12 @@
 sub throw_exception {
   my $self = shift;
 
-  $self->schema->throw_exception(@_) if $self->schema;
-  croak @_;
+  if ($self->schema) {
+    $self->schema->throw_exception(@_);
+  }
+  else {
+    DBIx::Class::Exception->throw(@_);
+  }
 }
 
 =head2 txn_do
@@ -349,7 +353,7 @@
 =head2 debugfh
 
 Set or retrieve the filehandle used for trace/debug output.  This should be
-an IO::Handle compatible ojbect (only the C<print> method is used.  Initially
+an IO::Handle compatible object (only the C<print> method is used.  Initially
 set to be STDERR - although see information on the
 L<DBIC_TRACE> environment variable.
 

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/UTF8Columns.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/UTF8Columns.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class/UTF8Columns.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -3,27 +3,18 @@
 use warnings;
 use base qw/DBIx::Class/;
 
-BEGIN {
-
-    # Perl 5.8.0 doesn't have utf8::is_utf8()
-    # Yes, 5.8.0 support for Unicode is suboptimal, but things like RHEL3 ship with it.
-    if ($] <= 5.008000) {
-        require Encode;
-    } else {
-        require utf8;
-    }
-}
-
 __PACKAGE__->mk_classdata( '_utf8_columns' );
 
 =head1 NAME
 
-DBIx::Class::UTF8Columns - Force UTF8 (Unicode) flag on columns
+DBIx::Class::UTF8Columns - Force UTF8 (Unicode) flag on columns (DEPRECATED)
 
 =head1 SYNOPSIS
 
     package Artist;
-    __PACKAGE__->load_components(qw/UTF8Columns Core/);
+    use base 'DBIx::Class::Core';
+
+    __PACKAGE__->load_components(qw/UTF8Columns/);
     __PACKAGE__->utf8_columns(qw/name description/);
 
     # then belows return strings with utf8 flag
@@ -32,8 +23,62 @@
 
 =head1 DESCRIPTION
 
-This module allows you to get columns data that have utf8 (Unicode) flag.
+This module allows you to get and store utf8 (unicode) column data
+in a database that does not natively support unicode. It ensures
+that column data is correctly serialised as a byte stream when
+stored and de-serialised to unicode strings on retrieval.
 
+  THE USE OF THIS MODULE (AND ITS COUSIN DBIx::Class::ForceUTF8) IS VERY
+  STRONGLY DISCOURAGED, PLEASE READ THE WARNINGS BELOW FOR AN EXPLANATION.
+
+If you want to continue using this module and do not want to receive
+further warnings set the environment variable C<DBIC_UTF8COLUMNS_OK>
+to a true value.
+
+=head2 Warning - Module does not function properly on create/insert
+
+Recently (April 2010) a bug was found deep in the core of L<DBIx::Class>
+which affects any component attempting to perform encoding/decoding by
+overloading L<store_column|DBIx::Class::Row/store_column> and
+L<get_columns|DBIx::Class::Row/get_columns>. As a result of this problem
+L<create|DBIx::Class::ResultSet/create> sends the original column values
+to the database, while L<update|DBIx::Class::ResultSet/update> sends the
+encoded values. L<DBIx::Class::UTF8Columns> and L<DBIx::Class::ForceUTF8>
+are both affected by this bug.
+
+It is unclear how this bug went undetected for so long (it was
+introduced in March 2006). No attempts to fix it will be made while the
+implications of changing such a fundamental behavior of DBIx::Class are
+being evaluated. However in this day and age you should not be using
+this module anyway as Unicode is properly supported by all major
+database engines, as explained below.
+
+If you have specific questions about the integrity of your data in light
+of this development - please 
+L<join us on IRC or the mailing list|DBIx::Class/GETTING HELP/SUPPORT>
+to further discuss your concerns with the team.
+
+=head2 Warning - Native Database Unicode Support
+
+If your database natively supports Unicode (as does SQLite with the
+C<sqlite_unicode> connect flag, MySQL with C<mysql_enable_utf8>
+connect flag or Postgres with the C<pg_enable_utf8> connect flag),
+then this component should B<not> be used, and will corrupt unicode
+data in a subtle and unexpected manner.
+
+It is far better to do Unicode support within the database if
+possible rather than converting data to and from raw bytes on every
+database round trip.
+
+=head2 Warning - Component Overloading
+
+Note that this module overloads L<DBIx::Class::Row/store_column> in a way
+that may prevent other components overloading the same method from working
+correctly. This component must be the last one before L<DBIx::Class::Row>
+(which is provided by L<DBIx::Class::Core>). DBIx::Class will detect such
+incorrect component order and issue an appropriate warning, advising which
+components need to be loaded differently.
+
 =head1 SEE ALSO
 
 L<Template::Stash::ForceUTF8>, L<DBIx::Class::UUIDColumns>.
@@ -50,7 +95,7 @@
         foreach my $col (@_) {
             $self->throw_exception("column $col doesn't exist")
                 unless $self->has_column($col);
-        }        
+        }
         return $self->_utf8_columns({ map { $_ => 1 } @_ });
     } else {
         return $self->_utf8_columns;
@@ -67,17 +112,11 @@
     my ( $self, $column ) = @_;
     my $value = $self->next::method($column);
 
-    my $cols = $self->_utf8_columns;
-    if ( $cols and defined $value and $cols->{$column} ) {
+    utf8::decode($value) if (
+      defined $value and $self->_is_utf8_column($column) and ! utf8::is_utf8($value)
+    );
 
-        if ($] <= 5.008000) {
-            Encode::_utf8_on($value) unless Encode::is_utf8($value);
-        } else {
-            utf8::decode($value) unless utf8::is_utf8($value);
-        }
-    }
-
-    $value;
+    return $value;
 }
 
 =head2 get_columns
@@ -88,16 +127,13 @@
     my $self = shift;
     my %data = $self->next::method(@_);
 
-    foreach my $col (grep { defined $data{$_} } keys %{ $self->_utf8_columns || {} }) {
-
-        if ($] <= 5.008000) {
-            Encode::_utf8_on($data{$col}) unless Encode::is_utf8($data{$col});
-        } else {
-            utf8::decode($data{$col}) unless utf8::is_utf8($data{$col});
-        }
+    foreach my $col (keys %data) {
+      utf8::decode($data{$col}) if (
+        exists $data{$col} and defined $data{$col} and $self->_is_utf8_column($col) and ! utf8::is_utf8($data{$col})
+      );
     }
 
-    %data;
+    return %data;
 }
 
 =head2 store_column
@@ -107,32 +143,33 @@
 sub store_column {
     my ( $self, $column, $value ) = @_;
 
-    my $cols = $self->_utf8_columns;
-    if ( $cols and defined $value and $cols->{$column} ) {
+    # the dirtyness comparison must happen on the non-encoded value
+    my $copy;
 
-        if ($] <= 5.008000) {
-            Encode::_utf8_off($value) if Encode::is_utf8($value);
-        } else {
-            utf8::encode($value) if utf8::is_utf8($value);
-        }
+    if ( defined $value and $self->_is_utf8_column($column) and utf8::is_utf8($value) ) {
+      $copy = $value;
+      utf8::encode($value);
     }
 
     $self->next::method( $column, $value );
+
+    return $copy || $value;
 }
 
-=head1 AUTHOR
+# override this if you want to force everything to be encoded/decoded
+sub _is_utf8_column {
+  # my ($self, $col) = @_;
+  return ($_[0]->utf8_columns || {})->{$_[1]};
+}
 
-Daisuke Murase <typester at cpan.org>
+=head1 AUTHORS
 
-=head1 COPYRIGHT
+See L<DBIx::Class/CONTRIBUTORS>.
 
-This program is free software; you can redistribute
-it and/or modify it under the same terms as Perl itself.
+=head1 LICENSE
 
-The full text of the license can be found in the
-LICENSE file included with this module.
+You may distribute this code under the same terms as Perl itself.
 
 =cut
 
 1;
-

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/DBIx/Class.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -4,7 +4,10 @@
 use warnings;
 
 use MRO::Compat;
+use mro 'c3';
 
+use DBIx::Class::Optional::Dependencies;
+
 use vars qw($VERSION);
 use base qw/DBIx::Class::Componentised Class::Accessor::Grouped/;
 use DBIx::Class::StartupCheck;
@@ -24,11 +27,10 @@
 # Always remember to do all digits for the version even if they're 0
 # i.e. first release of 0.XX *must* be 0.XX000. This avoids fBSD ports
 # brain damage and presumably various other packaging systems too
+$VERSION = '0.08121_01';
 
-$VERSION = '0.08108';
+$VERSION = eval $VERSION if $VERSION =~ /_/; # numify for warning-free dev releases
 
-$VERSION = eval $VERSION; # numify for warning-free dev releases
-
 sub MODIFY_CODE_ATTRIBUTES {
  my ($class,$code,@attrs) = @_;
   $class->mk_classdata('__attr_cache' => {})
@@ -54,14 +56,21 @@
 
 The community can be found via:
 
-  Mailing list: http://lists.scsys.co.uk/mailman/listinfo/dbix-class/
+=over
 
-  SVN: http://dev.catalyst.perl.org/repos/bast/DBIx-Class/
+=item * IRC: L<irc.perl.org#dbix-class (click for instant chatroom login)
+|http://mibbit.com/chat/#dbix-class@irc.perl.org>
 
-  SVNWeb: http://dev.catalyst.perl.org/svnweb/bast/browse/DBIx-Class/
+=item * Mailing list: L<http://lists.scsys.co.uk/mailman/listinfo/dbix-class>
 
-  IRC: irc.perl.org#dbix-class
+=item * RT Bug Tracker: L<https://rt.cpan.org/Dist/Display.html?Queue=DBIx-Class>
 
+=item * SVNWeb: L<http://dev.catalyst.perl.org/svnweb/bast/browse/DBIx-Class/0.08>
+
+=item * SVN: L<http://dev.catalyst.perl.org/repos/bast/DBIx-Class/0.08>
+
+=back
+
 =head1 SYNOPSIS
 
 Create a schema class called MyDB/Schema.pm:
@@ -79,9 +88,8 @@
 See L<DBIx::Class::ResultSource> for docs on defining result classes.
 
   package MyDB::Schema::Result::Artist;
-  use base qw/DBIx::Class/;
+  use base qw/DBIx::Class::Core/;
 
-  __PACKAGE__->load_components(qw/Core/);
   __PACKAGE__->table('artist');
   __PACKAGE__->add_columns(qw/ artistid name /);
   __PACKAGE__->set_primary_key('artistid');
@@ -93,9 +101,9 @@
 MyDB/Schema/Result/CD.pm:
 
   package MyDB::Schema::Result::CD;
-  use base qw/DBIx::Class/;
+  use base qw/DBIx::Class::Core/;
 
-  __PACKAGE__->load_components(qw/Core/);
+  __PACKAGE__->load_components(qw/InflateColumn::DateTime/);
   __PACKAGE__->table('cd');
   __PACKAGE__->add_columns(qw/ cdid artistid title year /);
   __PACKAGE__->set_primary_key('cdid');
@@ -116,9 +124,9 @@
   my $all_artists_rs = $schema->resultset('Artist');
 
   # Output all artists names
-  # $artist here is a DBIx::Class::Row, which has accessors 
+  # $artist here is a DBIx::Class::Row, which has accessors
   # for all its columns. Rows are also subclasses of your Result class.
-  foreach $artist (@artists) {
+  foreach $artist (@all_artists) {
     print $artist->name, "\n";
   }
 
@@ -210,10 +218,12 @@
 
 =head1 CONTRIBUTORS
 
-abraxxa: Alexander Hartmaier <alex_hartmaier at hotmail.com>
+abraxxa: Alexander Hartmaier <abraxxa at cpan.org>
 
 aherzog: Adam Herzog <adam at herzogdesigns.com>
 
+amoore: Andrew Moore <amoore at cpan.org>
+
 andyg: Andy Grundman <andy at hybridized.org>
 
 ank: Andres Kievsky
@@ -228,8 +238,12 @@
 
 bluefeet: Aran Deltac <bluefeet at cpan.org>
 
+boghead: Bryan Beeley <cpan at beeley.org>
+
 bricas: Brian Cassidy <bricas at cpan.org>
 
+brunov: Bruno Vecchi <vecchi.b at gmail.com>
+
 caelum: Rafael Kitover <rkitover at cpan.org>
 
 castaway: Jess Robinson
@@ -242,20 +256,28 @@
 
 debolaz: Anders Nor Berle <berle at cpan.org>
 
+dew: Dan Thomas <dan at godders.org>
+
 dkubb: Dan Kubb <dan.kubb-cpan at onautopilot.com>
 
 dnm: Justin Wheeler <jwheeler at datademons.com>
 
+dpetrov: Dimitar Petrov <mitakaa at gmail.com>
+
 dwc: Daniel Westermann-Clark <danieltwc at cpan.org>
 
 dyfrgi: Michael Leuchtenburg <michael at slashhome.org>
 
 frew: Arthur Axel "fREW" Schmidt <frioux at gmail.com>
 
+goraxe: Gordon Irving <goraxe at cpan.org>
+
 gphat: Cory G Watson <gphat at cpan.org>
 
 groditi: Guillermo Roditi <groditi at cpan.org>
 
+hobbs: Andrew Rodland <arodland at cpan.org>
+
 ilmari: Dagfinn Ilmari MannsE<aring>ker <ilmari at ilmari.org>
 
 jasonmay: Jason May <jason.a.may at gmail.com>
@@ -266,6 +288,8 @@
 
 jguenther: Justin Guenther <jguenther at cpan.org>
 
+jhannah: Jay Hannah <jay at jays.net>
+
 jnapiorkowski: John Napiorkowski <jjn1056 at yahoo.com>
 
 jon: Jon Schutz <jjschutz at cpan.org>
@@ -292,8 +316,12 @@
 
 norbi: Norbert Buchmuller <norbi at nix.hu>
 
+nuba: Nuba Princigalli <nuba at cpan.org>
+
 Numa: Dan Sully <daniel at cpan.org>
 
+ovid: Curtis "Ovid" Poe <ovid at cpan.org>
+
 oyse: Øystein Torget <oystein.torget at dnv.com>
 
 paulm: Paul Makepeace
@@ -312,14 +340,18 @@
 
 rafl: Florian Ragwitz <rafl at debian.org>
 
+rbuels: Robert Buels <rmb32 at cornell.edu>
+
 rdj: Ryan D Johnson <ryan at innerfence.com>
 
-ribasushi: Peter Rabbitson <rabbit+dbic at rabbit.us>
+ribasushi: Peter Rabbitson <ribasushi at cpan.org>
 
 rjbs: Ricardo Signes <rjbs at cpan.org>
 
 robkinyon: Rob Kinyon <rkinyon at cpan.org>
 
+Roman: Roman Filippov <romanf at cpan.org>
+
 sc_: Just Another Perl Hacker
 
 scotty: Scotty Allen <scotty at scottyallen.com>
@@ -328,6 +360,8 @@
 
 solomon: Jared Johnson <jaredj at nmgi.com>
 
+spb: Stephen Bennett <stephen at freenode.net>
+
 sszabo: Stephan Szabo <sszabo at bigpanda.com>
 
 teejay : Aaron Trevena <teejay at cpan.org>
@@ -336,6 +370,8 @@
 
 Tom Hukins
 
+triode: Pete Gamache <gamache at cpan.org>
+
 typester: Daisuke Murase <typester at cpan.org>
 
 victori: Victor Igumnov <victori at cpan.org>
@@ -348,8 +384,16 @@
 
 zamolxes: Bogdan Lucaciu <bogdan at wiz.ro>
 
+Possum: Daniel LeWarne <possum at cpan.org>
+
+=head1 COPYRIGHT
+
+Copyright (c) 2005 - 2010 the DBIx::Class L</AUTHOR> and L</CONTRIBUTORS>
+as listed above.
+
 =head1 LICENSE
 
-You may distribute this code under the same terms as Perl itself.
+This library is free software and may be distributed under the same terms
+as perl itself.
 
 =cut

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/SQL/Translator/Parser/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/SQL/Translator/Parser/DBIx/Class.pm	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/lib/SQL/Translator/Parser/DBIx/Class.pm	2010-05-17 14:31:46 UTC (rev 9401)
@@ -15,6 +15,7 @@
 use Exporter;
 use SQL::Translator::Utils qw(debug normalize_name);
 use Carp::Clan qw/^SQL::Translator|^DBIx::Class/;
+use Scalar::Util ();
 
 use base qw(Exporter);
 
@@ -30,6 +31,10 @@
 # We're working with DBIx::Class Schemas, not data streams.
 # -------------------------------------------------------------------
 sub parse {
+    # this is a hack to prevent schema leaks due to a retarded SQLT implementation
+    # DO NOT REMOVE (until SQLT2 is out, all of this will be rewritten anyway)
+    Scalar::Util::weaken ($_[1]) if ref ($_[1]);
+
     my ($tr, $data)   = @_;
     my $args          = $tr->parser_args;
     my $dbicschema    = $args->{'DBIx::Class::Schema'} ||  $args->{"DBIx::Schema"} ||$data;
@@ -65,19 +70,19 @@
     }
 
 
-    my(@table_monikers, @view_monikers);
+    my(%table_monikers, %view_monikers);
     for my $moniker (@monikers){
       my $source = $dbicschema->source($moniker);
        if ( $source->isa('DBIx::Class::ResultSource::Table') ) {
-         push(@table_monikers, $moniker);
+         $table_monikers{$moniker}++;
       } elsif( $source->isa('DBIx::Class::ResultSource::View') ){
           next if $source->is_virtual;
-         push(@view_monikers, $moniker);
+         $view_monikers{$moniker}++;
       }
     }
 
     my %tables;
-    foreach my $moniker (sort @table_monikers)
+    foreach my $moniker (sort keys %table_monikers)
     {
         my $source = $dbicschema->source($moniker);
         my $table_name = $source->name;
@@ -86,7 +91,7 @@
         # support quoting properly to be signaled about this
         $table_name = $$table_name if ref $table_name eq 'SCALAR';
 
-        # Its possible to have multiple DBIC sources using the same table
+        # It's possible to have multiple DBIC sources using the same table
         next if $tables{$table_name};
 
         $tables{$table_name}{source} = $source;
@@ -112,9 +117,11 @@
             my $f = $table->add_field(%colinfo)
               || $dbicschema->throw_exception ($table->error);
         }
-        $table->primary_key($source->primary_columns);
 
         my @primary = $source->primary_columns;
+
+        $table->primary_key(@primary) if @primary;
+
         my %unique_constraints = $source->unique_constraints;
         foreach my $uniq (sort keys %unique_constraints) {
             if (!$source->_compare_relationship_keys($unique_constraints{$uniq}, \@primary)) {
@@ -131,18 +138,23 @@
         my %created_FK_rels;
 
         # global add_fk_index set in parser_args
-        my $add_fk_index = (exists $args->{add_fk_index} && ($args->{add_fk_index} == 0)) ? 0 : 1;
+        my $add_fk_index = (exists $args->{add_fk_index} && ! $args->{add_fk_index}) ? 0 : 1;
 
         foreach my $rel (sort @rels)
         {
+
             my $rel_info = $source->relationship_info($rel);
 
             # Ignore any rel cond that isn't a straight hash
             next unless ref $rel_info->{cond} eq 'HASH';
 
-            my $othertable = $source->related_source($rel);
-            my $rel_table = $othertable->name;
+            my $relsource = $source->related_source($rel);
 
+            # related sources might be excluded via a {sources} filter or might be views
+            next unless exists $table_monikers{$relsource->source_name};
+
+            my $rel_table = $relsource->name;
+
             # FIXME - this isn't the right way to do it, but sqlt does not
             # support quoting properly to be signaled about this
             $rel_table = $$rel_table if ref $rel_table eq 'SCALAR';
@@ -152,7 +164,7 @@
 
             # Force the order of @cond to match the order of ->add_columns
             my $idx;
-            my %other_columns_idx = map {'foreign.'.$_ => ++$idx } $othertable->columns;            
+            my %other_columns_idx = map {'foreign.'.$_ => ++$idx } $relsource->columns;
             my @cond = sort { $other_columns_idx{$a} cmp $other_columns_idx{$b} } keys(%{$rel_info->{cond}}); 
 
             # Get the key information, mapping off the foreign/self markers
@@ -184,7 +196,7 @@
                     if ($fk_constraint) {
                         $cascade->{$c} = $rel_info->{attrs}{"on_$c"};
                     }
-                    else {
+                    elsif ( $rel_info->{attrs}{"on_$c"} ) {
                         carp "SQLT attribute 'on_$c' was supplied for relationship '$moniker/$rel', which does not appear to be a foreign constraint. "
                             . "If you are sure that SQLT must generate a constraint for this relationship, add 'is_foreign_key_constraint => 1' to the attributes.\n";
                     }
@@ -194,47 +206,53 @@
                 }
             }
 
-            if($rel_table)
-            {
+            if($rel_table) {
                 # Constraints are added only if applicable
                 next unless $fk_constraint;
 
                 # Make sure we dont create the same foreign key constraint twice
-                my $key_test = join("\x00", @keys);
+                my $key_test = join("\x00", sort @keys);
                 next if $created_FK_rels{$rel_table}->{$key_test};
 
                 if (scalar(@keys)) {
-
                   $created_FK_rels{$rel_table}->{$key_test} = 1;
 
                   my $is_deferrable = $rel_info->{attrs}{is_deferrable};
 
-                  # do not consider deferrable constraints and self-references
-                  # for dependency calculations
+                  # calculate dependencies: do not consider deferrable constraints and
+                  # self-references for dependency calculations
                   if (! $is_deferrable and $rel_table ne $table_name) {
                     $tables{$table_name}{foreign_table_deps}{$rel_table}++;
                   }
 
                   $table->add_constraint(
-                                    type             => 'foreign_key',
-                                    name             => join('_', $table_name, 'fk', @keys),
-                                    fields           => \@keys,
-                                    reference_fields => \@refkeys,
-                                    reference_table  => $rel_table,
-                                    on_delete        => uc ($cascade->{delete} || ''),
-                                    on_update        => uc ($cascade->{update} || ''),
-                                    (defined $is_deferrable ? ( deferrable => $is_deferrable ) : ()),
+                    type             => 'foreign_key',
+                    name             => join('_', $table_name, 'fk', @keys),
+                    fields           => \@keys,
+                    reference_fields => \@refkeys,
+                    reference_table  => $rel_table,
+                    on_delete        => uc ($cascade->{delete} || ''),
+                    on_update        => uc ($cascade->{update} || ''),
+                    (defined $is_deferrable ? ( deferrable => $is_deferrable ) : ()),
                   );
 
                   # global parser_args add_fk_index param can be overridden on the rel def
                   my $add_fk_index_rel = (exists $rel_info->{attrs}{add_fk_index}) ? $rel_info->{attrs}{add_fk_index} : $add_fk_index;
 
+                  # Check that we do not create an index identical to the PK index
+                  # (some RDBMS croak on this, and it generally doesn't make much sense)
+                  # NOTE: we do not sort the key columns because the order of
+                  # columns is important for indexes and two indexes with the
+                  # same cols but different order are allowed and sometimes
+                  # needed
+                  next if join("\x00", @keys) eq join("\x00", @primary);
+
                   if ($add_fk_index_rel) {
                       my $index = $table->add_index(
-                                                    name   => join('_', $table_name, 'idx', @keys),
-                                                    fields => \@keys,
-                                                    type   => 'NORMAL',
-                                                    );
+                          name   => join('_', $table_name, 'idx', @keys),
+                          fields => \@keys,
+                          type   => 'NORMAL',
+                      );
                   }
               }
             }
@@ -280,8 +298,8 @@
         <=>
         (exists $b->depends_on->{$a->source_name} ? 1 : 0)
       }
-      map { $dbicschema->source($_) } @view_monikers;
-    
+      map { $dbicschema->source($_) } (sort keys %view_monikers);
+
     foreach my $source (@view_sources)
     {
         my $view_name = $source->name;
@@ -296,6 +314,9 @@
         # Its possible to have multiple DBIC source using same table
         next if $views{$view_name}++;
 
+        $dbicschema->throw_exception ("view $view_name is missing a view_definition")
+            unless $source->view_definition;
+
         my $view = $schema->add_view (
           name => $view_name,
           fields => [ $source->columns ],
@@ -371,7 +392,14 @@
  my $schema = MyApp::Schema->connect;
  my $trans  = SQL::Translator->new (
       parser      => 'SQL::Translator::Parser::DBIx::Class',
-      parser_args => { package => $schema },
+      parser_args => {
+          package => $schema,
+          add_fk_index => 0,
+          sources => [qw/
+            Artist
+            CD
+          /],
+      },
       producer    => 'SQLite',
      ) or die SQL::Translator->error;
  my $out = $trans->translate() or die $trans->error;
@@ -393,14 +421,34 @@
 have SQL::Translator installed. To do this see
 L<DBIx::Class::Schema/create_ddl_dir>.
 
+=head1 PARSER OPTIONS
+
+=head2 add_fk_index
+
+Create an index for each foreign key.
+Enabled by default, as having indexed foreign key columns is normally the
+sensible thing to do.
+
+=head2 sources
+
+=over 4
+
+=item Arguments: \@class_names
+
+=back
+
+Limit the amount of parsed sources by supplying an explicit list of source names.
+
 =head1 SEE ALSO
 
 L<SQL::Translator>, L<DBIx::Class::Schema>
 
 =head1 AUTHORS
 
-Jess Robinson
+See L<DBIx::Class/CONTRIBUTORS>.
 
-Matt S Trout
+=head1 LICENSE
 
-Ash Berlin
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/benchmark_datafetch.pl
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/benchmark_datafetch.pl	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/benchmark_datafetch.pl	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,38 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+
+use Benchmark qw/cmpthese/;
+use FindBin;
+use lib "$FindBin::Bin/../t/lib";
+use lib "$FindBin::Bin/../lib";
+use DBICTest::Schema;
+use DBIx::Class::ResultClass::HashRefInflator;  # older dbic didn't load it
+
+printf "Benchmarking DBIC version %s\n", DBIx::Class->VERSION;
+
+my $schema = DBICTest::Schema->connect ('dbi:SQLite::memory:');
+$schema->deploy;
+
+my $rs = $schema->resultset ('Artist');
+$rs->populate ([ map { { name => "Art_$_"} } (1 .. 10000) ]);
+
+my $dbh = $schema->storage->dbh;
+my $sql = sprintf ('SELECT %s FROM %s %s',
+  join (',', @{$rs->_resolved_attrs->{select}} ),
+  $rs->result_source->name,
+  $rs->_resolved_attrs->{alias},
+);
+
+my $compdbi = sub {
+  my @r = $schema->storage->dbh->selectall_arrayref ('SELECT * FROM ' . ${$rs->as_query}->[0] )
+} if $rs->can ('as_query');
+
+cmpthese(-3, {
+  Cursor => sub { $rs->reset; my @r = $rs->cursor->all },
+  HRI => sub { $rs->reset; my @r = $rs->search ({}, { result_class => 'DBIx::Class::ResultClass::HashRefInflator' } )->all },
+  RowObj => sub { $rs->reset; my @r = $rs->all },
+  RawDBI => sub { my @r = $dbh->selectall_arrayref ($sql) },
+  $compdbi ? (CompDBI => $compdbi) : (),
+});


Property changes on: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/benchmark_datafetch.pl
___________________________________________________________________
Added: svn:executable
   + *

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/gen-schema.pl
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/gen-schema.pl	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/gen-schema.pl	2010-05-17 14:31:46 UTC (rev 9401)
@@ -8,4 +8,10 @@
 use SQL::Translator;
 
 my $schema = DBICTest::Schema->connect;
-print scalar ($schema->storage->deployment_statements($schema, 'SQLite'));
+print scalar ($schema->storage->deployment_statements(
+  $schema,
+  'SQLite',
+  undef,
+  undef,
+  { producer_args => { no_transaction => 1 } }
+));

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/joint_deps.pl
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/joint_deps.pl	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/joint_deps.pl	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,60 @@
+#!/usr/bin/perl
+
+use warnings;
+use strict;
+
+use CPANDB;
+use DBIx::Class::Schema::Loader 0.05;
+use Data::Dumper::Concise;
+
+{
+  package CPANDB::Schema;
+  use base qw/DBIx::Class::Schema::Loader/;
+
+  __PACKAGE__->loader_options (
+    naming => 'v5',
+  );
+}
+
+my $s = CPANDB::Schema->connect (sub { CPANDB->dbh } );
+
+# reference names are unstable - just create rels manually
+my $distrsrc = $s->source('Distribution');
+
+# the has_many helper is a class-only method (why?), thus
+# manual add_rel
+$distrsrc->add_relationship (
+  'deps',
+  $s->class('Dependency'),
+  { 'foreign.distribution' => 'self.' . ($distrsrc->primary_columns)[0] },
+  { accessor => 'multi', join_type => 'left' },
+);
+
+# here is how one could use the helper currently:
+#
+#my $distresult = $s->class('Distribution');
+#$distresult->has_many (
+#  'deps',
+#  $s->class('Dependency'),
+#  'distribution',
+#);
+#$s->unregister_source ('Distribution');
+#$s->register_class ('Distribution', $distresult);
+
+
+# a proof of concept how to find out who uses us *AND* SQLT
+my $us_and_sqlt = $s->resultset('Distribution')->search (
+  {
+    'deps.dependency' => 'DBIx-Class',
+    'deps_2.dependency' => 'SQL-Translator',
+  },
+  {
+    join => [qw/deps deps/],
+    order_by => 'me.author',
+    select => [ 'me.distribution', 'me.author', map { "$_.phase" } (qw/deps deps_2/)],
+    as => [qw/dist_name dist_author req_dbic_at req_sqlt_at/],
+    result_class => 'DBIx::Class::ResultClass::HashRefInflator',
+  },
+);
+
+print Dumper [$us_and_sqlt->all];


Property changes on: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/joint_deps.pl
___________________________________________________________________
Added: svn:executable
   + *

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/svn-log.perl
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/svn-log.perl	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/maint/svn-log.perl	2010-05-17 14:31:46 UTC (rev 9401)
@@ -17,8 +17,8 @@
 use XML::Parser;
 
 my %month = qw(
-	Jan 01 Feb 02 Mar 03 Apr 04 May 05 Jun 06
-	Jul 07 Aug 08 Sep 09 Oct 10 Nov 11 Dec 12
+ Jan 01 Feb 02 Mar 03 Apr 04 May 05 Jun 06
+ Jul 07 Aug 08 Sep 09 Oct 10 Nov 11 Dec 12
 );
 
 $Text::Wrap::huge     = "wrap";
@@ -48,28 +48,28 @@
 GetOptions(
   "age=s"      => \$days_back,
   "repo=s"     => \$svn_repo,
-	"help"       => \$send_help,
+  "help"       => \$send_help,
 ) or exit;
 
 # Find the trunk for the current repository if one isn't specified.
 unless (defined $svn_repo) {
-	$svn_repo = `svn info . | grep '^URL: '`;
-	if (length $svn_repo) {
-		chomp $svn_repo;
-		$svn_repo =~ s{^URL\:\s+(.+?)/trunk/?.*$}{$1};
-	}
-	else {
-		$send_help = 1;
-	}
+  $svn_repo = `svn info . | grep '^URL: '`;
+  if (length $svn_repo) {
+    chomp $svn_repo;
+    $svn_repo =~ s{^URL\:\s+(.+?)/trunk/?.*$}{$1};
+  }
+  else {
+    $send_help = 1;
+  }
 }
 
 die(
-	"$0 usage:\n",
-	"  --repo REPOSITORY\n",
-	"  [--age DAYS]\n",
-	"\n",
-	"REPOSITORY must have a trunk subdirectory and a tags directory where\n",
-	"release tags are kept.\n",
+  "$0 usage:\n",
+  "  --repo REPOSITORY\n",
+  "  [--age DAYS]\n",
+  "\n",
+  "REPOSITORY must have a trunk subdirectory and a tags directory where\n",
+  "release tags are kept.\n",
 ) if $send_help;
 
 my $earliest_date = strftime "%F", gmtime(time() - $days_back * 86400);
@@ -81,31 +81,31 @@
 
 open(TAG, "svn -v list $svn_repo/tags|") or die $!;
 while (<TAG>) {
-	# The date is unused, however.
-	next unless (
-		my ($rev, $date, $tag) = m{
-			(\d+).*?(\S\S\S\s+\d\d\s+(?:\d\d\d\d|\d\d:\d\d))\s+(v[0-9_.]+)
-		}x
-	);
+  # The date is unused, however.
+  next unless (
+    my ($rev, $date, $tag) = m{
+      (\d+).*?(\S\S\S\s+\d\d\s+(?:\d\d\d\d|\d\d:\d\d))\s+(v[0-9_.]+)
+    }x
+  );
 
-	my @tag_log = gather_log("$svn_repo/tags/$tag", "--stop-on-copy");
-	die "Tag $tag has changes after tagging!\n" if @tag_log > 1;
+  my @tag_log = gather_log("$svn_repo/tags/$tag", "--stop-on-copy");
+  die "Tag $tag has changes after tagging!\n" if @tag_log > 1;
 
-	my $timestamp = $tag_log[0][LOG_DATE];
-	$tag{$timestamp} = [
-		$rev,     # TAG_REV
-		$tag,     # TAG_TAG
-		[ ],      # TAG_LOG
-	];
+  my $timestamp = $tag_log[0][LOG_DATE];
+  $tag{$timestamp} = [
+    $rev,     # TAG_REV
+    $tag,     # TAG_TAG
+    [ ],      # TAG_LOG
+  ];
 }
 close TAG;
 
 # Fictitious "HEAD" tag for revisions that came after the last tag.
 
 $tag{+MAX_TIMESTAMP} = [
-	"HEAD",         # TAG_REV
-	"(untagged)",   # TAG_TAG
-	undef,          # TAG_LOG
+  "HEAD",         # TAG_REV
+  "(untagged)",   # TAG_TAG
+  undef,          # TAG_LOG
 ];
 
 ### 2. Gather the log for the trunk.  Place log entries under their
@@ -114,184 +114,184 @@
 my @tag_dates = sort keys %tag;
 while (my $date = pop(@tag_dates)) {
 
-	# We're done if this date's before our earliest date.
-	if ($date lt $earliest_date) {
-		delete $tag{$date};
-		next;
-	}
+  # We're done if this date's before our earliest date.
+  if ($date lt $earliest_date) {
+    delete $tag{$date};
+    next;
+  }
 
-	my $tag = $tag{$date}[TAG_TAG];
-	#warn "Gathering information for tag $tag...\n";
+  my $tag = $tag{$date}[TAG_TAG];
+  #warn "Gathering information for tag $tag...\n";
 
-	my $this_rev = $tag{$date}[TAG_REV];
-	my $prev_rev;
-	if (@tag_dates) {
-		$prev_rev = $tag{$tag_dates[-1]}[TAG_REV];
-	}
-	else {
-		$prev_rev = 0;
-	}
+  my $this_rev = $tag{$date}[TAG_REV];
+  my $prev_rev;
+  if (@tag_dates) {
+    $prev_rev = $tag{$tag_dates[-1]}[TAG_REV];
+  }
+  else {
+    $prev_rev = 0;
+  }
 
-	my @log = gather_log("$svn_repo/trunk", "-r", "$this_rev:$prev_rev");
+  my @log = gather_log("$svn_repo/trunk", "-r", "$this_rev:$prev_rev");
 
-	$tag{$date}[TAG_LOG] = \@log;
+  $tag{$date}[TAG_LOG] = \@log;
 }
 
 ### 3. PROFIT!  No, wait... generate the nice log file.
 
 foreach my $timestamp (sort { $b cmp $a } keys %tag) {
-	my $tag_rec = $tag{$timestamp};
+  my $tag_rec = $tag{$timestamp};
 
-	# Skip this tag if there are no log entries.
-	next unless @{$tag_rec->[TAG_LOG]};
+  # Skip this tag if there are no log entries.
+  next unless @{$tag_rec->[TAG_LOG]};
 
-	my $tag_line = "$timestamp $tag_rec->[TAG_TAG]";
-	my $tag_bar  = "=" x length($tag_line);
-	print $tag_bar, "\n", $tag_line, "\n", $tag_bar, "\n\n";
+  my $tag_line = "$timestamp $tag_rec->[TAG_TAG]";
+  my $tag_bar  = "=" x length($tag_line);
+  print $tag_bar, "\n", $tag_line, "\n", $tag_bar, "\n\n";
 
-	foreach my $log_rec (@{$tag_rec->[TAG_LOG]}) {
+  foreach my $log_rec (@{$tag_rec->[TAG_LOG]}) {
 
-		my @paths = @{$log_rec->[LOG_PATHS]};
-		if (@paths > 1) {
-			@paths = grep {
-				$_->[PATH_PATH] ne "/trunk" or $_->[PATH_ACTION] ne "M"
-			} @paths;
-		}
+    my @paths = @{$log_rec->[LOG_PATHS]};
+    if (@paths > 1) {
+      @paths = grep {
+        $_->[PATH_PATH] ne "/trunk" or $_->[PATH_ACTION] ne "M"
+      } @paths;
+    }
 
-		my $time_line = wrap(
-			"  ", "  ",
-			join(
-				"; ",
-				"$log_rec->[LOG_DATE] (r$log_rec->[LOG_REV]) by $log_rec->[LOG_WHO]",
-				map { "$_->[PATH_PATH] $_->[PATH_ACTION]" } @paths
-			)
-		);
+    my $time_line = wrap(
+      "  ", "  ",
+      join(
+        "; ",
+        "$log_rec->[LOG_DATE] (r$log_rec->[LOG_REV]) by $log_rec->[LOG_WHO]",
+        map { "$_->[PATH_PATH] $_->[PATH_ACTION]" } @paths
+      )
+    );
 
-		if ($time_line =~ /\n/) {
-			$time_line = wrap(
-				"  ", "  ",
-				"$log_rec->[LOG_DATE] (r$log_rec->[LOG_REV]) by $log_rec->[LOG_WHO]\n"
-			) .
-			wrap(
-				"  ", "  ",
-				join(
-					"; ",
-					map { "$_->[PATH_PATH] $_->[PATH_ACTION]" } @paths
-				)
-			);
-		}
+    if ($time_line =~ /\n/) {
+      $time_line = wrap(
+        "  ", "  ",
+        "$log_rec->[LOG_DATE] (r$log_rec->[LOG_REV]) by $log_rec->[LOG_WHO]\n"
+      ) .
+      wrap(
+        "  ", "  ",
+        join(
+          "; ",
+          map { "$_->[PATH_PATH] $_->[PATH_ACTION]" } @paths
+        )
+      );
+    }
 
-		print $time_line, "\n\n";
+    print $time_line, "\n\n";
 
-		# Blank lines should have the indent level of whitespace.  This
-		# makes it easier for other utilities to parse them.
+    # Blank lines should have the indent level of whitespace.  This
+    # makes it easier for other utilities to parse them.
 
-		my @paragraphs = split /\n\s*\n/, $log_rec->[LOG_MESSAGE];
-		foreach my $paragraph (@paragraphs) {
+    my @paragraphs = split /\n\s*\n/, $log_rec->[LOG_MESSAGE];
+    foreach my $paragraph (@paragraphs) {
 
-			# Trim off identical leading space from every line.
-			my ($whitespace) = $paragraph =~ /^(\s*)/;
-			if (length $whitespace) {
-				$paragraph =~ s/^$whitespace//mg;
-			}
+      # Trim off identical leading space from every line.
+      my ($whitespace) = $paragraph =~ /^(\s*)/;
+      if (length $whitespace) {
+        $paragraph =~ s/^$whitespace//mg;
+      }
 
-			# Re-flow the paragraph if it isn't indented from the norm.
-			# This should preserve indented quoted text, wiki-style.
-			unless ($paragraph =~ /^\s/) {
-				$paragraph = fill("    ", "    ", $paragraph);
-			}
-		}
+      # Re-flow the paragraph if it isn't indented from the norm.
+      # This should preserve indented quoted text, wiki-style.
+      unless ($paragraph =~ /^\s/) {
+        $paragraph = fill("    ", "    ", $paragraph);
+      }
+    }
 
-		print join("\n    \n", @paragraphs), "\n\n";
-	}
+    print join("\n    \n", @paragraphs), "\n\n";
+  }
 }
 
 print(
-	"==============\n",
-	"End of Excerpt\n",
-	"==============\n",
+  "==============\n",
+  "End of Excerpt\n",
+  "==============\n",
 );
 
 ### Z. Helper functions.
 
 sub gather_log {
-	my ($url, @flags) = @_;
+  my ($url, @flags) = @_;
 
-	my (@log, @stack);
+  my (@log, @stack);
 
-	my $parser = XML::Parser->new(
-		Handlers => {
-			Start => sub {
-				my ($self, $tag, %att) = @_;
-				push @stack, [ $tag, \%att ];
-				if ($tag eq "logentry") {
-					push @log, [ ];
-					$log[-1][LOG_WHO] = "(nobody)";
-				}
-			},
-			Char  => sub {
-				my ($self, $text) = @_;
-				$stack[-1][1]{0} .= $text;
-			},
-			End => sub {
-				my ($self, $tag) = @_;
-				die "close $tag w/out open" unless @stack;
-				my ($pop_tag, $att) = @{pop @stack};
+  my $parser = XML::Parser->new(
+    Handlers => {
+      Start => sub {
+        my ($self, $tag, %att) = @_;
+        push @stack, [ $tag, \%att ];
+        if ($tag eq "logentry") {
+          push @log, [ ];
+          $log[-1][LOG_WHO] = "(nobody)";
+        }
+      },
+      Char  => sub {
+        my ($self, $text) = @_;
+        $stack[-1][1]{0} .= $text;
+      },
+      End => sub {
+        my ($self, $tag) = @_;
+        die "close $tag w/out open" unless @stack;
+        my ($pop_tag, $att) = @{pop @stack};
 
-				die "$tag ne $pop_tag" if $tag ne $pop_tag;
+        die "$tag ne $pop_tag" if $tag ne $pop_tag;
 
-				if ($tag eq "date") {
-					my $timestamp = $att->{0};
-					my ($date, $time) = split /[T.]/, $timestamp;
-					$log[-1][LOG_DATE] = "$date $time";
-					return;
-				}
+        if ($tag eq "date") {
+          my $timestamp = $att->{0};
+          my ($date, $time) = split /[T.]/, $timestamp;
+          $log[-1][LOG_DATE] = "$date $time";
+          return;
+        }
 
-				if ($tag eq "logentry") {
-					$log[-1][LOG_REV] = $att->{revision};
-					return;
-				}
+        if ($tag eq "logentry") {
+          $log[-1][LOG_REV] = $att->{revision};
+          return;
+        }
 
-				if ($tag eq "msg") {
-					$log[-1][LOG_MESSAGE] = $att->{0};
-					return;
-				}
+        if ($tag eq "msg") {
+          $log[-1][LOG_MESSAGE] = $att->{0};
+          return;
+        }
 
-				if ($tag eq "author") {
-					$log[-1][LOG_WHO] = $att->{0};
-					return;
-				}
+        if ($tag eq "author") {
+          $log[-1][LOG_WHO] = $att->{0};
+          return;
+        }
 
-				if ($tag eq "path") {
-					my $path = $att->{0};
-					$path =~ s{^/trunk/}{};
-					push(
-						@{$log[-1][LOG_PATHS]}, [
-							$path,            # PATH_PATH
-							$att->{action},   # PATH_ACTION
-						]
-					);
+        if ($tag eq "path") {
+          my $path = $att->{0};
+          $path =~ s{^/trunk/}{};
+          push(
+            @{$log[-1][LOG_PATHS]}, [
+              $path,            # PATH_PATH
+              $att->{action},   # PATH_ACTION
+            ]
+          );
 
-					$log[-1][LOG_PATHS][-1][PATH_CPF_PATH] = $att->{"copyfrom-path"} if (
-						exists $att->{"copyfrom-path"}
-					);
+          $log[-1][LOG_PATHS][-1][PATH_CPF_PATH] = $att->{"copyfrom-path"} if (
+            exists $att->{"copyfrom-path"}
+          );
 
-					$log[-1][LOG_PATHS][-1][PATH_CPF_REV] = $att->{"copyfrom-rev"} if (
-						exists $att->{"copyfrom-rev"}
-					);
-					return;
-				}
+          $log[-1][LOG_PATHS][-1][PATH_CPF_REV] = $att->{"copyfrom-rev"} if (
+            exists $att->{"copyfrom-rev"}
+          );
+          return;
+        }
 
-			}
-		}
-	);
+      }
+    }
+  );
 
-	my $cmd = "svn -v --xml @flags log $url";
-	#warn "Command: $cmd\n";
+  my $cmd = "svn -v --xml @flags log $url";
+  #warn "Command: $cmd\n";
 
-	open(LOG, "$cmd|") or die $!;
-	$parser->parse(*LOG);
-	close LOG;
+  open(LOG, "$cmd|") or die $!;
+  $parser->parse(*LOG);
+  close LOG;
 
-	return @log;
+  return @log;
 }

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/script/dbicadmin
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/script/dbicadmin	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/script/dbicadmin	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,221 +1,134 @@
 #!/usr/bin/perl
+
 use strict;
 use warnings;
 
-use Getopt::Long;
-use Pod::Usage;
-use JSON::Any;
+BEGIN {
+  use DBIx::Class;
+  die (  'The following modules are required for the dbicadmin utility: '
+       . DBIx::Class::Optional::Dependencies->req_missing_for ('admin_script')
+       . "\n"
+  ) unless DBIx::Class::Optional::Dependencies->req_ok_for ('admin_script');
+}
 
+use DBIx::Class::Admin::Descriptive;
+#use Getopt::Long::Descriptive;
+use DBIx::Class::Admin;
 
-my $json = JSON::Any->new(allow_barekey => 1, allow_singlequote => 1);
+my $short_description = "utility for administrating DBIx::Class schemata";
+my $synopsis_text =q|
+  deploy a schema to a database
+  %c --schema=MyApp::Schema \
+    --connect='["dbi:SQLite:my.db", "", ""]' \
+    --deploy
 
-GetOptions(
-    'schema=s'  => \my $schema_class,
-    'class=s'   => \my $resultset_class,
-    'connect=s' => \my $connect,
-    'op=s'      => \my $op,
-    'set=s'     => \my $set,
-    'where=s'   => \my $where,
-    'attrs=s'   => \my $attrs,
-    'format=s'  => \my $format,
-    'force'     => \my $force,
-    'trace'     => \my $trace,
-    'quiet'     => \my $quiet,
-    'help'      => \my $help,
-    'tlibs'      => \my $t_libs,
+  update an existing record
+  %c --schema=MyApp::Schema --class=Employee \
+    --connect='["dbi:SQLite:my.db", "", ""]' \
+    --op=update --set='{ "name": "New_Employee" }'
+|;
+
+my ($opts, $usage) = describe_options(
+    "%c: %o",
+  (
+    ['Actions'],
+    ["action" => hidden => { one_of => [
+      ['create' => 'Create version diffs, needs --preversion',],
+      ['upgrade' => 'Upgrade the database to the current schema '],
+      ['install' => 'Install the schema version tables to an existing database',],
+      ['deploy' => 'Deploy the schema to the database',],
+      ['select'   => 'Select data from the schema', ],
+      ['insert'   => 'Insert data into the schema', ],
+      ['update'   => 'Update data in the schema', ], 
+      ['delete'   => 'Delete data from the schema',],
+      ['op:s' => 'compatibility option, all of the above can be supplied as --op=<action>'],
+      ['help' => 'display this help', { implies => { schema_class => '__dummy__' } } ],
+      ['selfinject-pod' => 'hidden', { implies => { schema_class => '__dummy__' } } ],
+    ], required=> 1 }],
+    ['Arguments'],
+    ['schema-class:s' => 'The class of the schema to load', { required => 1 } ],
+    ['resultset|resultset-class|class:s' => 'The resultset to operate on for data manipulation' ],
+    ['config-stanza:s' => 'Where in the config to find the connection_info, supply in form MyApp::Model::DB',],
+    ['config:s' => 'Supply the config file for parsing by Config::Any', { depends => 'config_stanza'} ],
+    ['connect-info:s%' => 'Supply the connect info as additional options ie -I dsn=<dsn> user=<user> password=<pass> '],
+    ['connect:s' => 'Supply the connect info as a json string' ],
+    ['sql-dir:s' => 'The directory where sql diffs will be created'],
+    ['sql-type:s' => 'The RDBMs flavour you wish to use'],
+    ['version:i' => 'Supply a version to install'],
+    ['preversion:s' => 'The previous version to diff against',],
+    ['set:s' => 'JSON data used to perform data operations' ],
+    ['attrs:s' => 'JSON string to be used for the second argument for search'],
+    ['where:s' => 'JSON string to be used for the where clause of search'],
+    ['force' => 'Be forceful with some operations'],
+    ['trace' => 'Turn on DBIx::Class trace output'],
+    ['quiet' => 'Be less verbose'],
+  )
 );
 
-if ($t_libs) {
-    unshift( @INC, 't/lib', 'lib' );
-}
+die "please only use one of --config or --connect-info\n" if ($opts->{config} and $opts->{connect_info});
 
-pod2usage(1) if ($help);
-$ENV{DBIX_CLASS_STORAGE_DBI_DEBUG} = 1 if ($trace);
+if($opts->{selfinject_pod}) {
 
-die('No op specified') if(!$op);
-die('Invalid op') if ($op!~/^insert|update|delete|select$/s);
-my $csv_class;
-if ($op eq 'select') {
-    $format ||= 'tsv';
-    die('Invalid format') if ($format!~/^tsv|csv$/s);
-    $csv_class = 'Text::CSV_XS';
-    eval{ require Text::CSV_XS };
-    if ($@) {
-        $csv_class = 'Text::CSV_PP';
-        eval{ require Text::CSV_PP };
-        die('The select op requires either the Text::CSV_XS or the Text::CSV_PP module') if ($@);
-    }
+    die "This is an internal method, do not call!!!\n"
+      unless $ENV{MAKELEVEL};
+
+    $usage->synopsis($synopsis_text);
+    $usage->short_description($short_description);
+    exec (
+      $^X,
+      qw/-p -0777 -i -e/,
+      (
+        's/^# auto_pod_begin.*^# auto_pod_end/'
+      . quotemeta($usage->pod)
+      . '/ms'
+      ),
+      __FILE__
+    );
 }
 
-die('No schema specified') if(!$schema_class);
-eval("require $schema_class");
-die('Unable to load schema') if ($@);
-$connect = $json->jsonToObj( $connect ) if ($connect);
-my $schema = $schema_class->connect(
-    ( $connect ? @$connect : () )
-);
-
-die('No class specified') if(!$resultset_class);
-my $resultset = eval{ $schema->resultset($resultset_class) };
-die('Unable to load the class with the schema') if ($@);
-
-$set = $json->jsonToObj( $set ) if ($set);
-$where = $json->jsonToObj( $where ) if ($where);
-$attrs = $json->jsonToObj( $attrs ) if ($attrs);
-
-if ($op eq 'insert') {
-    die('Do not use the where option with the insert op') if ($where);
-    die('Do not use the attrs option with the insert op') if ($attrs);
-    my $obj = $resultset->create( $set );
-    print ''.ref($resultset).' ID: '.join(',',$obj->id())."\n" if (!$quiet);
+if($opts->{help}) {
+    $usage->die();
 }
-elsif ($op eq 'update') {
-    $resultset = $resultset->search( ($where||{}) );
-    my $count = $resultset->count();
-    print "This action will modify $count ".ref($resultset)." records.\n" if (!$quiet);
-    if ( $force || confirm() ) {
-        $resultset->update_all( $set );
-    }
-}
-elsif ($op eq 'delete') {
-    die('Do not use the set option with the delete op') if ($set);
-    $resultset = $resultset->search( ($where||{}), ($attrs||()) );
-    my $count = $resultset->count();
-    print "This action will delete $count ".ref($resultset)." records.\n" if (!$quiet);
-    if ( $force || confirm() ) {
-        $resultset->delete_all();
-    }
-}
-elsif ($op eq 'select') {
-    die('Do not use the set option with the select op') if ($set);
-    my $csv = $csv_class->new({
-        sep_char => ( $format eq 'tsv' ? "\t" : ',' ),
-    });
-    $resultset = $resultset->search( ($where||{}), ($attrs||()) );
-    my @columns = $resultset->result_source->columns();
-    $csv->combine( @columns );
-    print $csv->string()."\n";
-    while (my $row = $resultset->next()) {
-        my @fields;
-        foreach my $column (@columns) {
-            push( @fields, $row->get_column($column) );
-        }
-        $csv->combine( @fields );
-        print $csv->string()."\n";
-    }
-}
 
-sub confirm {
-    print "Are you sure you want to do this? (type YES to confirm) ";
-    my $response = <STDIN>;
-    return 1 if ($response=~/^YES/);
-    return;
+# option compatibility mangle
+if($opts->{connect}) {
+  $opts->{connect_info} = delete $opts->{connect};
 }
 
-__END__
+my $admin = DBIx::Class::Admin->new( %$opts );
 
-=head1 NAME
 
-dbicadmin - Execute operations upon DBIx::Class objects.
+my $action = $opts->{action};
 
-=head1 SYNOPSIS
+$action = $opts->{op} if ($action eq 'op');
 
-  dbicadmin --op=insert --schema=My::Schema --class=Class --set=JSON
-  dbicadmin --op=update --schema=My::Schema --class=Class --set=JSON --where=JSON
-  dbicadmin --op=delete --schema=My::Schema --class=Class --where=JSON
-  dbicadmin --op=select --schema=My::Schema --class=Class --where=JSON --format=tsv
+print "Performing action $action...\n";
 
-=head1 DESCRIPTION
+my $res = $admin->$action();
+if ($action eq 'select') {
 
-This utility provides the ability to run INSERTs, UPDATEs, 
-DELETEs, and SELECTs on any DBIx::Class object.
+  my $format = $opts->{format} || 'tsv';
+  die('Invalid format') if ($format!~/^tsv|csv$/s);
 
-=head1 OPTIONS
+  require Text::CSV;
 
-=head2 op
+  my $csv = Text::CSV->new({
+    sep_char => ( $format eq 'tsv' ? "\t" : ',' ),
+  });
 
-The type of operation.  Valid values are insert, update, delete, 
-and select.
+  foreach my $row (@$res) {
+    $csv->combine( @$row );
+    print $csv->string()."\n";
+  }
+}
 
-=head2 schema
 
-The name of your schema class.
+__END__
 
-=head2 class
+# auto_pod_begin
+#
+# This will be replaced by the actual pod when selfinject-pod is invoked
+#
+# auto_pod_end
 
-The name of the class, within your schema, that you want to run 
-the operation on.
-
-=head2 connect
-
-A JSON array to be passed to your schema class upon connecting.  
-The array will need to be compatible with whatever the DBIC 
-->connect() method requires.
-
-=head2 set
-
-This option must be valid JSON data string and is passed in to 
-the DBIC update() method.  Use this option with the update 
-and insert ops.
-
-=head2 where
-
-This option must be valid JSON data string and is passed in as 
-the first argument to the DBIC search() method.  Use this 
-option with the update, delete, and select ops.
-
-=head2 attrs
-
-This option must be valid JSON data string and is passed in as 
-the second argument to the DBIC search() method.  Use this 
-option with the update, delete, and select ops.
-
-=head2 help
-
-Display this help page.
-
-=head2 force
-
-Suppresses the confirmation dialogues that are usually displayed 
-when someone runs a DELETE or UPDATE action.
-
-=head2 quiet
-
-Do not display status messages.
-
-=head2 trace
-
-Turns on tracing on the DBI storage, thus printing SQL as it is 
-executed.
-
-=head2 tlibs
-
-This option is purely for testing during the DBIC installation.  Do 
-not use it.
-
-=head1 JSON
-
-JSON is a lightweight data-interchange format.  It allows you 
-to express complex data structures for use in the where and 
-set options.
-
-This module turns on L<JSON>'s BareKey and QuotApos options so 
-that your data can look a bit more readable.
-
-  --where={"this":"that"} # generic JSON
-  --where={this:'that'}   # with BareKey and QuoteApos
-
-Consider wrapping your JSON in outer quotes so that you don't 
-have to escape your inner quotes.
-
-  --where={this:\"that\"} # no outer quote
-  --where='{this:"that"}' # outer quoted
-
-=head1 AUTHOR
-
-Aran Deltac <bluefeet at cpan.org>
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
+# vim: et ft=perl

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/02pod.t
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/02pod.t	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/02pod.t	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,6 +1,21 @@
+use warnings;
+use strict;
+
 use Test::More;
+use lib qw(t/lib);
+use DBICTest;
 
-eval "use Test::Pod 1.14";
-plan skip_all => 'Test::Pod 1.14 required' if $@;
+# Don't run tests for installs
+unless ( DBICTest::AuthorCheck->is_author || $ENV{AUTOMATED_TESTING} || $ENV{RELEASE_TESTING} ) {
+  plan( skip_all => "Author tests not required for installation" );
+}
 
-all_pod_files_ok();
+require DBIx::Class;
+unless ( DBIx::Class::Optional::Dependencies->req_ok_for ('test_pod') ) {
+  my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('test_pod');
+  $ENV{RELEASE_TESTING} || DBICTest::AuthorCheck->is_author
+    ? die ("Failed to load release-testing module requirements: $missing")
+    : plan skip_all => "Test needs: $missing"
+}
+
+Test::Pod::all_pod_files_ok();

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/03podcoverage.t
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/03podcoverage.t	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/03podcoverage.t	2010-05-17 14:31:46 UTC (rev 9401)
@@ -1,20 +1,32 @@
+use warnings;
+use strict;
+
 use Test::More;
+use List::Util ();
+use lib qw(t/lib);
+use DBICTest;
 
-eval "use Pod::Coverage 0.19";
-plan skip_all => 'Pod::Coverage 0.19 required' if $@;
-eval "use Test::Pod::Coverage 1.04";
-plan skip_all => 'Test::Pod::Coverage 1.04 required' if $@;
+# Don't run tests for installs
+unless ( DBICTest::AuthorCheck->is_author || $ENV{AUTOMATED_TESTING} || $ENV{RELEASE_TESTING} ) {
+  plan( skip_all => "Author tests not required for installation" );
+}
 
-plan skip_all => 'set TEST_POD to enable this test'
-  unless ($ENV{TEST_POD} || -e 'MANIFEST.SKIP');
+require DBIx::Class;
+unless ( DBIx::Class::Optional::Dependencies->req_ok_for ('test_podcoverage') ) {
+  my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('test_podcoverage');
+  $ENV{RELEASE_TESTING} || DBICTest::AuthorCheck->is_author
+    ? die ("Failed to load release-testing module requirements: $missing")
+    : plan skip_all => "Test needs: $missing"
+}
 
-my @modules = sort { $a cmp $b } (Test::Pod::Coverage::all_modules());
-plan tests => scalar(@modules);
-
 # Since this is about checking documentation, a little documentation
-# of what this is doing might be in order...
+# of what this is doing might be in order.
 # The exceptions structure below is a hash keyed by the module
-# name.  The value for each is a hash, which contains one or more
+# name. Any * in a name is treated like a wildcard and will behave
+# as expected. Modules are matched by longest string first, so 
+# A::B::C will match even if there is A::B*
+
+# The value for each is a hash, which contains one or more
 # (although currently more than one makes no sense) of the following
 # things:-
 #   skip   => a true value means this module is not checked
@@ -22,131 +34,124 @@
 #             do not need to be documented.
 my $exceptions = {
     'DBIx::Class' => {
-        ignore => [
-            qw/MODIFY_CODE_ATTRIBUTES
-              component_base_class
-              mk_classdata
-              mk_classaccessor/
-        ]
+        ignore => [qw/
+            MODIFY_CODE_ATTRIBUTES
+            component_base_class
+            mk_classdata
+            mk_classaccessor
+        /]
     },
     'DBIx::Class::Row' => {
-        ignore => [
-           qw( MULTICREATE_DEBUG )
-        ],
+        ignore => [qw/
+            MULTICREATE_DEBUG
+        /],
     },
+    'DBIx::Class::FilterColumn' => {
+        ignore => [qw/
+            new
+            update
+            store_column
+            get_column
+            get_columns
+        /],
+    },
     'DBIx::Class::ResultSource' => {
         ignore => [qw/
-          compare_relationship_keys
-          pk_depends_on
-          resolve_condition
-          resolve_join
-          resolve_prefetch
+            compare_relationship_keys
+            pk_depends_on
+            resolve_condition
+            resolve_join
+            resolve_prefetch
         /],
     },
+    'DBIx::Class::ResultSourceHandle' => {
+        ignore => [qw/
+            schema
+            source_moniker
+        /],
+    },
     'DBIx::Class::Storage' => {
-        ignore => [
-            qw(cursor)
-        ]
+        ignore => [qw/
+            schema
+            cursor
+        /]
     },
     'DBIx::Class::Schema' => {
-        ignore => [
-            qw(setup_connection_class)
-        ]
+        ignore => [qw/
+            setup_connection_class
+        /]
     },
-    'DBIx::Class::Storage::DBI::Sybase' => {
-        ignore => [
-            qw/should_quote_data_type/,
-        ]
+
+    'DBIx::Class::Schema::Versioned' => {
+        ignore => [ qw/
+            connection
+        /]
     },
-    'DBIx::Class::CDBICompat::AccessorMapping'          => { skip => 1 },
-    'DBIx::Class::CDBICompat::AbstractSearch' => {
-        ignore => [qw(search_where)]
+
+    'DBIx::Class::Storage::DBI::Replicated*'        => {
+        ignore => [ qw/
+            connect_call_do_sql
+            disconnect_call_do_sql
+        /]
     },
-    'DBIx::Class::CDBICompat::AttributeAPI'             => { skip => 1 },
-    'DBIx::Class::CDBICompat::AutoUpdate'               => { skip => 1 },
-    'DBIx::Class::CDBICompat::ColumnsAsHash' => {
-        ignore => [qw(inflate_result new update)]
-    },
-    'DBIx::Class::CDBICompat::ColumnCase'               => { skip => 1 },
-    'DBIx::Class::CDBICompat::ColumnGroups'             => { skip => 1 },
-    'DBIx::Class::CDBICompat::Constraints'              => { skip => 1 },
-    'DBIx::Class::CDBICompat::Constructor'              => { skip => 1 },
-    'DBIx::Class::CDBICompat::Copy' => {
-        ignore => [qw(copy)]
-    },
-    'DBIx::Class::CDBICompat::DestroyWarning'           => { skip => 1 },
-    'DBIx::Class::CDBICompat::GetSet'                   => { skip => 1 },
-    'DBIx::Class::CDBICompat::HasA'                     => { skip => 1 },
-    'DBIx::Class::CDBICompat::HasMany'                  => { skip => 1 },
-    'DBIx::Class::CDBICompat::ImaDBI'                   => { skip => 1 },
-    'DBIx::Class::CDBICompat::LazyLoading'              => { skip => 1 },
-    'DBIx::Class::CDBICompat::LiveObjectIndex'          => { skip => 1 },
-    'DBIx::Class::CDBICompat::MightHave'                => { skip => 1 },
-    'DBIx::Class::CDBICompat::NoObjectIndex'            => { skip => 1 },
-    'DBIx::Class::CDBICompat::Pager'                    => { skip => 1 },
-    'DBIx::Class::CDBICompat::ReadOnly'                 => { skip => 1 },
-    'DBIx::Class::CDBICompat::Relationship'             => { skip => 1 },
-    'DBIx::Class::CDBICompat::Relationships'            => { skip => 1 },
-    'DBIx::Class::CDBICompat::Retrieve'                 => { skip => 1 },
-    'DBIx::Class::CDBICompat::SQLTransformer'           => { skip => 1 },
-    'DBIx::Class::CDBICompat::Stringify'                => { skip => 1 },
-    'DBIx::Class::CDBICompat::TempColumns'              => { skip => 1 },
-    'DBIx::Class::CDBICompat::Triggers'                 => { skip => 1 },
-    'DBIx::Class::ClassResolver::PassThrough'           => { skip => 1 },
-    'DBIx::Class::Componentised'                        => { skip => 1 },
-    'DBIx::Class::Relationship::Accessor'               => { skip => 1 },
-    'DBIx::Class::Relationship::BelongsTo'              => { skip => 1 },
-    'DBIx::Class::Relationship::CascadeActions'         => { skip => 1 },
-    'DBIx::Class::Relationship::HasMany'                => { skip => 1 },
-    'DBIx::Class::Relationship::HasOne'                 => { skip => 1 },
-    'DBIx::Class::Relationship::Helpers'                => { skip => 1 },
-    'DBIx::Class::Relationship::ManyToMany'             => { skip => 1 },
-    'DBIx::Class::Relationship::ProxyMethods'           => { skip => 1 },
-    'DBIx::Class::ResultSetProxy'                       => { skip => 1 },
-    'DBIx::Class::ResultSetManager'                     => { skip => 1 },
-    'DBIx::Class::ResultSourceProxy'                    => { skip => 1 },
-    'DBIx::Class::Storage::DBI'                         => { skip => 1 },
-    'DBIx::Class::Storage::DBI::Replicated::Types'      => { skip => 1 },
-    'DBIx::Class::Storage::DBI::DB2'                    => { skip => 1 },
-    'DBIx::Class::Storage::DBI::MSSQL'                  => { skip => 1 },
-    'DBIx::Class::Storage::DBI::Sybase::MSSQL'          => { skip => 1 },
-    'DBIx::Class::Storage::DBI::ODBC400'                => { skip => 1 },
-    'DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL'      => { skip => 1 },
-    'DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server' => { skip => 1 },
-    'DBIx::Class::Storage::DBI::Oracle'                 => { skip => 1 },
-    'DBIx::Class::Storage::DBI::Pg'                     => { skip => 1 },
-    'DBIx::Class::Storage::DBI::SQLite'                 => { skip => 1 },
-    'DBIx::Class::Storage::DBI::mysql'                  => { skip => 1 },
-    'DBIx::Class::SQLAHacks'                            => { skip => 1 },
-    'DBIx::Class::SQLAHacks::MySQL'                     => { skip => 1 },
-    'DBIx::Class::SQLAHacks::MSSQL'                     => { skip => 1 },
-    'SQL::Translator::Parser::DBIx::Class'              => { skip => 1 },
-    'SQL::Translator::Producer::DBIx::Class::File'      => { skip => 1 },
 
-# skipped because the synopsis covers it clearly
+    'DBIx::Class::Admin::*'                         => { skip => 1 },
+    'DBIx::Class::ClassResolver::PassThrough'       => { skip => 1 },
+    'DBIx::Class::Componentised'                    => { skip => 1 },
+    'DBIx::Class::Relationship::*'                  => { skip => 1 },
+    'DBIx::Class::ResultSetProxy'                   => { skip => 1 },
+    'DBIx::Class::ResultSourceProxy'                => { skip => 1 },
+    'DBIx::Class::Storage::Statistics'              => { skip => 1 },
+    'DBIx::Class::Storage::DBI::Replicated::Types'  => { skip => 1 },
 
-    'DBIx::Class::InflateColumn::File'                  => { skip => 1 },
+# test some specific components whose parents are exempt below
+    'DBIx::Class::Relationship::Base'               => {},
 
-# skip connection since it's just an override
+# internals
+    'DBIx::Class::SQLAHacks*'                       => { skip => 1 },
+    'DBIx::Class::Storage::DBI*'                    => { skip => 1 },
+    'SQL::Translator::*'                            => { skip => 1 },
 
-    'DBIx::Class::Schema::Versioned' => { ignore => [ qw(connection) ] },
+# deprecated / backcompat stuff
+    'DBIx::Class::CDBICompat*'                      => { skip => 1 },
+    'DBIx::Class::ResultSetManager'                 => { skip => 1 },
+    'DBIx::Class::DB'                               => { skip => 1 },
 
-# don't bother since it's heavily deprecated
-    'DBIx::Class::ResultSetManager' => { skip => 1 },
+# skipped because the synopsis covers it clearly
+    'DBIx::Class::InflateColumn::File'              => { skip => 1 },
 };
 
+my $ex_lookup = {};
+for my $string (keys %$exceptions) {
+  my $ex = $exceptions->{$string};
+  $string =~ s/\*/'.*?'/ge;
+  my $re = qr/^$string$/;
+  $ex_lookup->{$re} = $ex;
+}
+
+my @modules = sort { $a cmp $b } (Test::Pod::Coverage::all_modules());
+
 foreach my $module (@modules) {
-  SKIP:
-    {
-        skip "$module - No real methods", 1 if ($exceptions->{$module}{skip});
+  SKIP: {
 
-        # build parms up from ignore list
-        my $parms = {};
-        $parms->{trustme} =
-          [ map { qr/^$_$/ } @{ $exceptions->{$module}{ignore} } ]
-          if exists($exceptions->{$module}{ignore});
+    my ($match) = List::Util::first
+      { $module =~ $_ }
+      (sort { length $b <=> length $a || $b cmp $a } (keys %$ex_lookup) )
+    ;
 
-        # run the test with the potentially modified parm set
-        pod_coverage_ok($module, $parms, "$module POD coverage");
-    }
+    my $ex = $ex_lookup->{$match} if $match;
+
+    skip ("$module exempt", 1) if ($ex->{skip});
+
+    # build parms up from ignore list
+    my $parms = {};
+    $parms->{trustme} =
+      [ map { qr/^$_$/ } @{ $ex->{ignore} } ]
+        if exists($ex->{ignore});
+
+    # run the test with the potentially modified parm set
+    Test::Pod::Coverage::pod_coverage_ok($module, $parms, "$module POD coverage");
+  }
 }
+
+done_testing;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/05components.t
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/05components.t	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/05components.t	2010-05-17 14:31:46 UTC (rev 9401)
@@ -7,8 +7,6 @@
 use lib qw(t/lib);
 use DBICTest::ForeignComponent;
 
-plan tests => 6;
-
 #   Tests if foreign component was loaded by calling foreign's method
 ok( DBICTest::ForeignComponent->foreign_test_method, 'foreign component' );
 
@@ -35,32 +33,7 @@
     'inject_base filters duplicates'
 );
 
-# Test for a warning with incorrect order in load_components
-my @warnings = ();
-{
-  package A::Test;
-  our @ISA = 'DBIx::Class';
-  {
-    local $SIG{__WARN__} = sub { push @warnings, shift};
-    __PACKAGE__->load_components(qw(Core UTF8Columns));
-  }
-}
-like( $warnings[0], qr/Core loaded before UTF8Columns/,
-      'warning issued for incorrect order in load_components()' );
-is( scalar @warnings, 1,
-    'only one warning issued for incorrect load_components call' );
-
-# Test that no warning is issued for the correct order in load_components
-{
-  @warnings = ();
-  package B::Test;
-  our @ISA = 'DBIx::Class';
-  {
-    local $SIG{__WARN__} = sub { push @warnings, shift };
-    __PACKAGE__->load_components(qw(UTF8Columns Core));
-  }
-}
-is( scalar @warnings, 0,
-    'warning not issued for correct order in load_components()' );
-
 use_ok('DBIx::Class::AccessorGroup');
+use_ok('DBIx::Class::Componentised');
+
+done_testing;

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/06notabs.t
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/06notabs.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/06notabs.t	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,24 @@
+use warnings;
+use strict;
+
+use Test::More;
+use lib 't/lib';
+use DBICTest;
+
+# Don't run tests for installs
+unless ( DBICTest::AuthorCheck->is_author || $ENV{AUTOMATED_TESTING} || $ENV{RELEASE_TESTING} ) {
+  plan( skip_all => "Author tests not required for installation" );
+}
+
+require DBIx::Class;
+unless ( DBIx::Class::Optional::Dependencies->req_ok_for ('test_notabs') ) {
+  my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('test_notabs');
+  $ENV{RELEASE_TESTING} || DBICTest::AuthorCheck->is_author
+    ? die ("Failed to load release-testing module requirements: $missing")
+    : plan skip_all => "Test needs: $missing"
+}
+
+Test::NoTabs::all_perl_files_ok(qw/t lib script maint/);
+
+# FIXME - need to fix Test::NoTabs
+#done_testing;

Added: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/07eol.t
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/07eol.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/07eol.t	2010-05-17 14:31:46 UTC (rev 9401)
@@ -0,0 +1,29 @@
+use warnings;
+use strict;
+
+use Test::More;
+use lib 't/lib';
+use DBICTest;
+
+# Don't run tests for installs
+unless ( DBICTest::AuthorCheck->is_author || $ENV{AUTOMATED_TESTING} || $ENV{RELEASE_TESTING} ) {
+  plan( skip_all => "Author tests not required for installation" );
+}
+
+plan skip_all => 'Test::EOL very broken';
+
+require DBIx::Class;
+unless ( DBIx::Class::Optional::Dependencies->req_ok_for ('test_eol') ) {
+  my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('test_eol');
+  $ENV{RELEASE_TESTING} || DBICTest::AuthorCheck->is_author
+    ? die ("Failed to load release-testing module requirements: $missing")
+    : plan skip_all => "Test needs: $missing"
+}
+
+TODO: {
+  local $TODO = 'Do not fix those yet - we have way too many branches out there, merging will be hell';
+  Test::EOL::all_perl_files_ok({ trailing_whitespace => 1}, qw/t lib script maint/);
+}
+
+# FIXME - need to fix Test::EOL
+#done_testing;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/100populate.t
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/100populate.t	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/100populate.t	2010-05-17 14:31:46 UTC (rev 9401)
@@ -5,9 +5,8 @@
 use Test::Exception;
 use lib qw(t/lib);
 use DBICTest;
+use Path::Class::File ();
 
-plan tests => 23;
-
 my $schema = DBICTest->init_schema();
 
 # The map below generates stuff like:
@@ -116,3 +115,205 @@
 is($link7->url, undef, 'Link 7 url');
 is($link7->title, 'gtitle', 'Link 7 title');
 
+my $rs = $schema->resultset('Artist');
+$rs->delete;
+
+# test _execute_array_empty (insert_bulk with all literal sql)
+
+$rs->populate([
+    (+{
+        name => \"'DT'",
+        rank => \500,
+        charfield => \"'mtfnpy'",
+    }) x 5
+]);
+
+is((grep {
+  $_->name eq 'DT' &&
+  $_->rank == 500  &&
+  $_->charfield eq 'mtfnpy'
+} $rs->all), 5, 'populate with all literal SQL');
+
+$rs->delete;
+
+# test mixed binds with literal sql
+
+$rs->populate([
+    (+{
+        name => \"'DT'",
+        rank => 500,
+        charfield => \"'mtfnpy'",
+    }) x 5
+]);
+
+is((grep {
+  $_->name eq 'DT' &&
+  $_->rank == 500  &&
+  $_->charfield eq 'mtfnpy'
+} $rs->all), 5, 'populate with all literal SQL');
+
+$rs->delete;
+
+###
+
+throws_ok {
+    $rs->populate([
+        {
+            artistid => 1,
+            name => 'foo1',
+        },
+        {
+            artistid => 'foo', # this dies
+            name => 'foo2',
+        },
+        {
+            artistid => 3,
+            name => 'foo3',
+        },
+    ]);
+} qr/slice/, 'bad slice';
+
+is($rs->count, 0, 'populate is atomic');
+
+# Trying to use a column marked as a bind in the first slice with literal sql in
+# a later slice should throw.
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => 1,
+      name => \"'foo'",
+    },
+    {
+      artistid => \2,
+      name => \"'foo'",
+    }
+  ]);
+} qr/bind expected/, 'literal sql where bind expected throws';
+
+# ... and vice-versa.
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => \1,
+      name => \"'foo'",
+    },
+    {
+      artistid => 2,
+      name => \"'foo'",
+    }
+  ]);
+} qr/literal SQL expected/i, 'bind where literal sql expected throws';
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => 1,
+      name => \"'foo'",
+    },
+    {
+      artistid => 2,
+      name => \"'bar'",
+    }
+  ]);
+} qr/inconsistent/, 'literal sql must be the same in all slices';
+
+# the stringification has nothing to do with the artist name
+# this is solely for testing consistency
+my $fn = Path::Class::File->new ('somedir/somefilename.tmp');
+my $fn2 = Path::Class::File->new ('somedir/someotherfilename.tmp');
+
+lives_ok {
+  $rs->populate([
+    {
+      name => 'supplied before stringifying object',
+    },
+    {
+      name => $fn,
+    }
+  ]);
+} 'stringifying objects pass through';
+
+# ... and vice-versa.
+
+lives_ok {
+  $rs->populate([
+    {
+      name => $fn2,
+    },
+    {
+      name => 'supplied after stringifying object',
+    },
+  ]);
+} 'stringifying objects pass through';
+
+for (
+  $fn,
+  $fn2,
+  'supplied after stringifying object',
+  'supplied before stringifying object'
+) {
+  my $row = $rs->find ({name => $_});
+  ok ($row, "Stringification test row '$_' properly inserted");
+}
+
+$rs->delete;
+
+# test stringification with ->create rather than Storage::insert_bulk as well
+
+lives_ok {
+  my @dummy = $rs->populate([
+    {
+      name => 'supplied before stringifying object',
+    },
+    {
+      name => $fn,
+    }
+  ]);
+} 'stringifying objects pass through';
+
+# ... and vice-versa.
+
+lives_ok {
+  my @dummy = $rs->populate([
+    {
+      name => $fn2,
+    },
+    {
+      name => 'supplied after stringifying object',
+    },
+  ]);
+} 'stringifying objects pass through';
+
+for (
+  $fn,
+  $fn2,
+  'supplied after stringifying object',
+  'supplied before stringifying object'
+) {
+  my $row = $rs->find ({name => $_});
+  ok ($row, "Stringification test row '$_' properly inserted");
+}
+
+lives_ok {
+   $schema->resultset('TwoKeys')->populate([{
+      artist => 1,
+      cd     => 5,
+      fourkeys_to_twokeys => [{
+            f_foo => 1,
+            f_bar => 1,
+            f_hello => 1,
+            f_goodbye => 1,
+            autopilot => 'a',
+      },{
+            f_foo => 2,
+            f_bar => 2,
+            f_hello => 2,
+            f_goodbye => 2,
+            autopilot => 'b',
+      }]
+   }])
+} 'multicol-PK has_many populate works';
+
+done_testing;

Modified: DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/101populate_rs.t
===================================================================
--- DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/101populate_rs.t	2010-05-17 14:31:32 UTC (rev 9400)
+++ DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/view-deps/t/101populate_rs.t	2010-05-17 14:31:46 UTC (rev 9401)
@@ -15,17 +15,17 @@
 use lib qw(t/lib);
 use DBICTest;
 
-plan tests => 142;
 
-
 ## ----------------------------------------------------------------------------
 ## Get a Schema and some ResultSets we can play with.
 ## ----------------------------------------------------------------------------
 
-my $schema	= DBICTest->init_schema();
-my $art_rs	= $schema->resultset('Artist');
-my $cd_rs	= $schema->resultset('CD');
+my $schema  = DBICTest->init_schema();
+my $art_rs  = $schema->resultset('Artist');
+my $cd_rs  = $schema->resultset('CD');
 
+my $restricted_art_rs  = $art_rs->search({rank => 42});
+
 ok( $schema, 'Got a Schema object');
 ok( $art_rs, 'Got Good Artist Resultset');
 ok( $cd_rs, 'Got Good CD Resultset');
@@ -37,87 +37,87 @@
 
 SCHEMA_POPULATE1: {
 
-	## Test to make sure that the old $schema->populate is using the new method
-	## for $resultset->populate when in void context and with sub objects.
-	
-	$schema->populate('Artist', [
-	
-		[qw/name cds/],
-		["001First Artist", [
-			{title=>"001Title1", year=>2000},
-			{title=>"001Title2", year=>2001},
-			{title=>"001Title3", year=>2002},
-		]],
-		["002Second Artist", []],
-		["003Third Artist", [
-			{title=>"003Title1", year=>2005},
-		]],
-		[undef, [
-			{title=>"004Title1", year=>2010}
-		]],
-	]);
-	
-	isa_ok $schema, 'DBIx::Class::Schema';
-	
-	my ($undef, $artist1, $artist2, $artist3 ) = $schema->resultset('Artist')->search({
-		name=>["001First Artist","002Second Artist","003Third Artist", undef]},
-		{order_by=>'name ASC'})->all;
-	
-	isa_ok  $artist1, 'DBICTest::Artist';
-	isa_ok  $artist2, 'DBICTest::Artist';
-	isa_ok  $artist3, 'DBICTest::Artist';
-	isa_ok  $undef, 'DBICTest::Artist';	
-	
-	ok $artist1->name eq '001First Artist', "Got Expected Artist Name for Artist001";
-	ok $artist2->name eq '002Second Artist', "Got Expected Artist Name for Artist002";
-	ok $artist3->name eq '003Third Artist', "Got Expected Artist Name for Artist003";
-	ok !defined $undef->name, "Got Expected Artist Name for Artist004";	
-	
-	ok $artist1->cds->count eq 3, "Got Right number of CDs for Artist1";
-	ok $artist2->cds->count eq 0, "Got Right number of CDs for Artist2";
-	ok $artist3->cds->count eq 1, "Got Right number of CDs for Artist3";
-	ok $undef->cds->count eq 1, "Got Right number of CDs for Artist4";	
-	
-	ARTIST1CDS: {
-	
-		my ($cd1, $cd2, $cd3) = $artist1->cds->search(undef, {order_by=>'year ASC'});
-		
-		isa_ok $cd1, 'DBICTest::CD';
-		isa_ok $cd2, 'DBICTest::CD';
-		isa_ok $cd3, 'DBICTest::CD';
-		
-		ok $cd1->year == 2000;
-		ok $cd2->year == 2001;
-		ok $cd3->year == 2002;
-		
-		ok $cd1->title eq '001Title1';
-		ok $cd2->title eq '001Title2';
-		ok $cd3->title eq '001Title3';
-	}
-	
-	ARTIST3CDS: {
-	
-		my ($cd1) = $artist3->cds->search(undef, {order_by=>'year ASC'});
-		
-		isa_ok $cd1, 'DBICTest::CD';
+  ## Test to make sure that the old $schema->populate is using the new method
+  ## for $resultset->populate when in void context and with sub objects.
 
-		ok $cd1->year == 2005;
-		ok $cd1->title eq '003Title1';
-	}
+  $schema->populate('Artist', [
 
-	ARTIST4CDS: {
-	
-		my ($cd1) = $undef->cds->search(undef, {order_by=>'year ASC'});
-		
-		isa_ok $cd1, 'DBICTest::CD';
+    [qw/name cds/],
+    ["001First Artist", [
+      {title=>"001Title1", year=>2000},
+      {title=>"001Title2", year=>2001},
+      {title=>"001Title3", year=>2002},
+    ]],
+    ["002Second Artist", []],
+    ["003Third Artist", [
+      {title=>"003Title1", year=>2005},
+    ]],
+    [undef, [
+      {title=>"004Title1", year=>2010}
+    ]],
+  ]);
 
-		ok $cd1->year == 2010;
-		ok $cd1->title eq '004Title1';
-	}
-	
-	## Need to do some cleanup so that later tests don't get borked
-	
-	$undef->delete;
+  isa_ok $schema, 'DBIx::Class::Schema';
+
+  my ($undef, $artist1, $artist2, $artist3 ) = $schema->resultset('Artist')->search({
+    name=>["001First Artist","002Second Artist","003Third Artist", undef]},
+    {order_by=>'name ASC'})->all;
+
+  isa_ok  $artist1, 'DBICTest::Artist';
+  isa_ok  $artist2, 'DBICTest::Artist';
+  isa_ok  $artist3, 'DBICTest::Artist';
+  isa_ok  $undef, 'DBICTest::Artist';  
+
+  ok $artist1->name eq '001First Artist', "Got Expected Artist Name for Artist001";
+  ok $artist2->name eq '002Second Artist', "Got Expected Artist Name for Artist002";
+  ok $artist3->name eq '003Third Artist', "Got Expected Artist Name for Artist003";
+  ok !defined $undef->name, "Got Expected Artist Name for Artist004";  
+
+  ok $artist1->cds->count eq 3, "Got Right number of CDs for Artist1";
+  ok $artist2->cds->count eq 0, "Got Right number of CDs for Artist2";
+  ok $artist3->cds->count eq 1, "Got Right number of CDs for Artist3";
+  ok $undef->cds->count eq 1, "Got Right number of CDs for Artist4";  
+
+  ARTIST1CDS: {
+
+    my ($cd1, $cd2, $cd3) = $artist1->cds->search(undef, {order_by=>'year ASC'});
+
+    isa_ok $cd1, 'DBICTest::CD';
+    isa_ok $cd2, 'DBICTest::CD';
+    isa_ok $cd3, 'DBICTest::CD';
+
+    ok $cd1->year == 2000;
+    ok $cd2->year == 2001;
+    ok $cd3->year == 2002;
+
+    ok $cd1->title eq '001Title1';
+    ok $cd2->title eq '001Title2';
+    ok $cd3->title eq '001Title3';
+  }
+
+  ARTIST3CDS: {
+
+    my ($cd1) = $artist3->cds->search(undef, {order_by=>'year ASC'});
+
+    isa_ok $cd1, 'DBICTest::CD';
+
+    ok $cd1->year == 2005;
+    ok $cd1->title eq '003Title1';
+  }
+
+  ARTIST4CDS: {
+
+    my ($cd1) = $undef->cds->search(undef, {order_by=>'year ASC'});
+
+    isa_ok $cd1, 'DBICTest::CD';
+
+    ok $cd1->year == 2010;
+    ok $cd1->title eq '004Title1';
+  }
+
+  ## Need to do some cleanup so that later tests don't get borked
+
+  $undef->delete;
 }
 
 
@@ -127,212 +127,224 @@
 
 ARRAY_CONTEXT: {
 
-	## These first set of tests are cake because array context just delegates
-	## all it's processing to $resultset->create
-	
-	HAS_MANY_NO_PKS: {
-	
-		## This first group of tests checks to make sure we can call populate
-		## with the parent having many children and let the keys be automatic
+  ## These first set of tests are cake because array context just delegates
+  ## all it's processing to $resultset->create
 
-		my $artists = [
-			{	
-				name => 'Angsty-Whiny Girl',
-				cds => [
-					{ title => 'My First CD', year => 2006 },
-					{ title => 'Yet More Tweeny-Pop crap', year => 2007 },
-				],					
-			},		
-			{
-				name => 'Manufactured Crap',
-			},
-			{
-				name => 'Like I Give a Damn',
-				cds => [
-					{ title => 'My parents sold me to a record company' ,year => 2005 },
-					{ title => 'Why Am I So Ugly?', year => 2006 },
-					{ title => 'I Got Surgery and am now Popular', year => 2007 }				
-				],
-			},
-			{	
-				name => 'Formerly Named',
-				cds => [
-					{ title => 'One Hit Wonder', year => 2006 },
-				],					
-			},			
-		];
-		
-		## Get the result row objects.
-		
-		my ($girl, $crap, $damn, $formerly) = $art_rs->populate($artists);
-		
-		## Do we have the right object?
-		
-		isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");	
-		isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");	
-		
-		## Find the expected information?
+  HAS_MANY_NO_PKS: {
 
-		ok( $crap->name eq 'Manufactured Crap', "Got Correct name for result object");
-		ok( $girl->name eq 'Angsty-Whiny Girl', "Got Correct name for result object");
-		ok( $damn->name eq 'Like I Give a Damn', "Got Correct name for result object");	
-		ok( $formerly->name eq 'Formerly Named', "Got Correct name for result object");
-		
-		## Create the expected children sub objects?
-		
-		ok( $crap->cds->count == 0, "got Expected Number of Cds");
-		ok( $girl->cds->count == 2, "got Expected Number of Cds");	
-		ok( $damn->cds->count == 3, "got Expected Number of Cds");
-		ok( $formerly->cds->count == 1, "got Expected Number of Cds");
+    ## This first group of tests checks to make sure we can call populate
+    ## with the parent having many children and let the keys be automatic
 
-		## Did the cds get expected information?
-		
-		my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year ASC'});
-		
-		ok( $cd1->title eq "My First CD", "Got Expected CD Title");
-		ok( $cd2->title eq "Yet More Tweeny-Pop crap", "Got Expected CD Title");
-	}
-	
-	HAS_MANY_WITH_PKS: {
-	
-		## This group tests the ability to specify the PK in the parent and let
-		## DBIC transparently pass the PK down to the Child and also let's the
-		## child create any other needed PK's for itself.
-		
-		my $aid		=  $art_rs->get_column('artistid')->max || 0;
-		
-		my $first_aid = ++$aid;
-		
-		my $artists = [
-			{
-				artistid => $first_aid,
-				name => 'PK_Angsty-Whiny Girl',
-				cds => [
-					{ artist => $first_aid, title => 'PK_My First CD', year => 2006 },
-					{ artist => $first_aid, title => 'PK_Yet More Tweeny-Pop crap', year => 2007 },
-				],					
-			},		
-			{
-				artistid => ++$aid,
-				name => 'PK_Manufactured Crap',
-			},
-			{
-				artistid => ++$aid,
-				name => 'PK_Like I Give a Damn',
-				cds => [
-					{ title => 'PK_My parents sold me to a record company' ,year => 2005 },
-					{ title => 'PK_Why Am I So Ugly?', year => 2006 },
-					{ title => 'PK_I Got Surgery and am now Popular', year => 2007 }				
-				],
-			},
-			{
-				artistid => ++$aid,
-				name => 'PK_Formerly Named',
-				cds => [
-					{ title => 'PK_One Hit Wonder', year => 2006 },
-				],					
-			},			
-		];
-		
-		## Get the result row objects.
-		
-		my ($girl, $crap, $damn, $formerly) = $art_rs->populate($artists);
-		
-		## Do we have the right object?
-		
-		isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");	
-		isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");	
-		
-		## Find the expected information?
+    my $artists = [
+      {
+        name => 'Angsty-Whiny Girl',
+        cds => [
+          { title => 'My First CD', year => 2006 },
+          { title => 'Yet More Tweeny-Pop crap', year => 2007 },
+        ],
+      },
+      {
+        name => 'Manufactured Crap',
+      },
+      {
+        name => 'Like I Give a Damn',
+        cds => [
+          { title => 'My parents sold me to a record company' ,year => 2005 },
+          { title => 'Why Am I So Ugly?', year => 2006 },
+          { title => 'I Got Surgery and am now Popular', year => 2007 }
+        ],
+      },
+      {
+        name => 'Formerly Named',
+        cds => [
+          { title => 'One Hit Wonder', year => 2006 },
+        ],
+      },
+    ];
 
-		ok( $crap->name eq 'PK_Manufactured Crap', "Got Correct name for result object");
-		ok( $girl->name eq 'PK_Angsty-Whiny Girl', "Got Correct name for result object");
-		ok( $girl->artistid == $first_aid, "Got Correct artist PK for result object");		
-		ok( $damn->name eq 'PK_Like I Give a Damn', "Got Correct name for result object");	
-		ok( $formerly->name eq 'PK_Formerly Named', "Got Correct name for result object");
-		
-		## Create the expected children sub objects?
-		
-		ok( $crap->cds->count == 0, "got Expected Number of Cds");
-		ok( $girl->cds->count == 2, "got Expected Number of Cds");	
-		ok( $damn->cds->count == 3, "got Expected Number of Cds");
-		ok( $formerly->cds->count == 1, "got Expected Number of Cds");
+    ## Get the result row objects.
 
-		## Did the cds get expected information?
-		
-		my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year ASC'});
-		
-		ok( $cd1->title eq "PK_My First CD", "Got Expected CD Title");
-		ok( $cd2->title eq "PK_Yet More Tweeny-Pop crap", "Got Expected CD Title");
-	}
-	
-	BELONGS_TO_NO_PKs: {
+    my ($girl, $crap, $damn, $formerly) = $art_rs->populate($artists);
 
-		## Test from a belongs_to perspective, should create artist first, 
-		## then CD with artistid.  This test we let the system automatically
-		## create the PK's.  Chances are good you'll use it this way mostly.
-		
-		my $cds = [
-			{
-				title => 'Some CD3',
-				year => '1997',
-				artist => { name => 'Fred BloggsC'},
-			},
-			{
-				title => 'Some CD4',
-				year => '1997',
-				artist => { name => 'Fred BloggsD'},
-			},		
-		];
-		
-		my ($cdA, $cdB) = $cd_rs->populate($cds);
-		
+    ## Do we have the right object?
 
-		isa_ok($cdA, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdA->artist->name, 'Fred BloggsC', 'Set Artist to FredC');
+    isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");  
+    isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");
 
-		
-		isa_ok($cdB, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdB->artist->name, 'Fred BloggsD', 'Set Artist to FredD');
-	}
+    ## Find the expected information?
 
-	BELONGS_TO_WITH_PKs: {
+    ok( $crap->name eq 'Manufactured Crap', "Got Correct name for result object");
+    ok( $girl->name eq 'Angsty-Whiny Girl', "Got Correct name for result object");
+    ok( $damn->name eq 'Like I Give a Damn', "Got Correct name for result object");
+    ok( $formerly->name eq 'Formerly Named', "Got Correct name for result object");
 
-		## Test from a belongs_to perspective, should create artist first, 
-		## then CD with artistid.  This time we try setting the PK's
-		
-		my $aid	= $art_rs->get_column('artistid')->max || 0;
+    ## Create the expected children sub objects?
 
-		my $cds = [
-			{
-				title => 'Some CD3',
-				year => '1997',
-				artist => { artistid=> ++$aid, name => 'Fred BloggsC'},
-			},
-			{
-				title => 'Some CD4',
-				year => '1997',
-				artist => { artistid=> ++$aid, name => 'Fred BloggsD'},
-			},		
-		];
-		
-		my ($cdA, $cdB) = $cd_rs->populate($cds);
-		
-		isa_ok($cdA, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdA->artist->name, 'Fred BloggsC', 'Set Artist to FredC');
-		
-		isa_ok($cdB, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdB->artist->name, 'Fred BloggsD', 'Set Artist to FredD');
-		ok($cdB->artist->artistid == $aid, "Got Expected Artist ID");
-	}
+    ok( $crap->cds->count == 0, "got Expected Number of Cds");
+    ok( $girl->cds->count == 2, "got Expected Number of Cds");
+    ok( $damn->cds->count == 3, "got Expected Number of Cds");
+    ok( $formerly->cds->count == 1, "got Expected Number of Cds");
+
+    ## Did the cds get expected information?
+
+    my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year'});
+
+    ok( $cd1->title eq "My First CD", "Got Expected CD Title");
+    ok( $cd2->title eq "Yet More Tweeny-Pop crap", "Got Expected CD Title");
+  }
+
+  HAS_MANY_WITH_PKS: {
+
+    ## This group tests the ability to specify the PK in the parent and let
+    ## DBIC transparently pass the PK down to the Child and also let's the
+    ## child create any other needed PK's for itself.
+
+    my $aid    =  $art_rs->get_column('artistid')->max || 0;
+
+    my $first_aid = ++$aid;
+
+    my $artists = [
+      {
+        artistid => $first_aid,
+        name => 'PK_Angsty-Whiny Girl',
+        cds => [
+          { artist => $first_aid, title => 'PK_My First CD', year => 2006 },
+          { artist => $first_aid, title => 'PK_Yet More Tweeny-Pop crap', year => 2007 },
+        ],
+      },
+      {
+        artistid => ++$aid,
+        name => 'PK_Manufactured Crap',
+      },
+      {
+        artistid => ++$aid,
+        name => 'PK_Like I Give a Damn',
+        cds => [
+          { title => 'PK_My parents sold me to a record company' ,year => 2005 },
+          { title => 'PK_Why Am I So Ugly?', year => 2006 },
+          { title => 'PK_I Got Surgery and am now Popular', year => 2007 }
+        ],
+      },
+      {
+        artistid => ++$aid,
+        name => 'PK_Formerly Named',
+        cds => [
+          { title => 'PK_One Hit Wonder', year => 2006 },
+        ],
+      },
+    ];
+
+    ## Get the result row objects.
+
+    my ($girl, $crap, $damn, $formerly) = $art_rs->populate($artists);
+
+    ## Do we have the right object?
+
+    isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");  
+    isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");
+
+    ## Find the expected information?
+
+    ok( $crap->name eq 'PK_Manufactured Crap', "Got Correct name for result object");
+    ok( $girl->name eq 'PK_Angsty-Whiny Girl', "Got Correct name for result object");
+    ok( $girl->artistid == $first_aid, "Got Correct artist PK for result object");
+    ok( $damn->name eq 'PK_Like I Give a Damn', "Got Correct name for result object");
+    ok( $formerly->name eq 'PK_Formerly Named', "Got Correct name for result object");
+
+    ## Create the expected children sub objects?
+
+    ok( $crap->cds->count == 0, "got Expected Number of Cds");
+    ok( $girl->cds->count == 2, "got Expected Number of Cds");  
+    ok( $damn->cds->count == 3, "got Expected Number of Cds");
+    ok( $formerly->cds->count == 1, "got Expected Number of Cds");
+
+    ## Did the cds get expected information?
+
+    my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year ASC'});
+
+    ok( $cd1->title eq "PK_My First CD", "Got Expected CD Title");
+    ok( $cd2->title eq "PK_Yet More Tweeny-Pop crap", "Got Expected CD Title");
+  }
+
+  BELONGS_TO_NO_PKs: {
+
+    ## Test from a belongs_to perspective, should create artist first, 
+    ## then CD with artistid.  This test we let the system automatically
+    ## create the PK's.  Chances are good you'll use it this way mostly.
+
+    my $cds = [
+      {
+        title => 'Some CD3',
+        year => '1997',
+        artist => { name => 'Fred BloggsC'},
+      },
+      {
+        title => 'Some CD4',
+        year => '1997',
+        artist => { name => 'Fred BloggsD'},
+      },    
+    ];
+
+    my ($cdA, $cdB) = $cd_rs->populate($cds);
+
+
+    isa_ok($cdA, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdA->artist->name, 'Fred BloggsC', 'Set Artist to FredC');
+
+
+    isa_ok($cdB, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdB->artist->name, 'Fred BloggsD', 'Set Artist to FredD');
+  }
+
+  BELONGS_TO_WITH_PKs: {
+
+    ## Test from a belongs_to perspective, should create artist first, 
+    ## then CD with artistid.  This time we try setting the PK's
+
+    my $aid  = $art_rs->get_column('artistid')->max || 0;
+
+    my $cds = [
+      {
+        title => 'Some CD3',
+        year => '1997',
+        artist => { artistid=> ++$aid, name => 'Fred BloggsC'},
+      },
+      {
+        title => 'Some CD4',
+        year => '1997',
+        artist => { artistid=> ++$aid, name => 'Fred BloggsD'},
+      },    
+    ];
+
+    my ($cdA, $cdB) = $cd_rs->populate($cds);
+
+    isa_ok($cdA, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdA->artist->name, 'Fred BloggsC', 'Set Artist to FredC');
+
+    isa_ok($cdB, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdB->artist->name, 'Fred BloggsD', 'Set Artist to FredD');
+    ok($cdB->artist->artistid == $aid, "Got Expected Artist ID");
+  }
+
+  WITH_COND_FROM_RS: {
+
+    my ($more_crap) = $restricted_art_rs->populate([
+      {
+        name => 'More Manufactured Crap',
+      },
+    ]);
+
+    ## Did it use the condition in the resultset?
+    cmp_ok( $more_crap->rank, '==', 42, "Got Correct rank for result object");
+  } 
 }
 
 
@@ -342,265 +354,280 @@
 
 VOID_CONTEXT: {
 
-	## All these tests check the ability to use populate without asking for 
-	## any returned resultsets.  This uses bulk_insert as much as possible
-	## in order to increase speed.
-	
-	HAS_MANY_WITH_PKS: {
-	
-		## This first group of tests checks to make sure we can call populate
-		## with the parent having many children and the parent PK is set
+  ## All these tests check the ability to use populate without asking for 
+  ## any returned resultsets.  This uses bulk_insert as much as possible
+  ## in order to increase speed.
 
-		my $aid		=  $art_rs->get_column('artistid')->max || 0;
-		
-		my $first_aid = ++$aid;
-		
-		my $artists = [
-			{
-				artistid => $first_aid,
-				name => 'VOID_PK_Angsty-Whiny Girl',
-				cds => [
-					{ artist => $first_aid, title => 'VOID_PK_My First CD', year => 2006 },
-					{ artist => $first_aid, title => 'VOID_PK_Yet More Tweeny-Pop crap', year => 2007 },
-				],					
-			},		
-			{
-				artistid => ++$aid,
-				name => 'VOID_PK_Manufactured Crap',
-			},
-			{
-				artistid => ++$aid,
-				name => 'VOID_PK_Like I Give a Damn',
-				cds => [
-					{ title => 'VOID_PK_My parents sold me to a record company' ,year => 2005 },
-					{ title => 'VOID_PK_Why Am I So Ugly?', year => 2006 },
-					{ title => 'VOID_PK_I Got Surgery and am now Popular', year => 2007 }				
-				],
-			},
-			{
-				artistid => ++$aid,
-				name => 'VOID_PK_Formerly Named',
-				cds => [
-					{ title => 'VOID_PK_One Hit Wonder', year => 2006 },
-				],					
-			},	
-			{
-				artistid => ++$aid,
-				name => undef,
-				cds => [
-					{ title => 'VOID_PK_Zundef test', year => 2006 },
-				],					
-			},		
-		];
-		
-		## Get the result row objects.
-		
-		$art_rs->populate($artists);
-		
-		my ($undef, $girl, $formerly, $damn, $crap) = $art_rs->search(
-		
-			{name=>[ map { $_->{name} } @$artists]},
-			{order_by=>'name ASC'},
-		);
-		
-		## Do we have the right object?
-		
-		isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");	
-		isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");	
-		isa_ok( $undef, 'DBICTest::Artist', "Got 'Artist'");		
-	
-		## Find the expected information?
+  HAS_MANY_WITH_PKS: {
 
-		ok( $crap->name eq 'VOID_PK_Manufactured Crap', "Got Correct name 'VOID_PK_Manufactured Crap' for result object");
-		ok( $girl->name eq 'VOID_PK_Angsty-Whiny Girl', "Got Correct name for result object");
-		ok( $damn->name eq 'VOID_PK_Like I Give a Damn', "Got Correct name for result object");	
-		ok( $formerly->name eq 'VOID_PK_Formerly Named', "Got Correct name for result object");
-		ok( !defined $undef->name, "Got Correct name 'is undef' for result object");		
-		
-		## Create the expected children sub objects?
-		ok( $crap->can('cds'), "Has cds relationship");
-		ok( $girl->can('cds'), "Has cds relationship");
-		ok( $damn->can('cds'), "Has cds relationship");
-		ok( $formerly->can('cds'), "Has cds relationship");
-		ok( $undef->can('cds'), "Has cds relationship");	
-	
-		ok( $crap->cds->count == 0, "got Expected Number of Cds");
-		ok( $girl->cds->count == 2, "got Expected Number of Cds");	
-		ok( $damn->cds->count == 3, "got Expected Number of Cds");
-		ok( $formerly->cds->count == 1, "got Expected Number of Cds");
-		ok( $undef->cds->count == 1, "got Expected Number of Cds");
-		
-		## Did the cds get expected information?
-		
-		my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year ASC'});
-		
-		ok( $cd1->title eq "VOID_PK_My First CD", "Got Expected CD Title");
-		ok( $cd2->title eq "VOID_PK_Yet More Tweeny-Pop crap", "Got Expected CD Title");
-	}
-	
-	
-	BELONGS_TO_WITH_PKs: {
+    ## This first group of tests checks to make sure we can call populate
+    ## with the parent having many children and the parent PK is set
 
-		## Test from a belongs_to perspective, should create artist first, 
-		## then CD with artistid.  This time we try setting the PK's
-		
-		my $aid	= $art_rs->get_column('artistid')->max || 0;
+    my $aid = $art_rs->get_column('artistid')->max || 0;
 
-		my $cds = [
-			{
-				title => 'Some CD3B',
-				year => '1997',
-				artist => { artistid=> ++$aid, name => 'Fred BloggsCB'},
-			},
-			{
-				title => 'Some CD4B',
-				year => '1997',
-				artist => { artistid=> ++$aid, name => 'Fred BloggsDB'},
-			},		
-		];
-		
-		$cd_rs->populate($cds);
-		
-		my ($cdA, $cdB) = $cd_rs->search(
-			{title=>[sort map {$_->{title}} @$cds]},
-			{order_by=>'title ASC'},
-		);
-		
-		isa_ok($cdA, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdA->artist->name, 'Fred BloggsCB', 'Set Artist to FredCB');
-		
-		isa_ok($cdB, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdB->artist->name, 'Fred BloggsDB', 'Set Artist to FredDB');
-		ok($cdB->artist->artistid == $aid, "Got Expected Artist ID");
-	}
+    my $first_aid = ++$aid;
 
-	BELONGS_TO_NO_PKs: {
+    my $artists = [
+      {
+        artistid => $first_aid,
+        name => 'VOID_PK_Angsty-Whiny Girl',
+        cds => [
+          { artist => $first_aid, title => 'VOID_PK_My First CD', year => 2006 },
+          { artist => $first_aid, title => 'VOID_PK_Yet More Tweeny-Pop crap', year => 2007 },
+        ],
+      },
+      {
+        artistid => ++$aid,
+        name => 'VOID_PK_Manufactured Crap',
+      },
+      {
+        artistid => ++$aid,
+        name => 'VOID_PK_Like I Give a Damn',
+        cds => [
+          { title => 'VOID_PK_My parents sold me to a record company' ,year => 2005 },
+          { title => 'VOID_PK_Why Am I So Ugly?', year => 2006 },
+          { title => 'VOID_PK_I Got Surgery and am now Popular', year => 2007 }        
+        ],
+      },
+      {
+        artistid => ++$aid,
+        name => 'VOID_PK_Formerly Named',
+        cds => [
+          { title => 'VOID_PK_One Hit Wonder', year => 2006 },
+        ],
+      },
+      {
+        artistid => ++$aid,
+        name => undef,
+        cds => [
+          { title => 'VOID_PK_Zundef test', year => 2006 },
+        ],
+      },
+    ];
 
-		## Test from a belongs_to perspective, should create artist first, 
-		## then CD with artistid.
-				
-		my $cds = [
-			{
-				title => 'Some CD3BB',
-				year => '1997',
-				artist => { name => 'Fred BloggsCBB'},
-			},
-			{
-				title => 'Some CD4BB',
-				year => '1997',
-				artist => { name => 'Fred BloggsDBB'},
-			},
-			{
-				title => 'Some CD5BB',
-				year => '1997',
-				artist => { name => undef},
-			},		
-		];
-		
-		$cd_rs->populate($cds);
-		
-		my ($cdA, $cdB, $cdC) = $cd_rs->search(
-			{title=>[sort map {$_->{title}} @$cds]},
-			{order_by=>'title ASC'},
-		);
-		
-		isa_ok($cdA, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdA->title, 'Some CD3BB', 'Found Expected title');
-		is($cdA->artist->name, 'Fred BloggsCBB', 'Set Artist to FredCBB');
-		
-		isa_ok($cdB, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdB->title, 'Some CD4BB', 'Found Expected title');
-		is($cdB->artist->name, 'Fred BloggsDBB', 'Set Artist to FredDBB');
-		
-		isa_ok($cdC, 'DBICTest::CD', 'Created CD');
-		isa_ok($cdC->artist, 'DBICTest::Artist', 'Set Artist');
-		is($cdC->title, 'Some CD5BB', 'Found Expected title');
-		is( $cdC->artist->name, undef, 'Set Artist to something undefined');
-	}
-	
-	
-	HAS_MANY_NO_PKS: {
-	
-		## This first group of tests checks to make sure we can call populate
-		## with the parent having many children and let the keys be automatic
+    ## Get the result row objects.
 
-		my $artists = [
-			{	
-				name => 'VOID_Angsty-Whiny Girl',
-				cds => [
-					{ title => 'VOID_My First CD', year => 2006 },
-					{ title => 'VOID_Yet More Tweeny-Pop crap', year => 2007 },
-				],					
-			},		
-			{
-				name => 'VOID_Manufactured Crap',
-			},
-			{
-				name => 'VOID_Like I Give a Damn',
-				cds => [
-					{ title => 'VOID_My parents sold me to a record company' ,year => 2005 },
-					{ title => 'VOID_Why Am I So Ugly?', year => 2006 },
-					{ title => 'VOID_I Got Surgery and am now Popular', year => 2007 }				
-				],
-			},
-			{	
-				name => 'VOID_Formerly Named',
-				cds => [
-					{ title => 'VOID_One Hit Wonder', year => 2006 },
-				],					
-			},			
-		];
-		
-		## Get the result row objects.
-		
-		$art_rs->populate($artists);
-		
-		my ($girl, $formerly, $damn, $crap) = $art_rs->search(
-			{name=>[sort map {$_->{name}} @$artists]},
-			{order_by=>'name ASC'},
-		);
-		
-		## Do we have the right object?
-		
-		isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
-		isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");	
-		isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");	
-		
-		## Find the expected information?
+    $art_rs->populate($artists);
 
-		ok( $crap->name eq 'VOID_Manufactured Crap', "Got Correct name for result object");
-		ok( $girl->name eq 'VOID_Angsty-Whiny Girl', "Got Correct name for result object");
-		ok( $damn->name eq 'VOID_Like I Give a Damn', "Got Correct name for result object");	
-		ok( $formerly->name eq 'VOID_Formerly Named', "Got Correct name for result object");
-		
-		## Create the expected children sub objects?
-		ok( $crap->can('cds'), "Has cds relationship");
-		ok( $girl->can('cds'), "Has cds relationship");
-		ok( $damn->can('cds'), "Has cds relationship");
-		ok( $formerly->can('cds'), "Has cds relationship");
-		
-		ok( $crap->cds->count == 0, "got Expected Number of Cds");
-		ok( $girl->cds->count == 2, "got Expected Number of Cds");	
-		ok( $damn->cds->count == 3, "got Expected Number of Cds");
-		ok( $formerly->cds->count == 1, "got Expected Number of Cds");
+    my ($undef, $girl, $formerly, $damn, $crap) = $art_rs->search(
 
-		## Did the cds get expected information?
-		
-		my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year ASC'});
+      {name=>[ map { $_->{name} } @$artists]},
+      {order_by=>'name ASC'},
+    );
 
-		ok($cd1, "Got a got CD");
-		ok($cd2, "Got a got CD");
-		ok( $cd1->title eq "VOID_My First CD", "Got Expected CD Title");
-		ok( $cd2->title eq "VOID_Yet More Tweeny-Pop crap", "Got Expected CD Title");
-	}
+    ## Do we have the right object?
 
+    isa_ok( $crap, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $girl, 'DBICTest::Artist', "Got 'Artist'");
+    isa_ok( $damn, 'DBICTest::Artist', "Got 'Artist'");  
+    isa_ok( $formerly, 'DBICTest::Artist', "Got 'Artist'");  
+    isa_ok( $undef, 'DBICTest::Artist', "Got 'Artist'");    
+
+    ## Find the expected information?
+
+    ok( $crap->name eq 'VOID_PK_Manufactured Crap', "Got Correct name 'VOID_PK_Manufactured Crap' for result object");
+    ok( $girl->name eq 'VOID_PK_Angsty-Whiny Girl', "Got Correct name for result object");
+    ok( $damn->name eq 'VOID_PK_Like I Give a Damn', "Got Correct name for result object");  
+    ok( $formerly->name eq 'VOID_PK_Formerly Named', "Got Correct name for result object");
+    ok( !defined $undef->name, "Got Correct name 'is undef' for result object");    
+
+    ## Create the expected children sub objects?
+    ok( $crap->can('cds'), "Has cds relationship");
+    ok( $girl->can('cds'), "Has cds relationship");
+    ok( $damn->can('cds'), "Has cds relationship");
+    ok( $formerly->can('cds'), "Has cds relationship");
+    ok( $undef->can('cds'), "Has cds relationship");  
+
+    ok( $crap->cds->count == 0, "got Expected Number of Cds");
+    ok( $girl->cds->count == 2, "got Expected Number of Cds");  
+    ok( $damn->cds->count == 3, "got Expected Number of Cds");
+    ok( $formerly->cds->count == 1, "got Expected Number of Cds");
+    ok( $undef->cds->count == 1, "got Expected Number of Cds");
+
+    ## Did the cds get expected information?
+
+    my ($cd1, $cd2) = $girl->cds->search({},{order_by=>'year ASC'});
+
+    ok( $cd1->title eq "VOID_PK_My First CD", "Got Expected CD Title");
+    ok( $cd2->title eq "VOID_PK_Yet More Tweeny-Pop crap", "Got Expected CD Title");
+  }
+
+
+  BELONGS_TO_WITH_PKs: {
+
+    ## Test from a belongs_to perspective, should create artist first, 
+    ## then CD with artistid.  This time we try setting the PK's
+
+    my $aid  = $art_rs->get_column('artistid')->max || 0;
+
+    my $cds = [
+      {
+        title => 'Some CD3B',
+        year => '1997',
+        artist => { artistid=> ++$aid, name => 'Fred BloggsCB'},
+      },
+      {
+        title => 'Some CD4B',
+        year => '1997',
+        artist => { artistid=> ++$aid, name => 'Fred BloggsDB'},
+      },
+    ];
+
+    $cd_rs->populate($cds);
+
+    my ($cdA, $cdB) = $cd_rs->search(
+      {title=>[sort map {$_->{title}} @$cds]},
+      {order_by=>'title ASC'},
+    );
+
+    isa_ok($cdA, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdA->artist->name, 'Fred BloggsCB', 'Set Artist to FredCB');
+
+    isa_ok($cdB, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdB->artist->name, 'Fred BloggsDB', 'Set Artist to FredDB');
+    ok($cdB->artist->artistid == $aid, "Got Expected Artist ID");
+  }
+
+  BELONGS_TO_NO_PKs: {
+
+    ## Test from a belongs_to perspective, should create artist first, 
+    ## then CD with artistid.
+
+    my $cds = [
+      {
+        title => 'Some CD3BB',
+        year => '1997',
+        artist => { name => 'Fred BloggsCBB'},
+      },
+      {
+        title => 'Some CD4BB',
+        year => '1997',
+        artist => { name => 'Fred BloggsDBB'},
+      },
+      {
+        title => 'Some CD5BB',
+        year => '1997',
+        artist => { name => undef},
+      },    
+    ];
+
+    $cd_rs->populate($cds);
+
+    my ($cdA, $cdB, $cdC) = $cd_rs->search(
+      {title=>[sort map {$_->{title}} @$cds]},
+      {order_by=>'title ASC'},
+    );
+
+    isa_ok($cdA, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdA->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdA->title, 'Some CD3BB', 'Found Expected title');
+    is($cdA->artist->name, 'Fred BloggsCBB', 'Set Artist to FredCBB');
+
+    isa_ok($cdB, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdB->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdB->title, 'Some CD4BB', 'Found Expected title');
+    is($cdB->artist->name, 'Fred BloggsDBB', 'Set Artist to FredDBB');
+
+    isa_ok($cdC, 'DBICTest::CD', 'Created CD');
+    isa_ok($cdC->artist, 'DBICTest::Artist', 'Set Artist');
+    is($cdC->title, 'Some CD5BB', 'Found Expected title');
+    is( $cdC->artist->name, undef, 'Set Artist to something undefined');
+  }
+
+
+  HAS_MANY_NO_PKS: {
+
+    ## This first group of tests checks to make sure we can call populate
+    ## with the parent having many children and let the keys be automatic
+
+    my $artists = [
+      {  
+        name => 'VOID_Angsty-Whiny Girl',
+        cds => [
+          { title => 'VOID_My First CD', year => 2006 },
+          { title => 'VOID_Yet More Tweeny-Pop crap', year => 2007 },
+        ],          
+      },    
+      {
+        name => 'VOID_Manufactured Crap',
+      },
+      {
+        name => 'VOID_Lik