[Bast-commits] r8025 - in DBIx-Class/0.08/branches/create_scalarref_rt51559: . lib/DBIx/Class lib/DBIx/Class/CDBICompat lib/DBIx/Class/Manual lib/DBIx/Class/Schema lib/DBIx/Class/Storage lib/DBIx/Class/Storage/DBI lib/DBIx/Class/Storage/DBI/ADO lib/DBIx/Class/Storage/DBI/Sybase lib/DBIx/Class/Storage/DBI/Sybase/ASE t t/count t/inflate t/lib t/lib/DBICTest/Schema t/prefetch t/resultset

ribasushi at dev.catalyst.perl.org
Fri Dec 4 01:44:09 GMT 2009


Author: ribasushi
Date: 2009-12-04 01:44:09 +0000 (Fri, 04 Dec 2009)
New Revision: 8025

Added:
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/ASE/
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/count/search_related.t
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/resultset/plus_select.t
Removed:
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/Common.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/NoBindVars.pm
Modified:
   DBIx-Class/0.08/branches/create_scalarref_rt51559/
   DBIx-Class/0.08/branches/create_scalarref_rt51559/Changes
   DBIx-Class/0.08/branches/create_scalarref_rt51559/Makefile.PL
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/AccessorGroup.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/CDBICompat/Constructor.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Manual/Joining.pod
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Relationship.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/ResultSet.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Row.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/SQLAHacks.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Schema.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Schema/Versioned.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/101populate_rs.t
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/60core.t
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/71mysql.t
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/746sybase.t
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/93single_accessor_object.t
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/95sql_maker.t
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/95sql_maker_quote.t
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/count/prefetch.t
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/from_subquery.t
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/inflate/datetime_sybase.t
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/lib/DBICTest.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/lib/DBICTest/Schema/CD.pm
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/prefetch/via_search_related.t
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/resultset/as_query.t
   DBIx-Class/0.08/branches/create_scalarref_rt51559/t/resultset/is_paged.t
Log:
 r7937 at Thesaurus (orig r7925):  ribasushi | 2009-11-19 12:04:21 +0100
 Bail out early in Versioned if no versioning checks are requested
 r7938 at Thesaurus (orig r7926):  ribasushi | 2009-11-19 12:06:13 +0100
 POD fixes
 r7940 at Thesaurus (orig r7928):  caelum | 2009-11-22 11:03:33 +0100
 fix connection setup for Sybase
 r7943 at Thesaurus (orig r7931):  caelum | 2009-11-22 13:27:43 +0100
 override _run_connection_actions for internal connection setup in sybase stuff, much cleaner this way
 r7947 at Thesaurus (orig r7935):  ribasushi | 2009-11-23 01:18:28 +0100
 Whoops
 r7948 at Thesaurus (orig r7936):  ribasushi | 2009-11-23 01:28:50 +0100
 Fix ::Versioned regression introduced in r7925
 r7951 at Thesaurus (orig r7939):  caelum | 2009-11-23 12:32:10 +0100
 add subname to rdbms_specific_methods wrapper
 r7953 at Thesaurus (orig r7941):  caelum | 2009-11-23 13:23:14 +0100
  r21187 at hlagh (orig r7933):  ribasushi | 2009-11-22 18:38:34 -0500
  New sybase refactor branch
  r21188 at hlagh (orig r7934):  ribasushi | 2009-11-22 19:06:48 -0500
  refactor part1
  r21192 at hlagh (orig r7938):  ribasushi | 2009-11-22 19:30:05 -0500
  refactor part 2
  r21194 at hlagh (orig r7940):  caelum | 2009-11-23 07:06:46 -0500
  fix test
 
 r7955 at Thesaurus (orig r7943):  ribasushi | 2009-11-23 16:30:13 +0100
 Add missing Sub::Name invocations and improve the SQLA Carp overrides
 r7957 at Thesaurus (orig r7945):  ribasushi | 2009-11-24 10:12:49 +0100
  r7749 at Thesaurus (orig r7738):  norbi | 2009-09-28 22:01:39 +0200
  Created branch 'void_populate_resultset_cond': Fixing a bug: $rs->populate in void context does not use the conditions from $rs.
  r7751 at Thesaurus (orig r7740):  norbi | 2009-09-28 23:26:06 +0200
   r7935 at vger:  mendel | 2009-09-28 23:25:52 +0200
   Undid the previous tweaks to the already existing tests and added new tests instead.
  
  r7928 at Thesaurus (orig r7916):  ribasushi | 2009-11-16 08:48:42 +0100
  Change plan
  r7956 at Thesaurus (orig r7944):  ribasushi | 2009-11-24 10:10:49 +0100
  Better naming and a bit leaner implementation. Main idea remains the same
 
 r7959 at Thesaurus (orig r7947):  ribasushi | 2009-11-24 10:39:52 +0100
 Changes and prevent a spurious todo-pass
 r7962 at Thesaurus (orig r7950):  ribasushi | 2009-11-24 19:43:42 +0100
 Extra sqla quoting test
 r7963 at Thesaurus (orig r7951):  ribasushi | 2009-11-24 19:48:01 +0100
 Extra sqla quoting test(2)
 r7964 at Thesaurus (orig r7952):  ribasushi | 2009-11-25 21:24:10 +0100
 wtf
 r7967 at Thesaurus (orig r7955):  ribasushi | 2009-11-26 11:07:06 +0100
 cleanups
 r7968 at Thesaurus (orig r7956):  ribasushi | 2009-11-26 12:11:21 +0100
 Sanify search_related chaining code (no functional changes)
 r7969 at Thesaurus (orig r7957):  ribasushi | 2009-11-26 12:52:05 +0100
 Another count() quirk down
 r7970 at Thesaurus (orig r7958):  ribasushi | 2009-11-26 14:23:28 +0100
 Add a no-accessor column to generally test handling
 r7972 at Thesaurus (orig r7960):  ribasushi | 2009-11-26 15:32:17 +0100
 Whoops, wrong accessor (things still work though)
 r7977 at Thesaurus (orig r7965):  ribasushi | 2009-11-26 16:43:21 +0100
  r7971 at Thesaurus (orig r7959):  ribasushi | 2009-11-26 14:54:17 +0100
  New branch for get_inflated_column bugfix
  r7974 at Thesaurus (orig r7962):  ribasushi | 2009-11-26 15:56:20 +0100
  Fix for rt46953
  r7975 at Thesaurus (orig r7963):  ribasushi | 2009-11-26 16:05:17 +0100
  Make Test::More happy
  r7976 at Thesaurus (orig r7964):  ribasushi | 2009-11-26 16:43:09 +0100
  Changes
 
 r7980 at Thesaurus (orig r7968):  ribasushi | 2009-11-27 01:38:11 +0100
 Fix search_related wrt grouped resultsets (distinct is currently passed to the new resultset, this is probably wrong)
 r7987 at Thesaurus (orig r7975):  ribasushi | 2009-11-28 16:54:23 +0100
 Cleanup the s.c.o. index
 r7988 at Thesaurus (orig r7976):  ribasushi | 2009-11-28 16:57:04 +0100
 Test based on http://lists.scsys.co.uk/pipermail/dbix-class/2009-November/008599.html
 r8007 at Thesaurus (orig r7995):  castaway | 2009-11-30 16:20:19 +0100
 Remove over-emphasis on +select/+as. Add docs on prefetch and other ways to get related data, with caveats etc. 
 
 r8009 at Thesaurus (orig r7997):  dew | 2009-11-30 19:37:00 +0100
 Alter the docs for has_many relationships to make them a little easier to grok
 r8021 at Thesaurus (orig r8009):  castaway | 2009-12-02 14:19:40 +0100
 Added note about prefetch and has_many related objects
 
 r8029 at Thesaurus (orig r8017):  ribasushi | 2009-12-03 13:24:04 +0100
 Source sanity check on subqueried update/delete
 r8030 at Thesaurus (orig r8018):  ribasushi | 2009-12-03 14:39:37 +0100
 Sanify populate arg handling



Property changes on: DBIx-Class/0.08/branches/create_scalarref_rt51559
___________________________________________________________________
Name: svk:merge
   - 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/cookbook_fixes:7657
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/prefetch_bug-unqualified_column_in_search_related_cond:7959
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7982
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/ado_mssql:7886
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/autocast:7418
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/connect_info_hash:7435
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cookbook_fixes:7479
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_has_many_join:7382
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/is_resultset_paginated:7769
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7842
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch-group_by:7917
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_bug-unqualified_column_in_search_related_cond:7900
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:7682
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulk_insert:7679
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulkinsert_support:7796
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_support:7797
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/view_rels:7908
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
   + 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/cookbook_fixes:7657
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/prefetch_bug-unqualified_column_in_search_related_cond:7959
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/void_populate_resultset_cond:7935
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7982
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/ado_mssql:7886
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/autocast:7418
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/connect_info_hash:7435
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cookbook_fixes:7479
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/get_inflated_columns_rt46953:7964
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_has_many_join:7382
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/is_resultset_paginated:7769
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7842
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch-group_by:7917
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_bug-unqualified_column_in_search_related_cond:7900
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:7682
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulk_insert:7679
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulkinsert_support:7796
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_refactor:7940
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_support:7797
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/view_rels:7908
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/void_populate_resultset_cond:7944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:8018
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/Changes
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/Changes	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/Changes	2009-12-04 01:44:09 UTC (rev 8025)
@@ -3,6 +3,13 @@
         - Fix distinct => 1 with non-selecting order_by (the columns
           in order_by also need to be added to the resulting group_by)
         - Do not attempt to deploy FK constraints pointing to a View
+        - Refactored Sybase storage driver into a central ::DBI::Sybase
+          dispatcher, and a sybase-specific ::DBI::Sybase::ASE
+        - Make sure populate() inherits the resultset conditions just
+          like create() does
+        - Fix count/objects from search_related on limited resultset
+        - Make get_inflated_columns behave identically to get_columns
+          wrt +select/+as (RT#46953)
 
 0.08114 2009-11-14 17:45:00 (UTC)
         - Preliminary support for MSSQL via DBD::ADO

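A minimal sketch of the populate() change noted above, assuming $schema is an already-connected schema with the usual CD test source (hypothetical data): rows inserted through a restricted resultset now inherit the resultset condition, just as create() always has.

  # resultset restricted to a single artist
  my $cd_rs = $schema->resultset('CD')->search({ artist => 1 });

  # every inserted row now picks up artist => 1 from the resultset
  # condition, in void context (bulk insert) as well as list context
  $cd_rs->populate([
    { title => 'Very Antique Song', year => 1999 },
    { title => 'A Song to Remember', year => 2000 },
  ]);
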
Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/Makefile.PL
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/Makefile.PL	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/Makefile.PL	2009-12-04 01:44:09 UTC (rev 8025)
@@ -142,13 +142,17 @@
 resources 'repository'  => 'http://dev.catalyst.perl.org/repos/bast/DBIx-Class/';
 resources 'MailingList' => 'http://lists.scsys.co.uk/cgi-bin/mailman/listinfo/dbix-class';
 
-no_index 'DBIx::Class::Storage::DBI::Sybase::Common';
 no_index 'DBIx::Class::SQLAHacks';
 no_index 'DBIx::Class::SQLAHacks::MSSQL';
+no_index 'DBIx::Class::SQLAHacks::OracleJoins';
 no_index 'DBIx::Class::Storage::DBI::AmbiguousGlob';
-no_index 'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server';
-no_index 'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars';
 no_index 'DBIx::Class::Storage::DBIHacks';
+no_index 'DBIx::Class::PK::Auto::DB2';
+no_index 'DBIx::Class::PK::Auto::MSSQL';
+no_index 'DBIx::Class::PK::Auto::MySQL';
+no_index 'DBIx::Class::PK::Auto::Oracle';
+no_index 'DBIx::Class::PK::Auto::Pg';
+no_index 'DBIx::Class::PK::Auto::SQLite';
 
 # re-build README and require extra modules for testing if we're in a checkout
 

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/AccessorGroup.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/AccessorGroup.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/AccessorGroup.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -17,8 +17,6 @@
 
 This class now exists in its own right on CPAN as Class::Accessor::Grouped
 
-1;
-
 =head1 AUTHORS
 
 Matt S. Trout <mst at shadowcatsystems.co.uk>

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/CDBICompat/Constructor.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/CDBICompat/Constructor.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/CDBICompat/Constructor.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -3,6 +3,8 @@
 
 use base qw(DBIx::Class::CDBICompat::ImaDBI);
 
+use Sub::Name();
+
 use strict;
 use warnings;
 
@@ -22,7 +24,7 @@
     return carp("$method already exists in $class")
             if *$meth{CODE};
 
-    *$meth = sub {
+    *$meth = Sub::Name::subname $meth => sub {
             my $self = shift;
             $self->sth_to_objects($self->sql_Retrieve($fragment), \@_);
     };

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Manual/Joining.pod
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Manual/Joining.pod	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Manual/Joining.pod	2009-12-04 01:44:09 UTC (rev 8025)
@@ -17,7 +17,7 @@
 But I'll explain anyway. Assuming you have created your database in a
 more or less sensible way, you will end up with several tables that
 contain C<related> information. For example, you may have a table
-containing information about C<CDs>, containing the CD title and it's
+containing information about C<CD>s, containing the CD title and its
 year of publication, and another table containing all the C<Track>s
 for the CDs, one track per row.
 
@@ -34,7 +34,8 @@
 So, joins are a way of extending simple select statements to include
 fields from other, related, tables. There are various types of joins,
 depending on which combination of the data you wish to retrieve, see
-MySQL's doc on JOINs: L<http://dev.mysql.com/doc/refman/5.0/en/join.html>.
+MySQL's doc on JOINs:
+L<http://dev.mysql.com/doc/refman/5.0/en/join.html>.
 
 =head1 DEFINING JOINS AND RELATIONSHIPS
 
@@ -42,7 +43,7 @@
 be defined in the L<ResultSource|DBIx::Class::Manual::Glossary/ResultSource> for the
 table. If the relationship needs to be accessed in both directions
 (i.e. Fetch all tracks of a CD, and fetch the CD data for a Track),
-then it needs to be defined in both tables.
+then it needs to be defined for both tables.
 
 For the CDs/Tracks example, that means writing, in C<MySchema::CD>:
 
@@ -68,14 +69,15 @@
 
 When performing either a L<search|DBIx::Class::ResultSet/search> or a
 L<find|DBIx::Class::ResultSet/find> operation, you can specify which
-C<relations> to also fetch data from (or sort by), using the
+C<relations> to also refine your results based on, using the
 L<join|DBIx::Class::ResultSet/join> attribute, like this:
 
   $schema->resultset('CD')->search(
-    { 'Title' => 'Funky CD' },
+    { 'Title' => 'Funky CD',
+      'tracks.Name' => { like => 'T%' }
+    },
     { join      => 'tracks',
-      '+select' => [ 'tracks.Name', 'tracks.Artist' ],
-      '+as'     => [ 'TrackName', 'ArtistName' ]
+      order_by  => ['tracks.id'],
     }
   );
 
@@ -84,18 +86,125 @@
 L<DBIx::Class::ResultSet/ATTRIBUTES>, but here's a quick break down:
 
 The first argument to search is a hashref of the WHERE attributes, in
-this case a simple restriction on the Title column. The second
-argument is a hashref of attributes to the search, '+select' adds
-extra columns to the select (from the joined table(s) or from
-calculations), and '+as' gives aliases to those fields.
+this case a restriction on the Title column in the CD table, and a
+restriction on the name of the track in the Tracks table, but ONLY for
+tracks actually related to the chosen CD(s). The second argument is a
+hashref of attributes to the search, the results will be returned
+sorted by the C<id> of the related tracks.
 
-'join' specifies which C<relationships> to include in the query. The
-distinction between C<relationships> and C<tables> is important here,
-only the C<relationship> names are valid.
+The special 'join' attribute specifies which C<relationships> to
+include in the query. The distinction between C<relationships> and
+C<tables> is important here, only the C<relationship> names are valid.
 
-This example should magically produce SQL like the second select in
-L</WHAT ARE JOINS> above.
+This slightly nonsensical example will produce SQL similar to:
 
+  SELECT cd.ID, cd.Title, cd.Year FROM CD cd JOIN Tracks tracks ON cd.ID = tracks.CDID WHERE cd.Title = 'Funky CD' AND tracks.Name LIKE 'T%' ORDER BY 'tracks.id';
+
+=head1 FETCHING RELATED DATA
+
+Another common use for joining to related tables is to fetch the data
+from both tables in one query, preventing extra round-trips to the
+database. See the example above in L</WHAT ARE JOINS>.
+
+Three techniques are described here. Of the three, only the
+C<prefetch> technique will deal sanely with fetching related objects
+over a C<has_many> relation. The others work fine for one-to-one
+relationships.
+
+=head2 Whole related objects
+
+To fetch entire related objects, e.g. CDs and all Track data, use the
+'prefetch' attribute:
+
+  $schema->resultset('CD')->search(
+    { 'Title' => 'Funky CD',
+    },
+    { prefetch      => 'tracks',
+      order_by  => ['tracks.id'],
+    }
+  );
+
+This will produce SQL similar to the following:
+
+  SELECT cd.ID, cd.Title, cd.Year, tracks.id, tracks.Name, tracks.Artist FROM CD JOIN Tracks ON CD.ID = tracks.CDID WHERE cd.Title = 'Funky CD' ORDER BY 'tracks.id';
+
+The syntax of 'prefetch' is the same as 'join' and implies the
+joining, so no need to use both together.
+
+=head2 Subset of related fields
+
+To fetch a subset of the related fields, the '+select' and '+as'
+attributes can be used. For example, if the CD data is required and
+just the track name from the Tracks table:
+
+  $schema->resultset('CD')->search(
+    { 'Title' => 'Funky CD',
+    },
+    { join      => 'tracks',
+      '+select' => ['tracks.Name'],
+      '+as'     => ['track_name'],
+      order_by  => ['tracks.id'],
+    }
+  );
+
+Which will produce the query:
+
+  SELECT cd.ID, cd.Title, cd.Year, tracks.Name FROM CD JOIN Tracks ON CD.ID = tracks.CDID WHERE cd.Title = 'Funky CD' ORDER BY 'tracks.id';
+
+Note that the '+as' does not produce an SQL 'AS' keyword in the
+output; see the L<DBIx::Class::Manual::FAQ> for an explanation.
+
+This type of column restriction has a downside: the resulting $row
+object will have no 'track_name' accessor:
+
+  while(my $row = $search_rs->next) {
+     print $row->track_name; ## ERROR
+  }
+
+Instead C<get_column> must be used:
+
+  while(my $row = $search_rs->next) {
+     print $row->get_column('track_name'); ## WORKS
+  }
+
+=head2 Incomplete related objects
+
+In rare circumstances, you may also wish to fetch related data as
+incomplete objects. The usual reason to do so is when the related table
+has a very large field you don't need for the current data
+output. This is better solved by storing that field in a separate
+table which you only join to when needed.
+
+To fetch an incomplete related object, supply the dotted notation to the '+as' attribute: 
+
+  $schema->resultset('CD')->search(
+    { 'Title' => 'Funky CD',
+    },
+    { join      => 'tracks',
+      '+select' => ['tracks.Name'],
+      '+as'     => ['tracks.Name'], 
+      order_by  => ['tracks.id'],
+    }
+  );
+
+Which will produce the same query as above:
+
+  SELECT cd.ID, cd.Title, cd.Year, tracks.Name FROM CD JOIN Tracks ON CD.ID = tracks.CDID WHERE cd.Title = 'Funky CD' ORDER BY 'tracks.id';
+
+Now you can access the result using the relationship accessor:
+
+  while(my $row = $search_rs->next) {
+     print $row->tracks->name; ## WORKS
+  }
+
+However, this will produce broken objects. If the C<tracks.id> column is
+not fetched, the object will not be usable for any operation other
+than reading its data. Use the L</Whole related objects> method as
+much as possible to avoid confusion in your code later.
+
+Broken means: Update will not work. Fetching other related objects
+will not work. Deleting the object will not work.
+
 =head1 COMPLEX JOINS AND STUFF
 
 =head2 Across multiple relations
@@ -114,14 +223,12 @@
   $schema->resultset('CD')->search(
     { 'Title' => 'Funky CD' },
     { join      => { 'tracks' => 'artist' },
-      '+select' => [ 'tracks.Name', 'artist.Artist' ],
-      '+as'     => [ 'TrackName', 'ArtistName' ]
     }
   );
 
 Which is:
 
-  SELECT me.ID, me.Title, me.Year, tracks.Name, artist.Artist FROM CD me JOIN Tracks tracks ON CD.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD';
+  SELECT me.ID, me.Title, me.Year FROM CD me JOIN Tracks tracks ON CD.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD';
 
 To perform joins using relations of the tables you are joining to, use
 a hashref to indicate the join depth. This can theoretically go as
@@ -147,12 +254,10 @@
     { 'Title' => 'Funky CD' },
     { join      => { 'tracks' => 'artist' },
       order_by  => [ 'tracks.Name', 'artist.Artist' ],
-      '+select' => [ 'tracks.Name', 'artist.Artist' ],
-      '+as'     => [ 'TrackName', 'ArtistName' ]
     }
   );
 
-  SELECT me.ID, me.Title, me.Year, tracks.Name, artist.Artist FROM CD me JOIN Tracks tracks ON CD.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD' ORDER BY tracks.Name, artist.Artist;
+  SELECT me.ID, me.Title, me.Year FROM CD me JOIN Tracks tracks ON CD.ID = tracks.CDID JOIN Artists artist ON tracks.ArtistID = artist.ID WHERE me.Title = 'Funky CD' ORDER BY tracks.Name, artist.Artist;
 
 This is essential if any of your tables have columns with the same names.
 

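A short usage sketch of the prefetch technique documented in the POD above (same hypothetical CD/Tracks schema as the examples): the related rows arrive in the one JOINed query, so iterating the has_many data issues no further SELECTs.

  my $rs = $schema->resultset('CD')->search(
    { 'me.Title' => 'Funky CD' },
    { prefetch => 'tracks' },
  );

  while (my $cd = $rs->next) {
    for my $track ($cd->tracks) {   # served from the prefetched data
      print $track->Name, "\n";     # no extra query per CD
    }
  }
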
Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Relationship.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Relationship.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Relationship.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -232,13 +232,13 @@
 
 =back
 
-Creates a one-to-many relationship, where the corresponding elements
-of the foreign class store the calling class's primary key in one (or
-more) of the foreign class columns. This relationship defaults to using
-the end of this classes namespace as the foreign key in C<$related_class>
-to resolve the join, unless C<$their_fk_column> specifies the foreign
-key column in C<$related_class> or C<cond> specifies a reference to a
-join condition hash.
+Creates a one-to-many relationship where the foreign class refers to
+this class's primary key. This relationship refers to zero or more
+records in the foreign table (i.e. a C<LEFT JOIN>). This relationship
+defaults to using the end of this class's namespace as the foreign key
+in C<$related_class> to resolve the join, unless C<$their_fk_column>
+specifies the foreign key column in C<$related_class> or C<cond>
+specifies a reference to a join condition hash.
 
 =over
 

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/ResultSet.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/ResultSet.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/ResultSet.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -1658,11 +1658,11 @@
 =cut
 
 sub populate {
-  my $self = shift @_;
-  my $data = ref $_[0][0] eq 'HASH'
-    ? $_[0] : ref $_[0][0] eq 'ARRAY' ? $self->_normalize_populate_args($_[0]) :
-    $self->throw_exception('Populate expects an arrayref of hashes or arrayref of arrayrefs');
+  my $self = shift;
 
+  # argument-munging cruft moved into a standalone method
+  my $data = $self->_normalize_populate_args(@_);
+
   if(defined wantarray) {
     my @created;
     foreach my $item (@$data) {
@@ -1715,11 +1715,17 @@
       }
     }
 
+    ## inherit the data locked in the conditions of the resultset
+    my ($rs_data) = $self->_merge_cond_with_data({});
+    delete @{$rs_data}{@columns};
+    my @inherit_cols = keys %$rs_data;
+    my @inherit_data = values %$rs_data;
+
     ## do bulk insert on current row
     $self->result_source->storage->insert_bulk(
       $self->result_source,
-      \@columns,
-      [ map { [ @$_{@columns} ] } @$data ],
+      [@columns, @inherit_cols],
+      [ map { [ @$_{@columns}, @inherit_data ] } @$data ],
     );
 
     ## do the has_many relationships
@@ -1748,26 +1754,27 @@
   }
 }
 
-=head2 _normalize_populate_args ($args)
 
-Private method used by L</populate> to normalize its incoming arguments.  Factored
-out in case you want to subclass and accept new argument structures to the
-L</populate> method.
+# populate() arguments went through several incarnations
+# What we ultimately support is AoH
+sub _normalize_populate_args {
+  my ($self, $arg) = @_;
 
-=cut
-
-sub _normalize_populate_args {
-  my ($self, $data) = @_;
-  my @names = @{shift(@$data)};
-  my @results_to_create;
-  foreach my $datum (@$data) {
-    my %result_to_create;
-    foreach my $index (0..$#names) {
-      $result_to_create{$names[$index]} = $$datum[$index];
+  if (ref $arg eq 'ARRAY') {
+    if (ref $arg->[0] eq 'HASH') {
+      return $arg;
     }
-    push @results_to_create, \%result_to_create;
+    elsif (ref $arg->[0] eq 'ARRAY') {
+      my @ret;
+      my @colnames = @{$arg->[0]};
+      foreach my $values (@{$arg}[1 .. $#$arg]) {
+        push @ret, { map { $colnames[$_] => $values->[$_] } (0 .. $#colnames) };
+      }
+      return \@ret;
+    }
   }
-  return \@results_to_create;
+
+  $self->throw_exception('Populate expects an arrayref of hashrefs or arrayref of arrayrefs');
 }
 
 =head2 pager
@@ -1856,46 +1863,66 @@
   $self->throw_exception( "new_result needs a hash" )
     unless (ref $values eq 'HASH');
 
-  my %new;
+  my ($merged_cond, $cols_from_relations) = $self->_merge_cond_with_data($values);
+
+  my %new = (
+    %$merged_cond,
+    @$cols_from_relations
+      ? (-cols_from_relations => $cols_from_relations)
+      : (),
+    -source_handle => $self->_source_handle,
+    -result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED
+  );
+
+  return $self->result_class->new(\%new);
+}
+
+# _merge_cond_with_data
+#
+# Takes a simple hash of K/V data and returns its copy merged with the
+# condition already present on the resultset. Additionally returns an
+# arrayref of value/condition names, which were inferred from related
+# objects (this is needed for in-memory related objects)
+sub _merge_cond_with_data {
+  my ($self, $data) = @_;
+
+  my (%new_data, @cols_from_relations);
+
   my $alias = $self->{attrs}{alias};
 
-  if (
-    defined $self->{cond}
-    && $self->{cond} eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION
-  ) {
-    %new = %{ $self->{attrs}{related_objects} || {} };  # nothing might have been inserted yet
-    $new{-from_resultset} = [ keys %new ] if keys %new;
-  } else {
+  if (! defined $self->{cond}) {
+    # just massage $data below
+  }
+  elsif ($self->{cond} eq $DBIx::Class::ResultSource::UNRESOLVABLE_CONDITION) {
+    %new_data = %{ $self->{attrs}{related_objects} || {} };  # nothing might have been inserted yet
+    @cols_from_relations = keys %new_data;
+  }
+  elsif (ref $self->{cond} ne 'HASH') {
     $self->throw_exception(
-      "Can't abstract implicit construct, condition not a hash"
-    ) if ($self->{cond} && !(ref $self->{cond} eq 'HASH'));
-
-    my $collapsed_cond = (
-      $self->{cond}
-        ? $self->_collapse_cond($self->{cond})
-        : {}
+      "Can't abstract implicit construct, resultset condition not a hash"
     );
-
+  }
+  else {
     # precedence must be given to passed values over values inherited from
     # the cond, so the order here is important.
-    my %implied =  %{$self->_remove_alias($collapsed_cond, $alias)};
-    while( my($col,$value) = each %implied ){
-      if(ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '='){
-        $new{$col} = $value->{'='};
+    my $collapsed_cond = $self->_collapse_cond($self->{cond});
+    my %implied = %{$self->_remove_alias($collapsed_cond, $alias)};
+
+    while ( my($col, $value) = each %implied ) {
+      if (ref($value) eq 'HASH' && keys(%$value) && (keys %$value)[0] eq '=') {
+        $new_data{$col} = $value->{'='};
         next;
       }
-      $new{$col} = $value if $self->_is_deterministic_value($value);
+      $new_data{$col} = $value if $self->_is_deterministic_value($value);
     }
   }
 
-  %new = (
-    %new,
-    %{ $self->_remove_alias($values, $alias) },
-    -source_handle => $self->_source_handle,
-    -result_source => $self->result_source, # DO NOT REMOVE THIS, REQUIRED
+  %new_data = (
+    %new_data,
+    %{ $self->_remove_alias($data, $alias) },
   );
 
-  return $self->result_class->new(\%new);
+  return (\%new_data, \@cols_from_relations);
 }
 
 # _is_deterministic_value
@@ -2492,14 +2519,13 @@
         "' has no such relationship $rel")
       unless $rel_info;
 
-    my ($from,$seen) = $self->_chain_relationship($rel);
+    my $attrs = $self->_chain_relationship($rel);
 
-    my $join_count = $seen->{$rel};
+    my $join_count = $attrs->{seen_join}{$rel};
     my $alias = ($join_count > 1 ? join('_', $rel, $join_count) : $rel);
 
     #XXX - temp fix for result_class bug. There likely is a more elegant fix -groditi
-    my %attrs = %{$self->{attrs}||{}};
-    delete @attrs{qw(result_class alias)};
+    delete @{$attrs}{qw(result_class alias)};
 
     my $new_cache;
 
@@ -2520,20 +2546,14 @@
       # to work sanely (e.g. RestrictWithObject wants to be able to add
       # extra query restrictions, and these may need to be $alias.)
 
-      my $attrs = $rel_source->resultset_attributes;
-      local $attrs->{alias} = $alias;
+      my $rel_attrs = $rel_source->resultset_attributes;
+      local $rel_attrs->{alias} = $alias;
 
       $rel_source->resultset
                  ->search_rs(
                      undef, {
-                       %attrs,
-                       join => undef,
-                       prefetch => undef,
-                       select => undef,
-                       as => undef,
-                       where => $self->{cond},
-                       seen_join => $seen,
-                       from => $from,
+                       %$attrs,
+                       where => $attrs->{where},
                    });
     };
     $new->set_cache($new_cache) if $new_cache;
@@ -2591,37 +2611,58 @@
 # with a relation_chain_depth less than the depth of the
 # current prefetch is not considered)
 #
-# The increments happen in 1/2s to make it easier to correlate the
-# join depth with the join path. An integer means a relationship
-# specified via a search_related, whereas a fraction means an added
-# join/prefetch via attributes
+# The increments happen twice per join. An even number means a
+# relationship specified via a search_related, whereas an odd
+# number indicates a join/prefetch added via attributes
+#
+# Also this code will wrap the current resultset (the one we
+# chain to) in a subselect IFF it contains limiting attributes
 sub _chain_relationship {
   my ($self, $rel) = @_;
   my $source = $self->result_source;
-  my $attrs = $self->{attrs};
+  my $attrs = { %{$self->{attrs}||{}} };
 
-  my $from = [ @{
-      $attrs->{from}
-        ||
-      [{
-        -source_handle => $source->handle,
-        -alias => $attrs->{alias},
-        $attrs->{alias} => $source->from,
-      }]
-  }];
+  # we need to take the prefetch attrs into account before we
+  # ->_resolve_join as otherwise they get lost - captainL
+  my $join = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} );
 
-  my $seen = { %{$attrs->{seen_join} || {} } };
-  my $jpath = ($attrs->{seen_join} && keys %{$attrs->{seen_join}})
+  delete @{$attrs}{qw/join prefetch collapse select as columns +select +as +columns/};
+
+  my $seen = { %{ (delete $attrs->{seen_join}) || {} } };
+
+  my $from;
+  my @force_subq_attrs = qw/offset rows group_by having/;
+
+  if (
+    ($attrs->{from} && ref $attrs->{from} ne 'ARRAY')
+      ||
+    $self->_has_resolved_attr (@force_subq_attrs)
+  ) {
+    $from = [{
+      -source_handle => $source->handle,
+      -alias => $attrs->{alias},
+      $attrs->{alias} => $self->as_query,
+    }];
+    delete @{$attrs}{@force_subq_attrs, 'where'};
+    $seen->{-relation_chain_depth} = 0;
+  }
+  elsif ($attrs->{from}) {  #shallow copy suffices
+    $from = [ @{$attrs->{from}} ];
+  }
+  else {
+    $from = [{
+      -source_handle => $source->handle,
+      -alias => $attrs->{alias},
+      $attrs->{alias} => $source->from,
+    }];
+  }
+
+  my $jpath = ($seen->{-relation_chain_depth})
     ? $from->[-1][0]{-join_path}
     : [];
 
-
-  # we need to take the prefetch the attrs into account before we
-  # ->_resolve_join as otherwise they get lost - captainL
-  my $merged = $self->_merge_attr( $attrs->{join}, $attrs->{prefetch} );
-
   my @requested_joins = $source->_resolve_join(
-    $merged,
+    $join,
     $attrs->{alias},
     $seen,
     $jpath,
@@ -2629,7 +2670,7 @@
 
   push @$from, @requested_joins;
 
-  $seen->{-relation_chain_depth} += 0.5;
+  $seen->{-relation_chain_depth}++;
 
   # if $self already had a join/prefetch specified on it, the requested
   # $rel might very well be already included. What we do in this case
@@ -2641,7 +2682,7 @@
   # we consider the last one thus reverse
   for my $j (reverse @requested_joins) {
     if ($rel eq $j->[0]{-join_path}[-1]) {
-      $j->[0]{-relation_chain_depth} += 0.5;
+      $j->[0]{-relation_chain_depth}++;
       $already_joined++;
       last;
     }
@@ -2651,7 +2692,7 @@
 #  for my $j (reverse @$from) {
 #    next unless ref $j eq 'ARRAY';
 #    if ($j->[0]{-join_path} && $j->[0]{-join_path}[-1] eq $rel) {
-#      $j->[0]{-relation_chain_depth} += 0.5;
+#      $j->[0]{-relation_chain_depth}++;
 #      $already_joined++;
 #      last;
 #    }
@@ -2666,9 +2707,9 @@
     );
   }
 
-  $seen->{-relation_chain_depth} += 0.5;
+  $seen->{-relation_chain_depth}++;
 
-  return ($from,$seen);
+  return {%$attrs, from => $from, seen_join => $seen};
 }
 
 # too many times we have to do $attrs = { %{$self->_resolved_attrs} }
@@ -2882,8 +2923,8 @@
 
   my $cur_depth = $seen->{-relation_chain_depth} || 0;
 
-  if (int ($cur_depth) != $cur_depth) {
-    $self->throw_exception ("-relation_chain_depth is not an integer, something went horribly wrong ($cur_depth)");
+  if ($cur_depth % 2) {
+    $self->throw_exception ("-relation_chain_depth is not even, something went horribly wrong ($cur_depth)");
   }
 
   for my $j (@$fromspec) {
@@ -2894,7 +2935,7 @@
     my $jpath = $j->[0]{-join_path};
 
     my $p = $paths;
-    $p = $p->{$_} ||= {} for @{$jpath}[$cur_depth .. $#$jpath];
+    $p = $p->{$_} ||= {} for @{$jpath}[$cur_depth/2 .. $#$jpath]; #only even depths are actual jpath boundaries
     push @{$p->{-join_aliases} }, $j->[0]{-alias};
   }
 

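The reworked _normalize_populate_args() above still accepts both historical argument styles; a sketch of the two equivalent call forms (hypothetical Artist source with name/rank columns):

  # arrayref of hashrefs - the canonical form
  $schema->resultset('Artist')->populate([
    { name => 'Some New Guy',    rank => 1 },
    { name => 'Another New Guy', rank => 2 },
  ]);

  # arrayref of arrayrefs - the first element names the columns
  $schema->resultset('Artist')->populate([
    [ qw/name rank/ ],
    [ 'Some New Guy',    1 ],
    [ 'Another New Guy', 2 ],
  ]);

Anything else now throws 'Populate expects an arrayref of hashrefs or arrayref of arrayrefs'.
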
Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Row.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Row.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Row.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -155,7 +155,7 @@
     $new->result_source($source);
   }
 
-  if (my $related = delete $attrs->{-from_resultset}) {
+  if (my $related = delete $attrs->{-cols_from_relations}) {
     @{$new->{_ignore_at_insert}={}}{@$related} = ();
   }
 
@@ -751,10 +751,27 @@
 
 sub get_inflated_columns {
   my $self = shift;
-  return map {
-    my $accessor = $self->column_info($_)->{'accessor'} || $_;
-    ($_ => $self->$accessor);
-  } grep $self->has_column_loaded($_), $self->columns;
+
+  my %loaded_colinfo = (map
+    { $_ => $self->column_info($_) }
+    (grep { $self->has_column_loaded($_) } $self->columns)
+  );
+
+  my %inflated;
+  for my $col (keys %loaded_colinfo) {
+    if (exists $loaded_colinfo{$col}{accessor}) {
+      my $acc = $loaded_colinfo{$col}{accessor};
+      if (defined $acc) {
+        $inflated{$col} = $self->$acc;
+      }
+    }
+    else {
+      $inflated{$col} = $self->$col;
+    }
+  }
+
+  # return all loaded columns with the inflations overlayed on top
+  return ($self->get_columns, %inflated);
 }
 
 =head2 set_column

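With the get_inflated_columns() rework above, the method returns every loaded column (the same key set as get_columns()) with inflated values overlaid wherever an accessor exists, so columns materialized via +select/+as, which get no generated accessor, are no longer dropped. A sketch against a hypothetical CD source:

  my $cd = $schema->resultset('CD')->search(
    {},
    { join      => 'tracks',
      '+select' => ['tracks.title'],
      '+as'     => ['track_title'],
    },
  )->first;

  my %data = $cd->get_inflated_columns;
  # %data has the same keys as $cd->get_columns - including
  # 'track_title' - with e.g. DateTime objects substituted for any
  # column that defines an inflation
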
Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/SQLAHacks.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/SQLAHacks.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/SQLAHacks.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -9,6 +9,7 @@
 use strict;
 use warnings;
 use Carp::Clan qw/^DBIx::Class|^SQL::Abstract/;
+use Sub::Name();
 
 BEGIN {
   # reinstall the carp()/croak() functions imported into SQL::Abstract
@@ -18,17 +19,15 @@
   for my $f (qw/carp croak/) {
 
     my $orig = \&{"SQL::Abstract::$f"};
-    *{"SQL::Abstract::$f"} = sub {
-
-      local $Carp::CarpLevel = 1;   # even though Carp::Clan ignores this, $orig will not
-
-      if (Carp::longmess() =~ /DBIx::Class::SQLAHacks::[\w]+ .+? called \s at/x) {
-        __PACKAGE__->can($f)->(@_);
-      }
-      else {
-        $orig->(@_);
-      }
-    }
+    *{"SQL::Abstract::$f"} = Sub::Name::subname "SQL::Abstract::$f" =>
+      sub {
+        if (Carp::longmess() =~ /DBIx::Class::SQLAHacks::[\w]+ .+? called \s at/x) {
+          __PACKAGE__->can($f)->(@_);
+        }
+        else {
+          goto $orig;
+        }
+      };
   }
 }
 

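The Sub::Name invocations added above give each generated closure a real symbol name, so Carp traces and profilers report it instead of __ANON__. A self-contained sketch of the idiom (hypothetical package and sub name):

  use strict;
  use warnings;
  use Sub::Name ();
  use Carp ();

  my $name = 'My::Generated::hello';
  {
    no strict 'refs';
    *{$name} = Sub::Name::subname $name => sub {
      Carp::cluck('called');  # the frame reads My::Generated::hello,
                              # not My::Generated::__ANON__
    };
  }
  My::Generated::hello();
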
Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Schema/Versioned.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Schema/Versioned.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Schema/Versioned.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -472,9 +472,13 @@
   my ($self, $args) = @_;
 
   $args = {} unless $args;
+
   $self->{vschema} = DBIx::Class::Version->connect(@{$self->storage->connect_info()});
   my $vtable = $self->{vschema}->resultset('Table');
 
+  # useful when connecting from scripts etc
+  return if ($args->{ignore_version} || ($ENV{DBIC_NO_VERSION_CHECK} && !exists $args->{ignore_version}));
+
   # check for legacy versions table and move to new if exists
   my $vschema_compat = DBIx::Class::VersionCompat->connect(@{$self->storage->connect_info()});
   unless ($self->_source_exists($vtable)) {
@@ -486,8 +490,6 @@
     }
   }
 
-  # useful when connecting from scripts etc
-  return if ($args->{ignore_version} || ($ENV{DBIC_NO_VERSION_CHECK} && !exists $args->{ignore_version}));
   my $pversion = $self->get_db_version();
 
   if($pversion eq $self->schema_version)

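With the early return above, an opted-out connection no longer probes for the legacy versions table at all. A sketch of the two opt-out routes (hypothetical DSN):

  # per connection: pass ignore_version in the attribute hashref
  my $schema = MySchema->connect(
    'dbi:SQLite:app.db', undef, undef,
    { ignore_version => 1 },
  );

  # globally, e.g. for ad-hoc scripts; an explicit ignore_version
  # setting still takes precedence over the environment variable
  $ENV{DBIC_NO_VERSION_CHECK} = 1;
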
Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Schema.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Schema.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Schema.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -406,12 +406,10 @@
 
 Set the storage class that will be instantiated when L</connect> is called.
 If the classname starts with C<::>, the prefix C<DBIx::Class::Storage> is
-assumed by L</connect>.  
+assumed by L</connect>.
 
 You want to use this to set subclasses of L<DBIx::Class::Storage::DBI>
-in cases where the appropriate subclass is not autodetected, such as
-when dealing with MSSQL via L<DBD::Sybase>, in which case you'd set it
-to C<::DBI::Sybase::MSSQL>.
+in cases where the appropriate subclass is not autodetected.
 
 If your storage type requires instantiation arguments, those are
 defined as a second argument in the form of a hashref and the entire
@@ -631,13 +629,13 @@
 This interface is preferred over using the individual methods L</txn_begin>,
 L</txn_commit>, and L</txn_rollback> below.
 
-WARNING: If you are connected with C<AutoCommit => 0> the transaction is
+WARNING: If you are connected with C<< AutoCommit => 0 >> the transaction is
 considered nested, and you will still need to call L</txn_commit> to write your
-changes when appropriate. You will also want to connect with C<auto_savepoint =>
-1> to get partial rollback to work, if the storage driver for your database
+changes when appropriate. You will also want to connect with C<< auto_savepoint =>
+1 >> to get partial rollback to work, if the storage driver for your database
 supports it.
 
-Connecting with C<AutoCommit => 1> is recommended.
+Connecting with C<< AutoCommit => 1 >> is recommended.
 
 =cut
 
@@ -910,7 +908,7 @@
     no strict 'refs';
     no warnings 'redefine';
     foreach my $meth (qw/class source resultset/) {
-      *{"${target}::${meth}"} =
+      *{"${target}::${meth}"} = Sub::Name::subname "${target}::${meth}" =>
         sub { shift->schema->$meth(@_) };
     }
   }
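
A sketch of the connection settings the reworded warning above refers to
(MyApp::Schema and the credentials are placeholders):

  my $schema = MyApp::Schema->connect($dsn, $user, $pass, {
    AutoCommit     => 1,  # recommended
    auto_savepoint => 1,  # nested txn_do gets partial rollback where supported
  });

  $schema->txn_do(sub {
    $schema->txn_do(sub {
      # with auto_savepoint, a failure here rolls back to a savepoint
      # rather than aborting the whole outer transaction
    });
  });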

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/ADO/Microsoft_SQL_Server.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -18,7 +18,7 @@
 
 =head1 NAME
 
-DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server - Support for Microsoft
+DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server - Support for Microsoft
 SQL Server via DBD::ADO
 
 =head1 SYNOPSIS

Copied: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm (from rev 7923, DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/NoBindVars.pm)
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/ASE/NoBindVars.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -0,0 +1,102 @@
+package DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars;
+
+use base qw/
+  DBIx::Class::Storage::DBI::NoBindVars
+  DBIx::Class::Storage::DBI::Sybase::ASE
+/;
+use mro 'c3';
+use List::Util ();
+use Scalar::Util ();
+
+sub _init {
+  my $self = shift;
+  $self->disable_sth_caching(1);
+  $self->_identity_method('@@IDENTITY');
+  $self->next::method (@_);
+}
+
+sub _fetch_identity_sql { 'SELECT ' . $_[0]->_identity_method }
+
+my $number = sub { Scalar::Util::looks_like_number($_[0]) };
+
+my $decimal = sub { $_[0] =~ /^ [-+]? \d+ (?:\.\d*)? \z/x };
+
+my %noquote = (
+    int => sub { $_[0] =~ /^ [-+]? \d+ \z/x },
+    bit => sub { $_[0] =~ /^[01]\z/ },
+    money => sub { $_[0] =~ /^\$ \d+ (?:\.\d*)? \z/x },
+    float => $number,
+    real => $number,
+    double => $number,
+    decimal => $decimal,
+    numeric => $decimal,
+);
+
+sub interpolate_unquoted {
+  my $self = shift;
+  my ($type, $value) = @_;
+
+  return $self->next::method(@_) if not defined $value or not defined $type;
+
+  if (my $key = List::Util::first { $type =~ /$_/i } keys %noquote) {
+    return 1 if $noquote{$key}->($value);
+  }
+  elsif ($self->is_datatype_numeric($type) && $number->($value)) {
+    return 1;
+  }
+
+  return $self->next::method(@_);
+}
+
+sub _prep_interpolated_value {
+  my ($self, $type, $value) = @_;
+
+  if ($type =~ /money/i && defined $value) {
+    # prepend a \$ unless the value already starts with one
+    $value =~ s/^ (?! \$) /\$/x;
+  }
+
+  return $value;
+}
+
+1;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars - Storage::DBI subclass for
+Sybase ASE without placeholder support
+
+=head1 DESCRIPTION
+
+If you're using this driver, then your version of Sybase, or the libraries you
+use to connect to it, do not support placeholders.
+
+You can also enable this driver explicitly using:
+
+  my $schema = SchemaClass->clone;
+  $schema->storage_type('::DBI::Sybase::ASE::NoBindVars');
+  $schema->connect($dsn, $user, $pass, \%opts);
+
+See the discussion in L<< DBD::Sybase/Using ? Placeholders & bind parameters to
+$sth->execute >> for details on the pros and cons of using placeholders.
+
+One advantage of not using placeholders is that C<select @@identity> will work
+for obtaining the last insert id of an C<IDENTITY> column, instead of having to
+do C<select max(col)> in a transaction as the base Sybase driver does.
+
+When using this driver, bind variables will be interpolated (properly quoted of
+course) into the SQL query itself, without using placeholders.
+
+The caching of prepared statements is also explicitly disabled, as the
+interpolation renders it useless.
+
+=head1 AUTHORS
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+# vim:sts=2 sw=2:
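
To see the %noquote checks above in action outside the storage class, here is
a standalone snippet using the same checker shapes (it mirrors, but is not,
the module's code):

  use strict;
  use warnings;

  my $int   = sub { $_[0] =~ /^ [-+]? \d+ \z/x };
  my $money = sub { $_[0] =~ /^\$ \d+ (?:\.\d*)? \z/x };

  print "42 may be interpolated unquoted as int\n" if $int->('42');
  print "42.5 would be quoted for an int column\n" unless $int->('42.5');
  print "\$19.99 may stay unquoted as money\n"     if $money->('$19.99');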

Copied: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm (from rev 7923, DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase.pm)
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -0,0 +1,1132 @@
+package DBIx::Class::Storage::DBI::Sybase::ASE;
+
+use strict;
+use warnings;
+
+use base qw/
+    DBIx::Class::Storage::DBI::Sybase
+    DBIx::Class::Storage::DBI::AutoCast
+/;
+use mro 'c3';
+use Carp::Clan qw/^DBIx::Class/;
+use Scalar::Util();
+use List::Util();
+use Sub::Name();
+use Data::Dumper::Concise();
+
+__PACKAGE__->mk_group_accessors('simple' =>
+    qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
+       _bulk_storage _is_bulk_storage _began_bulk_work
+       _bulk_disabled_due_to_coderef_connect_info_warned
+       _identity_method/
+);
+
+my @also_proxy_to_extra_storages = qw/
+  connect_call_set_auto_cast auto_cast connect_call_blob_setup
+  connect_call_datetime_setup
+
+  disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
+  auto_savepoint unsafe cursor_class debug debugobj schema
+/;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase::ASE - Sybase ASE SQL Server support for
+DBIx::Class
+
+=head1 SYNOPSIS
+
+This subclass supports L<DBD::Sybase> for real (non-Microsoft) Sybase databases.
+
+=head1 DESCRIPTION
+
+If your version of Sybase does not support placeholders, then your storage will
+be reblessed to L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
+You can also enable that driver explicitly, see the documentation for more
+details.
+
+With this driver there is unfortunately no way to get the C<last_insert_id>
+without doing a C<SELECT MAX(col)>. This is done safely in a transaction
+(locking the table.) See L</INSERTS WITH PLACEHOLDERS>.
+
+A recommended L<DBIx::Class::Storage::DBI/connect_info> setting:
+
+  on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]]
+
+=head1 METHODS
+
+=cut
+
+sub _rebless {
+  my $self = shift;
+
+  my $no_bind_vars = __PACKAGE__ . '::NoBindVars';
+
+  if ($self->using_freetds) {
+    carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
+
+You are using FreeTDS with Sybase.
+
+We will do our best to support this configuration, but please consider this
+support experimental.
+
+TEXT/IMAGE columns will definitely not work.
+
+You are encouraged to recompile DBD::Sybase with the Sybase Open Client libraries
+instead.
+
+See perldoc DBIx::Class::Storage::DBI::Sybase::ASE for more details.
+
+To turn off this warning set the DBIC_SYBASE_FREETDS_NOWARN environment
+variable.
+EOF
+
+    if (not $self->_typeless_placeholders_supported) {
+      if ($self->_placeholders_supported) {
+        $self->auto_cast(1);
+      }
+      else {
+        $self->ensure_class_loaded($no_bind_vars);
+        bless $self, $no_bind_vars;
+        $self->_rebless;
+      }
+    }
+  }
+
+  elsif (not $self->_get_dbh->{syb_dynamic_supported}) {
+    # not necessarily FreeTDS, but no placeholders nevertheless
+    $self->ensure_class_loaded($no_bind_vars);
+    bless $self, $no_bind_vars;
+    $self->_rebless;
+  }
+  # this is highly unlikely, but we check just in case
+  elsif (not $self->_typeless_placeholders_supported) {
+    $self->auto_cast(1);
+  }
+}
+
+sub _init {
+  my $self = shift;
+  $self->_set_max_connect(256);
+
+# create storage for insert/(update blob) transactions,
+# unless this is that storage
+  return if $self->_is_extra_storage;
+
+  my $writer_storage = (ref $self)->new;
+
+  $writer_storage->_is_extra_storage(1);
+  $writer_storage->connect_info($self->connect_info);
+  $writer_storage->auto_cast($self->auto_cast);
+
+  $self->_writer_storage($writer_storage);
+
+# create a bulk storage unless connect_info is a coderef
+  return if ref($self->_dbi_connect_info->[0]) eq 'CODE';
+
+  my $bulk_storage = (ref $self)->new;
+
+  $bulk_storage->_is_extra_storage(1);
+  $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
+  $bulk_storage->connect_info($self->connect_info);
+
+# this is why: the bulkLogin attribute must be appended to the DSN string
+  $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
+
+  $self->_bulk_storage($bulk_storage);
+}
+
+for my $method (@also_proxy_to_extra_storages) {
+  no strict 'refs';
+  no warnings 'redefine';
+
+  my $replaced = __PACKAGE__->can($method);
+
+  *{$method} = Sub::Name::subname $method => sub {
+    my $self = shift;
+    $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
+    $self->_bulk_storage->$replaced(@_)   if $self->_bulk_storage;
+    return $self->$replaced(@_);
+  };
+}
+
+sub disconnect {
+  my $self = shift;
+
+# Even though we call $sth->finish for uses of the bulk API, there's still an
+# "active statement" warning on disconnect, which we throw away here.
+# This is due to the bug described in insert_bulk.
+# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
+  local $SIG{__WARN__} = sub {
+    warn $_[0] unless $_[0] =~ /active statement/i;
+  } if $self->_is_bulk_storage;
+
+# so that next transaction gets a dbh
+  $self->_began_bulk_work(0) if $self->_is_bulk_storage;
+
+  $self->next::method;
+}
+
+# Set up session settings for Sybase databases for the connection.
+#
+# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
+# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
+# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
+# only want when AutoCommit is off.
+#
+# Also SET TEXTSIZE for FreeTDS because LongReadLen doesn't work.
+sub _run_connection_actions {
+  my $self = shift;
+
+  if ($self->_is_bulk_storage) {
+# this should be cleared on every reconnect
+    $self->_began_bulk_work(0);
+    return;
+  }
+
+  if (not $self->using_freetds) {
+    $self->_dbh->{syb_chained_txn} = 1;
+  } else {
+    # based on LongReadLen in connect_info
+    $self->set_textsize;
+
+    if ($self->_dbh_autocommit) {
+      $self->_dbh->do('SET CHAINED OFF');
+    } else {
+      $self->_dbh->do('SET CHAINED ON');
+    }
+  }
+
+  $self->next::method(@_);
+}
+
+=head2 connect_call_blob_setup
+
+Used as:
+
+  on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ]
+
+Does C<< $dbh->{syb_binary_images} = 1; >> to return C<IMAGE> data as raw binary
+instead of as a hex string.
+
+Recommended.
+
+Also sets the C<log_on_update> value for blob write operations. The default is
+C<1>, but C<0> is better if your database is configured for it.
+
+See
+L<DBD::Sybase/Handling_IMAGE/TEXT_data_with_syb_ct_get_data()/syb_ct_send_data()>.
+
+=cut
+
+sub connect_call_blob_setup {
+  my $self = shift;
+  my %args = @_;
+  my $dbh = $self->_dbh;
+  $dbh->{syb_binary_images} = 1;
+
+  $self->_blob_log_on_update($args{log_on_update})
+    if exists $args{log_on_update};
+}
+
+sub _is_lob_type {
+  my $self = shift;
+  my $type = shift;
+  $type && $type =~ /(?:text|image|lob|bytea|binary|memo)/i;
+}
+
+sub _is_lob_column {
+  my ($self, $source, $column) = @_;
+
+  return $self->_is_lob_type($source->column_info($column)->{data_type});
+}
+
+sub _prep_for_execute {
+  my $self = shift;
+  my ($op, $extra_bind, $ident, $args) = @_;
+
+  my ($sql, $bind) = $self->next::method (@_);
+
+  my $table = Scalar::Util::blessed($ident) ? $ident->from : $ident;
+
+  my $bind_info = $self->_resolve_column_info(
+    $ident, [map $_->[0], @{$bind}]
+  );
+  my $bound_identity_col = List::Util::first
+    { $bind_info->{$_}{is_auto_increment} }
+    (keys %$bind_info)
+  ;
+  my $identity_col = Scalar::Util::blessed($ident) &&
+    List::Util::first
+    { $ident->column_info($_)->{is_auto_increment} }
+    $ident->columns
+  ;
+
+  if (($op eq 'insert' && $bound_identity_col) ||
+      ($op eq 'update' && exists $args->[0]{$identity_col})) {
+    $sql = join ("\n",
+      $self->_set_table_identity_sql($op => $table, 'on'),
+      $sql,
+      $self->_set_table_identity_sql($op => $table, 'off'),
+    );
+  }
+
+  if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
+      (not $self->{insert_bulk})) {
+    $sql =
+      "$sql\n" .
+      $self->_fetch_identity_sql($ident, $identity_col);
+  }
+
+  return ($sql, $bind);
+}
+
+sub _set_table_identity_sql {
+  my ($self, $op, $table, $on_off) = @_;
+
+  return sprintf 'SET IDENTITY_%s %s %s',
+    uc($op), $self->sql_maker->_quote($table), uc($on_off);
+}
+
+# Stolen from SQLT, with some modifications. This is a makeshift
+# solution before a sane type-mapping library is available, thus
+# the 'our' for easy overrides.
+our %TYPE_MAPPING  = (
+    number    => 'numeric',
+    money     => 'money',
+    varchar   => 'varchar',
+    varchar2  => 'varchar',
+    timestamp => 'datetime',
+    text      => 'varchar',
+    real      => 'double precision',
+    comment   => 'text',
+    bit       => 'bit',
+    tinyint   => 'smallint',
+    float     => 'double precision',
+    serial    => 'numeric',
+    bigserial => 'numeric',
+    boolean   => 'varchar',
+    long      => 'varchar',
+);
+
+sub _native_data_type {
+  my ($self, $type) = @_;
+
+  $type = lc $type;
+  $type =~ s/\s* identity//x;
+
+  return uc($TYPE_MAPPING{$type} || $type);
+}
+
+sub _fetch_identity_sql {
+  my ($self, $source, $col) = @_;
+
+  return sprintf ("SELECT MAX(%s) FROM %s",
+    map { $self->sql_maker->_quote ($_) } ($col, $source->from)
+  );
+}
+
+sub _execute {
+  my $self = shift;
+  my ($op) = @_;
+
+  my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);
+
+  if ($op eq 'insert') {
+    $self->_identity($sth->fetchrow_array);
+    $sth->finish;
+  }
+
+  return wantarray ? ($rv, $sth, @bind) : $rv;
+}
+
+sub last_insert_id { shift->_identity }
+
+# handles TEXT/IMAGE and transaction for last_insert_id
+sub insert {
+  my $self = shift;
+  my ($source, $to_insert) = @_;
+
+  my $identity_col = (List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns) || '';
+
+  # check for empty insert
+  # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
+  # try to insert explicit 'DEFAULT's instead (except for identity)
+  if (not %$to_insert) {
+    for my $col ($source->columns) {
+      next if $col eq $identity_col;
+      $to_insert->{$col} = \'DEFAULT';
+    }
+  }
+
+  my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
+
+  # do we need the horrific SELECT MAX(COL) hack?
+  my $dumb_last_insert_id =
+       $identity_col
+    && (not exists $to_insert->{$identity_col})
+    && ($self->_identity_method||'') ne '@@IDENTITY';
+
+  my $next = $self->next::can;
+
+  # we are already in a transaction, or there are no blobs
+  # and we don't need the PK - just (try to) do it
+  if ($self->{transaction_depth}
+        || (!$blob_cols && !$dumb_last_insert_id)
+  ) {
+    return $self->_insert (
+      $next, $source, $to_insert, $blob_cols, $identity_col
+    );
+  }
+
+  # otherwise use the _writer_storage to do the insert+transaction on another
+  # connection
+  my $guard = $self->_writer_storage->txn_scope_guard;
+
+  my $updated_cols = $self->_writer_storage->_insert (
+    $next, $source, $to_insert, $blob_cols, $identity_col
+  );
+
+  $self->_identity($self->_writer_storage->_identity);
+
+  $guard->commit;
+
+  return $updated_cols;
+}
+
+sub _insert {
+  my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;
+
+  my $updated_cols = $self->$next ($source, $to_insert);
+
+  my $final_row = {
+    ($identity_col ?
+      ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
+    %$to_insert,
+    %$updated_cols,
+  };
+
+  $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;
+
+  return $updated_cols;
+}
+
+sub update {
+  my $self = shift;
+  my ($source, $fields, $where, @rest) = @_;
+
+  my $wantarray = wantarray;
+
+  my $blob_cols = $self->_remove_blob_cols($source, $fields);
+
+  my $table = $source->name;
+
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
+
+  my $is_identity_update = $identity_col && defined $fields->{$identity_col};
+
+  return $self->next::method(@_) unless $blob_cols;
+
+# If there are any blobs in $where, Sybase will return a descriptive error
+# message.
+# XXX blobs can still be used with a LIKE query, and this should be handled.
+
+# update+blob update(s) done atomically on separate connection
+  $self = $self->_writer_storage;
+
+  my $guard = $self->txn_scope_guard;
+
+# First update the blob columns to be updated to '' (taken from $fields, where
+# it is originally put by _remove_blob_cols .)
+  my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
+
+# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
+
+  $self->next::method($source, \%blobs_to_empty, $where, @rest);
+
+# Now update the blobs before the other columns in case the update of other
+# columns makes the search condition invalid.
+  $self->_update_blobs($source, $blob_cols, $where);
+
+  my @res;
+  if (%$fields) {
+    if ($wantarray) {
+      @res    = $self->next::method(@_);
+    }
+    elsif (defined $wantarray) {
+      $res[0] = $self->next::method(@_);
+    }
+    else {
+      $self->next::method(@_);
+    }
+  }
+
+  $guard->commit;
+
+  return $wantarray ? @res : $res[0];
+}
+
+sub insert_bulk {
+  my $self = shift;
+  my ($source, $cols, $data) = @_;
+
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
+
+  my $is_identity_insert = (List::Util::first
+    { $_ eq $identity_col }
+    @{$cols}
+  ) ? 1 : 0;
+
+  my @source_columns = $source->columns;
+
+  my $use_bulk_api =
+    $self->_bulk_storage &&
+    $self->_get_dbh->{syb_has_blk};
+
+  if ((not $use_bulk_api)
+        &&
+      (ref($self->_dbi_connect_info->[0]) eq 'CODE')
+        &&
+      (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
+    carp <<'EOF';
+Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
+regular array inserts.
+EOF
+    $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
+  }
+
+  if (not $use_bulk_api) {
+    my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
+
+# _execute_array uses a txn anyway, but it ends too early in case we need to
+# select max(col) to get the identity for inserting blobs.
+    ($self, my $guard) = $self->{transaction_depth} == 0 ?
+      ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
+      :
+      ($self, undef);
+
+    local $self->{insert_bulk} = 1;
+
+    $self->next::method(@_);
+
+    if ($blob_cols) {
+      if ($is_identity_insert) {
+        $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
+      }
+      else {
+        my @cols_with_identities = (@$cols, $identity_col);
+
+        ## calculate identities
+        # XXX This assumes identities always increase by 1, which may or may not
+        # be true.
+        my ($last_identity) =
+          $self->_dbh->selectrow_array (
+            $self->_fetch_identity_sql($source, $identity_col)
+          );
+        my @identities = (($last_identity - @$data + 1) .. $last_identity);
+
+        my @data_with_identities = map [@$_, shift @identities], @$data;
+
+        $self->_insert_blobs_array (
+          $source, $blob_cols, \@cols_with_identities, \@data_with_identities
+        );
+      }
+    }
+
+    $guard->commit if $guard;
+
+    return;
+  }
+
+# otherwise, use the bulk API
+
+# rearrange @$data so that columns are in database order
+  my %orig_idx;
+  @orig_idx{@$cols} = 0..$#$cols;
+
+  my %new_idx;
+  @new_idx{@source_columns} = 0..$#source_columns;
+
+  my @new_data;
+  for my $datum (@$data) {
+    my $new_datum = [];
+    for my $col (@source_columns) {
+# identity data will be 'undef' if not $is_identity_insert
+# columns with defaults will also be 'undef'
+      $new_datum->[ $new_idx{$col} ] =
+        exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
+    }
+    push @new_data, $new_datum;
+  }
+
+# bcp identity index is 1-based
+  my $identity_idx = exists $new_idx{$identity_col} ?
+    $new_idx{$identity_col} + 1 : 0;
+
+## Set a client-side conversion error handler, straight from DBD::Sybase docs.
+# This ignores any data conversion errors detected by the client side libs, as
+# they are usually harmless.
+  my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
+    Sub::Name::subname insert_bulk => sub {
+      my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
+
+      return 1 if $errno == 36;
+
+      carp
+        "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
+        ($errmsg ? "\n$errmsg" : '') .
+        ($osmsg  ? "\n$osmsg"  : '')  .
+        ($blkmsg ? "\n$blkmsg" : '');
+
+      return 0;
+  });
+
+  eval {
+    my $bulk = $self->_bulk_storage;
+
+    my $guard = $bulk->txn_scope_guard;
+
+## XXX get this to work instead of our own $sth
+## will require SQLA or *Hacks changes for ordered columns
+#    $bulk->next::method($source, \@source_columns, \@new_data, {
+#      syb_bcp_attribs => {
+#        identity_flag   => $is_identity_insert,
+#        identity_column => $identity_idx,
+#      }
+#    });
+    my $sql = 'INSERT INTO ' .
+      $bulk->sql_maker->_quote($source->name) . ' (' .
+# colname list is ignored for BCP, but does no harm
+      (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
+      ' VALUES ('.  (join ', ', ('?') x @source_columns) . ')';
+
+## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
+## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
+## better yet the version above. Should be fixed in DBD::Sybase .
+    my $sth = $bulk->_get_dbh->prepare($sql,
+#      'insert', # op
+      {
+        syb_bcp_attribs => {
+          identity_flag   => $is_identity_insert,
+          identity_column => $identity_idx,
+        }
+      }
+    );
+
+    my @bind = do {
+      my $idx = 0;
+      map [ $_, $idx++ ], @source_columns;
+    };
+
+    $self->_execute_array(
+      $source, $sth, \@bind, \@source_columns, \@new_data, sub {
+        $guard->commit
+      }
+    );
+
+    $bulk->_query_end($sql);
+  };
+
+  my $exception = $@;
+  DBD::Sybase::set_cslib_cb($orig_cslib_cb);
+
+  if ($exception =~ /-Y option/) {
+    carp <<"EOF";
+
+Sybase bulk API operation failed due to character set incompatibility, reverting
+to regular array inserts:
+
+*** Try unsetting the LANG environment variable.
+
+$exception
+EOF
+    $self->_bulk_storage(undef);
+    unshift @_, $self;
+    goto \&insert_bulk;
+  }
+  elsif ($exception) {
+# rollback makes the bulkLogin connection unusable
+    $self->_bulk_storage->disconnect;
+    $self->throw_exception($exception);
+  }
+}
+
+sub _dbh_execute_array {
+  my ($self, $sth, $tuple_status, $cb) = @_;
+
+  my $rv = $self->next::method($sth, $tuple_status);
+  $cb->() if $cb;
+
+  return $rv;
+}
+
+# Make sure blobs are not bound as placeholders, and return any non-empty ones
+# as a hash.
+sub _remove_blob_cols {
+  my ($self, $source, $fields) = @_;
+
+  my %blob_cols;
+
+  for my $col (keys %$fields) {
+    if ($self->_is_lob_column($source, $col)) {
+      my $blob_val = delete $fields->{$col};
+      if (not defined $blob_val) {
+        $fields->{$col} = \'NULL';
+      }
+      else {
+        $fields->{$col} = \"''";
+        $blob_cols{$col} = $blob_val unless $blob_val eq '';
+      }
+    }
+  }
+
+  return %blob_cols ? \%blob_cols : undef;
+}
+
+# same for insert_bulk
+sub _remove_blob_cols_array {
+  my ($self, $source, $cols, $data) = @_;
+
+  my @blob_cols;
+
+  for my $i (0..$#$cols) {
+    my $col = $cols->[$i];
+
+    if ($self->_is_lob_column($source, $col)) {
+      for my $j (0..$#$data) {
+        my $blob_val = delete $data->[$j][$i];
+        if (not defined $blob_val) {
+          $data->[$j][$i] = \'NULL';
+        }
+        else {
+          $data->[$j][$i] = \"''";
+          $blob_cols[$j][$i] = $blob_val
+            unless $blob_val eq '';
+        }
+      }
+    }
+  }
+
+  return @blob_cols ? \@blob_cols : undef;
+}
+
+sub _update_blobs {
+  my ($self, $source, $blob_cols, $where) = @_;
+
+  my (@primary_cols) = $source->primary_columns;
+
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
+    unless @primary_cols;
+
+# check if we're updating a single row by PK
+  my $pk_cols_in_where = 0;
+  for my $col (@primary_cols) {
+    $pk_cols_in_where++ if defined $where->{$col};
+  }
+  my @rows;
+
+  if ($pk_cols_in_where == @primary_cols) {
+    my %row_to_update;
+    @row_to_update{@primary_cols} = @{$where}{@primary_cols};
+    @rows = \%row_to_update;
+  } else {
+    my $cursor = $self->select ($source, \@primary_cols, $where, {});
+    @rows = map {
+      my %row; @row{@primary_cols} = @$_; \%row
+    } $cursor->all;
+  }
+
+  for my $row (@rows) {
+    $self->_insert_blobs($source, $blob_cols, $row);
+  }
+}
+
+sub _insert_blobs {
+  my ($self, $source, $blob_cols, $row) = @_;
+  my $dbh = $self->_get_dbh;
+
+  my $table = $source->name;
+
+  my %row = %$row;
+  my (@primary_cols) = $source->primary_columns;
+
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
+    unless @primary_cols;
+
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
+    if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
+
+  for my $col (keys %$blob_cols) {
+    my $blob = $blob_cols->{$col};
+
+    my %where = map { ($_, $row{$_}) } @primary_cols;
+
+    my $cursor = $self->select ($source, [$col], \%where, {});
+    $cursor->next;
+    my $sth = $cursor->sth;
+
+    if (not $sth) {
+
+      $self->throw_exception(
+          "Could not find row in table '$table' for blob update:\n"
+        . Data::Dumper::Concise::Dumper (\%where)
+      );
+    }
+
+    eval {
+      do {
+        $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
+      } while $sth->fetch;
+
+      $sth->func('ct_prepare_send') or die $sth->errstr;
+
+      my $log_on_update = $self->_blob_log_on_update;
+      $log_on_update    = 1 if not defined $log_on_update;
+
+      $sth->func('CS_SET', 1, {
+        total_txtlen => length($blob),
+        log_on_update => $log_on_update
+      }, 'ct_data_info') or die $sth->errstr;
+
+      $sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr;
+
+      $sth->func('ct_finish_send') or die $sth->errstr;
+    };
+    my $exception = $@;
+    $sth->finish if $sth;
+    if ($exception) {
+      if ($self->using_freetds) {
+        $self->throw_exception (
+          'TEXT/IMAGE operation failed, probably because you are using FreeTDS: '
+          . $exception
+        );
+      } else {
+        $self->throw_exception($exception);
+      }
+    }
+  }
+}
+
+sub _insert_blobs_array {
+  my ($self, $source, $blob_cols, $cols, $data) = @_;
+
+  for my $i (0..$#$data) {
+    my $datum = $data->[$i];
+
+    my %row;
+    @row{ @$cols } = @$datum;
+
+    my %blob_vals;
+    for my $j (0..$#$cols) {
+      if (exists $blob_cols->[$i][$j]) {
+        $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
+      }
+    }
+
+    $self->_insert_blobs ($source, \%blob_vals, \%row);
+  }
+}
+
+=head2 connect_call_datetime_setup
+
+Used as:
+
+  on_connect_call => 'datetime_setup'
+
+In L<DBIx::Class::Storage::DBI/connect_info> to set:
+
+  $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
+  $dbh->do('set dateformat mdy');   # input fmt:  08/13/1979 18:08:55.080
+
+On connection for use with L<DBIx::Class::InflateColumn::DateTime>, using
+L<DateTime::Format::Sybase>, which you will need to install.
+
+This works for both C<DATETIME> and C<SMALLDATETIME> columns, although
+C<SMALLDATETIME> columns only have minute precision.
+
+=cut
+
+{
+  my $old_dbd_warned = 0;
+
+  sub connect_call_datetime_setup {
+    my $self = shift;
+    my $dbh = $self->_get_dbh;
+
+    if ($dbh->can('syb_date_fmt')) {
+      # amazingly, this works with FreeTDS
+      $dbh->syb_date_fmt('ISO_strict');
+    } elsif (not $old_dbd_warned) {
+      carp "Your DBD::Sybase is too old to support ".
+      "DBIx::Class::InflateColumn::DateTime, please upgrade!";
+      $old_dbd_warned = 1;
+    }
+
+    $dbh->do('SET DATEFORMAT mdy');
+
+    1;
+  }
+}
+
+sub datetime_parser_type { "DateTime::Format::Sybase" }
+
+# ->begin_work and such have no effect with FreeTDS but we run them anyway to
+# let the DBD keep any state it needs to.
+#
+# If they ever do start working, the extra statements will do no harm (because
+# Sybase supports nested transactions.)
+
+sub _dbh_begin_work {
+  my $self = shift;
+
+# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
+# TRAN once. However, we need to make sure there's a $dbh.
+  return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
+
+  $self->next::method(@_);
+
+  if ($self->using_freetds) {
+    $self->_get_dbh->do('BEGIN TRAN');
+  }
+
+  $self->_began_bulk_work(1) if $self->_is_bulk_storage;
+}
+
+sub _dbh_commit {
+  my $self = shift;
+  if ($self->using_freetds) {
+    $self->_dbh->do('COMMIT');
+  }
+  return $self->next::method(@_);
+}
+
+sub _dbh_rollback {
+  my $self = shift;
+  if ($self->using_freetds) {
+    $self->_dbh->do('ROLLBACK');
+  }
+  return $self->next::method(@_);
+}
+
+# savepoint support using ASE syntax
+
+sub _svp_begin {
+  my ($self, $name) = @_;
+
+  $self->_get_dbh->do("SAVE TRANSACTION $name");
+}
+
+# A new SAVE TRANSACTION with the same name releases the previous one.
+sub _svp_release { 1 }
+
+sub _svp_rollback {
+  my ($self, $name) = @_;
+
+  $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
+}
+
+1;
+
+=head1 Schema::Loader Support
+
+There is an experimental branch of L<DBIx::Class::Schema::Loader> that will
+allow you to dump a schema from most (if not all) versions of Sybase.
+
+It is available via subversion from:
+
+  http://dev.catalyst.perl.org/repos/bast/branches/DBIx-Class-Schema-Loader/current/
+
+=head1 FreeTDS
+
+This driver supports L<DBD::Sybase> compiled against FreeTDS
+(L<http://www.freetds.org/>) to the best of our ability, however it is
+recommended that you recompile L<DBD::Sybase> against the Sybase Open Client
+libraries. They are a part of the Sybase ASE distribution.
+
+The Open Client FAQ is here:
+L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.
+
+Sybase ASE for Linux (which comes with the Open Client libraries) may be
+downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
+
+To see if you're using FreeTDS, check C<< $schema->storage->using_freetds >>, or run:
+
+  perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
+
+Some versions of the libraries involved will not support placeholders, in which
+case the storage will be reblessed to
+L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
+
+In some configurations, placeholders will work but will throw implicit type
+conversion errors for anything that's not expecting a string. In such a case,
+the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
+set automatically. You can also enable it explicitly on connection with
+L<DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>. The type info
+for the C<CAST>s is taken from the L<DBIx::Class::ResultSource/data_type>
+definitions in your Result classes, and is mapped to a Sybase type (if it isn't
+already) using a mapping based on L<SQL::Translator>.
+
+In other configurations, placeholders will work just as they do with the Sybase
+Open Client libraries.
+
+Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
+
+=head1 INSERTS WITH PLACEHOLDERS
+
+With placeholders enabled, inserts are done in a transaction so that there are
+no concurrency issues with getting the inserted identity value using
+C<SELECT MAX(col)>, which is the only way to get the C<IDENTITY> value in this
+mode.
+
+In addition, they are done on a separate connection so that it's possible to
+have active cursors when doing an insert.
+
+When using C<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars> transactions
+are disabled, as there are no concurrency issues with C<SELECT @@IDENTITY> as
+it's a session variable.
+
+=head1 TRANSACTIONS
+
+Due to limitations of the TDS protocol, L<DBD::Sybase>, or both, you cannot
+begin a transaction while there are active cursors; nor can you use multiple
+active cursors within a transaction. An active cursor is, for example, a
+L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
+C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
+
+For example, this will not work:
+
+  $schema->txn_do(sub {
+    my $rs = $schema->resultset('Book');
+    while (my $row = $rs->next) {
+      $schema->resultset('MetaData')->create({
+        book_id => $row->id,
+        ...
+      });
+    }
+  });
+
+This won't either:
+
+  my $first_row = $large_rs->first;
+  $schema->txn_do(sub { ... });
+
+Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
+are not affected, as they are done on an extra database handle.
+
+Some workarounds:
+
+=over 4
+
+=item * use L<DBIx::Class::Storage::DBI::Replicated>
+
+=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>
+
+=item * load the data from your cursor with L<DBIx::Class::ResultSet/all>
+
+=back
+
+=head1 MAXIMUM CONNECTIONS
+
+The TDS protocol makes separate connections to the server for active statements
+in the background. By default the number of such connections is limited to 25,
+on both the client side and the server side.
+
+This is a bit too low for a complex L<DBIx::Class> application, so on connection
+the client side setting is set to C<256> (see L<DBD::Sybase/maxConnect>.) You
+can override it to whatever setting you like in the DSN.
+
+See
+L<http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.ase_15.0.sag1/html/sag1/sag1272.htm>
+for information on changing the setting on the server side.
+
+=head1 DATES
+
+See L</connect_call_datetime_setup> to setup date formats
+for L<DBIx::Class::InflateColumn::DateTime>.
+
+=head1 TEXT/IMAGE COLUMNS
+
+L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
+C<TEXT/IMAGE> columns.
+
+Setting C<< $dbh->{LongReadLen} >> will also not work with FreeTDS; use either:
+
+  $schema->storage->dbh->do("SET TEXTSIZE $bytes");
+
+or
+
+  $schema->storage->set_textsize($bytes);
+
+instead.
+
+However, the C<LongReadLen> you pass in
+L<DBIx::Class::Storage::DBI/connect_info> is used to execute the equivalent
+C<SET TEXTSIZE> command on connection.
+
+See L</connect_call_blob_setup> for a L<DBIx::Class::Storage::DBI/connect_info>
+setting you need to work with C<IMAGE> columns.
+
+=head1 BULK API
+
+The experimental L<DBD::Sybase> Bulk API support is used for
+L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
+on a separate connection.
+
+To use this feature effectively, use a large number of rows for each
+L<populate|DBIx::Class::ResultSet/populate> call, eg.:
+
+  while (my $rows = $data_source->get_100_rows()) {
+    $rs->populate($rows);
+  }
+
+B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
+calls in your C<Result> classes B<must> list columns in database order for this
+to work. Also, you may have to unset the C<LANG> environment variable before
+loading your app, if it doesn't match the character set of your database.
+
+When inserting IMAGE columns using this method, you'll need to use
+L</connect_call_blob_setup> as well.
+
+=head1 TODO
+
+=over
+
+=item *
+
+Transition to C<AutoCommit=0> (starting a transaction) mode by exhausting
+any active cursors, using eager cursors.
+
+=item *
+
+Real limits and limited counts using stored procedures deployed on startup.
+
+=item *
+
+Adaptive Server Anywhere (ASA) support, with possible SQLA::Limit support.
+
+=item *
+
+Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
+
+=item *
+
+insert_bulk using prepare_cached (see comments.)
+
+=back
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+# vim:sts=2 sw=2:
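
Pulling the settings documented above together, a connect_info sketch (the
schema class name and credentials are placeholders):

  my $schema = MyApp::Schema->connect($dsn, $user, $pass, {
    # executed as the equivalent SET TEXTSIZE command on connection
    LongReadLen     => 1024 * 1024,
    on_connect_call => [ ['datetime_setup'], ['blob_setup', log_on_update => 0] ],
  });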

Deleted: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/Common.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/Common.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/Common.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -1,111 +0,0 @@
-package DBIx::Class::Storage::DBI::Sybase::Common;
-
-use strict;
-use warnings;
-
-use base qw/DBIx::Class::Storage::DBI/;
-use mro 'c3';
-
-=head1 NAME
-
-DBIx::Class::Storage::DBI::Sybase::Common - Common functionality for drivers using
-DBD::Sybase
-
-=head1 DESCRIPTION
-
-This is the base class for L<DBIx::Class::Storage::DBI::Sybase> and
-L<DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server>. It provides some
-utility methods related to L<DBD::Sybase> and the supported functions of the
-database you are connecting to.
-
-=head1 METHODS
-
-=cut
-
-sub _ping {
-  my $self = shift;
-
-  my $dbh = $self->_dbh or return 0;
-
-  local $dbh->{RaiseError} = 1;
-  local $dbh->{PrintError} = 0;
-
-  if ($dbh->{syb_no_child_con}) {
-# ping is impossible with an active statement, we return false if so
-    my $ping = eval { $dbh->ping };
-    return $@ ? 0 : $ping;
-  }
-
-  eval {
-# XXX if the main connection goes stale, does opening another for this statement
-# really determine anything?
-    $dbh->do('select 1');
-  };
-
-  return $@ ? 0 : 1;
-}
-
-sub _set_max_connect {
-  my $self = shift;
-  my $val  = shift || 256;
-
-  my $dsn = $self->_dbi_connect_info->[0];
-
-  return if ref($dsn) eq 'CODE';
-
-  if ($dsn !~ /maxConnect=/) {
-    $self->_dbi_connect_info->[0] = "$dsn;maxConnect=$val";
-    my $connected = defined $self->_dbh;
-    $self->disconnect;
-    $self->ensure_connected if $connected;
-  }
-}
-
-=head2 using_freetds
-
-Whether or not L<DBD::Sybase> was compiled against FreeTDS. If false, it means
-the Sybase OpenClient libraries were used.
-
-=cut
-
-sub using_freetds {
-  my $self = shift;
-
-  return $self->_get_dbh->{syb_oc_version} =~ /freetds/i;
-}
-
-=head2 set_textsize
-
-When using FreeTDS and/or MSSQL, C<< $dbh->{LongReadLen} >> is not available,
-use this function instead. It does:
-
-  $dbh->do("SET TEXTSIZE $bytes");
-
-Takes the number of bytes, or uses the C<LongReadLen> value from your
-L<DBIx::Class/connect_info> if omitted, lastly falls back to the C<32768> which
-is the L<DBD::Sybase> default.
-
-=cut
-
-sub set_textsize {
-  my $self = shift;
-  my $text_size = shift ||
-    eval { $self->_dbi_connect_info->[-1]->{LongReadLen} } ||
-    32768; # the DBD::Sybase default
-
-  return unless defined $text_size;
-
-  $self->_dbh->do("SET TEXTSIZE $text_size");
-}
-
-1;
-
-=head1 AUTHORS
-
-See L<DBIx::Class/CONTRIBUTORS>.
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -4,7 +4,7 @@
 use warnings;
 
 use base qw/
-  DBIx::Class::Storage::DBI::Sybase::Common
+  DBIx::Class::Storage::DBI::Sybase
   DBIx::Class::Storage::DBI::MSSQL
 /;
 use mro 'c3';
@@ -20,13 +20,15 @@
   }
 }
 
-sub _init {
+sub _run_connection_actions {
   my $self = shift;
 
   # LongReadLen doesn't work with MSSQL through DBD::Sybase, and the default is
   # huge on some versions of SQL server and can cause memory problems, so we
-  # fix it up here (see Sybase/Common.pm)
+  # fix it up here (see ::DBI::Sybase.pm)
   $self->set_textsize;
+
+  $self->next::method(@_);
 }
 
 sub _dbh_begin_work {
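
Since set_textsize now lives in the consolidated ::DBI::Sybase base class, it
can still be called at runtime as before; a small sketch assuming an
already-connected $schema:

  $schema->storage->set_textsize(256 * 1024);  # issues SET TEXTSIZE 262144
  $schema->storage->set_textsize;  # falls back to LongReadLen, then 32768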

Deleted: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/NoBindVars.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/NoBindVars.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -1,102 +0,0 @@
-package DBIx::Class::Storage::DBI::Sybase::NoBindVars;
-
-use base qw/
-  DBIx::Class::Storage::DBI::NoBindVars
-  DBIx::Class::Storage::DBI::Sybase
-/;
-use mro 'c3';
-use List::Util ();
-use Scalar::Util ();
-
-sub _init {
-  my $self = shift;
-  $self->disable_sth_caching(1);
-  $self->_identity_method('@@IDENTITY');
-  $self->next::method (@_);
-}
-
-sub _fetch_identity_sql { 'SELECT ' . $_[0]->_identity_method }
-
-my $number = sub { Scalar::Util::looks_like_number($_[0]) };
-
-my $decimal = sub { $_[0] =~ /^ [-+]? \d+ (?:\.\d*)? \z/x };
-
-my %noquote = (
-    int => sub { $_[0] =~ /^ [-+]? \d+ \z/x },
-    bit => => sub { $_[0] =~ /^[01]\z/ },
-    money => sub { $_[0] =~ /^\$ \d+ (?:\.\d*)? \z/x },
-    float => $number,
-    real => $number,
-    double => $number,
-    decimal => $decimal,
-    numeric => $decimal,
-);
-
-sub interpolate_unquoted {
-  my $self = shift;
-  my ($type, $value) = @_;
-
-  return $self->next::method(@_) if not defined $value or not defined $type;
-
-  if (my $key = List::Util::first { $type =~ /$_/i } keys %noquote) {
-    return 1 if $noquote{$key}->($value);
-  }
-  elsif ($self->is_datatype_numeric($type) && $number->($value)) {
-    return 1;
-  }
-
-  return $self->next::method(@_);
-}
-
-sub _prep_interpolated_value {
-  my ($self, $type, $value) = @_;
-
-  if ($type =~ /money/i && defined $value) {
-    # change a ^ not followed by \$ to a \$
-    $value =~ s/^ (?! \$) /\$/x;
-  }
-
-  return $value;
-}
-
-1;
-
-=head1 NAME
-
-DBIx::Class::Storage::DBI::Sybase::NoBindVars - Storage::DBI subclass for Sybase
-without placeholder support
-
-=head1 DESCRIPTION
-
-If you're using this driver than your version of Sybase, or the libraries you
-use to connect to it, do not support placeholders.
-
-You can also enable this driver explicitly using:
-
-  my $schema = SchemaClass->clone;
-  $schema->storage_type('::DBI::Sybase::NoBindVars');
-  $schema->connect($dsn, $user, $pass, \%opts);
-
-See the discussion in L<< DBD::Sybase/Using ? Placeholders & bind parameters to
-$sth->execute >> for details on the pros and cons of using placeholders.
-
-One advantage of not using placeholders is that C<select @@identity> will work
-for obtainging the last insert id of an C<IDENTITY> column, instead of having to
-do C<select max(col)> in a transaction as the base Sybase driver does.
-
-When using this driver, bind variables will be interpolated (properly quoted of
-course) into the SQL query itself, without using placeholders.
-
-The caching of prepared statements is also explicitly disabled, as the
-interpolation renders it useless.
-
-=head1 AUTHORS
-
-See L<DBIx::Class/CONTRIBUTORS>.
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut
-# vim:sts=2 sw=2:

Deleted: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -1,1140 +0,0 @@
-package DBIx::Class::Storage::DBI::Sybase;
-
-use strict;
-use warnings;
-
-use base qw/
-    DBIx::Class::Storage::DBI::Sybase::Common
-    DBIx::Class::Storage::DBI::AutoCast
-/;
-use mro 'c3';
-use Carp::Clan qw/^DBIx::Class/;
-use List::Util();
-use Sub::Name();
-use Data::Dumper::Concise();
-
-__PACKAGE__->mk_group_accessors('simple' =>
-    qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
-       _bulk_storage _is_bulk_storage _began_bulk_work
-       _bulk_disabled_due_to_coderef_connect_info_warned
-       _identity_method/
-);
-
-my @also_proxy_to_extra_storages = qw/
-  connect_call_set_auto_cast auto_cast connect_call_blob_setup
-  connect_call_datetime_setup
-
-  disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
-  auto_savepoint unsafe cursor_class debug debugobj schema
-/;
-
-=head1 NAME
-
-DBIx::Class::Storage::DBI::Sybase - Sybase support for DBIx::Class
-
-=head1 SYNOPSIS
-
-This subclass supports L<DBD::Sybase> for real Sybase databases.  If you are
-using an MSSQL database via L<DBD::Sybase>, your storage will be reblessed to
-L<DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server>.
-
-=head1 DESCRIPTION
-
-If your version of Sybase does not support placeholders, then your storage
-will be reblessed to L<DBIx::Class::Storage::DBI::Sybase::NoBindVars>. You can
-also enable that driver explicitly, see the documentation for more details.
-
-With this driver there is unfortunately no way to get the C<last_insert_id>
-without doing a C<SELECT MAX(col)>. This is done safely in a transaction
-(locking the table.) See L</INSERTS WITH PLACEHOLDERS>.
-
-A recommended L<DBIx::Class::Storage::DBI/connect_info> setting:
-
-  on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]]
-
-=head1 METHODS
-
-=cut
-
-sub _rebless {
-  my $self = shift;
-
-  if (ref($self) eq 'DBIx::Class::Storage::DBI::Sybase') {
-    my $dbtype = eval {
-      @{$self->_get_dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2]
-    } || '';
-    $self->throw_exception("Unable to estable connection to determine database type: $@")
-      if $@;
-
-    $dbtype =~ s/\W/_/gi;
-    my $subclass = "DBIx::Class::Storage::DBI::Sybase::${dbtype}";
-
-    if ($dbtype && $self->load_optional_class($subclass)) {
-      bless $self, $subclass;
-      $self->_rebless;
-    } else { # real Sybase
-      my $no_bind_vars = 'DBIx::Class::Storage::DBI::Sybase::NoBindVars';
-
-      if ($self->using_freetds) {
-        carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
-
-You are using FreeTDS with Sybase.
-
-We will do our best to support this configuration, but please consider this
-support experimental.
-
-TEXT/IMAGE columns will definitely not work.
-
-You are encouraged to recompile DBD::Sybase with the Sybase Open Client libraries
-instead.
-
-See perldoc DBIx::Class::Storage::DBI::Sybase for more details.
-
-To turn off this warning set the DBIC_SYBASE_FREETDS_NOWARN environment
-variable.
-EOF
-        if (not $self->_typeless_placeholders_supported) {
-          if ($self->_placeholders_supported) {
-            $self->auto_cast(1);
-          } else {
-            $self->ensure_class_loaded($no_bind_vars);
-            bless $self, $no_bind_vars;
-            $self->_rebless;
-          }
-        }
-      }
-      elsif (not $self->_get_dbh->{syb_dynamic_supported}) {
-        # not necessarily FreeTDS, but no placeholders nevertheless
-        $self->ensure_class_loaded($no_bind_vars);
-        bless $self, $no_bind_vars;
-        $self->_rebless;
-      } elsif (not $self->_typeless_placeholders_supported) {
-        # this is highly unlikely, but we check just in case
-        $self->auto_cast(1);
-      }
-    }
-  }
-}
-
-sub _init {
-  my $self = shift;
-  $self->_set_max_connect(256);
-
-  # based on LongReadLen in connect_info
-  $self->set_textsize if $self->using_freetds;
-
-# create storage for insert/(update blob) transactions,
-# unless this is that storage
-  return if $self->_is_extra_storage;
-
-  my $writer_storage = (ref $self)->new;
-
-  $writer_storage->_is_extra_storage(1);
-  $writer_storage->connect_info($self->connect_info);
-  $writer_storage->auto_cast($self->auto_cast);
-
-  $self->_writer_storage($writer_storage);
-
-# create a bulk storage unless connect_info is a coderef
-  return
-    if (Scalar::Util::reftype($self->_dbi_connect_info->[0])||'') eq 'CODE';
-
-  my $bulk_storage = (ref $self)->new;
-
-  $bulk_storage->_is_extra_storage(1);
-  $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
-  $bulk_storage->connect_info($self->connect_info);
-
-# this is why
-  $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
-
-  $self->_bulk_storage($bulk_storage);
-}
-
-for my $method (@also_proxy_to_extra_storages) {
-  no strict 'refs';
-  no warnings 'redefine';
-
-  my $replaced = __PACKAGE__->can($method);
-
-  *{$method} = Sub::Name::subname $method => sub {
-    my $self = shift;
-    $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
-    $self->_bulk_storage->$replaced(@_)   if $self->_bulk_storage;
-    return $self->$replaced(@_);
-  };
-}
-
-sub disconnect {
-  my $self = shift;
-
-# Even though we call $sth->finish for uses off the bulk API, there's still an
-# "active statement" warning on disconnect, which we throw away here.
-# This is due to the bug described in insert_bulk.
-# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
-  local $SIG{__WARN__} = sub {
-    warn $_[0] unless $_[0] =~ /active statement/i;
-  } if $self->_is_bulk_storage;
-
-# so that next transaction gets a dbh
-  $self->_began_bulk_work(0) if $self->_is_bulk_storage;
-
-  $self->next::method;
-}
-
-# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
-# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
-# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
-# only want when AutoCommit is off.
-sub _populate_dbh {
-  my $self = shift;
-
-  $self->next::method(@_);
-
-  return unless $self->_driver_determined; # otherwise we screw up MSSQL
-
-  if ($self->_is_bulk_storage) {
-# this should be cleared on every reconnect
-    $self->_began_bulk_work(0);
-    return;
-  }
-
-  if (not $self->using_freetds) {
-    $self->_dbh->{syb_chained_txn} = 1;
-  } else {
-    if ($self->_dbh_autocommit) {
-      $self->_dbh->do('SET CHAINED OFF');
-    } else {
-      $self->_dbh->do('SET CHAINED ON');
-    }
-  }
-}
-
-=head2 connect_call_blob_setup
-
-Used as:
-
-  on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ]
-
-Does C<< $dbh->{syb_binary_images} = 1; >> to return C<IMAGE> data as raw binary
-instead of as a hex string.
-
-Recommended.
-
-Also sets the C<log_on_update> value for blob write operations. The default is
-C<1>, but C<0> is better if your database is configured for it.
-
-See
-L<DBD::Sybase/Handling_IMAGE/TEXT_data_with_syb_ct_get_data()/syb_ct_send_data()>.
-
-=cut
-
-sub connect_call_blob_setup {
-  my $self = shift;
-  my %args = @_;
-  my $dbh = $self->_dbh;
-  $dbh->{syb_binary_images} = 1;
-
-  $self->_blob_log_on_update($args{log_on_update})
-    if exists $args{log_on_update};
-}
-
-sub _is_lob_type {
-  my $self = shift;
-  my $type = shift;
-  $type && $type =~ /(?:text|image|lob|bytea|binary|memo)/i;
-}
-
-sub _is_lob_column {
-  my ($self, $source, $column) = @_;
-
-  return $self->_is_lob_type($source->column_info($column)->{data_type});
-}
-
-sub _prep_for_execute {
-  my $self = shift;
-  my ($op, $extra_bind, $ident, $args) = @_;
-
-  my ($sql, $bind) = $self->next::method (@_);
-
-  my $table = Scalar::Util::blessed($ident) ? $ident->from : $ident;
-
-  my $bind_info = $self->_resolve_column_info(
-    $ident, [map $_->[0], @{$bind}]
-  );
-  my $bound_identity_col = List::Util::first
-    { $bind_info->{$_}{is_auto_increment} }
-    (keys %$bind_info)
-  ;
-  my $identity_col = Scalar::Util::blessed($ident) &&
-    List::Util::first
-    { $ident->column_info($_)->{is_auto_increment} }
-    $ident->columns
-  ;
-
-  if (($op eq 'insert' && $bound_identity_col) ||
-      ($op eq 'update' && exists $args->[0]{$identity_col})) {
-    $sql = join ("\n",
-      $self->_set_table_identity_sql($op => $table, 'on'),
-      $sql,
-      $self->_set_table_identity_sql($op => $table, 'off'),
-    );
-  }
-
-  if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
-      (not $self->{insert_bulk})) {
-    $sql =
-      "$sql\n" .
-      $self->_fetch_identity_sql($ident, $identity_col);
-  }
-
-  return ($sql, $bind);
-}
-
-sub _set_table_identity_sql {
-  my ($self, $op, $table, $on_off) = @_;
-
-  return sprintf 'SET IDENTITY_%s %s %s',
-    uc($op), $self->sql_maker->_quote($table), uc($on_off);
-}
-
-# Stolen from SQLT, with some modifications. This is a makeshift
-# solution before a sane type-mapping library is available, thus
-# the 'our' for easy overrides.
-our %TYPE_MAPPING  = (
-    number    => 'numeric',
-    money     => 'money',
-    varchar   => 'varchar',
-    varchar2  => 'varchar',
-    timestamp => 'datetime',
-    text      => 'varchar',
-    real      => 'double precision',
-    comment   => 'text',
-    bit       => 'bit',
-    tinyint   => 'smallint',
-    float     => 'double precision',
-    serial    => 'numeric',
-    bigserial => 'numeric',
-    boolean   => 'varchar',
-    long      => 'varchar',
-);
-
-sub _native_data_type {
-  my ($self, $type) = @_;
-
-  $type = lc $type;
-  $type =~ s/\s* identity//x;
-
-  return uc($TYPE_MAPPING{$type} || $type);
-}
-
-sub _fetch_identity_sql {
-  my ($self, $source, $col) = @_;
-
-  return sprintf ("SELECT MAX(%s) FROM %s",
-    map { $self->sql_maker->_quote ($_) } ($col, $source->from)
-  );
-}
-
-sub _execute {
-  my $self = shift;
-  my ($op) = @_;
-
-  my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);
-
-  if ($op eq 'insert') {
-    $self->_identity($sth->fetchrow_array);
-    $sth->finish;
-  }
-
-  return wantarray ? ($rv, $sth, @bind) : $rv;
-}
-
-sub last_insert_id { shift->_identity }
-
-# handles TEXT/IMAGE and transaction for last_insert_id
-sub insert {
-  my $self = shift;
-  my ($source, $to_insert) = @_;
-
-  my $identity_col = (List::Util::first
-    { $source->column_info($_)->{is_auto_increment} }
-    $source->columns) || '';
-
-  # check for empty insert
-  # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
-  # try to insert explicit 'DEFAULT's instead (except for identity)
-  if (not %$to_insert) {
-    for my $col ($source->columns) {
-      next if $col eq $identity_col;
-      $to_insert->{$col} = \'DEFAULT';
-    }
-  }
-
-  my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
-
-  # do we need the horrific SELECT MAX(COL) hack?
-  my $dumb_last_insert_id =
-       $identity_col
-    && (not exists $to_insert->{$identity_col})
-    && ($self->_identity_method||'') ne '@@IDENTITY';
-
-  my $next = $self->next::can;
-
-  # we are already in a transaction, or there are no blobs
-  # and we don't need the PK - just (try to) do it
-  if ($self->{transaction_depth}
-        || (!$blob_cols && !$dumb_last_insert_id)
-  ) {
-    return $self->_insert (
-      $next, $source, $to_insert, $blob_cols, $identity_col
-    );
-  }
-
-  # otherwise use the _writer_storage to do the insert+transaction on another
-  # connection
-  my $guard = $self->_writer_storage->txn_scope_guard;
-
-  my $updated_cols = $self->_writer_storage->_insert (
-    $next, $source, $to_insert, $blob_cols, $identity_col
-  );
-
-  $self->_identity($self->_writer_storage->_identity);
-
-  $guard->commit;
-
-  return $updated_cols;
-}
-
-sub _insert {
-  my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;
-
-  my $updated_cols = $self->$next ($source, $to_insert);
-
-  my $final_row = {
-    ($identity_col ?
-      ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
-    %$to_insert,
-    %$updated_cols,
-  };
-
-  $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;
-
-  return $updated_cols;
-}
-
-sub update {
-  my $self = shift;
-  my ($source, $fields, $where, @rest) = @_;
-
-  my $wantarray = wantarray;
-
-  my $blob_cols = $self->_remove_blob_cols($source, $fields);
-
-  my $table = $source->name;
-
-  my $identity_col = List::Util::first
-    { $source->column_info($_)->{is_auto_increment} }
-    $source->columns;
-
-  my $is_identity_update = $identity_col && defined $fields->{$identity_col};
-
-  return $self->next::method(@_) unless $blob_cols;
-
-# If there are any blobs in $where, Sybase will return a descriptive error
-# message.
-# XXX blobs can still be used with a LIKE query, and this should be handled.
-
-# update+blob update(s) done atomically on separate connection
-  $self = $self->_writer_storage;
-
-  my $guard = $self->txn_scope_guard;
-
-# First update the blob columns to be updated to '' (taken from $fields, where
-# it is originally put by _remove_blob_cols.)
-  my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
-
-# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
-
-  $self->next::method($source, \%blobs_to_empty, $where, @rest);
-
-# Now update the blobs before the other columns in case the update of other
-# columns makes the search condition invalid.
-  $self->_update_blobs($source, $blob_cols, $where);
-
-  my @res;
-  if (%$fields) {
-    if ($wantarray) {
-      @res    = $self->next::method(@_);
-    }
-    elsif (defined $wantarray) {
-      $res[0] = $self->next::method(@_);
-    }
-    else {
-      $self->next::method(@_);
-    }
-  }
-
-  $guard->commit;
-
-  return $wantarray ? @res : $res[0];
-}
-
-sub insert_bulk {
-  my $self = shift;
-  my ($source, $cols, $data) = @_;
-
-  my $identity_col = List::Util::first
-    { $source->column_info($_)->{is_auto_increment} }
-    $source->columns;
-
-  my $is_identity_insert = (List::Util::first
-    { $_ eq $identity_col }
-    @{$cols}
-  ) ? 1 : 0;
-
-  my @source_columns = $source->columns;
-
-  my $use_bulk_api =
-    $self->_bulk_storage &&
-    $self->_get_dbh->{syb_has_blk};
-
-  if ((not $use_bulk_api) &&
-      (Scalar::Util::reftype($self->_dbi_connect_info->[0])||'') eq 'CODE' &&
-      (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
-    carp <<'EOF';
-Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
-regular array inserts.
-EOF
-    $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
-  }
-
-  if (not $use_bulk_api) {
-    my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
-
-# _execute_array uses a txn anyway, but it ends too early in case we need to
-# select max(col) to get the identity for inserting blobs.
-    ($self, my $guard) = $self->{transaction_depth} == 0 ?
-      ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
-      :
-      ($self, undef);
-
-    local $self->{insert_bulk} = 1;
-
-    $self->next::method(@_);
-
-    if ($blob_cols) {
-      if ($is_identity_insert) {
-        $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
-      }
-      else {
-        my @cols_with_identities = (@$cols, $identity_col);
-
-        ## calculate identities
-        # XXX This assumes identities always increase by 1, which may or may not
-        # be true.
-        my ($last_identity) =
-          $self->_dbh->selectrow_array (
-            $self->_fetch_identity_sql($source, $identity_col)
-          );
-        my @identities = (($last_identity - @$data + 1) .. $last_identity);
-
-        my @data_with_identities = map [@$_, shift @identities], @$data;
-
-        $self->_insert_blobs_array (
-          $source, $blob_cols, \@cols_with_identities, \@data_with_identities
-        );
-      }
-    }
-
-    $guard->commit if $guard;
-
-    return;
-  }
-
-# otherwise, use the bulk API
-
-# rearrange @$data so that columns are in database order
-  my %orig_idx;
-  @orig_idx{@$cols} = 0..$#$cols;
-
-  my %new_idx;
-  @new_idx{@source_columns} = 0..$#source_columns;
-
-  my @new_data;
-  for my $datum (@$data) {
-    my $new_datum = [];
-    for my $col (@source_columns) {
-# identity data will be 'undef' if not $is_identity_insert
-# columns with defaults will also be 'undef'
-      $new_datum->[ $new_idx{$col} ] =
-        exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
-    }
-    push @new_data, $new_datum;
-  }
-
-# bcp identity index is 1-based
-  my $identity_idx = exists $new_idx{$identity_col} ?
-    $new_idx{$identity_col} + 1 : 0;
-
-## Set a client-side conversion error handler, straight from DBD::Sybase docs.
-# This ignores any data conversion errors detected by the client side libs, as
-# they are usually harmless.
-  my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
-    Sub::Name::subname insert_bulk => sub {
-      my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
-
-      return 1 if $errno == 36;
-
-      carp
-        "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
-        ($errmsg ? "\n$errmsg" : '') .
-        ($osmsg  ? "\n$osmsg"  : '')  .
-        ($blkmsg ? "\n$blkmsg" : '');
-
-      return 0;
-  });
-
-  eval {
-    my $bulk = $self->_bulk_storage;
-
-    my $guard = $bulk->txn_scope_guard;
-
-## XXX get this to work instead of our own $sth
-## will require SQLA or *Hacks changes for ordered columns
-#    $bulk->next::method($source, \@source_columns, \@new_data, {
-#      syb_bcp_attribs => {
-#        identity_flag   => $is_identity_insert,
-#        identity_column => $identity_idx,
-#      }
-#    });
-    my $sql = 'INSERT INTO ' .
-      $bulk->sql_maker->_quote($source->name) . ' (' .
-# colname list is ignored for BCP, but does no harm
-      (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
-      ' VALUES ('.  (join ', ', ('?') x @source_columns) . ')';
-
-## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
-## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
-## better yet the version above. Should be fixed in DBD::Sybase.
-    my $sth = $bulk->_get_dbh->prepare($sql,
-#      'insert', # op
-      {
-        syb_bcp_attribs => {
-          identity_flag   => $is_identity_insert,
-          identity_column => $identity_idx,
-        }
-      }
-    );
-
-    my @bind = do {
-      my $idx = 0;
-      map [ $_, $idx++ ], @source_columns;
-    };
-
-    $self->_execute_array(
-      $source, $sth, \@bind, \@source_columns, \@new_data, sub {
-        $guard->commit
-      }
-    );
-
-    $bulk->_query_end($sql);
-  };
-
-  my $exception = $@;
-  DBD::Sybase::set_cslib_cb($orig_cslib_cb);
-
-  if ($exception =~ /-Y option/) {
-    carp <<"EOF";
-
-Sybase bulk API operation failed due to character set incompatibility, reverting
-to regular array inserts:
-
-*** Try unsetting the LANG environment variable.
-
-$exception
-EOF
-    $self->_bulk_storage(undef);
-    unshift @_, $self;
-    goto \&insert_bulk;
-  }
-  elsif ($exception) {
-# rollback makes the bulkLogin connection unusable
-    $self->_bulk_storage->disconnect;
-    $self->throw_exception($exception);
-  }
-}
-
-sub _dbh_execute_array {
-  my ($self, $sth, $tuple_status, $cb) = @_;
-
-  my $rv = $self->next::method($sth, $tuple_status);
-  $cb->() if $cb;
-
-  return $rv;
-}
-
-# Make sure blobs are not bound as placeholders, and return any non-empty ones
-# as a hash.
-sub _remove_blob_cols {
-  my ($self, $source, $fields) = @_;
-
-  my %blob_cols;
-
-  for my $col (keys %$fields) {
-    if ($self->_is_lob_column($source, $col)) {
-      my $blob_val = delete $fields->{$col};
-      if (not defined $blob_val) {
-        $fields->{$col} = \'NULL';
-      }
-      else {
-        $fields->{$col} = \"''";
-        $blob_cols{$col} = $blob_val unless $blob_val eq '';
-      }
-    }
-  }
-
-  return %blob_cols ? \%blob_cols : undef;
-}
-
-# same for insert_bulk
-sub _remove_blob_cols_array {
-  my ($self, $source, $cols, $data) = @_;
-
-  my @blob_cols;
-
-  for my $i (0..$#$cols) {
-    my $col = $cols->[$i];
-
-    if ($self->_is_lob_column($source, $col)) {
-      for my $j (0..$#$data) {
-        my $blob_val = delete $data->[$j][$i];
-        if (not defined $blob_val) {
-          $data->[$j][$i] = \'NULL';
-        }
-        else {
-          $data->[$j][$i] = \"''";
-          $blob_cols[$j][$i] = $blob_val
-            unless $blob_val eq '';
-        }
-      }
-    }
-  }
-
-  return @blob_cols ? \@blob_cols : undef;
-}
-
-sub _update_blobs {
-  my ($self, $source, $blob_cols, $where) = @_;
-
-  my (@primary_cols) = $source->primary_columns;
-
-  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
-    unless @primary_cols;
-
-# check if we're updating a single row by PK
-  my $pk_cols_in_where = 0;
-  for my $col (@primary_cols) {
-    $pk_cols_in_where++ if defined $where->{$col};
-  }
-  my @rows;
-
-  if ($pk_cols_in_where == @primary_cols) {
-    my %row_to_update;
-    @row_to_update{@primary_cols} = @{$where}{@primary_cols};
-    @rows = \%row_to_update;
-  } else {
-    my $cursor = $self->select ($source, \@primary_cols, $where, {});
-    @rows = map {
-      my %row; @row{@primary_cols} = @$_; \%row
-    } $cursor->all;
-  }
-
-  for my $row (@rows) {
-    $self->_insert_blobs($source, $blob_cols, $row);
-  }
-}
-
-sub _insert_blobs {
-  my ($self, $source, $blob_cols, $row) = @_;
-  my $dbh = $self->_get_dbh;
-
-  my $table = $source->name;
-
-  my %row = %$row;
-  my (@primary_cols) = $source->primary_columns;
-
-  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
-    unless @primary_cols;
-
-  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
-    if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
-
-  for my $col (keys %$blob_cols) {
-    my $blob = $blob_cols->{$col};
-
-    my %where = map { ($_, $row{$_}) } @primary_cols;
-
-    my $cursor = $self->select ($source, [$col], \%where, {});
-    $cursor->next;
-    my $sth = $cursor->sth;
-
-    if (not $sth) {
-
-      $self->throw_exception(
-          "Could not find row in table '$table' for blob update:\n"
-        . Data::Dumper::Concise::Dumper (\%where)
-      );
-    }
-
-    eval {
-      do {
-        $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
-      } while $sth->fetch;
-
-      $sth->func('ct_prepare_send') or die $sth->errstr;
-
-      my $log_on_update = $self->_blob_log_on_update;
-      $log_on_update    = 1 if not defined $log_on_update;
-
-      $sth->func('CS_SET', 1, {
-        total_txtlen => length($blob),
-        log_on_update => $log_on_update
-      }, 'ct_data_info') or die $sth->errstr;
-
-      $sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr;
-
-      $sth->func('ct_finish_send') or die $sth->errstr;
-    };
-    my $exception = $@;
-    $sth->finish if $sth;
-    if ($exception) {
-      if ($self->using_freetds) {
-        $self->throw_exception (
-          'TEXT/IMAGE operation failed, probably because you are using FreeTDS: '
-          . $exception
-        );
-      } else {
-        $self->throw_exception($exception);
-      }
-    }
-  }
-}
-
-sub _insert_blobs_array {
-  my ($self, $source, $blob_cols, $cols, $data) = @_;
-
-  for my $i (0..$#$data) {
-    my $datum = $data->[$i];
-
-    my %row;
-    @row{ @$cols } = @$datum;
-
-    my %blob_vals;
-    for my $j (0..$#$cols) {
-      if (exists $blob_cols->[$i][$j]) {
-        $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
-      }
-    }
-
-    $self->_insert_blobs ($source, \%blob_vals, \%row);
-  }
-}
-
-=head2 connect_call_datetime_setup
-
-Used as:
-
-  on_connect_call => 'datetime_setup'
-
-In L<DBIx::Class::Storage::DBI/connect_info> to set:
-
-  $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
-  $dbh->do('set dateformat mdy');   # input fmt:  08/13/1979 18:08:55.080
-
-On connection for use with L<DBIx::Class::InflateColumn::DateTime>, using
-L<DateTime::Format::Sybase>, which you will need to install.
-
-This works for both C<DATETIME> and C<SMALLDATETIME> columns, although
-C<SMALLDATETIME> columns only have minute precision.
-
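-A minimal connection sketch (C<MyApp::Schema> and the credentials are
-placeholders):
-
-  my $schema = MyApp::Schema->connect(
-    $dsn, $user, $pass,
-    { on_connect_call => 'datetime_setup' },
-  );
-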
-=cut
-
-{
-  my $old_dbd_warned = 0;
-
-  sub connect_call_datetime_setup {
-    my $self = shift;
-    my $dbh = $self->_get_dbh;
-
-    if ($dbh->can('syb_date_fmt')) {
-      # amazingly, this works with FreeTDS
-      $dbh->syb_date_fmt('ISO_strict');
-    } elsif (not $old_dbd_warned) {
-      carp "Your DBD::Sybase is too old to support ".
-      "DBIx::Class::InflateColumn::DateTime, please upgrade!";
-      $old_dbd_warned = 1;
-    }
-
-    $dbh->do('SET DATEFORMAT mdy');
-
-    1;
-  }
-}
-
-sub datetime_parser_type { "DateTime::Format::Sybase" }
-
-# ->begin_work and such have no effect with FreeTDS but we run them anyway to
-# let the DBD keep any state it needs to.
-#
-# If they ever do start working, the extra statements will do no harm (because
-# Sybase supports nested transactions.)
-
-sub _dbh_begin_work {
-  my $self = shift;
-
-# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
-# TRAN once. However, we need to make sure there's a $dbh.
-  return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
-
-  $self->next::method(@_);
-
-  if ($self->using_freetds) {
-    $self->_get_dbh->do('BEGIN TRAN');
-  }
-
-  $self->_began_bulk_work(1) if $self->_is_bulk_storage;
-}
-
-sub _dbh_commit {
-  my $self = shift;
-  if ($self->using_freetds) {
-    $self->_dbh->do('COMMIT');
-  }
-  return $self->next::method(@_);
-}
-
-sub _dbh_rollback {
-  my $self = shift;
-  if ($self->using_freetds) {
-    $self->_dbh->do('ROLLBACK');
-  }
-  return $self->next::method(@_);
-}
-
-# savepoint support using ASE syntax
-
-sub _svp_begin {
-  my ($self, $name) = @_;
-
-  $self->_get_dbh->do("SAVE TRANSACTION $name");
-}
-
-# A new SAVE TRANSACTION with the same name releases the previous one.
-sub _svp_release { 1 }
-
-sub _svp_rollback {
-  my ($self, $name) = @_;
-
-  $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
-}
-
-1;
-
-=head1 Schema::Loader Support
-
-There is an experimental branch of L<DBIx::Class::Schema::Loader> that will
-allow you to dump a schema from most (if not all) versions of Sybase.
-
-It is available via subversion from:
-
-  http://dev.catalyst.perl.org/repos/bast/branches/DBIx-Class-Schema-Loader/current/
-
-=head1 FreeTDS
-
-This driver supports L<DBD::Sybase> compiled against FreeTDS
-(L<http://www.freetds.org/>) to the best of our ability; however, it is
-recommended that you recompile L<DBD::Sybase> against the Sybase Open Client
-libraries, which are part of the Sybase ASE distribution.
-
-The Open Client FAQ is here:
-L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.
-
-Sybase ASE for Linux (which comes with the Open Client libraries) may be
-downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
-
-To see if you're using FreeTDS, check C<< $schema->storage->using_freetds >>, or run:
-
-  perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
-
-Some versions of the libraries involved will not support placeholders, in which
-case the storage will be reblessed to
-L<DBIx::Class::Storage::DBI::Sybase::NoBindVars>.
-
-In some configurations, placeholders will work but will throw implicit type
-conversion errors for anything that's not expecting a string. In such a case,
-the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
-automatically set; you may also enable it yourself on connection with
-L<DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>. The type info
-for the C<CAST>s is taken from the L<DBIx::Class::ResultSource/data_type>
-definitions in your Result classes, and is mapped to a Sybase type (if it isn't
-one already) using a mapping based on L<SQL::Translator>.
-
-In other configurations, placeholders will work just as they do with the Sybase
-Open Client libraries.
-
-Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
-
-=head1 INSERTS WITH PLACEHOLDERS
-
-With placeholders enabled, inserts are done in a transaction so that there are
-no concurrency issues with getting the inserted identity value using
-C<SELECT MAX(col)>, which is the only way to get the C<IDENTITY> value in this
-mode.
-
-In addition, they are done on a separate connection so that it's possible to
-have active cursors when doing an insert.
-
-When using C<DBIx::Class::Storage::DBI::Sybase::NoBindVars>, transactions are
-disabled, as there are no concurrency issues with C<SELECT @@IDENTITY>, which
-is a session variable.
-
-=head1 TRANSACTIONS
-
-Due to limitations of the TDS protocol, L<DBD::Sybase>, or both, you cannot
-begin a transaction while there are active cursors; nor can you use multiple
-active cursors within a transaction. An active cursor is, for example, a
-L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
-C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
-
-For example, this will not work:
-
-  $schema->txn_do(sub {
-    my $rs = $schema->resultset('Book');
-    while (my $row = $rs->next) {
-      $schema->resultset('MetaData')->create({
-        book_id => $row->id,
-        ...
-      });
-    }
-  });
-
-This won't either:
-
-  my $first_row = $large_rs->first;
-  $schema->txn_do(sub { ... });
-
-Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
-are not affected, as they are done on an extra database handle.
-
-Some workarounds:
-
-=over 4
-
-=item * use L<DBIx::Class::Storage::DBI::Replicated>
-
-=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>
-
-=item * load the data from your cursor with L<DBIx::Class::ResultSet/all> (see the sketch after this list)
-
-=back
-
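-A minimal sketch of the last workaround (C<$large_rs> is carried over from the
-example above, and C<do_something> is a placeholder for your per-row work):
-
-  my @rows = $large_rs->all;  # buffers everything, closing the cursor
-
-  $schema->txn_do(sub {
-    do_something($_) for @rows;
-  });
-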
-=head1 MAXIMUM CONNECTIONS
-
-The TDS protocol makes separate connections to the server for active statements
-in the background. By default the number of such connections is limited to 25,
-on both the client side and the server side.
-
-This is a bit too low for a complex L<DBIx::Class> application, so on connection
-the client-side setting is set to C<256> (see L<DBD::Sybase/maxConnect>.) You
-can override it to whatever setting you like in the DSN.
-
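-For example (the server name is a placeholder):
-
-  my $dsn = 'dbi:Sybase:server=myserver;maxConnect=1024';
-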
-See
-L<http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.ase_15.0.sag1/html/sag1/sag1272.htm>
-for information on changing the setting on the server side.
-
-=head1 DATES
-
-See L</connect_call_datetime_setup> to setup date formats
-for L<DBIx::Class::InflateColumn::DateTime>.
-
-=head1 TEXT/IMAGE COLUMNS
-
-L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
-C<TEXT/IMAGE> columns.
-
-Setting C<< $dbh->{LongReadLen} >> will also not work with FreeTDS; use either:
-
-  $schema->storage->dbh->do("SET TEXTSIZE $bytes");
-
-or
-
-  $schema->storage->set_textsize($bytes);
-
-instead.
-
-However, the C<LongReadLen> you pass in
-L<DBIx::Class::Storage::DBI/connect_info> is used to execute the equivalent
-C<SET TEXTSIZE> command on connection.
-
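-For example, a connection sketch (C<MyApp::Schema> is a placeholder and the
-1MB figure is arbitrary):
-
-  my $schema = MyApp::Schema->connect(
-    $dsn, $user, $pass,
-    { LongReadLen => 1024 * 1024 },
-  );
-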
-See L</connect_call_blob_setup> for a L<DBIx::Class::Storage::DBI/connect_info>
-setting you need to work with C<IMAGE> columns.
-
-=head1 BULK API
-
-The experimental L<DBD::Sybase> Bulk API support is used for
-L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
-on a separate connection.
-
-To use this feature effectively, use a large number of rows for each
-L<populate|DBIx::Class::ResultSet/populate> call, e.g.:
-
-  while (my $rows = $data_source->get_100_rows()) {
-    $rs->populate($rows);
-  }
-
-B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
-calls in your C<Result> classes B<must> list columns in database order for this
-to work. Also, you may have to unset the C<LANG> environment variable before
-loading your app, if it doesn't match the character set of your database.
-
-When inserting IMAGE columns using this method, you'll need to use
-L</connect_call_blob_setup> as well.
-
-=head1 TODO
-
-=over
-
-=item *
-
-Transition to AutoCommit=0 (starting a transaction) mode by first exhausting
-any active cursors, using eager cursors.
-
-=item *
-
-Real limits and limited counts using stored procedures deployed on startup.
-
-=item *
-
-Adaptive Server Anywhere (ASA) support, with possible SQLA::Limit support.
-
-=item *
-
-Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
-
-=item *
-
-insert_bulk using prepare_cached (see comments in the code.)
-
-=back
-
-=head1 AUTHOR
-
-See L<DBIx::Class/CONTRIBUTORS>.
-
-=head1 LICENSE
-
-You may distribute this code under the same terms as Perl itself.
-
-=cut
-# vim:sts=2 sw=2:

Copied: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase.pm (from rev 7923, DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase/Common.pm)
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase.pm	                        (rev 0)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI/Sybase.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -0,0 +1,132 @@
+package DBIx::Class::Storage::DBI::Sybase;
+
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Sybase - Base class for drivers using
+L<DBD::Sybase>
+
+=head1 DESCRIPTION
+
+This is the base class/dispatcher for storage classes designed to work with
+L<DBD::Sybase>.
+
+=head1 METHODS
+
+=cut
+
+sub _rebless {
+  my $self = shift;
+
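+  # sp_server_info attribute_id 1 is DBMS_NAME - ASE itself reports 'SQL Server'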
+  my $dbtype = eval {
+    @{$self->_get_dbh->selectrow_arrayref(qq{sp_server_info \@attribute_id=1})}[2]
+  };
+
+  $self->throw_exception("Unable to establish a connection to determine database type: $@")
+    if $@;
+
+  if ($dbtype) {
+    $dbtype =~ s/\W/_/gi;
+
+    # saner class name
+    $dbtype = 'ASE' if $dbtype eq 'SQL_Server';
+
+    my $subclass = __PACKAGE__ . "::$dbtype";
+    if ($self->load_optional_class($subclass)) {
+      bless $self, $subclass;
+      $self->_rebless;
+    }
+  }
+}
+
+sub _ping {
+  my $self = shift;
+
+  my $dbh = $self->_dbh or return 0;
+
+  local $dbh->{RaiseError} = 1;
+  local $dbh->{PrintError} = 0;
+
+  if ($dbh->{syb_no_child_con}) {
+# if extra connections are not allowed, then ->ping is reliable
+    my $ping = eval { $dbh->ping };
+    return $@ ? 0 : $ping;
+  }
+
+  eval {
+# XXX if the main connection goes stale, does opening another for this statement
+# really determine anything?
+    $dbh->do('select 1');
+  };
+
+  return $@ ? 0 : 1;
+}
+
+sub _set_max_connect {
+  my $self = shift;
+  my $val  = shift || 256;
+
+  my $dsn = $self->_dbi_connect_info->[0];
+
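+  # a coderef connect_info gives us no DSN string to append maxConnect to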
+  return if ref($dsn) eq 'CODE';
+
+  if ($dsn !~ /maxConnect=/) {
+    $self->_dbi_connect_info->[0] = "$dsn;maxConnect=$val";
+    my $connected = defined $self->_dbh;
+    $self->disconnect;
+    $self->ensure_connected if $connected;
+  }
+}
+
+=head2 using_freetds
+
+Whether or not L<DBD::Sybase> was compiled against FreeTDS. If false, it means
+the Sybase Open Client libraries were used.
+
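+A possible usage sketch (assuming a connected C<$schema>):
+
+  warn "this connection uses FreeTDS\n"
+    if $schema->storage->using_freetds;
+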
+=cut
+
+sub using_freetds {
+  my $self = shift;
+
+  return $self->_get_dbh->{syb_oc_version} =~ /freetds/i;
+}
+
+=head2 set_textsize
+
+When using FreeTDS and/or MSSQL, C<< $dbh->{LongReadLen} >> is not available;
+use this method instead. It does:
+
+  $dbh->do("SET TEXTSIZE $bytes");
+
+Takes the number of bytes, or uses the C<LongReadLen> value from your
+L<DBIx::Class/connect_info> if omitted, and lastly falls back to C<32768>,
+the L<DBD::Sybase> default.
+
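+For example, to raise the limit to 2MB explicitly (assuming a connected
+C<$schema>):
+
+  $schema->storage->set_textsize(2 * 1024 * 1024);
+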
+=cut
+
+sub set_textsize {
+  my $self = shift;
+  my $text_size = shift ||
+    eval { $self->_dbi_connect_info->[-1]->{LongReadLen} } ||
+    32768; # the DBD::Sybase default
+
+  return unless defined $text_size;
+
+  $self->_dbh->do("SET TEXTSIZE $text_size");
+}
+
+1;
+
+=head1 AUTHORS
+
+See L<DBIx::Class/CONTRIBUTORS>.
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/lib/DBIx/Class/Storage/DBI.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -14,6 +14,7 @@
 use Scalar::Util();
 use List::Util();
 use Data::Dumper::Concise();
+use Sub::Name ();
 
 # what version of sqlt do we require if deploy() without a ddl_dir is invoked
 # when changing also adjust the corresponding author_require in Makefile.PL
@@ -63,7 +64,7 @@
 
   no strict qw/refs/;
   no warnings qw/redefine/;
-  *{__PACKAGE__ ."::$meth"} = sub {
+  *{__PACKAGE__ ."::$meth"} = Sub::Name::subname $meth => sub {
     if (not $_[0]->_driver_determined) {
       $_[0]->_determine_driver;
       goto $_[0]->can($meth);
@@ -1580,6 +1581,14 @@
 
   # quick check if we got a sane rs on our hands
   my @pcols = $rsrc->primary_columns;
+  unless (@pcols) {
+    $self->throw_exception (
+      sprintf (
+        "You must declare primary key(s) on source '%s' (via set_primary_key) in order to update or delete complex resultsets",
+        $rsrc->source_name || $rsrc->from
+      )
+    );
+  }
 
   my $sel = $rs->_resolved_attrs->{select};
   $sel = [ $sel ] unless ref $sel eq 'ARRAY';
@@ -1991,7 +2000,7 @@
 
 This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
 currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
-L<::Sybase|DBIx::Class::Storage::DBI::Sybase>.
+L<::Sybase::ASE|DBIx::Class::Storage::DBI::Sybase::ASE>.
 
 The default implementation returns C<undef>, implement in your Storage driver if
 you need this functionality.

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/101populate_rs.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/101populate_rs.t	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/101populate_rs.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -15,9 +15,7 @@
 use lib qw(t/lib);
 use DBICTest;
 
-plan tests => 142;
 
-
 ## ----------------------------------------------------------------------------
 ## Get a Schema and some ResultSets we can play with.
 ## ----------------------------------------------------------------------------
@@ -26,6 +24,8 @@
 my $art_rs	= $schema->resultset('Artist');
 my $cd_rs	= $schema->resultset('CD');
 
+my $restricted_art_rs	= $art_rs->search({rank => 42});
+
 ok( $schema, 'Got a Schema object');
 ok( $art_rs, 'Got Good Artist Resultset');
 ok( $cd_rs, 'Got Good CD Resultset');
@@ -333,6 +333,18 @@
 		is($cdB->artist->name, 'Fred BloggsD', 'Set Artist to FredD');
 		ok($cdB->artist->artistid == $aid, "Got Expected Artist ID");
 	}
+
+  WITH_COND_FROM_RS: {
+  
+    my ($more_crap) = $restricted_art_rs->populate([
+      {
+        name => 'More Manufactured Crap',
+      },
+    ]);
+    
+    ## Did it use the condition in the resultset?
+    cmp_ok( $more_crap->rank, '==', 42, "Got Correct rank for result object");
+  } 
 }
 
 
@@ -601,6 +613,21 @@
 		ok( $cd2->title eq "VOID_Yet More Tweeny-Pop crap", "Got Expected CD Title");
 	}
 
+  WITH_COND_FROM_RS: {
+  
+    $restricted_art_rs->populate([
+      {
+        name => 'VOID More Manufactured Crap',
+      },
+    ]);
+
+    my $more_crap = $art_rs->search({
+      name => 'VOID More Manufactured Crap'
+    })->first;
+    
+    ## Did it use the condition in the resultset?
+    cmp_ok( $more_crap->rank, '==', 42, "Got Correct rank for result object");
+  } 
 }
 
 ARRAYREF_OF_ARRAYREF_STYLE: {
@@ -619,7 +646,7 @@
   is $jumped->name, 'A singer that jumped the shark two albums ago', 'Correct Name';
   is $cool->name, 'An actually cool singer.', 'Correct Name';
   
-  my ($cooler, $lamer) = $art_rs->populate([
+  my ($cooler, $lamer) = $restricted_art_rs->populate([
     [qw/artistid name/],
     [1003, 'Cooler'],
     [1004, 'Lamer'],	
@@ -627,4 +654,36 @@
   
   is $cooler->name, 'Cooler', 'Correct Name';
   is $lamer->name, 'Lamer', 'Correct Name';  
-}
\ No newline at end of file
+
+  cmp_ok $cooler->rank, '==', 42, 'Correct Rank';
+
+  ARRAY_CONTEXT_WITH_COND_FROM_RS: {
+  
+    my ($mega_lamer) = $restricted_art_rs->populate([
+      {
+        name => 'Mega Lamer',
+      },
+    ]);
+
+    ## Did it use the condition in the resultset?
+    cmp_ok( $mega_lamer->rank, '==', 42, "Got Correct rank for result object");
+  } 
+
+  VOID_CONTEXT_WITH_COND_FROM_RS: {
+  
+    $restricted_art_rs->populate([
+      {
+        name => 'VOID Mega Lamer',
+      },
+    ]);
+
+    my $mega_lamer = $art_rs->search({
+      name => 'VOID Mega Lamer'
+    })->first;
+    
+    ## Did it use the condition in the resultset?
+    cmp_ok( $mega_lamer->rank, '==', 42, "Got Correct rank for result object");
+  } 
+}
+
+done_testing;

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/60core.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/60core.t	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/60core.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -1,5 +1,5 @@
 use strict;
-use warnings;  
+use warnings;
 
 use Test::More;
 use Test::Exception;

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/71mysql.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/71mysql.t	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/71mysql.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -25,7 +25,7 @@
 
 $dbh->do("DROP TABLE IF EXISTS cd;");
 
-$dbh->do("CREATE TABLE cd (cdid INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, artist INTEGER, title TEXT, year INTEGER, genreid INTEGER, single_track INTEGER);");
+$dbh->do("CREATE TABLE cd (cdid INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, artist INTEGER, title TEXT, year DATE, genreid INTEGER, single_track INTEGER);");
 
 $dbh->do("DROP TABLE IF EXISTS producer;");
 
@@ -160,8 +160,6 @@
 
     my $type_info = $schema->storage->columns_info_for('artist');
     is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
-
-
 }
 
 my $cd = $schema->resultset ('CD')->create ({});
@@ -227,7 +225,54 @@
       => 'Nothing Found!';
 }
 
+ZEROINSEARCH: {
+  my $cds_per_year = {
+    2001 => 2,
+    2002 => 1,
+    2005 => 3,
+  };
 
+  my $rs = $schema->resultset ('CD');
+  $rs->delete;
+  for my $y (keys %$cds_per_year) {
+    for my $c (1 .. $cds_per_year->{$y} ) {
+      $rs->create ({ title => "CD $y-$c", artist => 1, year => "$y-01-01" });
+    }
+  }
+
+  is ($rs->count, 6, 'CDs created successfully');
+
+  $rs = $rs->search ({}, {
+    select => [ {year => 'year'} ], as => ['y'], distinct => 1, order_by => 'year',
+  });
+
+  is_deeply (
+    [ $rs->get_column ('y')->all ],
+    [ sort keys %$cds_per_year ],
+    'Years group successfully',
+  );
+
+  $rs->create ({ artist => 1, year => '0-1-1', title => 'Jesus Rap' });
+
+  is_deeply (
+    [ $rs->get_column ('y')->all ],
+    [ 0, sort keys %$cds_per_year ],
+    'Zero-year groups successfully',
+  );
+
+  # convoluted search taken verbatim from the mailing list
+  my $restrict_rs = $rs->search({ -and => [
+    year => { '!=', 0 },
+    year => { '!=', undef }
+  ]});
+
+  is_deeply (
+    [ $restrict_rs->get_column('y')->all ],
+    [ $rs->get_column ('y')->all ],
+    'Zero year was correctly excluded from resultset',
+  );
+}
+
 ## If find() is the first query after connect()
 ## DBI::Storage::sql_maker() will be called before
 ## _determine_driver() and so the ::SQLHacks class for MySQL

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/746sybase.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/746sybase.t	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/746sybase.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -7,9 +7,6 @@
 use lib qw(t/lib);
 use DBICTest;
 
-require DBIx::Class::Storage::DBI::Sybase;
-require DBIx::Class::Storage::DBI::Sybase::NoBindVars;
-
 my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};
 
 my $TESTS = 63 + 2;
@@ -24,9 +21,11 @@
 }
 
 my @storage_types = (
-  'DBI::Sybase',
-  'DBI::Sybase::NoBindVars',
+  'DBI::Sybase::ASE',
+  'DBI::Sybase::ASE::NoBindVars',
 );
+eval "require DBIx::Class::Storage::$_;" for @storage_types;
+
 my $schema;
 my $storage_idx = -1;
 
@@ -40,8 +39,8 @@
 
 my $ping_count = 0;
 {
-  my $ping = DBIx::Class::Storage::DBI::Sybase->can('_ping');
-  *DBIx::Class::Storage::DBI::Sybase::_ping = sub {
+  my $ping = DBIx::Class::Storage::DBI::Sybase::ASE->can('_ping');
+  *DBIx::Class::Storage::DBI::Sybase::ASE::_ping = sub {
     $ping_count++;
     goto $ping;
   };
@@ -50,7 +49,7 @@
 for my $storage_type (@storage_types) {
   $storage_idx++;
 
-  unless ($storage_type eq 'DBI::Sybase') { # autodetect
+  unless ($storage_type eq 'DBI::Sybase::ASE') { # autodetect
     DBICTest::Schema->storage_type("::$storage_type");
   }
 
@@ -59,7 +58,7 @@
   $schema->storage->ensure_connected;
 
   if ($storage_idx == 0 &&
-      $schema->storage->isa('DBIx::Class::Storage::DBI::Sybase::NoBindVars')) {
+      $schema->storage->isa('DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars')) {
 # no placeholders in this version of Sybase or DBD::Sybase (or using FreeTDS)
       my $tb = Test::More->builder;
       $tb->skip('no placeholders') for 1..$TESTS;
@@ -96,7 +95,7 @@
   $seen_id{$new->artistid}++;
 
 # check redispatch to storage-specific insert when auto-detected storage
-  if ($storage_type eq 'DBI::Sybase') {
+  if ($storage_type eq 'DBI::Sybase::ASE') {
     DBICTest::Schema->storage_type('::DBI');
     $schema = get_schema();
   }
@@ -402,7 +401,7 @@
     my $new_str = $binstr{large} . 'mtfnpy';
 
     # check redispatch to storage-specific update when auto-detected storage
-    if ($storage_type eq 'DBI::Sybase') {
+    if ($storage_type eq 'DBI::Sybase::ASE') {
       DBICTest::Schema->storage_type('::DBI');
       $schema = get_schema();
     }

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/93single_accessor_object.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/93single_accessor_object.t	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/93single_accessor_object.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -48,7 +48,7 @@
 	my $artist = $schema->resultset('Artist')->create({ artistid => 666, name => 'bad religion' });
 	my $cd = $schema->resultset('CD')->create({ cdid => 187, artist => 1, title => 'how could hell be any worse?', year => 1982, genreid => undef });
 
-	ok(!defined($cd->genreid), 'genreid is NULL');
+	ok(!defined($cd->get_column('genreid')), 'genreid is NULL');  #no accessor was defined for this column
 	ok(!defined($cd->genre), 'genre accessor returns undef');
 }
 

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/95sql_maker.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/95sql_maker.t	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/95sql_maker.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -70,8 +70,7 @@
 }
 
 # Make sure the carp/croak override in SQLA works (via SQLAHacks)
-my $file = __FILE__;
-$file = "\Q$file\E";
+my $file = quotemeta (__FILE__);
 throws_ok (sub {
   $schema->resultset ('Artist')->search ({}, { order_by => { -asc => 'stuff', -desc => 'staff' } } )->as_query;
 }, qr/$file/, 'Exception correctly croak()ed');

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/95sql_maker_quote.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/95sql_maker_quote.t	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/95sql_maker_quote.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -6,13 +6,6 @@
 use lib qw(t/lib);
 use DBIC::SqlMakerTest;
 
-BEGIN {
-    eval "use DBD::SQLite";
-    plan $@
-        ? ( skip_all => 'needs DBD::SQLite for testing' )
-        : ( tests => 12 );
-}
-
 use_ok('DBICTest');
 
 my $schema = DBICTest->init_schema();
@@ -235,6 +228,36 @@
 );
 
 
+($sql, @bind) = $sql_maker->select(
+  [ { me => 'cd' }                  ],
+  [qw/ me.cdid me.artist me.title  /],
+  { cdid => \['rlike ?', [cdid => 'X'] ]       },
+  { group_by => 'title', having => \['count(me.artist) > ?', [ cnt => 2] ] },
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title` FROM `cd` `me` WHERE ( `cdid` rlike ? ) GROUP BY `title` HAVING count(me.artist) > ?/,
+  [ [ cdid => 'X'], ['cnt' => '2'] ],
+  'Quoting works with where/having arrayrefrefs',
+);
+
+
+($sql, @bind) = $sql_maker->select(
+  [ { me => 'cd' }                  ],
+  [qw/ me.cdid me.artist me.title  /],
+  { cdid => \'rlike X'              },
+  { group_by => 'title', having => \'count(me.artist) > 2' },
+);
+
+is_same_sql_bind(
+  $sql, \@bind,
+  q/SELECT `me`.`cdid`, `me`.`artist`, `me`.`title` FROM `cd` `me` WHERE ( `cdid` rlike X ) GROUP BY `title` HAVING count(me.artist) > 2/,
+  [],
+  'Quoting works with where/having scalarrefs',
+);
+
+
 ($sql, @bind) = $sql_maker->update(
           'group',
           {
@@ -330,3 +353,5 @@
   q/UPDATE [group] SET [name] = ?, [order] = ?/, [ ['name' => 'Bill'], ['order' => '12'] ],
   'bracket quoted table names for UPDATE'
 );
+
+done_testing;

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/count/prefetch.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/count/prefetch.t	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/count/prefetch.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -39,16 +39,9 @@
 
 # collapsing prefetch with distinct
 {
-  my $first_cd = $schema->resultset('Artist')->first->cds->first;
-  $first_cd->update ({
-    genreid => $first_cd->create_related (
-      genre => ({ name => 'vague genre' })
-    )->id
-  });
-
   my $rs = $schema->resultset("Artist")->search(undef, {distinct => 1})
             ->search_related('cds')->search_related('genre',
-                { 'genre.name' => { '!=', 'foo' } },
+                { 'genre.name' => 'emo' },
                 { prefetch => q(cds) },
             );
   is ($rs->all, 1, 'Correct number of objects');
@@ -60,15 +53,22 @@
       SELECT COUNT( * )
         FROM (
           SELECT genre.genreid
-            FROM artist me
-            JOIN cd cds ON cds.artist = me.artistid
+            FROM (
+              SELECT cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track
+                FROM (
+                  SELECT me.artistid, me.name, me.rank, me.charfield
+                    FROM artist me GROUP BY me.artistid, me.name, me.rank, me.charfield
+                ) me
+                LEFT JOIN cd cds ON cds.artist = me.artistid
+              GROUP BY cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track
+            ) cds
             JOIN genre genre ON genre.genreid = cds.genreid
             LEFT JOIN cd cds_2 ON cds_2.genreid = genre.genreid
-          WHERE ( genre.name != ? )
+          WHERE ( genre.name = ? )
           GROUP BY genre.genreid
         ) count_subq
     )',
-    [ [ 'genre.name' => 'foo' ] ],
+    [ [ 'genre.name' => 'emo' ] ],
   );
 }
 

Added: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/count/search_related.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/count/search_related.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/count/search_related.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -0,0 +1,41 @@
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+my $cd_rs = $schema->resultset('CD')->search ({}, { rows => 1, order_by => 'cdid' });
+
+my $track_count = $cd_rs->first->tracks->count;
+
+cmp_ok ($track_count, '>', 1, 'First CD has several tracks');
+
+is ($cd_rs->search_related ('tracks')->count, $track_count, 'related->count returns correct number chained off a limited rs');
+is (scalar ($cd_rs->search_related ('tracks')->all), $track_count, 'related->all returns correct number of objects chained off a limited rs');
+
+
+my $joined_cd_rs = $cd_rs->search ({}, {
+  join => 'tracks', rows => 2, distinct => 1, having => \ 'count(tracks.trackid) > 2',
+});
+
+my $multiple_track_count = $schema->resultset('Track')->search ({
+  cd => { -in => $joined_cd_rs->get_column ('cdid')->as_query }
+})->count;
+
+
+is (
+  $joined_cd_rs->search_related ('tracks')->count,
+  $multiple_track_count,
+  'related->count returns correct number chained off a grouped rs',
+);
+is (
+  scalar ($joined_cd_rs->search_related ('tracks')->all),
+  $multiple_track_count,
+  'related->all returns correct number of objects chained off a grouped rs',
+);
+
+done_testing;

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/from_subquery.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/from_subquery.t	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/from_subquery.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -1,5 +1,5 @@
 use strict;
-use warnings FATAL => 'all';
+use warnings;
 
 use Test::More;
 

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/inflate/datetime_sybase.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/inflate/datetime_sybase.t	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/inflate/datetime_sybase.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -23,15 +23,15 @@
 }
 
 my @storage_types = (
-  'DBI::Sybase',
-  'DBI::Sybase::NoBindVars',
+  'DBI::Sybase::ASE',
+  'DBI::Sybase::ASE::NoBindVars',
 );
 my $schema;
 
 for my $storage_type (@storage_types) {
   $schema = DBICTest::Schema->clone;
 
-  unless ($storage_type eq 'DBI::Sybase') { # autodetect
+  unless ($storage_type eq 'DBI::Sybase::ASE') { # autodetect
     $schema->storage_type("::$storage_type");
   }
   $schema->connection($dsn, $user, $pass, {

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/lib/DBICTest/Schema/CD.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/lib/DBICTest/Schema/CD.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/lib/DBICTest/Schema/CD.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -26,6 +26,7 @@
   'genreid' => { 
     data_type => 'integer',
     is_nullable => 1,
+    accessor => undef,
   },
   'single_track' => {
     data_type => 'integer',

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/lib/DBICTest.pm
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/lib/DBICTest.pm	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/lib/DBICTest.pm	2009-12-04 01:44:09 UTC (rev 8025)
@@ -155,6 +155,11 @@
     my $self = shift;
     my $schema = shift;
 
+    $schema->populate('Genre', [
+      [qw/genreid name/],
+      [qw/1       emo  /],
+    ]);
+
     $schema->populate('Artist', [
         [ qw/artistid name/ ],
         [ 1, 'Caterwauler McCrae' ],
@@ -163,8 +168,8 @@
     ]);
 
     $schema->populate('CD', [
-        [ qw/cdid artist title year/ ],
-        [ 1, 1, "Spoonful of bees", 1999 ],
+        [ qw/cdid artist title year genreid/ ],
+        [ 1, 1, "Spoonful of bees", 1999, 1 ],
         [ 2, 1, "Forkful of bees", 2001 ],
         [ 3, 1, "Caterwaulin' Blues", 1997 ],
         [ 4, 2, "Generic Manufactured Singles", 2001 ],
@@ -243,7 +248,7 @@
     
     $schema->populate('TreeLike', [
         [ qw/id parent name/ ],
-        [ 1, undef, 'root' ],        
+        [ 1, undef, 'root' ],
         [ 2, 1, 'foo'  ],
         [ 3, 2, 'bar'  ],
         [ 6, 2, 'blop' ],

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/prefetch/via_search_related.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/prefetch/via_search_related.t	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/prefetch/via_search_related.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -57,12 +57,12 @@
     }
   );
 
-  is($use_prefetch->count, $no_prefetch->count, 'counts with and without prefetch match');
   is(
     scalar ($use_prefetch->all),
     scalar ($no_prefetch->all),
     "Amount of returned rows is right"
   );
+  is($use_prefetch->count, $no_prefetch->count, 'counts with and without prefetch match');
 
 }, 'search_related prefetch with condition referencing unqualified column of a joined table works');
 }

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/resultset/as_query.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/resultset/as_query.t	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/resultset/as_query.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -1,7 +1,5 @@
-#!/usr/bin/perl
-
 use strict;
-use warnings FATAL => 'all';
+use warnings;
 
 use Test::More;
 

Modified: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/resultset/is_paged.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/resultset/is_paged.t	2009-12-04 00:52:44 UTC (rev 8024)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/resultset/is_paged.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -16,4 +16,3 @@
 ok $paginated->is_paged, 'resultset is paginated now';
 
 done_testing;
-

Added: DBIx-Class/0.08/branches/create_scalarref_rt51559/t/resultset/plus_select.t
===================================================================
--- DBIx-Class/0.08/branches/create_scalarref_rt51559/t/resultset/plus_select.t	                        (rev 0)
+++ DBIx-Class/0.08/branches/create_scalarref_rt51559/t/resultset/plus_select.t	2009-12-04 01:44:09 UTC (rev 8025)
@@ -0,0 +1,63 @@
+use strict;
+use warnings;
+
+use Test::More;
+
+use lib qw(t/lib);
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $cd_rs = $schema->resultset('CD')->search ({genreid => { '!=', undef } }, { order_by => 'cdid' });
+my $track_cnt = $cd_rs->search({}, { rows => 1 })->search_related ('tracks')->count;
+
+my %basecols = $cd_rs->first->get_columns;
+
+# the current implementation of get_inflated_columns will "inflate"
+# relationships by simply calling the accessor, when you have
+# identically named columns and relationships (you shouldn't anyway)
+# I consider this wrong, but at the same time appreciate the
+# ramifications of changing this. Thus the value override  and the
+# TODO to go with it. Delete all of this if ever resolved.
+my %todo_rel_inflation_override = ( artist => $basecols{artist} );
+TODO: {
+  local $TODO = 'Treating relationships as inflatable data is wrong - see comment in ' . __FILE__;
+  ok (! keys %todo_rel_inflation_override);
+}
+
+my $plus_rs = $cd_rs->search (
+  {},
+  { join => 'tracks', distinct => 1, '+select' => { count => 'tracks.trackid' }, '+as' => 'tr_cnt' },
+);
+
+is_deeply (
+  { $plus_rs->first->get_columns },
+  { %basecols, tr_cnt => $track_cnt },
+  'extra columns returned by get_columns',
+);
+
+is_deeply (
+  { $plus_rs->first->get_inflated_columns, %todo_rel_inflation_override },
+  { %basecols, tr_cnt => $track_cnt },
+  'extra columns returned by get_inflated_columns without inflatable columns',
+);
+
+SKIP: {
+  eval { require DateTime };
+  skip "Need DateTime for +select/get_inflated_columns tests", 1 if $@;
+
+  $schema->class('CD')->inflate_column( 'year',
+    { inflate => sub { DateTime->new( year => shift ) },
+      deflate => sub { shift->year } }
+  );
+
+  $basecols{year} = DateTime->new ( year => $basecols{year} );
+
+  is_deeply (
+    { $plus_rs->first->get_inflated_columns, %todo_rel_inflation_override },
+    { %basecols, tr_cnt => $track_cnt },
+    'extra columns returned by get_inflated_columns',
+  );
+}
+
+done_testing;



