[Bast-commits] r7797 - in DBIx-Class/0.08/branches/sybase_support: . lib/DBIx lib/DBIx/Class lib/DBIx/Class/Storage lib/DBIx/Class/Storage/DBI lib/DBIx/Class/Storage/DBI/ODBC lib/DBIx/Class/Storage/DBI/Sybase t

caelum at dev.catalyst.perl.org
Sun Oct 18 08:57:44 GMT 2009


Author: caelum
Date: 2009-10-18 08:57:43 +0000 (Sun, 18 Oct 2009)
New Revision: 7797

Modified:
   DBIx-Class/0.08/branches/sybase_support/
   DBIx-Class/0.08/branches/sybase_support/Changes
   DBIx-Class/0.08/branches/sybase_support/Makefile.PL
   DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class.pm
   DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/ResultSet.pm
   DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI.pm
   DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/Replicated.pm
   DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/Sybase.pm
   DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/Sybase/NoBindVars.pm
   DBIx-Class/0.08/branches/sybase_support/t/100populate.t
   DBIx-Class/0.08/branches/sybase_support/t/746sybase.t
Log:
 r20732 at hlagh (orig r7707):  ribasushi | 2009-09-20 19:20:00 -0400
 Branch for bulk insert
 r20733 at hlagh (orig r7708):  ribasushi | 2009-09-20 20:06:21 -0400
 All sybase bulk-insert code by Caelum
 r20750 at hlagh (orig r7725):  caelum | 2009-09-24 02:47:39 -0400
 clean up set_identity stuff
 r20751 at hlagh (orig r7726):  caelum | 2009-09-24 05:21:18 -0400
 minor cleanups, test update of blob to NULL
 r20752 at hlagh (orig r7727):  caelum | 2009-09-24 08:45:04 -0400
 remove some duplicate code
 r20753 at hlagh (orig r7728):  caelum | 2009-09-24 09:57:58 -0400
 fix insert with all defaults
 r20786 at hlagh (orig r7732):  caelum | 2009-09-25 21:17:16 -0400
 some cleanups
 r20804 at hlagh (orig r7736):  caelum | 2009-09-28 05:31:38 -0400
 minor changes
 r20805 at hlagh (orig r7737):  caelum | 2009-09-28 06:25:48 -0400
 fix DT stuff
 r20809 at hlagh (orig r7741):  caelum | 2009-09-28 22:25:55 -0400
 removed some dead code, added fix and test for _execute_array_empty
 r20811 at hlagh (orig r7743):  caelum | 2009-09-29 13:36:20 -0400
 minor changes after review
 r20812 at hlagh (orig r7744):  caelum | 2009-09-29 14:16:03 -0400
 do not clobber $rv from execute_array
 r20813 at hlagh (orig r7745):  caelum | 2009-09-29 14:38:14 -0400
 make insert_bulk atomic
 r20815 at hlagh (orig r7747):  caelum | 2009-09-29 20:35:26 -0400
 remove _exhaaust_statements
 r20816 at hlagh (orig r7748):  caelum | 2009-09-29 21:48:38 -0400
 fix insert_bulk when not using bulk api inside a txn
 r20831 at hlagh (orig r7749):  caelum | 2009-09-30 02:53:42 -0400
 added test for populate being atomic
 r20832 at hlagh (orig r7750):  caelum | 2009-09-30 03:00:59 -0400
 factor out subclass-specific _execute_array callback
 r20833 at hlagh (orig r7751):  caelum | 2009-10-01 11:59:30 -0400
 remove a piece of dead code
 r20840 at hlagh (orig r7758):  caelum | 2009-10-03 15:46:56 -0400
 remove _pretty_print
 r20842 at hlagh (orig r7760):  caelum | 2009-10-04 16:19:56 -0400
 minor optimization for insert_bulk
 r21050 at hlagh (orig r7796):  caelum | 2009-10-18 04:56:54 -0400
 error checking related to literal SQL for insert_bulk



Property changes on: DBIx-Class/0.08/branches/sybase_support
___________________________________________________________________
Name: svk:merge
   - 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/cookbook_fixes:7657
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7982
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/autocast:7418
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/connect_info_hash:7435
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cookbook_fixes:7479
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_has_many_join:7382
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/is_resultset_paginated:7769
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7566
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:7682
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulk_insert:7679
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:7793
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
   + 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/cookbook_fixes:7657
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7982
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/autocast:7418
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/connect_info_hash:7435
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cookbook_fixes:7479
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_has_many_join:7382
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/is_resultset_paginated:7769
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7566
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:7682
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulk_insert:7679
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulkinsert_support:7796
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:7793
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510

Modified: DBIx-Class/0.08/branches/sybase_support/Changes
===================================================================
--- DBIx-Class/0.08/branches/sybase_support/Changes	2009-10-18 08:56:54 UTC (rev 7796)
+++ DBIx-Class/0.08/branches/sybase_support/Changes	2009-10-18 08:57:43 UTC (rev 7797)
@@ -1,16 +1,18 @@
 Revision history for DBIx::Class
 
+
         - Complete Sybase RDBMS support including:
           - Support for TEXT/IMAGE columns
           - Support for the 'money' datatype
-        - Transaction savepoints support
-        - DateTime inflation support
-        - Support for bind variables when connecting to a newer Sybase with
-           OpenClient libraries
-        - Support for connections via FreeTDS with CASTs for bind variables
-           when needed
-        - Support for interpolated variables with proper quoting when
-           connecting to an older Sybase and/or via FreeTDS
+          - Transaction savepoints support
+          - DateTime inflation support
+          - Support for bind variables when connecting to a newer Sybase with
+             OpenClient libraries
+          - Support for connections via FreeTDS with CASTs for bind variables
+             when needed
+          - Support for interpolated variables with proper quoting when
+             connecting to an older Sybase and/or via FreeTDS
+          - bulk API support for populate()
         - Add is_paged method to DBIx::Class::ResultSet so that we can
           check if we want a pager
         - Skip versioning test on really old perls lacking Time::HiRes

Modified: DBIx-Class/0.08/branches/sybase_support/Makefile.PL
===================================================================
--- DBIx-Class/0.08/branches/sybase_support/Makefile.PL	2009-10-18 08:56:54 UTC (rev 7796)
+++ DBIx-Class/0.08/branches/sybase_support/Makefile.PL	2009-10-18 08:57:43 UTC (rev 7797)
@@ -45,6 +45,7 @@
 requires 'SQL::Abstract'            => '1.60';
 requires 'SQL::Abstract::Limit'     => '0.13';
 requires 'Sub::Name'                => '0.04';
+requires 'Data::Dumper::Concise'    => '1.000';
 
 my %replication_requires = (
   'Moose',                    => '0.87',

Modified: DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/ResultSet.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/ResultSet.pm	2009-10-18 08:56:54 UTC (rev 7796)
+++ DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/ResultSet.pm	2009-10-18 08:57:43 UTC (rev 7797)
@@ -1796,7 +1796,10 @@
   } else {
     my ($first, @rest) = @$data;
 
-    my @names = grep {!ref $first->{$_}} keys %$first;
+    my @names = grep {
+      (not ref $first->{$_}) || (ref $first->{$_} eq 'SCALAR')
+    } keys %$first;
+
     my @rels = grep { $self->result_source->has_relationship($_) } keys %$first;
     my @pks = $self->result_source->primary_columns;
 
@@ -2813,10 +2816,7 @@
       : (
           ( delete $attrs->{columns} )
             ||
-          $source->storage->_order_select_columns(
-              $source,
-              [ $source->columns ],
-          )
+          $source->columns
         )
     ;
 

Modified: DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm	2009-10-18 08:56:54 UTC (rev 7796)
+++ DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm	2009-10-18 08:57:43 UTC (rev 7797)
@@ -61,7 +61,7 @@
   my $self = shift;
 
   if (ref($self->_dbi_connect_info->[0]) eq 'CODE') {
-    $self->throw_exception ('cannot set DBI attributes on a CODE ref connect_info');
+    $self->throw_exception ('Cannot set DBI attributes on a CODE ref connect_info');
   }
 
   my $dbi_attrs = $self->_dbi_connect_info->[-1];

Modified: DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/Replicated.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/Replicated.pm	2009-10-18 08:56:54 UTC (rev 7796)
+++ DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/Replicated.pm	2009-10-18 08:57:43 UTC (rev 7797)
@@ -325,7 +325,6 @@
     _count_select
     _subq_count_select
     _subq_update_delete
-    _order_select_columns
     svp_rollback
     svp_begin
     svp_release

Modified: DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/Sybase/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/Sybase/NoBindVars.pm	2009-10-18 08:56:54 UTC (rev 7796)
+++ DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/Sybase/NoBindVars.pm	2009-10-18 08:57:43 UTC (rev 7797)
@@ -59,9 +59,6 @@
   return $value;
 }
 
-# for tests
-sub _can_insert_bulk { 0 }
-
 1;
 
 =head1 NAME

Modified: DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/Sybase.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/Sybase.pm	2009-10-18 08:56:54 UTC (rev 7796)
+++ DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI/Sybase.pm	2009-10-18 08:57:43 UTC (rev 7797)
@@ -9,15 +9,18 @@
 /;
 use mro 'c3';
 use Carp::Clan qw/^DBIx::Class/;
-use List::Util ();
-use Sub::Name ();
+use List::Util();
+use Sub::Name();
+use Data::Dumper::Concise();
 
 __PACKAGE__->mk_group_accessors('simple' =>
-    qw/_identity _blob_log_on_update _writer_storage _is_writer_storage
+    qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
+       _bulk_storage _is_bulk_storage _began_bulk_work
+       _bulk_disabled_due_to_coderef_connect_info_warned
        _identity_method/
 );
 
-my @also_proxy_to_writer_storage = qw/
+my @also_proxy_to_extra_storages = qw/
   connect_call_set_auto_cast auto_cast connect_call_blob_setup
   connect_call_datetime_setup
 
@@ -105,7 +108,7 @@
         bless $self, $no_bind_vars;
         $self->_rebless;
       } elsif (not $self->_typeless_placeholders_supported) {
-# this is highly unlikely, but we check just in case
+        # this is highly unlikely, but we check just in case
         $self->auto_cast(1);
       }
     }
@@ -121,30 +124,63 @@
 
 # create storage for insert/(update blob) transactions,
 # unless this is that storage
-  return if $self->_is_writer_storage;
+  return if $self->_is_extra_storage;
 
   my $writer_storage = (ref $self)->new;
 
-  $writer_storage->_is_writer_storage(1);
+  $writer_storage->_is_extra_storage(1);
   $writer_storage->connect_info($self->connect_info);
   $writer_storage->auto_cast($self->auto_cast);
 
   $self->_writer_storage($writer_storage);
+
+# create a bulk storage unless connect_info is a coderef
+  return
+    if (Scalar::Util::reftype($self->_dbi_connect_info->[0])||'') eq 'CODE';
+
+  my $bulk_storage = (ref $self)->new;
+
+  $bulk_storage->_is_extra_storage(1);
+  $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
+  $bulk_storage->connect_info($self->connect_info);
+
+# this is why we can't use a coderef connect_info: the DSN string needs modifying
+  $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
+
+  $self->_bulk_storage($bulk_storage);
 }
 
-for my $method (@also_proxy_to_writer_storage) {
+for my $method (@also_proxy_to_extra_storages) {
   no strict 'refs';
   no warnings 'redefine';
 
   my $replaced = __PACKAGE__->can($method);
 
-  *{$method} = Sub::Name::subname __PACKAGE__."::$method" => sub {
+  *{$method} = Sub::Name::subname $method => sub {
     my $self = shift;
     $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
+    $self->_bulk_storage->$replaced(@_)   if $self->_bulk_storage;
     return $self->$replaced(@_);
   };
 }
 
+sub disconnect {
+  my $self = shift;
+
+# Even though we call $sth->finish for uses of the bulk API, there's still an
+# "active statement" warning on disconnect, which we throw away here.
+# This is due to the bug described in insert_bulk.
+# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
+  local $SIG{__WARN__} = sub {
+    warn $_[0] unless $_[0] =~ /active statement/i;
+  } if $self->_is_bulk_storage;
+
+# so that next transaction gets a dbh
+  $self->_began_bulk_work(0) if $self->_is_bulk_storage;
+
+  $self->next::method;
+}
+
 # Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
 # DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
 # we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
@@ -153,6 +189,12 @@
   my $self = shift;
 
   $self->next::method(@_);
+  
+  if ($self->_is_bulk_storage) {
+# this should be cleared on every reconnect
+    $self->_began_bulk_work(0);
+    return;
+  }
 
   if (not $self->using_freetds) {
     $self->_dbh->{syb_chained_txn} = 1;
@@ -212,41 +254,47 @@
 
   my ($sql, $bind) = $self->next::method (@_);
 
-  if ($op eq 'insert') {
-    my $table = $ident->from;
+  my $table = Scalar::Util::blessed($ident) ? $ident->from : $ident;
 
-    my $bind_info = $self->_resolve_column_info(
-      $ident, [map $_->[0], @{$bind}]
+  my $bind_info = $self->_resolve_column_info(
+    $ident, [map $_->[0], @{$bind}]
+  );
+  my $bound_identity_col = List::Util::first
+    { $bind_info->{$_}{is_auto_increment} }
+    (keys %$bind_info)
+  ;
+  my $identity_col = Scalar::Util::blessed($ident) &&
+    List::Util::first
+    { $ident->column_info($_)->{is_auto_increment} }
+    $ident->columns
+  ;
+
+  if (($op eq 'insert' && $bound_identity_col) ||
+      ($op eq 'update' && exists $args->[0]{$identity_col})) {
+    $sql = join ("\n",
+      $self->_set_table_identity_sql($op => $table, 'on'),
+      $sql,
+      $self->_set_table_identity_sql($op => $table, 'off'),
     );
-    my $identity_col = List::Util::first
-      { $bind_info->{$_}{is_auto_increment} }
-      (keys %$bind_info)
-    ;
+  }
 
-    if ($identity_col) {
-      $sql = join ("\n",
-        "SET IDENTITY_INSERT $table ON",
-        $sql,
-        "SET IDENTITY_INSERT $table OFF",
-      );
-    }
-    else {
-      $identity_col = List::Util::first
-        { $ident->column_info($_)->{is_auto_increment} }
-        $ident->columns
-      ;
-    }
-
-    if ($identity_col) {
-      $sql =
-        "$sql\n" .
-        $self->_fetch_identity_sql($ident, $identity_col);
-    }
+  if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
+      (not $self->{insert_bulk})) {
+    $sql =
+      "$sql\n" .
+      $self->_fetch_identity_sql($ident, $identity_col);
   }
 
   return ($sql, $bind);
 }
 
+sub _set_table_identity_sql {
+  my ($self, $op, $table, $on_off) = @_;
+
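+  # For example (an illustrative call; exact quoting of the table name
+  # depends on the sql_maker settings):
+  #   $self->_set_table_identity_sql(insert => 'artist', 'on')
+  # produces: SET IDENTITY_INSERT artist ON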
+  return sprintf 'SET IDENTITY_%s %s %s',
+    uc($op), $self->sql_maker->_quote($table), uc($on_off);
+}
+
 # Stolen from SQLT, with some modifications. This is a makeshift
 # solution before a sane type-mapping library is available, thus
 # the 'our' for easy overrides.
@@ -306,12 +354,22 @@
   my $self = shift;
   my ($source, $to_insert) = @_;
 
+  my $identity_col = (List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns) || '';
+
+  # check for empty insert
+  # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
+  # try to insert explicit 'DEFAULT's instead (except for identity)
+  if (not %$to_insert) {
+    for my $col ($source->columns) {
+      next if $col eq $identity_col;
+      $to_insert->{$col} = \'DEFAULT';
+    }
+  }
+
   my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
 
-  my $identity_col = List::Util::first
-    { $source->column_info($_)->{is_auto_increment} }
-    $source->columns;
-
   # do we need the horrific SELECT MAX(COL) hack?
   my $dumb_last_insert_id =
        $identity_col
@@ -351,7 +409,8 @@
   my $updated_cols = $self->$next ($source, $to_insert);
 
   my $final_row = {
-    $identity_col => $self->last_insert_id($source, $identity_col),
+    ($identity_col ?
+      ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
     %$to_insert,
     %$updated_cols,
   };
@@ -377,19 +436,11 @@
 
   my $is_identity_update = $identity_col && defined $fields->{$identity_col};
 
-  if (not $blob_cols) {
-    $self->_set_identity_insert($table, 'update')   if $is_identity_update;
-    return $self->next::method(@_);
-    $self->_unset_identity_insert($table, 'update') if $is_identity_update;
-  }
+  return $self->next::method(@_) unless $blob_cols;
 
-# check that we're not updating a blob column that's also in $where
-  for my $blob (grep $self->_is_lob_column($source, $_), $source->columns) {
-    if (exists $where->{$blob} && exists $fields->{$blob}) {
-      croak
-'Update of TEXT/IMAGE column that is also in search condition impossible';
-    }
-  }
+# If there are any blobs in $where, Sybase will return a descriptive error
+# message.
+# XXX blobs can still be used with a LIKE query, and this should be handled.
 
 # update+blob update(s) done atomically on separate connection
   $self = $self->_writer_storage;
@@ -400,6 +451,8 @@
 # it is originally put by _remove_blob_cols .)
   my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
 
+# We cannot update only the NULL blobs, because blobs cannot appear in the WHERE clause.
+
   $self->next::method($source, \%blobs_to_empty, $where, @rest);
 
 # Now update the blobs before the other columns in case the update of other
@@ -408,8 +461,6 @@
 
   my @res;
   if (%$fields) {
-    $self->_set_identity_insert($table, 'update')   if $is_identity_update;
-
     if ($wantarray) {
       @res    = $self->next::method(@_);
     }
@@ -419,8 +470,6 @@
     else {
       $self->next::method(@_);
     }
-
-    $self->_unset_identity_insert($table, 'update') if $is_identity_update;
   }
 
   $guard->commit;
@@ -428,78 +477,200 @@
   return $wantarray ? @res : $res[0];
 }
 
-### the insert_bulk stuff stolen from DBI/MSSQL.pm
+sub insert_bulk {
+  my $self = shift;
+  my ($source, $cols, $data) = @_;
 
-sub _set_identity_insert {
-  my ($self, $table, $op) = @_;
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
 
-  my $sql = sprintf (
-    'SET IDENTITY_%s %s ON',
-    (uc($op) || 'INSERT'),
-    $self->sql_maker->_quote ($table),
-  );
+  my $is_identity_insert = (List::Util::first
+    { $_ eq $identity_col }
+    @{$cols}
+  ) ? 1 : 0;
 
-  $self->_query_start($sql);
+  my @source_columns = $source->columns;
 
-  my $dbh = $self->_get_dbh;
-  eval { $dbh->do ($sql) };
-  my $exception = $@;
+  my $use_bulk_api =
+    $self->_bulk_storage &&
+    $self->_get_dbh->{syb_has_blk};
 
-  $self->_query_end($sql);
-
-  if ($exception) {
-    $self->throw_exception (sprintf "Error executing '%s': %s",
-      $sql,
-      $dbh->errstr,
-    );
+  if ((not $use_bulk_api) &&
+      (Scalar::Util::reftype($self->_dbi_connect_info->[0])||'') eq 'CODE' &&
+      (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
+    carp <<'EOF';
+Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
+regular array inserts.
+EOF
+    $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
   }
-}
 
-sub _unset_identity_insert {
-  my ($self, $table, $op) = @_;
+  if (not $use_bulk_api) {
+    my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
 
-  my $sql = sprintf (
-    'SET IDENTITY_%s %s OFF',
-    (uc($op) || 'INSERT'),
-    $self->sql_maker->_quote ($table),
-  );
+# _execute_array uses a txn anyway, but it ends too early in case we need to
+# select max(col) to get the identity for inserting blobs.
+    ($self, my $guard) = $self->{transaction_depth} == 0 ? 
+      ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
+      :
+      ($self, undef);
 
-  $self->_query_start($sql);
+    local $self->{insert_bulk} = 1;
 
-  my $dbh = $self->_get_dbh;
-  $dbh->do ($sql);
+    $self->next::method(@_);
 
-  $self->_query_end($sql);
-}
+    if ($blob_cols) {
+      if ($is_identity_insert) {
+        $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
+      }
+      else {
+        my @cols_with_identities = (@$cols, $identity_col);
 
-# for tests
-sub _can_insert_bulk { 1 }
+        ## calculate identities
+        # XXX This assumes identities always increase by 1, which may or may not
+        # be true.
+        my ($last_identity) =
+          $self->_dbh->selectrow_array (
+            $self->_fetch_identity_sql($source, $identity_col)
+          );
+        my @identities = (($last_identity - @$data + 1) .. $last_identity);
 
-# XXX this should use the DBD::Sybase bulk API, where possible
-sub insert_bulk {
-  my $self = shift;
-  my ($source, $cols, $data) = @_;
+        my @data_with_identities = map [@$_, shift @identities], @$data;
 
-  my $is_identity_insert = (List::Util::first
-      { $source->column_info ($_)->{is_auto_increment} }
-      (@{$cols})
-  )
-     ? 1
-     : 0;
+        $self->_insert_blobs_array (
+          $source, $blob_cols, \@cols_with_identities, \@data_with_identities
+        );
+      }
+    }
 
-  if ($is_identity_insert) {
-     $self->_set_identity_insert ($source->name);
+    $guard->commit if $guard;
+
+    return;
   }
 
-  $self->next::method(@_);
+# otherwise, use the bulk API
 
-  if ($is_identity_insert) {
-     $self->_unset_identity_insert ($source->name);
+# rearrange @$data so that columns are in database order
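+# (e.g. $cols = [qw/name rank/] against source columns (artistid, name,
+# rank, charfield) yields 4-element tuples with undef in the artistid and
+# charfield slots; column names here are only illustrative)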
+  my %orig_idx;
+  @orig_idx{@$cols} = 0..$#$cols;
+
+  my %new_idx;
+  @new_idx{@source_columns} = 0..$#source_columns;
+
+  my @new_data;
+  for my $datum (@$data) {
+    my $new_datum = [];
+    for my $col (@source_columns) {
+# identity data will be 'undef' if not $is_identity_insert
+# columns with defaults will also be 'undef'
+      $new_datum->[ $new_idx{$col} ] =
+        exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
+    }
+    push @new_data, $new_datum;
   }
+
+# bcp identity index is 1-based
+  my $identity_idx = exists $new_idx{$identity_col} ?
+    $new_idx{$identity_col} + 1 : 0;
+
+## Set a client-side conversion error handler, straight from DBD::Sybase docs.
+# This ignores any data conversion errors detected by the client side libs, as
+# they are usually harmless.
+  my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
+    Sub::Name::subname insert_bulk => sub {
+      my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
+
+      return 1 if $errno == 36;
+
+      carp
+        "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
+        ($errmsg ? "\n$errmsg" : '') .
+        ($osmsg  ? "\n$osmsg"  : '')  .
+        ($blkmsg ? "\n$blkmsg" : '');
+
+      return 0;
+  });
+
+  eval {
+    my $bulk = $self->_bulk_storage;
+
+    my $guard = $bulk->txn_scope_guard;
+
+## XXX get this to work instead of our own $sth
+## will require SQLA or *Hacks changes for ordered columns
+#    $bulk->next::method($source, \@source_columns, \@new_data, {
+#      syb_bcp_attribs => {
+#        identity_flag   => $is_identity_insert,
+#        identity_column => $identity_idx,
+#      }
+#    });
+    my $sql = 'INSERT INTO ' .
+      $bulk->sql_maker->_quote($source->name) . ' (' .
+# colname list is ignored for BCP, but does no harm
+      (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
+      ' VALUES ('.  (join ', ', ('?') x @source_columns) . ')';
+
+## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
+## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
+## better yet the version above. Should be fixed in DBD::Sybase.
+    my $sth = $bulk->_get_dbh->prepare($sql,
+#      'insert', # op
+      {
+        syb_bcp_attribs => {
+          identity_flag   => $is_identity_insert,
+          identity_column => $identity_idx,
+        }
+      }
+    );
+
+    my @bind = do {
+      my $idx = 0;
+      map [ $_, $idx++ ], @source_columns;
+    };
+
+    $self->_execute_array(
+      $source, $sth, \@bind, \@source_columns, \@new_data, sub {
+        $guard->commit
+      }
+    );
+
+    $bulk->_query_end($sql);
+  };
+
+  my $exception = $@;
+  DBD::Sybase::set_cslib_cb($orig_cslib_cb);
+
+  if ($exception =~ /-Y option/) {
+    carp <<"EOF";
+
+Sybase bulk API operation failed due to character set incompatibility, reverting
+to regular array inserts:
+
+*** Try unsetting the LANG environment variable.
+
+$exception
+EOF
+    $self->_bulk_storage(undef);
+    unshift @_, $self;
+    goto \&insert_bulk;
+  }
+  elsif ($exception) {
+# rollback makes the bulkLogin connection unusable
+    $self->_bulk_storage->disconnect;
+    $self->throw_exception($exception);
+  }
 }
 
-### end of stolen insert_bulk section
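+# subclass hook for the callback threaded through _execute_array:
+# insert_bulk above uses it to commit the bulk transaction guard once the
+# underlying execute_array has returned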
+sub _dbh_execute_array {
+  my ($self, $sth, $tuple_status, $cb) = @_;
 
+  my $rv = $self->next::method($sth, $tuple_status);
+  $cb->() if $cb;
+
+  return $rv;
+}
+
 # Make sure blobs are not bound as placeholders, and return any non-empty ones
 # as a hash.
 sub _remove_blob_cols {
@@ -508,7 +679,7 @@
   my %blob_cols;
 
   for my $col (keys %$fields) {
-    if ($self->_is_lob_type($source->column_info($col)->{data_type})) {
+    if ($self->_is_lob_column($source, $col)) {
       my $blob_val = delete $fields->{$col};
       if (not defined $blob_val) {
         $fields->{$col} = \'NULL';
@@ -520,9 +691,36 @@
     }
   }
 
-  return keys %blob_cols ? \%blob_cols : undef;
+  return %blob_cols ? \%blob_cols : undef;
 }
 
+# same for insert_bulk
+sub _remove_blob_cols_array {
+  my ($self, $source, $cols, $data) = @_;
+
+  my @blob_cols;
+
+  for my $i (0..$#$cols) {
+    my $col = $cols->[$i];
+
+    if ($self->_is_lob_column($source, $col)) {
+      for my $j (0..$#$data) {
+        my $blob_val = delete $data->[$j][$i];
+        if (not defined $blob_val) {
+          $data->[$j][$i] = \'NULL';
+        }
+        else {
+          $data->[$j][$i] = \"''";
+          $blob_cols[$j][$i] = $blob_val
+            unless $blob_val eq '';
+        }
+      }
+    }
+  }
+
+  return @blob_cols ? \@blob_cols : undef;
+}
+
 sub _update_blobs {
   my ($self, $source, $blob_cols, $where) = @_;
 
@@ -582,7 +780,7 @@
 
       $self->throw_exception(
           "Could not find row in table '$table' for blob update:\n"
-        . $self->_pretty_print (\%where)
+        . Data::Dumper::Concise::Dumper (\%where)
       );
     }
 
@@ -620,6 +818,26 @@
   }
 }
 
+sub _insert_blobs_array {
+  my ($self, $source, $blob_cols, $cols, $data) = @_;
+
+  for my $i (0..$#$data) {
+    my $datum = $data->[$i];
+
+    my %row;
+    @row{ @$cols } = @$datum;
+
+    my %blob_vals;
+    for my $j (0..$#$cols) {
+      if (exists $blob_cols->[$i][$j]) {
+        $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
+      }
+    }
+
+    $self->_insert_blobs ($source, \%blob_vals, \%row);
+  }
+}
+
 =head2 connect_call_datetime_setup
 
 Used as:
@@ -644,7 +862,7 @@
 
   sub connect_call_datetime_setup {
     my $self = shift;
-    my $dbh = $self->_dbh;
+    my $dbh = $self->_get_dbh;
 
     if ($dbh->can('syb_date_fmt')) {
       # amazingly, this works with FreeTDS
@@ -671,10 +889,18 @@
 
 sub _dbh_begin_work {
   my $self = shift;
+
+# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
+# TRAN once. However, we need to make sure there's a $dbh.
+  return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
+
   $self->next::method(@_);
+
   if ($self->using_freetds) {
     $self->_get_dbh->do('BEGIN TRAN');
   }
+
+  $self->_began_bulk_work(1) if $self->_is_bulk_storage;
 }
 
 sub _dbh_commit {
@@ -773,10 +999,10 @@
 =head1 TRANSACTIONS
 
 Due to limitations of the TDS protocol, L<DBD::Sybase>, or both; you cannot
-begin a transaction while there are active cursors. An active cursor is, for
-example, a L<ResultSet|DBIx::Class::ResultSet> that has been executed using
-C<next> or C<first> but has not been exhausted or
-L<reset|DBIx::Class::ResultSet/reset>.
+begin a transaction while there are active cursors; nor can you use multiple
+active cursors within a transaction. An active cursor is, for example, a
+L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
+C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
 
 For example, this will not work:
 
@@ -790,6 +1016,11 @@
     }
   });
 
+This won't work either:
+
+  my $first_row = $large_rs->first;
+  $schema->txn_do(sub { ... });
+
 Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
 are not affected, as they are done on an extra database handle.
 
@@ -846,6 +1077,54 @@
 See L</connect_call_blob_setup> for a L<DBIx::Class::Storage::DBI/connect_info>
 setting you need to work with C<IMAGE> columns.
 
+=head1 BULK API
+
+The experimental L<DBD::Sybase> Bulk API support is used for
+L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
+on a separate connection.
+
+To use this feature effectively, use a large number of rows for each
+L<populate|DBIx::Class::ResultSet/populate> call, e.g.:
+
+  while (my $rows = $data_source->get_100_rows()) {
+    $rs->populate($rows);
+  }
+
+B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
+calls in your C<Result> classes B<must> list columns in database order for this
+to work. Also, you may have to unset the C<LANG> environment variable before
+loading your app, if it doesn't match the character set of your database.
+
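+For example, for a hypothetical table created as C<artist (artistid INT
+IDENTITY, name VARCHAR(100), rank INT)>, the C<Result> class would list the
+columns in the same order:
+
+  __PACKAGE__->add_columns(qw/artistid name rank/); # database order
+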
+When inserting IMAGE columns using this method, you'll need to use
+L</connect_call_blob_setup> as well.
+
+=head1 TODO
+
+=over
+
+=item *
+
+Transition to AutoCommit=0 (starting a transaction) mode by exhausting
+any active cursors, using eager cursors.
+
+=item *
+
+Real limits and limited counts using stored procedures deployed on startup.
+
+=item *
+
+Adaptive Server Anywhere (ASA) support, with possible SQLA::Limit support.
+
+=item *
+
+Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
+
+=item *
+
+insert_bulk using prepare_cached (see comments).
+
+=back
+
 =head1 AUTHOR
 
 See L<DBIx::Class/CONTRIBUTORS>.

Modified: DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI.pm	2009-10-18 08:56:54 UTC (rev 7796)
+++ DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class/Storage/DBI.pm	2009-10-18 08:57:43 UTC (rev 7797)
@@ -13,6 +13,7 @@
 use DBIx::Class::Storage::Statistics;
 use Scalar::Util();
 use List::Util();
+use Data::Dumper::Concise();
 
 # what version of sqlt do we require if deploy() without a ddl_dir is invoked
 # when changing also adjust the corresponding author_require in Makefile.PL
@@ -1344,20 +1345,97 @@
   }
 
   my %colvalues;
-  my $table = $source->from;
   @colvalues{@$cols} = (0..$#$cols);
 
+  for my $i (0..$#$cols) {
+    my $first_val = $data->[0][$i];
+    next unless ref $first_val eq 'SCALAR';
+
+    $colvalues{ $cols->[$i] } = $first_val;
+## This is probably unnecessary since $rs->populate only looks at the first
+## slice anyway.
+#      if (grep {
+#        ref $_ eq 'SCALAR' && $$_ eq $$first_val
+#      } map $data->[$_][$i], (1..$#$data)) == (@$data - 1);
+  }
+
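+  # The checks below enforce consistency with the first slice: e.g. if the
+  # first slice passed name => \"'foo'", every later slice must pass that
+  # exact literal for name, and a plain bind value wherever the first slice
+  # had one (the column and values here are illustrative)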
+  # check for bad data
+  my $bad_slice = sub {
+    my ($msg, $slice_idx) = @_;
+    $self->throw_exception(sprintf "%s for populate slice:\n%s",
+      $msg,
+      Data::Dumper::Concise::Dumper({
+        map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
+      }),
+    );
+  };
+
+  for my $datum_idx (0..$#$data) {
+    my $datum = $data->[$datum_idx];
+
+    for my $col_idx (0..$#$cols) {
+      my $val            = $datum->[$col_idx];
+      my $sqla_bind      = $colvalues{ $cols->[$col_idx] };
+      my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
+
+      if ($is_literal_sql) {
+        if (not ref $val) {
+          $bad_slice->('bind found where literal SQL expected', $datum_idx);
+        }
+        elsif ((my $reftype = ref $val) ne 'SCALAR') {
+          $bad_slice->("$reftype reference found where literal SQL expected",
+            $datum_idx);
+        }
+        elsif ($$val ne $$sqla_bind){
+          $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
+            $datum_idx);
+        }
+      }
+      elsif (my $reftype = ref $val) {
+        $bad_slice->("$reftype reference found where bind expected",
+          $datum_idx);
+      }
+    }
+  }
+
   my ($sql, $bind) = $self->_prep_for_execute (
     'insert', undef, $source, [\%colvalues]
   );
-  my @bind = @$bind
-    or croak 'Cannot insert_bulk without support for placeholders';
+  my @bind = @$bind;
 
+  my $empty_bind = 1 if (not @bind) &&
+    (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols;
+
+  if ((not @bind) && (not $empty_bind)) {
+    $self->throw_exception(
+      'Cannot insert_bulk without support for placeholders'
+    );
+  }
+
   $self->_query_start( $sql, @bind );
   my $sth = $self->sth($sql);
 
-#  @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+  my $rv = do {
+    if ($empty_bind) {
+      # bind_param_array doesn't work if there are no binds
+      $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
+    }
+    else {
+#      @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+      $self->_execute_array( $source, $sth, \@bind, $cols, $data );
+    }
+  };
 
+  $self->_query_end( $sql, @bind );
+
+  return (wantarray ? ($rv, $sth, @bind) : $rv);
+}
+
+sub _execute_array {
+  my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
+
+  my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0;
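+# (a no-op when we are already inside a transaction; otherwise this makes
+# the whole multi-row insert atomic)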
+
   ## This must be an arrayref, else nothing works!
   my $tuple_status = [];
 
@@ -1367,7 +1445,7 @@
   ## Bind the values and execute
   my $placeholder_index = 1;
 
-  foreach my $bound (@bind) {
+  foreach my $bound (@$bind) {
 
     my $attributes = {};
     my ($column_name, $data_index) = @$bound;
@@ -1382,28 +1460,67 @@
     $sth->bind_param_array( $placeholder_index, [@data], $attributes );
     $placeholder_index++;
   }
-  my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) };
-  $sth->finish;
-  if (my $err = $@) {
+
+  my $rv = eval {
+    $self->_dbh_execute_array($sth, $tuple_status, @extra);
+  };
+  my $err = $@ || $sth->errstr;
+
+# Statement must finish even if there was an exception.
+  eval { $sth->finish };
+  $err = $@ unless $err;
+
+  if ($err) {
     my $i = 0;
     ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
 
-    $self->throw_exception($sth->errstr || "Unexpected populate error: $err")
+    $self->throw_exception("Unexpected populate error: $err")
       if ($i > $#$tuple_status);
 
     $self->throw_exception(sprintf "%s for populate slice:\n%s",
-      $tuple_status->[$i][1],
-      $self->_pretty_print ({
+      ($tuple_status->[$i][1] || $err),
+      Data::Dumper::Concise::Dumper({
         map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols)
       }),
     );
   }
-  $self->throw_exception($sth->errstr) if !$rv;
 
-  $self->_query_end( $sql, @bind );
-  return (wantarray ? ($rv, $sth, @bind) : $rv);
+  $guard->commit if $guard;
+
+  return $rv;
 }
 
+sub _dbh_execute_array {
+    my ($self, $sth, $tuple_status, @extra) = @_;
+
+    return $sth->execute_array({ArrayTupleStatus => $tuple_status});
+}
+
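+# Used when every column of the insert is literal SQL: the statement then
+# has no placeholders, so execute_array cannot be used and we simply execute
+# the same statement once per row.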
+sub _dbh_execute_inserts_with_no_binds {
+  my ($self, $sth, $count) = @_;
+
+  my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0;
+
+  eval {
+    my $dbh = $self->_get_dbh;
+    local $dbh->{RaiseError} = 1;
+    local $dbh->{PrintError} = 0;
+
+    $sth->execute foreach 1..$count;
+  };
+  my $exception = $@;
+
+# Make sure statement is finished even if there was an exception.
+  eval { $sth->finish };
+  $exception = $@ unless $exception;
+
+  $self->throw_exception($exception) if $exception;
+
+  $guard->commit if $guard;
+
+  return $count;
+}
+
 sub update {
   my ($self, $source, @args) = @_; 
 
@@ -1992,19 +2109,6 @@
   return @pcols ? \@pcols : [ 1 ];
 }
 
-#
-# Returns an ordered list of column names before they are used
-# in a SELECT statement. By default simply returns the list
-# passed in.
-#
-# This may be overridden in a specific storage when there are
-# requirements such as moving BLOB columns to the end of the 
-# SELECT list.
-sub _order_select_columns {
-  #my ($self, $source, $columns) = @_;
-  return @{$_[2]};
-}
-
 sub source_bind_attributes {
   my ($self, $source) = @_;
 

Modified: DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class.pm	2009-10-18 08:56:54 UTC (rev 7796)
+++ DBIx-Class/0.08/branches/sybase_support/lib/DBIx/Class.pm	2009-10-18 08:57:43 UTC (rev 7797)
@@ -43,19 +43,6 @@
   return $@ ? $cache : { %$cache, %$rest };
 }
 
-# Pretty printer for debug messages
-sub _pretty_print {
-
-  require Data::Dumper;
-  local $Data::Dumper::Terse = 1;
-  local $Data::Dumper::Indent = 1;
-  local $Data::Dumper::Useqq = 1;
-  local $Data::Dumper::Quotekeys = 0;
-  local $Data::Dumper::Sortkeys = 1;
-
-  return Data::Dumper::Dumper ($_[1]);
-}
-
 1;
 
 =head1 NAME

Modified: DBIx-Class/0.08/branches/sybase_support/t/100populate.t
===================================================================
--- DBIx-Class/0.08/branches/sybase_support/t/100populate.t	2009-10-18 08:56:54 UTC (rev 7796)
+++ DBIx-Class/0.08/branches/sybase_support/t/100populate.t	2009-10-18 08:57:43 UTC (rev 7797)
@@ -6,8 +6,6 @@
 use lib qw(t/lib);
 use DBICTest;
 
-plan tests => 23;
-
 my $schema = DBICTest->init_schema();
 
 # The map below generates stuff like:
@@ -116,3 +114,86 @@
 is($link7->url, undef, 'Link 7 url');
 is($link7->title, 'gtitle', 'Link 7 title');
 
+# test _execute_array_empty (insert_bulk with all literal sql)
+my $rs = $schema->resultset('Artist');
+$rs->delete;
+$rs->populate([
+    (+{
+        name => \"'DT'",
+        rank => \500,
+        charfield => \"'mtfnpy'",
+    }) x 5
+]);
+
+is((grep {
+  $_->name eq 'DT' &&
+  $_->rank == 500  &&
+  $_->charfield eq 'mtfnpy'
+} $rs->all), 5, 'populate with all literal SQL');
+
+$rs->delete;
+
+throws_ok {
+    $rs->populate([
+        {
+            artistid => 1,
+            name => 'foo1',
+        },
+        {
+            artistid => 'foo', # this dies
+            name => 'foo2',
+        },
+        {
+            artistid => 3,
+            name => 'foo3',
+        },
+    ]);
+} qr/slice/, 'bad slice';
+
+is($rs->count, 0, 'populate is atomic');
+
+# Trying to use a column marked as a bind in the first slice with literal sql in
+# a later slice should throw.
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => 1,
+      name => \"'foo'",
+    },
+    {
+      artistid => \2,
+      name => \"'foo'",
+    }
+  ]);
+} qr/bind expected/, 'literal sql where bind expected throws';
+
+# ... and vice-versa.
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => \1,
+      name => \"'foo'",
+    },
+    {
+      artistid => 2,
+      name => \"'foo'",
+    }
+  ]);
+} qr/literal SQL expected/i, 'bind where literal sql expected throws';
+
+throws_ok {
+  $rs->populate([
+    {
+      artistid => 1,
+      name => \"'foo'",
+    },
+    {
+      artistid => 2,
+      name => \"'bar'",
+    }
+  ]);
+} qr/inconsistent/, 'literal sql must be the same in all slices';
+
+done_testing;

Modified: DBIx-Class/0.08/branches/sybase_support/t/746sybase.t
===================================================================
--- DBIx-Class/0.08/branches/sybase_support/t/746sybase.t	2009-10-18 08:56:54 UTC (rev 7796)
+++ DBIx-Class/0.08/branches/sybase_support/t/746sybase.t	2009-10-18 08:57:43 UTC (rev 7797)
@@ -12,7 +12,7 @@
 
 my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};
 
-my $TESTS = 51 + 2;
+my $TESTS = 63 + 2;
 
 if (not ($dsn && $user)) {
   plan skip_all =>
@@ -208,11 +208,10 @@
     name => { -like => 'bulk artist %' }
   });
 
-# test insert_bulk using populate, this should always pass whether or not it
-# does anything Sybase specific or not. Just here to aid debugging.
+# test insert_bulk using populate.
   SKIP: {
     skip 'insert_bulk not supported', 4
-      unless $schema->storage->_can_insert_bulk;
+      unless $storage_type !~ /NoBindVars/i;
 
     lives_ok {
       $schema->resultset('Artist')->populate([
@@ -245,10 +244,57 @@
     $bulk_rs->delete;
   }
 
+# make sure insert_bulk works a second time on the same connection
+  SKIP: {
+    skip 'insert_bulk not supported', 3
+      unless $storage_type !~ /NoBindVars/i;
+
+    lives_ok {
+      $schema->resultset('Artist')->populate([
+        {
+          name => 'bulk artist 1',
+          charfield => 'bar',
+        },
+        {
+          name => 'bulk artist 2',
+          charfield => 'bar',
+        },
+        {
+          name => 'bulk artist 3',
+          charfield => 'bar',
+        },
+      ]);
+    } 'insert_bulk via populate called a second time';
+
+    is $bulk_rs->count, 3,
+      'correct number inserted via insert_bulk';
+
+    is ((grep $_->charfield eq 'bar', $bulk_rs->all), 3,
+      'column set correctly via insert_bulk');
+
+    $bulk_rs->delete;
+  }
+
+# test invalid insert_bulk (missing required column)
+#
+# There should be a rollback and reconnect, and the next valid insert_bulk
+# should succeed.
+  throws_ok {
+    $schema->resultset('Artist')->populate([
+      {
+        charfield => 'foo',
+      }
+    ]);
+  } qr/no value or default|does not allow null|placeholders/i,
+# The second pattern is the error from fallback to regular array insert on
+# incompatible charset.
+# The third is for ::NoBindVars with no syb_has_blk.
+  'insert_bulk with missing required column throws error';
+
 # now test insert_bulk with IDENTITY_INSERT
   SKIP: {
     skip 'insert_bulk not supported', 3
-      unless $schema->storage->_can_insert_bulk;
+      unless $storage_type !~ /NoBindVars/i;
 
     lives_ok {
       $schema->resultset('Artist')->populate([
@@ -290,7 +336,7 @@
 
 # mostly stolen from the blob stuff Nniuq wrote for t/73oracle.t
   SKIP: {
-    skip 'TEXT/IMAGE support does not work with FreeTDS', 15
+    skip 'TEXT/IMAGE support does not work with FreeTDS', 22
       if $schema->storage->using_freetds;
 
     my $dbh = $schema->storage->_dbh;
@@ -302,7 +348,7 @@
         CREATE TABLE bindtype_test 
         (
           id    INT   IDENTITY PRIMARY KEY,
-          bytea INT   NULL,
+          bytea IMAGE NULL,
           blob  IMAGE NULL,
           clob  TEXT  NULL
         )
@@ -327,35 +373,31 @@
       foreach my $size (qw(small large)) {
         no warnings 'uninitialized';
 
-        my $created = eval { $rs->create( { $type => $binstr{$size} } ) };
-        ok(!$@, "inserted $size $type without dying");
-        diag $@ if $@;
+        my $created;
+        lives_ok {
+          $created = $rs->create( { $type => $binstr{$size} } )
+        } "inserted $size $type without dying";
 
         $last_id = $created->id if $created;
 
-        my $got = eval {
-          $rs->find($last_id)->$type
-        };
-        diag $@ if $@;
-        ok($got eq $binstr{$size}, "verified inserted $size $type");
+        lives_and {
+          ok($rs->find($last_id)->$type eq $binstr{$size})
+        } "verified inserted $size $type";
       }
     }
 
+    $rs->delete;
+
     # blob insert with explicit PK
     # also a good opportunity to test IDENTITY_INSERT
+    lives_ok {
+      $rs->create( { id => 1, blob => $binstr{large} } )
+    } 'inserted large blob without dying with manual PK';
 
-    $rs->delete;
+    lives_and {
+      ok($rs->find(1)->blob eq $binstr{large})
+    } 'verified inserted large blob with manual PK';
 
-    my $created = eval { $rs->create( { id => 1, blob => $binstr{large} } ) };
-    ok(!$@, "inserted large blob without dying with manual PK");
-    diag $@ if $@;
-
-    my $got = eval {
-      $rs->find(1)->blob
-    };
-    diag $@ if $@;
-    ok($got eq $binstr{large}, "verified inserted large blob with manual PK");
-
     # try a blob update
     my $new_str = $binstr{large} . 'mtfnpy';
 
@@ -365,15 +407,14 @@
       $schema = get_schema();
     }
 
-    eval { $rs->search({ id => 1 })->update({ blob => $new_str }) };
-    ok !$@, 'updated blob successfully';
-    diag $@ if $@;
-    $got = eval {
-      $rs->find(1)->blob
-    };
-    diag $@ if $@;
-    ok($got eq $new_str, "verified updated blob");
+    lives_ok {
+      $rs->search({ id => 1 })->update({ blob => $new_str })
+    } 'updated blob successfully';
 
+    lives_and {
+      ok($rs->find(1)->blob eq $new_str)
+    } 'verified updated blob';
+
     # try a blob update with IDENTITY_UPDATE
     lives_and {
       $new_str = $binstr{large} . 'hlagh';
@@ -383,32 +424,103 @@
 
     ## try multi-row blob update
     # first insert some blobs
-    $rs->delete;
-    $rs->create({ blob => $binstr{large} }) for (1..3);
     $new_str = $binstr{large} . 'foo';
-    $rs->update({ blob => $new_str });
-    is((grep $_->blob eq $new_str, $rs->all), 3, 'multi-row blob update');
+    lives_and {
+      $rs->delete;
+      $rs->create({ blob => $binstr{large} }) for (1..2);
+      $rs->update({ blob => $new_str });
+      is((grep $_->blob eq $new_str, $rs->all), 2);
+    } 'multi-row blob update';
 
-    # make sure impossible blob update throws
-    throws_ok {
-      $rs->update({ clob => 'foo' });
-      $rs->create({ clob => 'bar' });
-      $rs->search({ clob => 'foo' })->update({ clob => 'bar' });
-    } qr/impossible/, 'impossible blob update throws';
+    $rs->delete;
+
+    # now try insert_bulk with blobs and only blobs
+    $new_str = $binstr{large} . 'bar';
+    lives_ok {
+      $rs->populate([
+        {
+          bytea => 1,
+          blob => $binstr{large},
+          clob => $new_str,
+        },
+        {
+          bytea => 1,
+          blob => $binstr{large},
+          clob => $new_str,
+        },
+      ]);
+    } 'insert_bulk with blobs does not die';
+
+    is((grep $_->blob eq $binstr{large}, $rs->all), 2,
+      'IMAGE column set correctly via insert_bulk');
+
+    is((grep $_->clob eq $new_str, $rs->all), 2,
+      'TEXT column set correctly via insert_bulk');
+
+    # now try insert_bulk with blobs and a non-blob which also happens to be an
+    # identity column
+    SKIP: {
+      skip 'no insert_bulk without placeholders', 4
+        if $storage_type =~ /NoBindVars/i;
+
+      $rs->delete;
+      $new_str = $binstr{large} . 'bar';
+      lives_ok {
+        $rs->populate([
+          {
+            id => 1,
+            bytea => 1,
+            blob => $binstr{large},
+            clob => $new_str,
+          },
+          {
+            id => 2,
+            bytea => 1,
+            blob => $binstr{large},
+            clob => $new_str,
+          },
+        ]);
+      } 'insert_bulk with blobs and explicit identity does NOT die';
+
+      is((grep $_->blob eq $binstr{large}, $rs->all), 2,
+        'IMAGE column set correctly via insert_bulk with identity');
+
+      is((grep $_->clob eq $new_str, $rs->all), 2,
+        'TEXT column set correctly via insert_bulk with identity');
+
+      is_deeply [ map $_->id, $rs->all ], [ 1,2 ],
+        'explicit identities set correctly via insert_bulk with blobs';
+    }
+
+    lives_and {
+      $rs->delete;
+      $rs->create({ blob => $binstr{large} }) for (1..2);
+      $rs->update({ blob => undef });
+      is((grep !defined($_->blob), $rs->all), 2);
+    } 'blob update to NULL';
   }
 
-# test MONEY column support
+# test MONEY column support (and some other misc. stuff)
   $schema->storage->dbh_do (sub {
       my ($storage, $dbh) = @_;
       eval { $dbh->do("DROP TABLE money_test") };
       $dbh->do(<<'SQL');
 CREATE TABLE money_test (
    id INT IDENTITY PRIMARY KEY,
-   amount MONEY NULL
+   amount MONEY DEFAULT $999.99 NULL
 )
 SQL
   });
 
+  my $rs = $schema->resultset('Money');
+
+# test insert with defaults
+  lives_and {
+    $rs->create({});
+    is((grep $_->amount == 999.99, $rs->all), 1);
+  } 'insert with all defaults works';
+  $rs->delete;
+
 # test insert transaction when there's an active cursor
   {
     my $artist_rs = $schema->resultset('Artist');
@@ -440,8 +552,6 @@
   }
 
 # Now test money values.
-  my $rs = $schema->resultset('Money');
-
   my $row;
   lives_ok {
     $row = $rs->create({ amount => 100 });



