[Bast-commits] r7421 - in DBIx-Class/0.08/branches/sybase: .
lib/DBIx/Class lib/DBIx/Class/InflateColumn
lib/DBIx/Class/Manual lib/DBIx/Class/Storage
lib/DBIx/Class/Storage/DBI t t/inflate t/lib
t/lib/DBICTest/Schema t/prefetch t/search t/storage
caelum at dev.catalyst.perl.org
Sat Aug 29 06:50:57 GMT 2009
Author: caelum
Date: 2009-08-29 06:50:56 +0000 (Sat, 29 Aug 2009)
New Revision: 7421
Added:
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/AutoCast.pm
DBIx-Class/0.08/branches/sybase/t/93autocast.t
DBIx-Class/0.08/branches/sybase/t/inflate/datetime_determine_parser.t
DBIx-Class/0.08/branches/sybase/t/storage/
DBIx-Class/0.08/branches/sybase/t/storage/base.t
DBIx-Class/0.08/branches/sybase/t/storage/dbh_do.t
DBIx-Class/0.08/branches/sybase/t/storage/dbi_coderef.t
DBIx-Class/0.08/branches/sybase/t/storage/debug.t
DBIx-Class/0.08/branches/sybase/t/storage/disable_sth_caching.t
DBIx-Class/0.08/branches/sybase/t/storage/error.t
DBIx-Class/0.08/branches/sybase/t/storage/on_connect_call.t
DBIx-Class/0.08/branches/sybase/t/storage/on_connect_do.t
DBIx-Class/0.08/branches/sybase/t/storage/ping_count.t
DBIx-Class/0.08/branches/sybase/t/storage/reconnect.t
DBIx-Class/0.08/branches/sybase/t/storage/replication.t
DBIx-Class/0.08/branches/sybase/t/storage/stats.t
Removed:
DBIx-Class/0.08/branches/sybase/t/18inserterror.t
DBIx-Class/0.08/branches/sybase/t/31stats.t
DBIx-Class/0.08/branches/sybase/t/32connect_code_ref.t
DBIx-Class/0.08/branches/sybase/t/33storage_reconnect.t
DBIx-Class/0.08/branches/sybase/t/35disable_sth_caching.t
DBIx-Class/0.08/branches/sybase/t/36datetime.t
DBIx-Class/0.08/branches/sybase/t/91debug.t
DBIx-Class/0.08/branches/sybase/t/92storage.t
DBIx-Class/0.08/branches/sybase/t/92storage_on_connect_call.t
DBIx-Class/0.08/branches/sybase/t/92storage_on_connect_do.t
DBIx-Class/0.08/branches/sybase/t/92storage_ping_count.t
DBIx-Class/0.08/branches/sybase/t/93storage_replication.t
DBIx-Class/0.08/branches/sybase/t/dbh_do.t
Modified:
DBIx-Class/0.08/branches/sybase/
DBIx-Class/0.08/branches/sybase/Changes
DBIx-Class/0.08/branches/sybase/Makefile.PL
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/InflateColumn/DateTime.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/FAQ.pod
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSet.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Schema.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI.pm
DBIx-Class/0.08/branches/sybase/t/03podcoverage.t
DBIx-Class/0.08/branches/sybase/t/72pg.t
DBIx-Class/0.08/branches/sybase/t/89dbicadmin.t
DBIx-Class/0.08/branches/sybase/t/lib/DBICTest/Schema/Track.pm
DBIx-Class/0.08/branches/sybase/t/lib/sqlite.sql
DBIx-Class/0.08/branches/sybase/t/prefetch/grouped.t
DBIx-Class/0.08/branches/sybase/t/search/subquery.t
Log:
r7370 at hlagh (orig r7369): caelum | 2009-08-24 06:32:57 -0400
bump CAG dep
r7389 at hlagh (orig r7388): ribasushi | 2009-08-25 07:43:38 -0400
typo
r7390 at hlagh (orig r7389): ribasushi | 2009-08-25 08:29:37 -0400
r7354 at Thesaurus (orig r7351): abraxxa | 2009-08-20 17:46:06 +0200
new branch grouped_has_many_join
r7382 at Thesaurus (orig r7379): ribasushi | 2009-08-24 22:50:13 +0200
Seems like abraxxa's bug is fixed
r7385 at Thesaurus (orig r7382): ribasushi | 2009-08-25 11:33:40 +0200
One more test
r7394 at hlagh (orig r7393): ribasushi | 2009-08-26 12:07:51 -0400
Stop testing deprecated json::syck
r7395 at hlagh (orig r7394): ribasushi | 2009-08-26 12:08:24 -0400
Make sure sqlt_type gets called after determining driver
r7396 at hlagh (orig r7395): ribasushi | 2009-08-26 12:21:53 -0400
Make POD::Coverage happy... again
r7397 at hlagh (orig r7396): ribasushi | 2009-08-26 12:31:54 -0400
Clarify
r7398 at hlagh (orig r7397): frew | 2009-08-26 16:24:19 -0400
Remove dead, sketchtowne link
r7402 at hlagh (orig r7401): ribasushi | 2009-08-27 12:50:12 -0400
Changes
r7404 at hlagh (orig r7403): ribasushi | 2009-08-27 18:11:29 -0400
Add a test proving how dumb I am
r7405 at hlagh (orig r7404): ribasushi | 2009-08-28 10:34:46 -0400
Warning to spare mst explanations
r18700 at hlagh (orig r7419): caelum | 2009-08-29 02:34:07 -0400
r7381 at hlagh (orig r7380): ribasushi | 2009-08-24 17:07:58 -0400
Branch to add autocast support as a standalone piece of code
r7382 at hlagh (orig r7381): ribasushi | 2009-08-25 05:06:43 -0400
Move storage tests to their own dir
r7385 at hlagh (orig r7384): ribasushi | 2009-08-25 06:35:19 -0400
Switch storage class loading to ensure_class_loaded
r7386 at hlagh (orig r7385): ribasushi | 2009-08-25 06:37:48 -0400
Change a datatype for test purposes
r7387 at hlagh (orig r7386): ribasushi | 2009-08-25 06:45:35 -0400
Fix two storage tests
r7388 at hlagh (orig r7387): ribasushi | 2009-08-25 06:45:52 -0400
Actual autocast code
r18697 at hlagh (orig r7416): caelum | 2009-08-29 01:42:29 -0400
rename method and add docs
r18698 at hlagh (orig r7417): ribasushi | 2009-08-29 02:07:18 -0400
Make sure arrays work
r18699 at hlagh (orig r7418): caelum | 2009-08-29 02:11:14 -0400
rename _map_data_type to _native_data_type
Property changes on: DBIx-Class/0.08/branches/sybase
___________________________________________________________________
Name: svk:merge
- 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7237
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7331
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:7358
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
+ 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7237
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/autocast:7418
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_has_many_join:7382
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7331
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:7419
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
Modified: DBIx-Class/0.08/branches/sybase/Changes
===================================================================
--- DBIx-Class/0.08/branches/sybase/Changes 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/Changes 2009-08-29 06:50:56 UTC (rev 7421)
@@ -4,9 +4,17 @@
- Support for TEXT/IMAGE columns
- Support for the 'money' datatype
- Transaction savepoints support
- - Support for bind variables when connecting via Sybase OpenClient
+ - Support for bind variables when connecting to a newer Sybase with
+ OpenClient libraries
+ - Support for bind variables over FreeTDS with CASTs when needed
- Support for interpolated variables with proper quoting when
- connecting via FreeTDS
+ connecting to an older Sybase or via FreeTDS
+ - Fixed a complex prefetch + regular join regression introduced
+ in 0.08108
+ - SQLT related fixes:
+ - sqlt_type is now called on the correct storage object
+ - hooks can now see the correct producer_type
+ - POD improvements
0.08109 2009-08-18 08:35:00 (UTC)
- Replication updates:
Modified: DBIx-Class/0.08/branches/sybase/Makefile.PL
===================================================================
--- DBIx-Class/0.08/branches/sybase/Makefile.PL 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/Makefile.PL 2009-08-29 06:50:56 UTC (rev 7421)
@@ -29,7 +29,7 @@
# Dependencies (keep in alphabetical order)
requires 'Carp::Clan' => 6.0;
-requires 'Class::Accessor::Grouped' => 0.08003;
+requires 'Class::Accessor::Grouped' => 0.09000;
requires 'Class::C3::Componentised' => 1.0005;
requires 'Class::Inspector' => 1.24;
requires 'Data::Page' => 2.00;
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/InflateColumn/DateTime.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/InflateColumn/DateTime.pm 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/InflateColumn/DateTime.pm 2009-08-29 06:50:56 UTC (rev 7421)
@@ -71,7 +71,7 @@
reports to the list very much welcome).
If the data_type of a field is C<date>, C<datetime> or C<timestamp> (or
-a derivative of these datatypes, e.g. C<timestamp with timezone>, this
+a derivative of these datatypes, e.g. C<timestamp with timezone>), this
module will automatically call the appropriate parse/format method for
deflation/inflation as defined in the storage class. For instance, for
a C<datetime> field the methods C<parse_datetime> and C<format_datetime>
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/FAQ.pod
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/FAQ.pod 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/FAQ.pod 2009-08-29 06:50:56 UTC (rev 7421)
@@ -26,8 +26,7 @@
Next, spend some time defining which data you need to store, and how
it relates to the other data you have. For some help on normalisation,
-go to L<http://b62.tripod.com/doc/dbbase.htm> or
-L<http://209.197.234.36/db/simple.html>.
+go to L<http://b62.tripod.com/doc/dbbase.htm>.
Now, decide whether you want to have the database itself be the
definitive source of information about the data layout, or your
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSet.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSet.pm 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSet.pm 2009-08-29 06:50:56 UTC (rev 7421)
@@ -2278,6 +2278,19 @@
}
});
+=over
+
+=item WARNING
+
+When subclassing ResultSet, never attempt to override this method. Since
+it is a simple shortcut for C<< $self->new_result($attrs)->insert >>, a
+lot of the internals simply never call it, so your override will be
+bypassed more often than not. Override either L<new|DBIx::Class::Row/new>
+or L<insert|DBIx::Class::Row/insert> depending on how early in the
+L</create> process you need to intervene.
+
+=back
+
=cut
sub create {
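
For context, a minimal sketch of the override pattern the warning above
recommends. The result class name and the column tweak are hypothetical and
not part of this commit; the point is that hooking Row::insert (or Row::new)
fires on every code path that ends up creating a row, whereas a
ResultSet::create override is often bypassed.

    package MyApp::Schema::Result::Artist;

    use strict;
    use warnings;
    use base 'DBIx::Class';

    __PACKAGE__->load_components(qw/Core/);
    __PACKAGE__->table('artist');
    __PACKAGE__->add_columns(qw/artistid name/);
    __PACKAGE__->set_primary_key('artistid');

    # hook the actual INSERT rather than ResultSet::create
    sub insert {
      my $self = shift;

      # massage column data before it hits the database
      $self->name('Unknown Artist') unless defined $self->name;

      return $self->next::method(@_);
    }

    1;

With this in place both $rs->create({ ... }) and
$rs->new_result({ ... })->insert go through the same hook.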
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Schema.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Schema.pm 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Schema.pm 2009-08-29 06:50:56 UTC (rev 7421)
@@ -814,7 +814,7 @@
$storage_class = 'DBIx::Class::Storage'.$storage_class
if $storage_class =~ m/^::/;
- eval "require ${storage_class};";
+ eval { $self->ensure_class_loaded ($storage_class) };
$self->throw_exception(
"No arguments to load_classes and couldn't load ${storage_class} ($@)"
) if $@;
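
The hunk above replaces a string eval of require with ensure_class_loaded,
which DBIx::Class::Schema inherits from Class::C3::Componentised (a declared
dependency in Makefile.PL). A rough standalone illustration of the same call,
with a class name chosen purely for the example:

    use Class::C3::Componentised;

    my $storage_class = 'DBIx::Class::Storage::DBI::SQLite';

    # no-op if the class is already in memory (e.g. defined inline without
    # a .pm file); throws a descriptive exception if loading fails
    eval { Class::C3::Componentised->ensure_class_loaded($storage_class) };
    die "Could not load $storage_class: $@" if $@;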
Added: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/AutoCast.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/AutoCast.pm (rev 0)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/AutoCast.pm 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,74 @@
+package DBIx::Class::Storage::DBI::AutoCast;
+
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+use mro 'c3';
+
+__PACKAGE__->mk_group_accessors('simple' => 'auto_cast' );
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::AutoCast
+
+=head1 SYNOPSIS
+
+ $schema->storage->auto_cast(1);
+
+=head1 DESCRIPTION
+
+In some combinations of RDBMS and DBD drivers (e.g. FreeTDS and Sybase),
+statements with values bound to columns or conditions that are not strings
+will throw implicit type conversion errors.
+
+As long as a column L<data_type|DBIx::Class::ResultSource/add_columns> is
+defined, and it resolves to a base RDBMS native type via L</_native_data_type> as
+defined in your Storage driver, the placeholder for this column will be
+converted to:
+
+ CAST(? as $mapped_type)
+
+=cut
+
+sub _prep_for_execute {
+ my $self = shift;
+ my ($op, $extra_bind, $ident, $args) = @_;
+
+ my ($sql, $bind) = $self->next::method (@_);
+
+# If we're using ::NoBindVars, there are no binds by this point so this code
+# gets skipped.
+ if ($self->auto_cast && @$bind) {
+ my $new_sql;
+ my @sql_part = split /\?/, $sql;
+ my $col_info = $self->_resolve_column_info($ident,[ map $_->[0], @$bind ]);
+
+ foreach my $bound (@$bind) {
+ my $col = $bound->[0];
+ my $type = $self->_native_data_type($col_info->{$col}{data_type});
+
+ foreach my $data (@{$bound}[1..$#$bound]) {
+ $new_sql .= shift(@sql_part) .
+ ($type ? "CAST(? AS $type)" : '?');
+ }
+ }
+ $new_sql .= join '', @sql_part;
+ $sql = $new_sql;
+ }
+
+ return ($sql, $bind);
+}
+
+
+=head1 AUTHORS
+
+See L<DBIx::Class/CONTRIBUTORS>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;
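
Not part of the diff, but a rough usage sketch of the new component, assuming
the active storage class composes ::AutoCast and that _native_data_type maps
the column's declared data_type (schema, resultset and column names below are
hypothetical):

    my $schema = MyApp::Schema->connect($dsn, $user, $pass);
    $schema->storage->auto_cast(1);

    # a condition that would normally render as:  WHERE me.price = ?
    # is instead issued roughly as:               WHERE me.price = CAST(? AS MONEY)
    my $rs = $schema->resultset('Item')->search({ price => 19.99 });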
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI.pm 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI.pm 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1597,179 +1597,224 @@
sub _adjust_select_args_for_complex_prefetch {
my ($self, $from, $select, $where, $attrs) = @_;
+ $self->throw_exception ('Nothing to prefetch... how did we get here?!')
+ if not @{$attrs->{_prefetch_select}};
+
$self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
- if (ref $from ne 'ARRAY');
+ if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY');
- # copies for mangling
- $from = [ @$from ];
- $select = [ @$select ];
- $attrs = { %$attrs };
- # separate attributes
- my $sub_attrs = { %$attrs };
- delete $attrs->{$_} for qw/where bind rows offset group_by having/;
- delete $sub_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
+ # generate inner/outer attribute lists, remove stuff that doesn't apply
+ my $outer_attrs = { %$attrs };
+ delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
- my $select_root_alias = $attrs->{alias};
- my $sql_maker = $self->sql_maker;
+ my $inner_attrs = { %$attrs };
+ delete $inner_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
- # create subquery select list - consider only stuff *not* brought in by the prefetch
- my $sub_select = [];
- my $sub_group_by;
- for my $i (0 .. @{$attrs->{select}} - @{$attrs->{_prefetch_select}} - 1) {
- my $sel = $attrs->{select}[$i];
- # alias any functions to the dbic-side 'as' label
- # adjust the outer select accordingly
+ # bring over all non-collapse-induced order_by into the inner query (if any)
+ # the outer one will have to keep them all
+ delete $inner_attrs->{order_by};
+ if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
+ $inner_attrs->{order_by} = [
+ @{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
+ ];
+ }
+
+
+ # generate the inner/outer select lists
+ # for inside we consider only stuff *not* brought in by the prefetch
+ # on the outside we substitute any function for its alias
+ my $outer_select = [ @$select ];
+ my $inner_select = [];
+ for my $i (0 .. ( @$outer_select - @{$outer_attrs->{_prefetch_select}} - 1) ) {
+ my $sel = $outer_select->[$i];
+
if (ref $sel eq 'HASH' ) {
$sel->{-as} ||= $attrs->{as}[$i];
- $select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "select_$i") );
+ $outer_select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "inner_column_$i") );
}
- push @$sub_select, $sel;
+ push @$inner_select, $sel;
}
- # bring over all non-collapse-induced order_by into the inner query (if any)
- # the outer one will have to keep them all
- delete $sub_attrs->{order_by};
- if (my $ord_cnt = @{$attrs->{order_by}} - @{$attrs->{_collapse_order_by}} ) {
- $sub_attrs->{order_by} = [
- @{$attrs->{order_by}}[ 0 .. $ord_cnt - 1]
- ];
- }
+ # normalize a copy of $from, so it will be easier to work with further
+ # down (i.e. promote the initial hashref to an AoH)
+ $from = [ @$from ];
+ $from->[0] = [ $from->[0] ];
+ my %original_join_info = map { $_->[0]{-alias} => $_->[0] } (@$from);
- # mangle {from}, keep in mind that $from is "headless" from here on
- my $join_root = shift @$from;
- my %inner_joins;
- my %join_info = map { $_->[0]{-alias} => $_->[0] } (@$from);
+ # decide which parts of the join will remain in either part of
+ # the outer/inner query
- # in complex search_related chains $select_root_alias may *not* be
- # 'me' so always include it in the inner join
- $inner_joins{$select_root_alias} = 1 if ($join_root->{-alias} ne $select_root_alias);
-
-
- # decide which parts of the join will remain on the inside
+ # First we compose a list of which aliases are used in restrictions
+ # (i.e. conditions/order/grouping/etc). Since we do not have
+ # introspectable SQLA, we fall back to ugly scanning of raw SQL for
+ # WHERE, and for pieces of ORDER BY in order to determine which aliases
+ # need to appear in the resulting sql.
+ # It may not be very efficient, but it's a reasonable stop-gap
+ # Also unqualified column names will not be considered, but more often
+ # than not this is actually ok
#
- # this is not a very viable optimisation, but it was written
- # before I realised this, so might as well remain. We can throw
- # away _any_ branches of the join tree that are:
- # 1) not mentioned in the condition/order
- # 2) left-join leaves (or left-join leaf chains)
- # Most of the join conditions will not satisfy this, but for real
- # complex queries some might, and we might make some RDBMS happy.
- #
- #
- # since we do not have introspectable SQLA, we fall back to ugly
- # scanning of raw SQL for WHERE, and for pieces of ORDER BY
- # in order to determine what goes into %inner_joins
- # It may not be very efficient, but it's a reasonable stop-gap
+ # In the same loop we enumerate part of the selection aliases, as
+ # it requires the same sqla hack for the time being
+ my ($restrict_aliases, $select_aliases, $prefetch_aliases);
{
# produce stuff unquoted, so it can be scanned
+ my $sql_maker = $self->sql_maker;
local $sql_maker->{quote_char};
my $sep = $self->_sql_maker_opts->{name_sep} || '.';
$sep = "\Q$sep\E";
- my @order_by = (map
+ my $non_prefetch_select_sql = $sql_maker->_recurse_fields ($inner_select);
+ my $prefetch_select_sql = $sql_maker->_recurse_fields ($outer_attrs->{_prefetch_select});
+ my $where_sql = $sql_maker->where ($where);
+ my $group_by_sql = $sql_maker->_order_by({
+ map { $_ => $inner_attrs->{$_} } qw/group_by having/
+ });
+ my @non_prefetch_order_by_chunks = (map
{ ref $_ ? $_->[0] : $_ }
- $sql_maker->_order_by_chunks ($sub_attrs->{order_by})
+ $sql_maker->_order_by_chunks ($inner_attrs->{order_by})
);
- my $where_sql = $sql_maker->where ($where);
- my $select_sql = $sql_maker->_recurse_fields ($sub_select);
- # sort needed joins
- for my $alias (keys %join_info) {
+ for my $alias (keys %original_join_info) {
+ my $seen_re = qr/\b $alias $sep/x;
- # any table alias found on a column name in where or order_by
- # gets included in %inner_joins
- # Also any parent joins that are needed to reach this particular alias
- for my $piece ($select_sql, $where_sql, @order_by ) {
- if ($piece =~ /\b $alias $sep/x) {
- $inner_joins{$alias} = 1;
+ for my $piece ($where_sql, $group_by_sql, @non_prefetch_order_by_chunks ) {
+ if ($piece =~ $seen_re) {
+ $restrict_aliases->{$alias} = 1;
}
}
+
+ if ($non_prefetch_select_sql =~ $seen_re) {
+ $select_aliases->{$alias} = 1;
+ }
+
+ if ($prefetch_select_sql =~ $seen_re) {
+ $prefetch_aliases->{$alias} = 1;
+ }
+
}
}
- # scan for non-leaf/non-left joins and mark as needed
- # also mark all ancestor joins that are needed to reach this particular alias
- # (e.g. join => { cds => 'tracks' } - tracks will bring cds too )
- #
- # traverse by the size of the -join_path i.e. reverse depth first
- for my $alias (sort { @{$join_info{$b}{-join_path}} <=> @{$join_info{$a}{-join_path}} } (keys %join_info) ) {
+ # Add any non-left joins to the restriction list (such joins are indeed restrictions)
+ for my $j (values %original_join_info) {
+ my $alias = $j->{-alias} or next;
+ $restrict_aliases->{$alias} = 1 if (
+ (not $j->{-join_type})
+ or
+ ($j->{-join_type} !~ /^left (?: \s+ outer)? $/xi)
+ );
+ }
- my $j = $join_info{$alias};
- $inner_joins{$alias} = 1 if (! $j->{-join_type} || ($j->{-join_type} !~ /^left$/i) );
-
- if ($inner_joins{$alias}) {
- $inner_joins{$_} = 1 for (@{$j->{-join_path}});
+ # mark all join parents as mentioned
+ # (e.g. join => { cds => 'tracks' } - tracks will need to bring cds too )
+ for my $collection ($restrict_aliases, $select_aliases) {
+ for my $alias (keys %$collection) {
+ $collection->{$_} = 1
+ for (@{ $original_join_info{$alias}{-join_path} || [] });
}
}
# construct the inner $from for the subquery
- my $inner_from = [ $join_root ];
+ my %inner_joins = (map { %{$_ || {}} } ($restrict_aliases, $select_aliases) );
+ my @inner_from;
for my $j (@$from) {
- push @$inner_from, $j if $inner_joins{$j->[0]{-alias}};
+ push @inner_from, $j if $inner_joins{$j->[0]{-alias}};
}
# if a multi-type join was needed in the subquery ("multi" is indicated by
# presence in {collapse}) - add a group_by to simulate the collapse in the subq
- unless ($sub_attrs->{group_by}) {
+ unless ($inner_attrs->{group_by}) {
for my $alias (keys %inner_joins) {
# the dot comes from some weirdness in collapse
# remove after the rewrite
if ($attrs->{collapse}{".$alias"}) {
- $sub_attrs->{group_by} ||= $sub_select;
+ $inner_attrs->{group_by} ||= $inner_select;
last;
}
}
}
+ # demote the inner_from head
+ $inner_from[0] = $inner_from[0][0];
+
# generate the subquery
my $subq = $self->_select_args_to_query (
- $inner_from,
- $sub_select,
+ \@inner_from,
+ $inner_select,
$where,
- $sub_attrs
+ $inner_attrs,
);
+
my $subq_joinspec = {
- -alias => $select_root_alias,
- -source_handle => $join_root->{-source_handle},
- $select_root_alias => $subq,
+ -alias => $attrs->{alias},
+ -source_handle => $inner_from[0]{-source_handle},
+ $attrs->{alias} => $subq,
};
- # Generate a new from (really just replace the join slot with the subquery)
- # Before we would start the outer chain from the subquery itself (i.e.
- # SELECT ... FROM (SELECT ... ) alias JOIN ..., but this turned out to be
- # a bad idea for search_related, as the root of the chain was effectively
- # lost (i.e. $artist_rs->search_related ('cds'... ) would result in alias
- # of 'cds', which would prevent from doing things like order_by artist.*)
- # See t/prefetch/via_search_related.t for a better idea
+ # Generate the outer from - this is relatively easy (really just replace
+ # the join slot with the subquery), with a major caveat - we can not
+ # join anything that is non-selecting (not part of the prefetch), but at
+ # the same time is a multi-type relationship, as it will explode the result.
+ #
+ # There are two possibilities here
+ # - either the join is non-restricting, in which case we simply throw it away
+ # - it is part of the restrictions, in which case we need to collapse the outer
+ # result by tackling yet another group_by to the outside of the query
+
+ # so first generate the outer_from, up to the substitution point
my @outer_from;
- if ($join_root->{-alias} eq $select_root_alias) { # just swap the root part and we're done
- @outer_from = (
- $subq_joinspec,
- @$from,
- )
+ while (my $j = shift @$from) {
+ if ($j->[0]{-alias} eq $attrs->{alias}) { # time to swap
+ push @outer_from, [
+ $subq_joinspec,
+ @{$j}[1 .. $#$j],
+ ];
+ last; # we'll take care of what's left in $from below
+ }
+ else {
+ push @outer_from, $j;
+ }
}
- else { # this is trickier
- @outer_from = ($join_root);
- for my $j (@$from) {
- if ($j->[0]{-alias} eq $select_root_alias) {
- push @outer_from, [
- $subq_joinspec,
- @{$j}[1 .. $#$j],
- ];
- }
- else {
- push @outer_from, $j;
- }
+ # see what's left - throw away if not selecting/restricting
+ # also throw in a group_by if restricting to guard against
+ # cross-join explosions
+ #
+ while (my $j = shift @$from) {
+ my $alias = $j->[0]{-alias};
+
+ if ($select_aliases->{$alias} || $prefetch_aliases->{$alias}) {
+ push @outer_from, $j;
}
+ elsif ($restrict_aliases->{$alias}) {
+ push @outer_from, $j;
+
+ # FIXME - this should be obviated by SQLA2, as I'll be able to
+ # have restrict_inner and restrict_outer... or something to that
+ # effect... I think...
+
+ # FIXME2 - I can't find a clean way to determine if a particular join
+ # is a multi - instead I am just treating everything as a potential
+ # explosive join (ribasushi)
+ #
+ # if (my $handle = $j->[0]{-source_handle}) {
+ # my $rsrc = $handle->resolve;
+ # ... need to bail out of the following if this is not a multi,
+ # as it will be much easier on the db ...
+
+ $outer_attrs->{group_by} ||= $outer_select;
+ # }
+ }
}
+ # demote the outer_from head
+ $outer_from[0] = $outer_from[0][0];
+
# This is totally horrific - the $where ends up in both the inner and outer query
# Unfortunately not much can be done until SQLA2 introspection arrives, and even
# then if where conditions apply to the *right* side of the prefetch, you may have
@@ -1777,7 +1822,7 @@
# the outer select to exclude joins you didn't want in the first place
#
# OTOH it can be seen as a plus: <ash> (notes that this query would make a DBA cry ;)
- return (\@outer_from, $select, $where, $attrs);
+ return (\@outer_from, $outer_select, $where, $outer_attrs);
}
sub _resolve_ident_sources {
@@ -2065,15 +2110,37 @@
$self->dbh_do('_dbh_last_insert_id', @_);
}
-=head2 sqlt_type
+=head2 _native_data_type
-Returns the database driver name.
+=over 4
+=item Arguments: $type_name
+
+=back
+
+This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
+is currently used only by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
+L<::Sybase|DBIx::Class::Storage::DBI::Sybase>.
+
+The default implementation returns C<undef>; implement it in your Storage
+driver if you need this functionality.
+
+Should map types from other databases to the native RDBMS type, for example
+C<VARCHAR2> to C<VARCHAR>.
+
+Types with modifiers should map to the underlying data type. For example,
+C<INTEGER AUTO_INCREMENT> should become C<INTEGER>.
+
+Composite types should map to the container type, for example
+C<ENUM(foo,bar,baz)> becomes C<ENUM>.
+
=cut
-sub sqlt_type { shift->_get_dbh->{Driver}->{Name} }
+sub _native_data_type {
+ #my ($self, $data_type) = @_;
+ return undef
+}
-
# Check if placeholders are supported at all
sub _placeholders_supported {
my $self = shift;
@@ -2104,7 +2171,23 @@
return $@ ? 0 : 1;
}
+=head2 sqlt_type
+Returns the database driver name.
+
+=cut
+
+sub sqlt_type {
+ my ($self) = @_;
+
+ if (not $self->_driver_determined) {
+ $self->_determine_driver;
+ goto $self->can ('sqlt_type');
+ }
+
+ $self->_get_dbh->{Driver}->{Name};
+}
+
=head2 bind_attribute_by_data_type
Given a datatype from column info, returns a database specific bind
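
To illustrate the _native_data_type hook documented above, here is a minimal
sketch of a driver-side override. The storage subclass and the type map are
hypothetical (this is not the actual Sybase implementation); it only mirrors
the three mapping rules from the POD: vendor aliases, modifiers and composite
types all collapse to a base native type.

    package MyApp::Storage::ExampleDB;

    use strict;
    use warnings;
    use base 'DBIx::Class::Storage::DBI';
    use mro 'c3';

    sub _native_data_type {
      my ($self, $type) = @_;

      return undef unless defined $type;

      $type = lc $type;
      $type =~ s/\(.*//;                 # ENUM(foo,bar,baz)      => enum
      $type =~ s/\s+auto_increment$//;   # INTEGER AUTO_INCREMENT => integer

      my %map = ( varchar2 => 'varchar' );  # vendor alias => native type

      return $map{$type} || $type;
    }

    1;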
Modified: DBIx-Class/0.08/branches/sybase/t/03podcoverage.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/03podcoverage.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/03podcoverage.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -22,41 +22,48 @@
# do not need to be documented.
my $exceptions = {
'DBIx::Class' => {
- ignore => [
- qw/MODIFY_CODE_ATTRIBUTES
- component_base_class
- mk_classdata
- mk_classaccessor/
- ]
+ ignore => [qw/
+ MODIFY_CODE_ATTRIBUTES
+ component_base_class
+ mk_classdata
+ mk_classaccessor
+ /]
},
'DBIx::Class::Row' => {
- ignore => [
- qw( MULTICREATE_DEBUG )
- ],
+ ignore => [qw/
+ MULTICREATE_DEBUG
+ /],
},
'DBIx::Class::ResultSource' => {
ignore => [qw/
- compare_relationship_keys
- pk_depends_on
- resolve_condition
- resolve_join
- resolve_prefetch
+ compare_relationship_keys
+ pk_depends_on
+ resolve_condition
+ resolve_join
+ resolve_prefetch
/],
},
+ 'DBIx::Class::ResultSourceHandle' => {
+ ignore => [qw/
+ schema
+ source_moniker
+ /],
+ },
'DBIx::Class::Storage' => {
- ignore => [
- qw(cursor)
- ]
+ ignore => [qw/
+ schema
+ cursor
+ /]
},
'DBIx::Class::Schema' => {
- ignore => [
- qw(setup_connection_class)
- ]
+ ignore => [qw/
+ setup_connection_class
+ /]
},
'DBIx::Class::Storage::DBI::Sybase' => {
- ignore => [
- qw/should_quote_data_type/,
- ]
+ ignore => [qw/
+ should_quote_data_type
+ /]
},
'DBIx::Class::CDBICompat::AccessorMapping' => { skip => 1 },
'DBIx::Class::CDBICompat::AbstractSearch' => {
@@ -105,6 +112,7 @@
'DBIx::Class::ResultSetProxy' => { skip => 1 },
'DBIx::Class::ResultSetManager' => { skip => 1 },
'DBIx::Class::ResultSourceProxy' => { skip => 1 },
+ 'DBIx::Class::Storage::Statistics' => { skip => 1 },
'DBIx::Class::Storage::DBI' => { skip => 1 },
'DBIx::Class::Storage::DBI::Replicated::Types' => { skip => 1 },
'DBIx::Class::Storage::DBI::DB2' => { skip => 1 },
Deleted: DBIx-Class/0.08/branches/sybase/t/18inserterror.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/18inserterror.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/18inserterror.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,29 +0,0 @@
-use Class::C3;
-use strict;
-use Test::More;
-use warnings;
-
-BEGIN {
- eval "use DBD::SQLite";
- plan $@
- ? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 4 );
-}
-
-use lib qw(t/lib);
-
-use_ok( 'DBICTest' );
-use_ok( 'DBICTest::Schema' );
-my $schema = DBICTest->init_schema;
-
-{
- my $warnings;
- local $SIG{__WARN__} = sub { $warnings .= $_[0] };
- eval {
- $schema->resultset('CD')
- ->create({ title => 'vacation in antarctica' })
- };
- like $@, qr/NULL/; # as opposed to some other error
- unlike( $warnings, qr/uninitialized value/, "No warning from Storage" );
-}
-
Deleted: DBIx-Class/0.08/branches/sybase/t/31stats.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/31stats.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/31stats.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,104 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use warnings;
-use Test::More;
-
-plan tests => 12;
-
-use lib qw(t/lib);
-
-use_ok('DBICTest');
-my $schema = DBICTest->init_schema();
-
-my $cbworks = 0;
-
-$schema->storage->debugcb(sub { $cbworks = 1; });
-$schema->storage->debug(0);
-my $rs = $schema->resultset('CD')->search({});
-$rs->count();
-ok(!$cbworks, 'Callback not called with debug disabled');
-
-$schema->storage->debug(1);
-
-$rs->count();
-ok($cbworks, 'Debug callback worked.');
-
-my $prof = new DBIx::Test::Profiler();
-$schema->storage->debugobj($prof);
-
-# Test non-transaction calls.
-$rs->count();
-ok($prof->{'query_start'}, 'query_start called');
-ok($prof->{'query_end'}, 'query_end called');
-ok(!$prof->{'txn_begin'}, 'txn_begin not called');
-ok(!$prof->{'txn_commit'}, 'txn_commit not called');
-
-$prof->reset();
-
-# Test transaction calls
-$schema->txn_begin();
-ok($prof->{'txn_begin'}, 'txn_begin called');
-
-$rs = $schema->resultset('CD')->search({});
-$rs->count();
-ok($prof->{'query_start'}, 'query_start called');
-ok($prof->{'query_end'}, 'query_end called');
-
-$schema->txn_commit();
-ok($prof->{'txn_commit'}, 'txn_commit called');
-
-$prof->reset();
-
-# Test a rollback
-$schema->txn_begin();
-$rs = $schema->resultset('CD')->search({});
-$rs->count();
-$schema->txn_rollback();
-ok($prof->{'txn_rollback'}, 'txn_rollback called');
-
-$schema->storage->debug(0);
-
-package DBIx::Test::Profiler;
-use strict;
-
-sub new {
- my $self = bless({});
-}
-
-sub query_start {
- my $self = shift();
- $self->{'query_start'} = 1;
-}
-
-sub query_end {
- my $self = shift();
- $self->{'query_end'} = 1;
-}
-
-sub txn_begin {
- my $self = shift();
- $self->{'txn_begin'} = 1;
-}
-
-sub txn_rollback {
- my $self = shift();
- $self->{'txn_rollback'} = 1;
-}
-
-sub txn_commit {
- my $self = shift();
- $self->{'txn_commit'} = 1;
-}
-
-sub reset {
- my $self = shift();
-
- $self->{'query_start'} = 0;
- $self->{'query_end'} = 0;
- $self->{'txn_begin'} = 0;
- $self->{'txn_rollback'} = 0;
- $self->{'txn_end'} = 0;
-}
-
-1;
Deleted: DBIx-Class/0.08/branches/sybase/t/32connect_code_ref.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/32connect_code_ref.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/32connect_code_ref.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,24 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-plan tests => 1;
-
-# Set up the "usual" sqlite for DBICTest
-my $normal_schema = DBICTest->init_schema( sqlite_use_file => 1 );
-
-# Steal the dsn, which should be like 'dbi:SQLite:t/var/DBIxClass.db'
-my $normal_dsn = $normal_schema->storage->_dbi_connect_info->[0];
-
-# Make sure we have no active connection
-$normal_schema->storage->disconnect;
-
-# Make a new clone with a new connection, using a code reference
-my $code_ref_schema = $normal_schema->connect(sub { DBI->connect($normal_dsn); });
-
-# Stolen from 60core.t - this just verifies things seem to work at all
-my @art = $code_ref_schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
-cmp_ok(@art, '==', 3, "Three artists returned");
Deleted: DBIx-Class/0.08/branches/sybase/t/33storage_reconnect.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/33storage_reconnect.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/33storage_reconnect.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,73 +0,0 @@
-use strict;
-use warnings;
-
-use FindBin;
-use File::Copy;
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-plan tests => 6;
-
-my $db_orig = "$FindBin::Bin/var/DBIxClass.db";
-my $db_tmp = "$db_orig.tmp";
-
-# Set up the "usual" sqlite for DBICTest
-my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
-
-# Make sure we're connected by doing something
-my @art = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
-cmp_ok(@art, '==', 3, "Three artists returned");
-
-# Disconnect the dbh, and be sneaky about it
-# Also test if DBD::SQLite finaly knows how to ->disconnect properly
-{
- my $w;
- local $SIG{__WARN__} = sub { $w = shift };
- $schema->storage->_dbh->disconnect;
- ok ($w !~ /active statement handles/, 'SQLite can disconnect properly');
-}
-
-# Try the operation again - What should happen here is:
-# 1. S::DBI blindly attempts the SELECT, which throws an exception
-# 2. It catches the exception, checks ->{Active}/->ping, sees the disconnected state...
-# 3. Reconnects, and retries the operation
-# 4. Success!
-my @art_two = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
-cmp_ok(@art_two, '==', 3, "Three artists returned");
-
-### Now, disconnect the dbh, and move the db file;
-# create a new one and chmod 000 to prevent SQLite from connecting.
-$schema->storage->_dbh->disconnect;
-move( $db_orig, $db_tmp );
-open DBFILE, '>', $db_orig;
-print DBFILE 'THIS IS NOT A REAL DATABASE';
-close DBFILE;
-chmod 0000, $db_orig;
-
-### Try the operation again... it should fail, since there's no db
-{
- # Catch the DBI connection error
- local $SIG{__WARN__} = sub {};
- eval {
- my @art_three = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
- };
- ok( $@, 'The operation failed' );
-}
-
-### Now, move the db file back to the correct name
-unlink($db_orig);
-move( $db_tmp, $db_orig );
-
-SKIP: {
- skip "Cannot reconnect if original connection didn't fail", 2
- if ( $@ =~ /encrypted or is not a database/ );
-
- ### Try the operation again... this time, it should succeed
- my @art_four;
- eval {
- @art_four = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
- };
- ok( !$@, 'The operation succeeded' );
- cmp_ok( @art_four, '==', 3, "Three artists returned" );
-}
Deleted: DBIx-Class/0.08/branches/sybase/t/35disable_sth_caching.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/35disable_sth_caching.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/35disable_sth_caching.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,19 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-plan tests => 2;
-
-# Set up the "usual" sqlite for DBICTest
-my $schema = DBICTest->init_schema;
-
-my $sth_one = $schema->storage->sth('SELECT 42');
-my $sth_two = $schema->storage->sth('SELECT 42');
-$schema->storage->disable_sth_caching(1);
-my $sth_three = $schema->storage->sth('SELECT 42');
-
-ok($sth_one == $sth_two, "statement caching works");
-ok($sth_two != $sth_three, "disabling statement caching works");
Deleted: DBIx-Class/0.08/branches/sybase/t/36datetime.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/36datetime.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/36datetime.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,28 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-eval { require DateTime::Format::SQLite };
-plan $@ ? ( skip_all => 'Requires DateTime::Format::SQLite' )
- : ( tests => 3 );
-
-my $schema = DBICTest->init_schema(
- no_deploy => 1, # Deploying would cause an early rebless
-);
-
-is(
- ref $schema->storage, 'DBIx::Class::Storage::DBI',
- 'Starting with generic storage'
-);
-
-# Calling date_time_parser should cause the storage to be reblessed,
-# so that we can pick up datetime_parser_type from subclasses
-
-my $parser = $schema->storage->datetime_parser();
-
-is($parser, 'DateTime::Format::SQLite', 'Got expected storage-set datetime_parser');
-isa_ok($schema->storage, 'DBIx::Class::Storage::DBI::SQLite', 'storage');
-
Modified: DBIx-Class/0.08/branches/sybase/t/72pg.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/72pg.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/72pg.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -50,8 +50,16 @@
unless ($dsn && $user);
DBICTest::Schema->load_classes( 'Casecheck', 'ArrayTest' );
-my $schema = DBICTest::Schema->connect($dsn, $user, $pass,);
+# make sure sqlt_type overrides work (::Storage::DBI::Pg does this)
+{
+ my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
+
+ ok (!$schema->storage->_dbh, 'definitely not connected');
+ is ($schema->storage->sqlt_type, 'PostgreSQL', 'sqlt_type correct pre-connection');
+}
+
+my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
# Check that datetime_parser returns correctly before we explicitly connect.
SKIP: {
eval { require DateTime::Format::Pg };
Modified: DBIx-Class/0.08/branches/sybase/t/89dbicadmin.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/89dbicadmin.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/89dbicadmin.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -16,7 +16,7 @@
plan skip_all => 'Install Text::CSV_XS or Text::CSV_PP to run this test' if ($@);
}
-my @json_backends = qw/XS JSON DWIW Syck/;
+my @json_backends = qw/XS JSON DWIW/;
my $tests_per_run = 5;
plan tests => $tests_per_run * @json_backends;
Deleted: DBIx-Class/0.08/branches/sybase/t/91debug.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/91debug.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/91debug.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,73 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBIC::DebugObj;
-use DBIC::SqlMakerTest;
-
-my $schema = DBICTest->init_schema();
-
-plan tests => 7;
-
-ok ( $schema->storage->debug(1), 'debug' );
-ok ( defined(
- $schema->storage->debugfh(
- IO::File->new('t/var/sql.log', 'w')
- )
- ),
- 'debugfh'
- );
-
-$schema->storage->debugfh->autoflush(1);
-my $rs = $schema->resultset('CD')->search({});
-$rs->count();
-
-my $log = new IO::File('t/var/sql.log', 'r') or die($!);
-my $line = <$log>;
-$log->close();
-ok($line =~ /^SELECT COUNT/, 'Log success');
-
-$schema->storage->debugfh(undef);
-$ENV{'DBIC_TRACE'} = '=t/var/foo.log';
-$rs = $schema->resultset('CD')->search({});
-$rs->count();
-$log = new IO::File('t/var/foo.log', 'r') or die($!);
-$line = <$log>;
-$log->close();
-ok($line =~ /^SELECT COUNT/, 'Log success');
-$schema->storage->debugobj->debugfh(undef);
-delete($ENV{'DBIC_TRACE'});
-open(STDERRCOPY, '>&STDERR');
-stat(STDERRCOPY); # nop to get warnings quiet
-close(STDERR);
-eval {
- $rs = $schema->resultset('CD')->search({});
- $rs->count();
-};
-ok($@, 'Died on closed FH');
-open(STDERR, '>&STDERRCOPY');
-
-# test trace output correctness for bind params
-{
- my ($sql, @bind);
- $schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind));
-
- my @cds = $schema->resultset('CD')->search( { artist => 1, cdid => { -between => [ 1, 3 ] }, } );
- is_same_sql_bind(
- $sql, \@bind,
- "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) ): '1', '1', '3'",
- [qw/'1' '1' '3'/],
- 'got correct SQL with all bind parameters (debugcb)'
- );
-
- @cds = $schema->resultset('CD')->search( { artist => 1, cdid => { -between => [ 1, 3 ] }, } );
- is_same_sql_bind(
- $sql, \@bind,
- "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) )", ["'1'", "'1'", "'3'"],
- 'got correct SQL with all bind parameters (debugobj)'
- );
-}
-
-1;
Deleted: DBIx-Class/0.08/branches/sybase/t/92storage.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/92storage.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/92storage.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,172 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use Data::Dumper;
-
-{
- package DBICTest::ExplodingStorage::Sth;
- use strict;
- use warnings;
-
- sub execute { die "Kablammo!" }
-
- sub bind_param {}
-
- package DBICTest::ExplodingStorage;
- use strict;
- use warnings;
- use base 'DBIx::Class::Storage::DBI::SQLite';
-
- my $count = 0;
- sub sth {
- my ($self, $sql) = @_;
- return bless {}, "DBICTest::ExplodingStorage::Sth" unless $count++;
- return $self->next::method($sql);
- }
-
- sub connected {
- return 0 if $count == 1;
- return shift->next::method(@_);
- }
-}
-
-plan tests => 17;
-
-my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
-
-is( ref($schema->storage), 'DBIx::Class::Storage::DBI::SQLite',
- 'Storage reblessed correctly into DBIx::Class::Storage::DBI::SQLite' );
-
-my $storage = $schema->storage;
-$storage->ensure_connected;
-
-eval {
- $schema->storage->throw_exception('test_exception_42');
-};
-like($@, qr/\btest_exception_42\b/, 'basic exception');
-
-eval {
- $schema->resultset('CD')->search_literal('broken +%$#$1')->all;
-};
-like($@, qr/prepare_cached failed/, 'exception via DBI->HandleError, etc');
-
-bless $storage, "DBICTest::ExplodingStorage";
-$schema->storage($storage);
-
-eval {
- $schema->resultset('Artist')->create({ name => "Exploding Sheep" });
-};
-
-is($@, "", "Exploding \$sth->execute was caught");
-
-is(1, $schema->resultset('Artist')->search({name => "Exploding Sheep" })->count,
- "And the STH was retired");
-
-
-# testing various invocations of connect_info ([ ... ])
-
-my $coderef = sub { 42 };
-my $invocations = {
- 'connect_info ([ $d, $u, $p, \%attr, \%extra_attr])' => {
- args => [
- 'foo',
- 'bar',
- undef,
- {
- on_connect_do => [qw/a b c/],
- PrintError => 0,
- },
- {
- AutoCommit => 1,
- on_disconnect_do => [qw/d e f/],
- },
- {
- unsafe => 1,
- auto_savepoint => 1,
- },
- ],
- dbi_connect_info => [
- 'foo',
- 'bar',
- undef,
- {
- %{$storage->_default_dbi_connect_attributes || {} },
- PrintError => 0,
- AutoCommit => 1,
- },
- ],
- },
-
- 'connect_info ([ \%code, \%extra_attr ])' => {
- args => [
- $coderef,
- {
- on_connect_do => [qw/a b c/],
- PrintError => 0,
- AutoCommit => 1,
- on_disconnect_do => [qw/d e f/],
- },
- {
- unsafe => 1,
- auto_savepoint => 1,
- },
- ],
- dbi_connect_info => [
- $coderef,
- ],
- },
-
- 'connect_info ([ \%attr ])' => {
- args => [
- {
- on_connect_do => [qw/a b c/],
- PrintError => 1,
- AutoCommit => 0,
- on_disconnect_do => [qw/d e f/],
- user => 'bar',
- dsn => 'foo',
- },
- {
- unsafe => 1,
- auto_savepoint => 1,
- },
- ],
- dbi_connect_info => [
- 'foo',
- 'bar',
- undef,
- {
- %{$storage->_default_dbi_connect_attributes || {} },
- PrintError => 1,
- AutoCommit => 0,
- },
- ],
- },
-};
-
-for my $type (keys %$invocations) {
-
- # we can not use a cloner portably because of the coderef
- # so compare dumps instead
- local $Data::Dumper::Sortkeys = 1;
- my $arg_dump = Dumper ($invocations->{$type}{args});
-
- $storage->connect_info ($invocations->{$type}{args});
-
- is ($arg_dump, Dumper ($invocations->{$type}{args}), "$type didn't modify passed arguments");
-
-
- is_deeply ($storage->_dbi_connect_info, $invocations->{$type}{dbi_connect_info}, "$type produced correct _dbi_connect_info");
- ok ( (not $storage->auto_savepoint and not $storage->unsafe), "$type correctly ignored extra hashref");
-
- is_deeply (
- [$storage->on_connect_do, $storage->on_disconnect_do ],
- [ [qw/a b c/], [qw/d e f/] ],
- "$type correctly parsed DBIC specific on_[dis]connect_do",
- );
-}
-
-1;
Deleted: DBIx-Class/0.08/branches/sybase/t/92storage_on_connect_call.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/92storage_on_connect_call.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/92storage_on_connect_call.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,68 +0,0 @@
-use strict;
-use warnings;
-no warnings qw/once redefine/;
-
-use lib qw(t/lib);
-use DBICTest;
-
-use Test::More tests => 9;
-
-my $schema = DBICTest->init_schema(
- no_connect => 1,
- no_deploy => 1,
-);
-
-local *DBIx::Class::Storage::DBI::connect_call_foo = sub {
- isa_ok $_[0], 'DBIx::Class::Storage::DBI',
- 'got storage in connect_call method';
- is $_[1], 'bar', 'got param in connect_call method';
-};
-
-local *DBIx::Class::Storage::DBI::disconnect_call_foo = sub {
- isa_ok $_[0], 'DBIx::Class::Storage::DBI',
- 'got storage in disconnect_call method';
-};
-
-ok $schema->connection(
- DBICTest->_database,
- {
- on_connect_call => [
- [ do_sql => 'create table test1 (id integer)' ],
- [ do_sql => [ 'insert into test1 values (?)', {}, 1 ] ],
- [ do_sql => sub { ['insert into test1 values (2)'] } ],
- [ sub { $_[0]->dbh->do($_[1]) }, 'insert into test1 values (3)' ],
- # this invokes $storage->connect_call_foo('bar') (above)
- [ foo => 'bar' ],
- ],
- on_connect_do => 'insert into test1 values (4)',
- on_disconnect_call => 'foo',
- },
-), 'connection()';
-
-is_deeply (
- $schema->storage->dbh->selectall_arrayref('select * from test1'),
- [ [ 1 ], [ 2 ], [ 3 ], [ 4 ] ],
- 'on_connect_call/do actions worked'
-);
-
-local *DBIx::Class::Storage::DBI::connect_call_foo = sub {
- isa_ok $_[0], 'DBIx::Class::Storage::DBI',
- 'got storage in connect_call method';
-};
-
-local *DBIx::Class::Storage::DBI::connect_call_bar = sub {
- isa_ok $_[0], 'DBIx::Class::Storage::DBI',
- 'got storage in connect_call method';
-};
-
-$schema->storage->disconnect;
-
-ok $schema->connection(
- DBICTest->_database,
- {
- # method list form
- on_connect_call => [ 'foo', sub { ok 1, "coderef in list form" }, 'bar' ],
- },
-), 'connection()';
-
-$schema->storage->ensure_connected;
Deleted: DBIx-Class/0.08/branches/sybase/t/92storage_on_connect_do.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/92storage_on_connect_do.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/92storage_on_connect_do.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,88 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More tests => 12;
-
-use lib qw(t/lib);
-use base 'DBICTest';
-
-
-my $schema = DBICTest->init_schema(
- no_connect => 1,
- no_deploy => 1,
-);
-
-ok $schema->connection(
- DBICTest->_database,
- {
- on_connect_do => 'CREATE TABLE TEST_empty (id INTEGER)',
- },
-), 'connection()';
-
-is_deeply (
- $schema->storage->dbh->selectall_arrayref('SELECT * FROM TEST_empty'),
- [],
- 'string version on_connect_do() worked'
-);
-
-$schema->storage->disconnect;
-
-ok $schema->connection(
- DBICTest->_database,
- {
- on_connect_do => [
- 'CREATE TABLE TEST_empty (id INTEGER)',
- [ 'INSERT INTO TEST_empty VALUES (?)', {}, 2 ],
- \&insert_from_subref,
- ],
- on_disconnect_do =>
- [\&check_exists, 'DROP TABLE TEST_empty', \&check_dropped],
- },
-), 'connection()';
-
-is_deeply (
- $schema->storage->dbh->selectall_arrayref('SELECT * FROM TEST_empty'),
- [ [ 2 ], [ 3 ], [ 7 ] ],
- 'on_connect_do() worked'
-);
-eval { $schema->storage->dbh->do('SELECT 1 FROM TEST_nonexistent'); };
-ok $@, 'Searching for nonexistent table dies';
-
-$schema->storage->disconnect();
-
-my($connected, $disconnected, @cb_args);
-ok $schema->connection(
- DBICTest->_database,
- {
- on_connect_do => sub { $connected = 1; @cb_args = @_; },
- on_disconnect_do => sub { $disconnected = 1 },
- },
-), 'second connection()';
-$schema->storage->dbh->do('SELECT 1');
-ok $connected, 'on_connect_do() called after connect()';
-ok ! $disconnected, 'on_disconnect_do() not called after connect()';
-$schema->storage->disconnect();
-ok $disconnected, 'on_disconnect_do() called after disconnect()';
-
-isa_ok($cb_args[0], 'DBIx::Class::Storage', 'first arg to on_connect_do hook');
-
-sub check_exists {
- my $storage = shift;
- ok $storage->dbh->do('SELECT 1 FROM TEST_empty'), 'Table still exists';
- return;
-}
-
-sub check_dropped {
- my $storage = shift;
- eval { $storage->dbh->do('SELECT 1 FROM TEST_empty'); };
- ok $@, 'Reading from dropped table fails';
- return;
-}
-
-sub insert_from_subref {
- my $storage = shift;
- return [
- [ 'INSERT INTO TEST_empty VALUES (?)', {}, 3 ],
- 'INSERT INTO TEST_empty VALUES (7)',
- ];
-}
Deleted: DBIx-Class/0.08/branches/sybase/t/92storage_ping_count.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/92storage_ping_count.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/92storage_ping_count.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,61 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use Data::Dumper;
-use DBIC::SqlMakerTest;
-
-my $ping_count = 0;
-
-{
- local $SIG{__WARN__} = sub {};
- require DBIx::Class::Storage::DBI;
-
- my $ping = \&DBIx::Class::Storage::DBI::_ping;
-
- *DBIx::Class::Storage::DBI::_ping = sub {
- $ping_count++;
- goto &$ping;
- };
-}
-
-
-# measure pings around deploy() separately
-my $schema = DBICTest->init_schema( sqlite_use_file => 1, no_populate => 1 );
-
-is ($ping_count, 0, 'no _ping() calls during deploy');
-$ping_count = 0;
-
-
-
-DBICTest->populate_schema ($schema);
-
-# perform some operations and make sure they don't ping
-
-$schema->resultset('CD')->create({
- cdid => 6, artist => 3, title => 'mtfnpy', year => 2009
-});
-
-$schema->resultset('CD')->create({
- cdid => 7, artist => 3, title => 'mtfnpy2', year => 2009
-});
-
-$schema->storage->_dbh->disconnect;
-
-$schema->resultset('CD')->create({
- cdid => 8, artist => 3, title => 'mtfnpy3', year => 2009
-});
-
-$schema->storage->_dbh->disconnect;
-
-$schema->txn_do(sub {
- $schema->resultset('CD')->create({
- cdid => 9, artist => 3, title => 'mtfnpy4', year => 2009
- });
-});
-
-is $ping_count, 0, 'no _ping() calls';
-
-done_testing;
Added: DBIx-Class/0.08/branches/sybase/t/93autocast.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/93autocast.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/93autocast.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,82 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+{ # Fake storage driver for sqlite with autocast
+ package DBICTest::SQLite::AutoCast;
+ use base qw/
+ DBIx::Class::Storage::DBI::AutoCast
+ DBIx::Class::Storage::DBI::SQLite
+ /;
+ use mro 'c3';
+
+ my $type_map = {
+ datetime => 'DateTime',
+ integer => 'INT',
+ int => undef, # no conversion
+ };
+
+ sub _native_data_type {
+ return $type_map->{$_[1]};
+ }
+}
+
+my $schema = DBICTest->init_schema (storage_type => 'DBICTest::SQLite::AutoCast');
+
+# 'me.id' will be cast unlike the unqualified 'id'
+my $rs = $schema->resultset ('CD')->search ({
+ cdid => { '>', 5 },
+ 'tracks.last_updated_at' => { '!=', undef },
+ 'tracks.last_updated_on' => { '<', 2009 },
+ 'tracks.position' => 4,
+ 'tracks.single_track' => \[ '= ?', [ single_track => [1, 2, 3 ] ] ],
+}, { join => 'tracks' });
+
+my $bind = [
+ [ cdid => 5 ],
+ [ 'tracks.last_updated_on' => 2009 ],
+ [ 'tracks.position' => 4 ],
+ [ 'single_track' => [ 1, 2, 3] ],
+];
+
+is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ FROM cd me
+ LEFT JOIN track tracks ON tracks.cd = me.cdid
+ WHERE
+ cdid > ?
+ AND tracks.last_updated_at IS NOT NULL
+ AND tracks.last_updated_on < ?
+ AND tracks.position = ?
+ AND tracks.single_track = ?
+ )',
+ $bind,
+ 'expected sql with casting off',
+);
+
+$schema->storage->auto_cast (1);
+
+is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ FROM cd me
+ LEFT JOIN track tracks ON tracks.cd = me.cdid
+ WHERE
+ cdid > CAST(? AS INT)
+ AND tracks.last_updated_at IS NOT NULL
+ AND tracks.last_updated_on < CAST (? AS yyy)
+ AND tracks.position = ?
+ AND tracks.single_track = CAST(? AS INT)
+ )',
+ $bind,
+ 'expected sql with casting on',
+);
+
+done_testing;
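
For anyone following the new AutoCast behaviour, the test above reduces to the pattern sketched here: compose ::DBI::AutoCast ahead of the vendor storage, provide a _native_data_type mapping, and switch auto_cast on. This is purely an illustrative sketch assuming the DBICTest schema shipped in t/lib and the auto_cast accessor introduced on this branch:

  use strict;
  use warnings;
  use lib 't/lib';
  use DBICTest;

  {
    # AutoCast must precede the vendor storage in the C3 order so that its
    # SQL rewriting wraps the plain SQLite storage underneath.
    package My::AutoCastSQLite;
    use base qw/
      DBIx::Class::Storage::DBI::AutoCast
      DBIx::Class::Storage::DBI::SQLite
    /;
    use mro 'c3';

    # column data_type => native type used in CAST(? AS ...); undef means "leave alone"
    my %native = ( integer => 'INT', int => undef );
    sub _native_data_type { $native{ $_[1] } }
  }

  my $schema = DBICTest->init_schema( storage_type => 'My::AutoCastSQLite' );
  $schema->storage->auto_cast(1);

  # placeholders whose column type maps to a native type now render as CAST(? AS INT)
  my $rs = $schema->resultset('CD')->search({ cdid => { '>' => 5 } });
  print ${ $rs->as_query }->[0], "\n";
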
Deleted: DBIx-Class/0.08/branches/sybase/t/93storage_replication.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/93storage_replication.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/93storage_replication.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,853 +0,0 @@
-use strict;
-use warnings;
-use lib qw(t/lib);
-use Test::More;
-use Test::Exception;
-use DBICTest;
-use List::Util 'first';
-use Scalar::Util 'reftype';
-use File::Spec;
-use IO::Handle;
-
-BEGIN {
- eval "use DBIx::Class::Storage::DBI::Replicated; use Test::Moose";
- plan skip_all => "Deps not installed: $@" if $@;
-}
-
-use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
-use_ok 'DBIx::Class::Storage::DBI::Replicated::Balancer';
-use_ok 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-use_ok 'DBIx::Class::Storage::DBI::Replicated';
-
-use Moose();
-use MooseX::Types();
-diag "Using Moose version $Moose::VERSION and MooseX::Types version $MooseX::Types::VERSION";
-
-=head1 HOW TO USE
-
- This is a test of the replicated storage system. It will work in one of
- two ways: either it will try to fake replication with a couple of SQLite DBs
- and creative use of copy, or, if you define a couple of %ENV vars correctly,
- it will try to test against those. If you do that, it will assume the setup
- is properly replicating. Your results may vary, but I have demonstrated this
- to work with mysql native replication.
-
-=cut
-
-
-## ----------------------------------------------------------------------------
-## Build a class to hold all our required testing data and methods.
-## ----------------------------------------------------------------------------
-
-TESTSCHEMACLASSES: {
-
- ## --------------------------------------------------------------------- ##
- ## Create an object to contain your replicated stuff.
- ## --------------------------------------------------------------------- ##
-
- package DBIx::Class::DBI::Replicated::TestReplication;
-
- use DBICTest;
- use base qw/Class::Accessor::Fast/;
-
- __PACKAGE__->mk_accessors( qw/schema/ );
-
- ## Initialize the object
-
- sub new {
- my ($class, $schema_method) = (shift, shift);
- my $self = $class->SUPER::new(@_);
-
- $self->schema( $self->init_schema($schema_method) );
- return $self;
- }
-
- ## Get the Schema and set the replication storage type
-
- sub init_schema {
- # current SQLT SQLite producer does not handle DROP TABLE IF EXISTS, trap warnings here
- local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/ };
-
- my ($class, $schema_method) = @_;
-
- my $method = "get_schema_$schema_method";
- my $schema = $class->$method;
-
- return $schema;
- }
-
- sub get_schema_by_storage_type {
- DBICTest->init_schema(
- sqlite_use_file => 1,
- storage_type=>{
- '::DBI::Replicated' => {
- balancer_type=>'::Random',
- balancer_args=>{
- auto_validate_every=>100,
- master_read_weight => 1
- },
- }
- },
- deploy_args=>{
- add_drop_table => 1,
- },
- );
- }
-
- sub get_schema_by_connect_info {
- DBICTest->init_schema(
- sqlite_use_file => 1,
- storage_type=> '::DBI::Replicated',
- balancer_type=>'::Random',
- balancer_args=> {
- auto_validate_every=>100,
- master_read_weight => 1
- },
- deploy_args=>{
- add_drop_table => 1,
- },
- );
- }
-
- sub generate_replicant_connect_info {}
- sub replicate {}
- sub cleanup {}
-
- ## --------------------------------------------------------------------- ##
- ## Add a connect_info option to test option merging.
- ## --------------------------------------------------------------------- ##
- {
- package DBIx::Class::Storage::DBI::Replicated;
-
- use Moose;
-
- __PACKAGE__->meta->make_mutable;
-
- around connect_info => sub {
- my ($next, $self, $info) = @_;
- $info->[3]{master_option} = 1;
- $self->$next($info);
- };
-
- __PACKAGE__->meta->make_immutable;
-
- no Moose;
- }
-
- ## --------------------------------------------------------------------- ##
- ## Subclass for when you are using SQLite for testing; this provides fake
- ## replication support.
- ## --------------------------------------------------------------------- ##
-
- package DBIx::Class::DBI::Replicated::TestReplication::SQLite;
-
- use DBICTest;
- use File::Copy;
- use base 'DBIx::Class::DBI::Replicated::TestReplication';
-
- __PACKAGE__->mk_accessors(qw/master_path slave_paths/);
-
- ## Set the master path from DBICTest
-
- sub new {
- my $class = shift @_;
- my $self = $class->SUPER::new(@_);
-
- $self->master_path( DBICTest->_sqlite_dbfilename );
- $self->slave_paths([
- File::Spec->catfile(qw/t var DBIxClass_slave1.db/),
- File::Spec->catfile(qw/t var DBIxClass_slave2.db/),
- ]);
-
- return $self;
- }
-
- ## Return an Array of ArrayRefs, each of which is suitable to pass to
- ## $storage->connect_info when connecting replicants.
-
- sub generate_replicant_connect_info {
- my $self = shift @_;
- my @dsn = map {
- "dbi:SQLite:${_}";
- } @{$self->slave_paths};
-
- my @connect_infos = map { [$_,'','',{AutoCommit=>1}] } @dsn;
-
- ## Make sure nothing is left over from a failed test
- $self->cleanup;
-
- ## try a hashref too
- my $c = $connect_infos[0];
- $connect_infos[0] = {
- dsn => $c->[0],
- user => $c->[1],
- password => $c->[2],
- %{ $c->[3] }
- };
-
- @connect_infos
- }
-
- ## Do a 'good enough' replication by copying the master dbfile over each of
- ## the slave dbfiles. If the master is SQLite we do this, otherwise we
- ## just do a one second pause to let the slaves catch up.
-
- sub replicate {
- my $self = shift @_;
- foreach my $slave (@{$self->slave_paths}) {
- copy($self->master_path, $slave);
- }
- }
-
- ## Clean up after ourselves. Unlink all the slave paths.
-
- sub cleanup {
- my $self = shift @_;
- foreach my $slave (@{$self->slave_paths}) {
- if(-e $slave) {
- unlink $slave;
- }
- }
- }
-
- ## --------------------------------------------------------------------- ##
- ## Subclass for when you are setting the databases via custom export vars
- ## This is for when you have a replicating database setup that you are
- ## going to test against. You'll need to define the correct $ENV and have
- ## two slave databases to test against, as well as a replication system
- ## that will replicate in less than 1 second.
- ## --------------------------------------------------------------------- ##
-
- package DBIx::Class::DBI::Replicated::TestReplication::Custom;
- use base 'DBIx::Class::DBI::Replicated::TestReplication';
-
- ## Return an Array of ArrayRefs, each of which is suitable to pass to
- ## $storage->connect_info when connecting replicants.
-
- sub generate_replicant_connect_info {
- return (
- [$ENV{"DBICTEST_SLAVE0_DSN"}, $ENV{"DBICTEST_SLAVE0_DBUSER"}, $ENV{"DBICTEST_SLAVE0_DBPASS"}, {AutoCommit => 1}],
- [$ENV{"DBICTEST_SLAVE1_DSN"}, $ENV{"DBICTEST_SLAVE1_DBUSER"}, $ENV{"DBICTEST_SLAVE1_DBPASS"}, {AutoCommit => 1}],
- );
- }
-
- ## pause a bit to let the replication catch up
-
- sub replicate {
- sleep 1;
- }
-}
-
-## ----------------------------------------------------------------------------
-## Create an object and run some tests
-## ----------------------------------------------------------------------------
-
-## This first bunch of tests is basic, just making sure all the bits are behaving
-
-my $replicated_class = DBICTest->has_custom_dsn ?
- 'DBIx::Class::DBI::Replicated::TestReplication::Custom' :
- 'DBIx::Class::DBI::Replicated::TestReplication::SQLite';
-
-my $replicated;
-
-for my $method (qw/by_connect_info by_storage_type/) {
- undef $replicated;
- ok $replicated = $replicated_class->new($method)
- => "Created a replication object $method";
-
- isa_ok $replicated->schema
- => 'DBIx::Class::Schema';
-
- isa_ok $replicated->schema->storage
- => 'DBIx::Class::Storage::DBI::Replicated';
-
- isa_ok $replicated->schema->storage->balancer
- => 'DBIx::Class::Storage::DBI::Replicated::Balancer::Random'
- => 'configured balancer_type';
-}
-
-ok $replicated->schema->storage->meta
- => 'has a meta object';
-
-isa_ok $replicated->schema->storage->master
- => 'DBIx::Class::Storage::DBI';
-
-isa_ok $replicated->schema->storage->pool
- => 'DBIx::Class::Storage::DBI::Replicated::Pool';
-
-does_ok $replicated->schema->storage->balancer
- => 'DBIx::Class::Storage::DBI::Replicated::Balancer';
-
-ok my @replicant_connects = $replicated->generate_replicant_connect_info
- => 'got replication connect information';
-
-ok my @replicated_storages = $replicated->schema->storage->connect_replicants(@replicant_connects)
- => 'Created some storages suitable for replicants';
-
-our %debug;
-$replicated->schema->storage->debug(1);
-$replicated->schema->storage->debugcb(sub {
- my ($op, $info) = @_;
- ##warn "\n$op, $info\n";
- %debug = (
- op => $op,
- info => $info,
- dsn => ($info=~m/\[(.+)\]/)[0],
- storage_type => $info=~m/REPLICANT/ ? 'REPLICANT' : 'MASTER',
- );
-});
-
-ok my @all_storages = $replicated->schema->storage->all_storages
- => '->all_storages';
-
-is scalar @all_storages,
- 3
- => 'correct number of ->all_storages';
-
-is ((grep $_->isa('DBIx::Class::Storage::DBI'), @all_storages),
- 3
- => '->all_storages are correct type');
-
-my @all_storage_opts =
- grep { (reftype($_)||'') eq 'HASH' }
- map @{ $_->_connect_info }, @all_storages;
-
-is ((grep $_->{master_option}, @all_storage_opts),
- 3
- => 'connect_info was merged from master to replicants');
-
-my @replicant_names = keys %{ $replicated->schema->storage->replicants };
-
-ok @replicant_names, "found replicant names @replicant_names";
-
-## Silence warning about not supporting the is_replicating method if using the
-## sqlite dbs.
-$replicated->schema->storage->debugobj->silence(1)
- if first { m{^t/} } @replicant_names;
-
-isa_ok $replicated->schema->storage->balancer->current_replicant
- => 'DBIx::Class::Storage::DBI';
-
-$replicated->schema->storage->debugobj->silence(0);
-
-ok $replicated->schema->storage->pool->has_replicants
- => 'does have replicants';
-
-is $replicated->schema->storage->pool->num_replicants => 2
- => 'has two replicants';
-
-does_ok $replicated_storages[0]
- => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-
-does_ok $replicated_storages[1]
- => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-
-does_ok $replicated->schema->storage->replicants->{$replicant_names[0]}
- => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-
-does_ok $replicated->schema->storage->replicants->{$replicant_names[1]}
- => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-
-## Add some info to the database
-
-$replicated
- ->schema
- ->populate('Artist', [
- [ qw/artistid name/ ],
- [ 4, "Ozric Tentacles"],
- ]);
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- like $debug{info}, qr/INSERT/, 'Last was an insert';
-
-## Make sure all the slaves have the table definitions
-
-$replicated->replicate;
-$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
-$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
-
-## Silence warning about not supporting the is_replicating method if using the
-## sqlite dbs.
-$replicated->schema->storage->debugobj->silence(1)
- if first { m{^t/} } @replicant_names;
-
-$replicated->schema->storage->pool->validate_replicants;
-
-$replicated->schema->storage->debugobj->silence(0);
-
-## Make sure we can read the data.
-
-ok my $artist1 = $replicated->schema->resultset('Artist')->find(4)
- => 'Created Result';
-
-## We removed testing here since master read weight is on, so we can't tell in
-## advance what storage to expect. We turn master read weight off a bit lower
-## is $debug{storage_type}, 'REPLICANT'
-## => "got last query from a replicant: $debug{dsn}, $debug{info}";
-
-isa_ok $artist1
- => 'DBICTest::Artist';
-
-is $artist1->name, 'Ozric Tentacles'
- => 'Found expected name for first result';
-
-## Check that master_read_weight is honored
-{
- no warnings qw/once redefine/;
-
- local
- *DBIx::Class::Storage::DBI::Replicated::Balancer::Random::_random_number =
- sub { 999 };
-
- $replicated->schema->storage->balancer->increment_storage;
-
- is $replicated->schema->storage->balancer->current_replicant,
- $replicated->schema->storage->master
- => 'master_read_weight is honored';
-
- ## turn it off for the duration of the test
- $replicated->schema->storage->balancer->master_read_weight(0);
- $replicated->schema->storage->balancer->increment_storage;
-}
-
-## Add some new rows that only the master will have. This is because
-## we overload any type of write operation so that it must hit the master
-## database.
-
-$replicated
- ->schema
- ->populate('Artist', [
- [ qw/artistid name/ ],
- [ 5, "Doom's Children"],
- [ 6, "Dead On Arrival"],
- [ 7, "Watergate"],
- ]);
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- like $debug{info}, qr/INSERT/, 'Last was an insert';
-
-## Make sure all the slaves have the table definitions
-$replicated->replicate;
-
-## Should find some data now
-
-ok my $artist2 = $replicated->schema->resultset('Artist')->find(5)
- => 'Sync succeed';
-
-is $debug{storage_type}, 'REPLICANT'
- => "got last query from a replicant: $debug{dsn}";
-
-isa_ok $artist2
- => 'DBICTest::Artist';
-
-is $artist2->name, "Doom's Children"
- => 'Found expected name for first result';
-
-## What happens when we disconnect all the replicants?
-
-is $replicated->schema->storage->pool->connected_replicants => 2
- => "both replicants are connected";
-
-$replicated->schema->storage->replicants->{$replicant_names[0]}->disconnect;
-$replicated->schema->storage->replicants->{$replicant_names[1]}->disconnect;
-
-is $replicated->schema->storage->pool->connected_replicants => 0
- => "both replicants are now disconnected";
-
-## All these should pass, since the database should automatically reconnect
-
-ok my $artist3 = $replicated->schema->resultset('Artist')->find(6)
- => 'Still finding stuff.';
-
-is $debug{storage_type}, 'REPLICANT'
- => "got last query from a replicant: $debug{dsn}";
-
-isa_ok $artist3
- => 'DBICTest::Artist';
-
-is $artist3->name, "Dead On Arrival"
- => 'Found expected name for first result';
-
-is $replicated->schema->storage->pool->connected_replicants => 1
- => "At Least One replicant reconnected to handle the job";
-
-## What happens when we try to select something that doesn't exist?
-
-ok ! $replicated->schema->resultset('Artist')->find(666)
- => 'Correctly failed to find something.';
-
-is $debug{storage_type}, 'REPLICANT'
- => "got last query from a replicant: $debug{dsn}";
-
-## test the reliable option
-
-TESTRELIABLE: {
-
- $replicated->schema->storage->set_reliable_storage;
-
- ok $replicated->schema->resultset('Artist')->find(2)
- => 'Read from master 1';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- ok $replicated->schema->resultset('Artist')->find(5)
- => 'Read from master 2';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- $replicated->schema->storage->set_balanced_storage;
-
- ok $replicated->schema->resultset('Artist')->find(3)
- => 'Read from replicant';
-
- is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
-}
-
-## Make sure when reliable goes out of scope, we are using replicants again
-
-ok $replicated->schema->resultset('Artist')->find(1)
- => 'back to replicant 1.';
-
- is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
-
-ok $replicated->schema->resultset('Artist')->find(2)
- => 'back to replicant 2.';
-
- is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
-
-## set all the replicants to inactive, and make sure the balancer falls back to
-## the master.
-
-$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
-$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
-
-{
- ## catch the fallback to master warning
- open my $debugfh, '>', \my $fallback_warning;
- my $oldfh = $replicated->schema->storage->debugfh;
- $replicated->schema->storage->debugfh($debugfh);
-
- ok $replicated->schema->resultset('Artist')->find(2)
- => 'Fallback to master';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- like $fallback_warning, qr/falling back to master/
- => 'emits falling back to master warning';
-
- $replicated->schema->storage->debugfh($oldfh);
-}
-
-$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
-$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
-
-## Silence warning about not supporting the is_replicating method if using the
-## sqlite dbs.
-$replicated->schema->storage->debugobj->silence(1)
- if first { m{^t/} } @replicant_names;
-
-$replicated->schema->storage->pool->validate_replicants;
-
-$replicated->schema->storage->debugobj->silence(0);
-
-ok $replicated->schema->resultset('Artist')->find(2)
- => 'Returned to replicates';
-
-is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
-
-## Getting slave status tests
-
-SKIP: {
- ## We skip these tests unless you have custom replicants, since the default
- ## sqlite based replication tests don't support these functions.
-
- skip 'Cannot Test Replicant Status on Non Replicating Database', 10
- unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
-
- $replicated->replicate; ## Give the slaves a chance to catch up.
-
- ok $replicated->schema->storage->replicants->{$replicant_names[0]}->is_replicating
- => 'Replicants are replicating';
-
- is $replicated->schema->storage->replicants->{$replicant_names[0]}->lag_behind_master, 0
- => 'Replicant is zero seconds behind master';
-
- ## Test the validate replicants
-
- $replicated->schema->storage->pool->validate_replicants;
-
- is $replicated->schema->storage->pool->active_replicants, 2
- => 'Still have 2 replicants after validation';
-
- ## Force the replicants to fail the validate test by requiring their lag to
- ## be negative (i.e. ahead of the master!)
-
- $replicated->schema->storage->pool->maximum_lag(-10);
- $replicated->schema->storage->pool->validate_replicants;
-
- is $replicated->schema->storage->pool->active_replicants, 0
- => 'No way a replicant can be ahead of the master';
-
- ## Let's be fair to the replicants again. Let them lag up to 5
-
- $replicated->schema->storage->pool->maximum_lag(5);
- $replicated->schema->storage->pool->validate_replicants;
-
- is $replicated->schema->storage->pool->active_replicants, 2
- => 'Both replicants in good standing again';
-
- ## Check auto validate
-
- is $replicated->schema->storage->balancer->auto_validate_every, 100
- => "Got the expected value for auto validate";
-
- ## This will make sure we auto validate every time
- $replicated->schema->storage->balancer->auto_validate_every(0);
-
- ## set all the replicants to inactive, and make sure the balancer falls back to
- ## the master.
-
- $replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
- $replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
-
- ## Ok, now when we go to run a query, autovalidate SHOULD reconnect
-
- is $replicated->schema->storage->pool->active_replicants => 0
- => "both replicants turned off";
-
- ok $replicated->schema->resultset('Artist')->find(5)
- => 'replicant reactivated';
-
- is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
-
- is $replicated->schema->storage->pool->active_replicants => 2
- => "both replicants reactivated";
-}
-
-## Test the reliably callback
-
-ok my $reliably = sub {
-
- ok $replicated->schema->resultset('Artist')->find(5)
- => 'replicant reactivated';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
-} => 'created coderef properly';
-
-$replicated->schema->storage->execute_reliably($reliably);
-
-## Try something with an error
-
-ok my $unreliably = sub {
-
- ok $replicated->schema->resultset('ArtistXX')->find(5)
- => 'replicant reactivated';
-
-} => 'created coderef properly';
-
-throws_ok {$replicated->schema->storage->execute_reliably($unreliably)}
- qr/Can't find source for ArtistXX/
- => 'Bad coderef throws proper error';
-
-## Make sure replication came back
-
-ok $replicated->schema->resultset('Artist')->find(3)
- => 'replicant reactivated';
-
-is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
-
-## make sure transactions are set to execute_reliably
-
-ok my $transaction = sub {
-
- my $id = shift @_;
-
- $replicated
- ->schema
- ->populate('Artist', [
- [ qw/artistid name/ ],
- [ $id, "Children of the Grave"],
- ]);
-
- ok my $result = $replicated->schema->resultset('Artist')->find($id)
- => "Found expected artist for $id";
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'Found expected artist again for 1';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- return ($result, $more);
-
-} => 'Created a coderef properly';
-
-## Test the transaction with multi return
-{
- ok my @return = $replicated->schema->txn_do($transaction, 666)
- => 'did transaction';
-
- is $return[0]->id, 666
- => 'first returned value is correct';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- is $return[1]->id, 1
- => 'second returned value is correct';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
-}
-
-## Test that asking for single return works
-{
- ok my @return = $replicated->schema->txn_do($transaction, 777)
- => 'did transaction';
-
- is $return[0]->id, 777
- => 'first returned value is correct';
-
- is $return[1]->id, 1
- => 'second returned value is correct';
-}
-
-## Test transaction returning a single value
-
-{
- ok my $result = $replicated->schema->txn_do(sub {
- ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'found inside a transaction';
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
- return $more;
- }) => 'successfully processed transaction';
-
- is $result->id, 1
- => 'Got expected single result from transaction';
-}
-
-## Make sure replication came back
-
-ok $replicated->schema->resultset('Artist')->find(1)
- => 'replicant reactivated';
-
-is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
-
-## Test Discard changes
-
-{
- ok my $artist = $replicated->schema->resultset('Artist')->find(2)
- => 'got an artist to test discard changes';
-
- is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
-
- ok $artist->get_from_storage({force_pool=>'master'})
- => 'properly discard changes';
-
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
-
- ok $artist->discard_changes({force_pool=>'master'})
- => 'properly called discard_changes against master (manual attrs)';
-
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
-
- ok $artist->discard_changes()
- => 'properly called discard_changes against master (default attrs)';
-
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
-
- ok $artist->discard_changes({force_pool=>$replicant_names[0]})
- => 'properly able to override the default attributes';
-
- is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}"
-}
-
-## Test some edge cases, like trying to do a transaction inside a transaction, etc
-
-{
- ok my $result = $replicated->schema->txn_do(sub {
- return $replicated->schema->txn_do(sub {
- ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'found inside a transaction inside a transaction';
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
- return $more;
- });
- }) => 'successfully processed transaction';
-
- is $result->id, 1
- => 'Got expected single result from transaction';
-}
-
-{
- ok my $result = $replicated->schema->txn_do(sub {
- return $replicated->schema->storage->execute_reliably(sub {
- return $replicated->schema->txn_do(sub {
- return $replicated->schema->storage->execute_reliably(sub {
- ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'found inside crazy deep transactions and execute_reliably';
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
- return $more;
- });
- });
- });
- }) => 'successfully processed transaction';
-
- is $result->id, 1
- => 'Got expected single result from transaction';
-}
-
-## Test the force_pool resultset attribute.
-
-{
- ok my $artist_rs = $replicated->schema->resultset('Artist')
- => 'got artist resultset';
-
- ## Turn on Forced Pool Storage
- ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>'master'})
- => 'Created a resultset using force_pool storage';
-
- ok my $artist = $reliable_artist_rs->find(2)
- => 'got an artist result via force_pool storage';
-
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
-}
-
-## Test the force_pool resultset attribute part two.
-
-{
- ok my $artist_rs = $replicated->schema->resultset('Artist')
- => 'got artist resultset';
-
- ## Turn on Forced Pool Storage
- ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>$replicant_names[0]})
- => 'Created a resultset using force_pool storage';
-
- ok my $artist = $reliable_artist_rs->find(2)
- => 'got an artist result via force_pool storage';
-
- is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
-}
-## Delete the old database files
-$replicated->cleanup;
-
-done_testing;
-
-# vim: sw=4 sts=4 :
Deleted: DBIx-Class/0.08/branches/sybase/t/dbh_do.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/dbh_do.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/dbh_do.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,33 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use warnings;
-
-use Test::More tests => 8;
-use lib qw(t/lib);
-use DBICTest;
-
-
-my $schema = DBICTest->init_schema();
-my $storage = $schema->storage;
-
-my $test_func = sub {
- is $_[0], $storage;
- is $_[1], $storage->dbh;
- is $_[2], "foo";
- is $_[3], "bar";
-};
-
-$storage->dbh_do(
- $test_func,
- "foo", "bar"
-);
-
-my $storage_class = ref $storage;
-{
- no strict 'refs';
- *{$storage_class .'::__test_method'} = $test_func;
-}
-$storage->dbh_do("__test_method", "foo", "bar");
-
-
\ No newline at end of file
Copied: DBIx-Class/0.08/branches/sybase/t/inflate/datetime_determine_parser.t (from rev 6643, DBIx-Class/0.08/branches/sybase/t/36datetime.t)
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/inflate/datetime_determine_parser.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/inflate/datetime_determine_parser.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,28 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+eval { require DateTime::Format::SQLite };
+plan $@ ? ( skip_all => 'Requires DateTime::Format::SQLite' )
+ : ( tests => 3 );
+
+my $schema = DBICTest->init_schema(
+ no_deploy => 1, # Deploying would cause an early rebless
+);
+
+is(
+ ref $schema->storage, 'DBIx::Class::Storage::DBI',
+ 'Starting with generic storage'
+);
+
+# Calling datetime_parser should cause the storage to be reblessed,
+# so that we can pick up datetime_parser_type from subclasses
+
+my $parser = $schema->storage->datetime_parser();
+
+is($parser, 'DateTime::Format::SQLite', 'Got expected storage-set datetime_parser');
+isa_ok($schema->storage, 'DBIx::Class::Storage::DBI::SQLite', 'storage');
+
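
A short sketch of how the lazily determined parser checked above is typically consumed — assuming DateTime::Format::SQLite is installed and the stock DBICTest schema from t/lib:

  use strict;
  use warnings;
  use lib 't/lib';
  use DBICTest;

  my $schema = DBICTest->init_schema;

  # The first call determines the driver, reblesses the storage and returns
  # the driver-specific formatter class (DateTime::Format::SQLite here)
  my $parser = $schema->storage->datetime_parser;

  # The same round-trip InflateColumn::DateTime performs on inflate/deflate
  my $dt  = $parser->parse_datetime('2009-08-29 06:50:56');
  my $str = $parser->format_datetime($dt);
  printf "%s -> %s\n", ref($dt), $str;
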
Modified: DBIx-Class/0.08/branches/sybase/t/lib/DBICTest/Schema/Track.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/lib/DBICTest/Schema/Track.pm 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/lib/DBICTest/Schema/Track.pm 2009-08-29 06:50:56 UTC (rev 7421)
@@ -14,7 +14,7 @@
data_type => 'integer',
},
'position' => {
- data_type => 'integer',
+ data_type => 'int',
accessor => 'pos',
},
'title' => {
Modified: DBIx-Class/0.08/branches/sybase/t/lib/sqlite.sql
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/lib/sqlite.sql 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/lib/sqlite.sql 2009-08-29 06:50:56 UTC (rev 7421)
@@ -1,4 +1,4 @@
--- Created on Thu Aug 20 07:47:13 2009
+-- Created on Tue Aug 25 12:34:34 2009
--
@@ -281,7 +281,7 @@
CREATE TABLE track (
trackid INTEGER PRIMARY KEY NOT NULL,
cd integer NOT NULL,
- position integer NOT NULL,
+ position int NOT NULL,
title varchar(100) NOT NULL,
last_updated_on datetime,
last_updated_at datetime,
Modified: DBIx-Class/0.08/branches/sybase/t/prefetch/grouped.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/prefetch/grouped.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/prefetch/grouped.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -271,4 +271,62 @@
);
}
+{
+ my $cd_rs = $schema->resultset('CD')->search({}, {
+ distinct => 1,
+ join => [qw/ tracks /],
+ prefetch => [qw/ artist /],
+ });
+ is($cd_rs->count, 5, 'complex prefetch + non-prefetching has_many join count correct');
+ is($cd_rs->all, 5, 'complex prefetch + non-prefetching has_many join number of objects correct');
+
+ # make sure join tracks was thrown out
+ is_same_sql_bind (
+ $cd_rs->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield
+ FROM (
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ FROM cd me
+ JOIN artist artist ON artist.artistid = me.artist
+ GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ ) me
+ JOIN artist artist ON artist.artistid = me.artist
+ )',
+ [],
+ );
+
+
+
+ # try the same as above, but add a condition so the tracks join can not be thrown away
+ my $cd_rs2 = $cd_rs->search ({ 'tracks.title' => { '!=' => 'ugabuganoexist' } });
+ is($cd_rs2->count, 5, 'complex prefetch + non-prefetching restricted has_many join count correct');
+ is($cd_rs2->all, 5, 'complex prefetch + non-prefetching restricted has_many join number of objects correct');
+
+ # the outer group_by seems like a necessary evil, if someone can figure out how to take it away
+ # without breaking compat - be my guest
+ is_same_sql_bind (
+ $cd_rs2->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield
+ FROM (
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ FROM cd me
+ LEFT JOIN track tracks ON tracks.cd = me.cdid
+ JOIN artist artist ON artist.artistid = me.artist
+ WHERE ( tracks.title != ? )
+ GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ ) me
+ LEFT JOIN track tracks ON tracks.cd = me.cdid
+ JOIN artist artist ON artist.artistid = me.artist
+ WHERE ( tracks.title != ? )
+ GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield
+ )',
+ [ map { [ 'tracks.title' => 'ugabuganoexist' ] } (1 .. 2) ],
+ );
+}
+
done_testing;
Modified: DBIx-Class/0.08/branches/sybase/t/search/subquery.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/search/subquery.t 2009-08-29 06:36:27 UTC (rev 7420)
+++ DBIx-Class/0.08/branches/sybase/t/search/subquery.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -19,6 +19,17 @@
my @tests = (
{
rs => $cdrs,
+ search => \[ "title = ? AND year LIKE ?", 'buahaha', '20%' ],
+ attrs => { rows => 5 },
+ sqlbind => \[
+ "( SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE (title = ? AND year LIKE ?) LIMIT 5)",
+ 'buahaha',
+ '20%',
+ ],
+ },
+
+ {
+ rs => $cdrs,
search => {
artist_id => { 'in' => $art_rs->search({}, { rows => 1 })->get_column( 'id' )->as_query },
},
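
The new test case above feeds literal SQL with positional binds straight into search() and checks that it survives into as_query together with a rows/LIMIT attribute. A minimal stand-alone sketch of that calling convention, again assuming the stock DBICTest schema:

  use strict;
  use warnings;
  use lib 't/lib';
  use DBICTest;

  my $schema = DBICTest->init_schema;

  # literal WHERE clause as \[ $sql, @bind ]; rows => 5 renders as LIMIT 5
  my $rs = $schema->resultset('CD')->search(
    \[ 'title = ? AND year LIKE ?', 'buahaha', '20%' ],
    { rows => 5 },
  );

  # as_query returns \[ $sql, @bind ], ready for embedding as a subselect
  my ($sql, @bind) = @{ ${ $rs->as_query } };
  print "$sql\n";
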
Copied: DBIx-Class/0.08/branches/sybase/t/storage/base.t (from rev 7318, DBIx-Class/0.08/branches/sybase/t/92storage.t)
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/storage/base.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/storage/base.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,172 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use Data::Dumper;
+
+{
+ package DBICTest::ExplodingStorage::Sth;
+ use strict;
+ use warnings;
+
+ sub execute { die "Kablammo!" }
+
+ sub bind_param {}
+
+ package DBICTest::ExplodingStorage;
+ use strict;
+ use warnings;
+ use base 'DBIx::Class::Storage::DBI::SQLite';
+
+ my $count = 0;
+ sub sth {
+ my ($self, $sql) = @_;
+ return bless {}, "DBICTest::ExplodingStorage::Sth" unless $count++;
+ return $self->next::method($sql);
+ }
+
+ sub connected {
+ return 0 if $count == 1;
+ return shift->next::method(@_);
+ }
+}
+
+plan tests => 17;
+
+my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
+
+is( ref($schema->storage), 'DBIx::Class::Storage::DBI::SQLite',
+ 'Storage reblessed correctly into DBIx::Class::Storage::DBI::SQLite' );
+
+my $storage = $schema->storage;
+$storage->ensure_connected;
+
+eval {
+ $schema->storage->throw_exception('test_exception_42');
+};
+like($@, qr/\btest_exception_42\b/, 'basic exception');
+
+eval {
+ $schema->resultset('CD')->search_literal('broken +%$#$1')->all;
+};
+like($@, qr/prepare_cached failed/, 'exception via DBI->HandleError, etc');
+
+bless $storage, "DBICTest::ExplodingStorage";
+$schema->storage($storage);
+
+eval {
+ $schema->resultset('Artist')->create({ name => "Exploding Sheep" });
+};
+
+is($@, "", "Exploding \$sth->execute was caught");
+
+is(1, $schema->resultset('Artist')->search({name => "Exploding Sheep" })->count,
+ "And the STH was retired");
+
+
+# testing various invocations of connect_info ([ ... ])
+
+my $coderef = sub { 42 };
+my $invocations = {
+ 'connect_info ([ $d, $u, $p, \%attr, \%extra_attr])' => {
+ args => [
+ 'foo',
+ 'bar',
+ undef,
+ {
+ on_connect_do => [qw/a b c/],
+ PrintError => 0,
+ },
+ {
+ AutoCommit => 1,
+ on_disconnect_do => [qw/d e f/],
+ },
+ {
+ unsafe => 1,
+ auto_savepoint => 1,
+ },
+ ],
+ dbi_connect_info => [
+ 'foo',
+ 'bar',
+ undef,
+ {
+ %{$storage->_default_dbi_connect_attributes || {} },
+ PrintError => 0,
+ AutoCommit => 1,
+ },
+ ],
+ },
+
+ 'connect_info ([ \%code, \%extra_attr ])' => {
+ args => [
+ $coderef,
+ {
+ on_connect_do => [qw/a b c/],
+ PrintError => 0,
+ AutoCommit => 1,
+ on_disconnect_do => [qw/d e f/],
+ },
+ {
+ unsafe => 1,
+ auto_savepoint => 1,
+ },
+ ],
+ dbi_connect_info => [
+ $coderef,
+ ],
+ },
+
+ 'connect_info ([ \%attr ])' => {
+ args => [
+ {
+ on_connect_do => [qw/a b c/],
+ PrintError => 1,
+ AutoCommit => 0,
+ on_disconnect_do => [qw/d e f/],
+ user => 'bar',
+ dsn => 'foo',
+ },
+ {
+ unsafe => 1,
+ auto_savepoint => 1,
+ },
+ ],
+ dbi_connect_info => [
+ 'foo',
+ 'bar',
+ undef,
+ {
+ %{$storage->_default_dbi_connect_attributes || {} },
+ PrintError => 1,
+ AutoCommit => 0,
+ },
+ ],
+ },
+};
+
+for my $type (keys %$invocations) {
+
+ # we can not use a cloner portably because of the coderef
+ # so compare dumps instead
+ local $Data::Dumper::Sortkeys = 1;
+ my $arg_dump = Dumper ($invocations->{$type}{args});
+
+ $storage->connect_info ($invocations->{$type}{args});
+
+ is ($arg_dump, Dumper ($invocations->{$type}{args}), "$type didn't modify passed arguments");
+
+
+ is_deeply ($storage->_dbi_connect_info, $invocations->{$type}{dbi_connect_info}, "$type produced correct _dbi_connect_info");
+ ok ( (not $storage->auto_savepoint and not $storage->unsafe), "$type correctly ignored extra hashref");
+
+ is_deeply (
+ [$storage->on_connect_do, $storage->on_disconnect_do ],
+ [ [qw/a b c/], [qw/d e f/] ],
+ "$type correctly parsed DBIC specific on_[dis]connect_do",
+ );
+}
+
+1;
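
t/storage/base.t above cycles connect_info through the argument shapes DBIx::Class accepts. A condensed sketch of the three forms being exercised; My::Schema and the in-memory DSN are stand-ins used purely for illustration:

  use strict;
  use warnings;
  use DBI;

  { package My::Schema; use base 'DBIx::Class::Schema'; }

  my $dsn = 'dbi:SQLite:dbname=:memory:';

  # 1. DBI-style list: dsn, user, pass, %attr, plus trailing hashrefs whose
  #    DBIC-specific keys (on_connect_do, unsafe, ...) are peeled off
  my $s1 = My::Schema->connect(
    $dsn, '', '',
    { PrintError => 0, AutoCommit => 1 },
    { on_connect_do => [ 'PRAGMA synchronous = OFF' ], unsafe => 1 },
  );

  # 2. a coderef that must return a connected $dbh
  my $s2 = My::Schema->connect( sub { DBI->connect($dsn) } );

  # 3. a single hashref carrying dsn/user/password alongside the attributes
  my $s3 = My::Schema->connect({
    dsn           => $dsn,
    user          => '',
    password      => '',
    AutoCommit    => 1,
    on_connect_do => [ 'PRAGMA synchronous = OFF' ],
  });
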
Copied: DBIx-Class/0.08/branches/sybase/t/storage/dbh_do.t (from rev 6490, DBIx-Class/0.08/branches/sybase/t/dbh_do.t)
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/storage/dbh_do.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/storage/dbh_do.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,33 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+
+use Test::More tests => 8;
+use lib qw(t/lib);
+use DBICTest;
+
+
+my $schema = DBICTest->init_schema();
+my $storage = $schema->storage;
+
+my $test_func = sub {
+ is $_[0], $storage;
+ is $_[1], $storage->dbh;
+ is $_[2], "foo";
+ is $_[3], "bar";
+};
+
+$storage->dbh_do(
+ $test_func,
+ "foo", "bar"
+);
+
+my $storage_class = ref $storage;
+{
+ no strict 'refs';
+ *{$storage_class .'::__test_method'} = $test_func;
+}
+$storage->dbh_do("__test_method", "foo", "bar");
+
+
\ No newline at end of file
Copied: DBIx-Class/0.08/branches/sybase/t/storage/dbi_coderef.t (from rev 6490, DBIx-Class/0.08/branches/sybase/t/32connect_code_ref.t)
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/storage/dbi_coderef.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/storage/dbi_coderef.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,24 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 1;
+
+# Set up the "usual" sqlite for DBICTest
+my $normal_schema = DBICTest->init_schema( sqlite_use_file => 1 );
+
+# Steal the dsn, which should be like 'dbi:SQLite:t/var/DBIxClass.db'
+my $normal_dsn = $normal_schema->storage->_dbi_connect_info->[0];
+
+# Make sure we have no active connection
+$normal_schema->storage->disconnect;
+
+# Make a new clone with a new connection, using a code reference
+my $code_ref_schema = $normal_schema->connect(sub { DBI->connect($normal_dsn); });
+
+# Stolen from 60core.t - this just verifies things seem to work at all
+my @art = $code_ref_schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
+cmp_ok(@art, '==', 3, "Three artists returned");
Copied: DBIx-Class/0.08/branches/sybase/t/storage/debug.t (from rev 6490, DBIx-Class/0.08/branches/sybase/t/91debug.t)
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/storage/debug.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/storage/debug.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,73 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::DebugObj;
+use DBIC::SqlMakerTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 7;
+
+ok ( $schema->storage->debug(1), 'debug' );
+ok ( defined(
+ $schema->storage->debugfh(
+ IO::File->new('t/var/sql.log', 'w')
+ )
+ ),
+ 'debugfh'
+ );
+
+$schema->storage->debugfh->autoflush(1);
+my $rs = $schema->resultset('CD')->search({});
+$rs->count();
+
+my $log = new IO::File('t/var/sql.log', 'r') or die($!);
+my $line = <$log>;
+$log->close();
+ok($line =~ /^SELECT COUNT/, 'Log success');
+
+$schema->storage->debugfh(undef);
+$ENV{'DBIC_TRACE'} = '=t/var/foo.log';
+$rs = $schema->resultset('CD')->search({});
+$rs->count();
+$log = new IO::File('t/var/foo.log', 'r') or die($!);
+$line = <$log>;
+$log->close();
+ok($line =~ /^SELECT COUNT/, 'Log success');
+$schema->storage->debugobj->debugfh(undef);
+delete($ENV{'DBIC_TRACE'});
+open(STDERRCOPY, '>&STDERR');
+stat(STDERRCOPY); # nop to get warnings quiet
+close(STDERR);
+eval {
+ $rs = $schema->resultset('CD')->search({});
+ $rs->count();
+};
+ok($@, 'Died on closed FH');
+open(STDERR, '>&STDERRCOPY');
+
+# test trace output correctness for bind params
+{
+ my ($sql, @bind);
+ $schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind));
+
+ my @cds = $schema->resultset('CD')->search( { artist => 1, cdid => { -between => [ 1, 3 ] }, } );
+ is_same_sql_bind(
+ $sql, \@bind,
+ "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) ): '1', '1', '3'",
+ [qw/'1' '1' '3'/],
+ 'got correct SQL with all bind parameters (debugcb)'
+ );
+
+ @cds = $schema->resultset('CD')->search( { artist => 1, cdid => { -between => [ 1, 3 ] }, } );
+ is_same_sql_bind(
+ $sql, \@bind,
+ "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) )", ["'1'", "'1'", "'3'"],
+ 'got correct SQL with all bind parameters (debugobj)'
+ );
+}
+
+1;
Copied: DBIx-Class/0.08/branches/sybase/t/storage/disable_sth_caching.t (from rev 6490, DBIx-Class/0.08/branches/sybase/t/35disable_sth_caching.t)
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/storage/disable_sth_caching.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/storage/disable_sth_caching.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,19 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 2;
+
+# Set up the "usual" sqlite for DBICTest
+my $schema = DBICTest->init_schema;
+
+my $sth_one = $schema->storage->sth('SELECT 42');
+my $sth_two = $schema->storage->sth('SELECT 42');
+$schema->storage->disable_sth_caching(1);
+my $sth_three = $schema->storage->sth('SELECT 42');
+
+ok($sth_one == $sth_two, "statement caching works");
+ok($sth_two != $sth_three, "disabling statement caching works");
Copied: DBIx-Class/0.08/branches/sybase/t/storage/error.t (from rev 6490, DBIx-Class/0.08/branches/sybase/t/18inserterror.t)
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/storage/error.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/storage/error.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,29 @@
+use Class::C3;
+use strict;
+use Test::More;
+use warnings;
+
+BEGIN {
+ eval "use DBD::SQLite";
+ plan $@
+ ? ( skip_all => 'needs DBD::SQLite for testing' )
+ : ( tests => 4 );
+}
+
+use lib qw(t/lib);
+
+use_ok( 'DBICTest' );
+use_ok( 'DBICTest::Schema' );
+my $schema = DBICTest->init_schema;
+
+{
+ my $warnings;
+ local $SIG{__WARN__} = sub { $warnings .= $_[0] };
+ eval {
+ $schema->resultset('CD')
+ ->create({ title => 'vacation in antarctica' })
+ };
+ like $@, qr/NULL/; # as opposed to some other error
+ unlike( $warnings, qr/uninitialized value/, "No warning from Storage" );
+}
+
Copied: DBIx-Class/0.08/branches/sybase/t/storage/on_connect_call.t (from rev 6865, DBIx-Class/0.08/branches/sybase/t/92storage_on_connect_call.t)
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/storage/on_connect_call.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/storage/on_connect_call.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,69 @@
+use strict;
+use warnings;
+no warnings qw/once redefine/;
+
+use lib qw(t/lib);
+use DBICTest;
+
+use Test::More tests => 9;
+
+use DBIx::Class::Storage::DBI;
+my $schema = DBICTest->init_schema(
+ no_connect => 1,
+ no_deploy => 1,
+);
+
+local *DBIx::Class::Storage::DBI::connect_call_foo = sub {
+ isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+ 'got storage in connect_call method';
+ is $_[1], 'bar', 'got param in connect_call method';
+};
+
+local *DBIx::Class::Storage::DBI::disconnect_call_foo = sub {
+ isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+ 'got storage in disconnect_call method';
+};
+
+ok $schema->connection(
+ DBICTest->_database,
+ {
+ on_connect_call => [
+ [ do_sql => 'create table test1 (id integer)' ],
+ [ do_sql => [ 'insert into test1 values (?)', {}, 1 ] ],
+ [ do_sql => sub { ['insert into test1 values (2)'] } ],
+ [ sub { $_[0]->dbh->do($_[1]) }, 'insert into test1 values (3)' ],
+ # this invokes $storage->connect_call_foo('bar') (above)
+ [ foo => 'bar' ],
+ ],
+ on_connect_do => 'insert into test1 values (4)',
+ on_disconnect_call => 'foo',
+ },
+), 'connection()';
+
+is_deeply (
+ $schema->storage->dbh->selectall_arrayref('select * from test1'),
+ [ [ 1 ], [ 2 ], [ 3 ], [ 4 ] ],
+ 'on_connect_call/do actions worked'
+);
+
+local *DBIx::Class::Storage::DBI::connect_call_foo = sub {
+ isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+ 'got storage in connect_call method';
+};
+
+local *DBIx::Class::Storage::DBI::connect_call_bar = sub {
+ isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+ 'got storage in connect_call method';
+};
+
+$schema->storage->disconnect;
+
+ok $schema->connection(
+ DBICTest->_database,
+ {
+ # method list form
+ on_connect_call => [ 'foo', sub { ok 1, "coderef in list form" }, 'bar' ],
+ },
+), 'connection()';
+
+$schema->storage->ensure_connected;
Copied: DBIx-Class/0.08/branches/sybase/t/storage/on_connect_do.t (from rev 6490, DBIx-Class/0.08/branches/sybase/t/92storage_on_connect_do.t)
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/storage/on_connect_do.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/storage/on_connect_do.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,88 @@
+use strict;
+use warnings;
+
+use Test::More tests => 12;
+
+use lib qw(t/lib);
+use base 'DBICTest';
+
+
+my $schema = DBICTest->init_schema(
+ no_connect => 1,
+ no_deploy => 1,
+);
+
+ok $schema->connection(
+ DBICTest->_database,
+ {
+ on_connect_do => 'CREATE TABLE TEST_empty (id INTEGER)',
+ },
+), 'connection()';
+
+is_deeply (
+ $schema->storage->dbh->selectall_arrayref('SELECT * FROM TEST_empty'),
+ [],
+ 'string version on_connect_do() worked'
+);
+
+$schema->storage->disconnect;
+
+ok $schema->connection(
+ DBICTest->_database,
+ {
+ on_connect_do => [
+ 'CREATE TABLE TEST_empty (id INTEGER)',
+ [ 'INSERT INTO TEST_empty VALUES (?)', {}, 2 ],
+ \&insert_from_subref,
+ ],
+ on_disconnect_do =>
+ [\&check_exists, 'DROP TABLE TEST_empty', \&check_dropped],
+ },
+), 'connection()';
+
+is_deeply (
+ $schema->storage->dbh->selectall_arrayref('SELECT * FROM TEST_empty'),
+ [ [ 2 ], [ 3 ], [ 7 ] ],
+ 'on_connect_do() worked'
+);
+eval { $schema->storage->dbh->do('SELECT 1 FROM TEST_nonexistent'); };
+ok $@, 'Searching for nonexistent table dies';
+
+$schema->storage->disconnect();
+
+my($connected, $disconnected, @cb_args);
+ok $schema->connection(
+ DBICTest->_database,
+ {
+ on_connect_do => sub { $connected = 1; @cb_args = @_; },
+ on_disconnect_do => sub { $disconnected = 1 },
+ },
+), 'second connection()';
+$schema->storage->dbh->do('SELECT 1');
+ok $connected, 'on_connect_do() called after connect()';
+ok ! $disconnected, 'on_disconnect_do() not called after connect()';
+$schema->storage->disconnect();
+ok $disconnected, 'on_disconnect_do() called after disconnect()';
+
+isa_ok($cb_args[0], 'DBIx::Class::Storage', 'first arg to on_connect_do hook');
+
+sub check_exists {
+ my $storage = shift;
+ ok $storage->dbh->do('SELECT 1 FROM TEST_empty'), 'Table still exists';
+ return;
+}
+
+sub check_dropped {
+ my $storage = shift;
+ eval { $storage->dbh->do('SELECT 1 FROM TEST_empty'); };
+ ok $@, 'Reading from dropped table fails';
+ return;
+}
+
+sub insert_from_subref {
+ my $storage = shift;
+ return [
+ [ 'INSERT INTO TEST_empty VALUES (?)', {}, 3 ],
+ 'INSERT INTO TEST_empty VALUES (7)',
+ ];
+}
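The coderef form demonstrated by insert_from_subref above also applies to ordinary application connections: the callback receives the storage object, and statements returned from it in an arrayref are executed as well. A small sketch (schema class, DSN and SQL are hypothetical):

    my $schema = My::Schema->connect(
        'dbi:SQLite:app.db', '', '',
        {
            on_connect_do => [
                'CREATE TEMPORARY TABLE scratch (id INTEGER)',
                sub {
                    my $storage = shift;
                    # anything returned in an arrayref is run as additional statements
                    return [ 'INSERT INTO scratch VALUES (1)' ];
                },
            ],
            on_disconnect_do => 'DROP TABLE scratch',
        },
    );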
Copied: DBIx-Class/0.08/branches/sybase/t/storage/ping_count.t (from rev 7318, DBIx-Class/0.08/branches/sybase/t/92storage_ping_count.t)
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/storage/ping_count.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/storage/ping_count.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,61 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use Data::Dumper;
+use DBIC::SqlMakerTest;
+
+my $ping_count = 0;
+
+{
+ local $SIG{__WARN__} = sub {};
+ require DBIx::Class::Storage::DBI;
+
+ my $ping = \&DBIx::Class::Storage::DBI::_ping;
+
+ *DBIx::Class::Storage::DBI::_ping = sub {
+ $ping_count++;
+ goto &$ping;
+ };
+}
+
+
+# measure pings around deploy() separately
+my $schema = DBICTest->init_schema( sqlite_use_file => 1, no_populate => 1 );
+
+is ($ping_count, 0, 'no _ping() calls during deploy');
+$ping_count = 0;
+
+
+
+DBICTest->populate_schema ($schema);
+
+# perform some operations and make sure they don't ping
+
+$schema->resultset('CD')->create({
+ cdid => 6, artist => 3, title => 'mtfnpy', year => 2009
+});
+
+$schema->resultset('CD')->create({
+ cdid => 7, artist => 3, title => 'mtfnpy2', year => 2009
+});
+
+$schema->storage->_dbh->disconnect;
+
+$schema->resultset('CD')->create({
+ cdid => 8, artist => 3, title => 'mtfnpy3', year => 2009
+});
+
+$schema->storage->_dbh->disconnect;
+
+$schema->txn_do(sub {
+ $schema->resultset('CD')->create({
+ cdid => 9, artist => 3, title => 'mtfnpy4', year => 2009
+ });
+});
+
+is $ping_count, 0, 'no _ping() calls';
+
+done_testing;
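The counting wrapper above relies on goto &SUB, which replaces the current call frame with a call to the original sub using the current @_, so the wrapped method behaves exactly as before while the wrapper keeps its tally. The same pattern works for counting calls to any package sub; a generic sketch (the target package and method are hypothetical):

    my $call_count = 0;
    {
        no warnings 'redefine';
        my $orig = \&Some::Class::some_method;    # hypothetical target
        *Some::Class::some_method = sub {
            $call_count++;
            goto &$orig;    # tail-call the original with the same @_
        };
    }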
Copied: DBIx-Class/0.08/branches/sybase/t/storage/reconnect.t (from rev 6490, DBIx-Class/0.08/branches/sybase/t/33storage_reconnect.t)
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/storage/reconnect.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/storage/reconnect.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,73 @@
+use strict;
+use warnings;
+
+use FindBin;
+use File::Copy;
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 6;
+
+my $db_orig = "$FindBin::Bin/../var/DBIxClass.db";
+my $db_tmp = "$db_orig.tmp";
+
+# Set up the "usual" sqlite for DBICTest
+my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
+
+# Make sure we're connected by doing something
+my @art = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
+cmp_ok(@art, '==', 3, "Three artists returned");
+
+# Disconnect the dbh, and be sneaky about it
+# Also test if DBD::SQLite finally knows how to ->disconnect properly
+{
+ my $w;
+ local $SIG{__WARN__} = sub { $w = shift };
+ $schema->storage->_dbh->disconnect;
+ ok ($w !~ /active statement handles/, 'SQLite can disconnect properly');
+}
+
+# Try the operation again - What should happen here is:
+# 1. S::DBI blindly attempts the SELECT, which throws an exception
+# 2. It catches the exception, checks ->{Active}/->ping, sees the disconnected state...
+# 3. Reconnects, and retries the operation
+# 4. Success!
+my @art_two = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
+cmp_ok(@art_two, '==', 3, "Three artists returned");
+
+### Now, disconnect the dbh, and move the db file;
+# create a new one and chmod 000 to prevent SQLite from connecting.
+$schema->storage->_dbh->disconnect;
+move( $db_orig, $db_tmp );
+open DBFILE, '>', $db_orig;
+print DBFILE 'THIS IS NOT A REAL DATABASE';
+close DBFILE;
+chmod 0000, $db_orig;
+
+### Try the operation again... it should fail, since there's no db
+{
+ # Catch the DBI connection error
+ local $SIG{__WARN__} = sub {};
+ eval {
+ my @art_three = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
+ };
+ ok( $@, 'The operation failed' );
+}
+
+### Now, move the db file back to the correct name
+unlink($db_orig);
+move( $db_tmp, $db_orig );
+
+SKIP: {
+ skip "Cannot reconnect if original connection didn't fail", 2
+ if ( $@ =~ /encrypted or is not a database/ );
+
+ ### Try the operation again... this time, it should succeed
+ my @art_four;
+ eval {
+ @art_four = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
+ };
+ ok( !$@, 'The operation succeeded' );
+ cmp_ok( @art_four, '==', 3, "Three artists returned" );
+}
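The retry behaviour described in the comments above (attempt the statement, catch the exception, notice the dead handle, reconnect, retry) can be illustrated with plain DBI. This is only a sketch of the idea, not DBIx::Class's actual implementation; the $connect coderef returning a fresh handle is an assumption:

    use DBI;

    sub select_with_retry {
        my ($dbh_ref, $connect, $sql) = @_;
        my $rows = eval { $$dbh_ref->selectall_arrayref($sql) };
        if (my $err = $@) {
            # only retry when the handle really looks dead
            if (!$$dbh_ref || !$$dbh_ref->{Active} || !$$dbh_ref->ping) {
                $$dbh_ref = $connect->();
                $rows = $$dbh_ref->selectall_arrayref($sql);
            }
            else {
                die $err;    # genuine error, rethrow
            }
        }
        return $rows;
    }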
Copied: DBIx-Class/0.08/branches/sybase/t/storage/replication.t (from rev 7269, DBIx-Class/0.08/branches/sybase/t/93storage_replication.t)
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/storage/replication.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/storage/replication.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,853 @@
+use strict;
+use warnings;
+use lib qw(t/lib);
+use Test::More;
+use Test::Exception;
+use DBICTest;
+use List::Util 'first';
+use Scalar::Util 'reftype';
+use File::Spec;
+use IO::Handle;
+
+BEGIN {
+ eval "use DBIx::Class::Storage::DBI::Replicated; use Test::Moose";
+ plan skip_all => "Deps not installed: $@" if $@;
+}
+
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+use_ok 'DBIx::Class::Storage::DBI::Replicated';
+
+use Moose();
+use MooseX::Types();
+diag "Using Moose version $Moose::VERSION and MooseX::Types version $MooseX::Types::VERSION";
+
+=head1 HOW TO USE
+
+ This is a test of the replicated storage system. It works in one of two
+ ways: either it fakes replication with a couple of SQLite DBs and creative
+ use of copy, or, if you define a couple of %ENV vars correctly, it will
+ test against those instead. In the latter case it assumes your setup is
+ properly replicating. Your results may vary, but this has been demonstrated
+ to work with MySQL native replication.
+
+=cut
+
+
+## ----------------------------------------------------------------------------
+## Build a class to hold all our required testing data and methods.
+## ----------------------------------------------------------------------------
+
+TESTSCHEMACLASSES: {
+
+ ## --------------------------------------------------------------------- ##
+ ## Create an object to contain your replicated stuff.
+ ## --------------------------------------------------------------------- ##
+
+ package DBIx::Class::DBI::Replicated::TestReplication;
+
+ use DBICTest;
+ use base qw/Class::Accessor::Fast/;
+
+ __PACKAGE__->mk_accessors( qw/schema/ );
+
+ ## Initialize the object
+
+ sub new {
+ my ($class, $schema_method) = (shift, shift);
+ my $self = $class->SUPER::new(@_);
+
+ $self->schema( $self->init_schema($schema_method) );
+ return $self;
+ }
+
+ ## Get the Schema and set the replication storage type
+
+ sub init_schema {
+ # current SQLT SQLite producer does not handle DROP TABLE IF EXISTS, trap warnings here
+ local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/ };
+
+ my ($class, $schema_method) = @_;
+
+ my $method = "get_schema_$schema_method";
+ my $schema = $class->$method;
+
+ return $schema;
+ }
+
+ sub get_schema_by_storage_type {
+ DBICTest->init_schema(
+ sqlite_use_file => 1,
+ storage_type=>{
+ '::DBI::Replicated' => {
+ balancer_type=>'::Random',
+ balancer_args=>{
+ auto_validate_every=>100,
+ master_read_weight => 1
+ },
+ }
+ },
+ deploy_args=>{
+ add_drop_table => 1,
+ },
+ );
+ }
+
+ sub get_schema_by_connect_info {
+ DBICTest->init_schema(
+ sqlite_use_file => 1,
+ storage_type=> '::DBI::Replicated',
+ balancer_type=>'::Random',
+ balancer_args=> {
+ auto_validate_every=>100,
+ master_read_weight => 1
+ },
+ deploy_args=>{
+ add_drop_table => 1,
+ },
+ );
+ }
+
+ sub generate_replicant_connect_info {}
+ sub replicate {}
+ sub cleanup {}
+
+ ## --------------------------------------------------------------------- ##
+ ## Add a connect_info option to test option merging.
+ ## --------------------------------------------------------------------- ##
+ {
+ package DBIx::Class::Storage::DBI::Replicated;
+
+ use Moose;
+
+ __PACKAGE__->meta->make_mutable;
+
+ around connect_info => sub {
+ my ($next, $self, $info) = @_;
+ $info->[3]{master_option} = 1;
+ $self->$next($info);
+ };
+
+ __PACKAGE__->meta->make_immutable;
+
+ no Moose;
+ }
+
+ ## --------------------------------------------------------------------- ##
+  ## Subclass for when you are using SQLite for testing; this provides fake
+  ## replication support.
+ ## --------------------------------------------------------------------- ##
+
+ package DBIx::Class::DBI::Replicated::TestReplication::SQLite;
+
+ use DBICTest;
+ use File::Copy;
+ use base 'DBIx::Class::DBI::Replicated::TestReplication';
+
+ __PACKAGE__->mk_accessors(qw/master_path slave_paths/);
+
+ ## Set the master path from DBICTest
+
+ sub new {
+ my $class = shift @_;
+ my $self = $class->SUPER::new(@_);
+
+ $self->master_path( DBICTest->_sqlite_dbfilename );
+ $self->slave_paths([
+ File::Spec->catfile(qw/t var DBIxClass_slave1.db/),
+ File::Spec->catfile(qw/t var DBIxClass_slave2.db/),
+ ]);
+
+ return $self;
+ }
+
+  ## Return an Array of ArrayRefs where each ArrayRef is suitable to use as
+  ## $storage->connect_info when connecting replicants.
+
+ sub generate_replicant_connect_info {
+ my $self = shift @_;
+ my @dsn = map {
+ "dbi:SQLite:${_}";
+ } @{$self->slave_paths};
+
+ my @connect_infos = map { [$_,'','',{AutoCommit=>1}] } @dsn;
+
+ ## Make sure nothing is left over from a failed test
+ $self->cleanup;
+
+ ## try a hashref too
+ my $c = $connect_infos[0];
+ $connect_infos[0] = {
+ dsn => $c->[0],
+ user => $c->[1],
+ password => $c->[2],
+ %{ $c->[3] }
+ };
+
+ @connect_infos
+ }
+
+  ## Do a 'good enough' replication by copying the master dbfile over each of
+  ## the slave dbfiles. This copy-based approach only applies when the master
+  ## is SQLite; otherwise we just pause for a second to let the slaves catch up.
+
+ sub replicate {
+ my $self = shift @_;
+ foreach my $slave (@{$self->slave_paths}) {
+ copy($self->master_path, $slave);
+ }
+ }
+
+  ## Clean up after ourselves. Unlink all the slave paths.
+
+ sub cleanup {
+ my $self = shift @_;
+ foreach my $slave (@{$self->slave_paths}) {
+ if(-e $slave) {
+ unlink $slave;
+ }
+ }
+ }
+
+ ## --------------------------------------------------------------------- ##
+ ## Subclass for when you are setting the databases via custom export vars
+ ## This is for when you have a replicating database setup that you are
+ ## going to test against. You'll need to define the correct $ENV and have
+ ## two slave databases to test against, as well as a replication system
+ ## that will replicate in less than 1 second.
+ ## --------------------------------------------------------------------- ##
+
+ package DBIx::Class::DBI::Replicated::TestReplication::Custom;
+ use base 'DBIx::Class::DBI::Replicated::TestReplication';
+
+  ## Return an Array of ArrayRefs where each ArrayRef is suitable to use as
+  ## $storage->connect_info when connecting replicants.
+
+ sub generate_replicant_connect_info {
+ return (
+ [$ENV{"DBICTEST_SLAVE0_DSN"}, $ENV{"DBICTEST_SLAVE0_DBUSER"}, $ENV{"DBICTEST_SLAVE0_DBPASS"}, {AutoCommit => 1}],
+ [$ENV{"DBICTEST_SLAVE1_DSN"}, $ENV{"DBICTEST_SLAVE1_DBUSER"}, $ENV{"DBICTEST_SLAVE1_DBPASS"}, {AutoCommit => 1}],
+ );
+ }
+
+ ## pause a bit to let the replication catch up
+
+ sub replicate {
+ sleep 1;
+ }
+}
+
+## ----------------------------------------------------------------------------
+## Create an object and run some tests
+## ----------------------------------------------------------------------------
+
+## This first bunch of tests is basic; we just make sure all the bits are behaving
+
+my $replicated_class = DBICTest->has_custom_dsn ?
+ 'DBIx::Class::DBI::Replicated::TestReplication::Custom' :
+ 'DBIx::Class::DBI::Replicated::TestReplication::SQLite';
+
+my $replicated;
+
+for my $method (qw/by_connect_info by_storage_type/) {
+ undef $replicated;
+ ok $replicated = $replicated_class->new($method)
+ => "Created a replication object $method";
+
+ isa_ok $replicated->schema
+ => 'DBIx::Class::Schema';
+
+ isa_ok $replicated->schema->storage
+ => 'DBIx::Class::Storage::DBI::Replicated';
+
+ isa_ok $replicated->schema->storage->balancer
+ => 'DBIx::Class::Storage::DBI::Replicated::Balancer::Random'
+ => 'configured balancer_type';
+}
+
+ok $replicated->schema->storage->meta
+ => 'has a meta object';
+
+isa_ok $replicated->schema->storage->master
+ => 'DBIx::Class::Storage::DBI';
+
+isa_ok $replicated->schema->storage->pool
+ => 'DBIx::Class::Storage::DBI::Replicated::Pool';
+
+does_ok $replicated->schema->storage->balancer
+ => 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+
+ok my @replicant_connects = $replicated->generate_replicant_connect_info
+ => 'got replication connect information';
+
+ok my @replicated_storages = $replicated->schema->storage->connect_replicants(@replicant_connects)
+ => 'Created some storages suitable for replicants';
+
+our %debug;
+$replicated->schema->storage->debug(1);
+$replicated->schema->storage->debugcb(sub {
+ my ($op, $info) = @_;
+ ##warn "\n$op, $info\n";
+ %debug = (
+ op => $op,
+ info => $info,
+ dsn => ($info=~m/\[(.+)\]/)[0],
+ storage_type => $info=~m/REPLICANT/ ? 'REPLICANT' : 'MASTER',
+ );
+});
+
+ok my @all_storages = $replicated->schema->storage->all_storages
+ => '->all_storages';
+
+is scalar @all_storages,
+ 3
+ => 'correct number of ->all_storages';
+
+is ((grep $_->isa('DBIx::Class::Storage::DBI'), @all_storages),
+ 3
+ => '->all_storages are correct type');
+
+my @all_storage_opts =
+ grep { (reftype($_)||'') eq 'HASH' }
+ map @{ $_->_connect_info }, @all_storages;
+
+is ((grep $_->{master_option}, @all_storage_opts),
+ 3
+ => 'connect_info was merged from master to replicants');
+
+my @replicant_names = keys %{ $replicated->schema->storage->replicants };
+
+ok @replicant_names, "found replicant names @replicant_names";
+
+## Silence warning about not supporting the is_replicating method if using the
+## sqlite dbs.
+$replicated->schema->storage->debugobj->silence(1)
+ if first { m{^t/} } @replicant_names;
+
+isa_ok $replicated->schema->storage->balancer->current_replicant
+ => 'DBIx::Class::Storage::DBI';
+
+$replicated->schema->storage->debugobj->silence(0);
+
+ok $replicated->schema->storage->pool->has_replicants
+ => 'does have replicants';
+
+is $replicated->schema->storage->pool->num_replicants => 2
+ => 'has two replicants';
+
+does_ok $replicated_storages[0]
+ => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated_storages[1]
+ => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated->schema->storage->replicants->{$replicant_names[0]}
+ => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated->schema->storage->replicants->{$replicant_names[1]}
+ => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+## Add some info to the database
+
+$replicated
+ ->schema
+ ->populate('Artist', [
+ [ qw/artistid name/ ],
+ [ 4, "Ozric Tentacles"],
+ ]);
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ like $debug{info}, qr/INSERT/, 'Last was an insert';
+
+## Make sure all the slaves have the table definitions
+
+$replicated->replicate;
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
+
+## Silence warning about not supporting the is_replicating method if using the
+## sqlite dbs.
+$replicated->schema->storage->debugobj->silence(1)
+ if first { m{^t/} } @replicant_names;
+
+$replicated->schema->storage->pool->validate_replicants;
+
+$replicated->schema->storage->debugobj->silence(0);
+
+## Make sure we can read the data.
+
+ok my $artist1 = $replicated->schema->resultset('Artist')->find(4)
+ => 'Created Result';
+
+## We removed the test here since master read weight is on, so we can't tell in
+## advance which storage to expect. We turn master read weight off a bit further down.
+## is $debug{storage_type}, 'REPLICANT'
+## => "got last query from a replicant: $debug{dsn}, $debug{info}";
+
+isa_ok $artist1
+ => 'DBICTest::Artist';
+
+is $artist1->name, 'Ozric Tentacles'
+ => 'Found expected name for first result';
+
+## Check that master_read_weight is honored
+{
+ no warnings qw/once redefine/;
+
+ local
+ *DBIx::Class::Storage::DBI::Replicated::Balancer::Random::_random_number =
+ sub { 999 };
+
+ $replicated->schema->storage->balancer->increment_storage;
+
+ is $replicated->schema->storage->balancer->current_replicant,
+ $replicated->schema->storage->master
+ => 'master_read_weight is honored';
+
+ ## turn it off for the duration of the test
+ $replicated->schema->storage->balancer->master_read_weight(0);
+ $replicated->schema->storage->balancer->increment_storage;
+}
+
+## Add some new rows that only the master will have. This is because
+## we overload any type of write operation so that it must hit the master
+## database.
+
+$replicated
+ ->schema
+ ->populate('Artist', [
+ [ qw/artistid name/ ],
+ [ 5, "Doom's Children"],
+ [ 6, "Dead On Arrival"],
+ [ 7, "Watergate"],
+ ]);
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ like $debug{info}, qr/INSERT/, 'Last was an insert';
+
+## Make sure all the slaves have the table definitions
+$replicated->replicate;
+
+## Should find some data now
+
+ok my $artist2 = $replicated->schema->resultset('Artist')->find(5)
+  => 'Sync succeeded';
+
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
+isa_ok $artist2
+ => 'DBICTest::Artist';
+
+is $artist2->name, "Doom's Children"
+ => 'Found expected name for first result';
+
+## What happens when we disconnect all the replicants?
+
+is $replicated->schema->storage->pool->connected_replicants => 2
+ => "both replicants are connected";
+
+$replicated->schema->storage->replicants->{$replicant_names[0]}->disconnect;
+$replicated->schema->storage->replicants->{$replicant_names[1]}->disconnect;
+
+is $replicated->schema->storage->pool->connected_replicants => 0
+ => "both replicants are now disconnected";
+
+## All these should pass, since the database should automatically reconnect
+
+ok my $artist3 = $replicated->schema->resultset('Artist')->find(6)
+ => 'Still finding stuff.';
+
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
+isa_ok $artist3
+ => 'DBICTest::Artist';
+
+is $artist3->name, "Dead On Arrival"
+ => 'Found expected name for first result';
+
+is $replicated->schema->storage->pool->connected_replicants => 1
+  => "At least one replicant reconnected to handle the job";
+
+## What happens when we try to select something that doesn't exist?
+
+ok ! $replicated->schema->resultset('Artist')->find(666)
+ => 'Correctly failed to find something.';
+
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
+## test the reliable option
+
+TESTRELIABLE: {
+
+ $replicated->schema->storage->set_reliable_storage;
+
+ ok $replicated->schema->resultset('Artist')->find(2)
+ => 'Read from master 1';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ ok $replicated->schema->resultset('Artist')->find(5)
+ => 'Read from master 2';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ $replicated->schema->storage->set_balanced_storage;
+
+ ok $replicated->schema->resultset('Artist')->find(3)
+ => 'Read from replicant';
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+}
+
+## Make sure when reliable goes out of scope, we are using replicants again
+
+ok $replicated->schema->resultset('Artist')->find(1)
+ => 'back to replicant 1.';
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
+ok $replicated->schema->resultset('Artist')->find(2)
+ => 'back to replicant 2.';
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
+## set all the replicants to inactive, and make sure the balancer falls back to
+## the master.
+
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
+
+{
+ ## catch the fallback to master warning
+ open my $debugfh, '>', \my $fallback_warning;
+ my $oldfh = $replicated->schema->storage->debugfh;
+ $replicated->schema->storage->debugfh($debugfh);
+
+ ok $replicated->schema->resultset('Artist')->find(2)
+ => 'Fallback to master';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ like $fallback_warning, qr/falling back to master/
+ => 'emits falling back to master warning';
+
+ $replicated->schema->storage->debugfh($oldfh);
+}
+
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
+
+## Silence warning about not supporting the is_replicating method if using the
+## sqlite dbs.
+$replicated->schema->storage->debugobj->silence(1)
+ if first { m{^t/} } @replicant_names;
+
+$replicated->schema->storage->pool->validate_replicants;
+
+$replicated->schema->storage->debugobj->silence(0);
+
+ok $replicated->schema->resultset('Artist')->find(2)
+  => 'Returned to replicants';
+
+is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
+## Getting slave status tests
+
+SKIP: {
+  ## We skip these tests unless you have custom replicants, since the default
+  ## sqlite based replication tests don't support these functions.
+
+ skip 'Cannot Test Replicant Status on Non Replicating Database', 10
+ unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
+
+  $replicated->replicate; ## Give the slaves a chance to catch up.
+
+ ok $replicated->schema->storage->replicants->{$replicant_names[0]}->is_replicating
+ => 'Replicants are replicating';
+
+ is $replicated->schema->storage->replicants->{$replicant_names[0]}->lag_behind_master, 0
+ => 'Replicant is zero seconds behind master';
+
+ ## Test the validate replicants
+
+ $replicated->schema->storage->pool->validate_replicants;
+
+ is $replicated->schema->storage->pool->active_replicants, 2
+ => 'Still have 2 replicants after validation';
+
+  ## Force the replicants to fail the validate test by requiring their lag to
+  ## be negative (i.e. ahead of the master!)
+
+ $replicated->schema->storage->pool->maximum_lag(-10);
+ $replicated->schema->storage->pool->validate_replicants;
+
+ is $replicated->schema->storage->pool->active_replicants, 0
+    => 'No way a replicant can be ahead of the master';
+
+ ## Let's be fair to the replicants again. Let them lag up to 5
+
+ $replicated->schema->storage->pool->maximum_lag(5);
+ $replicated->schema->storage->pool->validate_replicants;
+
+ is $replicated->schema->storage->pool->active_replicants, 2
+ => 'Both replicants in good standing again';
+
+ ## Check auto validate
+
+ is $replicated->schema->storage->balancer->auto_validate_every, 100
+ => "Got the expected value for auto validate";
+
+  ## This will make sure we auto validate every time
+ $replicated->schema->storage->balancer->auto_validate_every(0);
+
+ ## set all the replicants to inactive, and make sure the balancer falls back to
+ ## the master.
+
+ $replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
+ $replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
+
+ ## Ok, now when we go to run a query, autovalidate SHOULD reconnect
+
+ is $replicated->schema->storage->pool->active_replicants => 0
+ => "both replicants turned off";
+
+ ok $replicated->schema->resultset('Artist')->find(5)
+ => 'replicant reactivated';
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
+ is $replicated->schema->storage->pool->active_replicants => 2
+ => "both replicants reactivated";
+}
+
+## Test the reliably callback
+
+ok my $reliably = sub {
+
+ ok $replicated->schema->resultset('Artist')->find(5)
+ => 'replicant reactivated';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+} => 'created coderef properly';
+
+$replicated->schema->storage->execute_reliably($reliably);
+
+## Try something with an error
+
+ok my $unreliably = sub {
+
+ ok $replicated->schema->resultset('ArtistXX')->find(5)
+ => 'replicant reactivated';
+
+} => 'created coderef properly';
+
+throws_ok {$replicated->schema->storage->execute_reliably($unreliably)}
+ qr/Can't find source for ArtistXX/
+ => 'Bad coderef throws proper error';
+
+## Make sure replication came back
+
+ok $replicated->schema->resultset('Artist')->find(3)
+ => 'replicant reactivated';
+
+is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+
+## make sure transactions are set to execute_reliably
+
+ok my $transaction = sub {
+
+ my $id = shift @_;
+
+ $replicated
+ ->schema
+ ->populate('Artist', [
+ [ qw/artistid name/ ],
+ [ $id, "Children of the Grave"],
+ ]);
+
+ ok my $result = $replicated->schema->resultset('Artist')->find($id)
+ => "Found expected artist for $id";
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ ok my $more = $replicated->schema->resultset('Artist')->find(1)
+ => 'Found expected artist again for 1';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ return ($result, $more);
+
+} => 'Created a coderef properly';
+
+## Test the transaction with multi return
+{
+ ok my @return = $replicated->schema->txn_do($transaction, 666)
+ => 'did transaction';
+
+ is $return[0]->id, 666
+ => 'first returned value is correct';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ is $return[1]->id, 1
+ => 'second returned value is correct';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+}
+
+## Test that asking for single return works
+{
+ ok my @return = $replicated->schema->txn_do($transaction, 777)
+ => 'did transaction';
+
+ is $return[0]->id, 777
+ => 'first returned value is correct';
+
+ is $return[1]->id, 1
+ => 'second returned value is correct';
+}
+
+## Test transaction returning a single value
+
+{
+ ok my $result = $replicated->schema->txn_do(sub {
+ ok my $more = $replicated->schema->resultset('Artist')->find(1)
+ => 'found inside a transaction';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+ return $more;
+ }) => 'successfully processed transaction';
+
+ is $result->id, 1
+ => 'Got expected single result from transaction';
+}
+
+## Make sure replication came back
+
+ok $replicated->schema->resultset('Artist')->find(1)
+ => 'replicant reactivated';
+
+is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+
+## Test Discard changes
+
+{
+ ok my $artist = $replicated->schema->resultset('Artist')->find(2)
+ => 'got an artist to test discard changes';
+
+ is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+
+ ok $artist->get_from_storage({force_pool=>'master'})
+ => 'properly discard changes';
+
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+
+ ok $artist->discard_changes({force_pool=>'master'})
+ => 'properly called discard_changes against master (manual attrs)';
+
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+
+ ok $artist->discard_changes()
+ => 'properly called discard_changes against master (default attrs)';
+
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+
+ ok $artist->discard_changes({force_pool=>$replicant_names[0]})
+ => 'properly able to override the default attributes';
+
+ is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}"
+}
+
+## Test some edge cases, like trying to do a transaction inside a transaction, etc
+
+{
+ ok my $result = $replicated->schema->txn_do(sub {
+ return $replicated->schema->txn_do(sub {
+ ok my $more = $replicated->schema->resultset('Artist')->find(1)
+ => 'found inside a transaction inside a transaction';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+ return $more;
+ });
+ }) => 'successfully processed transaction';
+
+ is $result->id, 1
+ => 'Got expected single result from transaction';
+}
+
+{
+ ok my $result = $replicated->schema->txn_do(sub {
+ return $replicated->schema->storage->execute_reliably(sub {
+ return $replicated->schema->txn_do(sub {
+ return $replicated->schema->storage->execute_reliably(sub {
+ ok my $more = $replicated->schema->resultset('Artist')->find(1)
+ => 'found inside crazy deep transactions and execute_reliably';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+ return $more;
+ });
+ });
+ });
+ }) => 'successfully processed transaction';
+
+ is $result->id, 1
+ => 'Got expected single result from transaction';
+}
+
+## Test the force_pool resultset attribute.
+
+{
+ ok my $artist_rs = $replicated->schema->resultset('Artist')
+ => 'got artist resultset';
+
+ ## Turn on Forced Pool Storage
+ ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>'master'})
+ => 'Created a resultset using force_pool storage';
+
+ ok my $artist = $reliable_artist_rs->find(2)
+ => 'got an artist result via force_pool storage';
+
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+}
+
+## Test the force_pool resultset attribute part two.
+
+{
+ ok my $artist_rs = $replicated->schema->resultset('Artist')
+ => 'got artist resultset';
+
+ ## Turn on Forced Pool Storage
+ ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>$replicant_names[0]})
+ => 'Created a resultset using force_pool storage';
+
+ ok my $artist = $reliable_artist_rs->find(2)
+ => 'got an artist result via force_pool storage';
+
+ is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+}
+## Delete the old database files
+$replicated->cleanup;
+
+done_testing;
+
+# vim: sw=4 sts=4 :
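To run the replication test above against a real replicating setup instead of the SQLite fake, the %ENV vars mentioned in the POD need to be set before the test starts. A sketch of that environment, written as Perl for brevity (the MySQL DSNs, hosts and credentials are placeholders; the master connection uses the usual DBICTEST_DSN/DBICTEST_DBUSER/DBICTEST_DBPASS vars):

    $ENV{DBICTEST_DSN}           = 'dbi:mysql:database=dbic_test;host=master';
    $ENV{DBICTEST_DBUSER}        = 'dbic';
    $ENV{DBICTEST_DBPASS}        = 'secret';
    $ENV{DBICTEST_SLAVE0_DSN}    = 'dbi:mysql:database=dbic_test;host=slave0';
    $ENV{DBICTEST_SLAVE0_DBUSER} = 'dbic';
    $ENV{DBICTEST_SLAVE0_DBPASS} = 'secret';
    $ENV{DBICTEST_SLAVE1_DSN}    = 'dbi:mysql:database=dbic_test;host=slave1';
    $ENV{DBICTEST_SLAVE1_DBUSER} = 'dbic';
    $ENV{DBICTEST_SLAVE1_DBPASS} = 'secret';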
Copied: DBIx-Class/0.08/branches/sybase/t/storage/stats.t (from rev 6944, DBIx-Class/0.08/branches/sybase/t/31stats.t)
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/storage/stats.t (rev 0)
+++ DBIx-Class/0.08/branches/sybase/t/storage/stats.t 2009-08-29 06:50:56 UTC (rev 7421)
@@ -0,0 +1,104 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use Test::More;
+
+plan tests => 12;
+
+use lib qw(t/lib);
+
+use_ok('DBICTest');
+my $schema = DBICTest->init_schema();
+
+my $cbworks = 0;
+
+$schema->storage->debugcb(sub { $cbworks = 1; });
+$schema->storage->debug(0);
+my $rs = $schema->resultset('CD')->search({});
+$rs->count();
+ok(!$cbworks, 'Callback not called with debug disabled');
+
+$schema->storage->debug(1);
+
+$rs->count();
+ok($cbworks, 'Debug callback worked.');
+
+my $prof = DBIx::Test::Profiler->new();
+$schema->storage->debugobj($prof);
+
+# Test non-transaction calls.
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+ok(!$prof->{'txn_begin'}, 'txn_begin not called');
+ok(!$prof->{'txn_commit'}, 'txn_commit not called');
+
+$prof->reset();
+
+# Test transaction calls
+$schema->txn_begin();
+ok($prof->{'txn_begin'}, 'txn_begin called');
+
+$rs = $schema->resultset('CD')->search({});
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+
+$schema->txn_commit();
+ok($prof->{'txn_commit'}, 'txn_commit called');
+
+$prof->reset();
+
+# Test a rollback
+$schema->txn_begin();
+$rs = $schema->resultset('CD')->search({});
+$rs->count();
+$schema->txn_rollback();
+ok($prof->{'txn_rollback'}, 'txn_rollback called');
+
+$schema->storage->debug(0);
+
+package DBIx::Test::Profiler;
+use strict;
+
+sub new {
+    my $class = shift;
+    return bless {}, $class;
+}
+
+sub query_start {
+ my $self = shift();
+ $self->{'query_start'} = 1;
+}
+
+sub query_end {
+ my $self = shift();
+ $self->{'query_end'} = 1;
+}
+
+sub txn_begin {
+ my $self = shift();
+ $self->{'txn_begin'} = 1;
+}
+
+sub txn_rollback {
+ my $self = shift();
+ $self->{'txn_rollback'} = 1;
+}
+
+sub txn_commit {
+ my $self = shift();
+ $self->{'txn_commit'} = 1;
+}
+
+sub reset {
+ my $self = shift();
+
+ $self->{'query_start'} = 0;
+ $self->{'query_end'} = 0;
+ $self->{'txn_begin'} = 0;
+ $self->{'txn_rollback'} = 0;
+    $self->{'txn_commit'} = 0;
+}
+
+1;
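The DBIx::Test::Profiler above also doubles as a template for application-side instrumentation: any object providing query_start/query_end and the txn_* hooks can be installed with debugobj(), and the hooks only fire while debug is enabled. A minimal sketch (the logger class is hypothetical, and $schema is assumed to be a connected DBIx::Class::Schema):

    package My::QueryLogger;    # hypothetical debugobj class

    sub new          { bless {}, shift }
    sub query_start  { my ($self, $sql, @bind) = @_; warn "executing: $sql\n" }
    sub query_end    { }
    sub txn_begin    { }
    sub txn_commit   { }
    sub txn_rollback { }

    package main;

    $schema->storage->debugobj(My::QueryLogger->new);
    $schema->storage->debug(1);    # hooks are only called while debug is on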