[Bast-commits] r7065 - in DBIx-Class/0.08/branches/sybase: .
lib/DBIx/Class lib/DBIx/Class/CDBICompat
lib/DBIx/Class/InflateColumn lib/DBIx/Class/Manual
lib/DBIx/Class/Relationship lib/DBIx/Class/ResultSourceProxy
lib/DBIx/Class/Schema lib/DBIx/Class/Storage
lib/DBIx/Class/Storage/DBI lib/DBIx/Class/Storage/DBI/ODBC
lib/DBIx/Class/Storage/DBI/Oracle
lib/DBIx/Class/Storage/DBI/Replicated
lib/DBIx/Class/Storage/DBI/Replicated/Balancer
lib/SQL/Translator/Parser/DBIx
lib/SQL/Translator/Producer/DBIx/Class t
t/lib/DBICTest/Schema t/prefetch
caelum at dev.catalyst.perl.org
Fri Jul 17 07:41:07 GMT 2009
Author: caelum
Date: 2009-07-17 07:41:07 +0000 (Fri, 17 Jul 2009)
New Revision: 7065
Added:
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod
Modified:
DBIx-Class/0.08/branches/sybase/
DBIx-Class/0.08/branches/sybase/Changes
DBIx-Class/0.08/branches/sybase/Makefile.PL
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ColumnCase.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ColumnGroups.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Copy.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ImaDBI.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Iterator.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/LazyLoading.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Relationship.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Relationships.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Retrieve.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/TempColumns.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/DB.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Exception.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/InflateColumn/DateTime.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/InflateColumn/File.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/Cookbook.pod
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/Example.pod
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/FAQ.pod
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/Intro.pod
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Ordered.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/PK.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Relationship.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Relationship/Base.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Relationship/HasMany.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSet.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSetColumn.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSource.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSourceHandle.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSourceProxy/Table.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Row.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Schema.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Schema/Versioned.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/StartupCheck.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/DB2.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Pg.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Types.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/Statistics.pm
DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/UTF8Columns.pm
DBIx-Class/0.08/branches/sybase/lib/SQL/Translator/Parser/DBIx/Class.pm
DBIx-Class/0.08/branches/sybase/lib/SQL/Translator/Producer/DBIx/Class/File.pm
DBIx-Class/0.08/branches/sybase/t/93storage_replication.t
DBIx-Class/0.08/branches/sybase/t/lib/DBICTest/Schema/Bookmark.pm
DBIx-Class/0.08/branches/sybase/t/prefetch/grouped.t
Log:
r5959 at hlagh (orig r7027): caelum | 2009-07-10 17:56:57 -0400
fix PodInherit call in Makefile.PL
r5961 at hlagh (orig r7029): robkinyon | 2009-07-10 18:03:07 -0400
Applied patch from kados regarding use of a DateTime::Format class to validate
r5962 at hlagh (orig r7030): caelum | 2009-07-11 05:26:40 -0400
reword IC::DT doc patch
r6009 at hlagh (orig r7037): dandv | 2009-07-13 08:06:08 -0400
PK::Auto has been part of Core since 2007
r6010 at hlagh (orig r7038): dandv | 2009-07-13 08:15:13 -0400
Fixed has_many example in Intro.pod
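For reference, a generic has_many declaration of the kind the Intro documents looks like this (package, table and column names here are illustrative, not taken from the patch):

    package MyApp::Schema::Result::Artist;
    use strict;
    use warnings;
    use base 'DBIx::Class';

    __PACKAGE__->load_components(qw/Core/);
    __PACKAGE__->table('artist');
    __PACKAGE__->add_columns(qw/artistid name/);
    __PACKAGE__->set_primary_key('artistid');

    # an artist has many CDs; each cd row points back via its artist_id column
    __PACKAGE__->has_many(cds => 'MyApp::Schema::Result::CD', 'artist_id');

    1;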
r6011 at hlagh (orig r7039): dandv | 2009-07-13 16:58:45 -0400
Fixed run-on sentences in FAQ
r6012 at hlagh (orig r7040): dandv | 2009-07-13 17:18:11 -0400
Minor POD fixes in Example.pod
r6013 at hlagh (orig r7041): dandv | 2009-07-13 17:48:18 -0400
Favored using ->single to get the topmost result over the less readable ->slice(0)
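Roughly, the recommended pattern (resultset and column names are illustrative):

    # fetch only the top row; single() avoids building a full cursor
    my $top_cd = $schema->resultset('CD')->search(
        {},
        { order_by => { -desc => 'rating' }, rows => 1 },
    )->single;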
r6014 at hlagh (orig r7042): dandv | 2009-07-13 18:56:31 -0400
Minor POD fixes in Cookbook
r6015 at hlagh (orig r7045): ribasushi | 2009-07-14 07:30:55 -0400
Minor logic cleanup
r6016 at hlagh (orig r7046): ribasushi | 2009-07-14 08:07:11 -0400
grouped prefetch fix
r6023 at hlagh (orig r7053): ijw | 2009-07-15 12:55:35 -0400
Added SQLA link for more comprehensive documentation of the available order_by options
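order_by accepts the full SQL::Abstract syntax; a brief illustration (column names are hypothetical):

    my $rs = $schema->resultset('CD')->search(
        {},
        # ORDER BY year DESC, title
        { order_by => [ { -desc => 'year' }, 'title' ] },
    );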
r6026 at hlagh (orig r7056): caelum | 2009-07-15 18:54:22 -0400
add "smalldatetime" support to IC::DT
r6029 at hlagh (orig r7059): ribasushi | 2009-07-16 00:29:41 -0400
r7013 at Thesaurus (orig r7012): jnapiorkowski | 2009-07-09 17:00:22 +0200
new branch
r7014 at Thesaurus (orig r7013): jnapiorkowski | 2009-07-09 20:06:44 +0200
changed the way transactions are detected for replication to use the standard mechanism; minor doc updates; fixed force_pool so you can force a particular slave; changed the way the debug output is generated
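A sketch of the force_pool attribute mentioned here, assuming a DBIx::Class::Storage::DBI::Replicated setup (resultset name and replicant DSN are illustrative):

    # route this read to the master rather than a replicant
    my $rs = $schema->resultset('Artist')->search(
        undef,
        { force_pool => 'master' },
    );

    # or force a particular replicant, named by its DSN in the pool
    my $slave_rs = $schema->resultset('Artist')->search(
        undef,
        { force_pool => 'dbi:mysql:database=slave1;host=10.0.0.2' },
    );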
r7015 at Thesaurus (orig r7014): jnapiorkowski | 2009-07-09 20:17:03 +0200
more changes to the way debug output works
r7016 at Thesaurus (orig r7015): jnapiorkowski | 2009-07-09 22:26:47 +0200
big update to the test suite so that we now check that the storage we expected was actually used
r7017 at Thesaurus (orig r7016): jnapiorkowski | 2009-07-09 23:23:37 +0200
set correct number of tests, changed the debugging output to not warn on DDL, minor change to a test result class so we can deploy to MySQL properly
r7018 at Thesaurus (orig r7017): jnapiorkowski | 2009-07-09 23:26:59 +0200
corrected the number of skipped tests
r7019 at Thesaurus (orig r7018): jnapiorkowski | 2009-07-09 23:52:22 +0200
fixed test resultclass formatting, added a few more DBIC::Storage::DBI methods that I might need to delegate.
r7020 at Thesaurus (orig r7019): jnapiorkowski | 2009-07-10 01:23:07 +0200
some documentation updates, and changed the way we find paths for the SQLite db files to use File::Spec, which I hope will solve some of the Win32 error messages
r7023 at Thesaurus (orig r7022): jnapiorkowski | 2009-07-10 18:00:38 +0200
POD cleanup, fixed broken POD links, and added a new Introduction POD
r7024 at Thesaurus (orig r7023): jnapiorkowski | 2009-07-10 19:10:57 +0200
updated Changes file to reflect work completed
r7025 at Thesaurus (orig r7024): jnapiorkowski | 2009-07-10 19:37:53 +0200
a few more Moose Type related fixes; added a diag to the replication test to report the Moose and Types versions in use, to help us debug some of the Moose-related errors being reported
r7058 at Thesaurus (orig r7057): ribasushi | 2009-07-16 06:28:44 +0200
A couple of typos, and general whitespace cleanup (ick)
r6031 at hlagh (orig r7062): jnapiorkowski | 2009-07-16 11:03:32 -0400
increased Moose version requirements due to changes in the way type constraints get validated, which is not backward compatible
r6032 at hlagh (orig r7063): dandv | 2009-07-16 21:37:28 -0400
Minor POD grammar: it's -> its where appropriate
Property changes on: DBIx-Class/0.08/branches/sybase
___________________________________________________________________
Name: svk:merge
- 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:5969
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:7009
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
+ 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:5969
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:7063
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
Modified: DBIx-Class/0.08/branches/sybase/Changes
===================================================================
--- DBIx-Class/0.08/branches/sybase/Changes 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/Changes 2009-07-17 07:41:07 UTC (rev 7065)
@@ -1,5 +1,12 @@
Revision history for DBIx::Class
+ - Replication updates: Improved the replication tests so that they are
+ more reliable and accurate, and hopefully solve some cross platform
+ issues. Bugfixes related to naming particular replicants in a
+ 'force_pool' attribute. Lots of documentation updates, including a
+ new Introduction.pod file. Fixed the way we detect transaction to
+ make this more reliable and forward looking. Fixed some trouble with
+ the way Moose Types are used.
- Added call to Pod::Inherit in Makefile.PL -
currently at author-time only, so we need to add the produced
.pod files to the MANIFEST
Modified: DBIx-Class/0.08/branches/sybase/Makefile.PL
===================================================================
--- DBIx-Class/0.08/branches/sybase/Makefile.PL 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/Makefile.PL 2009-07-17 07:41:07 UTC (rev 7065)
@@ -144,7 +144,7 @@
}
eval { require Module::Install::Pod::Inherit };
- Module::Install::Pod::Inherit::PodInherit() if !$@;
+ PodInherit() if !$@;
}
auto_install();
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ColumnCase.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ColumnCase.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ColumnCase.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -16,10 +16,10 @@
sub has_a {
my($self, $col, @rest) = @_;
-
+
$self->_declare_has_a(lc $col, @rest);
$self->_mk_inflated_column_accessor($col);
-
+
return 1;
}
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ColumnGroups.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ColumnGroups.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ColumnGroups.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -73,7 +73,7 @@
sub _has_custom_accessor {
my($class, $name) = @_;
-
+
no strict 'refs';
my $existing_accessor = *{$class .'::'. $name}{CODE};
return $existing_accessor && !$our_accessors{$existing_accessor};
@@ -90,7 +90,7 @@
my $fullname = join '::', $class, $name;
*$fullname = Sub::Name::subname $fullname, $accessor;
}
-
+
$our_accessors{$accessor}++;
return 1;
@@ -120,7 +120,7 @@
# warn " $field $alias\n";
{
no strict 'refs';
-
+
$class->_deploy_accessor($name, $accessor);
$class->_deploy_accessor($alias, $accessor);
}
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -39,16 +39,16 @@
my $class = shift;
my $new = $class->next::method(@_);
-
+
$new->_make_columns_as_hash;
-
+
return $new;
}
sub _make_columns_as_hash {
my $self = shift;
-
+
for my $col ($self->columns) {
if( exists $self->{$col} ) {
warn "Skipping mapping $col to a hash key because it exists";
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Copy.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Copy.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Copy.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -25,7 +25,7 @@
sub copy {
my($self, $arg) = @_;
return $self->next::method($arg) if ref $arg;
-
+
my @primary_columns = $self->primary_columns;
croak("Need hash-ref to edit copied column values")
if @primary_columns > 1;
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ImaDBI.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ImaDBI.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/ImaDBI.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -59,7 +59,7 @@
$rel_obj->{cond}, $to, $from) );
return $join;
}
-
+
} );
sub db_Main {
@@ -115,7 +115,7 @@
sub transform_sql {
my ($class, $sql, @args) = @_;
-
+
my $tclass = $class->sql_transformer_class;
$class->ensure_class_loaded($tclass);
my $t = $tclass->new($class, $sql, @args);
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Iterator.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Iterator.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Iterator.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -25,7 +25,7 @@
sub _init_result_source_instance {
my $class = shift;
-
+
my $table = $class->next::method(@_);
$table->resultset_class("DBIx::Class::CDBICompat::Iterator::ResultSet");
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/LazyLoading.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/LazyLoading.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/LazyLoading.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -16,12 +16,12 @@
# request in case the database modifies the new value (say, via a trigger)
sub update {
my $self = shift;
-
+
my @dirty_columns = keys %{$self->{_dirty_columns}};
-
+
my $ret = $self->next::method(@_);
$self->_clear_column_data(@dirty_columns);
-
+
return $ret;
}
@@ -30,12 +30,12 @@
sub create {
my $class = shift;
my($data) = @_;
-
+
my @columns = keys %$data;
-
+
my $obj = $class->next::method(@_);
return $obj unless defined $obj;
-
+
my %primary_cols = map { $_ => 1 } $class->primary_columns;
my @data_cols = grep !$primary_cols{$_}, @columns;
$obj->_clear_column_data(@data_cols);
@@ -46,7 +46,7 @@
sub _clear_column_data {
my $self = shift;
-
+
delete $self->{_column_data}{$_} for @_;
delete $self->{_inflated_column}{$_} for @_;
}
@@ -71,7 +71,7 @@
for my $col ($self->primary_columns) {
$changes->{$col} = undef unless exists $changes->{$col};
}
-
+
return $self->next::method($changes);
}
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -20,9 +20,9 @@
sub nocache {
my $class = shift;
-
+
return $class->__nocache(@_) if @_;
-
+
return 1 if $Class::DBI::Weaken_Is_Available == 0;
return $class->__nocache;
}
@@ -74,9 +74,9 @@
sub inflate_result {
my ($class, @rest) = @_;
my $new = $class->next::method(@rest);
-
+
return $new if $new->nocache;
-
+
if (my $key = $new->ID) {
#warn "Key $key";
my $live = $class->live_object_index;
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Relationship.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Relationship.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Relationship.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -25,7 +25,7 @@
sub new {
my($class, $args) = @_;
-
+
return bless $args, $class;
}
@@ -34,7 +34,7 @@
my $code = sub {
$_[0]->{$key};
};
-
+
no strict 'refs';
*{$method} = Sub::Name::subname $method, $code;
}
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Relationships.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Relationships.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Relationships.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -24,10 +24,10 @@
sub has_a {
my($self, $col, @rest) = @_;
-
+
$self->_declare_has_a($col, @rest);
$self->_mk_inflated_column_accessor($col);
-
+
return 1;
}
@@ -37,7 +37,7 @@
$self->throw_exception( "No such column ${col}" )
unless $self->has_column($col);
$self->ensure_class_loaded($f_class);
-
+
my $rel_info;
if ($args{'inflate'} || $args{'deflate'}) { # Non-database has_a
@@ -50,7 +50,7 @@
$args{'deflate'} = sub { shift->$meth; };
}
$self->inflate_column($col, \%args);
-
+
$rel_info = {
class => $f_class
};
@@ -59,9 +59,9 @@
$self->belongs_to($col, $f_class);
$rel_info = $self->result_source_instance->relationship_info($col);
}
-
+
$rel_info->{args} = \%args;
-
+
$self->_extend_meta(
has_a => $col,
$rel_info
@@ -72,7 +72,7 @@
sub _mk_inflated_column_accessor {
my($class, $col) = @_;
-
+
return $class->mk_group_accessors('inflated_column' => $col);
}
@@ -137,7 +137,7 @@
sub might_have {
my ($class, $rel, $f_class, @columns) = @_;
-
+
my $ret;
if (ref $columns[0] || !defined $columns[0]) {
$ret = $class->next::method($rel, $f_class, @columns);
@@ -153,7 +153,7 @@
might_have => $rel,
$rel_info
);
-
+
return $ret;
}
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Retrieve.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Retrieve.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/Retrieve.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -74,7 +74,7 @@
my $class = shift;
my $obj = $class->resultset_instance->new_result(@_);
$obj->in_storage(1);
-
+
return $obj;
}
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/TempColumns.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/TempColumns.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat/TempColumns.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -11,7 +11,7 @@
sub _add_column_group {
my ($class, $group, @cols) = @_;
-
+
return $class->next::method($group, @cols) unless $group eq 'TEMP';
my %new_cols = map { $_ => 1 } @cols;
@@ -61,11 +61,11 @@
sub set {
my($self, %data) = @_;
-
+
my $temp_data = $self->_extract_temp_data(\%data);
-
+
$self->set_temp($_, $temp_data->{$_}) for keys %$temp_data;
-
+
return $self->next::method(%data);
}
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/CDBICompat.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -11,7 +11,7 @@
DBIx::ContextualFetch
Clone
);
-
+
my @didnt_load;
for my $module (@Extra_Modules) {
push @didnt_load, $module unless eval qq{require $module};
@@ -149,13 +149,13 @@
package Foo;
use base qw(Class::DBI);
-
+
Foo->table("foo");
Foo->columns( All => qw(this that bar) );
package Bar;
use base qw(Class::DBI);
-
+
Bar->table("bar");
Bar->columns( All => qw(up down) );
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/DB.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/DB.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/DB.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -174,7 +174,7 @@
sub result_source_instance {
my $class = shift;
$class = ref $class || $class;
-
+
if (@_) {
my $source = $_[0];
$class->_result_source_instance([$source, $class]);
@@ -186,7 +186,7 @@
return unless Scalar::Util::blessed($source);
if ($result_class ne $class) { # new class
- # Give this new class it's own source and register it.
+ # Give this new class its own source and register it.
$source = $source->new({
%$source,
source_name => $class,
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Exception.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Exception.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Exception.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -61,7 +61,7 @@
else {
$msg = Carp::longmess($msg);
}
-
+
my $self = { msg => $msg };
bless $self => $class;
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/InflateColumn/DateTime.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/InflateColumn/DateTime.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/InflateColumn/DateTime.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -40,17 +40,26 @@
__PACKAGE__->add_columns(
starts_when => { data_type => 'varchar', inflate_datetime => 1 }
);
-
+
__PACKAGE__->add_columns(
starts_when => { data_type => 'varchar', inflate_date => 1 }
);
It's also possible to explicitly skip inflation:
-
+
__PACKAGE__->add_columns(
starts_when => { data_type => 'datetime', inflate_datetime => 0 }
);
+NOTE: Don't rely on C<InflateColumn::DateTime> to parse date strings for you.
+The column is set directly for any non-references and C<InflateColumn::DateTime>
+is completely bypassed. Instead, use an input parser to create a DateTime
+object. For instance, if your user input comes as a 'YYYY-MM-DD' string, you can
+use C<DateTime::Format::ISO8601> thusly:
+
+ use DateTime::Format::ISO8601;
+ my $dt = DateTime::Format::ISO8601->parse_datetime('YYYY-MM-DD');
+
=head1 DESCRIPTION
This module figures out the type of DateTime::Format::* class to
@@ -77,7 +86,7 @@
In the case of an invalid date, L<DateTime> will throw an exception. To
bypass these exceptions and just have the inflation return undef, use
the C<datetime_undef_if_invalid> option in the column info:
-
+
"broken_date",
{
data_type => "datetime",
@@ -110,6 +119,9 @@
if ($type eq "timestamp with time zone" || $type eq "timestamptz") {
$type = "timestamp";
$info->{_ic_dt_method} ||= "timestamp_with_timezone";
+ } elsif ($type eq "smalldatetime") {
+ $type = "datetime";
+ $info->{_ic_dt_method} ||= "datetime";
}
}
@@ -126,7 +138,7 @@
"please put it directly into the '$column' column definition.";
$locale = $info->{extra}{locale};
}
-
+
$locale = $info->{locale} if defined $info->{locale};
$timezone = $info->{timezone} if defined $info->{timezone};
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/InflateColumn/File.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/InflateColumn/File.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/InflateColumn/File.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -58,7 +58,7 @@
sub insert {
my $self = shift;
-
+
# cache our file columns so we can write them to the fs
# -after- we have a PK
my %file_column;
@@ -114,7 +114,7 @@
In your L<DBIx::Class> table class:
__PACKAGE__->load_components( "PK::Auto", "InflateColumn::File", "Core" );
-
+
# define your columns
__PACKAGE__->add_columns(
"id",
@@ -136,8 +136,8 @@
size => 255,
},
);
-
+
In your L<Catalyst::Controller> class:
FileColumn requires a hash that contains L<IO::File> as handle and the file's
@@ -152,15 +152,15 @@
body => '....'
});
$c->stash->{entry}=$entry;
-
+
And Place the following in your TT template
-
+
Article Subject: [% entry.subject %]
Uploaded File:
<a href="/static/files/[% entry.id %]/[% entry.filename.filename %]">File</a>
Body: [% entry.body %]
-
+
The file will be stored on the filesystem for later retrieval. Calling delete
on your resultset will delete the file from the filesystem. Retrevial of the
record automatically inflates the column back to the set hash with the
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/Cookbook.pod
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/Cookbook.pod 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/Cookbook.pod 2009-07-17 07:41:07 UTC (rev 7065)
@@ -1,4 +1,4 @@
-=head1 NAME
+=head1 NAME
DBIx::Class::Manual::Cookbook - Miscellaneous recipes
@@ -62,12 +62,12 @@
Sometimes you need only the first "top" row of a resultset. While this can be
easily done with L<< $rs->first|DBIx::Class::ResultSet/first >>, it is suboptimal,
as a full blown cursor for the resultset will be created and then immediately
-destroyed after fetching the first row object.
+destroyed after fetching the first row object.
L<< $rs->single|DBIx::Class::ResultSet/single >> is
designed specifically for this case - it will grab the first returned result
-without even instantiating a cursor.
+without even instantiating a cursor.
-Before replacing all your calls to C<first()> with C<single()> please observe the
+Before replacing all your calls to C<first()> with C<single()> please observe the
following CAVEATS:
=over
@@ -96,50 +96,50 @@
Sometimes you have to run arbitrary SQL because your query is too complex
(e.g. it contains Unions, Sub-Selects, Stored Procedures, etc.) or has to
-be optimized for your database in a special way, but you still want to
-get the results as a L<DBIx::Class::ResultSet>.
-The recommended way to accomplish this is by defining a separate ResultSource
-for your query. You can then inject complete SQL statements using a scalar
+be optimized for your database in a special way, but you still want to
+get the results as a L<DBIx::Class::ResultSet>.
+The recommended way to accomplish this is by defining a separate ResultSource
+for your query. You can then inject complete SQL statements using a scalar
reference (this is a feature of L<SQL::Abstract>).
Say you want to run a complex custom query on your user data, here's what
you have to add to your User class:
package My::Schema::Result::User;
-
+
use base qw/DBIx::Class/;
-
+
# ->load_components, ->table, ->add_columns, etc.
# Make a new ResultSource based on the User class
my $source = __PACKAGE__->result_source_instance();
my $new_source = $source->new( $source );
$new_source->source_name( 'UserFriendsComplex' );
-
+
# Hand in your query as a scalar reference
# It will be added as a sub-select after FROM,
# so pay attention to the surrounding brackets!
$new_source->name( \<<SQL );
- ( SELECT u.* FROM user u
- INNER JOIN user_friends f ON u.id = f.user_id
+ ( SELECT u.* FROM user u
+ INNER JOIN user_friends f ON u.id = f.user_id
WHERE f.friend_user_id = ?
- UNION
- SELECT u.* FROM user u
- INNER JOIN user_friends f ON u.id = f.friend_user_id
+ UNION
+ SELECT u.* FROM user u
+ INNER JOIN user_friends f ON u.id = f.friend_user_id
WHERE f.user_id = ? )
- SQL
+ SQL
# Finally, register your new ResultSource with your Schema
My::Schema->register_extra_source( 'UserFriendsComplex' => $new_source );
Next, you can execute your complex query using bind parameters like this:
- my $friends = [ $schema->resultset( 'UserFriendsComplex' )->search( {},
+ my $friends = [ $schema->resultset( 'UserFriendsComplex' )->search( {},
{
bind => [ 12345, 12345 ]
}
) ];
-
+
... and you'll get back a perfect L<DBIx::Class::ResultSet> (except, of course,
that you cannot modify the rows it contains, ie. cannot call L</update>,
L</delete>, ... on it).
@@ -231,7 +231,7 @@
# Define accessor manually:
sub name_length { shift->get_column('name_length'); }
-
+
# Or use DBIx::Class::AccessorGroup:
__PACKAGE__->mk_group_accessors('column' => 'name_length');
@@ -242,7 +242,7 @@
{
columns => [ qw/artist_id name rank/ ],
distinct => 1
- }
+ }
);
my $rs = $schema->resultset('Artist')->search(
@@ -279,7 +279,7 @@
my $count = $rs->count;
# Equivalent SQL:
- # SELECT COUNT( * ) FROM (SELECT me.name FROM artist me GROUP BY me.name) count_subq:
+ # SELECT COUNT( * ) FROM (SELECT me.name FROM artist me GROUP BY me.name) count_subq:
=head2 Grouping results
@@ -359,7 +359,7 @@
=head2 Predefined searches
You can write your own L<DBIx::Class::ResultSet> class by inheriting from it
-and define often used searches as methods:
+and defining often used searches as methods:
package My::DBIC::ResultSet::CD;
use strict;
@@ -439,7 +439,7 @@
my $rs = $schema->resultset('CD')->search(
{
- 'artists.name' => 'Bob Marley'
+ 'artists.name' => 'Bob Marley'
},
{
join => 'artists', # join the artist table
@@ -452,7 +452,7 @@
# WHERE artist.name = 'Bob Marley'
In that example both the join, and the condition use the relationship name rather than the table name
-(see DBIx::Class::Manual::Joining for more details on aliasing ).
+(see L<DBIx::Class::Manual::Joining> for more details on aliasing ).
If required, you can now sort on any column in the related tables by including
it in your C<order_by> attribute, (again using the aliased relation name rather than table name) :
@@ -673,7 +673,7 @@
my $schema = $cd->result_source->schema;
# use the schema as normal:
- my $artist_rs = $schema->resultset('Artist');
+ my $artist_rs = $schema->resultset('Artist');
This can be useful when you don't want to pass around a Schema object to every
method.
@@ -693,7 +693,7 @@
=head2 Stringification
-Employ the standard stringification technique by using the C<overload>
+Employ the standard stringification technique by using the L<overload>
module.
To make an object stringify itself as a single column, use something
@@ -741,17 +741,17 @@
# do whatever else you wanted if it was a new row
}
-=head2 Static sub-classing DBIx::Class result classes
+=head2 Static sub-classing DBIx::Class result classes
AKA adding additional relationships/methods/etc. to a model for a
specific usage of the (shared) model.
-B<Schema definition>
-
- package My::App::Schema;
-
- use base DBIx::Class::Schema;
+B<Schema definition>
+ package My::App::Schema;
+
+ use base DBIx::Class::Schema;
+
# load subclassed classes from My::App::Schema::Result/ResultSet
__PACKAGE__->load_namespaces;
@@ -763,35 +763,35 @@
/]});
1;
-
-B<Result-Subclass definition>
-
+
+B<Result-Subclass definition>
+
package My::App::Schema::Result::Baz;
-
- use strict;
- use warnings;
- use base My::Shared::Model::Result::Baz;
-
+
+ use strict;
+ use warnings;
+ use base My::Shared::Model::Result::Baz;
+
# WARNING: Make sure you call table() again in your subclass,
# otherwise DBIx::Class::ResultSourceProxy::Table will not be called
# and the class name is not correctly registered as a source
- __PACKAGE__->table('baz');
-
- sub additional_method {
- return "I'm an additional method only needed by this app";
+ __PACKAGE__->table('baz');
+
+ sub additional_method {
+ return "I'm an additional method only needed by this app";
}
1;
-
-=head2 Dynamic Sub-classing DBIx::Class proxy classes
+=head2 Dynamic Sub-classing DBIx::Class proxy classes
+
AKA multi-class object inflation from one table
-
+
L<DBIx::Class> classes are proxy classes, therefore some different
techniques need to be employed for more than basic subclassing. In
this example we have a single user table that carries a boolean bit
for admin. We would like like to give the admin users
-objects(L<DBIx::Class::Row>) the same methods as a regular user but
+objects (L<DBIx::Class::Row>) the same methods as a regular user but
also special admin only methods. It doesn't make sense to create two
seperate proxy-class files for this. We would be copying all the user
methods into the Admin class. There is a cleaner way to accomplish
@@ -803,128 +803,128 @@
grab the object being returned, inspect the values we are looking for,
bless it if it's an admin object, and then return it. See the example
below:
-
-B<Schema Definition>
-
- package My::Schema;
-
- use base qw/DBIx::Class::Schema/;
-
+
+B<Schema Definition>
+
+ package My::Schema;
+
+ use base qw/DBIx::Class::Schema/;
+
__PACKAGE__->load_namespaces;
1;
-
-
-B<Proxy-Class definitions>
-
- package My::Schema::Result::User;
-
- use strict;
- use warnings;
- use base qw/DBIx::Class/;
-
- ### Defined what our admin class is for ensure_class_loaded
- my $admin_class = __PACKAGE__ . '::Admin';
-
- __PACKAGE__->load_components(qw/Core/);
-
- __PACKAGE__->table('users');
-
- __PACKAGE__->add_columns(qw/user_id email password
- firstname lastname active
- admin/);
-
- __PACKAGE__->set_primary_key('user_id');
-
- sub inflate_result {
- my $self = shift;
- my $ret = $self->next::method(@_);
- if( $ret->admin ) {### If this is an admin rebless for extra functions
- $self->ensure_class_loaded( $admin_class );
- bless $ret, $admin_class;
- }
- return $ret;
- }
-
- sub hello {
- print "I am a regular user.\n";
- return ;
- }
-
+
+
+B<Proxy-Class definitions>
+
+ package My::Schema::Result::User;
+
+ use strict;
+ use warnings;
+ use base qw/DBIx::Class/;
+
+ ### Define what our admin class is, for ensure_class_loaded()
+ my $admin_class = __PACKAGE__ . '::Admin';
+
+ __PACKAGE__->load_components(qw/Core/);
+
+ __PACKAGE__->table('users');
+
+ __PACKAGE__->add_columns(qw/user_id email password
+ firstname lastname active
+ admin/);
+
+ __PACKAGE__->set_primary_key('user_id');
+
+ sub inflate_result {
+ my $self = shift;
+ my $ret = $self->next::method(@_);
+ if( $ret->admin ) {### If this is an admin, rebless for extra functions
+ $self->ensure_class_loaded( $admin_class );
+ bless $ret, $admin_class;
+ }
+ return $ret;
+ }
+
+ sub hello {
+ print "I am a regular user.\n";
+ return ;
+ }
+
1;
-
- package My::Schema::Result::User::Admin;
-
- use strict;
- use warnings;
- use base qw/My::Schema::Result::User/;
+ package My::Schema::Result::User::Admin;
+
+ use strict;
+ use warnings;
+ use base qw/My::Schema::Result::User/;
+
# This line is important
__PACKAGE__->table('users');
-
- sub hello
- {
- print "I am an admin.\n";
- return;
- }
-
- sub do_admin_stuff
- {
- print "I am doing admin stuff\n";
- return ;
+
+ sub hello
+ {
+ print "I am an admin.\n";
+ return;
}
+ sub do_admin_stuff
+ {
+ print "I am doing admin stuff\n";
+ return ;
+ }
+
1;
-
-B<Test File> test.pl
-
- use warnings;
- use strict;
- use My::Schema;
-
- my $user_data = { email => 'someguy@place.com',
- password => 'pass1',
- admin => 0 };
-
- my $admin_data = { email => 'someadmin@adminplace.com',
- password => 'pass2',
- admin => 1 };
-
- my $schema = My::Schema->connection('dbi:Pg:dbname=test');
-
- $schema->resultset('User')->create( $user_data );
- $schema->resultset('User')->create( $admin_data );
-
- ### Now we search for them
- my $user = $schema->resultset('User')->single( $user_data );
- my $admin = $schema->resultset('User')->single( $admin_data );
-
- print ref $user, "\n";
- print ref $admin, "\n";
-
- print $user->password , "\n"; # pass1
- print $admin->password , "\n";# pass2; inherited from User
- print $user->hello , "\n";# I am a regular user.
- print $admin->hello, "\n";# I am an admin.
-
- ### The statement below will NOT print
- print "I can do admin stuff\n" if $user->can('do_admin_stuff');
- ### The statement below will print
- print "I can do admin stuff\n" if $admin->can('do_admin_stuff');
+B<Test File> test.pl
+
+ use warnings;
+ use strict;
+ use My::Schema;
+
+ my $user_data = { email => 'someguy@place.com',
+ password => 'pass1',
+ admin => 0 };
+
+ my $admin_data = { email => 'someadmin@adminplace.com',
+ password => 'pass2',
+ admin => 1 };
+
+ my $schema = My::Schema->connection('dbi:Pg:dbname=test');
+
+ $schema->resultset('User')->create( $user_data );
+ $schema->resultset('User')->create( $admin_data );
+
+ ### Now we search for them
+ my $user = $schema->resultset('User')->single( $user_data );
+ my $admin = $schema->resultset('User')->single( $admin_data );
+
+ print ref $user, "\n";
+ print ref $admin, "\n";
+
+ print $user->password , "\n"; # pass1
+ print $admin->password , "\n";# pass2; inherited from User
+ print $user->hello , "\n";# I am a regular user.
+ print $admin->hello, "\n";# I am an admin.
+
+ ### The statement below will NOT print
+ print "I can do admin stuff\n" if $user->can('do_admin_stuff');
+ ### The statement below will print
+ print "I can do admin stuff\n" if $admin->can('do_admin_stuff');
+
=head2 Skip row object creation for faster results
DBIx::Class is not built for speed, it's built for convenience and
ease of use, but sometimes you just need to get the data, and skip the
fancy objects.
-
+
To do this simply use L<DBIx::Class::ResultClass::HashRefInflator>.
-
+
my $rs = $schema->resultset('CD');
-
+
$rs->result_class('DBIx::Class::ResultClass::HashRefInflator');
-
+
my $hash_ref = $rs->find(1);
Wasn't that easy?
@@ -968,7 +968,7 @@
my $rs = $schema->resultset('Items')->search(
{},
- {
+ {
select => [ { sum => 'Cost' } ],
as => [ 'total_cost' ], # remember this 'as' is for DBIx::Class::ResultSet not SQL
}
@@ -997,7 +997,7 @@
print $c;
}
-C<ResultSetColumn> only has a limited number of built-in functions, if
+C<ResultSetColumn> only has a limited number of built-in functions. If
you need one that it doesn't have, then you can use the C<func> method
instead:
@@ -1012,7 +1012,7 @@
=head2 Creating a result set from a set of rows
-Sometimes you have a (set of) row objects that you want to put into a
+Sometimes you have a (set of) row objects that you want to put into a
resultset without the need to hit the DB again. You can do that by using the
L<set_cache|DBIx::Class::Resultset/set_cache> method:
@@ -1047,7 +1047,7 @@
=head2 Ordering a relationship result set
-If you always want a relation to be ordered, you can specify this when you
+If you always want a relation to be ordered, you can specify this when you
create the relationship.
To order C<< $book->pages >> by descending page_number, create the relation
@@ -1108,9 +1108,9 @@
package MyDatabase::Main::Artist;
use base qw/DBIx::Class/;
__PACKAGE__->load_components(qw/PK::Auto Core/);
-
+
__PACKAGE__->table('database1.artist'); # will use "database1.artist" in FROM clause
-
+
__PACKAGE__->add_columns(qw/ artist_id name /);
__PACKAGE__->set_primary_key('artist_id');
__PACKAGE__->has_many('cds' => 'MyDatabase::Main::Cd');
@@ -1131,16 +1131,16 @@
package MyDatabase::Schema;
use Moose;
-
+
extends 'DBIx::Class::Schema';
-
+
around connection => sub {
my ( $inner, $self, $dsn, $username, $pass, $attr ) = ( shift, @_ );
-
+
my $postfix = delete $attr->{schema_name_postfix};
-
+
$inner->(@_);
-
+
if ( $postfix ) {
$self->append_db_name($postfix);
}
@@ -1148,18 +1148,18 @@
sub append_db_name {
my ( $self, $postfix ) = @_;
-
- my @sources_with_db
- = grep
- { $_->name =~ /^\w+\./mx }
- map
- { $self->source($_) }
+
+ my @sources_with_db
+ = grep
+ { $_->name =~ /^\w+\./mx }
+ map
+ { $self->source($_) }
$self->sources;
-
+
foreach my $source (@sources_with_db) {
my $name = $source->name;
$name =~ s{^(\w+)\.}{${1}${postfix}\.}mx;
-
+
$source->name($name);
}
}
@@ -1171,17 +1171,17 @@
then simply iterate over all the Schema's ResultSources, renaming them as
needed.
-To use this facility, simply add or modify the \%attr hashref that is passed to
+To use this facility, simply add or modify the \%attr hashref that is passed to
L<connection|DBIx::Class::Schama/connect>, as follows:
- my $schema
+ my $schema
= MyDatabase::Schema->connect(
- $dsn,
- $user,
+ $dsn,
+ $user,
$pass,
{
schema_name_postfix => '_dev'
- # ... Other options as desired ...
+ # ... Other options as desired ...
})
Obviously, one could accomplish even more advanced mapping via a hash map or a
@@ -1227,14 +1227,14 @@
transactions (for databases that support them) will hopefully be added
in the future.
-=head1 SQL
+=head1 SQL
=head2 Creating Schemas From An Existing Database
-L<DBIx::Class::Schema::Loader> will connect to a database and create a
+L<DBIx::Class::Schema::Loader> will connect to a database and create a
L<DBIx::Class::Schema> and associated sources by examining the database.
-The recommend way of achieving this is to use the
+The recommend way of achieving this is to use the
L<make_schema_at|DBIx::Class::Schema::Loader/make_schema_at> method:
perl -MDBIx::Class::Schema::Loader=make_schema_at,dump_to_dir:./lib \
@@ -1296,7 +1296,7 @@
your database.
Make a table class as you would for any other table
-
+
package MyAppDB::Dual;
use strict;
use warnings;
@@ -1307,34 +1307,34 @@
"dummy",
{ data_type => "VARCHAR2", is_nullable => 0, size => 1 },
);
-
+
Once you've loaded your table class select from it using C<select>
and C<as> instead of C<columns>
-
+
my $rs = $schema->resultset('Dual')->search(undef,
{ select => [ 'sydate' ],
as => [ 'now' ]
},
);
-
+
All you have to do now is be careful how you access your resultset, the below
will not work because there is no column called 'now' in the Dual table class
-
+
while (my $dual = $rs->next) {
print $dual->now."\n";
}
# Can't locate object method "now" via package "MyAppDB::Dual" at headshot.pl line 23.
-
+
You could of course use 'dummy' in C<as> instead of 'now', or C<add_columns> to
your Dual class for whatever you wanted to select from dual, but that's just
silly, instead use C<get_column>
-
+
while (my $dual = $rs->next) {
print $dual->get_column('now')."\n";
}
-
+
Or use C<cursor>
-
+
my $cursor = $rs->cursor;
while (my @vals = $cursor->next) {
print $vals[0]."\n";
@@ -1351,48 +1351,48 @@
parser_args => { sources => [ grep $_ ne 'Dual', schema->sources ] },
};
$schema->create_ddl_dir( [qw/Oracle/], undef, './sql', undef, $sqlt_args );
-
+
Or use L<DBIx::Class::ResultClass::HashRefInflator>
-
+
$rs->result_class('DBIx::Class::ResultClass::HashRefInflator');
while ( my $dual = $rs->next ) {
print $dual->{now}."\n";
}
-
+
Here are some example C<select> conditions to illustrate the different syntax
-you could use for doing stuff like
+you could use for doing stuff like
C<oracles.heavily(nested(functions_can('take', 'lots'), OF), 'args')>
-
+
# get a sequence value
select => [ 'A_SEQ.nextval' ],
-
+
# get create table sql
select => [ { 'dbms_metadata.get_ddl' => [ "'TABLE'", "'ARTIST'" ]} ],
-
+
# get a random num between 0 and 100
select => [ { "trunc" => [ { "dbms_random.value" => [0,100] } ]} ],
-
+
# what year is it?
select => [ { 'extract' => [ \'year from sysdate' ] } ],
-
+
# do some math
select => [ {'round' => [{'cos' => [ \'180 * 3.14159265359/180' ]}]}],
-
+
# which day of the week were you born on?
select => [{'to_char' => [{'to_date' => [ "'25-DEC-1980'", "'dd-mon-yyyy'" ]}, "'day'"]}],
-
+
# select 16 rows from dual
select => [ "'hello'" ],
as => [ 'world' ],
group_by => [ 'cube( 1, 2, 3, 4 )' ],
-
-
+
+
=head2 Adding Indexes And Functions To Your SQL
Often you will want indexes on columns on your table to speed up searching. To
-do this, create a method called C<sqlt_deploy_hook> in the relevant source
-class (refer to the advanced
+do this, create a method called C<sqlt_deploy_hook> in the relevant source
+class (refer to the advanced
L<callback system|DBIx::Class::ResultSource/sqlt_deploy_callback> if you wish
to share a hook between multiple sources):
@@ -1409,13 +1409,13 @@
1;
-Sometimes you might want to change the index depending on the type of the
+Sometimes you might want to change the index depending on the type of the
database for which SQL is being generated:
my ($db_type = $sqlt_table->schema->translator->producer_type)
=~ s/^SQL::Translator::Producer:://;
-You can also add hooks to the schema level to stop certain tables being
+You can also add hooks to the schema level to stop certain tables being
created:
package My::Schema;
@@ -1508,7 +1508,7 @@
Alternatively, you can send the conversion sql scripts to your
customers as above.
-=head2 Setting quoting for the generated SQL.
+=head2 Setting quoting for the generated SQL.
If the database contains column names with spaces and/or reserved words, they
need to be quoted in the SQL queries. This is done using:
@@ -1518,14 +1518,14 @@
The first sets the quote characters. Either a pair of matching
brackets, or a C<"> or C<'>:
-
+
__PACKAGE__->storage->sql_maker->quote_char('"');
Check the documentation of your database for the correct quote
characters to use. C<name_sep> needs to be set to allow the SQL
generator to put the quotes the correct place.
-In most cases you should set these as part of the arguments passed to
+In most cases you should set these as part of the arguments passed to
L<DBIx::Class::Schema/connect>:
my $schema = My::Schema->connect(
@@ -1553,7 +1553,7 @@
The JDBC bridge is one way of getting access to a MSSQL server from a platform
that Microsoft doesn't deliver native client libraries for. (e.g. Linux)
-The limit dialect can also be set at connect time by specifying a
+The limit dialect can also be set at connect time by specifying a
C<limit_dialect> key in the final hash as shown above.
=head2 Working with PostgreSQL array types
@@ -1594,7 +1594,7 @@
arrayrefs together with the column name, like this: C<< [column_name => value]
>>.
-=head1 BOOTSTRAPPING/MIGRATING
+=head1 BOOTSTRAPPING/MIGRATING
=head2 Easy migration from class-based to schema-based setup
@@ -1605,10 +1605,10 @@
use MyDB;
use SQL::Translator;
-
+
my $schema = MyDB->schema_instance;
-
- my $translator = SQL::Translator->new(
+
+ my $translator = SQL::Translator->new(
debug => $debug || 0,
trace => $trace || 0,
no_comments => $no_comments || 0,
@@ -1622,13 +1622,13 @@
'prefix' => 'My::Schema',
},
);
-
+
$translator->parser('SQL::Translator::Parser::DBIx::Class');
$translator->producer('SQL::Translator::Producer::DBIx::Class::File');
-
+
my $output = $translator->translate(@args) or die
"Error: " . $translator->error;
-
+
print $output;
You could use L<Module::Find> to search for all subclasses in the MyDB::*
@@ -1657,16 +1657,16 @@
return $new;
}
-For more information about C<next::method>, look in the L<Class::C3>
+For more information about C<next::method>, look in the L<Class::C3>
documentation. See also L<DBIx::Class::Manual::Component> for more
ways to write your own base classes to do this.
People looking for ways to do "triggers" with DBIx::Class are probably
-just looking for this.
+just looking for this.
=head2 Changing one field whenever another changes
-For example, say that you have three columns, C<id>, C<number>, and
+For example, say that you have three columns, C<id>, C<number>, and
C<squared>. You would like to make changes to C<number> and have
C<squared> be automagically set to the value of C<number> squared.
You can accomplish this by overriding C<store_column>:
@@ -1684,7 +1684,7 @@
=head2 Automatically creating related objects
-You might have a class C<Artist> which has many C<CD>s. Further, if you
+You might have a class C<Artist> which has many C<CD>s. Further, you
want to create a C<CD> object every time you insert an C<Artist> object.
You can accomplish this by overriding C<insert> on your objects:
@@ -1881,7 +1881,7 @@
If this preamble is moved into a common base class:
package MyDBICbase;
-
+
use base qw/DBIx::Class/;
__PACKAGE__->load_components(qw/InflateColumn::DateTime Core/);
1;
@@ -1902,7 +1902,7 @@
to load the result classes. This will use L<Module::Find|Module::Find>
to find and load the appropriate modules. Explicitly defining the
classes you wish to load will remove the overhead of
-L<Module::Find|Module::Find> and the related directory operations:-
+L<Module::Find|Module::Find> and the related directory operations:
__PACKAGE__->load_classes(qw/ CD Artist Track /);
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/Example.pod
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/Example.pod 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/Example.pod 2009-07-17 07:41:07 UTC (rev 7065)
@@ -43,7 +43,7 @@
CREATE TABLE artist (
artistid INTEGER PRIMARY KEY,
- name TEXT NOT NULL
+ name TEXT NOT NULL
);
CREATE TABLE cd (
@@ -60,7 +60,7 @@
and create the sqlite database file:
-sqlite3 example.db < example.sql
+ sqlite3 example.db < example.sql
=head3 Set up DBIx::Class::Schema
@@ -78,7 +78,7 @@
Then, create the following DBIx::Class::Schema classes:
MyDatabase/Main.pm:
-
+
package MyDatabase::Main;
use base qw/DBIx::Class::Schema/;
__PACKAGE__->load_namespaces;
@@ -90,7 +90,7 @@
package MyDatabase::Main::Result::Artist;
use base qw/DBIx::Class/;
- __PACKAGE__->load_components(qw/PK::Auto Core/);
+ __PACKAGE__->load_components(qw/Core/);
__PACKAGE__->table('artist');
__PACKAGE__->add_columns(qw/ artistid name /);
__PACKAGE__->set_primary_key('artistid');
@@ -103,7 +103,7 @@
package MyDatabase::Main::Result::Cd;
use base qw/DBIx::Class/;
- __PACKAGE__->load_components(qw/PK::Auto Core/);
+ __PACKAGE__->load_components(qw/Core/);
__PACKAGE__->table('cd');
__PACKAGE__->add_columns(qw/ cdid artist title/);
__PACKAGE__->set_primary_key('cdid');
@@ -117,7 +117,7 @@
package MyDatabase::Main::Result::Track;
use base qw/DBIx::Class/;
- __PACKAGE__->load_components(qw/PK::Auto Core/);
+ __PACKAGE__->load_components(qw/Core/);
__PACKAGE__->table('track');
__PACKAGE__->add_columns(qw/ trackid cd title/);
__PACKAGE__->set_primary_key('trackid');
@@ -137,7 +137,7 @@
my $schema = MyDatabase::Main->connect('dbi:SQLite:db/example.db');
- # here's some of the sql that is going to be generated by the schema
+ # here's some of the SQL that is going to be generated by the schema
# INSERT INTO artist VALUES (NULL,'Michael Jackson');
# INSERT INTO artist VALUES (NULL,'Eminem');
@@ -248,8 +248,8 @@
}
print "\n";
}
-
-
+
+
sub get_cd_by_track {
my $tracktitle = shift;
print "get_cd_by_track($tracktitle):\n";
@@ -264,7 +264,7 @@
my $cd = $rs->first;
print $cd->title . "\n\n";
}
-
+
sub get_cds_by_artist {
my $artistname = shift;
print "get_cds_by_artist($artistname):\n";
@@ -349,20 +349,20 @@
A reference implementation of the database and scripts in this example
is available in the main distribution for DBIx::Class under the
-directory t/examples/Schema
+directory C<t/examples/Schema>.
With these scripts we're relying on @INC looking in the current
working directory. You may want to add the MyDatabase namespaces to
@INC in a different way when it comes to deployment.
-The testdb.pl script is an excellent start for testing your database
+The C<testdb.pl> script is an excellent start for testing your database
model.
-This example uses load_namespaces to load in the appropriate Row classes
-from the MyDatabase::Main::Result namespace, and any required resultset
-classes from the MyDatabase::Main::ResultSet namespace (although we
-created the directory in the directions above we did not add, or need to
-add, any resultset classes).
+This example uses L<DBIx::Class::Schema/load_namespaces> to load in the
+appropriate L<Row|DBIx::Class::Row> classes from the MyDatabase::Main::Result namespace,
+and any required resultset classes from the MyDatabase::Main::ResultSet
+namespace (although we created the directory in the directions above, we did
+not add, or need to add, any resultset classes).
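+
+If you later do want one, a minimal ResultSet class (purely illustrative; the
+example application does not need it) could look like this:
+
+  package MyDatabase::Main::ResultSet::Cd;
+  use base 'DBIx::Class::ResultSet';
+
+  sub search_by_title {
+    my ($self, $title) = @_;
+    return $self->search({ title => $title });
+  }
+
+  1;
+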
=head1 TODO
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/FAQ.pod
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/FAQ.pod 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/FAQ.pod 2009-07-17 07:41:07 UTC (rev 7065)
@@ -87,7 +87,7 @@
to connect with rights to read/write all the schemas/tables as
necessary.
-=back
+=back
=head2 Relationships
@@ -112,7 +112,7 @@
Create a C<belongs_to> relationship for the field containing the
foreign key. See L<DBIx::Class::Relationship/belongs_to>.
-=item .. define a foreign key relationship where the key field may contain NULL?
+=item .. define a foreign key relationship where the key field may contain NULL?
Just create a C<belongs_to> relationship, as above. If the column is
NULL then the inflation to the foreign object will not happen. This
@@ -307,8 +307,8 @@
=item .. fetch a whole column of data instead of a row?
-Call C<get_column> on a L<DBIx::Class::ResultSet>, this returns a
-L<DBIx::Class::ResultSetColumn>, see it's documentation and the
+Call C<get_column> on a L<DBIx::Class::ResultSet>. This returns a
+L<DBIx::Class::ResultSetColumn>. See its documentation and the
L<Cookbook|DBIx::Class::Manual::Cookbook> for details.
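+
+For example, to get every artist name in one list (assuming an C<Artist>
+source with a C<name> column):
+
+  my @names = $schema->resultset('Artist')->get_column('name')->all;
+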
=item .. fetch a formatted column?
@@ -324,22 +324,17 @@
=item .. fetch a single (or topmost) row?
-Sometimes you many only want a single record back from a search. A quick
-way to get that single row is to first run your search as usual:
+See L<DBIx::Class::Manual::Cookbook/Retrieve_one_and_only_one_row_from_a_resultset>.
- ->search->(undef, { order_by => "id DESC" })
+A less readable way is to ask a regular search to return 1 row, using
+L<DBIx::Class::ResultSet/slice>:
-Then call L<DBIx::Class::ResultSet/slice> and ask it only to return 1 row:
-
- ->slice(0)
-
-These two calls can be combined into a single statement:
-
->search(undef, { order_by => "id DESC" })->slice(0)
-Why slice instead of L<DBIx::Class::ResultSet/first> or L<DBIx::Class::ResultSet/single>?
-If supported by the database, slice will use LIMIT/OFFSET to hint to the database that we
-really only need one row. This can result in a significant speed improvement.
+which (if supported by the database) will use LIMIT/OFFSET to hint to the
+database that we really only need one row. This can result in a significant
+speed improvement. The method using L<DBIx::Class::ResultSet/single> mentioned
+in the cookbook can do the same if you pass a C<rows> attribute to the search.
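+
+For example, to fetch only the newest row (assuming C<$rs> is your resultset
+and the table has an auto-incrementing primary key named C<id>):
+
+  my $newest = $rs->search(undef, {
+    order_by => { -desc => 'id' },
+    rows     => 1,
+  })->single;
+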
=item .. refresh a row from storage?
@@ -410,17 +405,17 @@
But note that when using a scalar reference, the column in the database
will be updated, but when you read the value from the object with e.g.
-
+
->somecolumn()
-
+
you still get back the scalar reference to the string, B<not> the new
value in the database. To get that you must refresh the row from storage
using C<discard_changes()>. Or chain your function calls like this:
->update->discard_changes
-
- to update the database and refresh the object in one step.
-
+
+to update the database and refresh the object in one step.
+
=item .. store JSON/YAML in a column and have it deflate/inflate automatically?
You can use L<DBIx::Class::InflateColumn> to accomplish YAML/JSON storage transparently.
@@ -474,7 +469,7 @@
package MyTable;
use Moose; # import Moose
- use Moose::Util::TypeConstraint; # import Moose accessor type constraints
+ use Moose::Util::TypeConstraints; # import Moose accessor type constraints
extends 'DBIx::Class'; # Moose changes the way we define our parent (base) package
@@ -486,7 +481,7 @@
my $row;
- # assume that some where in here $row will get assigned to a MyTable row
+ # assume that somewhere in here $row will get assigned to a MyTable row
$row->non_column_data('some string'); # would set the non_column_data accessor
@@ -494,7 +489,7 @@
$row->update(); # would not inline the non_column_data accessor into the update
-
+
=item How do I use DBIx::Class objects in my TT templates?
Like normal objects, mostly. However you need to watch out for TT
@@ -536,7 +531,7 @@
=item How do I reduce the overhead of database queries?
You can reduce the overhead of object creation within L<DBIx::Class>
-using the tips in L<DBIx::Class::Manual::Cookbook/"Skip row object creation for faster results">
+using the tips in L<DBIx::Class::Manual::Cookbook/"Skip row object creation for faster results">
and L<DBIx::Class::Manual::Cookbook/"Get raw data for blindingly fast results">
=back
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/Intro.pod
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/Intro.pod 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Manual/Intro.pod 2009-07-17 07:41:07 UTC (rev 7065)
@@ -11,7 +11,7 @@
=head1 THE DBIx::Class WAY
Here are a few simple tips that will help you get your bearings with
-DBIx::Class.
+DBIx::Class.
=head2 Tables become Result classes
@@ -29,7 +29,7 @@
=head2 It's all about the ResultSet
So, we've got some ResultSources defined. Now, we want to actually use those
-definitions to help us translate the queries we need into handy perl objects!
+definitions to help us translate the queries we need into handy perl objects!
Let's say we defined a ResultSource for an "album" table with three columns:
"albumid", "artist", and "title". Any time we want to query this table, we'll
@@ -39,18 +39,18 @@
SELECT albumid, artist, title FROM album;
Would be retrieved by creating a ResultSet object from the album table's
-ResultSource, likely by using the "search" method.
+ResultSource, likely by using the "search" method.
DBIx::Class doesn't limit you to creating only simple ResultSets -- if you
wanted to do something like:
SELECT title FROM album GROUP BY title;
-You could easily achieve it.
+You could easily achieve it.
-The important thing to understand:
+The important thing to understand:
- Any time you would reach for a SQL query in DBI, you are
+ Any time you would reach for a SQL query in DBI, you are
creating a DBIx::Class::ResultSet.
=head2 Search is like "prepare"
@@ -109,13 +109,10 @@
Load any components required by each class with the load_components() method.
This should consist of "Core" plus any additional components you want to use.
-For example, if you want serial/auto-incrementing primary keys:
+For example, if you want to force columns to use UTF-8 encoding:
- __PACKAGE__->load_components(qw/ PK::Auto Core /);
+ __PACKAGE__->load_components(qw/ ForceUTF8 Core /);
-C<PK::Auto> is supported for many databases; see L<DBIx::Class::Storage::DBI>
-for more information.
-
Set the table for your class:
__PACKAGE__->table('album');
@@ -142,7 +139,7 @@
is_auto_increment => 0,
default_value => '',
},
- title =>
+ title =>
{ data_type => 'varchar',
size => 256,
is_nullable => 0,
@@ -176,7 +173,8 @@
make a predefined accessor for fetching objects that contain this Table's
foreign key:
- __PACKAGE__->has_many('albums', 'My::Schema::Result::Artist', 'album_id');
+ # in My::Schema::Result::Artist
+ __PACKAGE__->has_many('albums', 'My::Schema::Result::Album', 'artist');
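+
+With that in place, each Artist row object gets an C<albums> accessor: in list
+context it returns the related Album objects, in scalar context a ResultSet
+(C<$artist> below is an illustrative Artist row):
+
+ my @albums    = $artist->albums;
+ my $albums_rs = $artist->albums;
+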
See L<DBIx::Class::Relationship> for more information about the various types of
available relationships and how you can design your own.
@@ -273,7 +271,7 @@
returns an instance of C<My::Schema::Result::Album> that can be used to access the data
in the new record:
- my $new_album = $schema->resultset('Album')->create({
+ my $new_album = $schema->resultset('Album')->create({
title => 'Wish You Were Here',
artist => 'Pink Floyd'
});
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Ordered.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Ordered.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Ordered.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -60,20 +60,20 @@
#!/use/bin/perl
use My::Item;
-
+
my $item = My::Item->create({ name=>'Matt S. Trout' });
# If using grouping_column:
my $item = My::Item->create({ name=>'Matt S. Trout', group_id=>1 });
-
+
my $rs = $item->siblings();
my @siblings = $item->siblings();
-
+
my $sibling;
$sibling = $item->first_sibling();
$sibling = $item->last_sibling();
$sibling = $item->previous_sibling();
$sibling = $item->next_sibling();
-
+
$item->move_previous();
$item->move_next();
$item->move_first();
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/PK.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/PK.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/PK.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -42,16 +42,16 @@
my ($self, $attrs) = @_;
delete $self->{_dirty_columns};
return unless $self->in_storage; # Don't reload if we aren't real!
-
+
if( my $current_storage = $self->get_from_storage($attrs)) {
-
+
# Set $self to the current.
%$self = %$current_storage;
-
+
# Avoid a possible infinite loop with
# sub DESTROY { $_[0]->discard_changes }
bless $current_storage, 'Do::Not::Exist';
-
+
return $self;
} else {
$self->in_storage(0);
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Relationship/Base.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Relationship/Base.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Relationship/Base.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -83,18 +83,18 @@
An arrayref containing a list of accessors in the foreign class to create in
the main class. If, for example, you do the following:
-
+
MyDB::Schema::CD->might_have(liner_notes => 'MyDB::Schema::LinerNotes',
undef, {
proxy => [ qw/notes/ ],
});
-
+
Then, assuming MyDB::Schema::LinerNotes has an accessor named notes, you can do:
my $cd = MyDB::Schema::CD->find(1);
$cd->notes('Notes go here'); # set notes -- LinerNotes object is
# created if it doesn't exist
-
+
=item accessor
Specifies the type of accessor that should be created for the relationship.
@@ -179,7 +179,7 @@
my $rel_info = $self->relationship_info($rel);
$self->throw_exception( "No such relationship ${rel}" )
unless $rel_info;
-
+
return $self->{related_resultsets}{$rel} ||= do {
my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
$attrs = { %{$rel_info->{attrs} || {}}, %$attrs };
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Relationship/HasMany.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Relationship/HasMany.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Relationship/HasMany.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -40,7 +40,7 @@
$class->throw_exception(
"No such column ${f_key} on foreign class ${f_class} ($guess)"
) if $f_class_loaded && !$f_class->has_column($f_key);
-
+
$cond = { "foreign.${f_key}" => "self.${pri}" };
}
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Relationship.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Relationship.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Relationship.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -106,7 +106,7 @@
All helper methods are called similar to the following template:
__PACKAGE__->$method_name('relname', 'Foreign::Class', \%cond | \@cond, \%attrs);
-
+
Both C<$cond> and C<$attrs> are optional. Pass C<undef> for C<$cond> if
you want to use the default value for it, but still want to set C<\%attrs>.
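+
+For example, to accept the default (guessed) join condition while still
+passing relationship attributes (the attribute shown is illustrative):
+
+  My::DBIC::Schema::Author->has_many(
+    books => 'My::DBIC::Schema::Book',
+    undef,
+    { cascade_delete => 0 },
+  );
+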
@@ -297,7 +297,7 @@
'My::DBIC::Schema::Book',
{ 'foreign.author_id' => 'self.id' },
);
-
+
# OR (similar result, assuming related_class is storing our PK, in "author")
# (the "author" is guessed at from "Author" in the class namespace)
My::DBIC::Schema::Author->has_many(
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSet.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSet.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSet.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -1315,9 +1315,12 @@
sub _switch_to_inner_join_if_needed {
my ($self, $from, $alias) = @_;
+ # subqueries and other oddness is naturally not supported
return $from if (
ref $from ne 'ARRAY'
||
+ @$from <= 1
+ ||
ref $from->[0] ne 'HASH'
||
! $from->[0]{-alias}
@@ -1325,10 +1328,6 @@
$from->[0]{-alias} eq $alias
);
- # this would be the case with a subquery - we'll never find
- # the target as it is not in the parseable part of {from}
- return $from if @$from == 1;
-
my $switch_branch;
JOINSCAN:
for my $j (@{$from}[1 .. $#$from]) {
@@ -3091,11 +3090,16 @@
=back
-Which column(s) to order the results by. If a single column name, or
-an arrayref of names is supplied, the argument is passed through
-directly to SQL. The hashref syntax allows for connection-agnostic
-specification of ordering direction:
+Which column(s) to order the results by.
+[The full list of suitable values is documented in
+L<SQL::Abstract/"ORDER BY CLAUSES">; the following is a summary of
+common options.]
+
+If a single column name, or an arrayref of names is supplied, the
+argument is passed through directly to SQL. The hashref syntax allows
+for connection-agnostic specification of ordering direction:
+
For descending order:
order_by => { -desc => [qw/col1 col2 col3/] }
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSetColumn.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSetColumn.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSetColumn.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -338,7 +338,7 @@
sub func {
my ($self,$function) = @_;
my $cursor = $self->func_rs($function)->cursor;
-
+
if( wantarray ) {
return map { $_->[ 0 ] } $cursor->all;
}
@@ -373,9 +373,9 @@
=head2 throw_exception
See L<DBIx::Class::Schema/throw_exception> for details.
-
+
=cut
-
+
sub throw_exception {
my $self=shift;
if (ref $self && $self->{_parent_resultset}) {
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSource.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSource.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSource.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -981,7 +981,7 @@
L<DBIx::Class::Relationship>.
The returned hashref is keyed by the name of the opposing
-relationship, and contains it's data in the same manner as
+relationship, and contains its data in the same manner as
L</relationship_info>.
=cut
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSourceHandle.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSourceHandle.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSourceHandle.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -77,7 +77,7 @@
my ($self, $cloning) = @_;
my $to_serialize = { %$self };
-
+
my $class = $self->schema->class($self->source_moniker);
$to_serialize->{schema} = $class;
return (Storable::freeze($to_serialize));
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSourceProxy/Table.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSourceProxy/Table.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/ResultSourceProxy/Table.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -67,7 +67,7 @@
=head2 table
__PACKAGE__->table('tbl_name');
-
+
Gets or sets the table name.
=cut
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Row.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Row.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Row.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -1332,6 +1332,13 @@
changes made since the row was last read from storage. Actually
implemented in L<DBIx::Class::PK>
+Note: If you are using L<DBIx::Class::Storage::DBI::Replicated> as your
+storage, please keep in mind that if you L</discard_changes> on a row that you
+just updated or created, you should wrap the entire operation inside a
+transaction. Otherwise you run the risk of inserting or updating on the master
+database but reading from a replicant database that has not yet caught up with
+the master, which will produce unexpected results.
+
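+A minimal sketch of the safe pattern (the C<Artist> source and its C<name>
+column are illustrative):
+
+  $schema->txn_do(sub {
+    my $artist = $schema->resultset('Artist')->create({ name => 'Some Band' });
+    $artist->update({ name => 'Some Other Band' });
+    $artist->discard_changes; # the re-read happens inside the transaction,
+                              # so it is served by the master
+  });
+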
=cut
1;
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Schema/Versioned.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Schema/Versioned.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Schema/Versioned.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -308,7 +308,7 @@
# here to be sure.
# XXX - just fix it
$self->storage->sqlt_type;
-
+
my $upgrade_file = $self->ddl_filename(
$self->storage->sqlt_type,
$self->schema_version,
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Schema.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Schema.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Schema.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -511,7 +511,7 @@
general.
Note that C<connect_info> expects an arrayref of arguments, but
-C<connect> does not. C<connect> wraps it's arguments in an arrayref
+C<connect> does not. C<connect> wraps its arguments in an arrayref
before passing them to C<connect_info>.
=head3 Overloading
@@ -755,7 +755,7 @@
[ 2, 'Indie Band' ],
...
]);
-
+
Since wantarray context is basically the same as looping over $rs->create(...)
you won't see any performance benefits and in this case the method is more for
convenience. Void context sends the column information directly to storage
@@ -806,10 +806,10 @@
sub connection {
my ($self, @info) = @_;
return $self if !@info && $self->storage;
-
+
my ($storage_class, $args) = ref $self->storage_type ?
($self->_normalize_storage_type($self->storage_type),{}) : ($self->storage_type, {});
-
+
$storage_class = 'DBIx::Class::Storage'.$storage_class
if $storage_class =~ m/^::/;
eval "require ${storage_class};";
@@ -1146,7 +1146,7 @@
$filename =~ s/::/-/g;
$filename = File::Spec->catfile($dir, "$filename-$version-$type.sql");
$filename =~ s/$version/$preversion-$version/ if($preversion);
-
+
return $filename;
}
@@ -1372,7 +1372,7 @@
$self->throw_exception
("No arguments to load_classes and couldn't load ${base} ($@)")
if $@;
-
+
if ($self eq $target) {
# Pathological case, largely caused by the docs on early C::M::DBIC::Plain
foreach my $moniker ($self->sources) {
@@ -1385,14 +1385,14 @@
$self->connection(@info);
return $self;
}
-
+
my $schema = $self->compose_namespace($target, $base);
{
no strict 'refs';
my $name = join '::', $target, 'schema';
*$name = Sub::Name::subname $name, sub { $schema };
}
-
+
$schema->connection(@info);
foreach my $moniker ($schema->sources) {
my $source = $schema->source($moniker);
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/StartupCheck.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/StartupCheck.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/StartupCheck.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -7,7 +7,7 @@
=head1 SYNOPSIS
use DBIx::Class::StartupCheck;
-
+
=head1 DESCRIPTION
This module used to check for, and if necessary issue a warning for, a
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/DB2.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/DB2.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/DB2.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -21,11 +21,11 @@
sub _sql_maker_opts {
my ( $self, $opts ) = @_;
-
+
if ( $opts ) {
$self->{_sql_maker_opts} = { %$opts };
}
-
+
return { limit_dialect => 'RowNumberOver', %{$self->{_sql_maker_opts}||{}} };
}
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -40,11 +40,11 @@
sub bind_attribute_by_data_type {
my $self = shift;
-
+
my ( $data_type ) = @_;
-
+
return { TYPE => $data_type } if $data_type == DBI::SQL_LONGVARCHAR;
-
+
return;
}
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -23,7 +23,7 @@
sub _sql_maker_opts {
my ($self) = @_;
-
+
$self->dbh_do(sub {
my ($self, $dbh) = @_;
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -139,7 +139,7 @@
sub get_autoinc_seq {
my ($self, $source, $col) = @_;
-
+
$self->dbh_do('_dbh_get_autoinc_seq', $source, $col);
}
@@ -210,7 +210,7 @@
sub _svp_begin {
my ($self, $name) = @_;
-
+
$self->dbh->do("SAVEPOINT $name");
}
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Pg.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Pg.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Pg.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -50,7 +50,7 @@
sub get_autoinc_seq {
my ($self,$source,$col) = @_;
-
+
my @pri = $source->primary_columns;
my ($schema,$table) = $source->name =~ /^(.+)\.(.+)$/ ? ($1,$2)
: (undef,$source->name);
@@ -71,7 +71,7 @@
bytea => { pg_type => DBD::Pg::PG_BYTEA },
blob => { pg_type => DBD::Pg::PG_BYTEA },
};
-
+
if( defined $bind_attributes->{$data_type} ) {
return $bind_attributes->{$data_type};
}
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -12,7 +12,7 @@
This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>. You
shouldn't need to create instances of this class.
-
+
=head1 DESCRIPTION
Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -13,7 +13,7 @@
This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>. You
shouldn't need to create instances of this class.
-
+
=head1 DESCRIPTION
Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -3,7 +3,8 @@
use Moose::Role;
requires 'next_storage';
use MooseX::Types::Moose qw/Int/;
-
+use DBIx::Class::Storage::DBI::Replicated::Pool;
+use DBIx::Class::Storage::DBI::Replicated::Types qw/DBICStorageDBI/;
use namespace::clean -except => 'meta';
=head1 NAME
@@ -13,7 +14,7 @@
=head1 SYNOPSIS
This role is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
-
+
=head1 DESCRIPTION
Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
@@ -48,7 +49,7 @@
has 'master' => (
is=>'ro',
- isa=>'DBIx::Class::Storage::DBI',
+ isa=>DBICStorageDBI,
required=>1,
);
@@ -74,13 +75,13 @@
This attribute returns the next slave to handle a read request. Your L</pool>
attribute has methods to help you shuffle through all the available replicants
-via it's balancer object.
+via its balancer object.
=cut
has 'current_replicant' => (
is=> 'rw',
- isa=>'DBIx::Class::Storage::DBI',
+ isa=>DBICStorageDBI,
lazy_build=>1,
handles=>[qw/
select
@@ -169,10 +170,12 @@
around 'select' => sub {
my ($select, $self, @args) = @_;
-
+
if (my $forced_pool = $args[-1]->{force_pool}) {
delete $args[-1]->{force_pool};
return $self->_get_forced_pool($forced_pool)->select(@args);
+ } elsif($self->master->{transaction_depth}) {
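+ # while inside a transaction, reads must go to the master so they
+ # see any writes made earlier in the same transaction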
+ return $self->master->select(@args);
} else {
$self->increment_storage;
return $self->$select(@args);
@@ -189,10 +192,12 @@
around 'select_single' => sub {
my ($select_single, $self, @args) = @_;
-
+
if (my $forced_pool = $args[-1]->{force_pool}) {
delete $args[-1]->{force_pool};
return $self->_get_forced_pool($forced_pool)->select_single(@args);
+ } elsif($self->master->{transaction_depth}) {
+ return $self->master->select_single(@args);
} else {
$self->increment_storage;
return $self->$select_single(@args);
@@ -224,7 +229,7 @@
return $forced_pool;
} elsif($forced_pool eq 'master') {
return $self->master;
- } elsif(my $replicant = $self->pool->replicants($forced_pool)) {
+ } elsif(my $replicant = $self->pool->replicants->{$forced_pool}) {
return $replicant;
} else {
$self->master->throw_exception("$forced_pool is not a named replicant.");
@@ -233,7 +238,7 @@
=head1 AUTHOR
-John Napiorkowski <john.napiorkowski at takkle.com>
+John Napiorkowski <jjnapiork at cpan.org>
=head1 LICENSE
Added: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod (rev 0)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod 2009-07-17 07:41:07 UTC (rev 7065)
@@ -0,0 +1,185 @@
+package DBIx::Class::Storage::DBI::Replicated::Introduction;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Introduction - Minimum Need to Know
+
+=head1 SYNOPSIS
+
+This is an introductory document for L<DBIx::Class::Storage::DBI::Replicated>.
+
+This document is not an overview of what replication is or why you should be
+using it. It is not a document explaining how to set up MySQL native
+replication either. Copious external resources are available for both. This
+document presumes you have the basics down.
+
+=head1 DESCRIPTION
+
+L<DBIx::Class> supports a framework for using database replication. This system
+is integrated completely, which means that once it's set up you should be able
+to start using a replication cluster without additional work or changes to your
+code. Some caveats apply, primarily related to the proper use of transactions
+(you are wrapping all your database-modifying statements inside a transaction,
+right? ;) ), but in our experience properly written DBIC code will work
+transparently with Replicated storage.
+
+Currently we have support for MySQL native replication, which is relatively
+easy to install and configure. We also currently support a single master with
+one or more replicants (also called 'slaves' in some documentation). However,
+the framework is not specifically tied to MySQL, and supporting other
+replication systems or topologies should be possible. Please bring your
+patches and ideas to the #dbix-class IRC channel or the mailing list.
+
+For an easy way to start playing with MySQL native replication, see:
+L<MySQL::Sandbox>.
+
+If you are using this with a L<Catalyst>-based application, you may also wish
+to see more recent updates to L<Catalyst::Model::DBIC::Schema>, which has
+support for replication configuration options as well.
+
+=head1 REPLICATED STORAGE
+
+By default, when you start L<DBIx::Class>, your Schema (L<DBIx::Class::Schema>)
+is assigned a storage_type, which when fully connected will reflect your
+underlying storage engine as defined by your chosen database driver. For
+example, if you connect to a MySQL database, your storage_type will be
+L<DBIx::Class::Storage::DBI::mysql>. Your storage type class will contain
+database-specific code to help smooth over the differences between databases
+and let L<DBIx::Class> do its thing.
+
+If you want to use replication, you will override this setting so that the
+replicated storage engine will 'wrap' your underlying storages and present to
+the end programmer a unified interface. This wrapper storage class will
+delegate method calls to either a master database or one or more replicated
+databases based on if they are read only (by default sent to the replicants)
+or write (reserved for the master). Additionally, the Replicated storage
+will monitor the health of your replicants and automatically drop them should
+one exceed configurable parameters. Later, it can automatically restore a
+replicant when its health is restored.
+
+This gives you a very robust system, since you can add or drop replicants
+and DBIC will automatically adjust itself accordingly.
+
+Additionally, if you need high data integrity, such as when you are executing
+a transaction, replicated storage will automatically delegate all database
+traffic to the master storage. There are several ways to enable this high
+integrity mode, but wrapping your statements inside a transaction is the easy
+and canonical option.
+
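+For example, a read-modify-write sequence wrapped in
+L<DBIx::Class::Schema/txn_do> (a sketch; the 'User' source and its columns are
+illustrative) will be served entirely by the master:
+
+  $schema->txn_do(sub {
+    my $user = $schema->resultset('User')->find(1);
+    $user->update({ login_count => $user->login_count + 1 });
+  });
+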
+=head1 PARTS OF REPLICATED STORAGE
+
+A replicated storage contains several parts. First, there is the replicated
+storage itself (L<DBIx::Class::Storage::DBI::Replicated>). A replicated storage
+takes a pool of replicants (L<DBIx::Class::Storage::DBI::Replicated::Pool>)
+and a software balancer (L<DBIx::Class::Storage::DBI::Replicated::Balancer>).
+The balancer does the job of splitting up all the read traffic amongst the
+replicants in the Pool. Currently there are two types of balancers: a Random
+one, which chooses a Replicant in the Pool using a naive randomizer algorithm,
+and a First one, which just uses the first replicant in the Pool (and obviously
+is only of value when you have a single replicant).
+
+=head1 REPLICATED STORAGE CONFIGURATION
+
+All the parts of replication can be altered dynamically at runtime, which makes
+it possible to create a system that automatically scales under load by creating
+more replicants as needed, perhaps using a cloud system such as Amazon EC2.
+However, for common use you can set up your replicated storage to be enabled at
+the time you connect the databases. The following is a breakdown of how you
+may wish to do this. Again, if you are using L<Catalyst>, I strongly recommend
+you use (or upgrade to) the latest L<Catalyst::Model::DBIC::Schema>, which makes
+this job even easier.
+
+First, you need to connect your L<DBIx::Class::Schema>. Let's assume you have
+such a schema called "MyApp::Schema".
+
+ use MyApp::Schema;
+ my $schema = MyApp::Schema->connect($dsn, $user, $pass);
+
+Next, you need to set the storage_type.
+
+  $schema->storage_type([
+    '::DBI::Replicated' => {
+      balancer_type => '::Random',
+      balancer_args => {
+        auto_validate_every => 5,
+        master_read_weight => 1
+      },
+      pool_args => {
+        maximum_lag => 2,
+      },
+    }
+  ]);
+
+Let's break down the settings. The method L<DBIx::Class::Schema/storage_type>
+takes an arrayref whose first element is the storage type (a scalar value) and
+whose optional second element is a hash reference of configuration options for
+that storage. In this case, we are setting the Replicated storage type using
+'::DBI::Replicated' as the first element. You will only use a different value
+if you are subclassing the replicated storage, so for now just copy that first
+element.
+
+The second element contains a hash reference of options that get passed to the
+replicated storage. L<DBIx::Class::Storage::DBI::Replicated/balancer_type> is
+the type of software load balancer you will use to split up traffic among all
+your replicants. Right now we have two options, "::Random" and "::First". You
+can review documentation for both at:
+
+L<DBIx::Class::Storage::DBI::Replicated::Balancer::First>,
+L<DBIx::Class::Storage::DBI::Replicated::Balancer::Random>.
+
+In this case we will have three replicants, so the ::Random option is the only
+one that makes sense.
+
+'balancer_args' is passed to the balancer when it's instantiated. All
+balancers have the 'auto_validate_every' option. This is the number of seconds
+we allow to pass between validation checks on a load-balanced replicant. The
+higher the number, the more likely that your reads from the replicant will be
+inconsistent with what's on the master. Setting this number too low will
+result in increased database load, so choose a number with care. Our
+experience is that setting the number to around 5 seconds results in a good
+performance / integrity balance.
+
+'master_read_weight' is an option associated with the ::Random balancer. It
+allows you to let the master be read from. I usually leave this off (default
+is off).
+
+The 'pool_args' are configuration options associated with the replicant pool.
+This object (L<DBIx::Class::Storage::DBI::Replicated::Pool>) manages all the
+declared replicants. 'maximum_lag' is the number of seconds a replicant is
+allowed to lag behind the master before being temporarily removed from the pool.
+Keep in mind that the Balancer option 'auto_validate_every' determines how often
+a replicant is tested against this condition, so the true possible lag can be
+higher than the number you set. The default is zero.
+
+No matter how low you set the maximum_lag or the auto_validate_every settings,
+there is always the chance that your replicants will lag a bit behind the
+master with the replication system built into MySQL. You can ensure reliable
+reads by using a transaction, which will force both read and write activity to
+the master; however, this will increase the load on your master database.
+
+After you've configured the replicated storage, you need to add the connection
+information for the replicants:
+
+ $schema->storage->connect_replicants(
+ [$dsn1, $user, $pass, \%opts],
+ [$dsn2, $user, $pass, \%opts],
+ [$dsn3, $user, $pass, \%opts],
+ );
+
+These replicants should be configured as slaves to the master using the
+instructions for MySQL native replication, or if you are just learning, you
+will find L<MySQL::Sandbox> an easy way to set up a replication cluster.
+
+And now your $schema object is properly configured! Enjoy!
+
+=head1 AUTHOR
+
+John Napiorkowski <jjnapiork at cpan.org>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -18,7 +18,7 @@
This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>. You
shouldn't need to create instances of this class.
-
+
=head1 DESCRIPTION
In a replicated storage type, there is at least one replicant to handle the
@@ -34,7 +34,7 @@
This is a number which defines the maximum allowed lag returned by the
L<DBIx::Class::Storage::DBI/lag_behind_master> method. The default is 0. In
general, this should return a larger number when the replicant is lagging
-behind it's master, however the implementation of this is database specific, so
+behind its master, however the implementation of this is database specific, so
don't count on this number having a fixed meaning. For example, MySQL will
return a number of seconds that the replicating database is lagging.
@@ -51,7 +51,7 @@
=head2 last_validated
This is an integer representing a time since the last time the replicants were
-validated. It's nothing fancy, just an integer provided via the perl time
+validated. It's nothing fancy, just an integer provided via the perl L<time|perlfunc/time>
builtin.
=cut
@@ -89,11 +89,11 @@
actual replicant storage. For example if the $dsn element is something like:
"dbi:SQLite:dbname=dbfile"
-
+
You could access the specific replicant via:
$schema->storage->replicants->{'dbname=dbfile'}
-
+
This attributes also supports the following helper methods:
=over 4
@@ -125,14 +125,15 @@
has 'replicants' => (
is=>'rw',
metaclass => 'Collection::Hash',
- isa=>HashRef['DBIx::Class::Storage::DBI'],
+ isa=>HashRef['Object'],
default=>sub {{}},
provides => {
'set' => 'set_replicant',
- 'get' => 'get_replicant',
+ 'get' => 'get_replicant',
'empty' => 'has_replicants',
'count' => 'num_replicants',
'delete' => 'delete_replicant',
+ 'values' => 'all_replicant_storages',
},
);
@@ -151,7 +152,7 @@
sub connect_replicants {
my $self = shift @_;
my $schema = shift @_;
-
+
my @newly_created = ();
foreach my $connect_info (@_) {
$connect_info = [ $connect_info ]
@@ -169,7 +170,7 @@
$self->set_replicant( $key => $replicant);
push @newly_created, $replicant;
}
-
+
return @newly_created;
}
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -14,7 +14,7 @@
=head1 SYNOPSIS
This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
-
+
=head1 DESCRIPTION
Replicants are DBI Storages that follow a master DBI Storage. Typically this
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Types.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Types.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/Types.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -5,12 +5,15 @@
# L<DBIx::Class::Storage::DBI::Replicated>
use MooseX::Types
- -declare => [qw/BalancerClassNamePart Weight/];
+ -declare => [qw/BalancerClassNamePart Weight DBICSchema DBICStorageDBI/];
use MooseX::Types::Moose qw/ClassName Str Num/;
class_type 'DBIx::Class::Storage::DBI';
class_type 'DBIx::Class::Schema';
+subtype DBICSchema, as 'DBIx::Class::Schema';
+subtype DBICStorageDBI, as 'DBIx::Class::Storage::DBI';
+
subtype BalancerClassNamePart,
as ClassName;
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -13,7 +13,7 @@
=head1 SYNOPSIS
This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
-
+
=head1 DESCRIPTION
This role adds C<DSN: > info to storage debugging output.
@@ -31,7 +31,10 @@
around '_query_start' => sub {
my ($method, $self, $sql, @bind) = @_;
my $dsn = $self->_dbi_connect_info->[0];
- $self->$method("DSN: $dsn SQL: $sql", @bind);
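+ # pull the leading SQL verb out of the statement for the debug line; if the
+ # regex does not match, the list assignment falls back to 'NOP' / 'NO SQL'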
+ my($op, $rest) = (($sql=~m/^(\w+)(.+)$/),'NOP', 'NO SQL');
+ my $storage_type = $self->can('active') ? 'REPLICANT' : 'MASTER';
+
+ $self->$method("$op [DSN_$storage_type=$dsn]$rest", @bind);
};
=head1 ALSO SEE
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI/Replicated.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -2,35 +2,35 @@
BEGIN {
use Carp::Clan qw/^DBIx::Class/;
-
+
## Modules required for Replication support not required for general DBIC
## use, so we explicitly test for these.
-
+
my %replication_required = (
- Moose => '0.77',
- MooseX::AttributeHelpers => '0.12',
- MooseX::Types => '0.10',
- namespace::clean => '0.11',
- Hash::Merge => '0.11'
+ 'Moose' => '0.87',
+ 'MooseX::AttributeHelpers' => '0.20',
+ 'MooseX::Types' => '0.16',
+ 'namespace::clean' => '0.11',
+ 'Hash::Merge' => '0.11'
);
-
+
my @didnt_load;
-
+
for my $module (keys %replication_required) {
eval "use $module $replication_required{$module}";
push @didnt_load, "$module $replication_required{$module}"
if $@;
}
-
+
croak("@{[ join ', ', @didnt_load ]} are missing and are required for Replication")
- if @didnt_load;
+ if @didnt_load;
}
use Moose;
use DBIx::Class::Storage::DBI;
use DBIx::Class::Storage::DBI::Replicated::Pool;
use DBIx::Class::Storage::DBI::Replicated::Balancer;
-use DBIx::Class::Storage::DBI::Replicated::Types 'BalancerClassNamePart';
+use DBIx::Class::Storage::DBI::Replicated::Types qw/BalancerClassNamePart DBICSchema DBICStorageDBI/;
use MooseX::Types::Moose qw/ClassName HashRef Object/;
use Scalar::Util 'reftype';
use Carp::Clan qw/^DBIx::Class/;
@@ -48,33 +48,45 @@
storage type, add some replicated (readonly) databases, and perform reporting
tasks.
- ## Change storage_type in your schema class
+You should set the 'storage_type' attribute to a replicated type. You should
+also define your arguments, such as which balancer you want and any arguments
+that the Pool object should get.
+
$schema->storage_type( ['::DBI::Replicated', {balancer=>'::Random'}] );
-
- ## Add some slaves. Basically this is an array of arrayrefs, where each
- ## arrayref is database connect information
-
+
+Next, you need to add in the Replicants. Basically this is an array of
+arrayrefs, where each arrayref is database connect information. Think of these
+arguments as what you'd pass to the 'normal' $schema->connect method.
+
$schema->storage->connect_replicants(
[$dsn1, $user, $pass, \%opts],
[$dsn2, $user, $pass, \%opts],
[$dsn3, $user, $pass, \%opts],
);
-
- ## Now, just use the $schema as normal
+
+Now, just use the $schema as you normally would. All reads will automatically
+be delegated to the replicants, while writes go to the master.
+
$schema->resultset('Source')->search({name=>'etc'});
-
- ## You can force a given query to use a particular storage using the search
- ### attribute 'force_pool'. For example:
-
+
+You can force a given query to use a particular storage using the search
+attribute 'force_pool'. For example:
+
my $RS = $schema->resultset('Source')->search(undef, {force_pool=>'master'});
-
- ## Now $RS will force everything (both reads and writes) to use whatever was
- ## setup as the master storage. 'master' is hardcoded to always point to the
- ## Master, but you can also use any Replicant name. Please see:
- ## L<DBIx::Class::Storage::Replicated::Pool> and the replicants attribute for
- ## More. Also see transactions and L</execute_reliably> for alternative ways
- ## to force read traffic to the master.
-
+
+Now $RS will force everything (both reads and writes) to use whatever was setup
+as the master storage. 'master' is hardcoded to always point to the Master,
+but you can also use any Replicant name. Please see:
+L<DBIx::Class::Storage::DBI::Replicated::Pool> and the replicants attribute for more.
+
+Also see transactions and L</execute_reliably> for alternative ways to
+force read traffic to the master. In general, you should wrap your statements
+in a transaction when you are reading and writing to the same tables at the
+same time, since your replicants will often lag a bit behind the master.
+
+See L<DBIx::Class::Storage::DBI::Replicated::Introduction> for more help and
+walkthroughs.
+
=head1 DESCRIPTION
Warning: This class is marked BETA. This has been running a production
@@ -100,7 +112,7 @@
=head1 NOTES
The consistency between master and replicants is database specific. The Pool
-gives you a method to validate it's replicants, removing and replacing them
+gives you a method to validate its replicants, removing and replacing them
when they fail/pass predefined criteria. Please make careful use of the ways
to force a query to run against Master when needed.
@@ -108,12 +120,12 @@
Replicated Storage has additional requirements not currently part of L<DBIx::Class>
- Moose => 0.77
- MooseX::AttributeHelpers => 0.12
- MooseX::Types => 0.10
- namespace::clean => 0.11
- Hash::Merge => 0.11
-
+ Moose => '0.87',
+ MooseX::AttributeHelpers => '0.20',
+ MooseX::Types => '0.16',
+ namespace::clean => '0.11',
+ Hash::Merge => '0.11'
+
You will need to install these modules manually via CPAN or make them part of the
Makefile for your distribution.
@@ -129,7 +141,7 @@
has 'schema' => (
is=>'rw',
- isa=>'DBIx::Class::Schema',
+ isa=>DBICSchema,
weak_ref=>1,
required=>1,
);
@@ -153,7 +165,7 @@
=head2 pool_args
Contains a hashref of initialized information to pass to the Pool object.
-See L<DBIx::Class::Storage::Replicated::Pool> for available arguments.
+See L<DBIx::Class::Storage::DBI::Replicated::Pool> for available arguments.
=cut
@@ -186,7 +198,7 @@
=head2 balancer_args
Contains a hashref of initialized information to pass to the Balancer object.
-See L<DBIx::Class::Storage::Replicated::Balancer> for available arguments.
+See L<DBIx::Class::Storage::DBI::Replicated::Balancer> for available arguments.
=cut
@@ -242,7 +254,7 @@
has 'master' => (
is=> 'ro',
- isa=>'DBIx::Class::Storage::DBI',
+ isa=>DBICStorageDBI,
lazy_build=>1,
);
@@ -288,7 +300,8 @@
create_ddl_dir
deployment_statements
datetime_parser
- datetime_parser_type
+ datetime_parser_type
+ build_datetime_parser
last_insert_id
insert
insert_bulk
@@ -303,10 +316,19 @@
sth
deploy
with_deferred_fk_checks
-
+ dbh_do
reload_row
+ with_deferred_fk_checks
_prep_for_execute
-
+
+ backup
+ is_datatype_numeric
+ _count_select
+ _subq_count_select
+ _subq_update_delete
+ svp_rollback
+ svp_begin
+ svp_release
/],
);
@@ -381,7 +403,7 @@
=head2 BUILDARGS
-L<DBIx::Class::Schema> when instantiating it's storage passed itself as the
+L<DBIx::Class::Schema> when instantiating its storage passed itself as the
first argument. So we need to massage the arguments a bit so that all the
bits get put into the correct places.
@@ -389,7 +411,7 @@
sub BUILDARGS {
my ($class, $schema, $storage_type_args, @args) = @_;
-
+
return {
schema=>$schema,
%$storage_type_args,
@@ -546,24 +568,24 @@
sub execute_reliably {
my ($self, $coderef, @args) = @_;
-
+
unless( ref $coderef eq 'CODE') {
$self->throw_exception('Second argument must be a coderef');
}
-
+
##Get copy of master storage
my $master = $self->master;
-
+
##Get whatever the current read hander is
my $current = $self->read_handler;
-
+
##Set the read handler to master
$self->read_handler($master);
-
+
## do whatever the caller needs
my @result;
my $want_array = wantarray;
-
+
eval {
if($want_array) {
@result = $coderef->(@args);
@@ -573,13 +595,13 @@
$coderef->(@args);
}
};
-
+
##Reset to the original state
$self->read_handler($current);
-
+
##Exception testing has to come last, otherwise you might leave the
##read_handler set to master.
-
+
if($@) {
$self->throw_exception("coderef returned an error: $@");
} else {
@@ -591,14 +613,14 @@
Sets the current $schema to be 'reliable', that is, all queries, both read and
write, are sent to the master
-
+
=cut
sub set_reliable_storage {
my $self = shift @_;
my $schema = $self->schema;
my $write_handler = $self->schema->storage->write_handler;
-
+
$schema->storage->read_handler($write_handler);
}
@@ -606,30 +628,17 @@
Sets the current $schema to use the L</balancer> for all reads, while all
writes are sent to the master only
-
+
=cut
sub set_balanced_storage {
my $self = shift @_;
my $schema = $self->schema;
- my $write_handler = $self->schema->storage->balancer;
-
- $schema->storage->read_handler($write_handler);
+ my $balanced_handler = $self->schema->storage->balancer;
+
+ $schema->storage->read_handler($balanced_handler);
}
-=head2 around: txn_do ($coderef)
-
-Overload to the txn_do method, which is delegated to whatever the
-L<write_handler> is set to. We overload this in order to wrap in inside a
-L</execute_reliably> method.
-
-=cut
-
-around 'txn_do' => sub {
- my($txn_do, $self, $coderef, @args) = @_;
- $self->execute_reliably(sub {$self->$txn_do($coderef, @args)});
-};
-
=head2 connected
Check that the master and at least one of the replicants is connected.
@@ -802,7 +811,7 @@
}
$self->master->cursor_class;
}
-
+
=head1 GOTCHAS
Due to the fact that replicants can lag behind a master, you must take care to
@@ -836,7 +845,7 @@
my $new_schema = $schema->clone;
$new_schema->set_reliable_storage;
-
+
## $new_schema will use only the Master storage for all reads/writes while
## the $schema object will use replicated storage.
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/DBI.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -1480,7 +1480,7 @@
my $alias = $attrs->{alias};
my $sql_maker = $self->sql_maker;
- # create subquery select list - loop only over primary columns
+ # create subquery select list - consider only stuff *not* brought in by the prefetch
my $sub_select = [];
for my $i (0 .. @{$attrs->{select}} - @{$attrs->{prefetch_select}} - 1) {
my $sel = $attrs->{select}[$i];
@@ -1489,7 +1489,7 @@
# adjust the outer select accordingly
if (ref $sel eq 'HASH' && !$sel->{-select}) {
$sel = { -select => $sel, -as => $attrs->{as}[$i] };
- $select->[$i] = join ('.', $attrs->{alias}, $attrs->{as}[$i]);
+ $select->[$i] = join ('.', $attrs->{alias}, ($attrs->{as}[$i] || "select_$i") );
}
push @$sub_select, $sel;
@@ -1547,6 +1547,8 @@
{
# produce stuff unquoted, so it can be scanned
local $sql_maker->{quote_char};
+ my $sep = $self->_sql_maker_opts->{name_sep} || '.';
+ $sep = "\Q$sep\E";
my @order_by = (map
{ ref $_ ? $_->[0] : $_ }
@@ -1554,6 +1556,7 @@
);
my $where_sql = $sql_maker->where ($where);
+ my $select_sql = $sql_maker->_recurse_fields ($sub_select);
# sort needed joins
for my $alias (keys %join_info) {
@@ -1561,8 +1564,8 @@
# any table alias found on a column name in where or order_by
# gets included in %inner_joins
# Also any parent joins that are needed to reach this particular alias
- for my $piece ($where_sql, @order_by ) {
- if ($piece =~ /\b$alias\./) {
+ for my $piece ($select_sql, $where_sql, @order_by ) {
+ if ($piece =~ /\b $alias $sep/x) {
$inner_joins{$alias} = 1;
}
}
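A small standalone illustration of the alias scan above, showing why the name
separator is metaquoted before being interpolated into the /x regex (the alias
and SQL fragments are made up):

    use strict;
    use warnings;

    my $alias    = 'tracks';
    my $name_sep = '.';
    my $sep      = "\Q$name_sep\E";    # a bare '.' would match any character

    my %inner_joins;
    for my $piece ('COUNT( tracks.trackid )', 'me.cdid IS NOT NULL') {
        # same test as the patched code: alias followed by the name separator
        $inner_joins{$alias} = 1 if $piece =~ /\b $alias $sep/x;
    }
    print join(', ', keys %inner_joins), "\n";    # prints "tracks"
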
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/Statistics.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/Statistics.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/Storage/Statistics.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -16,7 +16,7 @@
=head1 DESCRIPTION
This class is called by DBIx::Class::Storage::DBI as a means of collecting
-statistics on it's actions. Using this class alone merely prints the SQL
+statistics on its actions. Using this class alone merely prints the SQL
executed, the fact that it completes and begin/end notification for
transactions.
Modified: DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/UTF8Columns.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/UTF8Columns.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/DBIx/Class/UTF8Columns.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -25,7 +25,7 @@
package Artist;
__PACKAGE__->load_components(qw/UTF8Columns Core/);
__PACKAGE__->utf8_columns(qw/name description/);
-
+
# then the following return strings with the utf8 flag
$artist->name;
$artist->get_column('description');
Modified: DBIx-Class/0.08/branches/sybase/lib/SQL/Translator/Parser/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/SQL/Translator/Parser/DBIx/Class.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/SQL/Translator/Parser/DBIx/Class.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -337,7 +337,7 @@
## Standalone
use MyApp::Schema;
use SQL::Translator;
-
+
my $schema = MyApp::Schema->connect;
my $trans = SQL::Translator->new (
parser => 'SQL::Translator::Parser::DBIx::Class',
@@ -353,7 +353,7 @@
C<SQL::Translator::Parser::DBIx::Class> reads a DBIx::Class schema,
interrogates the columns, and stuffs it all in an $sqlt_schema object.
-It's primary use is in deploying database layouts described as a set
+Its primary use is in deploying database layouts described as a set
of L<DBIx::Class> classes, to a database. To do this, see
L<DBIx::Class::Schema/deploy>.
Modified: DBIx-Class/0.08/branches/sybase/lib/SQL/Translator/Producer/DBIx/Class/File.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/lib/SQL/Translator/Producer/DBIx/Class/File.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/lib/SQL/Translator/Producer/DBIx/Class/File.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -128,7 +128,7 @@
$tableextras{$table->name} .= "\n__PACKAGE__->belongs_to('" .
$cont->fields->[0]->name . "', '" .
"${dbixschema}::" . $cont->reference_table . "');\n";
-
+
my $other = "\n__PACKAGE__->has_many('" .
"get_" . $table->name. "', '" .
"${dbixschema}::" . $table->name. "', '" .
Modified: DBIx-Class/0.08/branches/sybase/t/93storage_replication.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/93storage_replication.t 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/t/93storage_replication.t 2009-07-17 07:41:07 UTC (rev 7065)
@@ -6,13 +6,14 @@
use DBICTest;
use List::Util 'first';
use Scalar::Util 'reftype';
+use File::Spec;
use IO::Handle;
BEGIN {
eval "use DBIx::Class::Storage::DBI::Replicated; use Test::Moose";
plan $@
? ( skip_all => "Deps not installed: $@" )
- : ( tests => 90 );
+ : ( tests => 126 );
}
use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
@@ -20,6 +21,10 @@
use_ok 'DBIx::Class::Storage::DBI::Replicated::Replicant';
use_ok 'DBIx::Class::Storage::DBI::Replicated';
+use Moose();
+use MooseX::Types();
+diag "Using Moose version $Moose::VERSION and MooseX::Types version $MooseX::Types::VERSION";
+
=head1 HOW TO USE
This is a test of the replicated storage system. This will work in one of
@@ -142,9 +147,9 @@
use File::Copy;
use base 'DBIx::Class::DBI::Replicated::TestReplication';
- __PACKAGE__->mk_accessors( qw/master_path slave_paths/ );
+ __PACKAGE__->mk_accessors(qw/master_path slave_paths/);
- ## Set the mastep path from DBICTest
+ ## Set the master path from DBICTest
sub new {
my $class = shift @_;
@@ -152,9 +157,9 @@
$self->master_path( DBICTest->_sqlite_dbfilename );
$self->slave_paths([
- "t/var/DBIxClass_slave1.db",
- "t/var/DBIxClass_slave2.db",
- ]);
+ File::Spec->catfile(qw/t var DBIxClass_slave1.db/),
+ File::Spec->catfile(qw/t var DBIxClass_slave2.db/),
+ ]);
return $self;
}
@@ -170,7 +175,10 @@
my @connect_infos = map { [$_,'','',{AutoCommit=>1}] } @dsn;
- # try a hashref too
+ ## Make sure nothing is left over from a failed test
+ $self->cleanup;
+
+ ## try a hashref too
my $c = $connect_infos[0];
$connect_infos[0] = {
dsn => $c->[0],
@@ -198,7 +206,9 @@
sub cleanup {
my $self = shift @_;
foreach my $slave (@{$self->slave_paths}) {
- unlink $slave;
+ if(-e $slave) {
+ unlink $slave;
+ }
}
}
@@ -275,6 +285,19 @@
ok my @replicated_storages = $replicated->schema->storage->connect_replicants(@replicant_connects)
=> 'Created some storages suitable for replicants';
+our %debug;
+$replicated->schema->storage->debug(1);
+$replicated->schema->storage->debugcb(sub {
+ my ($op, $info) = @_;
+ ##warn "\n$op, $info\n";
+ %debug = (
+ op => $op,
+ info => $info,
+ dsn => ($info=~m/\[(.+)\]/)[0],
+ storage_type => $info=~m/REPLICANT/ ? 'REPLICANT' : 'MASTER',
+ );
+});
+
ok my @all_storages = $replicated->schema->storage->all_storages
=> '->all_storages';
@@ -296,6 +319,8 @@
my @replicant_names = keys %{ $replicated->schema->storage->replicants };
+ok @replicant_names, "found replicant names @replicant_names";
+
## Silence warning about not supporting the is_replicating method if using the
## sqlite dbs.
$replicated->schema->storage->debugobj->silence(1)
@@ -332,6 +357,11 @@
[ qw/artistid name/ ],
[ 4, "Ozric Tentacles"],
]);
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ like $debug{info}, qr/INSERT/, 'Last was an insert';
## Make sure all the slaves have the table definitions
@@ -353,6 +383,11 @@
ok my $artist1 = $replicated->schema->resultset('Artist')->find(4)
=> 'Created Result';
+## We removed testing here since master read weight is on, so we can't tell in
+## advance what storage to expect. We turn master read weight off a bit lower
+## is $debug{storage_type}, 'REPLICANT'
+## => "got last query from a replicant: $debug{dsn}, $debug{info}";
+
isa_ok $artist1
=> 'DBICTest::Artist';
@@ -391,6 +426,11 @@
[ 7, "Watergate"],
]);
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ like $debug{info}, qr/INSERT/, 'Last was an insert';
+
## Make sure all the slaves have the table definitions
$replicated->replicate;
@@ -398,7 +438,10 @@
ok my $artist2 = $replicated->schema->resultset('Artist')->find(5)
=> 'Sync succeed';
-
+
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
isa_ok $artist2
=> 'DBICTest::Artist';
@@ -420,7 +463,10 @@
ok my $artist3 = $replicated->schema->resultset('Artist')->find(6)
=> 'Still finding stuff.';
-
+
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
isa_ok $artist3
=> 'DBICTest::Artist';
@@ -434,7 +480,10 @@
ok ! $replicated->schema->resultset('Artist')->find(666)
=> 'Correctly failed to find something.';
-
+
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
## test the reliable option
TESTRELIABLE: {
@@ -443,24 +492,39 @@
ok $replicated->schema->resultset('Artist')->find(2)
=> 'Read from master 1';
-
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
ok $replicated->schema->resultset('Artist')->find(5)
=> 'Read from master 2';
-
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
$replicated->schema->storage->set_balanced_storage;
ok $replicated->schema->resultset('Artist')->find(3)
=> 'Read from replicant';
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
}
## Make sure when reliable goes out of scope, we are using replicants again
ok $replicated->schema->resultset('Artist')->find(1)
=> 'back to replicant 1.';
-
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
ok $replicated->schema->resultset('Artist')->find(2)
=> 'back to replicant 2.';
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
## set all the replicants to inactive, and make sure the balancer falls back to
## the master.
@@ -474,10 +538,13 @@
$replicated->schema->storage->debugfh($debugfh);
ok $replicated->schema->resultset('Artist')->find(2)
- => 'Fallback to master';
+ => 'Fallback to master';
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
like $fallback_warning, qr/falling back to master/
- => 'emits falling back to master warning';
+ => 'emits falling back to master warning';
$replicated->schema->storage->debugfh($oldfh);
}
@@ -496,6 +563,9 @@
ok $replicated->schema->resultset('Artist')->find(2)
=> 'Returned to replicates';
+
+is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
## Getting slave status tests
@@ -503,7 +573,7 @@
## We skip these tests unless you have custom replicants, since the default
## sqlite based replication tests don't support these functions.
- skip 'Cannot Test Replicant Status on Non Replicating Database', 9
+ skip 'Cannot Test Replicant Status on Non Replicating Database', 10
unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
$replicated->replicate; ## Give the slaves a chance to catch up.
@@ -559,6 +629,9 @@
ok $replicated->schema->resultset('Artist')->find(5)
=> 'replicant reactivated';
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
is $replicated->schema->storage->pool->active_replicants => 2
=> "both replicants reactivated";
@@ -569,7 +642,10 @@
ok my $reliably = sub {
ok $replicated->schema->resultset('Artist')->find(5)
- => 'replicant reactivated';
+ => 'replicant reactivated';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
} => 'created coderef properly';
@@ -592,6 +668,8 @@
ok $replicated->schema->resultset('Artist')->find(3)
=> 'replicant reactivated';
+
+is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
## make sure transactions are set to execute_reliably
@@ -607,11 +685,17 @@
]);
ok my $result = $replicated->schema->resultset('Artist')->find($id)
- => 'Found expected artist';
-
+ => "Found expected artist for $id";
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'Found expected artist again';
-
+ => 'Found expected artist again for 1';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
return ($result, $more);
} => 'Created a coderef properly';
@@ -623,18 +707,28 @@
is $return[0]->id, 666
=> 'first returned value is correct';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
is $return[1]->id, 1
=> 'second returned value is correct';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
}
## Test that asking for single return works
{
- ok my $return = $replicated->schema->txn_do($transaction, 777)
+ ok my @return = $replicated->schema->txn_do($transaction, 777)
=> 'did transaction';
- is $return->id, 777
+ is $return[0]->id, 777
=> 'first returned value is correct';
+
+ is $return[1]->id, 1
+ => 'second returned value is correct';
}
## Test transaction returning a single value
@@ -643,6 +737,7 @@
ok my $result = $replicated->schema->txn_do(sub {
ok my $more = $replicated->schema->resultset('Artist')->find(1)
=> 'found inside a transaction';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
return $more;
}) => 'successfully processed transaction';
@@ -654,15 +749,22 @@
ok $replicated->schema->resultset('Artist')->find(1)
=> 'replicant reactivated';
+
+is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
## Test Discard changes
{
ok my $artist = $replicated->schema->resultset('Artist')->find(2)
=> 'got an artist to test discard changes';
-
- ok $artist->discard_changes
+
+ is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+
+ ok $artist->get_from_storage({force_pool=>'master'})
=> 'properly discard changes';
+
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+
}
## Test some edge cases, like trying to do a transaction inside a transaction, etc
@@ -672,6 +774,7 @@
return $replicated->schema->txn_do(sub {
ok my $more = $replicated->schema->resultset('Artist')->find(1)
=> 'found inside a transaction inside a transaction';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
return $more;
});
}) => 'successfully processed transaction';
@@ -686,7 +789,8 @@
return $replicated->schema->txn_do(sub {
return $replicated->schema->storage->execute_reliably(sub {
ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'found inside crazy deep transactions and execute_reliably';
+ => 'found inside crazy deep transactions and execute_reliably';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
return $more;
});
});
@@ -709,8 +813,25 @@
ok my $artist = $reliable_artist_rs->find(2)
=> 'got an artist result via force_pool storage';
+
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
}
+## Test the force_pool resultset attribute part two.
+
+{
+ ok my $artist_rs = $replicated->schema->resultset('Artist')
+ => 'got artist resultset';
+
+ ## Turn on Forced Pool Storage
+ ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>$replicant_names[0]})
+ => 'Created a resultset using force_pool storage';
+
+ ok my $artist = $reliable_artist_rs->find(2)
+ => 'got an artist result via force_pool storage';
+
+ is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+}
## Delete the old database files
$replicated->cleanup;
Modified: DBIx-Class/0.08/branches/sybase/t/lib/DBICTest/Schema/Bookmark.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/lib/DBICTest/Schema/Bookmark.pm 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/t/lib/DBICTest/Schema/Bookmark.pm 2009-07-17 07:41:07 UTC (rev 7065)
@@ -15,6 +15,7 @@
},
'link' => {
data_type => 'integer',
+ is_nullable => 1,
},
);
Modified: DBIx-Class/0.08/branches/sybase/t/prefetch/grouped.t
===================================================================
--- DBIx-Class/0.08/branches/sybase/t/prefetch/grouped.t 2009-07-17 07:39:54 UTC (rev 7064)
+++ DBIx-Class/0.08/branches/sybase/t/prefetch/grouped.t 2009-07-17 07:41:07 UTC (rev 7065)
@@ -28,7 +28,6 @@
my $track_rs = $schema->resultset ('Track')->search (
{ 'me.cd' => { -in => [ $cd_rs->get_column ('cdid')->all ] } },
{
- # the select/as is deliberately silly to test both funcs and refs below
select => [
'me.cd',
{ count => 'me.trackid' },
@@ -67,8 +66,6 @@
# Test sql by hand, as the sqlite db will simply paper over
# improper group/select combinations
#
- # the exploded IN needs fixing below, coming in another branch
- #
is_same_sql_bind (
$track_rs->count_rs->as_query,
'(
@@ -131,14 +128,19 @@
# test a has_many/might_have prefetch at the same level
# Note that one of the CDs now has 4 tracks instead of 3
{
- my $most_tracks_rs = $cd_rs->search ({}, {
- prefetch => 'liner_notes', # tracks are alredy prefetched
- select => ['me.cdid', { count => 'tracks.trackid' } ],
- as => [qw/cdid track_count/],
- group_by => 'me.cdid',
- order_by => { -desc => 'track_count' },
- rows => 2,
- });
+ my $most_tracks_rs = $schema->resultset ('CD')->search (
+ {
+ 'me.cdid' => { '!=' => undef }, # duh - this is just to test WHERE
+ },
+ {
+ prefetch => [qw/tracks liner_notes/],
+ select => ['me.cdid', { count => 'tracks.trackid' } ],
+ as => [qw/cdid track_count/],
+ group_by => 'me.cdid',
+ order_by => { -desc => 'track_count' },
+ rows => 2,
+ }
+ );
is_same_sql_bind (
$most_tracks_rs->count_rs->as_query,
@@ -149,7 +151,7 @@
FROM cd me
LEFT JOIN track tracks ON tracks.cd = me.cdid
LEFT JOIN liner_notes liner_notes ON liner_notes.liner_id = me.cdid
- WHERE ( tracks.cd IS NOT NULL )
+ WHERE ( me.cdid IS NOT NULL )
GROUP BY me.cdid
LIMIT 2
) count_subq
@@ -166,14 +168,14 @@
SELECT me.cdid, COUNT( tracks.trackid ) AS track_count
FROM cd me
LEFT JOIN track tracks ON tracks.cd = me.cdid
- WHERE ( tracks.cd IS NOT NULL )
+ WHERE ( me.cdid IS NOT NULL )
GROUP BY me.cdid
ORDER BY track_count DESC
LIMIT 2
) me
LEFT JOIN track tracks ON tracks.cd = me.cdid
LEFT JOIN liner_notes liner_notes ON liner_notes.liner_id = me.cdid
- WHERE ( tracks.cd IS NOT NULL )
+ WHERE ( me.cdid IS NOT NULL )
ORDER BY track_count DESC, tracks.cd
)',
[],