[Bast-commits] r7171 - in DBIx-Class/0.08/branches/mysql_ansi: .
lib/DBIx lib/DBIx/Class lib/DBIx/Class/CDBICompat
lib/DBIx/Class/InflateColumn lib/DBIx/Class/Manual
lib/DBIx/Class/Relationship lib/DBIx/Class/ResultSource
lib/DBIx/Class/ResultSourceProxy lib/DBIx/Class/Schema
lib/DBIx/Class/Storage lib/DBIx/Class/Storage/DBI
lib/DBIx/Class/Storage/DBI/ODBC lib/DBIx/Class/Storage/DBI/Oracle
lib/DBIx/Class/Storage/DBI/Replicated
lib/DBIx/Class/Storage/DBI/Replicated/Balancer
lib/SQL/Translator/Parser/DBIx
lib/SQL/Translator/Producer/DBIx/Class t t/bind
t/cdbi/testlib/DBIC/Test t/inflate t/lib t/lib/DBICTest
t/lib/DBICTest/Schema t/prefetch
ribasushi at dev.catalyst.perl.org
Mon Aug 3 08:13:40 GMT 2009
Author: ribasushi
Date: 2009-08-03 08:13:39 +0000 (Mon, 03 Aug 2009)
New Revision: 7171
Added:
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod
DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/CustomSql.pm
DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/Money.pm
Modified:
DBIx-Class/0.08/branches/mysql_ansi/
DBIx-Class/0.08/branches/mysql_ansi/Changes
DBIx-Class/0.08/branches/mysql_ansi/Features_09
DBIx-Class/0.08/branches/mysql_ansi/Makefile.PL
DBIx-Class/0.08/branches/mysql_ansi/TODO
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ColumnCase.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ColumnGroups.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Copy.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ImaDBI.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Iterator.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/LazyLoading.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Relationship.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Relationships.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Retrieve.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/TempColumns.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/DB.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Exception.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/InflateColumn/DateTime.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/InflateColumn/File.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/Cookbook.pod
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/Example.pod
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/FAQ.pod
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/Intro.pod
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Ordered.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/PK.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/Accessor.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/Base.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/BelongsTo.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/CascadeActions.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/HasMany.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/HasOne.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/ManyToMany.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/ProxyMethods.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSet.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSetColumn.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSource.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSource/View.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSourceHandle.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSourceProxy/Table.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Row.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Schema.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Schema/Versioned.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/StartupCheck.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/DB2.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Pg.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Types.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/mysql.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/Statistics.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/UTF8Columns.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/SQL/Translator/Parser/DBIx/Class.pm
DBIx-Class/0.08/branches/mysql_ansi/lib/SQL/Translator/Producer/DBIx/Class/File.pm
DBIx-Class/0.08/branches/mysql_ansi/t/03podcoverage.t
DBIx-Class/0.08/branches/mysql_ansi/t/746mssql.t
DBIx-Class/0.08/branches/mysql_ansi/t/74mssql.t
DBIx-Class/0.08/branches/mysql_ansi/t/93storage_replication.t
DBIx-Class/0.08/branches/mysql_ansi/t/99dbic_sqlt_parser.t
DBIx-Class/0.08/branches/mysql_ansi/t/bind/attribute.t
DBIx-Class/0.08/branches/mysql_ansi/t/cdbi/testlib/DBIC/Test/SQLite.pm
DBIx-Class/0.08/branches/mysql_ansi/t/inflate/datetime_pg.t
DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema.pm
DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/Bookmark.pm
DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/Event.pm
DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/EventTZPg.pm
DBIx-Class/0.08/branches/mysql_ansi/t/lib/sqlite.sql
DBIx-Class/0.08/branches/mysql_ansi/t/prefetch/grouped.t
Log:
r6985 at Thesaurus (orig r6984): ribasushi | 2009-07-04 12:08:16 +0200
Missing newline
r6986 at Thesaurus (orig r6985): ribasushi | 2009-07-04 12:11:18 +0200
typo
r6987 at Thesaurus (orig r6986): ribasushi | 2009-07-04 12:40:47 +0200
Fix POD
r6988 at Thesaurus (orig r6987): ribasushi | 2009-07-04 13:09:39 +0200
todos are shorter now
r6990 at Thesaurus (orig r6989): castaway | 2009-07-05 22:00:55 +0200
Added Pod::Inherit use to Makefile.PL at author-time; comments/suggestions as to whether it's too "noisy" are welcome.
r6991 at Thesaurus (orig r6990): ribasushi | 2009-07-06 00:06:52 +0200
Couple of Makefile fixes:
use() happens at compile time - use require() instead
recommends is for distro maintainers only, so push the dependency into the author-requirements hash (it is not to be executed by mere mortals)
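For reference, the resulting author-time pattern (it appears verbatim in the Makefile.PL hunk further down in this diff) is roughly:

    # Author-time only: load the module with a runtime require() wrapped in
    # eval, rather than a compile-time use(), so a missing
    # Module::Install::Pod::Inherit does not break Makefile.PL for end users.
    eval { require Module::Install::Pod::Inherit };
    PodInherit() if !$@;
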
r6992 at Thesaurus (orig r6991): ribasushi | 2009-07-06 00:55:36 +0200
Forgotten pod exclusions
r6993 at Thesaurus (orig r6992): ribasushi | 2009-07-06 01:07:05 +0200
Temporarily backout Pod::Inherit changes
r6994 at Thesaurus (orig r6993): ribasushi | 2009-07-06 01:10:22 +0200
Put Pod::Inherit stuff back after proper copy
r7010 at Thesaurus (orig r7009): ribasushi | 2009-07-09 12:45:02 +0200
r6995 at Thesaurus (orig r6994): ribasushi | 2009-07-06 01:12:57 +0200
Where 08108 will come from
r7028 at Thesaurus (orig r7027): caelum | 2009-07-10 23:56:57 +0200
fix PodInherit call in Makefile.PL
r7030 at Thesaurus (orig r7029): robkinyon | 2009-07-11 00:03:07 +0200
Applied patch from kados regarding use of a DateTime::Format class to validate date input
r7031 at Thesaurus (orig r7030): caelum | 2009-07-11 11:26:40 +0200
reword IC::DT doc patch
r7038 at Thesaurus (orig r7037): dandv | 2009-07-13 14:06:08 +0200
PK::Auto has been part of Core since 2007
r7039 at Thesaurus (orig r7038): dandv | 2009-07-13 14:15:13 +0200
Fixed has_many example in Intro.pod
r7040 at Thesaurus (orig r7039): dandv | 2009-07-13 22:58:45 +0200
Fixed run-on sentences in FAQ
r7041 at Thesaurus (orig r7040): dandv | 2009-07-13 23:18:11 +0200
Minor POD fixes in Example.pod
r7042 at Thesaurus (orig r7041): dandv | 2009-07-13 23:48:18 +0200
Favored using ->single to get the topmost result over less readable ->slice(0)
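A rough sketch of the preference described above; the resultset and column names here are made up:

    # Get just the topmost row - ->single grabs the first returned result
    # without instantiating a full cursor for the whole resultset:
    my $top_cd = $schema->resultset('CD')->search(
      {},
      { order_by => { -desc => 'year' }, rows => 1 },
    )->single;

    # versus the less readable form it replaces in the docs:
    # my ($top_cd) = $schema->resultset('CD')->slice(0);
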
r7043 at Thesaurus (orig r7042): dandv | 2009-07-14 00:56:31 +0200
Minor POD fixes in Cookbook
r7046 at Thesaurus (orig r7045): ribasushi | 2009-07-14 13:30:55 +0200
Minor logic cleanup
r7047 at Thesaurus (orig r7046): ribasushi | 2009-07-14 14:07:11 +0200
grouped prefetch fix
r7054 at Thesaurus (orig r7053): ijw | 2009-07-15 18:55:35 +0200
Added SQLA link for more comprehensive documentation of order_by options available
r7057 at Thesaurus (orig r7056): caelum | 2009-07-16 00:54:22 +0200
add "smalldatetime" support to IC::DT
r7060 at Thesaurus (orig r7059): ribasushi | 2009-07-16 06:29:41 +0200
r7013 at Thesaurus (orig r7012): jnapiorkowski | 2009-07-09 17:00:22 +0200
new branch
r7014 at Thesaurus (orig r7013): jnapiorkowski | 2009-07-09 20:06:44 +0200
changed the way transactions are detected for replication to use the standard approach; minor doc updates; fixed the force pool so you can force a particular slave; changed the way the debugging output is created
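A minimal sketch of the force_pool usage mentioned above; the resultset name is made up, and 'master' is the built-in name for the write handler (a specific replicant can presumably be named via its DSN instead):

    # Force this read to go to the master instead of a replicant:
    my $rs = $schema->resultset('User')->search(
      undef,
      { force_pool => 'master' },
    );
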
r7015 at Thesaurus (orig r7014): jnapiorkowski | 2009-07-09 20:17:03 +0200
more changes to the way debug output works
r7016 at Thesaurus (orig r7015): jnapiorkowski | 2009-07-09 22:26:47 +0200
big update to the test suite so that we now check that the storage we expected was actually the one used
r7017 at Thesaurus (orig r7016): jnapiorkowski | 2009-07-09 23:23:37 +0200
set correct number of tests, changed the debugging output to not warn on DDL, minor change to a test resultclass so we can deploy to mysql properly
r7018 at Thesaurus (orig r7017): jnapiorkowski | 2009-07-09 23:26:59 +0200
corrected the number of skipped tests
r7019 at Thesaurus (orig r7018): jnapiorkowski | 2009-07-09 23:52:22 +0200
fixed test resultclass formatting, added a few more DBIC::Storage::DBI methods that I might need to delegate.
r7020 at Thesaurus (orig r7019): jnapiorkowski | 2009-07-10 01:23:07 +0200
some documentation updates, and changed the way we find paths for the sqlite dbfiles to use File::Spec, which will hopefully solve some of the Win32 error messages
r7023 at Thesaurus (orig r7022): jnapiorkowski | 2009-07-10 18:00:38 +0200
pod cleanup, fixed broken pod links, and new Introduction pod
r7024 at Thesaurus (orig r7023): jnapiorkowski | 2009-07-10 19:10:57 +0200
updated Changes file to reflect work completed
r7025 at Thesaurus (orig r7024): jnapiorkowski | 2009-07-10 19:37:53 +0200
a few more Moose Type related fixes; also added a diag to the replication test to report the Moose and MooseX::Types versions in use, to help us debug some of the Moose-related errors being reported
r7058 at Thesaurus (orig r7057): ribasushi | 2009-07-16 06:28:44 +0200
A couple of typos, and general whitespace cleanup (ick)
r7063 at Thesaurus (orig r7062): jnapiorkowski | 2009-07-16 17:03:32 +0200
increased the Moose version requirements due to changes in the way type constraints get validated, which are not backward compatible
r7064 at Thesaurus (orig r7063): dandv | 2009-07-17 03:37:28 +0200
Minor POD grammar: it's -> its where appropriate
r7075 at Thesaurus (orig r7074): tomboh | 2009-07-20 18:20:37 +0200
Fix POD changes from r7040.
r7078 at Thesaurus (orig r7077): norbi | 2009-07-21 00:59:30 +0200
r7079 at Thesaurus (orig r7078): norbi | 2009-07-21 00:59:58 +0200
r7232 at vger: mendel | 2009-07-21 00:58:12 +0200
Fixed documentation and added test for the "Arbitrary SQL through a custom ResultSource" Cookbook alternate (subclassing) recipe.
r7080 at Thesaurus (orig r7079): norbi | 2009-07-21 01:05:32 +0200
r7235 at vger: mendel | 2009-07-21 01:05:18 +0200
Fixed 'typo' (removed a word that I left there by accident).
r7081 at Thesaurus (orig r7080): norbi | 2009-07-21 10:06:21 +0200
r7237 at vger: mendel | 2009-07-21 10:06:05 +0200
Fixing what my svk client screwed up.
r7082 at Thesaurus (orig r7081): caelum | 2009-07-21 16:51:55 +0200
update Storage::Replicated prereqs
r7083 at Thesaurus (orig r7082): caelum | 2009-07-21 18:16:34 +0200
show Oracle datetime_setup alter session statements in debug output
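To actually see those ALTER SESSION statements, one would presumably enable the datetime_setup connect call and turn on debug tracing, along these lines (schema name and DSN are placeholders):

    my $schema = My::Schema->connect(
      'dbi:Oracle:mydb', 'user', 'password',
      { on_connect_call => 'datetime_setup' },  # issues the ALTER SESSION statements
    );
    $schema->storage->debug(1);  # or set the DBIC_TRACE environment variable
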
r7086 at Thesaurus (orig r7085): ribasushi | 2009-07-22 03:50:57 +0200
Lazy folks do not run the whole test suite before merging >:(
r7100 at Thesaurus (orig r7097): caelum | 2009-07-23 20:14:11 +0200
r6092 at hlagh (orig r7090): caelum | 2009-07-23 08:24:39 -0400
new branch for fixing the MONEY type in MSSQL
r6093 at hlagh (orig r7091): caelum | 2009-07-23 08:34:01 -0400
add test
r6283 at hlagh (orig r7093): caelum | 2009-07-23 10:31:08 -0400
fix money columns
r6284 at hlagh (orig r7094): caelum | 2009-07-23 10:34:06 -0400
minor change
r6285 at hlagh (orig r7095): caelum | 2009-07-23 11:01:37 -0400
add test for updating money value to NULL
r6286 at hlagh (orig r7096): caelum | 2009-07-23 14:09:26 -0400
add money type tests to dbd::sybase+mssql tests
r7129 at Thesaurus (orig r7126): caelum | 2009-07-28 02:03:47 +0200
add postgres "timestamp without time zone" support
r7143 at Thesaurus (orig r7140): caelum | 2009-07-30 14:46:04 +0200
update sqlite test schema
r7148 at Thesaurus (orig r7145): robkinyon | 2009-07-30 16:13:21 +0200
Added prefetch caveats
r7149 at Thesaurus (orig r7146): robkinyon | 2009-07-30 16:20:02 +0200
Fixed caveats
r7152 at Thesaurus (orig r7149): caelum | 2009-07-30 17:56:01 +0200
make ::Oracle::Generic load without DBD::Oracle
r7153 at Thesaurus (orig r7150): caelum | 2009-07-30 18:04:47 +0200
make sure DBD::Oracle is loaded when using constants from it
r7157 at Thesaurus (orig r7154): castaway | 2009-07-30 22:17:33 +0200
Mangled Rob's example somewhat; it still needs an explanation of which circumstances exactly cause the broken results
r7161 at Thesaurus (orig r7158): mo | 2009-07-31 12:51:20 +0200
POD fix
r7162 at Thesaurus (orig r7159): mo | 2009-07-31 12:52:42 +0200
undo that attributes merge stuff
r7169 at Thesaurus (orig r7166): castaway | 2009-08-02 12:41:25 +0200
Mention ResultSet, ResultSource and Row in synopsis
r7170 at Thesaurus (orig r7167): castaway | 2009-08-02 14:10:53 +0200
Docs: Explanations of result sources and how to find them
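For reference, the usual ways of getting hold of a result source ('Artist', $artist_rs and $artist_row are made-up names):

    my $source   = $schema->source('Artist');      # via the Schema object
    # ... or from objects you already have in hand:
    my $from_rs  = $artist_rs->result_source;      # from a ResultSet
    my $from_row = $artist_row->result_source;     # from a Row object
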
Property changes on: DBIx-Class/0.08/branches/mysql_ansi
___________________________________________________________________
Name: svk:merge
- 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:5969
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:5651
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
+ 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7237
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:5651
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:7167
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
Modified: DBIx-Class/0.08/branches/mysql_ansi/Changes
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/Changes 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/Changes 2009-08-03 08:13:39 UTC (rev 7171)
@@ -1,5 +1,17 @@
Revision history for DBIx::Class
+ - Replication updates: Improved the replication tests so that they are
+ more reliable and accurate, and hopefully solve some cross platform
+ issues. Bugfixes related to naming particular replicants in a
+ 'force_pool' attribute. Lots of documentation updates, including a
+ new Introduction.pod file. Fixed the way we detect transaction to
+ make this more reliable and forward looking. Fixed some trouble with
+ the way Moose Types are used.
+ - Added call to Pod::Inherit in Makefile.PL -
+ currently at author-time only, so we need to add the produced
+ .pod files to the MANIFEST
+
+0.08108 2009-07-05 23:15:00 (UTC)
- Fixed the has_many prefetch with limit/group deficiency -
it is now possible to select "top 5 commenters" while
prefetching all their comments
@@ -21,7 +33,7 @@
- Fixed set_$rel with where restriction deleting rows outside
the restriction
- populate() returns the created objects or an arrayref of the
- create dobjects depending on scalar vs. list context
+ created objects depending on scalar vs. list context
- Fixed find_related on 'single' relationships - the former
implementation would overspecify the WHERE condition, reporting
no related objects when there in fact is one
Modified: DBIx-Class/0.08/branches/mysql_ansi/Features_09
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/Features_09 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/Features_09 2009-08-03 08:13:39 UTC (rev 7171)
@@ -14,12 +14,8 @@
- "belongs_to" to "contains/refers/something"
Using inflated objects/references as values in searches
- - Goes together with subselects above
- should deflate then run search
-FilterColumn - like Inflate, only for changing scalar values
- - This seems to be vaporware atm..
-
SQL/API feature complete?
- UNION
- proper join conditions!
@@ -27,17 +23,16 @@
Moosification - ouch
+Metamodel stuff - introspection
+
Prefetch improvements
- slow on mysql, speedup?
- multi has_many prefetch
- - paging working with prefetch
Magically "discover" needed joins/prefetches and add them
- eg $books->search({ 'author.name' => 'Fred'}), autoadds: join => 'author'
- also guess aliases when supplying column names that are on joined/related tables
-Metamodel stuff - introspection
-
Storage API/restructure
- call update/insert etc on the ResultSource, which then calls to storage
- handle different storages/db-specific code better
@@ -52,4 +47,3 @@
Documentation - improvements
- better indexing for finding of stuff in general
- more cross-referencing of docs
-
Modified: DBIx-Class/0.08/branches/mysql_ansi/Makefile.PL
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/Makefile.PL 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/Makefile.PL 2009-08-03 08:13:39 UTC (rev 7171)
@@ -61,6 +61,7 @@
my %force_requires_if_author = (
'Test::Pod::Coverage' => 1.04,
+ 'Module::Install::Pod::Inherit' => 0.01,
'SQL::Translator' => 0.09007,
# CDBI-compat related
@@ -83,9 +84,9 @@
'DateTime::Format::Strptime'=> 0,
# t/93storage_replication.t
- 'Moose', => 0.77,
- 'MooseX::AttributeHelpers' => 0.12,
- 'MooseX::Types', => 0.10,
+ 'Moose', => 0.87,
+ 'MooseX::AttributeHelpers' => 0.21,
+ 'MooseX::Types', => 0.16,
'namespace::clean' => 0.11,
'Hash::Merge', => 0.11,
@@ -135,6 +136,9 @@
print "Removing MANIFEST\n";
unlink 'MANIFEST';
}
+
+ eval { require Module::Install::Pod::Inherit };
+ PodInherit() if !$@;
}
auto_install();
Modified: DBIx-Class/0.08/branches/mysql_ansi/TODO
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/TODO 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/TODO 2009-08-03 08:13:39 UTC (rev 7171)
@@ -3,12 +3,8 @@
- ResultSource objects caching ->resultset causes interesting problems
- find why XSUB dumper kills schema in Catalyst (may be Pg only?)
-2006-04-11 by castaway
- - docs of copy() should say that is_auto_increment is essential for auto_incrementing keys
-
2006-03-25 by mst
- find a way to un-wantarray search without breaking compat
- - audit logging component
- delay relationship setup if done via ->load_classes
- double-sided relationships
- make short form of class specifier in relationships work
@@ -21,9 +17,6 @@
We should still support the old inflate/deflate syntax, but this new
way should be recommended.
-2006-02-07 by castaway
- - Extract DBIC::SQL::Abstract into a separate module for CPAN
-
2006-03-18 by bluefeet
- Support table locking.
@@ -47,18 +40,8 @@
if you haven't specified one of the others
2008-10-30 by ribasushi
- Leftovers for next dev-release
- Rewrite the test suite to rely on $schema->deploy, allowing for seamless
testing of various RDBMS using the same tests
- - Proper support of default create (i.e. create({}) ), with proper workarounds
- for different Storage's
- Automatically infer quote_char/name_sep from $schema->storage
- - Finally incorporate View support (needs real tests)
- Fix and properly test chained search attribute merging
-
-2008-11-07 by ribasushi
- - Be loud when a relationship resolution fails because we did not select/as
- a neccessary pk
- Recursive update() (all code seems to be already available)
- - $rs->populate changes its syntax depending on wantarray context (BAD)
- Also the interface differs from $schema->populate (not so good)
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ColumnCase.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ColumnCase.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ColumnCase.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -16,10 +16,10 @@
sub has_a {
my($self, $col, @rest) = @_;
-
+
$self->_declare_has_a(lc $col, @rest);
$self->_mk_inflated_column_accessor($col);
-
+
return 1;
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ColumnGroups.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ColumnGroups.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ColumnGroups.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -73,7 +73,7 @@
sub _has_custom_accessor {
my($class, $name) = @_;
-
+
no strict 'refs';
my $existing_accessor = *{$class .'::'. $name}{CODE};
return $existing_accessor && !$our_accessors{$existing_accessor};
@@ -90,7 +90,7 @@
my $fullname = join '::', $class, $name;
*$fullname = Sub::Name::subname $fullname, $accessor;
}
-
+
$our_accessors{$accessor}++;
return 1;
@@ -120,7 +120,7 @@
# warn " $field $alias\n";
{
no strict 'refs';
-
+
$class->_deploy_accessor($name, $accessor);
$class->_deploy_accessor($alias, $accessor);
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ColumnsAsHash.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -39,16 +39,16 @@
my $class = shift;
my $new = $class->next::method(@_);
-
+
$new->_make_columns_as_hash;
-
+
return $new;
}
sub _make_columns_as_hash {
my $self = shift;
-
+
for my $col ($self->columns) {
if( exists $self->{$col} ) {
warn "Skipping mapping $col to a hash key because it exists";
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Copy.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Copy.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Copy.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -25,7 +25,7 @@
sub copy {
my($self, $arg) = @_;
return $self->next::method($arg) if ref $arg;
-
+
my @primary_columns = $self->primary_columns;
croak("Need hash-ref to edit copied column values")
if @primary_columns > 1;
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ImaDBI.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ImaDBI.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/ImaDBI.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -59,7 +59,7 @@
$rel_obj->{cond}, $to, $from) );
return $join;
}
-
+
} );
sub db_Main {
@@ -115,7 +115,7 @@
sub transform_sql {
my ($class, $sql, @args) = @_;
-
+
my $tclass = $class->sql_transformer_class;
$class->ensure_class_loaded($tclass);
my $t = $tclass->new($class, $sql, @args);
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Iterator.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Iterator.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Iterator.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -25,7 +25,7 @@
sub _init_result_source_instance {
my $class = shift;
-
+
my $table = $class->next::method(@_);
$table->resultset_class("DBIx::Class::CDBICompat::Iterator::ResultSet");
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/LazyLoading.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/LazyLoading.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/LazyLoading.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -16,12 +16,12 @@
# request in case the database modifies the new value (say, via a trigger)
sub update {
my $self = shift;
-
+
my @dirty_columns = keys %{$self->{_dirty_columns}};
-
+
my $ret = $self->next::method(@_);
$self->_clear_column_data(@dirty_columns);
-
+
return $ret;
}
@@ -30,12 +30,12 @@
sub create {
my $class = shift;
my($data) = @_;
-
+
my @columns = keys %$data;
-
+
my $obj = $class->next::method(@_);
return $obj unless defined $obj;
-
+
my %primary_cols = map { $_ => 1 } $class->primary_columns;
my @data_cols = grep !$primary_cols{$_}, @columns;
$obj->_clear_column_data(@data_cols);
@@ -46,7 +46,7 @@
sub _clear_column_data {
my $self = shift;
-
+
delete $self->{_column_data}{$_} for @_;
delete $self->{_inflated_column}{$_} for @_;
}
@@ -71,7 +71,7 @@
for my $col ($self->primary_columns) {
$changes->{$col} = undef unless exists $changes->{$col};
}
-
+
return $self->next::method($changes);
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/LiveObjectIndex.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -20,9 +20,9 @@
sub nocache {
my $class = shift;
-
+
return $class->__nocache(@_) if @_;
-
+
return 1 if $Class::DBI::Weaken_Is_Available == 0;
return $class->__nocache;
}
@@ -74,9 +74,9 @@
sub inflate_result {
my ($class, @rest) = @_;
my $new = $class->next::method(@rest);
-
+
return $new if $new->nocache;
-
+
if (my $key = $new->ID) {
#warn "Key $key";
my $live = $class->live_object_index;
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Relationship.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Relationship.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Relationship.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -25,7 +25,7 @@
sub new {
my($class, $args) = @_;
-
+
return bless $args, $class;
}
@@ -34,7 +34,7 @@
my $code = sub {
$_[0]->{$key};
};
-
+
no strict 'refs';
*{$method} = Sub::Name::subname $method, $code;
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Relationships.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Relationships.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Relationships.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -24,10 +24,10 @@
sub has_a {
my($self, $col, @rest) = @_;
-
+
$self->_declare_has_a($col, @rest);
$self->_mk_inflated_column_accessor($col);
-
+
return 1;
}
@@ -37,7 +37,7 @@
$self->throw_exception( "No such column ${col}" )
unless $self->has_column($col);
$self->ensure_class_loaded($f_class);
-
+
my $rel_info;
if ($args{'inflate'} || $args{'deflate'}) { # Non-database has_a
@@ -50,7 +50,7 @@
$args{'deflate'} = sub { shift->$meth; };
}
$self->inflate_column($col, \%args);
-
+
$rel_info = {
class => $f_class
};
@@ -59,9 +59,9 @@
$self->belongs_to($col, $f_class);
$rel_info = $self->result_source_instance->relationship_info($col);
}
-
+
$rel_info->{args} = \%args;
-
+
$self->_extend_meta(
has_a => $col,
$rel_info
@@ -72,7 +72,7 @@
sub _mk_inflated_column_accessor {
my($class, $col) = @_;
-
+
return $class->mk_group_accessors('inflated_column' => $col);
}
@@ -137,7 +137,7 @@
sub might_have {
my ($class, $rel, $f_class, @columns) = @_;
-
+
my $ret;
if (ref $columns[0] || !defined $columns[0]) {
$ret = $class->next::method($rel, $f_class, @columns);
@@ -153,7 +153,7 @@
might_have => $rel,
$rel_info
);
-
+
return $ret;
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Retrieve.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Retrieve.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/Retrieve.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -74,7 +74,7 @@
my $class = shift;
my $obj = $class->resultset_instance->new_result(@_);
$obj->in_storage(1);
-
+
return $obj;
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/TempColumns.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/TempColumns.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat/TempColumns.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -11,7 +11,7 @@
sub _add_column_group {
my ($class, $group, @cols) = @_;
-
+
return $class->next::method($group, @cols) unless $group eq 'TEMP';
my %new_cols = map { $_ => 1 } @cols;
@@ -61,11 +61,11 @@
sub set {
my($self, %data) = @_;
-
+
my $temp_data = $self->_extract_temp_data(\%data);
-
+
$self->set_temp($_, $temp_data->{$_}) for keys %$temp_data;
-
+
return $self->next::method(%data);
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/CDBICompat.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -11,7 +11,7 @@
DBIx::ContextualFetch
Clone
);
-
+
my @didnt_load;
for my $module (@Extra_Modules) {
push @didnt_load, $module unless eval qq{require $module};
@@ -149,13 +149,13 @@
package Foo;
use base qw(Class::DBI);
-
+
Foo->table("foo");
Foo->columns( All => qw(this that bar) );
package Bar;
use base qw(Class::DBI);
-
+
Bar->table("bar");
Bar->columns( All => qw(up down) );
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/DB.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/DB.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/DB.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -174,7 +174,7 @@
sub result_source_instance {
my $class = shift;
$class = ref $class || $class;
-
+
if (@_) {
my $source = $_[0];
$class->_result_source_instance([$source, $class]);
@@ -186,7 +186,7 @@
return unless Scalar::Util::blessed($source);
if ($result_class ne $class) { # new class
- # Give this new class it's own source and register it.
+ # Give this new class its own source and register it.
$source = $source->new({
%$source,
source_name => $class,
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Exception.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Exception.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Exception.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -61,7 +61,7 @@
else {
$msg = Carp::longmess($msg);
}
-
+
my $self = { msg => $msg };
bless $self => $class;
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/InflateColumn/DateTime.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/InflateColumn/DateTime.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/InflateColumn/DateTime.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -40,17 +40,26 @@
__PACKAGE__->add_columns(
starts_when => { data_type => 'varchar', inflate_datetime => 1 }
);
-
+
__PACKAGE__->add_columns(
starts_when => { data_type => 'varchar', inflate_date => 1 }
);
It's also possible to explicitly skip inflation:
-
+
__PACKAGE__->add_columns(
starts_when => { data_type => 'datetime', inflate_datetime => 0 }
);
+NOTE: Don't rely on C<InflateColumn::DateTime> to parse date strings for you.
+The column is set directly for any non-references and C<InflateColumn::DateTime>
+is completely bypassed. Instead, use an input parser to create a DateTime
+object. For instance, if your user input comes as a 'YYYY-MM-DD' string, you can
+use C<DateTime::Format::ISO8601> thusly:
+
+ use DateTime::Format::ISO8601;
+ my $dt = DateTime::Format::ISO8601->parse_datetime('YYYY-MM-DD');
+
=head1 DESCRIPTION
This module figures out the type of DateTime::Format::* class to
@@ -77,7 +86,7 @@
In the case of an invalid date, L<DateTime> will throw an exception. To
bypass these exceptions and just have the inflation return undef, use
the C<datetime_undef_if_invalid> option in the column info:
-
+
"broken_date",
{
data_type => "datetime",
@@ -110,6 +119,12 @@
if ($type eq "timestamp with time zone" || $type eq "timestamptz") {
$type = "timestamp";
$info->{_ic_dt_method} ||= "timestamp_with_timezone";
+ } elsif ($type eq "timestamp without time zone") {
+ $type = "timestamp";
+ $info->{_ic_dt_method} ||= "timestamp_without_timezone";
+ } elsif ($type eq "smalldatetime") {
+ $type = "datetime";
+ $info->{_ic_dt_method} ||= "datetime";
}
}
@@ -126,7 +141,7 @@
"please put it directly into the '$column' column definition.";
$locale = $info->{extra}{locale};
}
-
+
$locale = $info->{locale} if defined $info->{locale};
$timezone = $info->{timezone} if defined $info->{timezone};
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/InflateColumn/File.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/InflateColumn/File.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/InflateColumn/File.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -58,7 +58,7 @@
sub insert {
my $self = shift;
-
+
# cache our file columns so we can write them to the fs
# -after- we have a PK
my %file_column;
@@ -114,7 +114,7 @@
In your L<DBIx::Class> table class:
__PACKAGE__->load_components( "PK::Auto", "InflateColumn::File", "Core" );
-
+
# define your columns
__PACKAGE__->add_columns(
"id",
@@ -136,8 +136,8 @@
size => 255,
},
);
-
+
In your L<Catalyst::Controller> class:
FileColumn requires a hash that contains L<IO::File> as handle and the file's
@@ -152,15 +152,15 @@
body => '....'
});
$c->stash->{entry}=$entry;
-
+
And Place the following in your TT template
-
+
Article Subject: [% entry.subject %]
Uploaded File:
<a href="/static/files/[% entry.id %]/[% entry.filename.filename %]">File</a>
Body: [% entry.body %]
-
+
The file will be stored on the filesystem for later retrieval. Calling delete
on your resultset will delete the file from the filesystem. Retrevial of the
record automatically inflates the column back to the set hash with the
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/Cookbook.pod
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/Cookbook.pod 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/Cookbook.pod 2009-08-03 08:13:39 UTC (rev 7171)
@@ -1,4 +1,4 @@
-=head1 NAME
+=head1 NAME
DBIx::Class::Manual::Cookbook - Miscellaneous recipes
@@ -62,12 +62,12 @@
Sometimes you need only the first "top" row of a resultset. While this can be
easily done with L<< $rs->first|DBIx::Class::ResultSet/first >>, it is suboptimal,
as a full blown cursor for the resultset will be created and then immediately
-destroyed after fetching the first row object.
+destroyed after fetching the first row object.
L<< $rs->single|DBIx::Class::ResultSet/single >> is
designed specifically for this case - it will grab the first returned result
-without even instantiating a cursor.
+without even instantiating a cursor.
-Before replacing all your calls to C<first()> with C<single()> please observe the
+Before replacing all your calls to C<first()> with C<single()> please observe the
following CAVEATS:
=over
@@ -96,62 +96,61 @@
Sometimes you have to run arbitrary SQL because your query is too complex
(e.g. it contains Unions, Sub-Selects, Stored Procedures, etc.) or has to
-be optimized for your database in a special way, but you still want to
-get the results as a L<DBIx::Class::ResultSet>.
-The recommended way to accomplish this is by defining a separate ResultSource
-for your query. You can then inject complete SQL statements using a scalar
+be optimized for your database in a special way, but you still want to
+get the results as a L<DBIx::Class::ResultSet>.
+The recommended way to accomplish this is by defining a separate ResultSource
+for your query. You can then inject complete SQL statements using a scalar
reference (this is a feature of L<SQL::Abstract>).
Say you want to run a complex custom query on your user data, here's what
you have to add to your User class:
package My::Schema::Result::User;
-
+
use base qw/DBIx::Class/;
-
+
# ->load_components, ->table, ->add_columns, etc.
# Make a new ResultSource based on the User class
my $source = __PACKAGE__->result_source_instance();
my $new_source = $source->new( $source );
$new_source->source_name( 'UserFriendsComplex' );
-
+
# Hand in your query as a scalar reference
# It will be added as a sub-select after FROM,
# so pay attention to the surrounding brackets!
$new_source->name( \<<SQL );
- ( SELECT u.* FROM user u
- INNER JOIN user_friends f ON u.id = f.user_id
+ ( SELECT u.* FROM user u
+ INNER JOIN user_friends f ON u.id = f.user_id
WHERE f.friend_user_id = ?
- UNION
- SELECT u.* FROM user u
- INNER JOIN user_friends f ON u.id = f.friend_user_id
+ UNION
+ SELECT u.* FROM user u
+ INNER JOIN user_friends f ON u.id = f.friend_user_id
WHERE f.user_id = ? )
- SQL
+ SQL
# Finally, register your new ResultSource with your Schema
My::Schema->register_extra_source( 'UserFriendsComplex' => $new_source );
Next, you can execute your complex query using bind parameters like this:
- my $friends = [ $schema->resultset( 'UserFriendsComplex' )->search( {},
+ my $friends = [ $schema->resultset( 'UserFriendsComplex' )->search( {},
{
bind => [ 12345, 12345 ]
}
) ];
-
+
... and you'll get back a perfect L<DBIx::Class::ResultSet> (except, of course,
that you cannot modify the rows it contains, ie. cannot call L</update>,
L</delete>, ... on it).
If you prefer to have the definitions of these custom ResultSources in separate
-files (instead of stuffing all of them into the same resultset class), you can
-achieve the same with subclassing the resultset class and defining the
-ResultSource there:
+files (instead of stuffing all of them into the same ResultSource class), you
+can achieve the same with subclassing the ResultSource class and defining the
+new ResultSource there:
package My::Schema::Result::UserFriendsComplex;
- use My::Schema::Result::User;
use base qw/My::Schema::Result::User/;
__PACKAGE__->table('dummy'); # currently must be called before anything else
@@ -159,7 +158,7 @@
# Hand in your query as a scalar reference
# It will be added as a sub-select after FROM,
# so pay attention to the surrounding brackets!
- __PACKAGE__->name( \<<SQL );
+ __PACKAGE__->result_source_instance->name( \<<SQL );
( SELECT u.* FROM user u
INNER JOIN user_friends f ON u.id = f.user_id
WHERE f.friend_user_id = ?
@@ -169,6 +168,8 @@
WHERE f.user_id = ? )
SQL
+ 1;
+
TIMTOWDI.
=head2 Using specific columns
@@ -231,7 +232,7 @@
# Define accessor manually:
sub name_length { shift->get_column('name_length'); }
-
+
# Or use DBIx::Class::AccessorGroup:
__PACKAGE__->mk_group_accessors('column' => 'name_length');
@@ -242,7 +243,7 @@
{
columns => [ qw/artist_id name rank/ ],
distinct => 1
- }
+ }
);
my $rs = $schema->resultset('Artist')->search(
@@ -279,7 +280,7 @@
my $count = $rs->count;
# Equivalent SQL:
- # SELECT COUNT( * ) FROM (SELECT me.name FROM artist me GROUP BY me.name) count_subq:
+ # SELECT COUNT( * ) FROM (SELECT me.name FROM artist me GROUP BY me.name) count_subq:
=head2 Grouping results
@@ -359,7 +360,7 @@
=head2 Predefined searches
You can write your own L<DBIx::Class::ResultSet> class by inheriting from it
-and define often used searches as methods:
+and defining often used searches as methods:
package My::DBIC::ResultSet::CD;
use strict;
@@ -439,7 +440,7 @@
my $rs = $schema->resultset('CD')->search(
{
- 'artists.name' => 'Bob Marley'
+ 'artists.name' => 'Bob Marley'
},
{
join => 'artists', # join the artist table
@@ -452,7 +453,7 @@
# WHERE artist.name = 'Bob Marley'
In that example both the join, and the condition use the relationship name rather than the table name
-(see DBIx::Class::Manual::Joining for more details on aliasing ).
+(see L<DBIx::Class::Manual::Joining> for more details on aliasing ).
If required, you can now sort on any column in the related tables by including
it in your C<order_by> attribute, (again using the aliased relation name rather than table name) :
@@ -673,7 +674,7 @@
my $schema = $cd->result_source->schema;
# use the schema as normal:
- my $artist_rs = $schema->resultset('Artist');
+ my $artist_rs = $schema->resultset('Artist');
This can be useful when you don't want to pass around a Schema object to every
method.
@@ -693,7 +694,7 @@
=head2 Stringification
-Employ the standard stringification technique by using the C<overload>
+Employ the standard stringification technique by using the L<overload>
module.
To make an object stringify itself as a single column, use something
@@ -741,17 +742,17 @@
# do whatever else you wanted if it was a new row
}
-=head2 Static sub-classing DBIx::Class result classes
+=head2 Static sub-classing DBIx::Class result classes
AKA adding additional relationships/methods/etc. to a model for a
specific usage of the (shared) model.
-B<Schema definition>
-
- package My::App::Schema;
-
- use base DBIx::Class::Schema;
+B<Schema definition>
+ package My::App::Schema;
+
+ use base DBIx::Class::Schema;
+
# load subclassed classes from My::App::Schema::Result/ResultSet
__PACKAGE__->load_namespaces;
@@ -763,35 +764,35 @@
/]});
1;
-
-B<Result-Subclass definition>
-
+
+B<Result-Subclass definition>
+
package My::App::Schema::Result::Baz;
-
- use strict;
- use warnings;
- use base My::Shared::Model::Result::Baz;
-
+
+ use strict;
+ use warnings;
+ use base My::Shared::Model::Result::Baz;
+
# WARNING: Make sure you call table() again in your subclass,
# otherwise DBIx::Class::ResultSourceProxy::Table will not be called
# and the class name is not correctly registered as a source
- __PACKAGE__->table('baz');
-
- sub additional_method {
- return "I'm an additional method only needed by this app";
+ __PACKAGE__->table('baz');
+
+ sub additional_method {
+ return "I'm an additional method only needed by this app";
}
1;
-
-=head2 Dynamic Sub-classing DBIx::Class proxy classes
+=head2 Dynamic Sub-classing DBIx::Class proxy classes
+
AKA multi-class object inflation from one table
-
+
L<DBIx::Class> classes are proxy classes, therefore some different
techniques need to be employed for more than basic subclassing. In
this example we have a single user table that carries a boolean bit
for admin. We would like like to give the admin users
-objects(L<DBIx::Class::Row>) the same methods as a regular user but
+objects (L<DBIx::Class::Row>) the same methods as a regular user but
also special admin only methods. It doesn't make sense to create two
seperate proxy-class files for this. We would be copying all the user
methods into the Admin class. There is a cleaner way to accomplish
@@ -803,128 +804,128 @@
grab the object being returned, inspect the values we are looking for,
bless it if it's an admin object, and then return it. See the example
below:
-
-B<Schema Definition>
-
- package My::Schema;
-
- use base qw/DBIx::Class::Schema/;
-
+
+B<Schema Definition>
+
+ package My::Schema;
+
+ use base qw/DBIx::Class::Schema/;
+
__PACKAGE__->load_namespaces;
1;
-
-
-B<Proxy-Class definitions>
-
- package My::Schema::Result::User;
-
- use strict;
- use warnings;
- use base qw/DBIx::Class/;
-
- ### Defined what our admin class is for ensure_class_loaded
- my $admin_class = __PACKAGE__ . '::Admin';
-
- __PACKAGE__->load_components(qw/Core/);
-
- __PACKAGE__->table('users');
-
- __PACKAGE__->add_columns(qw/user_id email password
- firstname lastname active
- admin/);
-
- __PACKAGE__->set_primary_key('user_id');
-
- sub inflate_result {
- my $self = shift;
- my $ret = $self->next::method(@_);
- if( $ret->admin ) {### If this is an admin rebless for extra functions
- $self->ensure_class_loaded( $admin_class );
- bless $ret, $admin_class;
- }
- return $ret;
- }
-
- sub hello {
- print "I am a regular user.\n";
- return ;
- }
-
+
+
+B<Proxy-Class definitions>
+
+ package My::Schema::Result::User;
+
+ use strict;
+ use warnings;
+ use base qw/DBIx::Class/;
+
+ ### Define what our admin class is, for ensure_class_loaded()
+ my $admin_class = __PACKAGE__ . '::Admin';
+
+ __PACKAGE__->load_components(qw/Core/);
+
+ __PACKAGE__->table('users');
+
+ __PACKAGE__->add_columns(qw/user_id email password
+ firstname lastname active
+ admin/);
+
+ __PACKAGE__->set_primary_key('user_id');
+
+ sub inflate_result {
+ my $self = shift;
+ my $ret = $self->next::method(@_);
+ if( $ret->admin ) {### If this is an admin, rebless for extra functions
+ $self->ensure_class_loaded( $admin_class );
+ bless $ret, $admin_class;
+ }
+ return $ret;
+ }
+
+ sub hello {
+ print "I am a regular user.\n";
+ return ;
+ }
+
1;
-
- package My::Schema::Result::User::Admin;
-
- use strict;
- use warnings;
- use base qw/My::Schema::Result::User/;
+ package My::Schema::Result::User::Admin;
+
+ use strict;
+ use warnings;
+ use base qw/My::Schema::Result::User/;
+
# This line is important
__PACKAGE__->table('users');
-
- sub hello
- {
- print "I am an admin.\n";
- return;
- }
-
- sub do_admin_stuff
- {
- print "I am doing admin stuff\n";
- return ;
+
+ sub hello
+ {
+ print "I am an admin.\n";
+ return;
}
+ sub do_admin_stuff
+ {
+ print "I am doing admin stuff\n";
+ return ;
+ }
+
1;
-
-B<Test File> test.pl
-
- use warnings;
- use strict;
- use My::Schema;
-
- my $user_data = { email => 'someguy@place.com',
- password => 'pass1',
- admin => 0 };
-
- my $admin_data = { email => 'someadmin@adminplace.com',
- password => 'pass2',
- admin => 1 };
-
- my $schema = My::Schema->connection('dbi:Pg:dbname=test');
-
- $schema->resultset('User')->create( $user_data );
- $schema->resultset('User')->create( $admin_data );
-
- ### Now we search for them
- my $user = $schema->resultset('User')->single( $user_data );
- my $admin = $schema->resultset('User')->single( $admin_data );
-
- print ref $user, "\n";
- print ref $admin, "\n";
-
- print $user->password , "\n"; # pass1
- print $admin->password , "\n";# pass2; inherited from User
- print $user->hello , "\n";# I am a regular user.
- print $admin->hello, "\n";# I am an admin.
-
- ### The statement below will NOT print
- print "I can do admin stuff\n" if $user->can('do_admin_stuff');
- ### The statement below will print
- print "I can do admin stuff\n" if $admin->can('do_admin_stuff');
+B<Test File> test.pl
+
+ use warnings;
+ use strict;
+ use My::Schema;
+
+ my $user_data = { email => 'someguy@place.com',
+ password => 'pass1',
+ admin => 0 };
+
+ my $admin_data = { email => 'someadmin@adminplace.com',
+ password => 'pass2',
+ admin => 1 };
+
+ my $schema = My::Schema->connection('dbi:Pg:dbname=test');
+
+ $schema->resultset('User')->create( $user_data );
+ $schema->resultset('User')->create( $admin_data );
+
+ ### Now we search for them
+ my $user = $schema->resultset('User')->single( $user_data );
+ my $admin = $schema->resultset('User')->single( $admin_data );
+
+ print ref $user, "\n";
+ print ref $admin, "\n";
+
+ print $user->password , "\n"; # pass1
+ print $admin->password , "\n";# pass2; inherited from User
+ print $user->hello , "\n";# I am a regular user.
+ print $admin->hello, "\n";# I am an admin.
+
+ ### The statement below will NOT print
+ print "I can do admin stuff\n" if $user->can('do_admin_stuff');
+ ### The statement below will print
+ print "I can do admin stuff\n" if $admin->can('do_admin_stuff');
+
=head2 Skip row object creation for faster results
DBIx::Class is not built for speed, it's built for convenience and
ease of use, but sometimes you just need to get the data, and skip the
fancy objects.
-
+
To do this simply use L<DBIx::Class::ResultClass::HashRefInflator>.
-
+
my $rs = $schema->resultset('CD');
-
+
$rs->result_class('DBIx::Class::ResultClass::HashRefInflator');
-
+
my $hash_ref = $rs->find(1);
Wasn't that easy?
@@ -968,7 +969,7 @@
my $rs = $schema->resultset('Items')->search(
{},
- {
+ {
select => [ { sum => 'Cost' } ],
as => [ 'total_cost' ], # remember this 'as' is for DBIx::Class::ResultSet not SQL
}
@@ -997,7 +998,7 @@
print $c;
}
-C<ResultSetColumn> only has a limited number of built-in functions, if
+C<ResultSetColumn> only has a limited number of built-in functions. If
you need one that it doesn't have, then you can use the C<func> method
instead:
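As a hedged illustration (the resultset and column name are assumed; any SQL
function your database supports can be named):

    # applies AVG() to the selected column; returns a single value in scalar context
    my $avg_cost = $rs->get_column('Cost')->func('AVG');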
@@ -1012,7 +1013,7 @@
=head2 Creating a result set from a set of rows
-Sometimes you have a (set of) row objects that you want to put into a
+Sometimes you have a (set of) row objects that you want to put into a
resultset without the need to hit the DB again. You can do that by using the
L<set_cache|DBIx::Class::ResultSet/set_cache> method:
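A minimal sketch of the idea (the C<CD> source and search condition are
illustrative only):

    my @rows = $schema->resultset('CD')->search({ year => 2000 })->all;

    my $cached_rs = $schema->resultset('CD');
    $cached_rs->set_cache(\@rows);   # iterating $cached_rs now skips the DB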
@@ -1047,7 +1048,7 @@
=head2 Ordering a relationship result set
-If you always want a relation to be ordered, you can specify this when you
+If you always want a relation to be ordered, you can specify this when you
create the relationship.
To order C<< $book->pages >> by descending page_number, create the relation
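A hedged sketch of such a relationship definition (the class and column names
are placeholders):

    # in the Book result class
    __PACKAGE__->has_many(
        'pages' => 'MyDB::Schema::Result::Page',
        'book',
        { order_by => { -desc => 'page_number' } },
    );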
@@ -1108,9 +1109,9 @@
package MyDatabase::Main::Artist;
use base qw/DBIx::Class/;
__PACKAGE__->load_components(qw/PK::Auto Core/);
-
+
__PACKAGE__->table('database1.artist'); # will use "database1.artist" in FROM clause
-
+
__PACKAGE__->add_columns(qw/ artist_id name /);
__PACKAGE__->set_primary_key('artist_id');
__PACKAGE__->has_many('cds' => 'MyDatabase::Main::Cd');
@@ -1131,16 +1132,16 @@
package MyDatabase::Schema;
use Moose;
-
+
extends 'DBIx::Class::Schema';
-
+
around connection => sub {
my ( $inner, $self, $dsn, $username, $pass, $attr ) = ( shift, @_ );
-
+
my $postfix = delete $attr->{schema_name_postfix};
-
+
$inner->(@_);
-
+
if ( $postfix ) {
$self->append_db_name($postfix);
}
@@ -1148,18 +1149,18 @@
sub append_db_name {
my ( $self, $postfix ) = @_;
-
- my @sources_with_db
- = grep
- { $_->name =~ /^\w+\./mx }
- map
- { $self->source($_) }
+
+ my @sources_with_db
+ = grep
+ { $_->name =~ /^\w+\./mx }
+ map
+ { $self->source($_) }
$self->sources;
-
+
foreach my $source (@sources_with_db) {
my $name = $source->name;
$name =~ s{^(\w+)\.}{${1}${postfix}\.}mx;
-
+
$source->name($name);
}
}
@@ -1171,17 +1172,17 @@
then simply iterate over all the Schema's ResultSources, renaming them as
needed.
-To use this facility, simply add or modify the \%attr hashref that is passed to
+To use this facility, simply add or modify the \%attr hashref that is passed to
L<connection|DBIx::Class::Schema/connect>, as follows:
- my $schema
+ my $schema
= MyDatabase::Schema->connect(
- $dsn,
- $user,
+ $dsn,
+ $user,
$pass,
{
schema_name_postfix => '_dev'
- # ... Other options as desired ...
+ # ... Other options as desired ...
})
Obviously, one could accomplish even more advanced mapping via a hash map or a
@@ -1227,14 +1228,14 @@
transactions (for databases that support them) will hopefully be added
in the future.
-=head1 SQL
+=head1 SQL
=head2 Creating Schemas From An Existing Database
-L<DBIx::Class::Schema::Loader> will connect to a database and create a
+L<DBIx::Class::Schema::Loader> will connect to a database and create a
L<DBIx::Class::Schema> and associated sources by examining the database.
-The recommend way of achieving this is to use the
+The recommended way of achieving this is to use the
L<make_schema_at|DBIx::Class::Schema::Loader/make_schema_at> method:
perl -MDBIx::Class::Schema::Loader=make_schema_at,dump_to_dir:./lib \
@@ -1296,7 +1297,7 @@
your database.
Make a table class as you would for any other table
-
+
package MyAppDB::Dual;
use strict;
use warnings;
@@ -1307,34 +1308,34 @@
"dummy",
{ data_type => "VARCHAR2", is_nullable => 0, size => 1 },
);
-
+
Once you've loaded your table class select from it using C<select>
and C<as> instead of C<columns>
-
+
my $rs = $schema->resultset('Dual')->search(undef,
{ select => [ 'sydate' ],
as => [ 'now' ]
},
);
-
+
All you have to do now is be careful how you access your resultset; the code below
will not work because there is no column called 'now' in the Dual table class:
-
+
while (my $dual = $rs->next) {
print $dual->now."\n";
}
# Can't locate object method "now" via package "MyAppDB::Dual" at headshot.pl line 23.
-
+
You could of course use 'dummy' in C<as> instead of 'now', or C<add_columns> to
your Dual class for whatever you wanted to select from dual, but that's just
silly; instead, use C<get_column>:
-
+
while (my $dual = $rs->next) {
print $dual->get_column('now')."\n";
}
-
+
Or use C<cursor>
-
+
my $cursor = $rs->cursor;
while (my @vals = $cursor->next) {
print $vals[0]."\n";
@@ -1351,48 +1352,48 @@
parser_args => { sources => [ grep $_ ne 'Dual', $schema->sources ] },
};
$schema->create_ddl_dir( [qw/Oracle/], undef, './sql', undef, $sqlt_args );
-
+
Or use L<DBIx::Class::ResultClass::HashRefInflator>
-
+
$rs->result_class('DBIx::Class::ResultClass::HashRefInflator');
while ( my $dual = $rs->next ) {
print $dual->{now}."\n";
}
-
+
Here are some example C<select> conditions to illustrate the different syntax
-you could use for doing stuff like
+you could use for doing stuff like
C<oracles.heavily(nested(functions_can('take', 'lots'), OF), 'args')>
-
+
# get a sequence value
select => [ 'A_SEQ.nextval' ],
-
+
# get create table sql
select => [ { 'dbms_metadata.get_ddl' => [ "'TABLE'", "'ARTIST'" ]} ],
-
+
# get a random num between 0 and 100
select => [ { "trunc" => [ { "dbms_random.value" => [0,100] } ]} ],
-
+
# what year is it?
select => [ { 'extract' => [ \'year from sysdate' ] } ],
-
+
# do some math
select => [ {'round' => [{'cos' => [ \'180 * 3.14159265359/180' ]}]}],
-
+
# which day of the week were you born on?
select => [{'to_char' => [{'to_date' => [ "'25-DEC-1980'", "'dd-mon-yyyy'" ]}, "'day'"]}],
-
+
# select 16 rows from dual
select => [ "'hello'" ],
as => [ 'world' ],
group_by => [ 'cube( 1, 2, 3, 4 )' ],
-
-
+
+
=head2 Adding Indexes And Functions To Your SQL
Often you will want indexes on columns on your table to speed up searching. To
-do this, create a method called C<sqlt_deploy_hook> in the relevant source
-class (refer to the advanced
+do this, create a method called C<sqlt_deploy_hook> in the relevant source
+class (refer to the advanced
L<callback system|DBIx::Class::ResultSource/sqlt_deploy_callback> if you wish
to share a hook between multiple sources):
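A minimal sketch of such a hook (the index and column names are assumptions):

    sub sqlt_deploy_hook {
        my ($self, $sqlt_table) = @_;

        # add a plain index on the 'name' column at deploy time
        $sqlt_table->add_index(
            name   => 'idx_name',
            fields => [ 'name' ],
        );
    }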
@@ -1409,13 +1410,13 @@
1;
-Sometimes you might want to change the index depending on the type of the
+Sometimes you might want to change the index depending on the type of the
database for which SQL is being generated:
(my $db_type = $sqlt_table->schema->translator->producer_type)
=~ s/^SQL::Translator::Producer:://;
-You can also add hooks to the schema level to stop certain tables being
+You can also add hooks to the schema level to stop certain tables being
created:
package My::Schema;
@@ -1508,7 +1509,7 @@
Alternatively, you can send the conversion sql scripts to your
customers as above.
-=head2 Setting quoting for the generated SQL.
+=head2 Setting quoting for the generated SQL.
If the database contains column names with spaces and/or reserved words, they
need to be quoted in the SQL queries. This is done using:
@@ -1518,14 +1519,14 @@
The first sets the quote characters. Either a pair of matching
brackets, or a C<"> or C<'>:
-
+
__PACKAGE__->storage->sql_maker->quote_char('"');
Check the documentation of your database for the correct quote
characters to use. C<name_sep> needs to be set to allow the SQL
generator to put the quotes in the correct place.
-In most cases you should set these as part of the arguments passed to
+In most cases you should set these as part of the arguments passed to
L<DBIx::Class::Schema/connect>:
my $schema = My::Schema->connect(
@@ -1553,7 +1554,7 @@
The JDBC bridge is one way of getting access to an MSSQL server from a platform
for which Microsoft doesn't deliver native client libraries (e.g. Linux).
-The limit dialect can also be set at connect time by specifying a
+The limit dialect can also be set at connect time by specifying a
C<limit_dialect> key in the final hash as shown above.
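A hedged sketch of setting these options at connect time (bracket quoting and
the C<Top> limit dialect are typical for MSSQL, but check your driver's docs):

    my $schema = My::Schema->connect(
        $dsn, $user, $pass,
        {
            quote_char    => [qw/[ ]/],
            name_sep      => '.',
            limit_dialect => 'Top',
        },
    );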
=head2 Working with PostgreSQL array types
@@ -1594,7 +1595,7 @@
arrayrefs together with the column name, like this: C<< [column_name => value]
>>.
-=head1 BOOTSTRAPPING/MIGRATING
+=head1 BOOTSTRAPPING/MIGRATING
=head2 Easy migration from class-based to schema-based setup
@@ -1605,10 +1606,10 @@
use MyDB;
use SQL::Translator;
-
+
my $schema = MyDB->schema_instance;
-
- my $translator = SQL::Translator->new(
+
+ my $translator = SQL::Translator->new(
debug => $debug || 0,
trace => $trace || 0,
no_comments => $no_comments || 0,
@@ -1622,13 +1623,13 @@
'prefix' => 'My::Schema',
},
);
-
+
$translator->parser('SQL::Translator::Parser::DBIx::Class');
$translator->producer('SQL::Translator::Producer::DBIx::Class::File');
-
+
my $output = $translator->translate(@args) or die
"Error: " . $translator->error;
-
+
print $output;
You could use L<Module::Find> to search for all subclasses in the MyDB::*
@@ -1657,16 +1658,16 @@
return $new;
}
-For more information about C<next::method>, look in the L<Class::C3>
+For more information about C<next::method>, look in the L<Class::C3>
documentation. See also L<DBIx::Class::Manual::Component> for more
ways to write your own base classes to do this.
People looking for ways to do "triggers" with DBIx::Class are probably
-just looking for this.
+just looking for this.
=head2 Changing one field whenever another changes
-For example, say that you have three columns, C<id>, C<number>, and
+For example, say that you have three columns, C<id>, C<number>, and
C<squared>. You would like to make changes to C<number> and have
C<squared> be automagically set to the value of C<number> squared.
You can accomplish this by overriding C<store_column>:
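A minimal sketch of the override described above (column names as in the
example):

    sub store_column {
        my ( $self, $name, $value ) = @_;

        # keep 'squared' in sync whenever 'number' is written
        $self->squared( $value * $value )
            if $name eq 'number';

        $self->next::method( $name, $value );
    }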
@@ -1684,7 +1685,7 @@
=head2 Automatically creating related objects
-You might have a class C<Artist> which has many C<CD>s. Further, if you
+You might have a class C<Artist> which has many C<CD>s. Further, you
want to create a C<CD> object every time you insert an C<Artist> object.
You can accomplish this by overriding C<insert> on your objects:
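A hedged sketch of such an override (the C<cds> relationship and the initial
column values are placeholders):

    sub insert {
        my ( $self, @args ) = @_;

        $self->next::method(@args);

        # create one related CD row for every freshly inserted Artist
        $self->create_related( cds => { title => 'Debut', year => 2009 } );

        return $self;
    }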
@@ -1881,7 +1882,7 @@
If this preamble is moved into a common base class:-
package MyDBICbase;
-
+
use base qw/DBIx::Class/;
__PACKAGE__->load_components(qw/InflateColumn::DateTime Core/);
1;
@@ -1902,7 +1903,7 @@
to load the result classes. This will use L<Module::Find|Module::Find>
to find and load the appropriate modules. Explicitly defining the
classes you wish to load will remove the overhead of
-L<Module::Find|Module::Find> and the related directory operations:-
+L<Module::Find|Module::Find> and the related directory operations:
__PACKAGE__->load_classes(qw/ CD Artist Track /);
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/Example.pod
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/Example.pod 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/Example.pod 2009-08-03 08:13:39 UTC (rev 7171)
@@ -43,7 +43,7 @@
CREATE TABLE artist (
artistid INTEGER PRIMARY KEY,
- name TEXT NOT NULL
+ name TEXT NOT NULL
);
CREATE TABLE cd (
@@ -60,7 +60,7 @@
and create the sqlite database file:
-sqlite3 example.db < example.sql
+ sqlite3 example.db < example.sql
=head3 Set up DBIx::Class::Schema
@@ -78,7 +78,7 @@
Then, create the following DBIx::Class::Schema classes:
MyDatabase/Main.pm:
-
+
package MyDatabase::Main;
use base qw/DBIx::Class::Schema/;
__PACKAGE__->load_namespaces;
@@ -90,7 +90,7 @@
package MyDatabase::Main::Result::Artist;
use base qw/DBIx::Class/;
- __PACKAGE__->load_components(qw/PK::Auto Core/);
+ __PACKAGE__->load_components(qw/Core/);
__PACKAGE__->table('artist');
__PACKAGE__->add_columns(qw/ artistid name /);
__PACKAGE__->set_primary_key('artistid');
@@ -103,7 +103,7 @@
package MyDatabase::Main::Result::Cd;
use base qw/DBIx::Class/;
- __PACKAGE__->load_components(qw/PK::Auto Core/);
+ __PACKAGE__->load_components(qw/Core/);
__PACKAGE__->table('cd');
__PACKAGE__->add_columns(qw/ cdid artist title/);
__PACKAGE__->set_primary_key('cdid');
@@ -117,7 +117,7 @@
package MyDatabase::Main::Result::Track;
use base qw/DBIx::Class/;
- __PACKAGE__->load_components(qw/PK::Auto Core/);
+ __PACKAGE__->load_components(qw/Core/);
__PACKAGE__->table('track');
__PACKAGE__->add_columns(qw/ trackid cd title/);
__PACKAGE__->set_primary_key('trackid');
@@ -137,7 +137,7 @@
my $schema = MyDatabase::Main->connect('dbi:SQLite:db/example.db');
- # here's some of the sql that is going to be generated by the schema
+ # here's some of the SQL that is going to be generated by the schema
# INSERT INTO artist VALUES (NULL,'Michael Jackson');
# INSERT INTO artist VALUES (NULL,'Eminem');
@@ -248,8 +248,8 @@
}
print "\n";
}
-
-
+
+
sub get_cd_by_track {
my $tracktitle = shift;
print "get_cd_by_track($tracktitle):\n";
@@ -264,7 +264,7 @@
my $cd = $rs->first;
print $cd->title . "\n\n";
}
-
+
sub get_cds_by_artist {
my $artistname = shift;
print "get_cds_by_artist($artistname):\n";
@@ -349,20 +349,20 @@
A reference implementation of the database and scripts in this example
is available in the main distribution for DBIx::Class under the
-directory t/examples/Schema
+directory F<t/examples/Schema>.
With these scripts we're relying on @INC looking in the current
working directory. You may want to add the MyDatabase namespaces to
@INC in a different way when it comes to deployment.
-The testdb.pl script is an excellent start for testing your database
+The F<testdb.pl> script is an excellent start for testing your database
model.
-This example uses load_namespaces to load in the appropriate Row classes
-from the MyDatabase::Main::Result namespace, and any required resultset
-classes from the MyDatabase::Main::ResultSet namespace (although we
-created the directory in the directions above we did not add, or need to
-add, any resultset classes).
+This example uses L<DBIx::Class::Schema/load_namespaces> to load in the
+appropriate L<Row|DBIx::Class::Row> classes from the MyDatabase::Main::Result namespace,
+and any required resultset classes from the MyDatabase::Main::ResultSet
+namespace (although we created the directory in the directions above we
+did not add, or need to add, any resultset classes).
=head1 TODO
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/FAQ.pod
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/FAQ.pod 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/FAQ.pod 2009-08-03 08:13:39 UTC (rev 7171)
@@ -87,7 +87,7 @@
to connect with rights to read/write all the schemas/tables as
necessary.
-=back
+=back
=head2 Relationships
@@ -112,7 +112,7 @@
Create a C<belongs_to> relationship for the field containing the
foreign key. See L<DBIx::Class::Relationship/belongs_to>.
-=item .. define a foreign key relationship where the key field may contain NULL?
+=item .. define a foreign key relationship where the key field may contain NULL?
Just create a C<belongs_to> relationship, as above. If the column is
NULL then the inflation to the foreign object will not happen. This
@@ -307,8 +307,8 @@
=item .. fetch a whole column of data instead of a row?
-Call C<get_column> on a L<DBIx::Class::ResultSet>, this returns a
-L<DBIx::Class::ResultSetColumn>, see it's documentation and the
+Call C<get_column> on a L<DBIx::Class::ResultSet>. This returns a
+L<DBIx::Class::ResultSetColumn>. See its documentation and the
L<Cookbook|DBIx::Class::Manual::Cookbook> for details.
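A minimal sketch (source and column names are illustrative):

    my @names = $schema->resultset('Artist')->get_column('name')->all;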
=item .. fetch a formatted column?
@@ -324,22 +324,17 @@
=item .. fetch a single (or topmost) row?
-Sometimes you many only want a single record back from a search. A quick
-way to get that single row is to first run your search as usual:
+See L<DBIx::Class::Manual::Cookbook/Retrieve_one_and_only_one_row_from_a_resultset>.
- ->search->(undef, { order_by => "id DESC" })
+A less readable way is to ask a regular search to return 1 row, using
+L<DBIx::Class::ResultSet/slice>:
-Then call L<DBIx::Class::ResultSet/slice> and ask it only to return 1 row:
-
- ->slice(0)
-
-These two calls can be combined into a single statement:
-
 ->search(undef, { order_by => "id DESC" })->slice(0)
-Why slice instead of L<DBIx::Class::ResultSet/first> or L<DBIx::Class::ResultSet/single>?
-If supported by the database, slice will use LIMIT/OFFSET to hint to the database that we
-really only need one row. This can result in a significant speed improvement.
+which (if supported by the database) will use LIMIT/OFFSET to hint to the
+database that we really only need one row. This can result in a significant
+speed improvement. The method using L<DBIx::Class::ResultSet/single> mentioned
+in the cookbook can do the same if you pass a C<rows> attribute to the search.
=item .. refresh a row from storage?
@@ -410,17 +405,17 @@
But note that when using a scalar reference the column in the database
will be updated but when you read the value from the object with e.g.
-
+
->somecolumn()
-
+
you still get back the scalar reference to the string, B<not> the new
value in the database. To get that you must refresh the row from storage
using C<discard_changes()>. Or chain your function calls like this:
->update->discard_changes
-
- to update the database and refresh the object in one step.
-
+
+to update the database and refresh the object in one step.
+
=item .. store JSON/YAML in a column and have it deflate/inflate automatically?
You can use L<DBIx::Class::InflateColumn> to accomplish YAML/JSON storage transparently.
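A hedged sketch using L<DBIx::Class::InflateColumn/inflate_column> directly
(the C<data> column and the L<JSON> module are assumptions):

    use JSON;

    __PACKAGE__->inflate_column( 'data', {
        inflate => sub { decode_json( shift ) },
        deflate => sub { encode_json( shift ) },
    });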
@@ -474,7 +469,7 @@
package MyTable;
use Moose; # import Moose
- use Moose::Util::TypeConstraint; # import Moose accessor type constraints
+ use Moose::Util::TypeConstraint; # import Moose accessor type constraints
extends 'DBIx::Class'; # Moose changes the way we define our parent (base) package
@@ -486,7 +481,7 @@
my $row;
- # assume that some where in here $row will get assigned to a MyTable row
+ # assume that somewhere in here $row will get assigned to a MyTable row
$row->non_column_data('some string'); # would set the non_column_data accessor
@@ -494,7 +489,7 @@
$row->update(); # would not inline the non_column_data accessor into the update
-
+
=item How do I use DBIx::Class objects in my TT templates?
Like normal objects, mostly. However you need to watch out for TT
@@ -536,7 +531,7 @@
=item How do I reduce the overhead of database queries?
You can reduce the overhead of object creation within L<DBIx::Class>
-using the tips in L<DBIx::Class::Manual::Cookbook/"Skip row object creation for faster results">
+using the tips in L<DBIx::Class::Manual::Cookbook/"Skip row object creation for faster results">
and L<DBIx::Class::Manual::Cookbook/"Get raw data for blindingly fast results">
=back
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/Intro.pod
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/Intro.pod 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Manual/Intro.pod 2009-08-03 08:13:39 UTC (rev 7171)
@@ -11,7 +11,7 @@
=head1 THE DBIx::Class WAY
Here are a few simple tips that will help you get your bearings with
-DBIx::Class.
+DBIx::Class.
=head2 Tables become Result classes
@@ -29,7 +29,7 @@
=head2 It's all about the ResultSet
So, we've got some ResultSources defined. Now, we want to actually use those
-definitions to help us translate the queries we need into handy perl objects!
+definitions to help us translate the queries we need into handy perl objects!
Let's say we defined a ResultSource for an "album" table with three columns:
"albumid", "artist", and "title". Any time we want to query this table, we'll
@@ -39,18 +39,18 @@
SELECT albumid, artist, title FROM album;
Would be retrieved by creating a ResultSet object from the album table's
-ResultSource, likely by using the "search" method.
+ResultSource, likely by using the "search" method.
DBIx::Class doesn't limit you to creating only simple ResultSets -- if you
wanted to do something like:
SELECT title FROM album GROUP BY title;
-You could easily achieve it.
+You could easily achieve it.
-The important thing to understand:
+The important thing to understand:
- Any time you would reach for a SQL query in DBI, you are
+ Any time you would reach for a SQL query in DBI, you are
creating a DBIx::Class::ResultSet.
=head2 Search is like "prepare"
@@ -109,13 +109,10 @@
Load any components required by each class with the load_components() method.
This should consist of "Core" plus any additional components you want to use.
-For example, if you want serial/auto-incrementing primary keys:
+For example, if you want to force columns to use UTF-8 encoding:
- __PACKAGE__->load_components(qw/ PK::Auto Core /);
+ __PACKAGE__->load_components(qw/ ForceUTF8 Core /);
-C<PK::Auto> is supported for many databases; see L<DBIx::Class::Storage::DBI>
-for more information.
-
Set the table for your class:
__PACKAGE__->table('album');
@@ -142,7 +139,7 @@
is_auto_increment => 0,
default_value => '',
},
- title =>
+ title =>
{ data_type => 'varchar',
size => 256,
is_nullable => 0,
@@ -176,7 +173,8 @@
make a predefined accessor for fetching objects that contain this Table's
foreign key:
- __PACKAGE__->has_many('albums', 'My::Schema::Result::Artist', 'album_id');
+ # in My::Schema::Result::Artist
+ __PACKAGE__->has_many('albums', 'My::Schema::Result::Album', 'artist');
See L<DBIx::Class::Relationship> for more information about the various types of
available relationships and how you can design your own.
@@ -273,7 +271,7 @@
returns an instance of C<My::Schema::Result::Album> that can be used to access the data
in the new record:
- my $new_album = $schema->resultset('Album')->create({
+ my $new_album = $schema->resultset('Album')->create({
title => 'Wish You Were Here',
artist => 'Pink Floyd'
});
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Ordered.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Ordered.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Ordered.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -60,20 +60,20 @@
#!/usr/bin/perl
use My::Item;
-
+
my $item = My::Item->create({ name=>'Matt S. Trout' });
# If using grouping_column:
my $item = My::Item->create({ name=>'Matt S. Trout', group_id=>1 });
-
+
my $rs = $item->siblings();
my @siblings = $item->siblings();
-
+
my $sibling;
$sibling = $item->first_sibling();
$sibling = $item->last_sibling();
$sibling = $item->previous_sibling();
$sibling = $item->next_sibling();
-
+
$item->move_previous();
$item->move_next();
$item->move_first();
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/PK.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/PK.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/PK.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -42,16 +42,16 @@
my ($self, $attrs) = @_;
delete $self->{_dirty_columns};
return unless $self->in_storage; # Don't reload if we aren't real!
-
+
if( my $current_storage = $self->get_from_storage($attrs)) {
-
+
# Set $self to the current.
%$self = %$current_storage;
-
+
# Avoid a possible infinite loop with
# sub DESTROY { $_[0]->discard_changes }
bless $current_storage, 'Do::Not::Exist';
-
+
return $self;
} else {
$self->in_storage(0);
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/Accessor.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/Accessor.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/Accessor.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -6,6 +6,11 @@
use Sub::Name ();
use Class::Inspector ();
+our %_pod_inherit_config =
+ (
+ class_map => { 'DBIx::Class::Relationship::Accessor' => 'DBIx::Class::Relationship' }
+ );
+
sub register_relationship {
my ($class, $rel, $info) = @_;
if (my $acc_type = $info->{attrs}{accessor}) {
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/Base.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/Base.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/Base.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -83,18 +83,18 @@
An arrayref containing a list of accessors in the foreign class to create in
the main class. If, for example, you do the following:
-
+
MyDB::Schema::CD->might_have(liner_notes => 'MyDB::Schema::LinerNotes',
undef, {
proxy => [ qw/notes/ ],
});
-
+
Then, assuming MyDB::Schema::LinerNotes has an accessor named notes, you can do:
my $cd = MyDB::Schema::CD->find(1);
$cd->notes('Notes go here'); # set notes -- LinerNotes object is
# created if it doesn't exist
-
+
=item accessor
Specifies the type of accessor that should be created for the relationship.
@@ -179,7 +179,7 @@
my $rel_info = $self->relationship_info($rel);
$self->throw_exception( "No such relationship ${rel}" )
unless $rel_info;
-
+
return $self->{related_resultsets}{$rel} ||= do {
my $attrs = (@_ > 1 && ref $_[$#_] eq 'HASH' ? pop(@_) : {});
$attrs = { %{$rel_info->{attrs} || {}}, %$attrs };
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/BelongsTo.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/BelongsTo.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/BelongsTo.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -7,6 +7,11 @@
use strict;
use warnings;
+our %_pod_inherit_config =
+ (
+ class_map => { 'DBIx::Class::Relationship::BelongsTo' => 'DBIx::Class::Relationship' }
+ );
+
sub belongs_to {
my ($class, $rel, $f_class, $cond, $attrs) = @_;
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/CascadeActions.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/CascadeActions.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/CascadeActions.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -4,6 +4,11 @@
use strict;
use warnings;
+our %_pod_inherit_config =
+ (
+ class_map => { 'DBIx::Class::Relationship::CascadeActions' => 'DBIx::Class::Relationship' }
+ );
+
sub delete {
my ($self, @rest) = @_;
return $self->next::method(@rest) unless ref $self;
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/HasMany.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/HasMany.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/HasMany.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -4,6 +4,11 @@
use strict;
use warnings;
+our %_pod_inherit_config =
+ (
+ class_map => { 'DBIx::Class::Relationship::HasMany' => 'DBIx::Class::Relationship' }
+ );
+
sub has_many {
my ($class, $rel, $f_class, $cond, $attrs) = @_;
@@ -35,7 +40,7 @@
$class->throw_exception(
"No such column ${f_key} on foreign class ${f_class} ($guess)"
) if $f_class_loaded && !$f_class->has_column($f_key);
-
+
$cond = { "foreign.${f_key}" => "self.${pri}" };
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/HasOne.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/HasOne.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/HasOne.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -4,6 +4,11 @@
use strict;
use warnings;
+our %_pod_inherit_config =
+ (
+ class_map => { 'DBIx::Class::Relationship::HasOne' => 'DBIx::Class::Relationship' }
+ );
+
sub might_have {
shift->_has_one('LEFT' => @_);
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/ManyToMany.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/ManyToMany.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/ManyToMany.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -7,6 +7,11 @@
use Carp::Clan qw/^DBIx::Class/;
use Sub::Name ();
+our %_pod_inherit_config =
+ (
+ class_map => { 'DBIx::Class::Relationship::ManyToMany' => 'DBIx::Class::Relationship' }
+ );
+
sub many_to_many {
my ($class, $meth, $rel, $f_rel, $rel_attrs) = @_;
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/ProxyMethods.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/ProxyMethods.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship/ProxyMethods.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -6,6 +6,11 @@
use Sub::Name ();
use base qw/DBIx::Class/;
+our %_pod_inherit_config =
+ (
+ class_map => { 'DBIx::Class::Relationship::ProxyMethods' => 'DBIx::Class::Relationship' }
+ );
+
sub register_relationship {
my ($class, $rel, $info) = @_;
if (my $proxy_list = $info->{attrs}{proxy}) {
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Relationship.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -106,7 +106,7 @@
All helper methods are called similar to the following template:
__PACKAGE__->$method_name('relname', 'Foreign::Class', \%cond | \@cond, \%attrs);
-
+
Both C<$cond> and C<$attrs> are optional. Pass C<undef> for C<$cond> if
you want to use the default value for it, but still want to set C<\%attrs>.
@@ -297,7 +297,7 @@
'My::DBIC::Schema::Book',
{ 'foreign.author_id' => 'self.id' },
);
-
+
# OR (similar result, assuming related_class is storing our PK, in "author")
# (the "author" is guessed at from "Author" in the class namespace)
My::DBIC::Schema::Author->has_many(
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSet.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSet.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSet.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -1315,9 +1315,12 @@
sub _switch_to_inner_join_if_needed {
my ($self, $from, $alias) = @_;
+ # subqueries and other oddness is naturally not supported
return $from if (
ref $from ne 'ARRAY'
||
+ @$from <= 1
+ ||
ref $from->[0] ne 'HASH'
||
! $from->[0]{-alias}
@@ -1325,10 +1328,6 @@
$from->[0]{-alias} eq $alias
);
- # this would be the case with a subquery - we'll never find
- # the target as it is not in the parseable part of {from}
- return $from if @$from == 1;
-
my $switch_branch;
JOINSCAN:
for my $j (@{$from}[1 .. $#$from]) {
@@ -3091,11 +3090,16 @@
=back
-Which column(s) to order the results by. If a single column name, or
-an arrayref of names is supplied, the argument is passed through
-directly to SQL. The hashref syntax allows for connection-agnostic
-specification of ordering direction:
+Which column(s) to order the results by.
+[The full list of suitable values is documented in
+L<SQL::Abstract/"ORDER BY CLAUSES">; the following is a summary of
+common options.]
+
+If a single column name, or an arrayref of names is supplied, the
+argument is passed through directly to SQL. The hashref syntax allows
+for connection-agnostic specification of ordering direction:
+
For descending order:
order_by => { -desc => [qw/col1 col2 col3/] }
@@ -3373,6 +3377,42 @@
B<NOTE:> If you specify a C<prefetch> attribute, the C<join> and C<select>
attributes will be ignored.
+B<CAVEATS>: Prefetch does a lot of deep magic. As such, it may not behave
+exactly as you might expect.
+
+=over 4
+
+=item *
+
+Prefetch uses the L</cache> to populate the prefetched relationships. This
+may or may not be what you want.
+
+=item *
+
+If you specify a condition on a prefetched relationship, ONLY those
+rows that match the prefetched condition will be fetched into that relationship.
+This means that adding prefetch to a search() B<may alter> what is returned by
+traversing a relationship. So, if you have C<< Artist->has_many(CDs) >> and you do
+
+ my $artist_rs = $schema->resultset('Artist')->search({
+ 'cds.year' => 2008,
+ }, {
+ join => 'cds',
+ });
+
+ my $count = $artist_rs->first->cds->count;
+
+ my $artist_rs_prefetch = $artist_rs->search( {}, { prefetch => 'cds' } );
+
+ my $prefetch_count = $artist_rs_prefetch->first->cds->count;
+
+ cmp_ok( $count, '==', $prefetch_count, "Counts should be the same" );
+
+that cmp_ok() may or may not pass depending on the datasets involved. This
+behavior may or may not survive the 0.09 transition.
+
+=back
+
=head2 page
=over 4
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSetColumn.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSetColumn.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSetColumn.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -338,7 +338,7 @@
sub func {
my ($self,$function) = @_;
my $cursor = $self->func_rs($function)->cursor;
-
+
if( wantarray ) {
return map { $_->[ 0 ] } $cursor->all;
}
@@ -373,9 +373,9 @@
=head2 throw_exception
See L<DBIx::Class::Schema/throw_exception> for details.
-
+
=cut
-
+
sub throw_exception {
my $self=shift;
if (ref $self && $self->{_parent_resultset}) {
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSource/View.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSource/View.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSource/View.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -17,7 +17,7 @@
=head1 SYNOPSIS
- package MyDB::Schema::Year2000CDs;
+ package MyDB::Schema::Result::Year2000CDs;
use DBIx::Class::ResultSource::View;
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSource.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSource.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSource.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -24,13 +24,76 @@
=head1 SYNOPSIS
+ # Create a table based result source, in a result class.
+
+ package MyDB::Schema::Result::Artist;
+ use base qw/DBIx::Class/;
+
+ __PACKAGE__->load_components(qw/Core/);
+ __PACKAGE__->table('artist');
+ __PACKAGE__->add_columns(qw/ artistid name /);
+ __PACKAGE__->set_primary_key('artistid');
+ __PACKAGE__->has_many(cds => 'MyDB::Schema::Result::CD');
+
+ 1;
+
+ # Create a query (view) based result source, in a result class
+ package MyDB::Schema::Result::Year2000CDs;
+
+ use DBIx::Class::ResultSource::View;
+
+ __PACKAGE__->load_components('Core');
+ __PACKAGE__->table_class('DBIx::Class::ResultSource::View');
+
+ __PACKAGE__->table('year2000cds');
+ __PACKAGE__->result_source_instance->is_virtual(1);
+ __PACKAGE__->result_source_instance->view_definition(
+ "SELECT cdid, artist, title FROM cd WHERE year ='2000'"
+ );
+
+
=head1 DESCRIPTION
-A ResultSource is a component of a schema from which results can be directly
-retrieved, most usually a table (see L<DBIx::Class::ResultSource::Table>)
+A ResultSource is an object that represents a source of data for querying.
-Basic view support also exists, see L<<DBIx::Class::ResultSource::View>.
+This class is a base class for various specialised types of result
+sources, for example L<DBIx::Class::ResultSource::Table>. Table is the
+default result source type, so one is created for you when defining a
+result class as described in the synopsis above.
+More specifically, the L<DBIx::Class::Core> component pulls in the
+L<DBIx::Class::ResultSourceProxy::Table> as a base class, which
+defines the L<table|DBIx::Class::ResultSourceProxy::Table/table>
+method. When called, C<table> creates and stores an instance of
+L<DBIx::Class::ResultSource::Table>. Luckily, to use tables as result
+sources, you don't need to remember any of this.
+
+Result sources representing select queries, or views, can also be
+created, see L<DBIx::Class::ResultSource::View> for full details.
+
+=head2 Finding result source objects
+
+As mentioned above, a result source instance is created and stored for
+you when you define a L<Result Class|DBIx::Class::Manual::Glossary/Result Class>.
+
+You can retrieve the result source at runtime in the following ways:
+
+=over
+
+=item From a Schema object:
+
+ $schema->source($source_name);
+
+=item From a Row object:
+
+ $row->result_source;
+
+=item From a ResultSet object:
+
+ $rs->result_source;
+
+=back
+
=head1 METHODS
=pod
@@ -69,9 +132,9 @@
$source->add_columns('col1' => \%col1_info, 'col2' => \%col2_info, ...);
-Adds columns to the result source. If supplied key => hashref pairs, uses
-the hashref as the column_info for that column. Repeated calls of this
-method will add more columns, not replace them.
+Adds columns to the result source. If supplied colname => hashref
+pairs, uses the hashref as the L</column_info> for that column. Repeated
+calls of this method will add more columns, not replace them.
The column names given will be created as accessor methods on your
L<DBIx::Class::Row> objects. You can change the name of the accessor
@@ -84,40 +147,62 @@
=item accessor
+ { accessor => '_name' }
+
+ # example use, replace standard accessor with one of your own:
+ sub name {
+ my ($self, $value) = @_;
+
+ die "Name cannot contain digits!" if($value =~ /\d/);
+ $self->_name($value);
+
+ return $self->_name();
+ }
+
Use this to set the name of the accessor method for this column. If unset,
the name of the column will be used.
=item data_type
-This contains the column type. It is automatically filled by the
-L<SQL::Translator::Producer::DBIx::Class::File> producer, and the
-L<DBIx::Class::Schema::Loader> module. If you do not enter a
-data_type, DBIx::Class will attempt to retrieve it from the
-database for you, using L<DBI>'s column_info method. The values of this
-key are typically upper-cased.
+ { data_type => 'integer' }
+
+This contains the column type. It is automatically filled if you use the
+L<SQL::Translator::Producer::DBIx::Class::File> producer, or the
+L<DBIx::Class::Schema::Loader> module.
+
Currently there is no standard set of values for the data_type. Use
whatever your database supports.
=item size
+ { size => 20 }
+
The length of your column, if it is a column type that can have a size
-restriction. This is currently only used by L<DBIx::Class::Schema/deploy>.
+restriction. This is currently only used to create tables from your
+schema, see L<DBIx::Class::Schema/deploy>.
=item is_nullable
-Set this to a true value for a columns that is allowed to contain
-NULL values. This is currently only used by L<DBIx::Class::Schema/deploy>.
+ { is_nullable => 1 }
+
+Set this to a true value for a column that is allowed to contain NULL
+values; the default is false. This is currently only used to create tables
+from your schema, see L<DBIx::Class::Schema/deploy>.
+
=item is_auto_increment
+ { is_auto_increment => 1 }
+
Set this to a true value for a column whose value is somehow
-automatically set. This is used to determine which columns to empty
-when cloning objects using L<DBIx::Class::Row/copy>. It is also used by
+automatically set; the default is false. This is used to determine which
+columns to empty when cloning objects using
+L<DBIx::Class::Row/copy>. It is also used by
L<DBIx::Class::Schema/deploy>.
=item is_numeric
+ { is_numeric => 1 }
+
Set this to a true or false value (not C<undef>) to explicitly specify
if this column contains numeric data. This controls how set_column
decides whether to consider a column dirty after an update: if
@@ -130,22 +215,29 @@
=item is_foreign_key
+ { is_foreign_key => 1 }
+
Set this to a true value for a column that contains a key from a
-foreign table. This is currently only used by
-L<DBIx::Class::Schema/deploy>.
+foreign table; the default is false. This is currently only used to
+create tables from your schema, see L<DBIx::Class::Schema/deploy>.
=item default_value
-Set this to the default value which will be inserted into a column
-by the database. Can contain either a value or a function (use a
+ { default_value => \'now()' }
+
+Set this to the default value which will be inserted into a column by
+the database. Can contain either a value or a function (use a
reference to a scalar e.g. C<\'now()'> if you want a function). This
-is currently only used by L<DBIx::Class::Schema/deploy>.
+is currently only used to create tables from your schema, see
+L<DBIx::Class::Schema/deploy>.
See the note on L<DBIx::Class::Row/new> for more information about possible
issues related to db-side default values.
=item sequence
+ { sequence => 'my_table_seq' }
+
Set this on a primary key column to the name of the sequence used to
generate a new key value. If not specified, L<DBIx::Class::PK::Auto>
will attempt to retrieve the name of the sequence from the database
@@ -171,13 +263,13 @@
=over
-=item Arguments: $colname, [ \%columninfo ]
+=item Arguments: $colname, \%columninfo?
=item Return value: 1/0 (true/false)
=back
- $source->add_column('col' => \%info?);
+ $source->add_column('col' => \%info);
Add a single column and optional column info. Uses the same column
info keys as L</add_columns>.
@@ -237,8 +329,8 @@
my $info = $source->column_info($col);
Returns the column metadata hashref for a column, as originally passed
-to L</add_columns>. See the description of L</add_columns> for information
-on the contents of the hashref.
+to L</add_columns>. See L</add_columns> above for information on the
+contents of the hashref.
=cut
@@ -362,14 +454,16 @@
=back
-Defines one or more columns as primary key for this source. Should be
+Defines one or more columns as primary key for this source. Must be
called after L</add_columns>.
Additionally, defines a L<unique constraint|add_unique_constraint>
named C<primary>.
The primary key columns are used by L<DBIx::Class::PK::Auto> to
-retrieve automatically created values from the database.
+retrieve automatically created values from the database. They are also
+used as default joining columns when specifying relationships, see
+L<DBIx::Class::Relationship>.
=cut
@@ -408,7 +502,7 @@
=over 4
-=item Arguments: [ $name ], \@colnames
+=item Arguments: $name?, \@colnames
=item Return value: undefined
@@ -426,11 +520,13 @@
__PACKAGE__->add_unique_constraint([ qw/column1 column2/ ]);
-This will result in a unique constraint named C<table_column1_column2>, where
-C<table> is replaced with the table name.
+This will result in a unique constraint named
+C<table_column1_column2>, where C<table> is replaced with the table
+name.
-Unique constraints are used, for example, when you call
-L<DBIx::Class::ResultSet/find>. Only columns in the constraint are searched.
+Unique constraints are used, for example, when you pass the constraint
+name as the C<key> attribute to L<DBIx::Class::ResultSet/find>. Then
+only columns in the constraint are searched.
Throws an error if any of the given column names do not yet exist on
the result source.
@@ -499,7 +595,8 @@
$source->unique_constraints();
-Read-only accessor which returns a hash of unique constraints on this source.
+Read-only accessor which returns a hash of unique constraints on this
+source.
The hash is keyed by constraint name, and contains an arrayref of
column names as values.
@@ -659,12 +756,16 @@
=back
- package My::ResultSetClass;
+ package My::Schema::ResultSet::Artist;
use base 'DBIx::Class::ResultSet';
...
- $source->resultset_class('My::ResultSet::Class');
+ # In the result class
+ __PACKAGE__->resultset_class('My::Schema::ResultSet::Artist');
+ # Or in code
+ $source->resultset_class('My::Schema::ResultSet::Artist');
+
Set the class of the resultset. This is useful if you want to create your
own resultset methods. Create your own class derived from
L<DBIx::Class::ResultSet>, and set it here. If called with no arguments,
@@ -681,6 +782,10 @@
=back
+ # In the result class
+ __PACKAGE__->resultset_attributes({ order_by => [ 'id' ] });
+
+ # Or in code
$source->resultset_attributes({ order_by => [ 'id' ] });
Store a collection of resultset attributes, that will be set on every
@@ -981,7 +1086,7 @@
L<DBIx::Class::Relationship>.
The returned hashref is keyed by the name of the opposing
-relationship, and contains it's data in the same manner as
+relationship, and contains its data in the same manner as
L</relationship_info>.
=cut
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSourceHandle.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSourceHandle.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSourceHandle.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -77,7 +77,7 @@
my ($self, $cloning) = @_;
my $to_serialize = { %$self };
-
+
my $class = $self->schema->class($self->source_moniker);
$to_serialize->{schema} = $class;
return (Storable::freeze($to_serialize));
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSourceProxy/Table.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSourceProxy/Table.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/ResultSourceProxy/Table.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -67,7 +67,7 @@
=head2 table
__PACKAGE__->table('tbl_name');
-
+
Gets or sets the table name.
=cut
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Row.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Row.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Row.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -1332,6 +1332,13 @@
changes made since the row was last read from storage. Actually
implemented in L<DBIx::Class::PK>
+Note: If you are using L<DBIx::Class::Storage::DBI::Replicated> as your
+storage, please keep in mind that if you L</discard_changes> on a row that you
+have just updated or created, you should wrap the entire operation inside a
+transaction. Otherwise you run the risk of inserting into or updating the
+master database but reading from a replicant database that has not yet been
+updated from the master, which will produce unexpected results.
+
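A hedged sketch of the pattern described above (the resultset name and column
data are illustrative):

    $schema->txn_do(sub {
        my $row = $schema->resultset('User')->create({ name => 'fred' });
        $row->discard_changes;   # the re-read happens inside the transaction
    });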
=cut
1;
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Schema/Versioned.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Schema/Versioned.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Schema/Versioned.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -308,7 +308,7 @@
# here to be sure.
# XXX - just fix it
$self->storage->sqlt_type;
-
+
my $upgrade_file = $self->ddl_filename(
$self->storage->sqlt_type,
$self->schema_version,
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Schema.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Schema.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Schema.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -511,7 +511,7 @@
general.
Note that C<connect_info> expects an arrayref of arguments, but
-C<connect> does not. C<connect> wraps it's arguments in an arrayref
+C<connect> does not. C<connect> wraps its arguments in an arrayref
before passing them to C<connect_info>.
=head3 Overloading
@@ -755,7 +755,7 @@
[ 2, 'Indie Band' ],
...
]);
-
+
Since wantarray context is basically the same as looping over $rs->create(...)
you won't see any performance benefits and in this case the method is more for
convenience. Void context sends the column information directly to storage
@@ -806,10 +806,10 @@
sub connection {
my ($self, @info) = @_;
return $self if !@info && $self->storage;
-
+
my ($storage_class, $args) = ref $self->storage_type ?
($self->_normalize_storage_type($self->storage_type),{}) : ($self->storage_type, {});
-
+
$storage_class = 'DBIx::Class::Storage'.$storage_class
if $storage_class =~ m/^::/;
eval "require ${storage_class};";
@@ -1146,7 +1146,7 @@
$filename =~ s/::/-/g;
$filename = File::Spec->catfile($dir, "$filename-$version-$type.sql");
$filename =~ s/$version/$preversion-$version/ if($preversion);
-
+
return $filename;
}
@@ -1372,7 +1372,7 @@
$self->throw_exception
("No arguments to load_classes and couldn't load ${base} ($@)")
if $@;
-
+
if ($self eq $target) {
# Pathological case, largely caused by the docs on early C::M::DBIC::Plain
foreach my $moniker ($self->sources) {
@@ -1385,14 +1385,14 @@
$self->connection(@info);
return $self;
}
-
+
my $schema = $self->compose_namespace($target, $base);
{
no strict 'refs';
my $name = join '::', $target, 'schema';
*$name = Sub::Name::subname $name, sub { $schema };
}
-
+
$schema->connection(@info);
foreach my $moniker ($schema->sources) {
my $source = $schema->source($moniker);
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/StartupCheck.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/StartupCheck.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/StartupCheck.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -7,7 +7,7 @@
=head1 SYNOPSIS
use DBIx::Class::StartupCheck;
-
+
=head1 DESCRIPTION
This module used to check for, and if necessary issue a warning for, a
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/DB2.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/DB2.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/DB2.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -21,11 +21,11 @@
sub _sql_maker_opts {
my ( $self, $opts ) = @_;
-
+
if ( $opts ) {
$self->{_sql_maker_opts} = { %$opts };
}
-
+
return { limit_dialect => 'RowNumberOver', %{$self->{_sql_maker_opts}||{}} };
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/ODBC/ACCESS.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -40,11 +40,11 @@
sub bind_attribute_by_data_type {
my $self = shift;
-
+
my ( $data_type ) = @_;
-
+
return { TYPE => $data_type } if $data_type == DBI::SQL_LONGVARCHAR;
-
+
return;
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/ODBC/DB2_400_SQL.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -23,7 +23,7 @@
sub _sql_maker_opts {
my ($self) = @_;
-
+
$self->dbh_do(sub {
my ($self, $dbh) = @_;
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -38,6 +38,19 @@
my $self = shift;
my ($op, $extra_bind, $ident, $args) = @_;
+# cast MONEY values properly
+ if ($op eq 'insert' || $op eq 'update') {
+ my $fields = $args->[0];
+ my $col_info = $self->_resolve_column_info($ident, [keys %$fields]);
+
+ for my $col (keys %$fields) {
+ if ($col_info->{$col}{data_type} =~ /^money\z/i) {
+ my $val = $fields->{$col};
+ $fields->{$col} = \['CAST(? AS MONEY)', [ $col => $val ]];
+ }
+ }
+ }
+
my ($sql, $bind) = $self->next::method (@_);
if ($op eq 'insert') {
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -5,7 +5,7 @@
=head1 NAME
-DBIx::Class::Storage::DBI::Oracle::Generic - Automatic primary key class for Oracle
+DBIx::Class::Storage::DBI::Oracle::Generic - Oracle Support for DBIx::Class
=head1 SYNOPSIS
@@ -26,9 +26,6 @@
use base qw/DBIx::Class::Storage::DBI/;
use mro 'c3';
-# For ORA_BLOB => 113, ORA_CLOB => 112
-use DBD::Oracle qw( :ora_types );
-
sub _dbh_last_insert_id {
my ($self, $dbh, $source, @columns) = @_;
my @ids = ();
@@ -52,7 +49,7 @@
};
# trigger_body is a LONG
- $dbh->{LongReadLen} = 64 * 1024 if ($dbh->{LongReadLen} < 64 * 1024);
+ local $dbh->{LongReadLen} = 64 * 1024 if ($dbh->{LongReadLen} < 64 * 1024);
my $sth;
@@ -139,7 +136,7 @@
sub get_autoinc_seq {
my ($self, $source, $col) = @_;
-
+
$self->dbh_do('_dbh_get_autoinc_seq', $source, $col);
}
@@ -195,7 +192,6 @@
sub connect_call_datetime_setup {
my $self = shift;
- my $dbh = $self->dbh;
my $date_format = $ENV{NLS_DATE_FORMAT} ||= 'YYYY-MM-DD HH24:MI:SS';
my $timestamp_format = $ENV{NLS_TIMESTAMP_FORMAT} ||=
@@ -203,14 +199,16 @@
my $timestamp_tz_format = $ENV{NLS_TIMESTAMP_TZ_FORMAT} ||=
'YYYY-MM-DD HH24:MI:SS.FF TZHTZM';
- $dbh->do("alter session set nls_date_format = '$date_format'");
- $dbh->do("alter session set nls_timestamp_format = '$timestamp_format'");
- $dbh->do("alter session set nls_timestamp_tz_format='$timestamp_tz_format'");
+ $self->_do_query("alter session set nls_date_format = '$date_format'");
+ $self->_do_query(
+"alter session set nls_timestamp_format = '$timestamp_format'");
+ $self->_do_query(
+"alter session set nls_timestamp_tz_format='$timestamp_tz_format'");
}
sub _svp_begin {
my ($self, $name) = @_;
-
+
$self->dbh->do("SAVEPOINT $name");
}
@@ -233,6 +231,7 @@
sub source_bind_attributes
{
+ require DBD::Oracle;
my $self = shift;
my($source) = @_;
@@ -245,8 +244,9 @@
my %column_bind_attrs = $self->bind_attribute_by_data_type($data_type);
if ($data_type =~ /^[BC]LOB$/i) {
- $column_bind_attrs{'ora_type'}
- = uc($data_type) eq 'CLOB' ? ORA_CLOB : ORA_BLOB;
+ $column_bind_attrs{'ora_type'} = uc($data_type) eq 'CLOB' ?
+ DBD::Oracle::ORA_CLOB() :
+ DBD::Oracle::ORA_BLOB();
$column_bind_attrs{'ora_field'} = $column;
}
@@ -266,12 +266,10 @@
$self->dbh->do("ROLLBACK TO SAVEPOINT $name")
}
-=head1 AUTHORS
+=head1 AUTHOR
-Andy Grundman <andy at hybridized.org>
+See L<DBIx::Class/CONTRIBUTORS>.
-Scott Connelly <scottsweep at yahoo.com>
-
=head1 LICENSE
You may distribute this code under the same terms as Perl itself.
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Pg.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Pg.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Pg.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -9,7 +9,7 @@
use DBD::Pg qw(:pg_types);
# Ask for a DBD::Pg with array support
-warn "DBD::Pg 2.9.2 or greater is strongly recommended"
+warn "DBD::Pg 2.9.2 or greater is strongly recommended\n"
if ($DBD::Pg::VERSION < 2.009002); # pg uses (used?) version::qv()
sub with_deferred_fk_checks {
@@ -50,7 +50,7 @@
sub get_autoinc_seq {
my ($self,$source,$col) = @_;
-
+
my @pri = $source->primary_columns;
my ($schema,$table) = $source->name =~ /^(.+)\.(.+)$/ ? ($1,$2)
: (undef,$source->name);
@@ -71,7 +71,7 @@
bytea => { pg_type => DBD::Pg::PG_BYTEA },
blob => { pg_type => DBD::Pg::PG_BYTEA },
};
-
+
if( defined $bind_attributes->{$data_type} ) {
return $bind_attributes->{$data_type};
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -12,7 +12,7 @@
This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>. You
shouldn't need to create instances of this class.
-
+
=head1 DESCRIPTION
Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -13,7 +13,7 @@
This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>. You
shouldn't need to create instances of this class.
-
+
=head1 DESCRIPTION
Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -3,7 +3,8 @@
use Moose::Role;
requires 'next_storage';
use MooseX::Types::Moose qw/Int/;
-
+use DBIx::Class::Storage::DBI::Replicated::Pool;
+use DBIx::Class::Storage::DBI::Replicated::Types qw/DBICStorageDBI/;
use namespace::clean -except => 'meta';
=head1 NAME
@@ -13,7 +14,7 @@
=head1 SYNOPSIS
This role is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
-
+
=head1 DESCRIPTION
Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
@@ -48,7 +49,7 @@
has 'master' => (
is=>'ro',
- isa=>'DBIx::Class::Storage::DBI',
+ isa=>DBICStorageDBI,
required=>1,
);
@@ -74,13 +75,13 @@
This attribute returns the next slave to handle a read request. Your L</pool>
attribute has methods to help you shuffle through all the available replicants
-via it's balancer object.
+via its balancer object.
=cut
has 'current_replicant' => (
is=> 'rw',
- isa=>'DBIx::Class::Storage::DBI',
+ isa=>DBICStorageDBI,
lazy_build=>1,
handles=>[qw/
select
@@ -169,10 +170,12 @@
around 'select' => sub {
my ($select, $self, @args) = @_;
-
+
if (my $forced_pool = $args[-1]->{force_pool}) {
delete $args[-1]->{force_pool};
return $self->_get_forced_pool($forced_pool)->select(@args);
+ } elsif($self->master->{transaction_depth}) {
+ return $self->master->select(@args);
} else {
$self->increment_storage;
return $self->$select(@args);
@@ -189,10 +192,12 @@
around 'select_single' => sub {
my ($select_single, $self, @args) = @_;
-
+
if (my $forced_pool = $args[-1]->{force_pool}) {
delete $args[-1]->{force_pool};
return $self->_get_forced_pool($forced_pool)->select_single(@args);
+ } elsif($self->master->{transaction_depth}) {
+ return $self->master->select_single(@args);
} else {
$self->increment_storage;
return $self->$select_single(@args);
@@ -224,7 +229,7 @@
return $forced_pool;
} elsif($forced_pool eq 'master') {
return $self->master;
- } elsif(my $replicant = $self->pool->replicants($forced_pool)) {
+ } elsif(my $replicant = $self->pool->replicants->{$forced_pool}) {
return $replicant;
} else {
$self->master->throw_exception("$forced_pool is not a named replicant.");
@@ -233,7 +238,7 @@
=head1 AUTHOR
-John Napiorkowski <john.napiorkowski at takkle.com>
+John Napiorkowski <jjnapiork at cpan.org>
=head1 LICENSE
Added: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod (rev 0)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Introduction.pod 2009-08-03 08:13:39 UTC (rev 7171)
@@ -0,0 +1,185 @@
+package DBIx::Class::Storage::DBI::Replicated::Introduction;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Introduction - Minimum Need to Know
+
+=head1 SYNOPSIS
+
+This is an introductory document for L<DBIx::Class::Storage::DBI::Replicated>.
+
+This document is not an overview of what replication is or why you should be
+using it. It is not a document explaining how to set up MySQL native replication
+either. Copious external resources are available for both. This document
+presumes you have the basics down.
+
+=head1 DESCRIPTION
+
+L<DBIx::Class> supports a framework for using database replication. This system
+is integrated completely, which means that once it's set up you should be able
+to start using a replication cluster without additional work or changes to your
+code. Some caveats apply, primarily related to the proper use of transactions
+(you are wrapping all your database-modifying statements inside a transaction,
+right? ;) ); however, in our experience, properly written DBIC code will work
+transparently with Replicated storage.
+
+Currently we have support for MySQL native replication, which is relatively
+easy to install and configure. We also currently support single master to one
+or more replicants (also called 'slaves' in some documentation). However, the
+framework is not specifically tied to MySQL, and supporting other replication
+systems or topologies should be possible. Please bring your
+patches and ideas to the #dbix-class IRC channel or the mailing list.
+
+For an easy way to start playing with MySQL native replication, see:
+L<MySQL::Sandbox>.
+
+If you are using this with a L<Catalyst> based application, you may also wish
+to see more recent updates to L<Catalyst::Model::DBIC::Schema>, which has
+support for replication configuration options as well.
+
+=head1 REPLICATED STORAGE
+
+By default, when you start L<DBIx::Class>, your Schema (L<DBIx::Class::Schema>)
+is assigned a storage_type, which when fully connected will reflect your
+underlying storage engine as defined by your chosen database driver. For
+example, if you connect to a MySQL database, your storage_type will be
+L<DBIx::Class::Storage::DBI::mysql>. Your storage type class will contain
+database specific code to help smooth over the differences between databases
+and let L<DBIx::Class> do its thing.
+
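+For instance (a rough sketch, assuming a schema class called MyApp::Schema and
+a MySQL DSN in $dsn):
+
+  my $schema = MyApp::Schema->connect($dsn, $user, $pass);
+
+  ## once fully connected the storage object is blessed into the driver
+  ## specific class, e.g. DBIx::Class::Storage::DBI::mysql for a MySQL DSN
+  print ref $schema->storage, "\n";
+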
+If you want to use replication, you will override this setting so that the
+replicated storage engine will 'wrap' your underlying storages and present to
+the end programmer a unified interface. This wrapper storage class will
+delegate method calls to either a master database or one or more replicated
+databases, based on whether they are reads (by default sent to the replicants)
+or writes (reserved for the master). Additionally, the Replicated storage
+will monitor the health of your replicants and automatically drop them should
+one exceed configurable parameters. Later, it can automatically restore a
+replicant when its health is restored.
+
+This gives you a very robust system, since you can add or drop replicants
+and DBIC will automatically adjust itself accordingly.
+
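+A minimal sketch of what this looks like in day to day code (assuming a result
+class called Artist and a replicated storage configured as described below):
+
+  ## reads are transparently balanced across the replicants
+  my $artist = $schema->resultset('Artist')->find(1);
+
+  ## writes (insert/update/delete) always go to the master
+  $schema->resultset('Artist')->create({ name => 'Another Artist' });
+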
+Additionally, if you need high data integrity, such as when you are executing
+a transaction, replicated storage will automatically delegate all database
+traffic to the master storage. There are several ways to enable this high
+integrity mode, but wrapping your statements inside a transaction is the easy
+and canonical option.
+
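+For example, a read-modify-write that must not see stale replicant data can be
+wrapped in L<DBIx::Class::Schema/txn_do> (again assuming an Artist result
+class):
+
+  $schema->txn_do(sub {
+    ## inside the transaction both the read and the update go to the master
+    my $artist = $schema->resultset('Artist')->find(1);
+    $artist->update({ name => 'Corrected Name' });
+  });
+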
+=head1 PARTS OF REPLICATED STORAGE
+
+A replicated storage contains several parts. First, there is the replicated
+storage itself (L<DBIx::Class::Storage::DBI::Replicated>). A replicated storage
+takes a pool of replicants (L<DBIx::Class::Storage::DBI::Replicated::Pool>)
+and a software balancer (L<DBIx::Class::Storage::DBI::Replicated::Balancer>). The
+balancer does the job of splitting up all the read traffic amongst the
+replicants in the Pool. Currently there are two types of balancers: a Random one,
+which chooses a Replicant in the Pool using a naive randomizer algorithm, and a
+First balancer, which just uses the first replicant in the Pool (and obviously is
+only of value when you have a single replicant).
+
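+Once the replicated storage described below is configured, you can get at these
+pieces directly; a rough sketch:
+
+  my $pool     = $schema->storage->pool;      ## the ::Replicated::Pool object
+  my $balancer = $schema->storage->balancer;  ## the ::Replicated::Balancer object
+  print $pool->num_replicants, " replicants in the pool\n";
+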
+=head1 REPLICATED STORAGE CONFIGURATION
+
+All the parts of replication can be altered dynamically at runtime, which makes
+it possible to create a system that automatically scales under load by creating
+more replicants as needed, perhaps using a cloud system such as Amazon EC2.
+However, for common use you can set up your replicated storage to be enabled at
+the time you connect the databases. The following is a breakdown of how you
+may wish to do this. Again, if you are using L<Catalyst>, I strongly recommend
+you use (or upgrade to) the latest L<Catalyst::Model::DBIC::Schema>, which makes
+this job even easier.
+
+First, you need to connect your L<DBIx::Class::Schema>. Let's assume you have
+such a schema called "MyApp::Schema".
+
+ use MyApp::Schema;
+ my $schema = MyApp::Schema->connect($dsn, $user, $pass);
+
+Next, you need to set the storage_type.
+
+ $schema->storage_type(
+ '::DBI::Replicated' => {
+ balancer_type => '::Random',
+ balancer_args => {
+ auto_validate_every => 5,
+ master_read_weight => 1
+ },
+ pool_args => {
+ maximum_lag => 2,
+ },
+ }
+ );
+
+Let's break down the settings. The method L<DBIx::Class::Schema/storage_type>
+takes one mandatory parameter, a scalar value, and an optional second value which
+is a Hash Reference of configuration options for that storage. In this case,
+we are setting the Replicated storage type using '::DBI::Replicated' as the
+first value. You will only use a different value if you are subclassing the
+replicated storage, so for now just copy that first parameter.
+
+The second parameter contains a hash reference of stuff that gets passed to the
+replicated storage. L<DBIx::Class::Storage::DBI::Replicated/balancer_type> is
+the type of software load balancer you will use to split up traffic among all
+your replicants. Right now we have two options, "::Random" and "::First". You
+can review documentation for both at:
+
+L<DBIx::Class::Storage::DBI::Replicated::Balancer::First>,
+L<DBIx::Class::Storage::DBI::Replicated::Balancer::Random>.
+
+In this case we will have three replicants, so the ::Random option is the only
+one that makes sense.
+
+'balancer_args' get passed to the balancer when it's instantiated. All
+balancers have the 'auto_validate_every' option. This is the number of seconds
+we allow to pass between validation checks on a load balanced replicant. So
+the higher the number, the greater the chance that your reads from the replicant
+may be inconsistent with what's on the master. Setting this number too low
+will result in increased database loads, so choose a number with care. Our
+experience is that setting the number around 5 seconds results in a good
+performance / integrity balance.
+
+'master_read_weight' is an option associated with the ::Random balancer. It
+allows you to let the master be read from. I usually leave this off (default
+is off).
+
+The 'pool_args' are configuration options associated with the replicant pool.
+This object (L<DBIx::Class::Storage::DBI::Replicated::Pool>) manages all the
+declared replicants. 'maximum_lag' is the number of seconds a replicant is
+allowed to lag behind the master before being temporarily removed from the pool.
+Keep in mind that the Balancer option 'auto_validate_every' determines how often
+a replicant is tested against this condition, so the true possible lag can be
+higher than the number you set. The default is zero.
+
+No matter how low you set the maximum_lag or the auto_validate_every settings,
+there is always the chance that your replicants will lag a bit behind the
+master with the replication system built into MySQL. You can ensure
+reliable reads by using a transaction, which will force both read and write
+activity to the master; however, this will increase the load on your master
+database.
+
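+If you need a series of reads to come from the master without opening a
+transaction, the storage also provides
+L<DBIx::Class::Storage::DBI::Replicated/execute_reliably>; a rough sketch
+(again assuming an Artist result class):
+
+  my $artist = $schema->storage->execute_reliably(sub {
+    ## everything in here, reads included, is sent to the master
+    return $schema->resultset('Artist')->find(1);
+  });
+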
+After you've configured the replicated storage, you need to add the connection
+information for the replicants:
+
+ $schema->storage->connect_replicants(
+ [$dsn1, $user, $pass, \%opts],
+ [$dsn2, $user, $pass, \%opts],
+ [$dsn3, $user, $pass, \%opts],
+ );
+
+These replicants should be configured as slaves to the master using the
+instructions for MySQL native replication, or if you are just learning, you
+will find L<MySQL::Sandbox> an easy way to set up a replication cluster.
+
+And now your $schema object is properly configured! Enjoy!
+
+=head1 AUTHOR
+
+John Napiorkowski <jjnapiork at cpan.org>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -18,7 +18,7 @@
This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>. You
shouldn't need to create instances of this class.
-
+
=head1 DESCRIPTION
In a replicated storage type, there is at least one replicant to handle the
@@ -34,7 +34,7 @@
This is a number which defines the maximum allowed lag returned by the
L<DBIx::Class::Storage::DBI/lag_behind_master> method. The default is 0. In
general, this should return a larger number when the replicant is lagging
-behind it's master, however the implementation of this is database specific, so
+behind its master, however the implementation of this is database specific, so
don't count on this number having a fixed meaning. For example, MySQL will
return a number of seconds that the replicating database is lagging.
@@ -51,7 +51,7 @@
=head2 last_validated
This is an integer representing a time since the last time the replicants were
-validated. It's nothing fancy, just an integer provided via the perl time
+validated. It's nothing fancy, just an integer provided via the perl L<time|perlfunc/time>
builtin.
=cut
@@ -89,11 +89,11 @@
actual replicant storage. For example if the $dsn element is something like:
"dbi:SQLite:dbname=dbfile"
-
+
You could access the specific replicant via:
$schema->storage->replicants->{'dbname=dbfile'}
-
+
This attribute also supports the following helper methods:
=over 4
@@ -125,14 +125,15 @@
has 'replicants' => (
is=>'rw',
metaclass => 'Collection::Hash',
- isa=>HashRef['DBIx::Class::Storage::DBI'],
+ isa=>HashRef['Object'],
default=>sub {{}},
provides => {
'set' => 'set_replicant',
- 'get' => 'get_replicant',
+ 'get' => 'get_replicant',
'empty' => 'has_replicants',
'count' => 'num_replicants',
'delete' => 'delete_replicant',
+ 'values' => 'all_replicant_storages',
},
);
@@ -151,7 +152,7 @@
sub connect_replicants {
my $self = shift @_;
my $schema = shift @_;
-
+
my @newly_created = ();
foreach my $connect_info (@_) {
$connect_info = [ $connect_info ]
@@ -169,7 +170,7 @@
$self->set_replicant( $key => $replicant);
push @newly_created, $replicant;
}
-
+
return @newly_created;
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -14,7 +14,7 @@
=head1 SYNOPSIS
This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
-
+
=head1 DESCRIPTION
Replicants are DBI Storages that follow a master DBI Storage. Typically this
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Types.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Types.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/Types.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -5,12 +5,15 @@
# L<DBIx::Class::Storage::DBI::Replicated>
use MooseX::Types
- -declare => [qw/BalancerClassNamePart Weight/];
+ -declare => [qw/BalancerClassNamePart Weight DBICSchema DBICStorageDBI/];
use MooseX::Types::Moose qw/ClassName Str Num/;
class_type 'DBIx::Class::Storage::DBI';
class_type 'DBIx::Class::Schema';
+subtype DBICSchema, as 'DBIx::Class::Schema';
+subtype DBICStorageDBI, as 'DBIx::Class::Storage::DBI';
+
subtype BalancerClassNamePart,
as ClassName;
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -13,7 +13,7 @@
=head1 SYNOPSIS
This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
-
+
=head1 DESCRIPTION
This role adds C<DSN: > info to storage debugging output.
@@ -31,7 +31,10 @@
around '_query_start' => sub {
my ($method, $self, $sql, @bind) = @_;
my $dsn = $self->_dbi_connect_info->[0];
- $self->$method("DSN: $dsn SQL: $sql", @bind);
+ my($op, $rest) = (($sql=~m/^(\w+)(.+)$/),'NOP', 'NO SQL');
+ my $storage_type = $self->can('active') ? 'REPLICANT' : 'MASTER';
+
+ $self->$method("$op [DSN_$storage_type=$dsn]$rest", @bind);
};
=head1 ALSO SEE
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/Replicated.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -2,35 +2,35 @@
BEGIN {
use Carp::Clan qw/^DBIx::Class/;
-
+
## Modules required for Replication support not required for general DBIC
## use, so we explicitly test for these.
-
+
my %replication_required = (
- Moose => '0.77',
- MooseX::AttributeHelpers => '0.12',
- MooseX::Types => '0.10',
- namespace::clean => '0.11',
- Hash::Merge => '0.11'
+ 'Moose' => '0.87',
+ 'MooseX::AttributeHelpers' => '0.21',
+ 'MooseX::Types' => '0.16',
+ 'namespace::clean' => '0.11',
+ 'Hash::Merge' => '0.11'
);
-
+
my @didnt_load;
-
+
for my $module (keys %replication_required) {
eval "use $module $replication_required{$module}";
push @didnt_load, "$module $replication_required{$module}"
if $@;
}
-
+
croak("@{[ join ', ', @didnt_load ]} are missing and are required for Replication")
- if @didnt_load;
+ if @didnt_load;
}
use Moose;
use DBIx::Class::Storage::DBI;
use DBIx::Class::Storage::DBI::Replicated::Pool;
use DBIx::Class::Storage::DBI::Replicated::Balancer;
-use DBIx::Class::Storage::DBI::Replicated::Types 'BalancerClassNamePart';
+use DBIx::Class::Storage::DBI::Replicated::Types qw/BalancerClassNamePart DBICSchema DBICStorageDBI/;
use MooseX::Types::Moose qw/ClassName HashRef Object/;
use Scalar::Util 'reftype';
use Carp::Clan qw/^DBIx::Class/;
@@ -48,33 +48,45 @@
storage type, add some replicated (readonly) databases, and perform reporting
tasks.
- ## Change storage_type in your schema class
+You should set the 'storage_type' attribute to a replicated type. You should
+also define your arguments, such as which balancer you want and any arguments
+that the Pool object should get.
+
$schema->storage_type( ['::DBI::Replicated', {balancer=>'::Random'}] );
-
- ## Add some slaves. Basically this is an array of arrayrefs, where each
- ## arrayref is database connect information
-
+
+Next, you need to add in the Replicants. Basically this is an array of
+arrayrefs, where each arrayref is database connect information. Think of these
+arguments as what you'd pass to the 'normal' $schema->connect method.
+
$schema->storage->connect_replicants(
[$dsn1, $user, $pass, \%opts],
[$dsn2, $user, $pass, \%opts],
[$dsn3, $user, $pass, \%opts],
);
-
- ## Now, just use the $schema as normal
+
+Now, just use the $schema as you normally would. All reads will automatically
+be delegated to the replicants, while writes go to the master.
+
$schema->resultset('Source')->search({name=>'etc'});
-
- ## You can force a given query to use a particular storage using the search
- ### attribute 'force_pool'. For example:
-
+
+You can force a given query to use a particular storage using the search
+attribute 'force_pool'. For example:
+
my $RS = $schema->resultset('Source')->search(undef, {force_pool=>'master'});
-
- ## Now $RS will force everything (both reads and writes) to use whatever was
- ## setup as the master storage. 'master' is hardcoded to always point to the
- ## Master, but you can also use any Replicant name. Please see:
- ## L<DBIx::Class::Storage::Replicated::Pool> and the replicants attribute for
- ## More. Also see transactions and L</execute_reliably> for alternative ways
- ## to force read traffic to the master.
-
+
+Now $RS will force everything (both reads and writes) to use whatever was set up
+as the master storage. 'master' is hardcoded to always point to the Master,
+but you can also use any Replicant name. Please see:
+L<DBIx::Class::Storage::DBI::Replicated::Pool> and the replicants attribute for more.
+
+Also see transactions and L</execute_reliably> for alternative ways to
+force read traffic to the master. In general, you should wrap your statements
+in a transaction when you are reading and writing to the same tables at the
+same time, since your replicants will often lag a bit behind the master.
+
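+You can also pin a resultset to one specific replicant by name. A replicant's
+name defaults to its DSN minus the driver prefix (see
+L<DBIx::Class::Storage::DBI::Replicated::Pool/replicants>), so for a
+hypothetical SQLite replicant this might look like:
+
+  my $pinned_rs = $schema->resultset('Source')->search(
+    undef, { force_pool => 'dbname=/path/to/slave1.db' }
+  );
+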
+See L<DBIx::Class::Storage::DBI::Replicated::Introduction> for more help and
+walkthroughs.
+
=head1 DESCRIPTION
Warning: This class is marked BETA. This has been running a production
@@ -100,7 +112,7 @@
=head1 NOTES
The consistency between master and replicants is database specific. The Pool
-gives you a method to validate it's replicants, removing and replacing them
+gives you a method to validate its replicants, removing and replacing them
when they fail/pass predefined criteria. Please make careful use of the ways
to force a query to run against Master when needed.
@@ -108,12 +120,12 @@
Replicated Storage has additional requirements not currently part of L<DBIx::Class>
- Moose => 0.77
- MooseX::AttributeHelpers => 0.12
- MooseX::Types => 0.10
- namespace::clean => 0.11
- Hash::Merge => 0.11
-
+ Moose => '0.87',
+ MooseX::AttributeHelpers => '0.21',
+ MooseX::Types => '0.16',
+ namespace::clean => '0.11',
+ Hash::Merge => '0.11'
+
You will need to install these modules manually via CPAN or make them part of the
Makefile for your distribution.
@@ -129,7 +141,7 @@
has 'schema' => (
is=>'rw',
- isa=>'DBIx::Class::Schema',
+ isa=>DBICSchema,
weak_ref=>1,
required=>1,
);
@@ -153,7 +165,7 @@
=head2 pool_args
Contains a hashref of initialized information to pass to the Pool object.
-See L<DBIx::Class::Storage::Replicated::Pool> for available arguments.
+See L<DBIx::Class::Storage::DBI::Replicated::Pool> for available arguments.
=cut
@@ -186,7 +198,7 @@
=head2 balancer_args
Contains a hashref of initialized information to pass to the Balancer object.
-See L<DBIx::Class::Storage::Replicated::Balancer> for available arguments.
+See L<DBIx::Class::Storage::DBI::Replicated::Balancer> for available arguments.
=cut
@@ -242,7 +254,7 @@
has 'master' => (
is=> 'ro',
- isa=>'DBIx::Class::Storage::DBI',
+ isa=>DBICStorageDBI,
lazy_build=>1,
);
@@ -288,7 +300,8 @@
create_ddl_dir
deployment_statements
datetime_parser
- datetime_parser_type
+ datetime_parser_type
+ build_datetime_parser
last_insert_id
insert
insert_bulk
@@ -303,10 +316,19 @@
sth
deploy
with_deferred_fk_checks
-
+ dbh_do
reload_row
+ with_deferred_fk_checks
_prep_for_execute
-
+
+ backup
+ is_datatype_numeric
+ _count_select
+ _subq_count_select
+ _subq_update_delete
+ svp_rollback
+ svp_begin
+ svp_release
/],
);
@@ -381,7 +403,7 @@
=head2 BUILDARGS
-L<DBIx::Class::Schema> when instantiating it's storage passed itself as the
+L<DBIx::Class::Schema> when instantiating its storage passed itself as the
first argument. So we need to massage the arguments a bit so that all the
bits get put into the correct places.
@@ -389,7 +411,7 @@
sub BUILDARGS {
my ($class, $schema, $storage_type_args, @args) = @_;
-
+
return {
schema=>$schema,
%$storage_type_args,
@@ -546,24 +568,24 @@
sub execute_reliably {
my ($self, $coderef, @args) = @_;
-
+
unless( ref $coderef eq 'CODE') {
$self->throw_exception('Second argument must be a coderef');
}
-
+
##Get copy of master storage
my $master = $self->master;
-
+
##Get whatever the current read hander is
my $current = $self->read_handler;
-
+
##Set the read handler to master
$self->read_handler($master);
-
+
## do whatever the caller needs
my @result;
my $want_array = wantarray;
-
+
eval {
if($want_array) {
@result = $coderef->(@args);
@@ -573,13 +595,13 @@
$coderef->(@args);
}
};
-
+
##Reset to the original state
$self->read_handler($current);
-
+
##Exception testing has to come last, otherwise you might leave the
##read_handler set to master.
-
+
if($@) {
$self->throw_exception("coderef returned an error: $@");
} else {
@@ -591,14 +613,14 @@
Sets the current $schema to be 'reliable', that is, all queries, both read and
write, are sent to the master
-
+
=cut
sub set_reliable_storage {
my $self = shift @_;
my $schema = $self->schema;
my $write_handler = $self->schema->storage->write_handler;
-
+
$schema->storage->read_handler($write_handler);
}
@@ -606,30 +628,17 @@
Sets the current $schema to use the L</balancer> for all reads, while all
writes are sent to the master only
-
+
=cut
sub set_balanced_storage {
my $self = shift @_;
my $schema = $self->schema;
- my $write_handler = $self->schema->storage->balancer;
-
- $schema->storage->read_handler($write_handler);
+ my $balanced_handler = $self->schema->storage->balancer;
+
+ $schema->storage->read_handler($balanced_handler);
}
-=head2 around: txn_do ($coderef)
-
-Overload to the txn_do method, which is delegated to whatever the
-L<write_handler> is set to. We overload this in order to wrap in inside a
-L</execute_reliably> method.
-
-=cut
-
-around 'txn_do' => sub {
- my($txn_do, $self, $coderef, @args) = @_;
- $self->execute_reliably(sub {$self->$txn_do($coderef, @args)});
-};
-
=head2 connected
Check that the master and at least one of the replicants is connected.
@@ -802,7 +811,7 @@
}
$self->master->cursor_class;
}
-
+
=head1 GOTCHAS
Due to the fact that replicants can lag behind a master, you must take care to
@@ -836,7 +845,7 @@
my $new_schema = $schema->clone;
$new_schema->set_reliable_storage;
-
+
## $new_schema will use only the Master storage for all reads/writes while
## the $schema object will use replicated storage.
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI/mysql.pm
===================================================================
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/DBI.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -1480,7 +1480,7 @@
my $alias = $attrs->{alias};
my $sql_maker = $self->sql_maker;
- # create subquery select list - loop only over primary columns
+ # create subquery select list - consider only stuff *not* brought in by the prefetch
my $sub_select = [];
for my $i (0 .. @{$attrs->{select}} - @{$attrs->{prefetch_select}} - 1) {
my $sel = $attrs->{select}[$i];
@@ -1489,7 +1489,7 @@
# adjust the outer select accordingly
if (ref $sel eq 'HASH' && !$sel->{-select}) {
$sel = { -select => $sel, -as => $attrs->{as}[$i] };
- $select->[$i] = join ('.', $attrs->{alias}, $attrs->{as}[$i]);
+ $select->[$i] = join ('.', $attrs->{alias}, ($attrs->{as}[$i] || "select_$i") );
}
push @$sub_select, $sel;
@@ -1547,6 +1547,8 @@
{
# produce stuff unquoted, so it can be scanned
local $sql_maker->{quote_char};
+ my $sep = $self->_sql_maker_opts->{name_sep} || '.';
+ $sep = "\Q$sep\E";
my @order_by = (map
{ ref $_ ? $_->[0] : $_ }
@@ -1554,6 +1556,7 @@
);
my $where_sql = $sql_maker->where ($where);
+ my $select_sql = $sql_maker->_recurse_fields ($sub_select);
# sort needed joins
for my $alias (keys %join_info) {
@@ -1561,8 +1564,8 @@
# any table alias found on a column name in where or order_by
# gets included in %inner_joins
# Also any parent joins that are needed to reach this particular alias
- for my $piece ($where_sql, @order_by ) {
- if ($piece =~ /\b$alias\./) {
+ for my $piece ($select_sql, $where_sql, @order_by ) {
+ if ($piece =~ /\b $alias $sep/x) {
$inner_joins{$alias} = 1;
}
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/Statistics.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/Statistics.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/Storage/Statistics.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -16,7 +16,7 @@
=head1 DESCRIPTION
This class is called by DBIx::Class::Storage::DBI as a means of collecting
-statistics on it's actions. Using this class alone merely prints the SQL
+statistics on its actions. Using this class alone merely prints the SQL
executed, the fact that it completes and begin/end notification for
transactions.
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/UTF8Columns.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/UTF8Columns.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class/UTF8Columns.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -25,7 +25,7 @@
package Artist;
__PACKAGE__->load_components(qw/UTF8Columns Core/);
__PACKAGE__->utf8_columns(qw/name description/);
-
+
# then the methods below return strings with the utf8 flag
$artist->name;
$artist->get_column('description');
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/DBIx/Class.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -25,7 +25,7 @@
# i.e. first release of 0.XX *must* be 0.XX000. This avoids fBSD ports
# brain damage and presumably various other packaging systems too
-$VERSION = '0.08107';
+$VERSION = '0.08108';
$VERSION = eval $VERSION; # numify for warning-free dev releases
@@ -73,9 +73,11 @@
1;
-Create a table class to represent artists, who have many CDs, in
+Create a result class to represent artists, who have many CDs, in
MyDB/Schema/Result/Artist.pm:
+See L<DBIx::Class::ResultSource> for docs on defining result classes.
+
package MyDB::Schema::Result::Artist;
use base qw/DBIx::Class/;
@@ -87,7 +89,7 @@
1;
-A table class to represent a CD, which belongs to an artist, in
+A result class to represent a CD, which belongs to an artist, in
MyDB/Schema/Result/CD.pm:
package MyDB::Schema::Result::CD;
@@ -109,9 +111,17 @@
# Query for all artists and put them in an array,
# or retrieve them as a result set object.
+ # $schema->resultset returns a DBIx::Class::ResultSet
my @all_artists = $schema->resultset('Artist')->all;
my $all_artists_rs = $schema->resultset('Artist');
+ # Output all artists' names.
+ # $artist here is a DBIx::Class::Row, which has accessors
+ # for all its columns. Rows are also subclasses of your Result class.
+ foreach my $artist (@all_artists) {
+ print $artist->name, "\n";
+ }
+
# Create a result set to search for artists.
# This does not query the DB.
my $johns_rs = $schema->resultset('Artist')->search(
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/SQL/Translator/Parser/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/SQL/Translator/Parser/DBIx/Class.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/SQL/Translator/Parser/DBIx/Class.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -337,7 +337,7 @@
## Standalone
use MyApp::Schema;
use SQL::Translator;
-
+
my $schema = MyApp::Schema->connect;
my $trans = SQL::Translator->new (
parser => 'SQL::Translator::Parser::DBIx::Class',
@@ -353,7 +353,7 @@
C<SQL::Translator::Parser::DBIx::Class> reads a DBIx::Class schema,
interrogates the columns, and stuffs it all in an $sqlt_schema object.
-It's primary use is in deploying database layouts described as a set
+Its primary use is in deploying database layouts described as a set
of L<DBIx::Class> classes, to a database. To do this, see
L<DBIx::Class::Schema/deploy>.
Modified: DBIx-Class/0.08/branches/mysql_ansi/lib/SQL/Translator/Producer/DBIx/Class/File.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/lib/SQL/Translator/Producer/DBIx/Class/File.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/lib/SQL/Translator/Producer/DBIx/Class/File.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -128,7 +128,7 @@
$tableextras{$table->name} .= "\n__PACKAGE__->belongs_to('" .
$cont->fields->[0]->name . "', '" .
"${dbixschema}::" . $cont->reference_table . "');\n";
-
+
my $other = "\n__PACKAGE__->has_many('" .
"get_" . $table->name. "', '" .
"${dbixschema}::" . $table->name. "', '" .
Modified: DBIx-Class/0.08/branches/mysql_ansi/t/03podcoverage.t
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/03podcoverage.t 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/03podcoverage.t 2009-08-03 08:13:39 UTC (rev 7171)
@@ -106,6 +106,7 @@
'DBIx::Class::ResultSetManager' => { skip => 1 },
'DBIx::Class::ResultSourceProxy' => { skip => 1 },
'DBIx::Class::Storage::DBI' => { skip => 1 },
+ 'DBIx::Class::Storage::DBI::Replicated::Types' => { skip => 1 },
'DBIx::Class::Storage::DBI::DB2' => { skip => 1 },
'DBIx::Class::Storage::DBI::MSSQL' => { skip => 1 },
'DBIx::Class::Storage::DBI::Sybase::MSSQL' => { skip => 1 },
@@ -116,6 +117,7 @@
'DBIx::Class::Storage::DBI::Pg' => { skip => 1 },
'DBIx::Class::Storage::DBI::SQLite' => { skip => 1 },
'DBIx::Class::Storage::DBI::mysql' => { skip => 1 },
+ 'DBIx::Class::SQLAHacks' => { skip => 1 },
'DBIx::Class::SQLAHacks::MySQL' => { skip => 1 },
'DBIx::Class::SQLAHacks::MSSQL' => { skip => 1 },
'SQL::Translator::Parser::DBIx::Class' => { skip => 1 },
Modified: DBIx-Class/0.08/branches/mysql_ansi/t/746mssql.t
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/746mssql.t 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/746mssql.t 2009-08-03 08:13:39 UTC (rev 7171)
@@ -12,7 +12,7 @@
plan skip_all => 'Set $ENV{DBICTEST_MSSQL_ODBC_DSN}, _USER and _PASS to run this test'
unless ($dsn && $user);
-plan tests => 27;
+plan tests => 33;
my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
@@ -75,8 +75,44 @@
is( $it->next->name, "Artist 2", "iterator->next ok" );
is( $it->next, undef, "next past end of resultset ok" );
+# test MONEY type
$schema->storage->dbh_do (sub {
my ($storage, $dbh) = @_;
+ eval { $dbh->do("DROP TABLE money_test") };
+ $dbh->do(<<'SQL');
+
+CREATE TABLE money_test (
+ id INT IDENTITY PRIMARY KEY,
+ amount MONEY NULL
+)
+
+SQL
+
+});
+
+my $rs = $schema->resultset('Money');
+
+my $row;
+lives_ok {
+ $row = $rs->create({ amount => 100 });
+} 'inserted a money value';
+
+is $rs->find($row->id)->amount, '100.00', 'money value round-trip';
+
+lives_ok {
+ $row->update({ amount => 200 });
+} 'updated a money value';
+
+is $rs->find($row->id)->amount, '200.00', 'updated money value round-trip';
+
+lives_ok {
+ $row->update({ amount => undef });
+} 'updated a money value to NULL';
+
+is $rs->find($row->id)->amount, undef,'updated money value to NULL round-trip';
+
+$schema->storage->dbh_do (sub {
+ my ($storage, $dbh) = @_;
eval { $dbh->do("DROP TABLE Owners") };
eval { $dbh->do("DROP TABLE Books") };
$dbh->do(<<'SQL');
@@ -232,7 +268,11 @@
# clean up our mess
END {
- my $dbh = eval { $schema->storage->_dbh };
- $dbh->do('DROP TABLE artist') if $dbh;
+ if (my $dbh = eval { $schema->storage->_dbh }) {
+ $dbh->do('DROP TABLE artist');
+ $dbh->do('DROP TABLE money_test');
+ $dbh->do('DROP TABLE Books');
+ $dbh->do('DROP TABLE Owners');
+ }
}
# vim:sw=2 sts=2
Modified: DBIx-Class/0.08/branches/mysql_ansi/t/74mssql.t
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/74mssql.t 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/74mssql.t 2009-08-03 08:13:39 UTC (rev 7171)
@@ -18,7 +18,7 @@
plan skip_all => 'Set $ENV{DBICTEST_MSSQL_DSN}, _USER and _PASS to run this test'
unless ($dsn);
-plan tests => 7;
+plan tests => 13;
my $schema = DBICTest::Schema->clone;
$schema->connection($dsn, $user, $pass);
@@ -74,10 +74,48 @@
$it->next;
is( $it->next, undef, "next past end of resultset ok" );
+# test MONEY column support
+$schema->storage->dbh_do (sub {
+ my ($storage, $dbh) = @_;
+ eval { $dbh->do("DROP TABLE money_test") };
+ $dbh->do(<<'SQL');
+
+CREATE TABLE money_test (
+ id INT IDENTITY PRIMARY KEY,
+ amount MONEY NULL
+)
+
+SQL
+
+});
+
+my $rs = $schema->resultset('Money');
+
+my $row;
+lives_ok {
+ $row = $rs->create({ amount => 100 });
+} 'inserted a money value';
+
+is $rs->find($row->id)->amount, 100, 'money value round-trip';
+
+lives_ok {
+ $row->update({ amount => 200 });
+} 'updated a money value';
+
+is $rs->find($row->id)->amount, 200, 'updated money value round-trip';
+
+lives_ok {
+ $row->update({ amount => undef });
+} 'updated a money value to NULL';
+
+is $rs->find($row->id)->amount, undef,'updated money value to NULL round-trip';
+
# clean up our mess
END {
$dbh->do("IF OBJECT_ID('artist', 'U') IS NOT NULL DROP TABLE artist")
if $dbh;
$dbh->do("IF OBJECT_ID('cd', 'U') IS NOT NULL DROP TABLE cd")
if $dbh;
+ $dbh->do("IF OBJECT_ID('money_test', 'U') IS NOT NULL DROP TABLE money_test")
+ if $dbh;
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/t/93storage_replication.t
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/93storage_replication.t 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/93storage_replication.t 2009-08-03 08:13:39 UTC (rev 7171)
@@ -6,13 +6,14 @@
use DBICTest;
use List::Util 'first';
use Scalar::Util 'reftype';
+use File::Spec;
use IO::Handle;
BEGIN {
eval "use DBIx::Class::Storage::DBI::Replicated; use Test::Moose";
plan $@
? ( skip_all => "Deps not installed: $@" )
- : ( tests => 90 );
+ : ( tests => 126 );
}
use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
@@ -20,6 +21,10 @@
use_ok 'DBIx::Class::Storage::DBI::Replicated::Replicant';
use_ok 'DBIx::Class::Storage::DBI::Replicated';
+use Moose();
+use MooseX::Types();
+diag "Using Moose version $Moose::VERSION and MooseX::Types version $MooseX::Types::VERSION";
+
=head1 HOW TO USE
This is a test of the replicated storage system. This will work in one of
@@ -142,9 +147,9 @@
use File::Copy;
use base 'DBIx::Class::DBI::Replicated::TestReplication';
- __PACKAGE__->mk_accessors( qw/master_path slave_paths/ );
+ __PACKAGE__->mk_accessors(qw/master_path slave_paths/);
- ## Set the mastep path from DBICTest
+ ## Set the master path from DBICTest
sub new {
my $class = shift @_;
@@ -152,9 +157,9 @@
$self->master_path( DBICTest->_sqlite_dbfilename );
$self->slave_paths([
- "t/var/DBIxClass_slave1.db",
- "t/var/DBIxClass_slave2.db",
- ]);
+ File::Spec->catfile(qw/t var DBIxClass_slave1.db/),
+ File::Spec->catfile(qw/t var DBIxClass_slave2.db/),
+ ]);
return $self;
}
@@ -170,7 +175,10 @@
my @connect_infos = map { [$_,'','',{AutoCommit=>1}] } @dsn;
- # try a hashref too
+ ## Make sure nothing is left over from a failed test
+ $self->cleanup;
+
+ ## try a hashref too
my $c = $connect_infos[0];
$connect_infos[0] = {
dsn => $c->[0],
@@ -198,7 +206,9 @@
sub cleanup {
my $self = shift @_;
foreach my $slave (@{$self->slave_paths}) {
- unlink $slave;
+ if(-e $slave) {
+ unlink $slave;
+ }
}
}
@@ -275,6 +285,19 @@
ok my @replicated_storages = $replicated->schema->storage->connect_replicants(@replicant_connects)
=> 'Created some storages suitable for replicants';
+our %debug;
+$replicated->schema->storage->debug(1);
+$replicated->schema->storage->debugcb(sub {
+ my ($op, $info) = @_;
+ ##warn "\n$op, $info\n";
+ %debug = (
+ op => $op,
+ info => $info,
+ dsn => ($info=~m/\[(.+)\]/)[0],
+ storage_type => $info=~m/REPLICANT/ ? 'REPLICANT' : 'MASTER',
+ );
+});
+
ok my @all_storages = $replicated->schema->storage->all_storages
=> '->all_storages';
@@ -296,6 +319,8 @@
my @replicant_names = keys %{ $replicated->schema->storage->replicants };
+ok @replicant_names, "found replicant names @replicant_names";
+
## Silence warning about not supporting the is_replicating method if using the
## sqlite dbs.
$replicated->schema->storage->debugobj->silence(1)
@@ -332,6 +357,11 @@
[ qw/artistid name/ ],
[ 4, "Ozric Tentacles"],
]);
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ like $debug{info}, qr/INSERT/, 'Last was an insert';
## Make sure all the slaves have the table definitions
@@ -353,6 +383,11 @@
ok my $artist1 = $replicated->schema->resultset('Artist')->find(4)
=> 'Created Result';
+## We removed testing here since master read weight is on, so we can't tell in
+## advance what storage to expect. We turn master read weight off a bit lower
+## is $debug{storage_type}, 'REPLICANT'
+## => "got last query from a replicant: $debug{dsn}, $debug{info}";
+
isa_ok $artist1
=> 'DBICTest::Artist';
@@ -391,6 +426,11 @@
[ 7, "Watergate"],
]);
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ like $debug{info}, qr/INSERT/, 'Last was an insert';
+
## Make sure all the slaves have the table definitions
$replicated->replicate;
@@ -398,7 +438,10 @@
ok my $artist2 = $replicated->schema->resultset('Artist')->find(5)
=> 'Sync succeed';
-
+
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
isa_ok $artist2
=> 'DBICTest::Artist';
@@ -420,7 +463,10 @@
ok my $artist3 = $replicated->schema->resultset('Artist')->find(6)
=> 'Still finding stuff.';
-
+
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
isa_ok $artist3
=> 'DBICTest::Artist';
@@ -434,7 +480,10 @@
ok ! $replicated->schema->resultset('Artist')->find(666)
=> 'Correctly failed to find something.';
-
+
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
## test the reliable option
TESTRELIABLE: {
@@ -443,24 +492,39 @@
ok $replicated->schema->resultset('Artist')->find(2)
=> 'Read from master 1';
-
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
ok $replicated->schema->resultset('Artist')->find(5)
=> 'Read from master 2';
-
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
$replicated->schema->storage->set_balanced_storage;
ok $replicated->schema->resultset('Artist')->find(3)
=> 'Read from replicant';
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
}
## Make sure when reliable goes out of scope, we are using replicants again
ok $replicated->schema->resultset('Artist')->find(1)
=> 'back to replicant 1.';
-
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
ok $replicated->schema->resultset('Artist')->find(2)
=> 'back to replicant 2.';
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
## set all the replicants to inactive, and make sure the balancer falls back to
## the master.
@@ -474,10 +538,13 @@
$replicated->schema->storage->debugfh($debugfh);
ok $replicated->schema->resultset('Artist')->find(2)
- => 'Fallback to master';
+ => 'Fallback to master';
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
like $fallback_warning, qr/falling back to master/
- => 'emits falling back to master warning';
+ => 'emits falling back to master warning';
$replicated->schema->storage->debugfh($oldfh);
}
@@ -496,6 +563,9 @@
ok $replicated->schema->resultset('Artist')->find(2)
=> 'Returned to replicates';
+
+is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
## Getting slave status tests
@@ -503,7 +573,7 @@
## We skip these tests unless you have custom replicants, since the default
## sqlite based replication tests don't support these functions.
- skip 'Cannot Test Replicant Status on Non Replicating Database', 9
+ skip 'Cannot Test Replicant Status on Non Replicating Database', 10
unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
$replicated->replicate; ## Give the slaves a chance to catchup.
@@ -559,6 +629,9 @@
ok $replicated->schema->resultset('Artist')->find(5)
=> 'replicant reactivated';
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
is $replicated->schema->storage->pool->active_replicants => 2
=> "both replicants reactivated";
@@ -569,7 +642,10 @@
ok my $reliably = sub {
ok $replicated->schema->resultset('Artist')->find(5)
- => 'replicant reactivated';
+ => 'replicant reactivated';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
} => 'created coderef properly';
@@ -592,6 +668,8 @@
ok $replicated->schema->resultset('Artist')->find(3)
=> 'replicant reactivated';
+
+is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
## make sure transactions are set to execute_reliably
@@ -607,11 +685,17 @@
]);
ok my $result = $replicated->schema->resultset('Artist')->find($id)
- => 'Found expected artist';
-
+ => "Found expected artist for $id";
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'Found expected artist again';
-
+ => 'Found expected artist again for 1';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
return ($result, $more);
} => 'Created a coderef properly';
@@ -623,18 +707,28 @@
is $return[0]->id, 666
=> 'first returned value is correct';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
is $return[1]->id, 1
=> 'second returned value is correct';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
}
## Test that asking for single return works
{
- ok my $return = $replicated->schema->txn_do($transaction, 777)
+ ok my @return = $replicated->schema->txn_do($transaction, 777)
=> 'did transaction';
- is $return->id, 777
+ is $return[0]->id, 777
=> 'first returned value is correct';
+
+ is $return[1]->id, 1
+ => 'second returned value is correct';
}
## Test transaction returning a single value
@@ -643,6 +737,7 @@
ok my $result = $replicated->schema->txn_do(sub {
ok my $more = $replicated->schema->resultset('Artist')->find(1)
=> 'found inside a transaction';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
return $more;
}) => 'successfully processed transaction';
@@ -654,15 +749,22 @@
ok $replicated->schema->resultset('Artist')->find(1)
=> 'replicant reactivated';
+
+is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
## Test Discard changes
{
ok my $artist = $replicated->schema->resultset('Artist')->find(2)
=> 'got an artist to test discard changes';
-
- ok $artist->discard_changes
+
+ is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+
+ ok $artist->get_from_storage({force_pool=>'master'})
=> 'properly discard changes';
+
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+
}
## Test some edge cases, like trying to do a transaction inside a transaction, etc
@@ -672,6 +774,7 @@
return $replicated->schema->txn_do(sub {
ok my $more = $replicated->schema->resultset('Artist')->find(1)
=> 'found inside a transaction inside a transaction';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
return $more;
});
}) => 'successfully processed transaction';
@@ -686,7 +789,8 @@
return $replicated->schema->txn_do(sub {
return $replicated->schema->storage->execute_reliably(sub {
ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'found inside crazy deep transactions and execute_reliably';
+ => 'found inside crazy deep transactions and execute_reliably';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
return $more;
});
});
@@ -709,8 +813,25 @@
ok my $artist = $reliable_artist_rs->find(2)
=> 'got an artist result via force_pool storage';
+
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
}
+## Test the force_pool resultset attribute part two.
+
+{
+ ok my $artist_rs = $replicated->schema->resultset('Artist')
+ => 'got artist resultset';
+
+ ## Turn on Forced Pool Storage
+ ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>$replicant_names[0]})
+ => 'Created a resultset using force_pool storage';
+
+ ok my $artist = $reliable_artist_rs->find(2)
+ => 'got an artist result via force_pool storage';
+
+ is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+}
## Delete the old database files
$replicated->cleanup;
Modified: DBIx-Class/0.08/branches/mysql_ansi/t/99dbic_sqlt_parser.t
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/99dbic_sqlt_parser.t 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/99dbic_sqlt_parser.t 2009-08-03 08:13:39 UTC (rev 7171)
@@ -15,9 +15,13 @@
my $schema = DBICTest->init_schema();
# Dummy was yanked out by the sqlt hook test
+# CustomSql tests the horrific/deprecated ->name(\$sql) hack
# YearXXXXCDs are views
-my @sources = grep { $_ ne 'Dummy' && $_ !~ /^Year\d{4}CDs$/ }
- $schema->sources;
+#
+my @sources = grep
+ { $_ !~ /^ (?: Dummy | CustomSql | Year\d{4}CDs ) $/x }
+ $schema->sources
+;
plan tests => ( @sources * 3);
Modified: DBIx-Class/0.08/branches/mysql_ansi/t/bind/attribute.t
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/bind/attribute.t 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/bind/attribute.t 2009-08-03 08:13:39 UTC (rev 7171)
@@ -13,7 +13,7 @@
eval "use DBD::SQLite";
plan $@
? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 9 );
+ : ( tests => 13 );
}
my $where_bind = {
@@ -45,34 +45,34 @@
is ( $rs->count, 1, 'where/bind last' );
}
-# More complex cases, based primarily on the Cookbook
-# "Arbitrary SQL through a custom ResultSource" technique,
-# which seems to be the only place the bind attribute is
-# documented. Breaking this technique probably breaks existing
-# application code.
-my $source = DBICTest::Artist->result_source_instance;
-my $new_source = $source->new($source);
-$new_source->source_name('Complex');
+{
+ # More complex cases, based primarily on the Cookbook
+ # "Arbitrary SQL through a custom ResultSource" technique,
+ # which seems to be the only place the bind attribute is
+ # documented. Breaking this technique probably breaks existing
+ # application code.
+ my $source = DBICTest::Artist->result_source_instance;
+ my $new_source = $source->new($source);
+ $new_source->source_name('Complex');
-$new_source->name(\<<'');
-( SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year
- FROM artist a
- JOIN cd ON cd.artist = a.artistid
- WHERE cd.year = ?)
+ $new_source->name(\<<'');
+ ( SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year
+ FROM artist a
+ JOIN cd ON cd.artist = a.artistid
+ WHERE cd.year = ?)
-$schema->register_extra_source('Complex' => $new_source);
+ $schema->register_extra_source('Complex' => $new_source);
-$rs = $schema->resultset('Complex')->search({}, { bind => [ 1999 ] });
-is ( $rs->count, 1, 'cookbook arbitrary sql example' );
+ $rs = $schema->resultset('Complex')->search({}, { bind => [ 1999 ] });
+ is ( $rs->count, 1, 'cookbook arbitrary sql example' );
-$rs = $schema->resultset('Complex')->search({ 'artistid' => 1 }, { bind => [ 1999 ] });
-is ( $rs->count, 1, '...coobook + search condition' );
+ $rs = $schema->resultset('Complex')->search({ 'artistid' => 1 }, { bind => [ 1999 ] });
+ is ( $rs->count, 1, '...cookbook + search condition' );
-$rs = $schema->resultset('Complex')->search({}, { bind => [ 1999 ] })
- ->search({ 'artistid' => 1 });
-is ( $rs->count, 1, '...cookbook (bind first) + chained search' );
+ $rs = $schema->resultset('Complex')->search({}, { bind => [ 1999 ] })
+ ->search({ 'artistid' => 1 });
+ is ( $rs->count, 1, '...cookbook (bind first) + chained search' );
-{
$rs = $schema->resultset('Complex')->search({}, { bind => [ 1999 ] })->search({}, { where => \"title LIKE ?", bind => [ 'Spoon%' ] });
is_same_sql_bind(
$rs->as_query,
@@ -82,8 +82,36 @@
[ '!!dummy' => 'Spoon%' ]
],
'got correct SQL'
-);
+ );
+}
+{
+ # More complex cases, based primarily on the Cookbook
+ # "Arbitrary SQL through a custom ResultSource" technique,
+ # which seems to be the only place the bind attribute is
+ # documented. Breaking this technique probably breaks existing
+ # application code.
+
+ $rs = $schema->resultset('CustomSql')->search({}, { bind => [ 1999 ] });
+ is ( $rs->count, 1, 'cookbook arbitrary sql example (in separate file)' );
+
+ $rs = $schema->resultset('CustomSql')->search({ 'artistid' => 1 }, { bind => [ 1999 ] });
+ is ( $rs->count, 1, '...cookbook (in separate file) + search condition' );
+
+ $rs = $schema->resultset('CustomSql')->search({}, { bind => [ 1999 ] })
+ ->search({ 'artistid' => 1 });
+ is ( $rs->count, 1, '...cookbook (bind first, in separate file) + chained search' );
+
+ $rs = $schema->resultset('CustomSql')->search({}, { bind => [ 1999 ] })->search({}, { where => \"title LIKE ?", bind => [ 'Spoon%' ] });
+ is_same_sql_bind(
+ $rs->as_query,
+ "(SELECT me.artistid, me.name, me.rank, me.charfield FROM (SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year FROM artist a JOIN cd ON cd.artist = a.artistid WHERE cd.year = ?) WHERE title LIKE ?)",
+ [
+ [ '!!dummy' => '1999' ],
+ [ '!!dummy' => 'Spoon%' ]
+ ],
+ 'got correct SQL (cookbook arbitrary SQL, in separate file)'
+ );
}
TODO: {
Property changes on: DBIx-Class/0.08/branches/mysql_ansi/t/cdbi/testlib/DBIC/Test/SQLite.pm
___________________________________________________________________
Name: svn:eol-style
- native
Modified: DBIx-Class/0.08/branches/mysql_ansi/t/inflate/datetime_pg.t
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/inflate/datetime_pg.t 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/inflate/datetime_pg.t 2009-08-03 08:13:39 UTC (rev 7171)
@@ -13,7 +13,7 @@
eval { require DateTime::Format::Pg };
plan $@
? ( skip_all => 'Need DateTime::Format::Pg for timestamp inflation tests')
- : ( tests => 3 )
+ : ( tests => 6 )
;
@@ -27,4 +27,14 @@
is($event->created_on->time_zone->name, "America/Chicago", "Timezone changed");
# Time zone difference -> -6hours
is($event->created_on->iso8601, "2009-01-15T11:00:00", "Time with TZ correct");
+
+# test 'timestamp without time zone'
+ my $dt = DateTime->from_epoch(epoch => time);
+ $dt->set_nanosecond(int 500_000_000);
+ $event->update({ts_without_tz => $dt});
+ $event->discard_changes;
+ isa_ok($event->ts_without_tz, "DateTime") or diag $event->created_on;
+ is($event->ts_without_tz, $dt, 'timestamp without time zone inflation');
+ is($event->ts_without_tz->microsecond, $dt->microsecond,
+ 'timestamp without time zone microseconds survived');
}
Modified: DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/Bookmark.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/Bookmark.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/Bookmark.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -15,6 +15,7 @@
},
'link' => {
data_type => 'integer',
+ is_nullable => 1,
},
);
Added: DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/CustomSql.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/CustomSql.pm (rev 0)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/CustomSql.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -0,0 +1,15 @@
+package # hide from PAUSE
+ DBICTest::Schema::CustomSql;
+
+use base qw/DBICTest::Schema::Artist/;
+
+__PACKAGE__->table('dummy');
+
+__PACKAGE__->result_source_instance->name(\<<SQL);
+ ( SELECT a.*, cd.cdid AS cdid, cd.title AS title, cd.year AS year
+ FROM artist a
+ JOIN cd ON cd.artist = a.artistid
+ WHERE cd.year = ?)
+SQL
+
+1;
Property changes on: DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/CustomSql.pm
___________________________________________________________________
Name: svn:eol-style
+ native
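
A minimal sketch of how a class-based custom-SQL source like the one added above is queried, assuming $schema is a connected DBICTest::Schema; the placeholder inside the virtual view's SQL is supplied through the bind attribute, exactly as t/bind/attribute.t now does:

    use strict;
    use warnings;

    # The '?' in the custom SQL is filled via the bind attribute ...
    my $rs = $schema->resultset('CustomSql')->search({}, { bind => [ 1999 ] });
    print $rs->count, " artists with a CD from 1999\n";

    # ... and further search conditions are layered on top of the subselect.
    my $first = $rs->search({ artistid => 1 })->single;
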
Modified: DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/Event.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/Event.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/Event.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -15,6 +15,7 @@
varchar_date => { data_type => 'varchar', inflate_date => 1, size => 20, is_nullable => 1 },
varchar_datetime => { data_type => 'varchar', inflate_datetime => 1, size => 20, is_nullable => 1 },
skip_inflation => { data_type => 'datetime', inflate_datetime => 0, is_nullable => 1 },
+ ts_without_tz => { data_type => 'datetime', is_nullable => 1 }, # used in EventTZPg
);
__PACKAGE__->set_primary_key('id');
Modified: DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/EventTZPg.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/EventTZPg.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/EventTZPg.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -12,6 +12,7 @@
id => { data_type => 'integer', is_auto_increment => 1 },
starts_at => { data_type => 'datetime', timezone => "America/Chicago", locale => 'de_DE' },
created_on => { data_type => 'timestamp with time zone', timezone => "America/Chicago" },
+ ts_without_tz => { data_type => 'timestamp without time zone' },
);
__PACKAGE__->set_primary_key('id');
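
A sketch of what a stand-alone result class declaring such a column might look like, assuming (as the timezone attributes on the class's other columns suggest) that InflateColumn::DateTime is loaded; on Pg the actual (de)formatting is delegated to DateTime::Format::Pg, which is why the test above requires it. The package name below is hypothetical:

    package My::Schema::Result::EventTZPg;   # hypothetical result class

    use strict;
    use warnings;
    use base qw/DBIx::Class/;

    # InflateColumn::DateTime turns datetime-ish columns into DateTime objects.
    __PACKAGE__->load_components(qw/InflateColumn::DateTime Core/);
    __PACKAGE__->table('event');
    __PACKAGE__->add_columns(
        id            => { data_type => 'integer', is_auto_increment => 1 },
        ts_without_tz => { data_type => 'timestamp without time zone' },
    );
    __PACKAGE__->set_primary_key('id');

    1;
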
Added: DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/Money.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/Money.pm (rev 0)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema/Money.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -0,0 +1,21 @@
+package # hide from PAUSE
+ DBICTest::Schema::Money;
+
+use base qw/DBICTest::BaseResult/;
+
+__PACKAGE__->table('money_test');
+
+__PACKAGE__->add_columns(
+ 'id' => {
+ data_type => 'integer',
+ is_auto_increment => 1,
+ },
+ 'amount' => {
+ data_type => 'money',
+ is_nullable => 1,
+ },
+);
+
+__PACKAGE__->set_primary_key('id');
+
+1;
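
A minimal sketch of how the new money_test source might be exercised, assuming a deployed DBICTest schema in $schema; since the column is declared is_nullable => 1, both defined and undefined amounts round-trip:

    use strict;
    use warnings;

    my $rs = $schema->resultset('Money');

    # Insert a row with a money value and read it back from storage.
    my $row = $rs->create({ amount => 100.00 });
    $row->discard_changes;
    printf "stored amount: %s\n", $row->amount;

    # undef is also legal, since the column is nullable.
    my $empty = $rs->create({ amount => undef });
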
Modified: DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema.pm
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema.pm 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/lib/DBICTest/Schema.pm 2009-08-03 08:13:39 UTC (rev 7171)
@@ -20,6 +20,8 @@
Tag
Year2000CDs
Year1999CDs
+ CustomSql
+ Money
/,
{ 'DBICTest::Schema' => [qw/
LinerNotes
Modified: DBIx-Class/0.08/branches/mysql_ansi/t/lib/sqlite.sql
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/lib/sqlite.sql 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/lib/sqlite.sql 2009-08-03 08:13:39 UTC (rev 7171)
@@ -1,6 +1,6 @@
--
-- Created by SQL::Translator::Producer::SQLite
--- Created on Sat Jun 27 14:02:39 2009
+-- Created on Thu Jul 30 08:44:22 2009
--
@@ -63,7 +63,8 @@
created_on timestamp NOT NULL,
varchar_date varchar(20),
varchar_datetime varchar(20),
- skip_inflation datetime
+ skip_inflation datetime,
+ ts_without_tz datetime
);
--
@@ -107,6 +108,14 @@
);
--
+-- Table: money_test
+--
+CREATE TABLE money_test (
+ id INTEGER PRIMARY KEY NOT NULL,
+ amount money
+);
+
+--
-- Table: noprimarykey
--
CREATE TABLE noprimarykey (
@@ -225,7 +234,7 @@
--
CREATE TABLE bookmark (
id INTEGER PRIMARY KEY NOT NULL,
- link integer NOT NULL
+ link integer
);
CREATE INDEX bookmark_idx_link ON bookmark (link);
Modified: DBIx-Class/0.08/branches/mysql_ansi/t/prefetch/grouped.t
===================================================================
--- DBIx-Class/0.08/branches/mysql_ansi/t/prefetch/grouped.t 2009-08-03 03:52:01 UTC (rev 7170)
+++ DBIx-Class/0.08/branches/mysql_ansi/t/prefetch/grouped.t 2009-08-03 08:13:39 UTC (rev 7171)
@@ -28,7 +28,6 @@
my $track_rs = $schema->resultset ('Track')->search (
{ 'me.cd' => { -in => [ $cd_rs->get_column ('cdid')->all ] } },
{
- # the select/as is deliberately silly to test both funcs and refs below
select => [
'me.cd',
{ count => 'me.trackid' },
@@ -67,8 +66,6 @@
# Test sql by hand, as the sqlite db will simply paper over
# improper group/select combinations
#
- # the exploded IN needs fixing below, coming in another branch
- #
is_same_sql_bind (
$track_rs->count_rs->as_query,
'(
@@ -131,14 +128,19 @@
# test a has_many/might_have prefetch at the same level
# Note that one of the CDs now has 4 tracks instead of 3
{
- my $most_tracks_rs = $cd_rs->search ({}, {
- prefetch => 'liner_notes', # tracks are alredy prefetched
- select => ['me.cdid', { count => 'tracks.trackid' } ],
- as => [qw/cdid track_count/],
- group_by => 'me.cdid',
- order_by => { -desc => 'track_count' },
- rows => 2,
- });
+ my $most_tracks_rs = $schema->resultset ('CD')->search (
+ {
+ 'me.cdid' => { '!=' => undef }, # duh - this is just to test WHERE
+ },
+ {
+ prefetch => [qw/tracks liner_notes/],
+ select => ['me.cdid', { count => 'tracks.trackid' } ],
+ as => [qw/cdid track_count/],
+ group_by => 'me.cdid',
+ order_by => { -desc => 'track_count' },
+ rows => 2,
+ }
+ );
is_same_sql_bind (
$most_tracks_rs->count_rs->as_query,
@@ -149,7 +151,7 @@
FROM cd me
LEFT JOIN track tracks ON tracks.cd = me.cdid
LEFT JOIN liner_notes liner_notes ON liner_notes.liner_id = me.cdid
- WHERE ( tracks.cd IS NOT NULL )
+ WHERE ( me.cdid IS NOT NULL )
GROUP BY me.cdid
LIMIT 2
) count_subq
@@ -166,14 +168,14 @@
SELECT me.cdid, COUNT( tracks.trackid ) AS track_count
FROM cd me
LEFT JOIN track tracks ON tracks.cd = me.cdid
- WHERE ( tracks.cd IS NOT NULL )
+ WHERE ( me.cdid IS NOT NULL )
GROUP BY me.cdid
ORDER BY track_count DESC
LIMIT 2
) me
LEFT JOIN track tracks ON tracks.cd = me.cdid
LEFT JOIN liner_notes liner_notes ON liner_notes.liner_id = me.cdid
- WHERE ( tracks.cd IS NOT NULL )
+ WHERE ( me.cdid IS NOT NULL )
ORDER BY track_count DESC, tracks.cd
)',
[],