[Bast-commits] r7786 - in DBIx-Class/0.08/branches/ado_mssql: .
examples/Schema lib/DBIx lib/DBIx/Class
lib/DBIx/Class/InflateColumn lib/DBIx/Class/Manual
lib/DBIx/Class/Schema lib/DBIx/Class/Serialize
lib/DBIx/Class/Storage lib/DBIx/Class/Storage/DBI
lib/DBIx/Class/Storage/DBI/ODBC lib/DBIx/Class/Storage/DBI/Oracle
lib/DBIx/Class/Storage/DBI/Replicated
lib/DBIx/Class/Storage/DBI/Sybase
lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server script
t t/cdbi t/cdbi/testlib t/count t/inflate t/lib
t/lib/DBICTest t/lib/DBICTest/Schema t/prefetch
t/relationship t/resultset t/search t/storage
caelum at dev.catalyst.perl.org
caelum at dev.catalyst.perl.org
Wed Oct 14 13:45:35 GMT 2009
Author: caelum
Date: 2009-10-14 13:45:34 +0000 (Wed, 14 Oct 2009)
New Revision: 7786
Added:
DBIx-Class/0.08/branches/ado_mssql/.gitignore
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/AutoCast.pm
DBIx-Class/0.08/branches/ado_mssql/t/93autocast.t
DBIx-Class/0.08/branches/ado_mssql/t/inflate/datetime_determine_parser.t
DBIx-Class/0.08/branches/ado_mssql/t/prefetch/join_type.t
DBIx-Class/0.08/branches/ado_mssql/t/resultset/is_paged.t
DBIx-Class/0.08/branches/ado_mssql/t/storage/
DBIx-Class/0.08/branches/ado_mssql/t/storage/base.t
DBIx-Class/0.08/branches/ado_mssql/t/storage/dbh_do.t
DBIx-Class/0.08/branches/ado_mssql/t/storage/dbi_coderef.t
DBIx-Class/0.08/branches/ado_mssql/t/storage/debug.t
DBIx-Class/0.08/branches/ado_mssql/t/storage/disable_sth_caching.t
DBIx-Class/0.08/branches/ado_mssql/t/storage/error.t
DBIx-Class/0.08/branches/ado_mssql/t/storage/exception.t
DBIx-Class/0.08/branches/ado_mssql/t/storage/on_connect_call.t
DBIx-Class/0.08/branches/ado_mssql/t/storage/on_connect_do.t
DBIx-Class/0.08/branches/ado_mssql/t/storage/ping_count.t
DBIx-Class/0.08/branches/ado_mssql/t/storage/reconnect.t
DBIx-Class/0.08/branches/ado_mssql/t/storage/replication.t
DBIx-Class/0.08/branches/ado_mssql/t/storage/stats.t
Removed:
DBIx-Class/0.08/branches/ado_mssql/t/18inserterror.t
DBIx-Class/0.08/branches/ado_mssql/t/31stats.t
DBIx-Class/0.08/branches/ado_mssql/t/32connect_code_ref.t
DBIx-Class/0.08/branches/ado_mssql/t/33storage_reconnect.t
DBIx-Class/0.08/branches/ado_mssql/t/35disable_sth_caching.t
DBIx-Class/0.08/branches/ado_mssql/t/36datetime.t
DBIx-Class/0.08/branches/ado_mssql/t/91debug.t
DBIx-Class/0.08/branches/ado_mssql/t/92storage.t
DBIx-Class/0.08/branches/ado_mssql/t/92storage_on_connect_call.t
DBIx-Class/0.08/branches/ado_mssql/t/92storage_on_connect_do.t
DBIx-Class/0.08/branches/ado_mssql/t/92storage_ping_count.t
DBIx-Class/0.08/branches/ado_mssql/t/93storage_replication.t
DBIx-Class/0.08/branches/ado_mssql/t/cdbi/testlib/Binary.pm
DBIx-Class/0.08/branches/ado_mssql/t/cdbi/testlib/PgBase.pm
DBIx-Class/0.08/branches/ado_mssql/t/dbh_do.t
Modified:
DBIx-Class/0.08/branches/ado_mssql/
DBIx-Class/0.08/branches/ado_mssql/Changes
DBIx-Class/0.08/branches/ado_mssql/MANIFEST.SKIP
DBIx-Class/0.08/branches/ado_mssql/Makefile.PL
DBIx-Class/0.08/branches/ado_mssql/TODO
DBIx-Class/0.08/branches/ado_mssql/examples/Schema/insertdb.pl
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Componentised.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Core.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Cursor.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/InflateColumn/DateTime.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Component.pod
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Cookbook.pod
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/DocMap.pod
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Example.pod
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/FAQ.pod
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Troubleshooting.pod
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Ordered.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSet.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSetColumn.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSource.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSourceHandle.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Row.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/SQLAHacks.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Schema.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Schema/Versioned.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Serialize/Storable.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Cursor.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/MSSQL.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/NoBindVars.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Oracle.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Pg.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/mysql.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/Statistics.pm
DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/TxnScopeGuard.pm
DBIx-Class/0.08/branches/ado_mssql/script/dbicadmin
DBIx-Class/0.08/branches/ado_mssql/t/02pod.t
DBIx-Class/0.08/branches/ado_mssql/t/03podcoverage.t
DBIx-Class/0.08/branches/ado_mssql/t/05components.t
DBIx-Class/0.08/branches/ado_mssql/t/103many_to_many_warning.t
DBIx-Class/0.08/branches/ado_mssql/t/26dumper.t
DBIx-Class/0.08/branches/ado_mssql/t/46where_attribute.t
DBIx-Class/0.08/branches/ado_mssql/t/60core.t
DBIx-Class/0.08/branches/ado_mssql/t/71mysql.t
DBIx-Class/0.08/branches/ado_mssql/t/72pg.t
DBIx-Class/0.08/branches/ado_mssql/t/746mssql.t
DBIx-Class/0.08/branches/ado_mssql/t/746sybase.t
DBIx-Class/0.08/branches/ado_mssql/t/74mssql.t
DBIx-Class/0.08/branches/ado_mssql/t/76joins.t
DBIx-Class/0.08/branches/ado_mssql/t/80unique.t
DBIx-Class/0.08/branches/ado_mssql/t/81transactions.t
DBIx-Class/0.08/branches/ado_mssql/t/83cache.t
DBIx-Class/0.08/branches/ado_mssql/t/86sqlt.t
DBIx-Class/0.08/branches/ado_mssql/t/88result_set_column.t
DBIx-Class/0.08/branches/ado_mssql/t/89dbicadmin.t
DBIx-Class/0.08/branches/ado_mssql/t/94versioning.t
DBIx-Class/0.08/branches/ado_mssql/t/95sql_maker.t
DBIx-Class/0.08/branches/ado_mssql/t/99dbic_sqlt_parser.t
DBIx-Class/0.08/branches/ado_mssql/t/cdbi/13-constraint.t
DBIx-Class/0.08/branches/ado_mssql/t/count/grouped_pager.t
DBIx-Class/0.08/branches/ado_mssql/t/count/in_subquery.t
DBIx-Class/0.08/branches/ado_mssql/t/inflate/serialize.t
DBIx-Class/0.08/branches/ado_mssql/t/lib/DBICTest/AuthorCheck.pm
DBIx-Class/0.08/branches/ado_mssql/t/lib/DBICTest/Schema/Artist.pm
DBIx-Class/0.08/branches/ado_mssql/t/lib/DBICTest/Schema/Track.pm
DBIx-Class/0.08/branches/ado_mssql/t/lib/sqlite.sql
DBIx-Class/0.08/branches/ado_mssql/t/prefetch/attrs_untouched.t
DBIx-Class/0.08/branches/ado_mssql/t/prefetch/grouped.t
DBIx-Class/0.08/branches/ado_mssql/t/prefetch/multiple_hasmany.t
DBIx-Class/0.08/branches/ado_mssql/t/prefetch/standard.t
DBIx-Class/0.08/branches/ado_mssql/t/prefetch/via_search_related.t
DBIx-Class/0.08/branches/ado_mssql/t/relationship/after_update.t
DBIx-Class/0.08/branches/ado_mssql/t/relationship/core.t
DBIx-Class/0.08/branches/ado_mssql/t/relationship/doesnt_exist.t
DBIx-Class/0.08/branches/ado_mssql/t/relationship/update_or_create_multi.t
DBIx-Class/0.08/branches/ado_mssql/t/relationship/update_or_create_single.t
DBIx-Class/0.08/branches/ado_mssql/t/resultset/as_query.t
DBIx-Class/0.08/branches/ado_mssql/t/search/preserve_original_rs.t
DBIx-Class/0.08/branches/ado_mssql/t/search/subquery.t
DBIx-Class/0.08/branches/ado_mssql/t/zzzzzzz_perl_perf_bug.t
Log:
r7355 at pentium (orig r7354): frew | 2009-08-20 17:54:04 -0400
add some basic guards to get rid of warnings
r7359 at pentium (orig r7358): ribasushi | 2009-08-21 05:18:43 -0400
Because prefetch uses the cache system, it is not possible to set HRI on a prefetched rs without upsetting the tests - don't compare
r7370 at pentium (orig r7369): caelum | 2009-08-24 06:32:57 -0400
bump CAG dep
r7389 at pentium (orig r7388): ribasushi | 2009-08-25 07:43:38 -0400
typo
r7390 at pentium (orig r7389): ribasushi | 2009-08-25 08:29:37 -0400
r7354 at Thesaurus (orig r7351): abraxxa | 2009-08-20 17:46:06 +0200
new branch grouped_has_many_join
r7382 at Thesaurus (orig r7379): ribasushi | 2009-08-24 22:50:13 +0200
Seems like abraxxa's bug is fixed
r7385 at Thesaurus (orig r7382): ribasushi | 2009-08-25 11:33:40 +0200
One more test
r7394 at pentium (orig r7393): ribasushi | 2009-08-26 12:07:51 -0400
Stop testing deprecated json::syck
r7395 at pentium (orig r7394): ribasushi | 2009-08-26 12:08:24 -0400
Make sure sqlt_type gets called after determining driver
r7396 at pentium (orig r7395): ribasushi | 2009-08-26 12:21:53 -0400
Make POD::Coverage happy... again
r7397 at pentium (orig r7396): ribasushi | 2009-08-26 12:31:54 -0400
Clarify
r7398 at pentium (orig r7397): frew | 2009-08-26 16:24:19 -0400
Remove dead, sketchtowne link
r7402 at pentium (orig r7401): ribasushi | 2009-08-27 12:50:12 -0400
Changes
r7404 at pentium (orig r7403): ribasushi | 2009-08-27 18:11:29 -0400
Add a test proving how dumb I am
r7405 at pentium (orig r7404): ribasushi | 2009-08-28 10:34:46 -0400
Warning to spare mst explanations
r7420 at pentium (orig r7419): caelum | 2009-08-29 02:34:07 -0400
r7381 at hlagh (orig r7380): ribasushi | 2009-08-24 17:07:58 -0400
Branch to add autocast support as a standalone piece of code
r7382 at hlagh (orig r7381): ribasushi | 2009-08-25 05:06:43 -0400
Move storage tests to their own dir
r7385 at hlagh (orig r7384): ribasushi | 2009-08-25 06:35:19 -0400
Switch storage class loading to ensure_class_loaded
r7386 at hlagh (orig r7385): ribasushi | 2009-08-25 06:37:48 -0400
Change a datatype for test purposes
r7387 at hlagh (orig r7386): ribasushi | 2009-08-25 06:45:35 -0400
Fix two storage tests
r7388 at hlagh (orig r7387): ribasushi | 2009-08-25 06:45:52 -0400
Actual autocast code
r18697 at hlagh (orig r7416): caelum | 2009-08-29 01:42:29 -0400
rename method and add docs
r18698 at hlagh (orig r7417): ribasushi | 2009-08-29 02:07:18 -0400
Make sure arrays work
r18699 at hlagh (orig r7418): caelum | 2009-08-29 02:11:14 -0400
rename _map_data_type to _native_data_type
r7423 at pentium (orig r7422): ribasushi | 2009-08-29 02:55:12 -0400
Make podcoverage happy
r7424 at pentium (orig r7423): ribasushi | 2009-08-29 03:06:07 -0400
Reduce the number of heavy dbh_do calls
r7437 at pentium (orig r7436): ribasushi | 2009-08-30 02:54:10 -0400
r7435 at Thesaurus (orig r7432): caelum | 2009-08-30 02:53:21 +0200
new branch
r7436 at Thesaurus (orig r7433): caelum | 2009-08-30 03:14:36 +0200
add dbh_maker option to connect_info hash
r7437 at Thesaurus (orig r7434): ribasushi | 2009-08-30 08:51:14 +0200
Minor cleanup and test enhancement
r7438 at Thesaurus (orig r7435): ribasushi | 2009-08-30 08:53:59 +0200
Changes
r7442 at pentium (orig r7441): ribasushi | 2009-08-30 03:53:04 -0400
Sanify 03podcoverage.t, allow wildcard skipping
r7447 at pentium (orig r7446): caelum | 2009-08-30 22:36:08 -0400
support coderef connect_infos for replicated storage
r7448 at pentium (orig r7447): caelum | 2009-08-30 22:58:43 -0400
make replicant dsn detection a bit nicer
r7449 at pentium (orig r7448): caelum | 2009-08-31 11:30:37 -0400
fix case where replicant coderef dsn does not connect
r7450 at pentium (orig r7449): arcanez | 2009-08-31 17:13:50 -0400
remove . from end of =head links
r7453 at pentium (orig r7452): ribasushi | 2009-09-01 04:38:37 -0400
Quote deps, avoid floating problems
r7454 at pentium (orig r7453): ribasushi | 2009-09-01 05:10:11 -0400
Fix misleading FAQ entry
r7462 at pentium (orig r7461): ribasushi | 2009-09-01 10:51:58 -0400
Fix insert_bulk with rebless
r7463 at pentium (orig r7462): ribasushi | 2009-09-01 10:52:39 -0400
Comment
r7464 at pentium (orig r7463): matthewt | 2009-09-01 11:17:08 -0400
clearer copyright
r7465 at pentium (orig r7464): matthewt | 2009-09-01 11:18:31 -0400
split copyright and license
r7467 at pentium (orig r7466): frew | 2009-09-01 14:27:36 -0400
pod describing strife with MSSQL
r7481 at pentium (orig r7480): ribasushi | 2009-09-02 05:07:04 -0400
Streamline pg test-schemas cleanup
r7482 at pentium (orig r7481): ribasushi | 2009-09-02 05:20:25 -0400
Centralize handling of minimum sqlt version to DBIx::Class
Bump version to the latest unborked sqlt (still just a recommend)
r7483 at pentium (orig r7482): ribasushi | 2009-09-02 05:31:50 -0400
Some cleanup... don't remember where it came from
r7484 at pentium (orig r7483): ribasushi | 2009-09-02 06:19:11 -0400
First part of mysql insanity
r7485 at pentium (orig r7484): ribasushi | 2009-09-02 06:25:35 -0400
Invoke default_join_type only on undefined types
r7486 at pentium (orig r7485): ribasushi | 2009-09-02 06:42:39 -0400
No fancy methods for the default_jointype, as we don't have proper sqlahacks inheritance and they are... well hacks
r7487 at pentium (orig r7486): ribasushi | 2009-09-02 07:00:07 -0400
Mysql v3 support (ick)
r7492 at pentium (orig r7491): rbuels | 2009-09-02 14:33:47 -0400
POD patch, corrected erroneous usage of dbh_do in Storage::DBI synopsis
r7498 at pentium (orig r7497): ribasushi | 2009-09-03 05:11:29 -0400
POD lists the storable hooks, but does not load them
r7499 at pentium (orig r7498): ribasushi | 2009-09-03 05:11:50 -0400
Storable sanification
r7500 at pentium (orig r7499): ribasushi | 2009-09-03 05:24:17 -0400
Storable is now in Core
r7501 at pentium (orig r7500): ribasushi | 2009-09-03 05:36:58 -0400
Make sure mysql is fixed
r7504 at pentium (orig r7503): ribasushi | 2009-09-03 11:16:17 -0400
Add podcoverage skip
r7505 at pentium (orig r7504): ribasushi | 2009-09-03 11:23:19 -0400
Consolidate _verify_pid calls
r7509 at pentium (orig r7508): matthewt | 2009-09-03 14:12:53 -0400
get the COPYRIGHT in the right place to not confuse META.yml generation
r7511 at pentium (orig r7510): ribasushi | 2009-09-03 14:41:22 -0400
r7512 at pentium (orig r7511): ribasushi | 2009-09-03 14:41:34 -0400
r7472 at Thesaurus (orig r7469): norbi | 2009-09-01 21:43:08 +0200
r7635 at vger: mendel | 2009-09-01 21:02:23 +0200
Added pointer to 'SQL functions on the lhs' to the 'using stored procs' section.
r7513 at pentium (orig r7512): ribasushi | 2009-09-03 14:41:44 -0400
r7473 at Thesaurus (orig r7470): norbi | 2009-09-01 21:43:19 +0200
r7636 at vger: mendel | 2009-09-01 21:09:43 +0200
Mentions the possibility of creating indexes on SQL function return values.
r7514 at pentium (orig r7513): ribasushi | 2009-09-03 14:41:52 -0400
r7474 at Thesaurus (orig r7471): norbi | 2009-09-01 21:43:31 +0200
r7637 at vger: mendel | 2009-09-01 21:19:14 +0200
Rewrote 'SQL functions on the lhs' to use the new SQLA literal SQL + bind feature.
r7515 at pentium (orig r7514): ribasushi | 2009-09-03 14:41:59 -0400
r7475 at Thesaurus (orig r7472): norbi | 2009-09-01 21:43:42 +0200
r7638 at vger: mendel | 2009-09-01 21:20:17 +0200
Added a comment to the example code to stress that it does not work.
r7516 at pentium (orig r7515): ribasushi | 2009-09-03 14:42:10 -0400
r7476 at Thesaurus (orig r7473): norbi | 2009-09-01 21:43:54 +0200
r7639 at vger: mendel | 2009-09-01 21:28:18 +0200
Added pointer to DBIx::Class::DynamicSubclass.
r7517 at pentium (orig r7516): ribasushi | 2009-09-03 14:42:15 -0400
r7477 at Thesaurus (orig r7474): norbi | 2009-09-01 21:44:03 +0200
r7640 at vger: mendel | 2009-09-01 21:30:13 +0200
Replaced deprecated \'colname DESC' order_by syntax with { -desc => 'colname' } syntax.
r7518 at pentium (orig r7517): ribasushi | 2009-09-03 14:42:22 -0400
r7478 at Thesaurus (orig r7475): norbi | 2009-09-01 21:44:17 +0200
r7641 at vger: mendel | 2009-09-01 21:32:48 +0200
Rewrote 'SQL functions on the lhs' to use the new SQLA literal SQL + bind feature.
r7519 at pentium (orig r7518): ribasushi | 2009-09-03 14:42:26 -0400
r7479 at Thesaurus (orig r7476): norbi | 2009-09-01 21:44:28 +0200
r7642 at vger: mendel | 2009-09-01 21:42:25 +0200
Added many-to-many add_to_*() example to stress that it returns the related row and not the linking table row.
r7520 at pentium (orig r7519): ribasushi | 2009-09-03 14:42:32 -0400
r7480 at Thesaurus (orig r7477): norbi | 2009-09-01 22:14:25 +0200
r7653 at vger: mendel | 2009-09-01 22:14:11 +0200
Fixed wrong literal SQL + bind examples (missing operator and placeholders).
r7521 at pentium (orig r7520): ribasushi | 2009-09-03 14:42:37 -0400
r7481 at Thesaurus (orig r7478): norbi | 2009-09-01 22:30:48 +0200
r7655 at vger: mendel | 2009-09-01 22:30:35 +0200
Fixed the bind value column names in the SQL literal + bind examples.
r7522 at pentium (orig r7521): ribasushi | 2009-09-03 14:42:45 -0400
r7482 at Thesaurus (orig r7479): norbi | 2009-09-01 22:52:21 +0200
r7657 at vger: mendel | 2009-09-01 22:52:09 +0200
Further improvement in the bind value column names in the SQL literal + bind examples.
r7547 at pentium (orig r7546): ribasushi | 2009-09-04 02:47:19 -0400
Stop connecting to determine dt-parser (test is in pg branch)
r7551 at pentium (orig r7550): ribasushi | 2009-09-04 05:20:48 -0400
Require sqla with bool support
r7558 at pentium (orig r7557): ribasushi | 2009-09-04 13:17:32 -0400
Dumper follies
r7559 at pentium (orig r7558): ribasushi | 2009-09-04 13:27:50 -0400
Even better sqla
r7568 at pentium (orig r7567): ribasushi | 2009-09-04 14:49:53 -0400
r7459 at Thesaurus (orig r7456): rbuels | 2009-09-01 12:46:46 +0200
making another pg_unqualified_schema branch, for real this time
r7460 at Thesaurus (orig r7457): rbuels | 2009-09-01 12:51:31 +0200
reworked tests for pg last_insert_id in presence of un-schema-qualified things. adds some todo tests, including a case for which it does not seem to be possible to correctly guess the sequence to use for the liid
r7461 at Thesaurus (orig r7458): rbuels | 2009-09-01 12:54:34 +0200
in Pg storage, added a warning for case when the nextval sequence is not schema qualified
r7462 at Thesaurus (orig r7459): rbuels | 2009-09-01 13:01:31 +0200
tweak to Pg test, warnings_like -> warnings_exist
r7463 at Thesaurus (orig r7460): ribasushi | 2009-09-01 13:34:59 +0200
Rewrap todo properly
r7490 at Thesaurus (orig r7487): ribasushi | 2009-09-02 14:16:01 +0200
Make pg sequence autodetect deterministic (or throw exceptions). Test needs adjusting
r7491 at Thesaurus (orig r7488): rbuels | 2009-09-02 19:15:01 +0200
some reorganization and cleanup of pg-specific tests
r7492 at Thesaurus (orig r7489): rbuels | 2009-09-02 20:08:31 +0200
more cleanup of 72pg.t
r7495 at Thesaurus (orig r7492): rbuels | 2009-09-02 20:48:12 +0200
more cleanup of pg tests, added cascade to drop function, cleaned up create and drop of schemas to use dbh_do
r7496 at Thesaurus (orig r7493): rbuels | 2009-09-02 20:50:42 +0200
oops, missed something screwed up by the pull
r7525 at Thesaurus (orig r7522): rbuels | 2009-09-03 20:45:53 +0200
added __END__ before pod in Pg storage
r7526 at Thesaurus (orig r7523): rbuels | 2009-09-03 20:46:00 +0200
renamed pg test schemas to be more organized
r7531 at Thesaurus (orig r7528): rbuels | 2009-09-04 00:28:11 +0200
more pg test cleanup
r7532 at Thesaurus (orig r7529): rbuels | 2009-09-04 00:28:17 +0200
more pg test cleanup
r7533 at Thesaurus (orig r7530): rbuels | 2009-09-04 00:28:25 +0200
starting work on extended set of Pg auto-pk tests
r7534 at Thesaurus (orig r7531): rbuels | 2009-09-04 00:28:31 +0200
more work on extended set of Pg auto-pk tests
r7535 at Thesaurus (orig r7532): rbuels | 2009-09-04 00:28:39 +0200
more work on pg tests
r7536 at Thesaurus (orig r7533): rbuels | 2009-09-04 00:28:45 +0200
more work on extended set of Pg auto-pk tests
r7537 at Thesaurus (orig r7534): rbuels | 2009-09-04 00:28:50 +0200
added .gitignore for users of git-svn
r7538 at Thesaurus (orig r7535): rbuels | 2009-09-04 00:28:58 +0200
more work on extended set of Pg auto-pk tests
r7539 at Thesaurus (orig r7536): rbuels | 2009-09-04 00:29:04 +0200
added darcs and git to MANIFEST.SKIP version control skipping section
r7540 at Thesaurus (orig r7537): rbuels | 2009-09-04 00:41:26 +0200
more work on extended set of Pg auto-pk tests
r7541 at Thesaurus (orig r7538): rbuels | 2009-09-04 00:41:32 +0200
more work on extended set of Pg auto-pk tests
r7542 at Thesaurus (orig r7539): rbuels | 2009-09-04 00:41:38 +0200
more work on extended set of Pg auto-pk tests
r7543 at Thesaurus (orig r7540): rbuels | 2009-09-04 02:20:23 +0200
more work on extended set of Pg auto-pk tests
r7544 at Thesaurus (orig r7541): rbuels | 2009-09-04 02:20:32 +0200
rewrote autoinc fetcher as a query into the pg_catalog. all the old tests pass now, but not my new tests. the new tests might be buggy
r7545 at Thesaurus (orig r7542): rbuels | 2009-09-04 02:20:39 +0200
oops, forgot to put the drop for the extended tests back in the pg tests
r7546 at Thesaurus (orig r7543): rbuels | 2009-09-04 02:41:56 +0200
couple of comment/documentation tweaks to pg storage driver
r7547 at Thesaurus (orig r7544): rbuels | 2009-09-04 02:42:02 +0200
fixed my tests
r7548 at Thesaurus (orig r7545): rbuels | 2009-09-04 02:42:09 +0200
clarified the POD in Pg storage driver regarding multi-schema support
r7551 at Thesaurus (orig r7548): ribasushi | 2009-09-04 08:51:30 +0200
Proper unconnected test
r7554 at Thesaurus (orig r7551): ribasushi | 2009-09-04 11:26:12 +0200
Fixes to pg test after review:
- Move the store_column test to 60core.t
- Streamline the select ... for update test
- Disable all exception warnings for normal test runs
r7555 at Thesaurus (orig r7552): ribasushi | 2009-09-04 11:56:00 +0200
Rewrite selector using sqla
r7562 at Thesaurus (orig r7559): rbuels | 2009-09-04 19:42:52 +0200
moved search_path querying function from Pg storage driver into tests
r7563 at Thesaurus (orig r7560): rbuels | 2009-09-04 19:43:00 +0200
refactored how Pg storage driver calls sequence search, made error message more informative when query into pg_catalog fails
r7564 at Thesaurus (orig r7561): rbuels | 2009-09-04 19:43:08 +0200
tweaked pg sequence discovery error message a bit more
r7565 at Thesaurus (orig r7562): rbuels | 2009-09-04 19:43:17 +0200
added big block comment explaining Pg sequence discovery strategy
r7566 at Thesaurus (orig r7563): rbuels | 2009-09-04 20:35:10 +0200
added code to use DBD::Pg column_info to fetch column default if recent enough
r7567 at Thesaurus (orig r7564): rbuels | 2009-09-04 20:35:18 +0200
tweaked comment
r7568 at Thesaurus (orig r7565): rbuels | 2009-09-04 20:35:30 +0200
oops, DBD::Pg 2.15.1 should be included in working versions
r7570 at pentium (orig r7569): ribasushi | 2009-09-04 15:32:01 -0400
Stop double-caching datetime_parser - keep it in the storage only
r7571 at pentium (orig r7570): ribasushi | 2009-09-04 15:36:39 -0400
No Serialize::Storable in core
r7572 at pentium (orig r7571): ribasushi | 2009-09-04 15:49:54 -0400
Changes
r7578 at pentium (orig r7577): ribasushi | 2009-09-06 06:28:44 -0400
Add mysterious exception test
r7580 at pentium (orig r7579): ribasushi | 2009-09-06 09:43:10 -0400
No connection - no cleanup
r7581 at pentium (orig r7580): ribasushi | 2009-09-06 09:45:51 -0400
Streamline test
r7582 at pentium (orig r7581): ribasushi | 2009-09-06 11:39:03 -0400
Test cleanup:
Benchmark and Data::Dumper have been in core forever
Make POD testing conditional as shown in http://use.perl.org/~Alias/journal/38822
Remove some dead cdbi test files
Stop openly giving contributors an option to override the authorcheck
r7583 at pentium (orig r7582): ribasushi | 2009-09-06 11:48:32 -0400
Done long time ago
r7584 at pentium (orig r7583): ribasushi | 2009-09-06 11:56:27 -0400
Release 0.08110
r7586 at pentium (orig r7585): ribasushi | 2009-09-06 12:33:46 -0400
Stop eating exceptions in ::Storage::DBI::DESTROY
r7587 at pentium (orig r7586): ribasushi | 2009-09-06 14:35:30 -0400
Centralize identity insert control for mssql (it seems that issuing an OFF is not necessary)
r7588 at pentium (orig r7587): ribasushi | 2009-09-06 14:45:41 -0400
Clearer MSSQL error message
r7589 at pentium (orig r7588): ribasushi | 2009-09-06 17:58:22 -0400
Fix mssql pod
r7590 at pentium (orig r7589): ribasushi | 2009-09-07 03:06:05 -0400
Release 0.08111
r7596 at pentium (orig r7595): wreis | 2009-09-07 09:31:38 -0400
improved warn for Storable hooks in ResultSourceHandle
r7598 at pentium (orig r7597): ribasushi | 2009-09-07 10:26:59 -0400
Whoops - last_insert_id allows for multiple autoinc columns - support it in pg
r7599 at pentium (orig r7598): ribasushi | 2009-09-07 10:46:14 -0400
Prune duplicate constraints from the find() condition
r7604 at pentium (orig r7603): frew | 2009-09-08 14:13:29 -0400
Turn IDENTITY_INSERT back off after inserts
r7614 at pentium (orig r7613): ribasushi | 2009-09-09 08:16:12 -0400
Fix warning
r7615 at pentium (orig r7614): ribasushi | 2009-09-09 08:42:49 -0400
Really sanify exception text
r7622 at pentium (orig r7621): mo | 2009-09-10 12:53:32 -0400
added test to make sure that store_column is called even for non-dirty columns
r7623 at pentium (orig r7622): bluefeet | 2009-09-10 13:03:21 -0400
Fix RSC->reset() to no longer return $self, which fixes Cursor::Cached + RSC.
r7624 at pentium (orig r7623): ribasushi | 2009-09-10 13:32:03 -0400
The real fix
r7625 at pentium (orig r7624): matthewt | 2009-09-10 20:33:17 -0400
make it clear that we are not supposed to have optional deps
r7626 at pentium (orig r7625): ribasushi | 2009-09-11 00:30:03 -0400
Changes so far
r7627 at pentium (orig r7626): ribasushi | 2009-09-11 00:39:45 -0400
Fix borked makefile
r7628 at pentium (orig r7627): ribasushi | 2009-09-11 09:39:42 -0400
Fixed minor problem with txn scope guard - rollback exceptions were never reported
r7630 at pentium (orig r7629): ribasushi | 2009-09-11 17:06:54 -0400
Extend prefetch tests
r7631 at pentium (orig r7630): ribasushi | 2009-09-11 17:13:45 -0400
Reverting http://dev.catalyst.perl.org/svnweb/bast/revision?rev=4278 - it seems to pass fine now
r7632 at pentium (orig r7631): ribasushi | 2009-09-11 18:15:50 -0400
Add single() to RSC
r7633 at pentium (orig r7632): ribasushi | 2009-09-11 18:44:01 -0400
This is how the txnguard should really work
r7634 at pentium (orig r7633): ribasushi | 2009-09-11 18:58:21 -0400
Fix borked example
r7635 at pentium (orig r7634): ribasushi | 2009-09-11 18:58:58 -0400
scopeguard almost done
r7636 at pentium (orig r7635): brunov | 2009-09-11 19:25:12 -0400
Update DBIx::Class::Manual::Example.pod to reflect previous changes in examples/Schema/insertdb.pl
r7637 at pentium (orig r7636): brunov | 2009-09-11 19:27:17 -0400
Added Bruno Vecchi to the Contributors section in DBIx/Class.pm
r7638 at pentium (orig r7637): ribasushi | 2009-09-11 19:31:16 -0400
Final scopeguard tweak (?)
r7642 at pentium (orig r7641): ribasushi | 2009-09-12 06:46:51 -0400
Even better localization of $@, and don't use Test::Warn for the time being, as something is freaking out Sub::UpLevel
r7660 at pentium (orig r7659): ribasushi | 2009-09-14 12:24:44 -0400
Someone claimed this is a problem...
r7663 at pentium (orig r7662): ribasushi | 2009-09-15 03:43:46 -0400
Warn when distinct is used with group_by
r7664 at pentium (orig r7663): rbuels | 2009-09-15 16:45:32 -0400
doc patch, clarified warning about using find_or_create() and friends on tables with auto-increment or similar columns
r7665 at pentium (orig r7664): rbuels | 2009-09-15 16:55:15 -0400
another doc clarification regarding auto-inc columns with find_or_create() and such functions
r7673 at pentium (orig r7672): ribasushi | 2009-09-17 07:54:44 -0400
Fix left-join chaining
r7684 at pentium (orig r7683): ribasushi | 2009-09-18 06:36:42 -0400
r6389 at Thesaurus (orig r6388): caelum | 2009-05-23 22:48:06 +0200
recreating Sybase branch
r6395 at Thesaurus (orig r6394): caelum | 2009-05-24 01:47:32 +0200
try not to fuck mssql with the sybase crap
r6488 at Thesaurus (orig r6487): caelum | 2009-06-03 17:31:24 +0200
resolve conflict
r6490 at Thesaurus (orig r6489): caelum | 2009-06-03 18:25:36 +0200
add missing files to sybase branch
r6492 at Thesaurus (orig r6491): caelum | 2009-06-04 01:51:39 +0200
fix Sybase DT stuff and storage bases
r6493 at Thesaurus (orig r6492): caelum | 2009-06-04 02:10:45 +0200
fix base for mssql (can't be a sybase anymore)
r6494 at Thesaurus (orig r6493): caelum | 2009-06-04 02:20:37 +0200
test sybase SMALLDATETIME inflation
r6495 at Thesaurus (orig r6494): caelum | 2009-06-04 04:52:31 +0200
update Sybase docs
r6501 at Thesaurus (orig r6500): caelum | 2009-06-04 14:50:49 +0200
sybase limit count without offset now works
r6504 at Thesaurus (orig r6503): caelum | 2009-06-04 18:03:01 +0200
use TOP for sybase limit count thanks to refactored count
r6505 at Thesaurus (orig r6504): caelum | 2009-06-04 18:41:54 +0200
back to counting rows for Sybase LIMIT counts
r6506 at Thesaurus (orig r6505): caelum | 2009-06-04 19:07:48 +0200
minor sybase count fix
r6512 at Thesaurus (orig r6511): caelum | 2009-06-05 01:02:48 +0200
test sybase group_by count, works
r6513 at Thesaurus (orig r6512): caelum | 2009-06-05 01:28:18 +0200
set date format on _rebless correctly
r6516 at Thesaurus (orig r6515): caelum | 2009-06-05 02:24:46 +0200
manually merged in sybase_noquote branch
r6518 at Thesaurus (orig r6517): caelum | 2009-06-05 06:34:25 +0200
shit doesn't work yet
r6520 at Thesaurus (orig r6519): caelum | 2009-06-05 16:55:41 +0200
update sybase types which shouldn't be quoted
r6525 at Thesaurus (orig r6524): caelum | 2009-06-06 04:40:51 +0200
tweaks to sybase types
r6527 at Thesaurus (orig r6526): caelum | 2009-06-06 05:36:03 +0200
temporary sybase noquote hack
r6595 at Thesaurus (orig r6594): caelum | 2009-06-10 13:46:37 +0200
Sybase::NoBindVars now correctly quotes
r6596 at Thesaurus (orig r6595): caelum | 2009-06-10 14:04:19 +0200
cache rsrc in NoBindVars, use name_sep
r6597 at Thesaurus (orig r6596): caelum | 2009-06-10 14:35:52 +0200
Sybase count by first pk, if available
r6599 at Thesaurus (orig r6598): caelum | 2009-06-10 15:00:42 +0200
cache rsrc in NoBindVars correctly
r6600 at Thesaurus (orig r6599): caelum | 2009-06-10 15:27:41 +0200
handle unknown rsrc in NoBindVars and Sybase::NoBindVars
r6605 at Thesaurus (orig r6604): caelum | 2009-06-10 18:17:31 +0200
cache rsrc properly in NoBindVars, return undef if no rsrc
r6658 at Thesaurus (orig r6657): caelum | 2009-06-13 05:57:40 +0200
switch to DateTime::Format::Sybase
r6700 at Thesaurus (orig r6699): caelum | 2009-06-17 16:25:28 +0200
rename and document dt setup method, will be an on_connect_call at later merge point
r6701 at Thesaurus (orig r6700): caelum | 2009-06-17 16:30:08 +0200
more dt docs reorg
r6715 at Thesaurus (orig r6714): caelum | 2009-06-19 01:28:17 +0200
todo tests for text/image columns in sybase
r6716 at Thesaurus (orig r6715): caelum | 2009-06-19 01:46:56 +0200
added connect_call_blob_setup for Sybase
r6724 at Thesaurus (orig r6723): caelum | 2009-06-19 17:12:20 +0200
cleanups
r6771 at Thesaurus (orig r6770): caelum | 2009-06-23 16:42:32 +0200
minor changes
r6788 at Thesaurus (orig r6787): caelum | 2009-06-25 05:31:06 +0200
fixup POD, comment out count
r6811 at Thesaurus (orig r6810): caelum | 2009-06-28 02:14:56 +0200
prototype blob implementation
r6857 at Thesaurus (orig r6856): caelum | 2009-06-29 23:45:19 +0200
branch pushed, removing
r6868 at Thesaurus (orig r6867): caelum | 2009-06-30 03:39:51 +0200
merge on_connect_call updates
r6877 at Thesaurus (orig r6876): caelum | 2009-06-30 12:46:43 +0200
code cleanups
r6957 at Thesaurus (orig r6956): caelum | 2009-07-03 02:32:48 +0200
minor changes
r6959 at Thesaurus (orig r6958): caelum | 2009-07-03 05:04:12 +0200
fix sybase mro
r7001 at Thesaurus (orig r7000): caelum | 2009-07-07 13:34:23 +0200
fix sybase rebless to NoBindVars
r7021 at Thesaurus (orig r7020): caelum | 2009-07-10 12:52:13 +0200
fix NoBindVars
r7053 at Thesaurus (orig r7052): caelum | 2009-07-15 01:39:02 +0200
set maxConnect in DSN and add docs
r7065 at Thesaurus (orig r7064): caelum | 2009-07-17 09:39:54 +0200
make insertion of blobs into tables with identity columns work, other minor fixes
r7070 at Thesaurus (orig r7069): caelum | 2009-07-17 23:30:13 +0200
some compatibility updates for older DBD::Sybase versions, some initial work on _select_args for blobs
r7072 at Thesaurus (orig r7071): caelum | 2009-07-19 23:57:11 +0200
mangling _select_args turned out to be unnecessary
r7073 at Thesaurus (orig r7072): caelum | 2009-07-20 01:02:19 +0200
minor cleanups
r7074 at Thesaurus (orig r7073): caelum | 2009-07-20 15:47:48 +0200
blob update now works
r7076 at Thesaurus (orig r7075): caelum | 2009-07-20 19:06:46 +0200
change the (incorrect) version check to a check for FreeTDS
r7077 at Thesaurus (orig r7076): caelum | 2009-07-20 19:13:25 +0200
better check for FreeTDS thanks to arcanez
r7089 at Thesaurus (orig r7086): caelum | 2009-07-22 07:09:21 +0200
minor cleanups
r7091 at Thesaurus (orig r7088): caelum | 2009-07-22 17:05:37 +0200
remove unnecessary test Result class
r7092 at Thesaurus (orig r7089): caelum | 2009-07-23 00:47:14 +0200
fix doc for how to check for FreeTDS
r7095 at Thesaurus (orig r7092): caelum | 2009-07-23 14:35:53 +0200
doc tweak
r7115 at Thesaurus (orig r7112): caelum | 2009-07-24 09:58:24 +0200
add support for IDENTITY_INSERT
r7117 at Thesaurus (orig r7114): caelum | 2009-07-24 16:19:08 +0200
savepoint support
r7120 at Thesaurus (orig r7117): caelum | 2009-07-24 20:35:37 +0200
fix race condition in last_insert_id with placeholders
r7121 at Thesaurus (orig r7118): caelum | 2009-07-24 21:22:25 +0200
code cleanup
r7124 at Thesaurus (orig r7121): caelum | 2009-07-25 16:19:58 +0200
use _resolve_column_info in NoBindVars
r7125 at Thesaurus (orig r7122): caelum | 2009-07-25 21:23:49 +0200
make insert work as a nested transaction too
r7126 at Thesaurus (orig r7123): caelum | 2009-07-25 22:52:17 +0200
add money type support
r7128 at Thesaurus (orig r7125): caelum | 2009-07-27 03:48:35 +0200
better FreeTDS support
r7130 at Thesaurus (orig r7127): caelum | 2009-07-28 06:23:54 +0200
minor refactoring, cleanups, doc updates
r7131 at Thesaurus (orig r7128): caelum | 2009-07-28 09:32:45 +0200
forgot to set mro in dbi::cursor
r7141 at Thesaurus (orig r7138): caelum | 2009-07-30 10:21:20 +0200
better test for "smalldatetime" in Sybase
r7146 at Thesaurus (orig r7143): caelum | 2009-07-30 15:37:18 +0200
update sqlite test schema
r7207 at Thesaurus (orig r7204): caelum | 2009-08-04 23:40:16 +0200
update Changes
r7222 at Thesaurus (orig r7219): caelum | 2009-08-05 11:02:26 +0200
fix a couple minor issues after pull from trunk
r7260 at Thesaurus (orig r7257): caelum | 2009-08-07 14:45:18 +0200
add note about where to get Schema::Loader
r7273 at Thesaurus (orig r7270): ribasushi | 2009-08-09 01:19:49 +0200
Changes and minor code rewrap
r7285 at Thesaurus (orig r7282): ribasushi | 2009-08-10 08:08:06 +0200
pesky whitespace
r7286 at Thesaurus (orig r7283): ribasushi | 2009-08-10 08:11:46 +0200
privatize dormant method - it may be useful for sybase at *some* point
r7287 at Thesaurus (orig r7284): ribasushi | 2009-08-10 08:19:55 +0200
Whoops
r7289 at Thesaurus (orig r7286): caelum | 2009-08-10 08:44:51 +0200
document placeholders_with_type_conversion_supported and add a redispatch to reblessed storage in DBI::update
r7290 at Thesaurus (orig r7287): caelum | 2009-08-10 10:07:45 +0200
fix and test redispatch to reblessed storage insert/update
r7292 at Thesaurus (orig r7289): caelum | 2009-08-10 10:32:37 +0200
rename get_connected_schema to get_schema in sybase test
r7345 at Thesaurus (orig r7342): ribasushi | 2009-08-18 22:45:06 +0200
Fix Changes
r7367 at Thesaurus (orig r7364): ribasushi | 2009-08-23 10:00:34 +0200
Minor speedup
r7368 at Thesaurus (orig r7365): ribasushi | 2009-08-23 10:01:10 +0200
Generalize and hide placeholder support check
r7369 at Thesaurus (orig r7366): ribasushi | 2009-08-23 10:04:26 +0200
Rename the common sybase driver
r7373 at Thesaurus (orig r7370): caelum | 2009-08-24 13:21:51 +0200
make insert only use a txn if needed, add connect_call_unsafe_insert
r7374 at Thesaurus (orig r7371): caelum | 2009-08-24 14:42:57 +0200
add test for IDENTITY_INSERT
r7378 at Thesaurus (orig r7375): caelum | 2009-08-24 15:51:48 +0200
use debugobj->callback instead of local *_query_start in test to capture query
r7379 at Thesaurus (orig r7376): caelum | 2009-08-24 17:19:46 +0200
remove duplicate oracle method and fix an mssql method call
r7417 at Thesaurus (orig r7414): caelum | 2009-08-29 07:23:45 +0200
update link to Schema::Loader branch
r7427 at Thesaurus (orig r7424): caelum | 2009-08-29 09:31:41 +0200
switch to ::DBI::AutoCast
r7428 at Thesaurus (orig r7425): ribasushi | 2009-08-29 13:36:22 +0200
Cleanup:
Added commented method signatures for easier debugging
privatize transform_unbound_value as _prep_bind_value
Remove \@_ splice's in lieu of of simple shifts
Exposed TYPE_MAPPING used by native_data_type via our
Removed use of txn_do - internal code uses the scope guard
Renamed some variables, whitespace cleanup, the works
r7429 at Thesaurus (orig r7426): ribasushi | 2009-08-29 13:40:48 +0200
Varname was absolutely correct
r7430 at Thesaurus (orig r7427): caelum | 2009-08-29 14:09:13 +0200
minor changes for tests to pass again
r7431 at Thesaurus (orig r7428): caelum | 2009-08-29 21:08:51 +0200
fix inserts with active cursors
r7432 at Thesaurus (orig r7429): caelum | 2009-08-29 22:53:02 +0200
remove extra connection
r7434 at Thesaurus (orig r7431): caelum | 2009-08-30 00:02:20 +0200
test correlated subquery
r7442 at Thesaurus (orig r7439): ribasushi | 2009-08-30 09:07:00 +0200
Put the comment back
r7443 at Thesaurus (orig r7440): ribasushi | 2009-08-30 09:15:41 +0200
Change should_quote_value to interpolate_unquoted to make it harder to stop quoting by accident (it's easier to return an undef by accident than a 1)
r7446 at Thesaurus (orig r7443): caelum | 2009-08-30 18:19:46 +0200
added txn_scope_guards for blob operations
r7447 at Thesaurus (orig r7444): ribasushi | 2009-08-30 18:56:43 +0200
Rename insert_txn to unsafe_insert
r7512 at Thesaurus (orig r7509): ribasushi | 2009-09-03 20:24:14 +0200
Minor cleanups
r7575 at Thesaurus (orig r7572): caelum | 2009-09-05 07:23:57 +0200
pending review by mpeppler
r7593 at Thesaurus (orig r7590): ribasushi | 2009-09-07 09:10:05 +0200
Release 0.08111 tag
r7594 at Thesaurus (orig r7591): ribasushi | 2009-09-07 09:14:33 +0200
Whoops this should not have committed
r7602 at Thesaurus (orig r7599): caelum | 2009-09-07 21:31:38 +0200
fix _insert_dbh code to only connect when needed, doc update
r7607 at Thesaurus (orig r7604): caelum | 2009-09-09 02:15:54 +0200
remove unsafe_insert
r7608 at Thesaurus (orig r7605): ribasushi | 2009-09-09 09:14:20 +0200
Localisation ain't free, we don't do it unless we have to
r7609 at Thesaurus (orig r7606): ribasushi | 2009-09-09 09:40:29 +0200
Much simpler
r7610 at Thesaurus (orig r7607): ribasushi | 2009-09-09 10:38:41 +0200
Reduce amount of perl-golf :)
r7611 at Thesaurus (orig r7608): ribasushi | 2009-09-09 10:41:15 +0200
This should not have worked - I guess we lack tests?
r7614 at Thesaurus (orig r7611): caelum | 2009-09-09 12:08:36 +0200
test multi-row blob update
r7619 at Thesaurus (orig r7616): caelum | 2009-09-09 18:01:15 +0200
remove Sub::Name hack for method dispatch, pass $next instead
r7620 at Thesaurus (orig r7617): caelum | 2009-09-10 02:16:03 +0200
do blob update over _insert_dbh
r7661 at Thesaurus (orig r7650): caelum | 2009-09-13 10:27:44 +0200
change _insert_dbh to _insert_storage
r7663 at Thesaurus (orig r7652): caelum | 2009-09-13 11:52:20 +0200
make sure _init doesn't loop, steal insert_bulk from mssql, add some insert_bulk tests
r7664 at Thesaurus (orig r7653): caelum | 2009-09-13 13:27:51 +0200
allow subclassing of methods proxied to _writer_storage
r7666 at Thesaurus (orig r7655): caelum | 2009-09-14 15:09:21 +0200
sybase bulk API support stuff (no blobs yet, coming soon...)
r7667 at Thesaurus (orig r7656): caelum | 2009-09-14 15:33:14 +0200
add another test for sybase bulk stuff (passes)
r7668 at Thesaurus (orig r7657): caelum | 2009-09-14 15:44:06 +0200
minor change (fix inverted boolean for warning)
r7669 at Thesaurus (orig r7658): caelum | 2009-09-14 15:48:52 +0200
remove @args from DBI::sth, use full arg list
r7676 at Thesaurus (orig r7665): caelum | 2009-09-16 15:06:35 +0200
use execute_array for insert_bulk, test insert_bulk with blobs, clean up blob tests a bit
r7680 at Thesaurus (orig r7669): ribasushi | 2009-09-16 19:36:19 +0200
Remove branched changes
r7682 at Thesaurus (orig r7671): caelum | 2009-09-17 03:03:34 +0200
I'll rewrite this bit tomorrow to be less retarded
r7684 at Thesaurus (orig r7673): caelum | 2009-09-18 04:03:15 +0200
fix yesterday's stuff, identity_update works, blob updates are better
r7686 at Thesaurus (orig r7675): caelum | 2009-09-18 04:22:38 +0200
column no longer necessary in test
r7688 at Thesaurus (orig r7677): caelum | 2009-09-18 08:33:14 +0200
fix freetds
r7691 at Thesaurus (orig r7680): ribasushi | 2009-09-18 12:25:42 +0200
r7678 at Thesaurus (orig r7667): ribasushi | 2009-09-16 19:31:14 +0200
New subbranch
r7679 at Thesaurus (orig r7668): ribasushi | 2009-09-16 19:34:29 +0200
Caelum's work so far
r7690 at Thesaurus (orig r7679): caelum | 2009-09-18 11:10:16 +0200
support for blobs in insert_bulk fallback
r7692 at Thesaurus (orig r7681): ribasushi | 2009-09-18 12:28:09 +0200
Rollback all bulk insert code before merge
r7689 at pentium (orig r7688): ribasushi | 2009-09-18 08:12:05 -0400
Cleanup exception handling
r7690 at pentium (orig r7689): ribasushi | 2009-09-18 08:22:02 -0400
duh
r7691 at pentium (orig r7690): ribasushi | 2009-09-18 08:25:06 -0400
Minor cleanup of RSC with has_many joins
r7692 at pentium (orig r7691): ribasushi | 2009-09-18 08:32:15 -0400
Changes and dev notes in makefile
r7695 at pentium (orig r7694): ribasushi | 2009-09-18 08:52:26 -0400
Nothing says the grouping column can not be nullable
r7696 at pentium (orig r7695): ribasushi | 2009-09-18 08:53:33 -0400
Changes
r7697 at pentium (orig r7696): ribasushi | 2009-09-18 14:09:04 -0400
This code belongs in Storage::DBI
r7698 at pentium (orig r7697): ribasushi | 2009-09-18 14:38:26 -0400
Clear up some legacy cruft and straighten inheritance
r7700 at pentium (orig r7699): ribasushi | 2009-09-20 18:25:20 -0400
Backout sybase changes
r7703 at pentium (orig r7702): ribasushi | 2009-09-20 18:46:32 -0400
Missed a part of the revert
r7710 at pentium (orig r7709): ribasushi | 2009-09-20 20:49:11 -0400
Oops
r7711 at pentium (orig r7710): ribasushi | 2009-09-21 05:02:14 -0400
Changes
r7712 at pentium (orig r7711): ribasushi | 2009-09-21 06:49:30 -0400
Undocument the from attribute (the description was mostly outdated anyway)
r7713 at pentium (orig r7712): ribasushi | 2009-09-21 06:58:58 -0400
Release 0.08112
r7716 at pentium (orig r7715): ribasushi | 2009-09-21 10:26:07 -0400
A test for an obscure join syntax - make sure we don't break it
r7722 at pentium (orig r7721): ribasushi | 2009-09-22 06:58:09 -0400
this would break in the future - sanitize sql fed to the tester
r7725 at pentium (orig r7724): ribasushi | 2009-09-22 07:07:31 -0400
The hack is no longer necessary with a recent sqla
r7730 at pentium (orig r7729): caelum | 2009-09-24 17:44:01 -0400
add test for multiple active statements in mssql over dbd::sybase
r7731 at pentium (orig r7730): caelum | 2009-09-25 02:46:22 -0400
test on_connect_do with a coderef connect_info too
r7732 at pentium (orig r7731): caelum | 2009-09-25 17:26:52 -0400
failing test for simple transaction with mssql via dbd::sybase
r7754 at pentium (orig r7753): ribasushi | 2009-10-03 09:49:14 -0400
Test reorg (no changes)
r7755 at pentium (orig r7754): ribasushi | 2009-10-03 09:55:25 -0400
Add failing tests for RT#50003
r7756 at pentium (orig r7755): caelum | 2009-10-03 10:09:45 -0400
fix on_connect_ with coderef connect_info
r7760 at pentium (orig r7759): ribasushi | 2009-10-04 07:17:53 -0400
Fix AutoCast's POD
r7771 at pentium (orig r7770): ribasushi | 2009-10-09 00:57:20 -0400
r7777 at Thesaurus (orig r7765): frew | 2009-10-07 20:05:05 +0200
add method to check if an rs is paginated
r7778 at Thesaurus (orig r7766): frew | 2009-10-07 20:31:02 +0200
is_paginated method and test
r7780 at Thesaurus (orig r7768): frew | 2009-10-09 06:45:36 +0200
change name of method
r7781 at Thesaurus (orig r7769): frew | 2009-10-09 06:47:31 +0200
add message to changelog for is_paged
r7774 at pentium (orig r7773): ribasushi | 2009-10-09 05:00:36 -0400
Ugh CRLF
r7775 at pentium (orig r7774): ribasushi | 2009-10-09 05:04:35 -0400
Skip versioning test on really old perls lacking Time::HiRes
r7776 at pentium (orig r7775): ribasushi | 2009-10-09 05:04:50 -0400
Changes
r7777 at pentium (orig r7776): triode | 2009-10-09 16:32:04 -0400
added troubleshooting case of excessive memory allocation involving TEXT/BLOB/etc
columns and large LongReadLen
r7778 at pentium (orig r7777): triode | 2009-10-09 16:44:21 -0400
added my name to contributors list
r7779 at pentium (orig r7778): ribasushi | 2009-10-10 12:49:15 -0400
Whoops, this isn't right
r7780 at pentium (orig r7779): ribasushi | 2009-10-11 09:44:18 -0400
More ordered fixes
r7782 at pentium (orig r7781): norbi | 2009-10-13 05:27:18 -0400
r7982 at vger: mendel | 2009-10-13 11:26:11 +0200
Fixed a typo and a POD error.
Property changes on: DBIx-Class/0.08/branches/ado_mssql
___________________________________________________________________
Name: svk:merge
- 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7237
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7331
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:5651
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
+ 168d5346-440b-0410-b799-f706be625ff1:/DBIx-Class-current:2207
462d4d0c-b505-0410-bf8e-ce8f877b3390:/local/bast/DBIx-Class:3159
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/cookbook_fixes:7657
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/resultsetcolumn_custom_columns:5160
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/branches/sqla_1.50_compat:5414
4d5fae46-8e6a-4e08-abee-817e9fb894a2:/local/bast/DBIx-Class/0.08/trunk:7982
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class:32260
9c88509d-e914-0410-b01c-b9530614cbfe:/local/DBIx-Class-CDBICompat:54993
9c88509d-e914-0410-b01c-b9530614cbfe:/vendor/DBIx-Class:31122
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_column_attr:10946
ab17426e-7cd3-4704-a2a2-80b7c0a611bb:/local/dbic_trunk:11788
bd5ac9a7-f185-4d95-9186-dbb8b392a572:/local/os/bast/DBIx-Class/0.08/trunk:2798
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/_abandoned_but_possibly_useful/table_name_ref:7266
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/autocast:7418
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/belongs_to_null_col_fix:5244
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cdbicompat_integration:4160
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/column_attr:5074
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/complex_join_rels:4589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/connect_info_hash:7435
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/cookbook_fixes:7479
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_distinct:6218
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/count_rs:6741
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/diamond_relationships:6310
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/discard_changes_replication_fix:7252
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/file_column:3920
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/fix-update-and-delete-as_query:6162
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_has_many_join:7382
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/grouped_prefetch:6885
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/is_resultset_paginated:7769
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/joined_count:6323
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mc_fixes:6645
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_money_type:7096
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_storage_minor_refactor:7210
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mssql_top_fixes:6971
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multi_stuff:5565
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/multicreate_fixes:7275
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mysql_ansi:7175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/mystery_join:6589
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/new_replication_transaction_fixup:7058
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_connect_call:6854
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/on_disconnect_do:3694
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle-tweaks:6222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/oracle_sequence:4173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/order_by_refactor:6475
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/parser_fk_index:4485
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/pg_unqualified_schema:7566
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch:5699
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_limit:6724
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/prefetch_redux:7206
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/reduce_pings:7261
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/replication_dedux:4600
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rsrc_in_storage:6577
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/rt_bug_41083:5437
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/savepoints:4223
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/search_related_prefetch:6818
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sqla_1.50_compat:5321
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-ms-access:4142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/storage-tweaks:6262
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subclassed_rsset:5930
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/subquery:5617
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/syb_connected:6919
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase:7682
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_bulk_insert:7679
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/sybase_mssql:6125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/table_name_ref:7132
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/top_limit_altfix:6429
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/type_aware_update:6619
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/unresolvable_prefetch:6949
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioned_enhancements:4125
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/versioning:4578
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/branches/views:5585
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/0.08108_prerelease_please_do_not_pull_into_it:7008
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/tags/pre_0.08109_please_do_not_merge:7336
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/DBIx-Class/0.08/trunk:7781
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-C3:318
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-current:2222
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-joins:173
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class-resultset:570
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/datetime:1716
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_compat:1855
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/find_unique_query_fixes:2142
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/inflate:1988
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/many_to_many:2025
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/re_refactor_bugfix:1944
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/reorganize_tests:1827
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset-new-refactor:1766
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_2_electric_boogaloo:2175
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/resultset_cleanup:2102
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/branches/DBIx-Class/sqlt_tests_refactor:2043
bd8105ee-0ff8-0310-8827-fb3f25b6796d:/trunk/DBIx-Class:3606
fe160bb6-dc1c-0410-9f2b-d64a711b54a5:/local/DBIC-trunk-0.08:10510
Added: DBIx-Class/0.08/branches/ado_mssql/.gitignore
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/.gitignore (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/.gitignore 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,7 @@
+META.yml
+Makefile
+README
+blib/
+inc/
+pm_to_blib
+t/var/
Modified: DBIx-Class/0.08/branches/ado_mssql/Changes
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/Changes 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/Changes 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,5 +1,54 @@
Revision history for DBIx::Class
+ - Add is_paged method to DBIx::Class::ResultSet so that we can
+ check if we want a pager
+ - Skip versioning test on really old perls lacking Time::HiRes
+ (RT #50209)
+ - Fixed on_connect_do/call regression when used with a coderef
+ connector (RT #50003)
+ - A couple of fixes to Ordered to remedy subclassing issues
+
+0.08112 2009-09-21 10:57:00 (UTC)
+ - Remove the recommends from Makefile.PL, DBIx::Class is not
+ supposed to have optional dependencies. ever.
+ - Mangle the DBIx/Class.pm POD to be more clear about
+ copyright and license
+ - Put back PG's multiple autoinc per table support, accidentally
+ dropped during the serial-autodetection rewrite
+ - Make sure ResultSetColumn does not depend on the (undefined)
+ return value of ->cursor->reset()
+ - Add single() to ResultSetColumn (same semantics as ResultSet)
+ - Make sure to turn off IDENTITY_INSERT after insert() on MSSQL
+ tables that needed it
+ - More informative exception on failing _resolve_relationship
+ - Allow undef/NULL as the sole grouping value in Ordered
+ - Fix unreported rollback exceptions in TxnScopeGuard
+ - Fix overly-eager left-join chain enforcing code
+ - Warn about using distinct with an existing group_by
+ - Warn about attempting to $rs->get_column a non-unique column
+ when has_many joins are added to resultset
+ - Refactor of the exception handling system (now everything is a
+ DBIx::Class::Exception object)
+
+0.08111 2009-09-06 21:58:00 (UTC)
+ - The hashref to connection_info now accepts a 'dbh_maker'
+ coderef, allowing better integration with Catalyst
+ - Fixed a complex prefetch + regular join regression introduced
+ in 0.08108
+ - Fixed insert_bulk rebless handling
+ - Fixed Storable roundtrip regression, and general serialization
+ cleanup
+ - SQLT related fixes:
+ - sqlt_type is now called on the correct storage object
+ - hooks can now see the correct producer_type (RT#47891)
+ - optional SQLT requirements for e.g. deploy() bumped to 0.11002
+ - Really fixed (and greatly cleaned up) postgresql autoinc sequence
+ autodetection
+ - Automatically detect MySQL v3 and use INNER JOIN instead of JOIN
+ - POD improvements (including RT#48769)
+ - Test suite tweaks (including fixes for recent CPANTS fails)
+ - Better support for MSSQL IDENTITY_INSERT ON
+
0.08109 2009-08-18 08:35:00 (UTC)
- Replication updates:
- Improved the replication tests so that they are more reliable
@@ -22,7 +71,7 @@
- Support for MSSQL 'money' type
- Support for 'smalldatetime' type used in MSSQL and Sybase for
InflateColumn::DateTime
- - support for Postgres 'timestamp without timezone' type in
+ - Support for Postgres 'timestamp without timezone' type in
InflateColumn::DateTime (RT#48389)
- Added new MySQL specific on_connect_call macro 'set_strict_mode'
(also known as make_mysql_not_suck_as_much)
@@ -61,7 +110,7 @@
nonexisting prefetch
- make_column_dirty() now overwrites the deflated value with an
inflated one if such exists
- - Fixed set_$rel with where restriction deleting rows outside
+ - Fixed set_$rel with where restriction deleting rows outside
the restriction
- populate() returns the created objects or an arrayref of the
created objects depending on scalar vs. list context
@@ -113,7 +162,7 @@
side of the relation, to avoid duplicates
- DBIC now properly handles empty inserts (invoking all default
values from the DB, normally via INSERT INTO tbl DEFAULT VALUES
- - Fix find_or_new/create to stop returning random rows when
+ - Fix find_or_new/create to stop returning random rows when
default value insert is requested (RT#28875)
- Make IC::DT extra warning state the column name too
- It is now possible to transparently search() on columns
@@ -135,9 +184,9 @@
- Change ->count code to work correctly with DISTINCT (distinct => 1)
via GROUP BY
- Removed interpolation of bind vars for as_query - placeholders
- are preserved and nested query bind variables are properly
+ are preserved and nested query bind variables are properly
merged in the correct order
- - Refactor DBIx::Class::Storage::DBI::Sybase to automatically
+ - Refactor DBIx::Class::Storage::DBI::Sybase to automatically
load a subclass, namely Microsoft_SQL_Server.pm
(similar to DBIx::Class::Storage::DBI::ODBC)
- Refactor InflateColumn::DateTime to allow components to
@@ -200,7 +249,7 @@
- not try and insert things tagged on via new_related unless required
- Possible to set locale in IC::DateTime extra => {} config
- Calling the accessor of a belongs_to when the foreign_key
- was NULL and the row was not stored would unexpectedly fail
+ was NULL and the row was not stored would unexpectedly fail
- Split sql statements for deploy only if SQLT::Producer returned a scalar
containing all statements to be executed
- Add as_query() for ResultSet and ResultSetColumn. This makes subqueries
@@ -228,8 +277,8 @@
- new order_by => { -desc => 'colname' } syntax supported
- PG array datatype supported
- insert should use store_column, not set_column to avoid marking
- clean just-stored values as dirty. New test for this
- - regression test for source_name
+ clean just-stored values as dirty. New test for this
+ - regression test for source_name
0.08099_05 2008-10-30 21:30:00 (UTC)
- Rewrite of Storage::DBI::connect_info(), extended with an
@@ -243,7 +292,7 @@
- Fixed up related resultsets and multi-create
- Fixed superfluous connection in ODBC::_rebless
- Fixed undef PK for first insert in ODBC::Microsoft_SQL_Server
- - Added virtual method to Versioned so a user can create upgrade
+ - Added virtual method to Versioned so a user can create upgrade
path across multiple versions (jgoulah)
- Better (and marginally faster) implementation of the HashRefInflator
hash construction algorithm
@@ -252,7 +301,7 @@
0.08099_04 2008-07-24 01:00:00
- Functionality to storage to enable a sub to be run without FK checks
- - Fixed $schema->clone bug which caused clone and source to share
+ - Fixed $schema->clone bug which caused clone and source to share
internal hash refs
- Added register_extra_source methods for additional sources
- Added datetime_undef_if_invalid for InflateColumn::DateTime to
@@ -278,11 +327,11 @@
- Add warnings for non-unique ResultSet::find queries
- Changed Storage::DBI::Replication to Storage::DBI::Replicated and
refactored support.
- - By default now deploy/diff et al. will ignore constraint and index
+ - By default now deploy/diff et al. will ignore constraint and index
names
- Add ResultSet::_is_deterministic_value, make new_result filter the
values passed to new to drop values that would generate invalid SQL.
- - Use Sub::Name to name closures before installing them. Fixes
+ - Use Sub::Name to name closures before installing them. Fixes
incompatibility with Moose method modifiers on generated methods.
0.08010 2008-03-01 10:30
@@ -291,7 +340,7 @@
0.08009 2008-01-20 13:30
- Made search_rs smarter about when to preserve the cache to fix
mm prefetch usage
- - Added Storage::DBI subclass for MSSQL over ODBC.
+ - Added Storage::DBI subclass for MSSQL over ODBC.
- Added freeze, thaw and dclone methods to Schema so that thawed
objects will get re-attached to the schema.
- Moved dbicadmin to JSON::Any wrapped JSON.pm for a sane API
@@ -305,20 +354,20 @@
foreign and self parts the wrong way round in the condition
- ResultSetColumn::func() now returns all results if called in list
context; this makes things like func('DISTINCT') work as expected
- - Many-to-many relationships now warn if the utility methods would
+ - Many-to-many relationships now warn if the utility methods would
clash
- InflateColumn::DateTime now accepts an extra parameter of timezone
to set timezone on the DT object (thanks Sergio Salvi)
- - Added sqlt_deploy_hook to result classes so that indexes can be
+ - Added sqlt_deploy_hook to result classes so that indexes can be
added.
- - Added startup checks to warn loudly if we appear to be running on
+ - Added startup checks to warn loudly if we appear to be running on
RedHat systems from perl-5.8.8-10 and up that have the bless/overload
patch applied (badly) which causes 2x -> 100x performance penalty.
(Jon Schutz)
- - ResultSource::reverse_relationship_info can distinguish between
+ - ResultSource::reverse_relationship_info can distinguish between
sources using the same table
- Row::insert will now not fall over if passed duplicate related objects
- - Row::copy will not fall over if you have two relationships to the
+ - Row::copy will not fall over if you have two relationships to the
same source with a unique constraint on it
0.08007 2007-09-04 19:36:00
@@ -330,7 +379,7 @@
- Move to using Class::C3::Componentised
- Remove warn statement from DBIx::Class::Row
-0.08005 2007-08-06
+0.08005 2007-08-06
- add timestamp fix re rt.cpan 26978 - no test yet but change
clearly should cause no regressions
- provide alias for related_resultset via local() so it's set
@@ -345,7 +394,7 @@
(original fix from diz)
0.08004 2007-08-06 19:00:00
- - fix storage connect code to not trigger bug via auto-viv
+ - fix storage connect code to not trigger bug via auto-viv
(test from aherzog)
- fixup cursor_class to be an 'inherited' attr for per-package defaults
- add default_resultset_attributes entry to Schema
Modified: DBIx-Class/0.08/branches/ado_mssql/MANIFEST.SKIP
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/MANIFEST.SKIP 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/MANIFEST.SKIP 2009-10-14 13:45:34 UTC (rev 7786)
@@ -6,6 +6,9 @@
\bCVS\b
,v$
\B\.svn\b
+\B\.git\b
+\B\.gitignore\b
+\b_darcs\b
# Avoid Makemaker generated and utility files.
\bMakefile$
Modified: DBIx-Class/0.08/branches/ado_mssql/Makefile.PL
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/Makefile.PL 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/Makefile.PL 2009-10-14 13:45:34 UTC (rev 7786)
@@ -5,103 +5,121 @@
use 5.006001; # delete this line if you want to send patches for earlier.
+# ****** DO NOT ADD OPTIONAL DEPENDENCIES. EVER. --mst ******
+
name 'DBIx-Class';
perl_version '5.006001';
all_from 'lib/DBIx/Class.pm';
-test_requires 'Test::Builder' => 0.33;
-test_requires 'Test::Deep' => 0;
-test_requires 'Test::Exception' => 0;
-test_requires 'Test::More' => 0.92;
-test_requires 'Test::Warn' => 0.11;
+test_requires 'Test::Builder' => '0.33';
+test_requires 'Test::Deep' => '0';
+test_requires 'Test::Exception' => '0';
+test_requires 'Test::More' => '0.92';
+test_requires 'Test::Warn' => '0.21';
-test_requires 'File::Temp' => 0.22;
+test_requires 'File::Temp' => '0.22';
# Core
-requires 'List::Util' => 0;
-requires 'Scalar::Util' => 0;
-requires 'Storable' => 0;
+requires 'List::Util' => '0';
+requires 'Scalar::Util' => '0';
+requires 'Storable' => '0';
# Perl 5.8.0 doesn't have utf8::is_utf8()
-requires 'Encode' => 0 if ($] <= 5.008000);
+requires 'Encode' => '0' if ($] <= 5.008000);
# Dependencies (keep in alphabetical order)
-requires 'Carp::Clan' => 6.0;
-requires 'Class::Accessor::Grouped' => 0.08003;
-requires 'Class::C3::Componentised' => 1.0005;
-requires 'Class::Inspector' => 1.24;
-requires 'Data::Page' => 2.00;
-requires 'DBD::SQLite' => 1.25;
-requires 'DBI' => 1.605;
-requires 'JSON::Any' => 1.18;
-requires 'MRO::Compat' => 0.09;
-requires 'Module::Find' => 0.06;
-requires 'Path::Class' => 0.16;
-requires 'Scope::Guard' => 0.03;
-requires 'SQL::Abstract' => 1.56;
-requires 'SQL::Abstract::Limit' => 0.13;
-requires 'Sub::Name' => 0.04;
+requires 'Carp::Clan' => '6.0';
+requires 'Class::Accessor::Grouped' => '0.09000';
+requires 'Class::C3::Componentised' => '1.0005';
+requires 'Class::Inspector' => '1.24';
+requires 'Data::Page' => '2.00';
+requires 'DBD::SQLite' => '1.25';
+requires 'DBI' => '1.605';
+requires 'JSON::Any' => '1.18';
+requires 'MRO::Compat' => '0.09';
+requires 'Module::Find' => '0.06';
+requires 'Path::Class' => '0.16';
+requires 'Scope::Guard' => '0.03';
+requires 'SQL::Abstract' => '1.60';
+requires 'SQL::Abstract::Limit' => '0.13';
+requires 'Sub::Name' => '0.04';
-recommends 'SQL::Translator' => 0.09004;
-
my %replication_requires = (
- 'Moose', => 0.87,
- 'MooseX::AttributeHelpers' => 0.21,
- 'MooseX::Types', => 0.16,
- 'namespace::clean' => 0.11,
- 'Hash::Merge', => 0.11,
+ 'Moose', => '0.87',
+ 'MooseX::AttributeHelpers' => '0.21',
+ 'MooseX::Types', => '0.16',
+ 'namespace::clean' => '0.11',
+ 'Hash::Merge', => '0.11',
);
+#************************************************************************#
+# Make *ABSOLUTELY SURE* that nothing on this list is a real require, #
+# since every module listed in %force_requires_if_author is deleted #
+# from the final META.yml (thus will never make it as a CPAN dependency) #
+#************************************************************************#
my %force_requires_if_author = (
%replication_requires,
-# 'Module::Install::Pod::Inherit' => 0.01,
- 'Test::Pod::Coverage' => 1.04,
- 'SQL::Translator' => 0.09007,
+ # when changing also adjust $DBIx::Class::Storage::DBI::minimum_sqlt_version
+ 'SQL::Translator' => '0.11002',
+# 'Module::Install::Pod::Inherit' => '0.01',
+
+ # when changing also adjust version in t/02pod.t
+ 'Test::Pod' => '1.26',
+
+ # when changing also adjust version in t/03podcoverage.t
+ 'Test::Pod::Coverage' => '1.08',
+ 'Pod::Coverage' => '0.20',
+
# CDBI-compat related
- 'DBIx::ContextualFetch' => 0,
- 'Class::DBI::Plugin::DeepAbstractSearch' => 0,
- 'Class::Trigger' => 0,
- 'Time::Piece::MySQL' => 0,
- 'Clone' => 0,
- 'Date::Simple' => 3.03,
+ 'DBIx::ContextualFetch' => '0',
+ 'Class::DBI::Plugin::DeepAbstractSearch' => '0',
+ 'Class::Trigger' => '0',
+ 'Time::Piece::MySQL' => '0',
+ 'Clone' => '0',
+ 'Date::Simple' => '3.03',
# t/52cycle.t
- 'Test::Memory::Cycle' => 0,
- 'Devel::Cycle' => 1.10,
+ 'Test::Memory::Cycle' => '0',
+ 'Devel::Cycle' => '1.10',
# t/36datetime.t
# t/60core.t
- 'DateTime::Format::SQLite' => 0,
+ 'DateTime::Format::SQLite' => '0',
# t/96_is_deteministic_value.t
- 'DateTime::Format::Strptime'=> 0,
+ 'DateTime::Format::Strptime'=> '0',
# database-dependent reqs
#
$ENV{DBICTEST_PG_DSN}
? (
- 'Sys::SigAction' => 0,
- 'DBD::Pg' => 2.009002,
- 'DateTime::Format::Pg' => 0,
+ 'Sys::SigAction' => '0',
+ 'DBD::Pg' => '2.009002',
+ 'DateTime::Format::Pg' => '0',
) : ()
,
$ENV{DBICTEST_MYSQL_DSN}
? (
- 'DateTime::Format::MySQL' => 0,
+ 'DateTime::Format::MySQL' => '0',
) : ()
,
$ENV{DBICTEST_ORACLE_DSN}
? (
- 'DateTime::Format::Oracle' => 0,
+ 'DateTime::Format::Oracle' => '0',
) : ()
,
);
+#************************************************************************#
+# Make ABSOLUTELY SURE that nothing on the list above is a real require, #
+# since every module listed in %force_requires_if_author is deleted #
+# from the final META.yml (thus will never make it as a CPAN dependency) #
+#************************************************************************#
install_script (qw|
Modified: DBIx-Class/0.08/branches/ado_mssql/TODO
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/TODO 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/TODO 2009-10-14 13:45:34 UTC (rev 7786)
@@ -25,13 +25,6 @@
__PACKAGE__->table(__PACKAGE__->table()); for the result set to
return the correct object type.
-2006-03-27 by mst
- Add the ability for deploy to be given a directory and grab <dbname>.sql
- out of there if available. Try SQL::Translator if not. If none of the above,
- cry (and die()). Then you can have a script that pre-gens for all available
- SQLT modules so an app can do its own deploy without SQLT on the target
- system
-
2006-05-25 by mst (TODOed by bluefeet)
Add the search attributes "limit" and "rows_per_page".
limit: work as expected just like offset does
Modified: DBIx-Class/0.08/branches/ado_mssql/examples/Schema/insertdb.pl
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/examples/Schema/insertdb.pl 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/examples/Schema/insertdb.pl 2009-10-14 13:45:34 UTC (rev 7786)
@@ -23,10 +23,10 @@
my @cds;
foreach my $lp (keys %albums) {
- my $artist = $schema->resultset('Artist')->search({
+ my $artist = $schema->resultset('Artist')->find({
name => $albums{$lp}
});
- push @cds, [$lp, $artist->first];
+ push @cds, [$lp, $artist->id];
}
$schema->populate('Cd', [
@@ -47,10 +47,10 @@
my @tracks;
foreach my $track (keys %tracks) {
- my $cdname = $schema->resultset('Cd')->search({
+ my $cd = $schema->resultset('Cd')->find({
title => $tracks{$track},
});
- push @tracks, [$cdname->first, $track];
+ push @tracks, [$cd->id, $track];
}
$schema->populate('Track',[
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Componentised.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Componentised.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Componentised.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -4,31 +4,10 @@
use strict;
use warnings;
+###
+# Keep this class for backwards compatibility
+###
+
use base 'Class::C3::Componentised';
-use Carp::Clan qw/^DBIx::Class/;
-sub inject_base {
- my ($class, $target, @to_inject) = @_;
- {
- no strict 'refs';
- foreach my $to (reverse @to_inject) {
- my @comps = qw(DigestColumns ResultSetManager Ordered UTF8Columns);
- # Add components here that need to be loaded before Core
- foreach my $first_comp (@comps) {
- if ($to eq 'DBIx::Class::Core' &&
- $target->isa("DBIx::Class::${first_comp}")) {
- carp "Possible incorrect order of components in ".
- "${target}::load_components($first_comp) call: Core loaded ".
- "before $first_comp. See the documentation for ".
- "DBIx::Class::$first_comp for more information";
- }
- }
- unshift( @{"${target}::ISA"}, $to )
- unless ($target eq $to || $target->isa($to));
- }
- }
-
- $class->next::method($target, @to_inject);
-}
-
1;
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Core.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Core.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Core.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -34,8 +34,6 @@
=over 4
-=item L<DBIx::Class::Serialize::Storable>
-
=item L<DBIx::Class::InflateColumn>
=item L<DBIx::Class::Relationship>
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Cursor.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Cursor.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Cursor.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -3,6 +3,8 @@
use strict;
use warnings;
+use base qw/DBIx::Class/;
+
=head1 NAME
DBIx::Class::Cursor - Abstract object representing a query cursor on a
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/InflateColumn/DateTime.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/InflateColumn/DateTime.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/InflateColumn/DateTime.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -71,7 +71,7 @@
reports to the list very much welcome).
If the data_type of a field is C<date>, C<datetime> or C<timestamp> (or
-a derivative of these datatypes, e.g. C<timestamp with timezone>, this
+a derivative of these datatypes, e.g. C<timestamp with timezone>), this
module will automatically call the appropriate parse/format method for
deflation/inflation as defined in the storage class. For instance, for
a C<datetime> field the methods C<parse_datetime> and C<format_datetime>
@@ -86,8 +86,6 @@
__PACKAGE__->load_components(qw/InflateColumn/);
-__PACKAGE__->mk_group_accessors('simple' => '__datetime_parser');
-
=head2 register_column
Chains with the L<DBIx::Class::Row/register_column> method, and sets
@@ -224,12 +222,7 @@
}
sub _datetime_parser {
- my $self = shift;
- if (my $parser = $self->__datetime_parser) {
- return $parser;
- }
- my $parser = $self->result_source->storage->datetime_parser(@_);
- return $self->__datetime_parser($parser);
+ shift->result_source->storage->datetime_parser (@_);
}
1;
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Component.pod
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Component.pod 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Component.pod 2009-10-14 13:45:34 UTC (rev 7786)
@@ -84,6 +84,8 @@
These components provide extra functionality beyond
basic functionality that you can't live without.
+L<DBIx::Class::Serialize::Storable> - Hooks for Storable freeze/thaw.
+
L<DBIx::Class::CDBICompat> - Class::DBI Compatibility layer.
L<DBIx::Class::FormTools> - Build forms with multiple interconnected objects.
@@ -110,10 +112,6 @@
change, they may not work, etc. So, use them if you want, but
be warned.
-L<DBIx::Class::Serialize> - Hooks for Storable freeze/thaw.
-
-L<DBIx::Class::Serialize::Storable> - Hooks for Storable freeze/thaw.
-
L<DBIx::Class::Validation> - Validate all data before submitting to your database.
=head2 Core
@@ -145,4 +143,3 @@
=head1 AUTHOR
Aran Clary Deltac <bluefeet at cpan.org>
-
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Cookbook.pod
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Cookbook.pod 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Cookbook.pod 2009-10-14 13:45:34 UTC (rev 7786)
@@ -37,8 +37,11 @@
This results in something like the following C<WHERE> clause:
- WHERE artist LIKE '%Lamb%' AND title LIKE '%Fear of Fours%'
+ WHERE artist LIKE ? AND title LIKE ?
+And the following bind values for the placeholders: C<'%Lamb%'>, C<'%Fear of
+Fours%'>.
+
Other queries might require slightly more complex logic:
my @albums = $schema->resultset('Album')->search({
@@ -244,6 +247,8 @@
# Or use DBIx::Class::AccessorGroup:
__PACKAGE__->mk_group_accessors('column' => 'name_length');
+See also L</Using SQL functions on the left hand side of a comparison>.
+
=head2 SELECT DISTINCT with multiple columns
my $rs = $schema->resultset('Artist')->search(
@@ -331,7 +336,7 @@
The following will B<not> work:
my $rs = $schema->resultset('CD')->search({
- artist_id => $inside_rs->get_column('id')->as_query,
+ artist_id => $inside_rs->get_column('id')->as_query, # does NOT work
});
=head3 Support
@@ -404,8 +409,10 @@
=head2 Using SQL functions on the left hand side of a comparison
-Using SQL functions on the left hand side of a comparison is generally
-not a good idea since it requires a scan of the entire table. However,
+Using SQL functions on the left hand side of a comparison is generally not a
+good idea since it requires a scan of the entire table. (Unless your RDBMS
+supports indexes on expressions - including return values of functions -, and
+you create an index on the return value of the function in question.) However,
it can be accomplished with C<DBIx::Class> when necessary.
If you do not have quoting on, simply include the function in your search
@@ -413,25 +420,30 @@
$rs->search({ 'YEAR(date_of_birth)' => 1979 });
-With quoting on, or for a more portable solution, use the C<where>
-attribute:
+With quoting on, or for a more portable solution, use literal SQL values with
+placeholders:
- $rs->search({}, { where => \'YEAR(date_of_birth) = 1979' });
+ $rs->search(\[ 'YEAR(date_of_birth) = ?', [ plain_value => 1979 ] ]);
-=begin hidden
+ # Equivalent SQL:
+ # SELECT * FROM employee WHERE YEAR(date_of_birth) = ?
-(When the bind args ordering bug is fixed, this technique will be better
-and can replace the one above.)
+ $rs->search({
+ name => 'Bob',
+ -nest => \[ 'YEAR(date_of_birth) = ?', [ plain_value => 1979 ] ],
+ });
-With quoting on, or for a more portable solution, use the C<where> and
-C<bind> attributes:
+ # Equivalent SQL:
+ # SELECT * FROM employee WHERE name = ? AND YEAR(date_of_birth) = ?
- $rs->search({}, {
- where => \'YEAR(date_of_birth) = ?',
- bind => [ 1979 ]
- });
+Note: the C<plain_value> string in the C<< [ plain_value => 1979 ] >> part
+should be either the same as the name of the column (do this if the type of the
+return value of the function is the same as the type of the column) or
+otherwise it's essentially a dummy string currently (use C<plain_value> as a
+habit). It is used by L<DBIx::Class> to handle special column types.
-=end hidden
+See also L<SQL::Abstract/Literal SQL with placeholders and bind values
+(subqueries)>.
=head1 JOINS AND PREFETCHING
@@ -922,6 +934,9 @@
### The statement below will print
print "I can do admin stuff\n" if $admin->can('do_admin_stuff');
+Alternatively you can use L<DBIx::Class::DynamicSubclass> that implements
+exactly the above functionality.
+
=head2 Skip row object creation for faster results
DBIx::Class is not built for speed, it's built for convenience and
@@ -1062,7 +1077,7 @@
To order C<< $book->pages >> by descending page_number, create the relation
as follows:
- __PACKAGE__->has_many('pages' => 'Page', 'book', { order_by => \'page_number DESC'} );
+ __PACKAGE__->has_many('pages' => 'Page', 'book', { order_by => { -desc => 'page_number'} } );
=head2 Filtering a relationship result set
@@ -1104,6 +1119,16 @@
$rs = $user->addresses(); # get all addresses for a user
$rs = $address->users(); # get all users for an address
+ my $address = $user->add_to_addresses( # returns a My::Address instance,
+ # NOT a My::UserAddress instance!
+ {
+ country => 'United Kingdom',
+ area_code => 'XYZ',
+ town => 'London',
+ street => 'Sesame',
+ }
+ );
+
=head2 Relationships across DB schemas
Mapping relationships across L<DB schemas|DBIx::Class::Manual::Glossary/DB schema>
@@ -1517,7 +1542,7 @@
Alternatively, you can send the conversion sql scripts to your
customers as above.
-=head2 Setting quoting for the generated SQL.
+=head2 Setting quoting for the generated SQL
If the database contains column names with spaces and/or reserved words, they
need to be quoted in the SQL queries. This is done using:
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/DocMap.pod
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/DocMap.pod 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/DocMap.pod 2009-10-14 13:45:34 UTC (rev 7786)
@@ -40,8 +40,6 @@
=item L<DBIx::Class::Core> - Set of standard components to load.
-=item L<DBIx::Class::Serialize::Storable> - ?
-
=item L<DBIx::Class::InflateColumn> - Making objects out of your columns.
=item L<DBIx::Class::InflateColumn::DateTime> - Magically turn your datetime or timestamp columns into DateTime objects.
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Example.pod
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Example.pod 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Example.pod 2009-10-14 13:45:34 UTC (rev 7786)
@@ -27,7 +27,7 @@
Install DBIx::Class via CPAN should be sufficient.
-=head3 Create the database/tables.
+=head3 Create the database/tables
First make and change the directory:
@@ -126,7 +126,7 @@
1;
-=head3 Write a script to insert some records.
+=head3 Write a script to insert some records
insertdb.pl
@@ -155,10 +155,10 @@
my @cds;
foreach my $lp (keys %albums) {
- my $artist = $schema->resultset('Artist')->search({
+ my $artist = $schema->resultset('Artist')->find({
name => $albums{$lp}
});
- push @cds, [$lp, $artist->first];
+ push @cds, [$lp, $artist->id];
}
$schema->populate('Cd', [
@@ -179,10 +179,10 @@
my @tracks;
foreach my $track (keys %tracks) {
- my $cdname = $schema->resultset('Cd')->search({
+ my $cdname = $schema->resultset('Cd')->find({
title => $tracks{$track},
});
- push @tracks, [$cdname->first, $track];
+ push @tracks, [$cdname->id, $track];
}
$schema->populate('Track',[
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/FAQ.pod
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/FAQ.pod 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/FAQ.pod 2009-10-14 13:45:34 UTC (rev 7786)
@@ -26,8 +26,7 @@
Next, spend some time defining which data you need to store, and how
it relates to the other data you have. For some help on normalisation,
-go to L<http://b62.tripod.com/doc/dbbase.htm> or
-L<http://209.197.234.36/db/simple.html>.
+go to L<http://b62.tripod.com/doc/dbbase.htm>.
Now, decide whether you want to have the database itself be the
definitive source of information about the data layout, or your
@@ -217,10 +216,10 @@
->search({'created_time' => { '>=', '2006-06-01 00:00:00' } })
-Note that to use a function here you need to make the whole value into
-a scalar reference:
+Note that to use a function here you need to make it a scalar
+reference:
- ->search({'created_time' => \'>= yesterday()' })
+ ->search({'created_time' => { '>=', \'yesterday()' } })
=item .. search in several tables simultaneously?
@@ -244,34 +243,18 @@
query, which can be accessed similarly to a table, see your database
documentation for details.
-=item .. search using greater-than or less-than and database functions?
-
-To use functions or literal SQL with conditions other than equality
-you need to supply the entire condition, for example:
-
- my $interval = "< now() - interval '12 hours'";
- ->search({last_attempt => \$interval})
-
-and not:
-
- my $interval = "now() - interval '12 hours'";
- ->search({last_attempt => { '<' => \$interval } })
-
=item .. search with an SQL function on the left hand side?
To use an SQL function on the left hand side of a comparison:
- ->search({}, { where => \'YEAR(date_of_birth)=1979' });
+ ->search({ -nest => \[ 'YEAR(date_of_birth) = ?', [ plain_value => 1979 ] ] });
-=begin hidden
+Note: the C<plain_value> string in the C<< [ plain_value => 1979 ] >> part
+should be either the same as the name of the column (do this if the type of the
+return value of the function is the same as the type of the column) or
+otherwise it's essentially a dummy string currently (use C<plain_value> as a
+habit). It is used by L<DBIx::Class> to handle special column types.
-(When the bind arg ordering bug is fixed, the previous example can be
-replaced with the following.)
-
- ->search({}, { where => \'YEAR(date_of_birth)=?', bind => [ 1979 ] });
-
-=end hidden
-
Or, if you have quoting off:
->search({ 'YEAR(date_of_birth)' => 1979 });
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Troubleshooting.pod
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Troubleshooting.pod 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Manual/Troubleshooting.pod 2009-10-14 13:45:34 UTC (rev 7786)
@@ -156,5 +156,16 @@
L<https://bugzilla.redhat.com/show_bug.cgi?id=460308> and
L<http://rhn.redhat.com/errata/RHBA-2008-0876.html>
+=head2 Excessive Memory Allocation with TEXT/BLOB/etc. Columns and Large LongReadLen
+
+It has been observed, using L<DBD::ODBC>, that creating a L<DBIx::Class::Row>
+object which includes a column of data type TEXT/BLOB/etc. will allocate
+LongReadLen bytes. This allocation does not leak, but if LongReadLen
+is large in size, and many such row objects are created, e.g. as the
+output of a ResultSet query, the memory footprint of the Perl interpreter
+can grow very large.
+
+The solution is to use the smallest practical value for LongReadLen.
+
=cut
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Ordered.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Ordered.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Ordered.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -434,10 +434,7 @@
sub move_to_group {
my( $self, $to_group, $to_position ) = @_;
- $self->throw_exception ('move_to_group() expects a group specification')
- unless defined $to_group;
-
- # if we're given a string, turn it into a hashref
+ # if we're given a single value, turn it into a hashref
unless (ref $to_group eq 'HASH') {
my @gcols = $self->_grouping_columns;
@@ -504,7 +501,7 @@
}
else {
my $bumped_pos_val = $self->_position_value ($to_position);
- my @between = ($to_position, $new_group_last_position);
+ my @between = map { $self->_position_value ($_) } ($to_position, $new_group_last_position);
$self->_shift_siblings (1, @between); #shift right
$self->set_column( $position_column => $bumped_pos_val );
}
@@ -685,27 +682,9 @@
if you are working with preexisting non-normalised position data,
or if you need to work with materialized path columns.
-=head2 _position
-
- my $num_pos = $item->_position;
-
-Returns the B<absolute numeric position> of the current object, with the
-first object being at position 1, its sibling at position 2 and so on.
-By default simply returns the value of L</position_column>.
-
-=cut
-sub _position {
- my $self = shift;
-
-# #the right way to do this
-# return $self->previous_siblings->count + 1;
-
- return $self->get_column ($self->position_column);
-}
-
=head2 _position_from_value
- my $num_pos = $item->_position_of_value ( $pos_value )
+ my $num_pos = $item->_position_from_value ( $pos_value )
Returns the B<absolute numeric position> of an object with a B<position
value> set to C<$pos_value>. By default simply returns C<$pos_value>.
@@ -867,6 +846,19 @@
);
}
+=head2 _position
+
+ my $num_pos = $item->_position;
+
+Returns the B<absolute numeric position> of the current object, with the
+first object being at position 1, its sibling at position 2 and so on.
+
+=cut
+sub _position {
+ my $self = shift;
+ return $self->_position_from_value ($self->get_column ($self->position_column) );
+}
+
=head2 _grouping_clause
This method returns one or more name=>value pairs for limiting a search
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSet.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSet.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSet.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -7,6 +7,7 @@
'bool' => "_bool",
fallback => 1;
use Carp::Clan qw/^DBIx::Class/;
+use DBIx::Class::Exception;
use Data::Page;
use Storable;
use DBIx::Class::ResultSetColumn;
@@ -518,7 +519,7 @@
# in ::Relationship::Base::search_related (the row method), and furthermore
# the relationship is of the 'single' type. This means that the condition
# provided by the relationship (already attached to $self) is sufficient,
- # as there can be only one row in the databse that would satisfy the
+ # as there can be only one row in the database that would satisfy the
# relationship
}
else {
@@ -570,12 +571,16 @@
my $where = $self->_collapse_cond($self->{attrs}{where} || {});
my $num_where = scalar keys %$where;
- my @unique_queries;
+ my (@unique_queries, %seen_column_combinations);
foreach my $name (@constraint_names) {
- my @unique_cols = $self->result_source->unique_constraint_columns($name);
- my $unique_query = $self->_build_unique_query($query, \@unique_cols);
+ my @constraint_cols = $self->result_source->unique_constraint_columns($name);
- my $num_cols = scalar @unique_cols;
+ my $constraint_sig = join "\x00", sort @constraint_cols;
+ next if $seen_column_combinations{$constraint_sig}++;
+
+ my $unique_query = $self->_build_unique_query($query, \@constraint_cols);
+
+ my $num_cols = scalar @constraint_cols;
my $num_query = scalar keys %$unique_query;
my $total = $num_query + $num_where;
@@ -1235,7 +1240,7 @@
my $tmp_attrs = { %$attrs };
- # take off any limits, record_filter is cdbi, and no point of ordering a count
+ # take off any limits, record_filter is cdbi, and no point of ordering a count
delete $tmp_attrs->{$_} for (qw/select as rows offset order_by record_filter/);
# overwrite the selector (supplied by the storage)
@@ -2192,13 +2197,14 @@
a unique constraint that is not the primary key, or looking for
related rows.
-If you want objects to be saved immediately, use L</find_or_create> instead.
+If you want objects to be saved immediately, use L</find_or_create>
+instead.
-B<Note>: C<find_or_new> is probably not what you want when creating a
-new row in a table that uses primary keys supplied by the
-database. Passing in a primary key column with a value of I<undef>
-will cause L</find> to attempt to search for a row with a value of
-I<NULL>.
+B<Note>: Take care when using C<find_or_new> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<find_or_new>, even when set to C<undef>.
=cut
@@ -2278,6 +2284,19 @@
}
});
+=over
+
+=item WARNING
+
+When subclassing ResultSet never attempt to override this method. Since
+it is a simple shortcut for C<< $self->new_result($attrs)->insert >>, a
+lot of the internals simply never call it, so your override will be
+bypassed more often than not. Override either L<new|DBIx::Class::Row/new>
+or L<insert|DBIx::Class::Row/insert> depending on how early in the
+L</create> process you need to intervene.
+
+=back
+
=cut
sub create {
@@ -2327,11 +2346,11 @@
the find has completed and before the create has started. To avoid
this problem, use find_or_create() inside a transaction.
-B<Note>: C<find_or_create> is probably not what you want when creating
-a new row in a table that uses primary keys supplied by the
-database. Passing in a primary key column with a value of I<undef>
-will cause L</find> to attempt to search for a row with a value of
-I<NULL>.
+B<Note>: Take care when using C<find_or_create> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<find_or_create>, even when set to C<undef>.
See also L</find> and L</update_or_create>. For information on how to declare
unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
@@ -2394,11 +2413,11 @@
See also L</find> and L</find_or_create>. For information on how to declare
unique constraints, see L<DBIx::Class::ResultSource/add_unique_constraint>.
-B<Note>: C<update_or_create> is probably not what you want when
-looking for a row in a table that uses primary keys supplied by the
-database, unless you actually have a key value. Passing in a primary
-key column with a value of I<undef> will cause L</find> to attempt to
-search for a row with a value of I<NULL>.
+B<Note>: Take care when using C<update_or_create> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<update_or_create>, even when set to C<undef>.
=cut
@@ -2455,8 +2474,14 @@
$cd->insert;
}
-See also L</find>, L</find_or_create> and L<find_or_new>.
+B<Note>: Take care when using C<update_or_new> with a table having
+columns with default values that you intend to be automatically
+supplied by the database (e.g. an auto_increment primary key column).
+In normal usage, the value of such columns should NOT be included at
+all in the call to C<update_or_new>, even when set to C<undef>.
+See also L</find>, L</find_or_create> and L</find_or_new>.
+
=cut
sub update_or_new {
@@ -2539,6 +2564,23 @@
shift->set_cache(undef);
}
+=head2 is_paged
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: true, if the resultset has been paginated
+
+=back
+
+=cut
+
+sub is_paged {
+ my ($self) = @_;
+ return !!$self->{attrs}{page};
+}
+
=head2 related_resultset
=over 4
@@ -2686,8 +2728,8 @@
}];
my $seen = { %{$attrs->{seen_join} || {} } };
- my $jpath = ($attrs->{seen_join} && keys %{$attrs->{seen_join}})
- ? $from->[-1][0]{-join_path}
+ my $jpath = ($attrs->{seen_join} && keys %{$attrs->{seen_join}})
+ ? $from->[-1][0]{-join_path}
: [];
@@ -2765,24 +2807,35 @@
# build columns (as long as select isn't set) into a set of as/select hashes
unless ( $attrs->{select} ) {
- @colbits = map {
- ( ref($_) eq 'HASH' )
- ? $_
- : {
- (
- /^\Q${alias}.\E(.+)$/
- ? "$1"
- : "$_"
- )
- =>
- (
- /\./
- ? "$_"
- : "${alias}.$_"
- )
- }
- } ( ref($attrs->{columns}) eq 'ARRAY' ) ? @{ delete $attrs->{columns}} : (delete $attrs->{columns} || $source->columns );
+
+ my @cols = ( ref($attrs->{columns}) eq 'ARRAY' )
+ ? @{ delete $attrs->{columns}}
+ : (
+ ( delete $attrs->{columns} )
+ ||
+ $source->columns
+ )
+ ;
+
+ @colbits = map {
+ ( ref($_) eq 'HASH' )
+ ? $_
+ : {
+ (
+ /^\Q${alias}.\E(.+)$/
+ ? "$1"
+ : "$_"
+ )
+ =>
+ (
+ /\./
+ ? "$_"
+ : "${alias}.$_"
+ )
+ }
+ } @cols;
}
+
# add the additional columns on
foreach ( 'include_columns', '+columns' ) {
push @colbits, map {
@@ -2840,7 +2893,7 @@
if ( $attrs->{join} || $attrs->{prefetch} ) {
- $self->throw_exception ('join/prefetch can not be used with a literal scalarref {from}')
+ $self->throw_exception ('join/prefetch can not be used with a custom {from}')
if ref $attrs->{from} ne 'ARRAY';
my $join = delete $attrs->{join} || {};
@@ -2879,7 +2932,12 @@
# generate the distinct induced group_by early, as prefetch will be carried via a
# subquery (since a group_by is present)
if (delete $attrs->{distinct}) {
- $attrs->{group_by} ||= [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ];
+ if ($attrs->{group_by}) {
+ carp ("Useless use of distinct on a grouped resultset ('distinct' is ignored when a 'group_by' is present)");
+ }
+ else {
+ $attrs->{group_by} = [ grep { !ref($_) || (ref($_) ne 'HASH') } @{$attrs->{select}} ];
+ }
}
$attrs->{collapse} ||= {};
@@ -2907,7 +2965,7 @@
# even though it doesn't make much sense, this is what pre 081xx has
# been doing
if (my $page = delete $attrs->{page}) {
- $attrs->{offset} =
+ $attrs->{offset} =
($attrs->{rows} * ($page - 1))
+
($attrs->{offset} || 0)
@@ -2986,6 +3044,13 @@
sub _calculate_score {
my ($self, $a, $b) = @_;
+ if (defined $a xor defined $b) {
+ return 0;
+ }
+ elsif (not defined $a) {
+ return 1;
+ }
+
if (ref $b eq 'HASH') {
my ($b_key) = keys %{$b};
if (ref $a eq 'HASH') {
@@ -3067,12 +3132,13 @@
sub throw_exception {
my $self=shift;
+
if (ref $self && $self->_source_handle->schema) {
$self->_source_handle->schema->throw_exception(@_)
- } else {
- croak(@_);
}
-
+ else {
+ DBIx::Class::Exception->throw(@_);
+ }
}
# XXX: FIXME: Attributes docs need clearing up
@@ -3094,7 +3160,7 @@
=back
-Which column(s) to order the results by.
+Which column(s) to order the results by.
[The full list of suitable values is documented in
L<SQL::Abstract/"ORDER BY CLAUSES">; the following is a summary of
@@ -3386,12 +3452,12 @@
=over 4
-=item *
+=item *
Prefetch uses the L</cache> to populate the prefetched relationships. This
may or may not be what you want.
-=item *
+=item *
If you specify a condition on a prefetched relationship, ONLY those
rows that match the prefetched condition will be fetched into that relationship.
@@ -3491,7 +3557,8 @@
=back
-Set to 1 to group by all columns.
+Set to 1 to group by all columns. If the resultset already has a group_by
+attribute, this setting is ignored and an appropriate warning is issued.
=head2 where
@@ -3502,8 +3569,8 @@
# only return rows WHERE deleted IS NULL for all searches
__PACKAGE__->resultset_attributes({ where => { deleted => undef } }); )
-Can be overridden by passing C<{ where => undef }> as an attribute
-to a resulset.
+Can be overridden by passing C<< { where => undef } >> as an attribute
+to a resultset.
=back
@@ -3525,177 +3592,6 @@
For more examples of using these attributes, see
L<DBIx::Class::Manual::Cookbook>.
-=head2 from
-
-=over 4
-
-=item Value: \@from_clause
-
-=back
-
-The C<from> attribute gives you manual control over the C<FROM> clause of SQL
-statements generated by L<DBIx::Class>, allowing you to express custom C<JOIN>
-clauses.
-
-NOTE: Use this on your own risk. This allows you to shoot off your foot!
-
-C<join> will usually do what you need and it is strongly recommended that you
-avoid using C<from> unless you cannot achieve the desired result using C<join>.
-And we really do mean "cannot", not just tried and failed. Attempting to use
-this because you're having problems with C<join> is like trying to use x86
-ASM because you've got a syntax error in your C. Trust us on this.
-
-Now, if you're still really, really sure you need to use this (and if you're
-not 100% sure, ask the mailing list first), here's an explanation of how this
-works.
-
-The syntax is as follows -
-
- [
- { <alias1> => <table1> },
- [
- { <alias2> => <table2>, -join_type => 'inner|left|right' },
- [], # nested JOIN (optional)
- { <table1.column1> => <table2.column2>, ... (more conditions) },
- ],
- # More of the above [ ] may follow for additional joins
- ]
-
- <table1> <alias1>
- JOIN
- <table2> <alias2>
- [JOIN ...]
- ON <table1.column1> = <table2.column2>
- <more joins may follow>
-
-An easy way to follow the examples below is to remember the following:
-
- Anything inside "[]" is a JOIN
- Anything inside "{}" is a condition for the enclosing JOIN
-
-The following examples utilize a "person" table in a family tree application.
-In order to express parent->child relationships, this table is self-joined:
-
- # Person->belongs_to('father' => 'Person');
- # Person->belongs_to('mother' => 'Person');
-
-C<from> can be used to nest joins. Here we return all children with a father,
-then search against all mothers of those children:
-
- $rs = $schema->resultset('Person')->search(
- undef,
- {
- alias => 'mother', # alias columns in accordance with "from"
- from => [
- { mother => 'person' },
- [
- [
- { child => 'person' },
- [
- { father => 'person' },
- { 'father.person_id' => 'child.father_id' }
- ]
- ],
- { 'mother.person_id' => 'child.mother_id' }
- ],
- ]
- },
- );
-
- # Equivalent SQL:
- # SELECT mother.* FROM person mother
- # JOIN (
- # person child
- # JOIN person father
- # ON ( father.person_id = child.father_id )
- # )
- # ON ( mother.person_id = child.mother_id )
-
-The type of any join can be controlled manually. To search against only people
-with a father in the person table, we could explicitly use C<INNER JOIN>:
-
- $rs = $schema->resultset('Person')->search(
- undef,
- {
- alias => 'child', # alias columns in accordance with "from"
- from => [
- { child => 'person' },
- [
- { father => 'person', -join_type => 'inner' },
- { 'father.id' => 'child.father_id' }
- ],
- ]
- },
- );
-
- # Equivalent SQL:
- # SELECT child.* FROM person child
- # INNER JOIN person father ON child.father_id = father.id
-
-You can select from a subquery by passing a resultset to from as follows.
-
- $schema->resultset('Artist')->search(
- undef,
- { alias => 'artist2',
- from => [ { artist2 => $artist_rs->as_query } ],
- } );
-
- # and you'll get sql like this..
- # SELECT artist2.artistid, artist2.name, artist2.rank, artist2.charfield FROM
- # ( SELECT me.artistid, me.name, me.rank, me.charfield FROM artists me ) artist2
-
-If you need to express really complex joins, you
-can supply literal SQL to C<from> via a scalar reference. In this case
-the contents of the scalar will replace the table name associated with the
-resultsource.
-
-WARNING: This technique might very well not work as expected on chained
-searches - you have been warned.
-
- # Assuming the Event resultsource is defined as:
-
- MySchema::Event->add_columns (
- sequence => {
- data_type => 'INT',
- is_auto_increment => 1,
- },
- location => {
- data_type => 'INT',
- },
- type => {
- data_type => 'INT',
- },
- );
- MySchema::Event->set_primary_key ('sequence');
-
- # This will get back the latest event for every location. The column
- # selector is still provided by DBIC, all we do is add a JOIN/WHERE
- # combo to limit the resultset
-
- $rs = $schema->resultset('Event');
- $table = $rs->result_source->name;
- $latest = $rs->search (
- undef,
- { from => \ "
- (SELECT e1.* FROM $table e1
- JOIN $table e2
- ON e1.location = e2.location
- AND e1.sequence < e2.sequence
- WHERE e2.sequence is NULL
- ) me",
- },
- );
-
- # Equivalent SQL (with the DBIC chunks added):
-
- SELECT me.sequence, me.location, me.type FROM
- (SELECT e1.* FROM events e1
- JOIN events e2
- ON e1.location = e2.location
- AND e1.sequence < e2.sequence
- WHERE e2.sequence is NULL
- ) me;
-
=head2 for
=over 4
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSetColumn.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSetColumn.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSetColumn.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,7 +1,12 @@
package DBIx::Class::ResultSetColumn;
+
use strict;
use warnings;
+
use base 'DBIx::Class';
+
+use Carp::Clan qw/^DBIx::Class/;
+use DBIx::Class::Exception;
use List::Util;
=head1 NAME
@@ -61,7 +66,7 @@
my $select = defined $as_index ? $select_list->[$as_index] : $column;
# {collapse} would mean a has_many join was injected, which in turn means
- # we need to group IF WE CAN (only if the column in question is unique)
+ # we need to group *IF WE CAN* (only if the column in question is unique)
if (!$new_attrs->{group_by} && keys %{$orig_attrs->{collapse}}) {
# scan for a constraint that would contain our column only - that'd be proof
@@ -76,9 +81,17 @@
if ($col eq $select or $fqcol eq $select) {
$new_attrs->{group_by} = [ $select ];
+ delete $new_attrs->{distinct}; # it is ignored when group_by is present
last;
}
}
+
+ if (!$new_attrs->{group_by}) {
+ carp (
+ "Attempting to retrieve non-unique column '$column' on a resultset containing "
+ . 'one-to-many joins will return duplicate results.'
+ );
+ }
}
my $new = bless { _select => $select, _as => $column, _parent_resultset => $new_parent_rs }, $class;
@@ -125,7 +138,10 @@
sub next {
my $self = shift;
+
+ # using cursor so we don't inflate anything
my ($row) = $self->_resultset->cursor->next;
+
return $row;
}
@@ -149,6 +165,8 @@
sub all {
my $self = shift;
+
+ # using cursor so we don't inflate anything
return map { $_->[0] } $self->_resultset->cursor->all;
}
@@ -194,10 +212,41 @@
sub first {
my $self = shift;
- my ($row) = $self->_resultset->cursor->reset->next;
+
+ # using cursor so we don't inflate anything
+ $self->_resultset->cursor->reset;
+ my ($row) = $self->_resultset->cursor->next;
+
return $row;
}
+=head2 single
+
+=over 4
+
+=item Arguments: none
+
+=item Return Value: $value
+
+=back
+
+Much like L<DBIx::Class::ResultSet/single> fetches one and only one column
+value using the cursor directly. If additional rows are present a warning
+is issued before discarding the cursor.
+
+=cut
+
+sub single {
+ my $self = shift;
+
+ my $attrs = $self->_resultset->_resolved_attrs;
+ my ($row) = $self->_resultset->result_source->storage->select_single(
+ $attrs->{from}, $attrs->{select}, $attrs->{where}, $attrs
+ );
+
+ return $row;
+}
+
=head2 min
=over 4
@@ -378,11 +427,13 @@
sub throw_exception {
my $self=shift;
+
if (ref $self && $self->{_parent_resultset}) {
- $self->{_parent_resultset}->throw_exception(@_)
- } else {
- croak(@_);
+ $self->{_parent_resultset}->throw_exception(@_);
}
+ else {
+ DBIx::Class::Exception->throw(@_);
+ }
}
# _resultset
@@ -395,7 +446,7 @@
#
# Returns the underlying resultset. Creates it from the parent resultset if
# necessary.
-#
+#
sub _resultset {
my $self = shift;
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSource.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSource.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSource.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -5,8 +5,9 @@
use DBIx::Class::ResultSet;
use DBIx::Class::ResultSourceHandle;
+
+use DBIx::Class::Exception;
use Carp::Clan qw/^DBIx::Class/;
-use Storable;
use base qw/DBIx::Class/;
@@ -1195,7 +1196,7 @@
# Returns the {from} structure used to express JOIN conditions
sub _resolve_join {
- my ($self, $join, $alias, $seen, $jpath, $force_left) = @_;
+ my ($self, $join, $alias, $seen, $jpath, $parent_force_left) = @_;
# we need a supplied one, because we do in-place modifications, no returns
$self->throw_exception ('You must supply a seen hashref as the 3rd argument to _resolve_join')
@@ -1206,46 +1207,56 @@
$jpath = [@$jpath];
- if (ref $join eq 'ARRAY') {
+ if (not defined $join) {
+ return ();
+ }
+ elsif (ref $join eq 'ARRAY') {
return
map {
- $self->_resolve_join($_, $alias, $seen, $jpath, $force_left);
+ $self->_resolve_join($_, $alias, $seen, $jpath, $parent_force_left);
} @$join;
- } elsif (ref $join eq 'HASH') {
- return
- map {
- my $as = ($seen->{$_} ? join ('_', $_, $seen->{$_} + 1) : $_); # the actual seen value will be incremented below
- local $force_left->{force} = $force_left->{force};
- (
- $self->_resolve_join($_, $alias, $seen, [@$jpath], $force_left),
- $self->related_source($_)->_resolve_join(
- $join->{$_}, $as, $seen, [@$jpath, $_], $force_left
- )
- );
- } keys %$join;
- } elsif (ref $join) {
- $self->throw_exception("No idea how to resolve join reftype ".ref $join);
- } else {
+ }
+ elsif (ref $join eq 'HASH') {
- return() unless defined $join;
+ my @ret;
+ for my $rel (keys %$join) {
+ my $rel_info = $self->relationship_info($rel)
+ or $self->throw_exception("No such relationship ${rel}");
+
+ my $force_left = $parent_force_left;
+ $force_left ||= lc($rel_info->{attrs}{join_type}||'') eq 'left';
+
+ # the actual seen value will be incremented by the recursion
+ my $as = ($seen->{$rel} ? join ('_', $rel, $seen->{$rel} + 1) : $rel);
+
+ push @ret, (
+ $self->_resolve_join($rel, $alias, $seen, [@$jpath], $force_left),
+ $self->related_source($rel)->_resolve_join(
+ $join->{$rel}, $as, $seen, [@$jpath, $rel], $force_left
+ )
+ );
+ }
+ return @ret;
+
+ }
+ elsif (ref $join) {
+ $self->throw_exception("No idea how to resolve join reftype ".ref $join);
+ }
+ else {
my $count = ++$seen->{$join};
my $as = ($count > 1 ? "${join}_${count}" : $join);
- my $rel_info = $self->relationship_info($join);
- $self->throw_exception("No such relationship ${join}") unless $rel_info;
- my $type;
- if ($force_left) {
- $type = 'left';
- } else {
- $type = $rel_info->{attrs}{join_type} || '';
- $force_left = 1 if lc($type) eq 'left';
- }
+ my $rel_info = $self->relationship_info($join)
+ or $self->throw_exception("No such relationship ${join}");
my $rel_src = $self->related_source($join);
return [ { $as => $rel_src->from,
-source_handle => $rel_src->handle,
- -join_type => $type,
+ -join_type => $parent_force_left
+ ? 'left'
+ : $rel_info->{attrs}{join_type}
+ ,
-join_path => [@$jpath, $join],
-alias => $as,
-relation_chain_depth => $seen->{-relation_chain_depth} || 0,
@@ -1322,10 +1333,14 @@
#warn "$self $k $for $v";
unless ($for->has_column_loaded($v)) {
if ($for->in_storage) {
- $self->throw_exception(
- "Column ${v} not loaded or not passed to new() prior to insert()"
- ." on ${for} trying to resolve relationship (maybe you forgot "
- ."to call ->discard_changes to get defaults from the db)"
+ $self->throw_exception(sprintf
+ 'Unable to resolve relationship from %s to %s: column %s.%s not '
+ . 'loaded from storage (or not passed to new() prior to insert()). '
+ . 'Maybe you forgot to call ->discard_changes to get defaults from the db.',
+
+ $for->result_source->source_name,
+ $as,
+ $as, $v,
);
}
return $UNRESOLVABLE_CONDITION;
@@ -1435,7 +1450,10 @@
my ($self, $pre, $alias, $alias_map, $order, $collapse, $pref_path) = @_;
$pref_path ||= [];
- if( ref $pre eq 'ARRAY' ) {
+ if (not defined $pre) {
+ return ();
+ }
+ elsif( ref $pre eq 'ARRAY' ) {
return
map { $self->_resolve_prefetch( $_, $alias, $alias_map, $order, $collapse, [ @$pref_path ] ) }
@$pre;
@@ -1458,7 +1476,7 @@
$p = $p->{$_} for (@$pref_path, $pre);
$self->throw_exception (
- "Unable to resolve prefetch $pre - join alias map does not contain an entry for path: "
+ "Unable to resolve prefetch '$pre' - join alias map does not contain an entry for path: "
. join (' -> ', @$pref_path, $pre)
) if (ref $p->{-join_aliases} ne 'ARRAY' or not @{$p->{-join_aliases}} );
@@ -1575,11 +1593,13 @@
sub throw_exception {
my $self = shift;
+
if (defined $self->schema) {
$self->schema->throw_exception(@_);
- } else {
- croak(@_);
}
+ else {
+ DBIx::Class::Exception->throw(@_);
+ }
}
=head2 source_info
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSourceHandle.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSourceHandle.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/ResultSourceHandle.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -78,8 +78,9 @@
my $to_serialize = { %$self };
- my $class = $self->schema->class($self->source_moniker);
- $to_serialize->{schema} = $class;
+ delete $to_serialize->{schema};
+ $to_serialize->{_frozen_from_class} = $self->schema->class($self->source_moniker);
+
return (Storable::freeze($to_serialize));
}
@@ -93,10 +94,10 @@
sub STORABLE_thaw {
- my ($self, $cloning,$ice) = @_;
+ my ($self, $cloning, $ice) = @_;
%$self = %{ Storable::thaw($ice) };
- my $class = delete $self->{schema};
+ my $class = delete $self->{_frozen_from_class};
if( $thaw_schema ) {
$self->{schema} = $thaw_schema;
}
@@ -105,7 +106,8 @@
$self->{schema} = $rs->schema if $rs;
}
- carp "Unable to restore schema" unless $self->{schema};
+ carp "Unable to restore schema. Look at 'freeze' and 'thaw' methods in DBIx::Class::Schema."
+ unless $self->{schema};
}
=head1 AUTHOR
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Row.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Row.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Row.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -4,9 +4,9 @@
use warnings;
use base qw/DBIx::Class/;
-use Carp::Clan qw/^DBIx::Class/;
+
+use DBIx::Class::Exception;
use Scalar::Util ();
-use Scope::Guard;
###
### Internal method
@@ -168,7 +168,8 @@
foreach my $key (keys %$attrs) {
if (ref $attrs->{$key}) {
## Can we extract this lot to use with update(_or .. ) ?
- confess "Can't do multi-create without result source" unless $source;
+ $new->throw_exception("Can't do multi-create without result source")
+ unless $source;
my $info = $source->relationship_info($key);
if ($info && $info->{attrs}{accessor}
&& $info->{attrs}{accessor} eq 'single')
@@ -1330,11 +1331,13 @@
sub throw_exception {
my $self=shift;
+
if (ref $self && ref $self->result_source && $self->result_source->schema) {
- $self->result_source->schema->throw_exception(@_);
- } else {
- croak(@_);
+ $self->result_source->schema->throw_exception(@_)
}
+ else {
+ DBIx::Class::Exception->throw(@_);
+ }
}
=head2 id
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/SQLAHacks.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/SQLAHacks.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/SQLAHacks.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -47,53 +47,7 @@
$self;
}
-# Some databases (sqlite) do not handle multiple parenthesis
-# around in/between arguments. A tentative x IN ( (1, 2 ,3) )
-# is interpreted as x IN 1 or something similar.
-#
-# Since we currently do not have access to the SQLA AST, resort
-# to barbaric mutilation of any SQL supplied in literal form
-sub _strip_outer_paren {
- my ($self, $arg) = @_;
- return $self->_SWITCH_refkind ($arg, {
- ARRAYREFREF => sub {
- $$arg->[0] = __strip_outer_paren ($$arg->[0]);
- return $arg;
- },
- SCALARREF => sub {
- return \__strip_outer_paren( $$arg );
- },
- FALLBACK => sub {
- return $arg
- },
- });
-}
-
-sub __strip_outer_paren {
- my $sql = shift;
-
- if ($sql and not ref $sql) {
- while ($sql =~ /^ \s* \( (.*) \) \s* $/x ) {
- $sql = $1;
- }
- }
-
- return $sql;
-}
-
-sub _where_field_IN {
- my ($self, $lhs, $op, $rhs) = @_;
- $rhs = $self->_strip_outer_paren ($rhs);
- return $self->SUPER::_where_field_IN ($lhs, $op, $rhs);
-}
-
-sub _where_field_BETWEEN {
- my ($self, $lhs, $op, $rhs) = @_;
- $rhs = $self->_strip_outer_paren ($rhs);
- return $self->SUPER::_where_field_BETWEEN ($lhs, $op, $rhs);
-}
-
# Slow but ANSI standard Limit/Offset support. DB2 uses this
sub _RowNumberOver {
my ($self, $sql, $order, $rows, $offset ) = @_;
@@ -508,16 +462,22 @@
foreach my $j (@join) {
my ($to, $on) = @$j;
+
# check whether a join type exists
- my $join_clause = '';
my $to_jt = ref($to) eq 'ARRAY' ? $to->[0] : $to;
- if (ref($to_jt) eq 'HASH' and exists($to_jt->{-join_type})) {
- $join_clause = ' '.uc($to_jt->{-join_type}).' JOIN ';
- } else {
- $join_clause = ' JOIN ';
+ my $join_type;
+ if (ref($to_jt) eq 'HASH' and defined($to_jt->{-join_type})) {
+ $join_type = $to_jt->{-join_type};
+ $join_type =~ s/^\s+ | \s+$//xg;
}
- push(@sqlf, $join_clause);
+ $join_type = $self->{_default_jointype} if not defined $join_type;
+
+ my $join_clause = sprintf ('%s JOIN ',
+ $join_type ? ' ' . uc($join_type) : ''
+ );
+ push @sqlf, $join_clause;
+
if (ref $to eq 'ARRAY') {
push(@sqlf, '(', $self->_recurse_from(@$to), ')');
} else {
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Schema/Versioned.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Schema/Versioned.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Schema/Versioned.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -520,13 +520,11 @@
return;
}
- eval 'require SQL::Translator "0.09003"';
- if ($@) {
- $self->throw_exception("SQL::Translator 0.09003 required");
- }
+ $self->throw_exception($self->storage->_sqlt_version_error)
+ if (not $self->storage->_sqlt_version_ok);
- my $db_tr = SQL::Translator->new({
- add_drop_table => 1,
+ my $db_tr = SQL::Translator->new({
+ add_drop_table => 1,
parser => 'DBI',
parser_args => { dbh => $self->storage->dbh }
});
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Schema.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Schema.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Schema.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -814,7 +814,7 @@
$storage_class = 'DBIx::Class::Storage'.$storage_class
if $storage_class =~ m/^::/;
- eval "require ${storage_class};";
+ eval { $self->ensure_class_loaded ($storage_class) };
$self->throw_exception(
"No arguments to load_classes and couldn't load ${storage_class} ($@)"
) if $@;
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Serialize/Storable.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Serialize/Storable.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Serialize/Storable.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -7,10 +7,13 @@
my ($self, $cloning) = @_;
my $to_serialize = { %$self };
+ # The source is either derived from _source_handle or is
+ # reattached in the thaw handler below
delete $to_serialize->{result_source};
- delete $to_serialize->{related_resultsets};
- delete $to_serialize->{_inflated_column};
+ # Dynamic values, easy to recalculate
+ delete $to_serialize->{$_} for qw/related_resultsets _inflated_column/;
+
return (Storable::freeze($to_serialize));
}
@@ -18,8 +21,10 @@
my ($self, $cloning, $serialized) = @_;
%$self = %{ Storable::thaw($serialized) };
+
+ # if the handle went missing somehow, reattach
$self->result_source($self->result_source_instance)
- if $self->can('result_source_instance');
+ if !$self->_source_handle && $self->can('result_source_instance');
}
1;
Added: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/AutoCast.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/AutoCast.pm (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/AutoCast.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,74 @@
+package DBIx::Class::Storage::DBI::AutoCast;
+
+use strict;
+use warnings;
+
+use base qw/DBIx::Class::Storage::DBI/;
+use mro 'c3';
+
+__PACKAGE__->mk_group_accessors('simple' => 'auto_cast' );
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::AutoCast - Storage component for RDBMS requiring explicit placeholder typing
+
+=head1 SYNOPSIS
+
+ $schema->storage->auto_cast(1);
+
+=head1 DESCRIPTION
+
+In some combinations of RDBMS and DBD drivers (e.g. FreeTDS and Sybase)
+statements with values bound to columns or conditions that are not strings will
+throw implicit type conversion errors.
+
+As long as a column L<data_type|DBIx::Class::ResultSource/add_columns> is
+defined, and it resolves to a base RDBMS native type via L</_native_data_type> as
+defined in your Storage driver, the placeholder for this column will be
+converted to:
+
+ CAST(? as $mapped_type)
+
+=cut
+
+sub _prep_for_execute {
+ my $self = shift;
+ my ($op, $extra_bind, $ident, $args) = @_;
+
+ my ($sql, $bind) = $self->next::method (@_);
+
+# If we're using ::NoBindVars, there are no binds by this point so this code
+# gets skipped.
+ if ($self->auto_cast && @$bind) {
+ my $new_sql;
+ my @sql_part = split /\?/, $sql;
+ my $col_info = $self->_resolve_column_info($ident,[ map $_->[0], @$bind ]);
+
+ foreach my $bound (@$bind) {
+ my $col = $bound->[0];
+ my $type = $self->_native_data_type($col_info->{$col}{data_type});
+
+ foreach my $data (@{$bound}[1..$#$bound]) {
+ $new_sql .= shift(@sql_part) .
+ ($type ? "CAST(? AS $type)" : '?');
+ }
+ }
+ $new_sql .= join '', @sql_part;
+ $sql = $new_sql;
+ }
+
+ return ($sql, $bind);
+}
+
+
+=head1 AUTHOR
+
+See L<DBIx::Class/CONTRIBUTORS>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Cursor.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Cursor.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Cursor.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -5,6 +5,10 @@
use base qw/DBIx::Class::Cursor/;
+__PACKAGE__->mk_group_accessors('simple' =>
+ qw/sth/
+);
+
=head1 NAME
DBIx::Class::Storage::DBI::Cursor - Object representing a query cursor on a
@@ -73,24 +77,24 @@
&& $self->{attrs}{rows}
&& $self->{pos} >= $self->{attrs}{rows}
) {
- $self->{sth}->finish if $self->{sth}->{Active};
- delete $self->{sth};
+ $self->sth->finish if $self->sth->{Active};
+ $self->sth(undef);
$self->{done} = 1;
}
return if $self->{done};
- unless ($self->{sth}) {
- $self->{sth} = ($storage->_select(@{$self->{args}}))[1];
+ unless ($self->sth) {
+ $self->sth(($storage->_select(@{$self->{args}}))[1]);
if ($self->{attrs}{software_limit}) {
if (my $offset = $self->{attrs}{offset}) {
- $self->{sth}->fetch for 1 .. $offset;
+ $self->sth->fetch for 1 .. $offset;
}
}
}
- my @row = $self->{sth}->fetchrow_array;
+ my @row = $self->sth->fetchrow_array;
if (@row) {
$self->{pos}++;
} else {
- delete $self->{sth};
+ $self->sth(undef);
$self->{done} = 1;
}
return @row;
@@ -120,8 +124,8 @@
my ($storage, $dbh, $self) = @_;
$self->_check_dbh_gen;
- $self->{sth}->finish if $self->{sth}->{Active};
- delete $self->{sth};
+ $self->sth->finish if $self->sth && $self->sth->{Active};
+ $self->sth(undef);
my ($rv, $sth) = $storage->_select(@{$self->{args}});
return @{$sth->fetchall_arrayref};
}
@@ -146,17 +150,17 @@
my ($self) = @_;
# No need to care about failures here
- eval { $self->{sth}->finish if $self->{sth} && $self->{sth}->{Active} };
+ eval { $self->sth->finish if $self->sth && $self->sth->{Active} };
$self->_soft_reset;
+ return undef;
}
sub _soft_reset {
my ($self) = @_;
- delete $self->{sth};
+ $self->sth(undef);
delete $self->{done};
$self->{pos} = 0;
- return $self;
}
sub _check_dbh_gen {
@@ -173,7 +177,7 @@
# None of the reasons this would die matter if we're in DESTROY anyways
local $@;
- eval { $self->{sth}->finish if $self->{sth} && $self->{sth}->{Active} };
+ eval { $self->sth->finish if $self->sth && $self->sth->{Active} };
}
1;
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/MSSQL.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/MSSQL.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/MSSQL.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -14,30 +14,55 @@
__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks::MSSQL');
+sub _set_identity_insert {
+ my ($self, $table) = @_;
+
+ my $sql = sprintf (
+ 'SET IDENTITY_INSERT %s ON',
+ $self->sql_maker->_quote ($table),
+ );
+
+ my $dbh = $self->_get_dbh;
+ eval { $dbh->do ($sql) };
+ if ($@) {
+ $self->throw_exception (sprintf "Error executing '%s': %s",
+ $sql,
+ $dbh->errstr,
+ );
+ }
+}
+
+sub _unset_identity_insert {
+ my ($self, $table) = @_;
+
+ my $sql = sprintf (
+ 'SET IDENTITY_INSERT %s OFF',
+ $self->sql_maker->_quote ($table),
+ );
+
+ my $dbh = $self->_get_dbh;
+ $dbh->do ($sql);
+}
+
sub insert_bulk {
my $self = shift;
my ($source, $cols, $data) = @_;
- my $identity_insert = 0;
+ my $is_identity_insert = (List::Util::first
+ { $source->column_info ($_)->{is_auto_increment} }
+ (@{$cols})
+ )
+ ? 1
+ : 0;
- COLUMNS:
- foreach my $col (@{$cols}) {
- if ($source->column_info($col)->{is_auto_increment}) {
- $identity_insert = 1;
- last COLUMNS;
- }
+ if ($is_identity_insert) {
+ $self->_set_identity_insert ($source->name);
}
- if ($identity_insert) {
- my $table = $source->from;
- $self->_get_dbh->do("SET IDENTITY_INSERT $table ON");
- }
-
$self->next::method(@_);
- if ($identity_insert) {
- my $table = $source->from;
- $self->_get_dbh->do("SET IDENTITY_INSERT $table OFF");
+ if ($is_identity_insert) {
+ $self->_unset_identity_insert ($source->name);
}
}
@@ -47,7 +72,7 @@
my $self = shift;
my ($source, $to_insert) = @_;
- my $updated_cols = {};
+ my $supplied_col_info = $self->_resolve_column_info($source, [keys %$to_insert] );
my %guid_cols;
my @pk_cols = $source->primary_columns;
@@ -55,10 +80,14 @@
@pk_cols{@pk_cols} = ();
my @pk_guids = grep {
+ $source->column_info($_)->{data_type}
+ &&
$source->column_info($_)->{data_type} =~ /^uniqueidentifier/i
} @pk_cols;
my @auto_guids = grep {
+ $source->column_info($_)->{data_type}
+ &&
$source->column_info($_)->{data_type} =~ /^uniqueidentifier/i
&&
$source->column_info($_)->{auto_nextval}
@@ -67,13 +96,28 @@
my @get_guids_for =
grep { not exists $to_insert->{$_} } (@pk_guids, @auto_guids);
+ my $updated_cols = {};
+
for my $guid_col (@get_guids_for) {
my ($new_guid) = $self->_get_dbh->selectrow_array('SELECT NEWID()');
$updated_cols->{$guid_col} = $to_insert->{$guid_col} = $new_guid;
}
+ my $is_identity_insert = (List::Util::first { $_->{is_auto_increment} } (values %$supplied_col_info) )
+ ? 1
+ : 0;
+
+ if ($is_identity_insert) {
+ $self->_set_identity_insert ($source->name);
+ }
+
$updated_cols = { %$updated_cols, %{ $self->next::method(@_) } };
+ if ($is_identity_insert) {
+ $self->_unset_identity_insert ($source->name);
+ }
+
+
return $updated_cols;
}
@@ -87,7 +131,9 @@
for my $col (keys %$fields) {
# $ident is a result source object with INSERT/UPDATE ops
- if ($ident->column_info ($col)->{data_type} =~ /^money\z/i) {
+ if ($ident->column_info ($col)->{data_type}
+ &&
+ $ident->column_info ($col)->{data_type} =~ /^money\z/i) {
my $val = $fields->{$col};
$fields->{$col} = \['CAST(? AS MONEY)', [ $col => $val ]];
}
@@ -99,14 +145,6 @@
if ($op eq 'insert') {
$sql .= ';SELECT SCOPE_IDENTITY()';
- my $col_info = $self->_resolve_column_info($ident, [map $_->[0], @{$bind}]);
- if (List::Util::first { $_->{is_auto_increment} } (values %$col_info) ) {
-
- my $table = $ident->from;
- my $identity_insert_on = "SET IDENTITY_INSERT $table ON";
- my $identity_insert_off = "SET IDENTITY_INSERT $table OFF";
- $sql = "$identity_insert_on; $sql; $identity_insert_off";
- }
}
return ($sql, $bind);
@@ -192,6 +230,8 @@
=head1 IMPLEMENTATION NOTES
+=head2 IDENTITY information
+
Microsoft SQL Server supports three methods of retrieving the IDENTITY
value for inserted row: IDENT_CURRENT, @@IDENTITY, and SCOPE_IDENTITY().
SCOPE_IDENTITY is used here because it is the safest. However, it must
@@ -210,6 +250,16 @@
inserts into another table with an identity will give erroneous results on
recent versions of SQL Server.
+=head2 identity insert
+
+Be aware that we have tried to make things as simple as possible for our users.
+For MSSQL that means that when a user tries to create a row, while supplying an
+explicit value for an autoincrementing column, we will try to issue the
+appropriate database call to make this possible, namely C<SET IDENTITY_INSERT
+$table_name ON>. Unfortunately this operation in MSSQL requires the
+C<db_ddladmin> privilege, which is normally not included in the standard
+write-permissions.
+
=head1 AUTHOR
See L<DBIx::Class/CONTRIBUTORS>.
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/NoBindVars.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/NoBindVars.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -40,24 +40,32 @@
sub _prep_for_execute {
my $self = shift;
- my ($op, $extra_bind, $ident) = @_;
-
my ($sql, $bind) = $self->next::method(@_);
- # stringify args, quote via $dbh, and manually insert
+ # stringify bind args, quote via $dbh, and manually insert
+ #my ($op, $extra_bind, $ident, $args) = @_;
+ my $ident = $_[2];
my @sql_part = split /\?/, $sql;
my $new_sql;
+ my $col_info = $self->_resolve_column_info($ident, [ map $_->[0], @$bind ]);
+
foreach my $bound (@$bind) {
my $col = shift @$bound;
- my $datatype = 'FIXME!!!';
+
+ my $datatype = $col_info->{$col}{data_type};
+
foreach my $data (@$bound) {
- if(ref $data) {
- $data = ''.$data;
- }
- $data = $self->_dbh->quote($data);
- $new_sql .= shift(@sql_part) . $data;
+ $data = ''.$data if ref $data;
+
+ $data = $self->_prep_interpolated_value($datatype, $data)
+ if $datatype;
+
+ $data = $self->_dbh->quote($data)
+ unless $self->interpolate_unquoted($datatype, $data);
+
+ $new_sql .= shift(@sql_part) . $data;
}
}
$new_sql .= join '', @sql_part;
@@ -65,12 +73,44 @@
return ($new_sql, []);
}
+=head2 interpolate_unquoted
+
+This method is called by L</_prep_for_execute> for every column in
+order to determine if its value should be quoted or not. The arguments
+are the current column data type and the actual bind value. The return
+value is interpreted as: true - do not quote, false - do quote. You should
+override this in your Storage::DBI::<database> subclass, if your RDBMS
+does not like quotes around certain datatypes (e.g. Sybase and integer
+columns). The default method always returns false (do quote).
+
+ WARNING!!!
+
+ Always validate that the bind-value is valid for the current datatype.
+ Otherwise you may very well open the door to SQL injection attacks.
+
+=cut
+
+sub interpolate_unquoted {
+ #my ($self, $datatype, $value) = @_;
+ return 0;
+}
+
+=head2 _prep_interpolated_value
+
+Given a datatype and the value to be inserted directly into a SQL query, returns
+the necessary string to represent that value (by e.g. adding a '$' sign)
+
+=cut
+
+sub _prep_interpolated_value {
+ #my ($self, $datatype, $value) = @_;
+ return $_[2];
+}
+
=head1 AUTHORS
-Brandon Black <blblack at gmail.com>
+See L<DBIx::Class/CONTRIBUTORS>
-Trym Skaar <trym at tryms.no>
-
=head1 LICENSE
You may distribute this code under the same terms as Perl itself.
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -5,7 +5,6 @@
use base qw/DBIx::Class::Storage::DBI::MSSQL/;
use mro 'c3';
-use Carp::Clan qw/^DBIx::Class/;
use List::Util();
use Scalar::Util ();
@@ -62,7 +61,7 @@
my $self = shift;
if (ref($self->_dbi_connect_info->[0]) eq 'CODE') {
- croak 'cannot set DBI attributes on a CODE ref connect_info';
+ $self->throw_exception ('cannot set DBI attributes on a CODE ref connect_info');
}
my $dbi_attrs = $self->_dbi_connect_info->[-1];
@@ -91,7 +90,7 @@
$dbh->do('SELECT @@IDENTITY');
};
if ($@) {
- croak <<'EOF';
+ $self->throw_exception (<<'EOF');
Your drivers do not seem to support dynamic cursors (odbc_cursortype => 2),
if you're using FreeTDS, make sure to set tds_version to 8.0 or greater.
@@ -102,12 +101,18 @@
$self->_identity_method('@@identity');
}
-sub _rebless {
- no warnings 'uninitialized';
+sub _init {
my $self = shift;
- if (ref($self->_dbi_connect_info->[0]) ne 'CODE' &&
- eval { $self->_dbi_connect_info->[-1]{odbc_cursortype} } == 2) {
+ no warnings qw/uninitialized/;
+
+ if (
+ ref($self->_dbi_connect_info->[0]) ne 'CODE'
+ &&
+ ref ($self->_dbi_connect_info->[-1]) eq 'HASH'
+ &&
+ $self->_dbi_connect_info->[-1]{odbc_cursortype} == 2
+ ) {
$self->_set_dynamic_cursors;
return;
}
@@ -159,7 +164,7 @@
my $dsn = $self->_dbi_connect_info->[0];
if (ref($dsn) eq 'CODE') {
- croak 'cannot change the DBI DSN on a CODE ref connect_info';
+ $self->throw_exception('cannot change the DBI DSN on a CODE ref connect_info');
}
if ($dsn !~ /MARS_Connection=/) {
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Oracle/Generic.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -206,12 +206,6 @@
"alter session set nls_timestamp_tz_format='$timestamp_tz_format'");
}
-sub _svp_begin {
- my ($self, $name) = @_;
-
- $self->_get_dbh->do("SAVEPOINT $name");
-}
-
=head2 source_bind_attributes
Handle LOB types in Oracle. Under a certain size (4k?), you can get away
@@ -256,6 +250,12 @@
return \%bind_attributes;
}
+sub _svp_begin {
+ my ($self, $name) = @_;
+
+ $self->_get_dbh->do("SAVEPOINT $name");
+}
+
# Oracle automatically releases a savepoint when you start another one with the
# same name.
sub _svp_release { 1 }
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Oracle.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Oracle.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Oracle.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -19,10 +19,8 @@
? 'DBIx::Class::Storage::DBI::Oracle::WhereJoins'
: 'DBIx::Class::Storage::DBI::Oracle::Generic';
- # Load and rebless
- eval "require $class";
-
- bless $self, $class unless $@;
+ $self->ensure_class_loaded ($class);
+ bless $self, $class;
}
}
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Pg.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Pg.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Pg.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -19,90 +19,120 @@
$sub->();
}
+sub last_insert_id {
+ my ($self,$source,@cols) = @_;
+
+ my @values;
+
+ for my $col (@cols) {
+ my $seq = ( $source->column_info($col)->{sequence} ||= $self->dbh_do('_dbh_get_autoinc_seq', $source, $col) )
+ or $self->throw_exception( "could not determine sequence for "
+ . $source->name
+ . ".$col, please consider adding a "
+ . "schema-qualified sequence to its column info"
+ );
+
+ push @values, $self->_dbh_last_insert_id ($self->_dbh, $seq);
+ }
+
+ return @values;
+}
+
+# there seems to be absolutely no reason to have this as a separate method,
+# but leaving it intact in case someone is already overriding it
sub _dbh_last_insert_id {
my ($self, $dbh, $seq) = @_;
$dbh->last_insert_id(undef, undef, undef, undef, {sequence => $seq});
}
-sub last_insert_id {
- my ($self,$source,$col) = @_;
- my $seq = ($source->column_info($col)->{sequence} ||= $self->get_autoinc_seq($source,$col));
- $self->throw_exception("could not fetch primary key for " . $source->name . ", could not "
- . "get autoinc sequence for $col (check that table and column specifications are correct "
- . "and in the correct case)") unless defined $seq;
- $self->dbh_do('_dbh_last_insert_id', $seq);
-}
-sub _get_pg_search_path {
- my ($self,$dbh) = @_;
- # cache the search path as ['schema','schema',...] in the storage
- # obj
- $self->{_pg_search_path} ||= do {
- my @search_path;
- my ($sp_string) = $dbh->selectrow_array('SHOW search_path');
- while( $sp_string =~ s/("[^"]+"|[^,]+),?// ) {
- unless( defined $1 and length $1 ) {
- $self->throw_exception("search path sanity check failed: '$1'")
- }
- push @search_path, $1;
- }
- \@search_path
- };
-}
-
sub _dbh_get_autoinc_seq {
- my ($self, $dbh, $schema, $table, @pri) = @_;
+ my ($self, $dbh, $source, $col) = @_;
- # get the list of postgres schemas to search. if we have a schema
- # specified, use that. otherwise, use the search path
- my @search_path;
- if( defined $schema and length $schema ) {
- @search_path = ( $schema );
- } else {
- @search_path = @{ $self->_get_pg_search_path($dbh) };
+ my $schema;
+ my $table = $source->name;
+
+ # deref table name if it needs it
+ $table = $$table
+ if ref $table eq 'SCALAR';
+
+ # parse out schema name if present
+ if( $table =~ /^(.+)\.(.+)$/ ) {
+ ( $schema, $table ) = ( $1, $2 );
}
- foreach my $search_schema (@search_path) {
- foreach my $col (@pri) {
- my $info = $dbh->column_info(undef,$search_schema,$table,$col)->fetchrow_hashref;
- if($info) {
- # if we get here, we have definitely found the right
- # column.
- if( defined $info->{COLUMN_DEF} and
- $info->{COLUMN_DEF}
- =~ /^nextval\(+'([^']+)'::(?:text|regclass)\)/i
- ) {
- my $seq = $1;
- return $seq =~ /\./ ? $seq : $info->{TABLE_SCHEM} . "." . $seq;
- } else {
- # we have found the column, but cannot figure out
- # the nextval seq
- return;
- }
- }
- }
+ # use DBD::Pg to fetch the column info if it is recent enough to
+ # work. otherwise, use custom SQL
+ my $seq_expr = $DBD::Pg::VERSION >= 2.015001
+ ? eval{ $dbh->column_info(undef,$schema,$table,$col)->fetchrow_hashref->{COLUMN_DEF} }
+ : $self->_dbh_get_column_default( $dbh, $schema, $table, $col );
+
+ # if no default value is set on the column, or if we can't parse the
+ # default value as a sequence, throw.
+ unless ( defined $seq_expr and $seq_expr =~ /^nextval\(+'([^']+)'::(?:text|regclass)\)/i ){
+ $seq_expr = '' unless defined $seq_expr;
+ $schema = "$schema." if defined $schema && length $schema;
+ $self->throw_exception( "no sequence found for $schema$table.$col, check table definition, "
+ . "or explicitly set the 'sequence' for this column in the "
+ . $source->source_name
+ . " class"
+ );
}
- return;
+
+ return $1;
}
-sub get_autoinc_seq {
- my ($self,$source,$col) = @_;
+# custom method for fetching column default, since column_info has a
+# bug with older versions of DBD::Pg
+sub _dbh_get_column_default {
+ my ( $self, $dbh, $schema, $table, $col ) = @_;
- my @pri = $source->primary_columns;
+ # Build and execute a query into the pg_catalog to find the Pg
+ # expression for the default value for this column in this table.
+ # If the table name is schema-qualified, query using that specific
+ # schema name.
- my $schema;
- my $table = $source->name;
+ # Otherwise, find the table in the standard Postgres way, using the
+ # search path. This is done with the pg_catalog.pg_table_is_visible
+ # function, which returns true if a given table is 'visible',
+ # meaning the first table of that name to be found in the search
+ # path.
- if (ref $table eq 'SCALAR') {
- $table = $$table;
- }
- elsif ($table =~ /^(.+)\.(.+)$/) {
- ($schema, $table) = ($1, $2);
- }
+ # I *think* we can be assured that this query will always find the
+ # correct column according to standard Postgres semantics.
+ #
+ # -- rbuels
- $self->dbh_do('_dbh_get_autoinc_seq', $schema, $table, @pri);
+ my $sqlmaker = $self->sql_maker;
+ local $sqlmaker->{bindtype} = 'normal';
+
+ my ($where, @bind) = $sqlmaker->where ({
+ 'a.attnum' => {'>', 0},
+ 'c.relname' => $table,
+ 'a.attname' => $col,
+ -not_bool => 'a.attisdropped',
+ (defined $schema && length $schema)
+ ? ( 'n.nspname' => $schema )
+ : ( -bool => \'pg_catalog.pg_table_is_visible(c.oid)' )
+ });
+
+ my ($seq_expr) = $dbh->selectrow_array(<<EOS,undef,@bind);
+
+SELECT
+ (SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)
+ FROM pg_catalog.pg_attrdef d
+ WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef)
+FROM pg_catalog.pg_class c
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ JOIN pg_catalog.pg_attribute a ON a.attrelid = c.oid
+$where
+
+EOS
+
+ return $seq_expr;
}
+
sub sqlt_type {
return 'PostgreSQL';
}
@@ -151,6 +181,8 @@
1;
+__END__
+
=head1 NAME
DBIx::Class::Storage::DBI::Pg - Automatic primary key class for PostgreSQL
@@ -168,14 +200,18 @@
=head1 POSTGRESQL SCHEMA SUPPORT
-This supports multiple PostgreSQL schemas, with one caveat: for
-performance reasons, the schema search path is queried the first time it is
-needed and CACHED for subsequent uses.
+This driver supports multiple PostgreSQL schemas, with one caveat: for
+performance reasons, data about the search path, sequence names, and
+so forth is queried as needed and CACHED for subsequent uses.
-For this reason, you should do any necessary manipulation of the
-PostgreSQL search path BEFORE instantiating your schema object, or as
-part of the on_connect_do option to connect(), for example:
+For this reason, once your schema is instantiated, you should not
+change the PostgreSQL schema search path for that schema's database
+connection. If you do, Bad Things may happen.
+You should do any necessary manipulation of the search path BEFORE
+instantiating your schema object, or as part of the on_connect_do
+option to connect(), for example:
+
my $schema = My::Schema->connect
( $dsn,$user,$pass,
{ on_connect_do =>
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -5,6 +5,7 @@
use DBIx::Class::Storage::DBI::Replicated::Replicant;
use List::Util 'sum';
use Scalar::Util 'reftype';
+use DBI ();
use Carp::Clan qw/^DBIx::Class/;
use MooseX::Types::Moose qw/Num Int ClassName HashRef/;
@@ -137,6 +138,16 @@
},
);
+has next_unknown_replicant_id => (
+ is => 'rw',
+ metaclass => 'Counter',
+ isa => Int,
+ default => 1,
+ provides => {
+ inc => 'inc_unknown_replicant_id'
+ },
+);
+
=head1 METHODS
This class defines the following methods.
@@ -158,16 +169,45 @@
$connect_info = [ $connect_info ]
if reftype $connect_info ne 'ARRAY';
- croak "coderef replicant connect_info not supported"
- if ref $connect_info->[0] && reftype $connect_info->[0] eq 'CODE';
+ my $connect_coderef =
+ (reftype($connect_info->[0])||'') eq 'CODE' ? $connect_info->[0]
+ : (reftype($connect_info->[0])||'') eq 'HASH' &&
+ $connect_info->[0]->{dbh_maker};
- my $replicant = $self->connect_replicant($schema, $connect_info);
+ my $dsn;
+ my $replicant = do {
+# yes this is evil, but it usually only happens once (for coderefs)
+# this will fail if the coderef does not actually DBI::connect
+ no warnings 'redefine';
+ my $connect = \&DBI::connect;
+ local *DBI::connect = sub {
+ $dsn = $_[1];
+ goto $connect;
+ };
+ $self->connect_replicant($schema, $connect_info);
+ };
- my $key = $connect_info->[0];
- $key = $key->{dsn} if ref $key && reftype $key eq 'HASH';
- ($key) = ($key =~ m/^dbi\:.+\:(.+)$/);
+ my $key;
- $self->set_replicant( $key => $replicant);
+ if (!$dsn) {
+ if (!$connect_coderef) {
+ $dsn = $connect_info->[0];
+ $dsn = $dsn->{dsn} if (reftype($dsn)||'') eq 'HASH';
+ }
+ else {
+ # all attempts to get the DSN failed
+ $key = "UNKNOWN_" . $self->next_unknown_replicant_id;
+ $self->inc_unknown_replicant_id;
+ }
+ }
+ if ($dsn) {
+ $replicant->dsn($dsn);
+ ($key) = ($dsn =~ m/^dbi\:.+\:(.+)$/i);
+ }
+
+ $replicant->id($key);
+ $self->set_replicant($key => $replicant);
+
push @newly_created, $replicant;
}
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -3,7 +3,7 @@
use Moose::Role;
requires qw/_query_start/;
with 'DBIx::Class::Storage::DBI::Replicated::WithDSN';
-use MooseX::Types::Moose 'Bool';
+use MooseX::Types::Moose qw/Bool Str/;
use namespace::clean -except => 'meta';
@@ -52,6 +52,9 @@
default=>1,
);
+has dsn => (is => 'rw', isa => Str);
+has id => (is => 'rw', isa => Str);
+
=head1 METHODS
This class defines the following methods.
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated/WithDSN.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,6 +1,7 @@
package DBIx::Class::Storage::DBI::Replicated::WithDSN;
use Moose::Role;
+use Scalar::Util 'reftype';
requires qw/_query_start/;
use namespace::clean -except => 'meta';
@@ -30,11 +31,25 @@
around '_query_start' => sub {
my ($method, $self, $sql, @bind) = @_;
- my $dsn = $self->_dbi_connect_info->[0];
+
+ my $dsn = eval { $self->dsn } || $self->_dbi_connect_info->[0];
+
my($op, $rest) = (($sql=~m/^(\w+)(.+)$/),'NOP', 'NO SQL');
my $storage_type = $self->can('active') ? 'REPLICANT' : 'MASTER';
- $self->$method("$op [DSN_$storage_type=$dsn]$rest", @bind);
+ my $query = do {
+ if ((reftype($dsn)||'') ne 'CODE') {
+ "$op [DSN_$storage_type=$dsn]$rest";
+ }
+ elsif (my $id = eval { $self->id }) {
+ "$op [$storage_type=$id]$rest";
+ }
+ else {
+ "$op [$storage_type]$rest";
+ }
+ };
+
+ $self->$method($query, @bind);
};
=head1 ALSO SEE
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Replicated.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -17,9 +17,9 @@
my @didnt_load;
for my $module (keys %replication_required) {
- eval "use $module $replication_required{$module}";
- push @didnt_load, "$module $replication_required{$module}"
- if $@;
+ eval "use $module $replication_required{$module}";
+ push @didnt_load, "$module $replication_required{$module}"
+ if $@;
}
croak("@{[ join ', ', @didnt_load ]} are missing and are required for Replication")
@@ -33,7 +33,6 @@
use DBIx::Class::Storage::DBI::Replicated::Types qw/BalancerClassNamePart DBICSchema DBICStorageDBI/;
use MooseX::Types::Moose qw/ClassName HashRef Object/;
use Scalar::Util 'reftype';
-use Carp::Clan qw/^DBIx::Class/;
use Hash::Merge 'merge';
use namespace::clean -except => 'meta';
@@ -222,7 +221,7 @@
isa=>'DBIx::Class::Storage::DBI::Replicated::Pool',
lazy_build=>1,
handles=>[qw/
- connect_replicants
+ connect_replicants
replicants
has_replicants
/],
@@ -277,7 +276,7 @@
select
select_single
columns_info_for
- /],
+ /],
);
=head2 write_handler
@@ -290,9 +289,9 @@
is=>'ro',
isa=>Object,
lazy_build=>1,
- handles=>[qw/
+ handles=>[qw/
on_connect_do
- on_disconnect_do
+ on_disconnect_do
connect_info
throw_exception
sql_maker
@@ -300,8 +299,8 @@
create_ddl_dir
deployment_statements
datetime_parser
- datetime_parser_type
- build_datetime_parser
+ datetime_parser_type
+ build_datetime_parser
last_insert_id
insert
insert_bulk
@@ -316,19 +315,19 @@
sth
deploy
with_deferred_fk_checks
- dbh_do
+ dbh_do
reload_row
- with_deferred_fk_checks
+ with_deferred_fk_checks
_prep_for_execute
- backup
- is_datatype_numeric
- _count_select
- _subq_count_select
- _subq_update_delete
- svp_rollback
- svp_begin
- svp_release
+ backup
+ is_datatype_numeric
+ _count_select
+ _subq_count_select
+ _subq_update_delete
+ svp_rollback
+ svp_begin
+ svp_release
/],
);
@@ -364,7 +363,7 @@
);
$self->pool($self->_build_pool)
- if $self->pool;
+ if $self->pool;
}
if (@opts{qw/balancer_type balancer_args/}) {
@@ -376,7 +375,7 @@
);
$self->balancer($self->_build_balancer)
- if $self->balancer;
+ if $self->balancer;
}
$self->_master_connect_info_opts(\%opts);
@@ -413,9 +412,9 @@
my ($class, $schema, $storage_type_args, @args) = @_;
return {
- schema=>$schema,
- %$storage_type_args,
- @args
+ schema=>$schema,
+ %$storage_type_args,
+ @args
}
}
@@ -452,7 +451,7 @@
sub _build_balancer {
my $self = shift @_;
$self->create_balancer(
- pool=>$self->pool,
+ pool=>$self->pool,
master=>$self->master,
%{$self->balancer_args},
);
@@ -494,23 +493,23 @@
for my $r (@args) {
$r = [ $r ] unless reftype $r eq 'ARRAY';
- croak "coderef replicant connect_info not supported"
+ $self->throw_exception('coderef replicant connect_info not supported')
if ref $r->[0] && reftype $r->[0] eq 'CODE';
# any connect_info options?
my $i = 0;
$i++ while $i < @$r && (reftype($r->[$i])||'') ne 'HASH';
-# make one if none
+# make one if none
$r->[$i] = {} unless $r->[$i];
# merge if two hashes
my @hashes = @$r[$i .. $#{$r}];
- croak "invalid connect_info options"
+ $self->throw_exception('invalid connect_info options')
if (grep { reftype($_) eq 'HASH' } @hashes) != @hashes;
- croak "too many hashrefs in connect_info"
+ $self->throw_exception('too many hashrefs in connect_info')
if @hashes > 2;
my %opts = %{ merge(reverse @hashes) };
@@ -518,8 +517,15 @@
# delete them
splice @$r, $i+1, ($#{$r} - $i), ();
+# make sure master/replicants opts don't clash
+ my %master_opts = %{ $self->_master_connect_info_opts };
+ if (exists $opts{dbh_maker}) {
+ delete @master_opts{qw/dsn user password/};
+ }
+ delete $master_opts{dbh_maker};
+
# merge with master
- %opts = %{ merge(\%opts, $self->_master_connect_info_opts) };
+ %opts = %{ merge(\%opts, \%master_opts) };
# update
$r->[$i] = \%opts;
@@ -593,11 +599,11 @@
($result[0]) = ($coderef->(@args));
} else {
$coderef->(@args);
- }
+ }
};
##Reset to the original state
- $self->read_handler($current);
+ $self->read_handler($current);
##Exception testing has to come last, otherwise you might leave the
##read_handler set to master.
@@ -731,7 +737,7 @@
if(@_) {
foreach my $source ($self->all_storages) {
$source->debug(@_);
- }
+ }
}
return $self->master->debug;
}
@@ -747,7 +753,7 @@
if(@_) {
foreach my $source ($self->all_storages) {
$source->debugobj(@_);
- }
+ }
}
return $self->master->debugobj;
}
@@ -763,7 +769,7 @@
if(@_) {
foreach my $source ($self->all_storages) {
$source->debugfh(@_);
- }
+ }
}
return $self->master->debugfh;
}
@@ -779,7 +785,7 @@
if(@_) {
foreach my $source ($self->all_storages) {
$source->debugcb(@_);
- }
+ }
}
return $self->master->debugcb;
}
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server/NoBindVars.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -9,9 +9,8 @@
/;
use mro 'c3';
-sub _rebless {
+sub _init {
my $self = shift;
-
$self->disable_sth_caching(1);
}
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/Sybase/Microsoft_SQL_Server.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -13,7 +13,7 @@
my $self = shift;
my $dbh = $self->_get_dbh;
- if (not $self->_placeholders_supported) {
+ if (not $self->_typeless_placeholders_supported) {
bless $self,
'DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars';
$self->_rebless;
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/mysql.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/mysql.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI/mysql.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -33,6 +33,21 @@
$dbh->{mysql_insertid};
}
+# we need to figure out what mysql version we're running
+sub sql_maker {
+ my $self = shift;
+
+ unless ($self->_sql_maker) {
+ my $maker = $self->next::method (@_);
+
+ # mysql 3 does not understand a bare JOIN
+ my $mysql_ver = $self->_get_dbh->get_info(18);
+ $maker->{_default_jointype} = 'INNER' if $mysql_ver =~ /^3/;
+ }
+
+ return $self->_sql_maker;
+}
+
sub sqlt_type {
return 'MySQL';
}
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/DBI.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -14,6 +14,11 @@
use Scalar::Util();
use List::Util();
+# what version of sqlt do we require if deploy() without a ddl_dir is invoked
+# when changing also adjust the corresponding author_require in Makefile.PL
+my $minimum_sqlt_version = '0.11002';
+
+
__PACKAGE__->mk_group_accessors('simple' =>
qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid
_conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/
@@ -44,8 +49,15 @@
my $schema = MySchema->connect('dbi:SQLite:my.db');
$schema->storage->debug(1);
- $schema->dbh_do("DROP TABLE authors");
+ my @stuff = $schema->storage->dbh_do(
+ sub {
+ my ($storage, $dbh, @args) = @_;
+ $dbh->do("DROP TABLE authors");
+ },
+ @column_list
+ );
+
$schema->resultset('Book')->search({
written_on => $schema->storage->datetime_parser(DateTime->now)
});
@@ -112,6 +124,12 @@
%extra_attributes,
}];
+ $connect_info_args = [{
+ dbh_maker => sub { DBI->connect (...) },
+ %dbi_attributes,
+ %extra_attributes,
+ }];
+
This is particularly useful for L<Catalyst> based applications, allowing the
following config (L<Config::General> style):
@@ -125,6 +143,10 @@
</connect_info>
</Model::DB>
+The C<dsn>/C<user>/C<password> combination can be substituted by the
+C<dbh_maker> key whose value is a coderef that returns a connected
+L<DBI database handle|DBI/connect>
+
=back
Please note that the L<DBI> docs recommend that you always explicitly
@@ -337,6 +359,12 @@
# Connect via subref
->connect_info([ sub { DBI->connect(...) } ]);
+ # Connect via subref in hashref
+ ->connect_info([{
+ dbh_maker => sub { DBI->connect(...) },
+ on_connect_do => 'alter session ...',
+ }]);
+
# A bit more complicated
->connect_info(
[
@@ -407,9 +435,22 @@
elsif (ref $args[0] eq 'HASH') { # single hashref (i.e. Catalyst config)
%attrs = %{$args[0]};
@args = ();
- for (qw/password user dsn/) {
- unshift @args, delete $attrs{$_};
+ if (my $code = delete $attrs{dbh_maker}) {
+ @args = $code;
+
+ my @ignored = grep { delete $attrs{$_} } (qw/dsn user password/);
+ if (@ignored) {
+ carp sprintf (
+ 'Attribute(s) %s in connect_info were ignored, as they can not be applied '
+ . "to the result of 'dbh_maker'",
+
+ join (', ', map { "'$_'" } (@ignored) ),
+ );
+ }
}
+ else {
+ @args = delete @attrs{qw/dsn user password/};
+ }
}
else { # otherwise assume dsn/user/password + \%attrs + \%extra_attrs
%attrs = (
@@ -527,7 +568,7 @@
my $self = shift;
my $code = shift;
- my $dbh = $self->_dbh;
+ my $dbh = $self->_get_dbh;
return $self->$code($dbh, @_) if $self->{_in_dbh_do}
|| $self->{transaction_depth};
@@ -538,11 +579,6 @@
my $want_array = wantarray;
eval {
- $self->_verify_pid if $dbh;
- if(!$self->_dbh) {
- $self->_populate_dbh;
- $dbh = $self->_dbh;
- }
if($want_array) {
@result = $self->$code($dbh, @_);
@@ -589,8 +625,7 @@
my $tried = 0;
while(1) {
eval {
- $self->_verify_pid if $self->_dbh;
- $self->_populate_dbh if !$self->_dbh;
+ $self->_get_dbh;
$self->txn_begin;
if($want_array) {
@@ -651,7 +686,8 @@
$self->_do_connection_actions(disconnect_call_ => $_) for @actions;
- $self->_dbh->rollback unless $self->_dbh_autocommit;
+ $self->_dbh_rollback unless $self->_dbh_autocommit;
+
$self->_dbh->disconnect;
$self->_dbh(undef);
$self->{_dbh_gen}++;
@@ -779,6 +815,7 @@
# this is the internal "get dbh or connect (don't check)" method
sub _get_dbh {
my $self = shift;
+ $self->_verify_pid if $self->_dbh;
$self->_populate_dbh unless $self->_dbh;
return $self->_dbh;
}
@@ -804,7 +841,9 @@
return $self->_sql_maker;
}
+# nothing to do by default
sub _rebless {}
+sub _init {}
sub _populate_dbh {
my ($self) = @_;
@@ -839,18 +878,26 @@
my ($self) = @_;
if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
- my $started_unconnected = 0;
+ my $started_connected = 0;
local $self->{_in_determine_driver} = 1;
if (ref($self) eq __PACKAGE__) {
my $driver;
if ($self->_dbh) { # we are connected
$driver = $self->_dbh->{Driver}{Name};
+ $started_connected = 1;
} else {
- # try to use dsn to not require being connected, the driver may still
- # force a connection in _rebless to determine version
- ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
- $started_unconnected = 1;
+ # if connect_info is a CODEREF, we have no choice but to connect
+ if (ref $self->_dbi_connect_info->[0] &&
+ Scalar::Util::reftype($self->_dbi_connect_info->[0]) eq 'CODE') {
+ $self->_populate_dbh;
+ $driver = $self->_dbh->{Driver}{Name};
+ }
+ else {
+ # try to use dsn to not require being connected, the driver may still
+ # force a connection in _rebless to determine version
+ ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
+ }
}
my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
@@ -863,8 +910,10 @@
$self->_driver_determined(1);
+ $self->_init; # run driver-specific initializations
+
$self->_run_connection_actions
- if $started_unconnected && defined $self->_dbh;
+ if !$started_connected && defined $self->_dbh;
}
}
@@ -922,7 +971,7 @@
my @bind = map { [ undef, $_ ] } @do_args;
$self->_query_start($sql, @bind);
- $self->_dbh->do($sql, $attrs, @do_args);
+ $self->_get_dbh->do($sql, $attrs, @do_args);
$self->_query_end($sql, @bind);
}
@@ -958,6 +1007,8 @@
$weak_self->throw_exception("DBI Exception: $_[0]");
}
else {
+ # the handler may be invoked by something totally out of
+ # the scope of DBIC
croak ("DBI Exception: $_[0]");
}
};
@@ -1067,27 +1118,36 @@
if($self->{transaction_depth} == 0) {
$self->debugobj->txn_begin()
if $self->debug;
-
- # being here implies we have AutoCommit => 1
- # if the user is utilizing txn_do - good for
- # him, otherwise we need to ensure that the
- # $dbh is healthy on BEGIN
- my $dbh_method = $self->{_in_dbh_do} ? '_dbh' : 'dbh';
- $self->$dbh_method->begin_work;
-
- } elsif ($self->auto_savepoint) {
+ $self->_dbh_begin_work;
+ }
+ elsif ($self->auto_savepoint) {
$self->svp_begin;
}
$self->{transaction_depth}++;
}
+sub _dbh_begin_work {
+ my $self = shift;
+
+ # if the user is utilizing txn_do - good for him, otherwise we need to
+ # ensure that the $dbh is healthy on BEGIN.
+ # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
+ # will be replaced by a failure of begin_work itself (which will be
+ # then retried on reconnect)
+ if ($self->{_in_dbh_do}) {
+ $self->_dbh->begin_work;
+ } else {
+ $self->dbh_do(sub { $_[1]->begin_work });
+ }
+}
+
sub txn_commit {
my $self = shift;
if ($self->{transaction_depth} == 1) {
my $dbh = $self->_dbh;
$self->debugobj->txn_commit()
if ($self->debug);
- $dbh->commit;
+ $self->_dbh_commit;
$self->{transaction_depth} = 0
if $self->_dbh_autocommit;
}
@@ -1098,6 +1158,11 @@
}
}
+sub _dbh_commit {
+ my $self = shift;
+ $self->_dbh->commit;
+}
+
sub txn_rollback {
my $self = shift;
my $dbh = $self->_dbh;
@@ -1107,7 +1172,7 @@
if ($self->debug);
$self->{transaction_depth} = 0
if $self->_dbh_autocommit;
- $dbh->rollback;
+ $self->_dbh_rollback;
}
elsif($self->{transaction_depth} > 1) {
$self->{transaction_depth}--;
@@ -1130,6 +1195,11 @@
}
}
+sub _dbh_rollback {
+ my $self = shift;
+ $self->_dbh->rollback;
+}
+
# This used to be the top-half of _execute. It was split out to make it
# easier to override in NoBindVars without duping the rest. It takes up
# all of _execute's args, and emits $sql, @bind.
@@ -1224,7 +1294,7 @@
sub _execute {
my $self = shift;
- $self->dbh_do('_dbh_execute', @_)
+ $self->dbh_do('_dbh_execute', @_); # retry over disconnects
}
sub insert {
@@ -1266,13 +1336,18 @@
## only prepped once.
sub insert_bulk {
my ($self, $source, $cols, $data) = @_;
+
+# redispatch to insert_bulk method of storage we reblessed into, if necessary
+ if (not $self->_driver_determined) {
+ $self->_determine_driver;
+ goto $self->can('insert_bulk');
+ }
+
my %colvalues;
my $table = $source->from;
@colvalues{@$cols} = (0..$#$cols);
my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues);
- $self->_determine_driver;
-
$self->_query_start( $sql, @bind );
my $sth = $self->sth($sql);
@@ -1315,6 +1390,7 @@
local $Data::Dumper::Indent = 1;
local $Data::Dumper::Useqq = 1;
local $Data::Dumper::Quotekeys = 0;
+ local $Data::Dumper::Sortkeys = 1;
$self->throw_exception(sprintf "%s for populate slice:\n%s",
$tuple_status->[$i][1],
@@ -1330,12 +1406,17 @@
}
sub update {
- my $self = shift @_;
- my $source = shift @_;
- $self->_determine_driver;
+ my ($self, $source, @args) = @_;
+
+# redispatch to update method of storage we reblessed into, if necessary
+ if (not $self->_driver_determined) {
+ $self->_determine_driver;
+ goto $self->can('update');
+ }
+
my $bind_attributes = $self->source_bind_attributes($source);
- return $self->_execute('update' => [], $source, $bind_attributes, @_);
+ return $self->_execute('update' => [], $source, $bind_attributes, @args);
}
@@ -1577,179 +1658,224 @@
sub _adjust_select_args_for_complex_prefetch {
my ($self, $from, $select, $where, $attrs) = @_;
+ $self->throw_exception ('Nothing to prefetch... how did we get here?!')
+ if not @{$attrs->{_prefetch_select}};
+
$self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute')
- if (ref $from ne 'ARRAY');
+ if (ref $from ne 'ARRAY' || ref $from->[0] ne 'HASH' || ref $from->[1] ne 'ARRAY');
- # copies for mangling
- $from = [ @$from ];
- $select = [ @$select ];
- $attrs = { %$attrs };
- # separate attributes
- my $sub_attrs = { %$attrs };
- delete $attrs->{$_} for qw/where bind rows offset group_by having/;
- delete $sub_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
+ # generate inner/outer attribute lists, remove stuff that doesn't apply
+ my $outer_attrs = { %$attrs };
+ delete $outer_attrs->{$_} for qw/where bind rows offset group_by having/;
- my $select_root_alias = $attrs->{alias};
- my $sql_maker = $self->sql_maker;
+ my $inner_attrs = { %$attrs };
+ delete $inner_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/;
- # create subquery select list - consider only stuff *not* brought in by the prefetch
- my $sub_select = [];
- my $sub_group_by;
- for my $i (0 .. @{$attrs->{select}} - @{$attrs->{_prefetch_select}} - 1) {
- my $sel = $attrs->{select}[$i];
- # alias any functions to the dbic-side 'as' label
- # adjust the outer select accordingly
+ # bring over all non-collapse-induced order_by into the inner query (if any)
+ # the outer one will have to keep them all
+ delete $inner_attrs->{order_by};
+ if (my $ord_cnt = @{$outer_attrs->{order_by}} - @{$outer_attrs->{_collapse_order_by}} ) {
+ $inner_attrs->{order_by} = [
+ @{$outer_attrs->{order_by}}[ 0 .. $ord_cnt - 1]
+ ];
+ }
+
+
+ # generate the inner/outer select lists
+ # for inside we consider only stuff *not* brought in by the prefetch
+ # on the outside we substitute any function for its alias
+ my $outer_select = [ @$select ];
+ my $inner_select = [];
+ for my $i (0 .. ( @$outer_select - @{$outer_attrs->{_prefetch_select}} - 1) ) {
+ my $sel = $outer_select->[$i];
+
if (ref $sel eq 'HASH' ) {
$sel->{-as} ||= $attrs->{as}[$i];
- $select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "select_$i") );
+ $outer_select->[$i] = join ('.', $attrs->{alias}, ($sel->{-as} || "inner_column_$i") );
}
- push @$sub_select, $sel;
+ push @$inner_select, $sel;
}
- # bring over all non-collapse-induced order_by into the inner query (if any)
- # the outer one will have to keep them all
- delete $sub_attrs->{order_by};
- if (my $ord_cnt = @{$attrs->{order_by}} - @{$attrs->{_collapse_order_by}} ) {
- $sub_attrs->{order_by} = [
- @{$attrs->{order_by}}[ 0 .. $ord_cnt - 1]
- ];
- }
+ # normalize a copy of $from, so it will be easier to work with further
+ # down (i.e. promote the initial hashref to an AoH)
+ $from = [ @$from ];
+ $from->[0] = [ $from->[0] ];
+ my %original_join_info = map { $_->[0]{-alias} => $_->[0] } (@$from);
- # mangle {from}, keep in mind that $from is "headless" from here on
- my $join_root = shift @$from;
- my %inner_joins;
- my %join_info = map { $_->[0]{-alias} => $_->[0] } (@$from);
+ # decide which parts of the join will remain in either part of
+ # the outer/inner query
- # in complex search_related chains $select_root_alias may *not* be
- # 'me' so always include it in the inner join
- $inner_joins{$select_root_alias} = 1 if ($join_root->{-alias} ne $select_root_alias);
-
-
- # decide which parts of the join will remain on the inside
+ # First we compose a list of which aliases are used in restrictions
+ # (i.e. conditions/order/grouping/etc). Since we do not have
+ # introspectable SQLA, we fall back to ugly scanning of raw SQL for
+ # WHERE, and for pieces of ORDER BY in order to determine which aliases
+ # need to appear in the resulting sql.
+ # It may not be very efficient, but it's a reasonable stop-gap
+ # Also unqualified column names will not be considered, but more often
+ # than not this is actually ok
#
- # this is not a very viable optimisation, but it was written
- # before I realised this, so might as well remain. We can throw
- # away _any_ branches of the join tree that are:
- # 1) not mentioned in the condition/order
- # 2) left-join leaves (or left-join leaf chains)
- # Most of the join conditions will not satisfy this, but for real
- # complex queries some might, and we might make some RDBMS happy.
- #
- #
- # since we do not have introspectable SQLA, we fall back to ugly
- # scanning of raw SQL for WHERE, and for pieces of ORDER BY
- # in order to determine what goes into %inner_joins
- # It may not be very efficient, but it's a reasonable stop-gap
+ # In the same loop we enumerate part of the selection aliases, as
+ # it requires the same sqla hack for the time being
+ my ($restrict_aliases, $select_aliases, $prefetch_aliases);
{
# produce stuff unquoted, so it can be scanned
+ my $sql_maker = $self->sql_maker;
local $sql_maker->{quote_char};
my $sep = $self->_sql_maker_opts->{name_sep} || '.';
$sep = "\Q$sep\E";
- my @order_by = (map
+ my $non_prefetch_select_sql = $sql_maker->_recurse_fields ($inner_select);
+ my $prefetch_select_sql = $sql_maker->_recurse_fields ($outer_attrs->{_prefetch_select});
+ my $where_sql = $sql_maker->where ($where);
+ my $group_by_sql = $sql_maker->_order_by({
+ map { $_ => $inner_attrs->{$_} } qw/group_by having/
+ });
+ my @non_prefetch_order_by_chunks = (map
{ ref $_ ? $_->[0] : $_ }
- $sql_maker->_order_by_chunks ($sub_attrs->{order_by})
+ $sql_maker->_order_by_chunks ($inner_attrs->{order_by})
);
- my $where_sql = $sql_maker->where ($where);
- my $select_sql = $sql_maker->_recurse_fields ($sub_select);
- # sort needed joins
- for my $alias (keys %join_info) {
+ for my $alias (keys %original_join_info) {
+ my $seen_re = qr/\b $alias $sep/x;
- # any table alias found on a column name in where or order_by
- # gets included in %inner_joins
- # Also any parent joins that are needed to reach this particular alias
- for my $piece ($select_sql, $where_sql, @order_by ) {
- if ($piece =~ /\b $alias $sep/x) {
- $inner_joins{$alias} = 1;
+ for my $piece ($where_sql, $group_by_sql, @non_prefetch_order_by_chunks ) {
+ if ($piece =~ $seen_re) {
+ $restrict_aliases->{$alias} = 1;
}
}
+
+ if ($non_prefetch_select_sql =~ $seen_re) {
+ $select_aliases->{$alias} = 1;
+ }
+
+ if ($prefetch_select_sql =~ $seen_re) {
+ $prefetch_aliases->{$alias} = 1;
+ }
+
}
}
- # scan for non-leaf/non-left joins and mark as needed
- # also mark all ancestor joins that are needed to reach this particular alias
- # (e.g. join => { cds => 'tracks' } - tracks will bring cds too )
- #
- # traverse by the size of the -join_path i.e. reverse depth first
- for my $alias (sort { @{$join_info{$b}{-join_path}} <=> @{$join_info{$a}{-join_path}} } (keys %join_info) ) {
+ # Add any non-left joins to the restriction list (such joins are indeed restrictions)
+ for my $j (values %original_join_info) {
+ my $alias = $j->{-alias} or next;
+ $restrict_aliases->{$alias} = 1 if (
+ (not $j->{-join_type})
+ or
+ ($j->{-join_type} !~ /^left (?: \s+ outer)? $/xi)
+ );
+ }
- my $j = $join_info{$alias};
- $inner_joins{$alias} = 1 if (! $j->{-join_type} || ($j->{-join_type} !~ /^left$/i) );
-
- if ($inner_joins{$alias}) {
- $inner_joins{$_} = 1 for (@{$j->{-join_path}});
+ # mark all join parents as mentioned
+ # (e.g. join => { cds => 'tracks' } - tracks will need to bring cds too )
+ for my $collection ($restrict_aliases, $select_aliases) {
+ for my $alias (keys %$collection) {
+ $collection->{$_} = 1
+ for (@{ $original_join_info{$alias}{-join_path} || [] });
}
}
# construct the inner $from for the subquery
- my $inner_from = [ $join_root ];
+ my %inner_joins = (map { %{$_ || {}} } ($restrict_aliases, $select_aliases) );
+ my @inner_from;
for my $j (@$from) {
- push @$inner_from, $j if $inner_joins{$j->[0]{-alias}};
+ push @inner_from, $j if $inner_joins{$j->[0]{-alias}};
}
# if a multi-type join was needed in the subquery ("multi" is indicated by
# presence in {collapse}) - add a group_by to simulate the collapse in the subq
- unless ($sub_attrs->{group_by}) {
+ unless ($inner_attrs->{group_by}) {
for my $alias (keys %inner_joins) {
# the dot comes from some weirdness in collapse
# remove after the rewrite
if ($attrs->{collapse}{".$alias"}) {
- $sub_attrs->{group_by} ||= $sub_select;
+ $inner_attrs->{group_by} ||= $inner_select;
last;
}
}
}
+ # demote the inner_from head
+ $inner_from[0] = $inner_from[0][0];
+
# generate the subquery
my $subq = $self->_select_args_to_query (
- $inner_from,
- $sub_select,
+ \@inner_from,
+ $inner_select,
$where,
- $sub_attrs
+ $inner_attrs,
);
+
my $subq_joinspec = {
- -alias => $select_root_alias,
- -source_handle => $join_root->{-source_handle},
- $select_root_alias => $subq,
+ -alias => $attrs->{alias},
+ -source_handle => $inner_from[0]{-source_handle},
+ $attrs->{alias} => $subq,
};
- # Generate a new from (really just replace the join slot with the subquery)
- # Before we would start the outer chain from the subquery itself (i.e.
- # SELECT ... FROM (SELECT ... ) alias JOIN ..., but this turned out to be
- # a bad idea for search_related, as the root of the chain was effectively
- # lost (i.e. $artist_rs->search_related ('cds'... ) would result in alias
- # of 'cds', which would prevent from doing things like order_by artist.*)
- # See t/prefetch/via_search_related.t for a better idea
+ # Generate the outer from - this is relatively easy (really just replace
+ # the join slot with the subquery), with a major caveat - we can not
+ # join anything that is non-selecting (not part of the prefetch), but at
+ # the same time is a multi-type relationship, as it will explode the result.
+ #
+ # There are two possibilities here
+ # - either the join is non-restricting, in which case we simply throw it away
+ # - it is part of the restrictions, in which case we need to collapse the outer
+ # result by tackling yet another group_by to the outside of the query
+
+ # so first generate the outer_from, up to the substitution point
my @outer_from;
- if ($join_root->{-alias} eq $select_root_alias) { # just swap the root part and we're done
- @outer_from = (
- $subq_joinspec,
- @$from,
- )
+ while (my $j = shift @$from) {
+ if ($j->[0]{-alias} eq $attrs->{alias}) { # time to swap
+ push @outer_from, [
+ $subq_joinspec,
+ @{$j}[1 .. $#$j],
+ ];
+ last; # we'll take care of what's left in $from below
+ }
+ else {
+ push @outer_from, $j;
+ }
}
- else { # this is trickier
- @outer_from = ($join_root);
- for my $j (@$from) {
- if ($j->[0]{-alias} eq $select_root_alias) {
- push @outer_from, [
- $subq_joinspec,
- @{$j}[1 .. $#$j],
- ];
- }
- else {
- push @outer_from, $j;
- }
+ # see what's left - throw away if not selecting/restricting
+ # also throw in a group_by if restricting to guard against
+ # cross-join explosions
+ #
+ while (my $j = shift @$from) {
+ my $alias = $j->[0]{-alias};
+
+ if ($select_aliases->{$alias} || $prefetch_aliases->{$alias}) {
+ push @outer_from, $j;
}
+ elsif ($restrict_aliases->{$alias}) {
+ push @outer_from, $j;
+
+ # FIXME - this should be obviated by SQLA2, as I'll be able to
+ # have restrict_inner and restrict_outer... or something to that
+ # effect... I think...
+
+ # FIXME2 - I can't find a clean way to determine if a particular join
+ # is a multi - instead I am just treating everything as a potential
+ # explosive join (ribasushi)
+ #
+ # if (my $handle = $j->[0]{-source_handle}) {
+ # my $rsrc = $handle->resolve;
+ # ... need to bail out of the following if this is not a multi,
+ # as it will be much easier on the db ...
+
+ $outer_attrs->{group_by} ||= $outer_select;
+ # }
+ }
}
+ # demote the outer_from head
+ $outer_from[0] = $outer_from[0][0];
+
# This is totally horrific - the $where ends up in both the inner and outer query
# Unfortunately not much can be done until SQLA2 introspection arrives, and even
# then if where conditions apply to the *right* side of the prefetch, you may have
@@ -1757,7 +1883,7 @@
# the outer select to exclude joins you didin't want in the first place
#
# OTOH it can be seen as a plus: <ash> (notes that this query would make a DBA cry ;)
- return (\@outer_from, $select, $where, $attrs);
+ return (\@outer_from, $outer_select, $where, $outer_attrs);
}
sub _resolve_ident_sources {
@@ -1942,7 +2068,7 @@
sub sth {
my ($self, $sql) = @_;
- $self->dbh_do('_dbh_sth', $sql);
+ $self->dbh_do('_dbh_sth', $sql); # retry over disconnects
}
sub _dbh_columns_info_for {
@@ -2004,7 +2130,7 @@
sub columns_info_for {
my ($self, $table) = @_;
- $self->dbh_do('_dbh_columns_info_for', $table);
+ $self->_dbh_columns_info_for ($self->_get_dbh, $table);
}
=head2 last_insert_id
@@ -2030,17 +2156,87 @@
sub last_insert_id {
my $self = shift;
- $self->dbh_do('_dbh_last_insert_id', @_);
+ $self->_dbh_last_insert_id ($self->_dbh, @_);
}
+=head2 _native_data_type
+
+=over 4
+
+=item Arguments: $type_name
+
+=back
+
+This API is B<EXPERIMENTAL>, will almost definitely change in the future, and
+currently only used by L<::AutoCast|DBIx::Class::Storage::DBI::AutoCast> and
+L<::Sybase|DBIx::Class::Storage::DBI::Sybase>.
+
+The default implementation returns C<undef>, implement in your Storage driver if
+you need this functionality.
+
+Should map types from other databases to the native RDBMS type, for example
+C<VARCHAR2> to C<VARCHAR>.
+
+Types with modifiers should map to the underlying data type. For example,
+C<INTEGER AUTO_INCREMENT> should become C<INTEGER>.
+
+Composite types should map to the container type, for example
+C<ENUM(foo,bar,baz)> becomes C<ENUM>.
+
+=cut
+
+sub _native_data_type {
+ #my ($self, $data_type) = @_;
+ return undef
+}
+
+# Check if placeholders are supported at all
+sub _placeholders_supported {
+ my $self = shift;
+ my $dbh = $self->_get_dbh;
+
+ # some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported})
+ # but it is inaccurate more often than not
+ eval {
+ local $dbh->{PrintError} = 0;
+ local $dbh->{RaiseError} = 1;
+ $dbh->do('select ?', {}, 1);
+ };
+ return $@ ? 0 : 1;
+}
+
+# Check if placeholders bound to non-string types throw exceptions
+#
+sub _typeless_placeholders_supported {
+ my $self = shift;
+ my $dbh = $self->_get_dbh;
+
+ eval {
+ local $dbh->{PrintError} = 0;
+ local $dbh->{RaiseError} = 1;
+ # this specifically tests a bind that is NOT a string
+ $dbh->do('select 1 where 1 = ?', {}, 1);
+ };
+ return $@ ? 0 : 1;
+}
+
=head2 sqlt_type
Returns the database driver name.
=cut
-sub sqlt_type { shift->_get_dbh->{Driver}->{Name} }
+sub sqlt_type {
+ my ($self) = @_;
+ if (not $self->_driver_determined) {
+ $self->_determine_driver;
+ goto $self->can ('sqlt_type');
+ }
+
+ $self->_get_dbh->{Driver}->{Name};
+}
+
=head2 bind_attribute_by_data_type
Given a datatype from column info, returns a database specific bind
@@ -2155,9 +2351,8 @@
%{$sqltargs || {}}
};
- $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09003: '}
- . $self->_check_sqlt_message . q{'})
- if !$self->_check_sqlt_version;
+ $self->throw_exception("Can't create a ddl file without SQL::Translator: " . $self->_sqlt_version_error)
+ if !$self->_sqlt_version_ok;
my $sqlt = SQL::Translator->new( $sqltargs );
@@ -2299,9 +2494,8 @@
return join('', @rows);
}
- $self->throw_exception(q{Can't deploy without SQL::Translator 0.09003: '}
- . $self->_check_sqlt_message . q{'})
- if !$self->_check_sqlt_version;
+ $self->throw_exception("Can't deploy without either SQL::Translator or a ddl_dir: " . $self->_sqlt_version_error )
+ if !$self->_sqlt_version_ok;
# sources needs to be a parser arg, but for simplicty allow at top level
# coming in
@@ -2360,7 +2554,6 @@
sub datetime_parser {
my $self = shift;
return $self->{datetime_parser} ||= do {
- $self->_populate_dbh unless $self->_dbh;
$self->build_datetime_parser(@_);
};
}
@@ -2381,29 +2574,18 @@
=cut
sub build_datetime_parser {
+ if (not $_[0]->_driver_determined) {
+ $_[0]->_determine_driver;
+ goto $_[0]->can('build_datetime_parser');
+ }
+
my $self = shift;
my $type = $self->datetime_parser_type(@_);
- eval "use ${type}";
- $self->throw_exception("Couldn't load ${type}: $@") if $@;
+ $self->ensure_class_loaded ($type);
return $type;
}
-{
- my $_check_sqlt_version; # private
- my $_check_sqlt_message; # private
- sub _check_sqlt_version {
- return $_check_sqlt_version if defined $_check_sqlt_version;
- eval 'use SQL::Translator "0.09003"';
- $_check_sqlt_message = $@ || '';
- $_check_sqlt_version = !$@;
- }
- sub _check_sqlt_message {
- _check_sqlt_version if !defined $_check_sqlt_message;
- $_check_sqlt_message;
- }
-}
-
=head2 is_replicating
A boolean that reports if a particular L<DBIx::Class::Storage::DBI> is set to
@@ -2429,12 +2611,41 @@
return;
}
+# SQLT version handling
+{
+ my $_sqlt_version_ok; # private
+ my $_sqlt_version_error; # private
+
+ sub _sqlt_version_ok {
+ if (!defined $_sqlt_version_ok) {
+ eval "use SQL::Translator $minimum_sqlt_version";
+ if ($@) {
+ $_sqlt_version_ok = 0;
+ $_sqlt_version_error = $@;
+ }
+ else {
+ $_sqlt_version_ok = 1;
+ }
+ }
+ return $_sqlt_version_ok;
+ }
+
+ sub _sqlt_version_error {
+ shift->_sqlt_version_ok unless defined $_sqlt_version_ok;
+ return $_sqlt_version_error;
+ }
+
+ sub _sqlt_minimum_version { $minimum_sqlt_version };
+}
+
sub DESTROY {
my $self = shift;
+
$self->_verify_pid if $self->_dbh;
# some databases need this to stop spewing warnings
if (my $dbh = $self->_dbh) {
+ local $@;
eval { $dbh->disconnect };
}
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/Statistics.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/Statistics.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/Statistics.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -2,7 +2,7 @@
use strict;
use warnings;
-use base qw/Class::Accessor::Grouped/;
+use base qw/DBIx::Class/;
use IO::File;
__PACKAGE__->mk_group_accessors(simple => qw/callback debugfh silence/);
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/TxnScopeGuard.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/TxnScopeGuard.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage/TxnScopeGuard.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -2,7 +2,7 @@
use strict;
use warnings;
-use Carp ();
+use Carp::Clan qw/^DBIx::Class/;
sub new {
my ($class, $storage) = @_;
@@ -24,21 +24,33 @@
return if $dismiss;
my $exception = $@;
- Carp::cluck("A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or an error - bad")
- unless $exception;
+
{
local $@;
+
+ carp 'A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or error. Rolling back.'
+ unless $exception;
+
eval { $storage->txn_rollback };
my $rollback_exception = $@;
- if($rollback_exception) {
- my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
- $storage->throw_exception(
- "Transaction aborted: ${exception}. "
- . "Rollback failed: ${rollback_exception}"
- ) unless $rollback_exception =~ /$exception_class/;
+ if ($rollback_exception && $rollback_exception !~ /DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION/) {
+ if ($exception) {
+ $exception = "Transaction aborted: ${exception} "
+ ."Rollback failed: ${rollback_exception}";
+ }
+ else {
+ carp (join ' ',
+ "********************* ROLLBACK FAILED!!! ********************",
+ "\nA rollback operation failed after the guard went out of scope.",
+ 'This is potentially a disastrous situation, check your data for',
+ "consistency: $rollback_exception"
+ );
+ }
}
}
+
+ $@ = $exception;
}
1;
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class/Storage.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -6,8 +6,8 @@
use base qw/DBIx::Class/;
use mro 'c3';
-use Scalar::Util qw/weaken/;
-use Carp::Clan qw/^DBIx::Class/;
+use DBIx::Class::Exception;
+use Scalar::Util();
use IO::File;
use DBIx::Class::Storage::TxnScopeGuard;
@@ -83,7 +83,7 @@
sub set_schema {
my ($self, $schema) = @_;
$self->schema($schema);
- weaken($self->{schema}) if ref $self->{schema};
+ Scalar::Util::weaken($self->{schema}) if ref $self->{schema};
}
=head2 connected
@@ -120,8 +120,12 @@
sub throw_exception {
my $self = shift;
- $self->schema->throw_exception(@_) if $self->schema;
- croak @_;
+ if ($self->schema) {
+ $self->schema->throw_exception(@_);
+ }
+ else {
+ DBIx::Class::Exception->throw(@_);
+ }
}
=head2 txn_do
Modified: DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/lib/DBIx/Class.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -6,7 +6,7 @@
use MRO::Compat;
use vars qw($VERSION);
-use base qw/DBIx::Class::Componentised Class::Accessor::Grouped/;
+use base qw/Class::C3::Componentised Class::Accessor::Grouped/;
use DBIx::Class::StartupCheck;
sub mk_classdata {
@@ -24,9 +24,8 @@
# Always remember to do all digits for the version even if they're 0
# i.e. first release of 0.XX *must* be 0.XX000. This avoids fBSD ports
# brain damage and presumably various other packaging systems too
+$VERSION = '0.08112';
-$VERSION = '0.08109';
-
$VERSION = eval $VERSION; # numify for warning-free dev releases
sub MODIFY_CODE_ATTRIBUTES {
@@ -230,6 +229,8 @@
bricas: Brian Cassidy <bricas at cpan.org>
+brunov: Bruno Vecchi <vecchi.b at gmail.com>
+
caelum: Rafael Kitover <rkitover at cpan.org>
castaway: Jess Robinson
@@ -340,6 +341,8 @@
Tom Hukins
+triode: Pete Gamache <gamache at cpan.org>
+
typester: Daisuke Murase <typester at cpan.org>
victori: Victor Igumnov <victori at cpan.org>
@@ -352,8 +355,14 @@
zamolxes: Bogdan Lucaciu <bogdan at wiz.ro>
+=head1 COPYRIGHT
+
+Copyright (c) 2005 - 2009 the DBIx::Class L</AUTHOR> and L</CONTRIBUTORS>
+as listed above.
+
=head1 LICENSE
-You may distribute this code under the same terms as Perl itself.
+This library is free software and may be distributed under the same terms
+as perl itself.
=cut
Modified: DBIx-Class/0.08/branches/ado_mssql/script/dbicadmin
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/script/dbicadmin 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/script/dbicadmin 2009-10-14 13:45:34 UTC (rev 7786)
@@ -30,7 +30,7 @@
}
pod2usage(1) if ($help);
-$ENV{DBIX_CLASS_STORAGE_DBI_DEBUG} = 1 if ($trace);
+$ENV{DBIC_TRACE} = 1 if ($trace);
die('No op specified') if(!$op);
die('Invalid op') if ($op!~/^insert|update|delete|select$/s);
Modified: DBIx-Class/0.08/branches/ado_mssql/t/02pod.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/02pod.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/02pod.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,6 +1,27 @@
+use warnings;
+use strict;
+
use Test::More;
+use lib qw(t/lib);
+use DBICTest;
-eval "use Test::Pod 1.14";
-plan skip_all => 'Test::Pod 1.14 required' if $@;
+my @MODULES = (
+ 'Test::Pod 1.26',
+);
+# Don't run tests for installs
+unless ( DBICTest::AuthorCheck->is_author || $ENV{AUTOMATED_TESTING} || $ENV{RELEASE_TESTING} ) {
+ plan( skip_all => "Author tests not required for installation" );
+}
+
+# Load the testing modules
+foreach my $MODULE ( @MODULES ) {
+ eval "use $MODULE";
+ if ( $@ ) {
+ $ENV{RELEASE_TESTING}
+ ? die( "Failed to load required release-testing module $MODULE" )
+ : plan( skip_all => "$MODULE not available for testing" );
+ }
+}
+
all_pod_files_ok();
Modified: DBIx-Class/0.08/branches/ado_mssql/t/03podcoverage.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/03podcoverage.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/03podcoverage.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,20 +1,39 @@
+use warnings;
+use strict;
+
use Test::More;
+use List::Util ();
+use lib qw(t/lib);
+use DBICTest;
-eval "use Pod::Coverage 0.19";
-plan skip_all => 'Pod::Coverage 0.19 required' if $@;
-eval "use Test::Pod::Coverage 1.04";
-plan skip_all => 'Test::Pod::Coverage 1.04 required' if $@;
+my @MODULES = (
+ 'Test::Pod::Coverage 1.08',
+ 'Pod::Coverage 0.20',
+);
-plan skip_all => 'set TEST_POD to enable this test'
- unless ($ENV{TEST_POD} || -e 'MANIFEST.SKIP');
+# Don't run tests for installs
+unless ( DBICTest::AuthorCheck->is_author || $ENV{AUTOMATED_TESTING} || $ENV{RELEASE_TESTING} ) {
+ plan( skip_all => "Author tests not required for installation" );
+}
-my @modules = sort { $a cmp $b } (Test::Pod::Coverage::all_modules());
-plan tests => scalar(@modules);
+# Load the testing modules
+foreach my $MODULE ( @MODULES ) {
+ eval "use $MODULE";
+ if ( $@ ) {
+ $ENV{RELEASE_TESTING}
+ ? die( "Failed to load required release-testing module $MODULE" )
+ : plan( skip_all => "$MODULE not available for testing" );
+ }
+}
# Since this is about checking documentation, a little documentation
-# of what this is doing might be in order...
+# of what this is doing might be in order.
# The exceptions structure below is a hash keyed by the module
-# name. The value for each is a hash, which contains one or more
+# name. Any * in a name is treated like a wildcard and will behave
+# as expected. Modules are matched by longest string first, so
+# A::B::C will match even if there is A::B*
+
+# The value for each is a hash, which contains one or more
# (although currently more than one makes no sense) of the following
# things:-
# skip => a true value means this module is not checked
@@ -22,131 +41,108 @@
# do not need to be documented.
my $exceptions = {
'DBIx::Class' => {
- ignore => [
- qw/MODIFY_CODE_ATTRIBUTES
- component_base_class
- mk_classdata
- mk_classaccessor/
- ]
+ ignore => [qw/
+ MODIFY_CODE_ATTRIBUTES
+ component_base_class
+ mk_classdata
+ mk_classaccessor
+ /]
},
'DBIx::Class::Row' => {
- ignore => [
- qw( MULTICREATE_DEBUG )
- ],
+ ignore => [qw/
+ MULTICREATE_DEBUG
+ /],
},
'DBIx::Class::ResultSource' => {
ignore => [qw/
- compare_relationship_keys
- pk_depends_on
- resolve_condition
- resolve_join
- resolve_prefetch
+ compare_relationship_keys
+ pk_depends_on
+ resolve_condition
+ resolve_join
+ resolve_prefetch
/],
},
+ 'DBIx::Class::ResultSourceHandle' => {
+ ignore => [qw/
+ schema
+ source_moniker
+ /],
+ },
'DBIx::Class::Storage' => {
- ignore => [
- qw(cursor)
- ]
+ ignore => [qw/
+ schema
+ cursor
+ /]
},
'DBIx::Class::Schema' => {
- ignore => [
- qw(setup_connection_class)
- ]
+ ignore => [qw/
+ setup_connection_class
+ /]
},
- 'DBIx::Class::Storage::DBI::Sybase' => {
- ignore => [
- qw/should_quote_data_type/,
- ]
+
+ 'DBIx::Class::Schema::Versioned' => {
+ ignore => [ qw/
+ connection
+ /]
},
- 'DBIx::Class::CDBICompat::AccessorMapping' => { skip => 1 },
- 'DBIx::Class::CDBICompat::AbstractSearch' => {
- ignore => [qw(search_where)]
- },
- 'DBIx::Class::CDBICompat::AttributeAPI' => { skip => 1 },
- 'DBIx::Class::CDBICompat::AutoUpdate' => { skip => 1 },
- 'DBIx::Class::CDBICompat::ColumnsAsHash' => {
- ignore => [qw(inflate_result new update)]
- },
- 'DBIx::Class::CDBICompat::ColumnCase' => { skip => 1 },
- 'DBIx::Class::CDBICompat::ColumnGroups' => { skip => 1 },
- 'DBIx::Class::CDBICompat::Constraints' => { skip => 1 },
- 'DBIx::Class::CDBICompat::Constructor' => { skip => 1 },
- 'DBIx::Class::CDBICompat::Copy' => {
- ignore => [qw(copy)]
- },
- 'DBIx::Class::CDBICompat::DestroyWarning' => { skip => 1 },
- 'DBIx::Class::CDBICompat::GetSet' => { skip => 1 },
- 'DBIx::Class::CDBICompat::HasA' => { skip => 1 },
- 'DBIx::Class::CDBICompat::HasMany' => { skip => 1 },
- 'DBIx::Class::CDBICompat::ImaDBI' => { skip => 1 },
- 'DBIx::Class::CDBICompat::LazyLoading' => { skip => 1 },
- 'DBIx::Class::CDBICompat::LiveObjectIndex' => { skip => 1 },
- 'DBIx::Class::CDBICompat::MightHave' => { skip => 1 },
- 'DBIx::Class::CDBICompat::NoObjectIndex' => { skip => 1 },
- 'DBIx::Class::CDBICompat::Pager' => { skip => 1 },
- 'DBIx::Class::CDBICompat::ReadOnly' => { skip => 1 },
- 'DBIx::Class::CDBICompat::Relationship' => { skip => 1 },
- 'DBIx::Class::CDBICompat::Relationships' => { skip => 1 },
- 'DBIx::Class::CDBICompat::Retrieve' => { skip => 1 },
- 'DBIx::Class::CDBICompat::SQLTransformer' => { skip => 1 },
- 'DBIx::Class::CDBICompat::Stringify' => { skip => 1 },
- 'DBIx::Class::CDBICompat::TempColumns' => { skip => 1 },
- 'DBIx::Class::CDBICompat::Triggers' => { skip => 1 },
- 'DBIx::Class::ClassResolver::PassThrough' => { skip => 1 },
- 'DBIx::Class::Componentised' => { skip => 1 },
- 'DBIx::Class::Relationship::Accessor' => { skip => 1 },
- 'DBIx::Class::Relationship::BelongsTo' => { skip => 1 },
- 'DBIx::Class::Relationship::CascadeActions' => { skip => 1 },
- 'DBIx::Class::Relationship::HasMany' => { skip => 1 },
- 'DBIx::Class::Relationship::HasOne' => { skip => 1 },
- 'DBIx::Class::Relationship::Helpers' => { skip => 1 },
- 'DBIx::Class::Relationship::ManyToMany' => { skip => 1 },
- 'DBIx::Class::Relationship::ProxyMethods' => { skip => 1 },
- 'DBIx::Class::ResultSetProxy' => { skip => 1 },
- 'DBIx::Class::ResultSetManager' => { skip => 1 },
- 'DBIx::Class::ResultSourceProxy' => { skip => 1 },
- 'DBIx::Class::Storage::DBI' => { skip => 1 },
- 'DBIx::Class::Storage::DBI::Replicated::Types' => { skip => 1 },
- 'DBIx::Class::Storage::DBI::DB2' => { skip => 1 },
- 'DBIx::Class::Storage::DBI::MSSQL' => { skip => 1 },
- 'DBIx::Class::Storage::DBI::Sybase::MSSQL' => { skip => 1 },
- 'DBIx::Class::Storage::DBI::ODBC400' => { skip => 1 },
- 'DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL' => { skip => 1 },
- 'DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server' => { skip => 1 },
- 'DBIx::Class::Storage::DBI::Oracle' => { skip => 1 },
- 'DBIx::Class::Storage::DBI::Pg' => { skip => 1 },
- 'DBIx::Class::Storage::DBI::SQLite' => { skip => 1 },
- 'DBIx::Class::Storage::DBI::mysql' => { skip => 1 },
- 'DBIx::Class::SQLAHacks' => { skip => 1 },
- 'DBIx::Class::SQLAHacks::MySQL' => { skip => 1 },
- 'DBIx::Class::SQLAHacks::MSSQL' => { skip => 1 },
- 'SQL::Translator::Parser::DBIx::Class' => { skip => 1 },
- 'SQL::Translator::Producer::DBIx::Class::File' => { skip => 1 },
-# skipped because the synopsis covers it clearly
+ 'DBIx::Class::ClassResolver::PassThrough' => { skip => 1 },
+ 'DBIx::Class::Componentised' => { skip => 1 },
+ 'DBIx::Class::Relationship::*' => { skip => 1 },
+ 'DBIx::Class::ResultSetProxy' => { skip => 1 },
+ 'DBIx::Class::ResultSourceProxy' => { skip => 1 },
+ 'DBIx::Class::Storage::Statistics' => { skip => 1 },
+ 'DBIx::Class::Storage::DBI::Replicated::Types' => { skip => 1 },
- 'DBIx::Class::InflateColumn::File' => { skip => 1 },
+# test some specific components whose parents are exempt below
+ 'DBIx::Class::Storage::DBI::Replicated*' => {},
+ 'DBIx::Class::Relationship::Base' => {},
-# skip connection since it's just an override
+# internals
+ 'DBIx::Class::SQLAHacks*' => { skip => 1 },
+ 'DBIx::Class::Storage::DBI*' => { skip => 1 },
+ 'SQL::Translator::*' => { skip => 1 },
- 'DBIx::Class::Schema::Versioned' => { ignore => [ qw(connection) ] },
+# deprecated / backcompat stuff
+ 'DBIx::Class::CDBICompat*' => { skip => 1 },
+ 'DBIx::Class::ResultSetManager' => { skip => 1 },
+ 'DBIx::Class::DB' => { skip => 1 },
-# don't bother since it's heavily deprecated
- 'DBIx::Class::ResultSetManager' => { skip => 1 },
+# skipped because the synopsis covers it clearly
+ 'DBIx::Class::InflateColumn::File' => { skip => 1 },
};
+my $ex_lookup = {};
+for my $string (keys %$exceptions) {
+ my $ex = $exceptions->{$string};
+ $string =~ s/\*/'.*?'/ge;
+ my $re = qr/^$string$/;
+ $ex_lookup->{$re} = $ex;
+}
+
+my @modules = sort { $a cmp $b } (Test::Pod::Coverage::all_modules());
+
foreach my $module (@modules) {
- SKIP:
- {
- skip "$module - No real methods", 1 if ($exceptions->{$module}{skip});
+ SKIP: {
- # build parms up from ignore list
- my $parms = {};
- $parms->{trustme} =
- [ map { qr/^$_$/ } @{ $exceptions->{$module}{ignore} } ]
- if exists($exceptions->{$module}{ignore});
+ my ($match) = List::Util::first
+ { $module =~ $_ }
+ (sort { length $b <=> length $a || $b cmp $a } (keys %$ex_lookup) )
+ ;
- # run the test with the potentially modified parm set
- pod_coverage_ok($module, $parms, "$module POD coverage");
- }
+ my $ex = $ex_lookup->{$match} if $match;
+
+ skip ("$module exempt", 1) if ($ex->{skip});
+
+ # build parms up from ignore list
+ my $parms = {};
+ $parms->{trustme} =
+ [ map { qr/^$_$/ } @{ $ex->{ignore} } ]
+ if exists($ex->{ignore});
+
+ # run the test with the potentially modified parm set
+ pod_coverage_ok($module, $parms, "$module POD coverage");
+ }
}
+
+done_testing;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/05components.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/05components.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/05components.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -7,8 +7,6 @@
use lib qw(t/lib);
use DBICTest::ForeignComponent;
-plan tests => 6;
-
# Tests if foreign component was loaded by calling foreign's method
ok( DBICTest::ForeignComponent->foreign_test_method, 'foreign component' );
@@ -35,32 +33,7 @@
'inject_base filters duplicates'
);
-# Test for a warning with incorrect order in load_components
-my @warnings = ();
-{
- package A::Test;
- our @ISA = 'DBIx::Class';
- {
- local $SIG{__WARN__} = sub { push @warnings, shift};
- __PACKAGE__->load_components(qw(Core UTF8Columns));
- }
-}
-like( $warnings[0], qr/Core loaded before UTF8Columns/,
- 'warning issued for incorrect order in load_components()' );
-is( scalar @warnings, 1,
- 'only one warning issued for incorrect load_components call' );
-
-# Test that no warning is issued for the correct order in load_components
-{
- @warnings = ();
- package B::Test;
- our @ISA = 'DBIx::Class';
- {
- local $SIG{__WARN__} = sub { push @warnings, shift };
- __PACKAGE__->load_components(qw(UTF8Columns Core));
- }
-}
-is( scalar @warnings, 0,
- 'warning not issued for correct order in load_components()' );
-
use_ok('DBIx::Class::AccessorGroup');
+use_ok('DBIx::Class::Componentised');
+
+done_testing;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/103many_to_many_warning.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/103many_to_many_warning.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/103many_to_many_warning.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -3,7 +3,6 @@
use Test::More;
use lib qw(t/lib);
-use Data::Dumper;
plan tests => 4;
my $exp_warn = qr/The many-to-many relationship 'bars' is trying to create/;
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/18inserterror.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/18inserterror.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/18inserterror.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,29 +0,0 @@
-use Class::C3;
-use strict;
-use Test::More;
-use warnings;
-
-BEGIN {
- eval "use DBD::SQLite";
- plan $@
- ? ( skip_all => 'needs DBD::SQLite for testing' )
- : ( tests => 4 );
-}
-
-use lib qw(t/lib);
-
-use_ok( 'DBICTest' );
-use_ok( 'DBICTest::Schema' );
-my $schema = DBICTest->init_schema;
-
-{
- my $warnings;
- local $SIG{__WARN__} = sub { $warnings .= $_[0] };
- eval {
- $schema->resultset('CD')
- ->create({ title => 'vacation in antarctica' })
- };
- like $@, qr/NULL/; # as opposed to some other error
- unlike( $warnings, qr/uninitialized value/, "No warning from Storage" );
-}
-
Modified: DBIx-Class/0.08/branches/ado_mssql/t/26dumper.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/26dumper.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/26dumper.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -6,15 +6,6 @@
$Data::Dumper::Sortkeys = 1;
use lib qw(t/lib);
-
-BEGIN {
- eval "use DBD::SQLite";
- plan $ENV{DATA_DUMPER_TEST}
- ? ( tests => 2 )
- : ( skip_all => 'Set $ENV{DATA_DUMPER_TEST} to run this test' );
-}
-
-
use_ok('DBICTest');
my $schema = DBICTest->init_schema();
@@ -36,4 +27,4 @@
cmp_ok( $rs->count(), '==', 1, "Single record in after death with dumper");
-1;
+done_testing;
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/31stats.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/31stats.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/31stats.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,104 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use warnings;
-use Test::More;
-
-plan tests => 12;
-
-use lib qw(t/lib);
-
-use_ok('DBICTest');
-my $schema = DBICTest->init_schema();
-
-my $cbworks = 0;
-
-$schema->storage->debugcb(sub { $cbworks = 1; });
-$schema->storage->debug(0);
-my $rs = $schema->resultset('CD')->search({});
-$rs->count();
-ok(!$cbworks, 'Callback not called with debug disabled');
-
-$schema->storage->debug(1);
-
-$rs->count();
-ok($cbworks, 'Debug callback worked.');
-
-my $prof = new DBIx::Test::Profiler();
-$schema->storage->debugobj($prof);
-
-# Test non-transaction calls.
-$rs->count();
-ok($prof->{'query_start'}, 'query_start called');
-ok($prof->{'query_end'}, 'query_end called');
-ok(!$prof->{'txn_begin'}, 'txn_begin not called');
-ok(!$prof->{'txn_commit'}, 'txn_commit not called');
-
-$prof->reset();
-
-# Test transaction calls
-$schema->txn_begin();
-ok($prof->{'txn_begin'}, 'txn_begin called');
-
-$rs = $schema->resultset('CD')->search({});
-$rs->count();
-ok($prof->{'query_start'}, 'query_start called');
-ok($prof->{'query_end'}, 'query_end called');
-
-$schema->txn_commit();
-ok($prof->{'txn_commit'}, 'txn_commit called');
-
-$prof->reset();
-
-# Test a rollback
-$schema->txn_begin();
-$rs = $schema->resultset('CD')->search({});
-$rs->count();
-$schema->txn_rollback();
-ok($prof->{'txn_rollback'}, 'txn_rollback called');
-
-$schema->storage->debug(0);
-
-package DBIx::Test::Profiler;
-use strict;
-
-sub new {
- my $self = bless({});
-}
-
-sub query_start {
- my $self = shift();
- $self->{'query_start'} = 1;
-}
-
-sub query_end {
- my $self = shift();
- $self->{'query_end'} = 1;
-}
-
-sub txn_begin {
- my $self = shift();
- $self->{'txn_begin'} = 1;
-}
-
-sub txn_rollback {
- my $self = shift();
- $self->{'txn_rollback'} = 1;
-}
-
-sub txn_commit {
- my $self = shift();
- $self->{'txn_commit'} = 1;
-}
-
-sub reset {
- my $self = shift();
-
- $self->{'query_start'} = 0;
- $self->{'query_end'} = 0;
- $self->{'txn_begin'} = 0;
- $self->{'txn_rollback'} = 0;
- $self->{'txn_end'} = 0;
-}
-
-1;
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/32connect_code_ref.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/32connect_code_ref.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/32connect_code_ref.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,24 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-plan tests => 1;
-
-# Set up the "usual" sqlite for DBICTest
-my $normal_schema = DBICTest->init_schema( sqlite_use_file => 1 );
-
-# Steal the dsn, which should be like 'dbi:SQLite:t/var/DBIxClass.db'
-my $normal_dsn = $normal_schema->storage->_dbi_connect_info->[0];
-
-# Make sure we have no active connection
-$normal_schema->storage->disconnect;
-
-# Make a new clone with a new connection, using a code reference
-my $code_ref_schema = $normal_schema->connect(sub { DBI->connect($normal_dsn); });
-
-# Stolen from 60core.t - this just verifies things seem to work at all
-my @art = $code_ref_schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
-cmp_ok(@art, '==', 3, "Three artists returned");
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/33storage_reconnect.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/33storage_reconnect.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/33storage_reconnect.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,73 +0,0 @@
-use strict;
-use warnings;
-
-use FindBin;
-use File::Copy;
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-plan tests => 6;
-
-my $db_orig = "$FindBin::Bin/var/DBIxClass.db";
-my $db_tmp = "$db_orig.tmp";
-
-# Set up the "usual" sqlite for DBICTest
-my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
-
-# Make sure we're connected by doing something
-my @art = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
-cmp_ok(@art, '==', 3, "Three artists returned");
-
-# Disconnect the dbh, and be sneaky about it
-# Also test if DBD::SQLite finaly knows how to ->disconnect properly
-{
- my $w;
- local $SIG{__WARN__} = sub { $w = shift };
- $schema->storage->_dbh->disconnect;
- ok ($w !~ /active statement handles/, 'SQLite can disconnect properly');
-}
-
-# Try the operation again - What should happen here is:
-# 1. S::DBI blindly attempts the SELECT, which throws an exception
-# 2. It catches the exception, checks ->{Active}/->ping, sees the disconnected state...
-# 3. Reconnects, and retries the operation
-# 4. Success!
-my @art_two = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
-cmp_ok(@art_two, '==', 3, "Three artists returned");
-
-### Now, disconnect the dbh, and move the db file;
-# create a new one and chmod 000 to prevent SQLite from connecting.
-$schema->storage->_dbh->disconnect;
-move( $db_orig, $db_tmp );
-open DBFILE, '>', $db_orig;
-print DBFILE 'THIS IS NOT A REAL DATABASE';
-close DBFILE;
-chmod 0000, $db_orig;
-
-### Try the operation again... it should fail, since there's no db
-{
- # Catch the DBI connection error
- local $SIG{__WARN__} = sub {};
- eval {
- my @art_three = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
- };
- ok( $@, 'The operation failed' );
-}
-
-### Now, move the db file back to the correct name
-unlink($db_orig);
-move( $db_tmp, $db_orig );
-
-SKIP: {
- skip "Cannot reconnect if original connection didn't fail", 2
- if ( $@ =~ /encrypted or is not a database/ );
-
- ### Try the operation again... this time, it should succeed
- my @art_four;
- eval {
- @art_four = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
- };
- ok( !$@, 'The operation succeeded' );
- cmp_ok( @art_four, '==', 3, "Three artists returned" );
-}
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/35disable_sth_caching.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/35disable_sth_caching.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/35disable_sth_caching.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,19 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-plan tests => 2;
-
-# Set up the "usual" sqlite for DBICTest
-my $schema = DBICTest->init_schema;
-
-my $sth_one = $schema->storage->sth('SELECT 42');
-my $sth_two = $schema->storage->sth('SELECT 42');
-$schema->storage->disable_sth_caching(1);
-my $sth_three = $schema->storage->sth('SELECT 42');
-
-ok($sth_one == $sth_two, "statement caching works");
-ok($sth_two != $sth_three, "disabling statement caching works");
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/36datetime.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/36datetime.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/36datetime.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,28 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-
-eval { require DateTime::Format::SQLite };
-plan $@ ? ( skip_all => 'Requires DateTime::Format::SQLite' )
- : ( tests => 3 );
-
-my $schema = DBICTest->init_schema(
- no_deploy => 1, # Deploying would cause an early rebless
-);
-
-is(
- ref $schema->storage, 'DBIx::Class::Storage::DBI',
- 'Starting with generic storage'
-);
-
-# Calling date_time_parser should cause the storage to be reblessed,
-# so that we can pick up datetime_parser_type from subclasses
-
-my $parser = $schema->storage->datetime_parser();
-
-is($parser, 'DateTime::Format::SQLite', 'Got expected storage-set datetime_parser');
-isa_ok($schema->storage, 'DBIx::Class::Storage::DBI::SQLite', 'storage');
-
Modified: DBIx-Class/0.08/branches/ado_mssql/t/46where_attribute.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/46where_attribute.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/46where_attribute.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -2,7 +2,6 @@
use warnings;
use Test::More;
-use Data::Dumper;
use lib qw(t/lib);
use DBICTest;
my $schema = DBICTest->init_schema();
Modified: DBIx-Class/0.08/branches/ado_mssql/t/60core.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/60core.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/60core.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -3,6 +3,7 @@
use Test::More;
use Test::Exception;
+use Test::Warn;
use lib qw(t/lib);
use DBICTest;
use DBIC::SqlMakerTest;
@@ -35,10 +36,10 @@
my %not_dirty = $art->get_dirty_columns();
is(scalar(keys(%not_dirty)), 0, 'Nothing is dirty');
-eval {
+throws_ok ( sub {
my $ret = $art->make_column_dirty('name2');
-};
-ok(defined($@), 'Failed to make non-existent column dirty');
+}, qr/No such column 'name2'/, 'Failed to make non-existent column dirty');
+
$art->make_column_dirty('name');
my %fake_dirty = $art->get_dirty_columns();
is(scalar(keys(%fake_dirty)), 1, '1 fake dirty column');
@@ -104,6 +105,17 @@
is($new_again->ID, 'DBICTest::Artist|artist|artistid=4', 'unique object id generated correctly');
+# test that store_column is called once for create() for non sequence columns
+{
+ ok(my $artist = $schema->resultset('Artist')->create({name => 'store_column test'}));
+ is($artist->name, 'X store_column test'); # used to be 'X X store...'
+
+ # call store_column even though the column doesn't seem to be dirty
+ ok($artist->update({name => 'X store_column test'}));
+ is($artist->name, 'X X store_column test');
+ $artist->delete;
+}
+
# Test backwards compatibility
{
my $warnings = '';
@@ -210,10 +222,10 @@
isa_ok($tdata{'last_updated_on'}, 'DateTime', 'inflated accessored column');
}
-eval { $schema->class("Track")->load_components('DoesNotExist'); };
+throws_ok (sub {
+ $schema->class("Track")->load_components('DoesNotExist');
+}, qr!Can't locate DBIx/Class/DoesNotExist.pm!, 'exception on nonexisting component');
-ok $@, $@;
-
is($schema->class("Artist")->field_name_for->{name}, 'artist name', 'mk_classdata usage ok');
my $search = [ { 'tags.tag' => 'Cheesy' }, { 'tags.tag' => 'Blue' } ];
@@ -227,6 +239,13 @@
is ($collapsed_or_rs->all, 4, 'Collapsed joined search with OR returned correct number of rows');
is ($collapsed_or_rs->count, 4, 'Collapsed search count with OR ok');
+# make sure distinct on a grouped rs is warned about
+my $cd_rs = $schema->resultset ('CD')
+ ->search ({}, { distinct => 1, group_by => 'title' });
+warnings_exist (sub {
+ $cd_rs->next;
+}, qr/Useless use of distinct/, 'UUoD warning');
+
{
my $tcount = $schema->resultset('Track')->search(
{},
Modified: DBIx-Class/0.08/branches/ado_mssql/t/71mysql.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/71mysql.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/71mysql.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -6,6 +6,7 @@
use lib qw(t/lib);
use DBICTest;
use DBI::Const::GetInfoType;
+use DBIC::SqlMakerTest;
my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MYSQL_${_}" } qw/DSN USER PASS/};
@@ -14,8 +15,6 @@
plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
unless ($dsn && $user);
-plan tests => 19;
-
my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
my $dbh = $schema->storage->dbh;
@@ -46,6 +45,14 @@
#'dbi:mysql:host=localhost;database=dbic_test', 'dbic_test', '');
+# make sure sqlt_type overrides work (::Storage::DBI::mysql does this)
+{
+ my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
+
+ ok (!$schema->storage->_dbh, 'definitely not connected');
+ is ($schema->storage->sqlt_type, 'MySQL', 'sqlt_type correct pre-connection');
+}
+
# This is in Core now, but it's here just to test that it doesn't break
$schema->class('Artist')->load_components('PK::Auto');
@@ -153,13 +160,42 @@
my $type_info = $schema->storage->columns_info_for('artist');
is_deeply($type_info, $test_type_info, 'columns_info_for - column data types');
+
+
}
my $cd = $schema->resultset ('CD')->create ({});
my $producer = $schema->resultset ('Producer')->create ({});
lives_ok { $cd->set_producers ([ $producer ]) } 'set_relationship doesnt die';
+{
+ my $artist = $schema->resultset('Artist')->next;
+ my $cd = $schema->resultset('CD')->next;
+ $cd->set_from_related ('artist', $artist);
+ $cd->update;
+ my $rs = $schema->resultset('CD')->search ({}, { prefetch => 'artist' });
+
+ lives_ok sub {
+ my $cd = $rs->next;
+ is ($cd->artist->name, $artist->name, 'Prefetched artist');
+ }, 'join does not throw (mysql 3 test)';
+
+ # induce a jointype override, make sure it works even if we don't have mysql3
+ local $schema->storage->sql_maker->{_default_jointype} = 'inner';
+ is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield
+ FROM cd me
+ INNER JOIN artist artist ON artist.artistid = me.artist
+ )',
+ [],
+ 'overridden default join type works',
+ );
+}
+
## Can we properly deal with the null search problem?
##
## Only way is to do a SET SQL_AUTO_IS_NULL = 0; on connect
@@ -190,3 +226,5 @@
is $artist => undef
=> 'Nothing Found!';
}
+
+done_testing;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/72pg.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/72pg.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/72pg.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -6,142 +6,74 @@
use lib qw(t/lib);
use DBICTest;
-{
- package DBICTest::Schema::Casecheck;
- use strict;
- use warnings;
- use base 'DBIx::Class';
+my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_PG_${_}" } qw/DSN USER PASS/};
- __PACKAGE__->load_components(qw/Core/);
- __PACKAGE__->table('testschema.casecheck');
- __PACKAGE__->add_columns(qw/id name NAME uc_name storecolumn/);
- __PACKAGE__->column_info_from_storage(1);
- __PACKAGE__->set_primary_key('id');
+plan skip_all => <<EOM unless $dsn && $user;
+Set \$ENV{DBICTEST_PG_DSN}, _USER and _PASS to run this test
+( NOTE: This test drops and creates tables called 'artist', 'casecheck',
+ 'array_test' and 'sequence_test' as well as following sequences:
+ 'pkid1_seq', 'pkid2_seq' and 'nonpkid_seq', as well as the following
+ schemas: 'dbic_t_schema', 'dbic_t_schema_2', 'dbic_t_schema_3',
+ 'dbic_t_schema_4', and 'dbic_t_schema_5'
+)
+EOM
- sub store_column {
- my ($self, $name, $value) = @_;
- $value = '#'.$value if($name eq "storecolumn");
- $self->maybe::next::method($name, $value);
- }
-}
+### load any test classes that are defined further down in the file via BEGIN blocks
-{
- package DBICTest::Schema::ArrayTest;
+our @test_classes; #< array that will be pushed into by test classes defined in this file
+DBICTest::Schema->load_classes( map {s/.+:://;$_} @test_classes ) if @test_classes;
- use strict;
- use warnings;
- use base 'DBIx::Class';
- __PACKAGE__->load_components(qw/Core/);
- __PACKAGE__->table('testschema.array_test');
- __PACKAGE__->add_columns(qw/id arrayfield/);
- __PACKAGE__->column_info_from_storage(1);
- __PACKAGE__->set_primary_key('id');
+### pre-connect tests (keep each test separate as to make sure rebless() runs)
+{
+ my $s = DBICTest::Schema->connect($dsn, $user, $pass);
-}
+ ok (!$s->storage->_dbh, 'definitely not connected');
-my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_PG_${_}" } qw/DSN USER PASS/};
+ # Check that datetime_parser returns correctly before we explicitly connect.
+ SKIP: {
+ eval { require DateTime::Format::Pg };
+ skip "DateTime::Format::Pg required", 2 if $@;
-plan skip_all => 'Set $ENV{DBICTEST_PG_DSN}, _USER and _PASS to run this test '.
- '(note: This test drops and creates tables called \'artist\', \'casecheck\', \'array_test\' and \'sequence_test\''.
- ' as well as following sequences: \'pkid1_seq\', \'pkid2_seq\' and \'nonpkid_seq\''.
- ' as well as following schemas: \'testschema\',\'anothertestschema\'!)'
- unless ($dsn && $user);
+ my $store = ref $s->storage;
+ is($store, 'DBIx::Class::Storage::DBI', 'Started with generic storage');
-DBICTest::Schema->load_classes( 'Casecheck', 'ArrayTest' );
-my $schema = DBICTest::Schema->connect($dsn, $user, $pass,);
+ my $parser = $s->storage->datetime_parser;
+ is( $parser, 'DateTime::Format::Pg', 'datetime_parser is as expected');
+ }
-# Check that datetime_parser returns correctly before we explicitly connect.
-SKIP: {
- eval { require DateTime::Format::Pg };
- skip "DateTime::Format::Pg required", 2 if $@;
-
- my $store = ref $schema->storage;
- is($store, 'DBIx::Class::Storage::DBI', 'Started with generic storage');
-
- my $parser = $schema->storage->datetime_parser;
- is( $parser, 'DateTime::Format::Pg', 'datetime_parser is as expected');
+ ok (!$s->storage->_dbh, 'still not connected');
}
-
-my $dbh = $schema->storage->dbh;
-$schema->source("Artist")->name("testschema.artist");
-$schema->source("SequenceTest")->name("testschema.sequence_test");
{
- local $SIG{__WARN__} = sub {};
- _cleanup ($dbh);
-
- my $artist_table_def = <<EOS;
-(
- artistid serial PRIMARY KEY
- , name VARCHAR(100)
- , rank INTEGER NOT NULL DEFAULT '13'
- , charfield CHAR(10)
- , arrayfield INTEGER[]
-)
-EOS
- $dbh->do("CREATE SCHEMA testschema;");
- $dbh->do("CREATE TABLE testschema.artist $artist_table_def;");
- $dbh->do("CREATE TABLE testschema.sequence_test (pkid1 integer, pkid2 integer, nonpkid integer, name VARCHAR(100), CONSTRAINT pk PRIMARY KEY(pkid1, pkid2));");
- $dbh->do("CREATE SEQUENCE pkid1_seq START 1 MAXVALUE 999999 MINVALUE 0");
- $dbh->do("CREATE SEQUENCE pkid2_seq START 10 MAXVALUE 999999 MINVALUE 0");
- $dbh->do("CREATE SEQUENCE nonpkid_seq START 20 MAXVALUE 999999 MINVALUE 0");
- ok ( $dbh->do('CREATE TABLE testschema.casecheck (id serial PRIMARY KEY, "name" VARCHAR(1), "NAME" VARCHAR(2), "UC_NAME" VARCHAR(3), "storecolumn" VARCHAR(10));'), 'Creation of casecheck table');
- ok ( $dbh->do('CREATE TABLE testschema.array_test (id serial PRIMARY KEY, arrayfield INTEGER[]);'), 'Creation of array_test table');
- $dbh->do("CREATE SCHEMA anothertestschema;");
- $dbh->do("CREATE TABLE anothertestschema.artist $artist_table_def;");
- $dbh->do("CREATE SCHEMA yetanothertestschema;");
- $dbh->do("CREATE TABLE yetanothertestschema.artist $artist_table_def;");
- $dbh->do('set search_path=testschema,public');
+ my $s = DBICTest::Schema->connect($dsn, $user, $pass);
+ # make sure sqlt_type overrides work (::Storage::DBI::Pg does this)
+ ok (!$s->storage->_dbh, 'definitely not connected');
+ is ($s->storage->sqlt_type, 'PostgreSQL', 'sqlt_type correct pre-connection');
+ ok (!$s->storage->_dbh, 'still not connected');
}
-# store_column is called once for create() for non sequence columns
+### connect, create postgres-specific test schema
-ok(my $storecolumn = $schema->resultset('Casecheck')->create({'storecolumn' => 'a'}));
+my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
-is($storecolumn->storecolumn, '#a'); # was '##a'
+drop_test_schema($schema);
+create_test_schema($schema);
+### begin main tests
-# This is in Core now, but it's here just to test that it doesn't break
-$schema->class('Artist')->load_components('PK::Auto');
-cmp_ok( $schema->resultset('Artist')->count, '==', 0, 'this should start with an empty artist table');
+# run a BIG bunch of tests for last-insert-id / Auto-PK / sequence
+# discovery
+run_apk_tests($schema); #< older set of auto-pk tests
+run_extended_apk_tests($schema); #< new extended set of auto-pk tests
-{ # test that auto-pk also works with the defined search path by
- # un-schema-qualifying the table name
- my $artist_name_save = $schema->source("Artist")->name;
- $schema->source("Artist")->name("artist");
- my $unq_new;
- lives_ok {
- $unq_new = $schema->resultset('Artist')->create({ name => 'baz' });
- } 'insert into unqualified, shadowed table succeeds';
- is($unq_new && $unq_new->artistid, 1, "and got correct artistid");
- #test with anothertestschema
- $schema->source('Artist')->name('anothertestschema.artist');
- my $another_new = $schema->resultset('Artist')->create({ name => 'ribasushi'});
- is( $another_new->artistid,1, 'got correct artistid for yetanotherschema');
- #test with yetanothertestschema
- $schema->source('Artist')->name('yetanothertestschema.artist');
- my $yetanother_new = $schema->resultset('Artist')->create({ name => 'ribasushi'});
- is( $yetanother_new->artistid,1, 'got correct artistid for yetanotherschema');
- is( $yetanother_new->artistid,1, 'got correct artistid for yetanotherschema');
+### type_info tests
- $schema->source("Artist")->name($artist_name_save);
-}
-
-my $new = $schema->resultset('Artist')->create({ name => 'foo' });
-
-is($new->artistid, 2, "Auto-PK worked");
-
-$new = $schema->resultset('Artist')->create({ name => 'bar' });
-
-is($new->artistid, 3, "Auto-PK worked");
-
-
my $test_type_info = {
'artistid' => {
'data_type' => 'integer',
@@ -175,8 +107,7 @@
},
};
-
-my $type_info = $schema->storage->columns_info_for('testschema.artist');
+my $type_info = $schema->storage->columns_info_for('dbic_t_schema.artist');
my $artistid_defval = delete $type_info->{artistid}->{default_value};
like($artistid_defval,
qr/^nextval\('([^\.]*\.){0,1}artist_artistid_seq'::(?:text|regclass)\)/,
@@ -184,6 +115,26 @@
is_deeply($type_info, $test_type_info,
'columns_info_for - column data types');
+
+
+
+####### Array tests
+
+BEGIN {
+ package DBICTest::Schema::ArrayTest;
+ push @main::test_classes, __PACKAGE__;
+
+ use strict;
+ use warnings;
+ use base 'DBIx::Class';
+
+ __PACKAGE__->load_components(qw/Core/);
+ __PACKAGE__->table('dbic_t_schema.array_test');
+ __PACKAGE__->add_columns(qw/id arrayfield/);
+ __PACKAGE__->column_info_from_storage(1);
+ __PACKAGE__->set_primary_key('id');
+
+}
SKIP: {
skip "Need DBD::Pg 2.9.2 or newer for array tests", 4 if $DBD::Pg::VERSION < 2.009002;
@@ -213,6 +164,24 @@
}
+
+########## Case check
+
+BEGIN {
+ package DBICTest::Schema::Casecheck;
+ push @main::test_classes, __PACKAGE__;
+
+ use strict;
+ use warnings;
+ use base 'DBIx::Class';
+
+ __PACKAGE__->load_components(qw/Core/);
+ __PACKAGE__->table('dbic_t_schema.casecheck');
+ __PACKAGE__->add_columns(qw/id name NAME uc_name/);
+ __PACKAGE__->column_info_from_storage(1);
+ __PACKAGE__->set_primary_key('id');
+}
+
my $name_info = $schema->source('Casecheck')->column_info( 'name' );
is( $name_info->{size}, 1, "Case sensitive matching info for 'name'" );
@@ -222,83 +191,72 @@
my $uc_name_info = $schema->source('Casecheck')->column_info( 'uc_name' );
is( $uc_name_info->{size}, 3, "Case insensitive matching info for 'uc_name'" );
-# Test SELECT ... FOR UPDATE
-my $HaveSysSigAction = eval "require Sys::SigAction" && !$@;
-if ($HaveSysSigAction) {
- Sys::SigAction->import( 'set_sig_handler' );
-}
+
+
+## Test SELECT ... FOR UPDATE
+
SKIP: {
- skip "Sys::SigAction is not available", 3 unless $HaveSysSigAction;
- # create a new schema
- my $schema2 = DBICTest::Schema->connect($dsn, $user, $pass);
- $schema2->source("Artist")->name("testschema.artist");
+ if(eval "require Sys::SigAction" && !$@) {
+ Sys::SigAction->import( 'set_sig_handler' );
+ }
+ else {
+ skip "Sys::SigAction is not available", 6;
+ }
- $schema->txn_do( sub {
- my $artist = $schema->resultset('Artist')->search(
- {
- artistid => 1
- },
- {
- for => 'update'
- }
- )->first;
- is($artist->artistid, 1, "select for update returns artistid = 1");
+ my ($timed_out, $artist2);
- my $artist_from_schema2;
- my $error_ok = 0;
- eval {
- my $h = set_sig_handler( 'ALRM', sub { die "DBICTestTimeout" } );
- alarm(2);
- $artist_from_schema2 = $schema2->resultset('Artist')->find(1);
- $artist_from_schema2->name('fooey');
- $artist_from_schema2->update;
- alarm(0);
- };
- if (my $e = $@) {
- $error_ok = $e =~ /DBICTestTimeout/;
- }
-
+ for my $t (
+ {
# Make sure that an error was raised, and that the update failed
- ok($error_ok, "update from second schema times out");
- ok($artist_from_schema2->is_column_changed('name'), "'name' column is still dirty from second schema");
- });
-}
+ update_lock => 1,
+ test_sub => sub {
+ ok($timed_out, "update from second schema times out");
+ ok($artist2->is_column_changed('name'), "'name' column is still dirty from second schema");
+ },
+ },
+ {
+ # Make sure that an error was NOT raised, and that the update succeeded
+ update_lock => 0,
+ test_sub => sub {
+ ok(! $timed_out, "update from second schema DOES NOT timeout");
+ ok(! $artist2->is_column_changed('name'), "'name' column is NOT dirty from second schema");
+ },
+ },
+ ) {
+ # create a new schema
+ my $schema2 = DBICTest::Schema->connect($dsn, $user, $pass);
+ $schema2->source("Artist")->name("dbic_t_schema.artist");
-SKIP: {
- skip "Sys::SigAction is not available", 3 unless $HaveSysSigAction;
- # create a new schema
- my $schema2 = DBICTest::Schema->connect($dsn, $user, $pass);
- $schema2->source("Artist")->name("testschema.artist");
-
- $schema->txn_do( sub {
+ $schema->txn_do( sub {
my $artist = $schema->resultset('Artist')->search(
{
artistid => 1
},
+ $t->{update_lock} ? { for => 'update' } : {}
)->first;
- is($artist->artistid, 1, "select for update returns artistid = 1");
+ is($artist->artistid, 1, "select returns artistid = 1");
- my $artist_from_schema2;
- my $error_ok = 0;
+ $timed_out = 0;
eval {
my $h = set_sig_handler( 'ALRM', sub { die "DBICTestTimeout" } );
alarm(2);
- $artist_from_schema2 = $schema2->resultset('Artist')->find(1);
- $artist_from_schema2->name('fooey');
- $artist_from_schema2->update;
+ $artist2 = $schema2->resultset('Artist')->find(1);
+ $artist2->name('fooey');
+ $artist2->update;
alarm(0);
};
- if (my $e = $@) {
- $error_ok = $e =~ /DBICTestTimeout/;
- }
+ $timed_out = $@ =~ /DBICTestTimeout/;
+ });
- # Make sure that an error was NOT raised, and that the update succeeded
- ok(! $error_ok, "update from second schema DOES NOT timeout");
- ok(! $artist_from_schema2->is_column_changed('name'), "'name' column is NOT dirty from second schema");
- });
+ $t->{test_sub}->();
+ }
}
+
+######## other older Auto-pk tests
+
+$schema->source("SequenceTest")->name("dbic_t_schema.sequence_test");
for (1..5) {
my $st = $schema->resultset('SequenceTest')->create({ name => 'foo' });
is($st->pkid1, $_, "Oracle Auto-PK without trigger: First primary key");
@@ -308,27 +266,404 @@
my $st = $schema->resultset('SequenceTest')->create({ name => 'foo', pkid1 => 55 });
is($st->pkid1, 55, "Oracle Auto-PK without trigger: First primary key set manually");
-sub _cleanup {
- my $dbh = shift or return;
+done_testing;
- for my $stat (
- 'DROP TABLE testschema.artist',
- 'DROP TABLE testschema.casecheck',
- 'DROP TABLE testschema.sequence_test',
- 'DROP TABLE testschema.array_test',
- 'DROP SEQUENCE pkid1_seq',
- 'DROP SEQUENCE pkid2_seq',
- 'DROP SEQUENCE nonpkid_seq',
- 'DROP SCHEMA testschema',
- 'DROP TABLE anothertestschema.artist',
- 'DROP SCHEMA anothertestschema',
- 'DROP TABLE yetanothertestschema.artist',
- 'DROP SCHEMA yetanothertestschema',
- ) {
- eval { $dbh->do ($stat) };
- }
+exit;
+
+END {
+ return unless $schema;
+ drop_test_schema($schema);
+ eapk_drop_all( $schema)
+};
+
+
+######### SUBROUTINES
+
+sub create_test_schema {
+ my $schema = shift;
+ $schema->storage->dbh_do(sub {
+ my (undef,$dbh) = @_;
+
+ local $dbh->{Warn} = 0;
+
+ my $std_artist_table = <<EOS;
+(
+ artistid serial PRIMARY KEY
+ , name VARCHAR(100)
+ , rank INTEGER NOT NULL DEFAULT '13'
+ , charfield CHAR(10)
+ , arrayfield INTEGER[]
+)
+EOS
+
+ $dbh->do("CREATE SCHEMA dbic_t_schema");
+ $dbh->do("CREATE TABLE dbic_t_schema.artist $std_artist_table");
+ $dbh->do(<<EOS);
+CREATE TABLE dbic_t_schema.sequence_test (
+ pkid1 integer
+ , pkid2 integer
+ , nonpkid integer
+ , name VARCHAR(100)
+ , CONSTRAINT pk PRIMARY KEY(pkid1, pkid2)
+)
+EOS
+ $dbh->do("CREATE SEQUENCE pkid1_seq START 1 MAXVALUE 999999 MINVALUE 0");
+ $dbh->do("CREATE SEQUENCE pkid2_seq START 10 MAXVALUE 999999 MINVALUE 0");
+ $dbh->do("CREATE SEQUENCE nonpkid_seq START 20 MAXVALUE 999999 MINVALUE 0");
+ $dbh->do(<<EOS);
+CREATE TABLE dbic_t_schema.casecheck (
+ id serial PRIMARY KEY
+ , "name" VARCHAR(1)
+ , "NAME" VARCHAR(2)
+ , "UC_NAME" VARCHAR(3)
+)
+EOS
+ $dbh->do(<<EOS);
+CREATE TABLE dbic_t_schema.array_test (
+ id serial PRIMARY KEY
+ , arrayfield INTEGER[]
+)
+EOS
+ $dbh->do("CREATE SCHEMA dbic_t_schema_2");
+ $dbh->do("CREATE TABLE dbic_t_schema_2.artist $std_artist_table");
+ $dbh->do("CREATE SCHEMA dbic_t_schema_3");
+ $dbh->do("CREATE TABLE dbic_t_schema_3.artist $std_artist_table");
+ $dbh->do('set search_path=dbic_t_schema,public');
+ $dbh->do("CREATE SCHEMA dbic_t_schema_4");
+ $dbh->do("CREATE SCHEMA dbic_t_schema_5");
+ $dbh->do(<<EOS);
+ CREATE TABLE dbic_t_schema_4.artist
+ (
+ artistid integer not null default nextval('artist_artistid_seq'::regclass) PRIMARY KEY
+ , name VARCHAR(100)
+ , rank INTEGER NOT NULL DEFAULT '13'
+ , charfield CHAR(10)
+ , arrayfield INTEGER[]
+ );
+EOS
+ $dbh->do('set search_path=public,dbic_t_schema,dbic_t_schema_3');
+ $dbh->do('create sequence public.artist_artistid_seq'); #< in the public schema
+ $dbh->do(<<EOS);
+ CREATE TABLE dbic_t_schema_5.artist
+ (
+ artistid integer not null default nextval('public.artist_artistid_seq'::regclass) PRIMARY KEY
+ , name VARCHAR(100)
+ , rank INTEGER NOT NULL DEFAULT '13'
+ , charfield CHAR(10)
+ , arrayfield INTEGER[]
+ );
+EOS
+ $dbh->do('set search_path=dbic_t_schema,public');
+ });
}
-done_testing;
-END { _cleanup($dbh) }
+
+sub drop_test_schema {
+ my ( $schema, $warn_exceptions ) = @_;
+
+ $schema->storage->dbh_do(sub {
+ my (undef,$dbh) = @_;
+
+ local $dbh->{Warn} = 0;
+
+ for my $stat (
+ 'DROP SCHEMA dbic_t_schema_5 CASCADE',
+ 'DROP SEQUENCE public.artist_artistid_seq',
+ 'DROP SCHEMA dbic_t_schema_4 CASCADE',
+ 'DROP SCHEMA dbic_t_schema CASCADE',
+ 'DROP SEQUENCE pkid1_seq',
+ 'DROP SEQUENCE pkid2_seq',
+ 'DROP SEQUENCE nonpkid_seq',
+ 'DROP SCHEMA dbic_t_schema_2 CASCADE',
+ 'DROP SCHEMA dbic_t_schema_3 CASCADE',
+ ) {
+ eval { $dbh->do ($stat) };
+ diag $@ if $@ && $warn_exceptions;
+ }
+ });
+}
+
+
+### auto-pk / last_insert_id / sequence discovery
+sub run_apk_tests {
+ my $schema = shift;
+
+ # This is in Core now, but it's here just to test that it doesn't break
+ $schema->class('Artist')->load_components('PK::Auto');
+ cmp_ok( $schema->resultset('Artist')->count, '==', 0, 'this should start with an empty artist table');
+
+ # test that auto-pk also works with the defined search path by
+ # un-schema-qualifying the table name
+ apk_t_set($schema,'artist');
+
+ my $unq_new;
+ lives_ok {
+ $unq_new = $schema->resultset('Artist')->create({ name => 'baz' });
+ } 'insert into unqualified, shadowed table succeeds';
+
+ is($unq_new && $unq_new->artistid, 1, "and got correct artistid");
+
+ my @test_schemas = ( [qw| dbic_t_schema_2 1 |],
+ [qw| dbic_t_schema_3 1 |],
+ [qw| dbic_t_schema_4 2 |],
+ [qw| dbic_t_schema_5 1 |],
+ );
+ foreach my $t ( @test_schemas ) {
+ my ($sch_name, $start_num) = @$t;
+ #test with dbic_t_schema_2
+ apk_t_set($schema,"$sch_name.artist");
+ my $another_new;
+ lives_ok {
+ $another_new = $schema->resultset('Artist')->create({ name => 'Tollbooth Willy'});
+ is( $another_new->artistid,$start_num, "got correct artistid for $sch_name")
+ or diag "USED SEQUENCE: ".($schema->source('Artist')->column_info('artistid')->{sequence} || '<none>');
+ } "$sch_name liid 1 did not die"
+ or diag "USED SEQUENCE: ".($schema->source('Artist')->column_info('artistid')->{sequence} || '<none>');
+ lives_ok {
+ $another_new = $schema->resultset('Artist')->create({ name => 'Adam Sandler'});
+ is( $another_new->artistid,$start_num+1, "got correct artistid for $sch_name")
+ or diag "USED SEQUENCE: ".($schema->source('Artist')->column_info('artistid')->{sequence} || '<none>');
+ } "$sch_name liid 2 did not die"
+ or diag "USED SEQUENCE: ".($schema->source('Artist')->column_info('artistid')->{sequence} || '<none>');
+
+ }
+
+ lives_ok {
+ apk_t_set($schema,'dbic_t_schema.artist');
+ my $new = $schema->resultset('Artist')->create({ name => 'foo' });
+ is($new->artistid, 4, "Auto-PK worked");
+ $new = $schema->resultset('Artist')->create({ name => 'bar' });
+ is($new->artistid, 5, "Auto-PK worked");
+ } 'old auto-pk tests did not die either';
+}
+
+# sets the artist table name and clears sequence name cache
+sub apk_t_set {
+ my ( $s, $n ) = @_;
+ $s->source("Artist")->name($n);
+ $s->source('Artist')->column_info('artistid')->{sequence} = undef; #< clear sequence name cache
+}
+
+
+######## EXTENDED AUTO-PK TESTS
+
+my @eapk_id_columns;
+BEGIN {
+ package DBICTest::Schema::ExtAPK;
+ push @main::test_classes, __PACKAGE__;
+
+ use strict;
+ use warnings;
+ use base 'DBIx::Class';
+
+ __PACKAGE__->load_components(qw/Core/);
+ __PACKAGE__->table('apk');
+
+ @eapk_id_columns = qw( id1 id2 id3 id4 );
+ __PACKAGE__->add_columns(
+ map { $_ => { data_type => 'integer', is_auto_increment => 1 } }
+ @eapk_id_columns
+ );
+
+ __PACKAGE__->set_primary_key('id2'); #< note the SECOND column is
+ #the primary key
+}
+
+my @eapk_schemas;
+BEGIN{ @eapk_schemas = map "dbic_apk_$_", 0..5 }
+
+sub run_extended_apk_tests {
+ my $schema = shift;
+
+ #save the search path and reset it at the end
+ my $search_path_save = eapk_get_search_path($schema);
+
+ eapk_drop_all($schema);
+
+ # make the test schemas and sequences
+ $schema->storage->dbh_do(sub {
+ my ( undef, $dbh ) = @_;
+
+ $dbh->do("CREATE SCHEMA $_")
+ for @eapk_schemas;
+
+ $dbh->do("CREATE SEQUENCE $eapk_schemas[5].fooseq");
+ $dbh->do("CREATE SEQUENCE $eapk_schemas[4].fooseq");
+ $dbh->do("CREATE SEQUENCE $eapk_schemas[3].fooseq");
+
+ $dbh->do("SET search_path = ".join ',', @eapk_schemas );
+ });
+
+ # clear our search_path cache
+ $schema->storage->{_pg_search_path} = undef;
+
+ eapk_create( $schema,
+ with_search_path => [0,1],
+ );
+ eapk_create( $schema,
+ with_search_path => [1,0,'public'],
+ nextval => "$eapk_schemas[5].fooseq",
+ );
+ eapk_create( $schema,
+ with_search_path => ['public',0,1],
+ qualify_table => 2,
+ );
+ eapk_create( $schema,
+ with_search_path => [3,1,0,'public'],
+ nextval => "$eapk_schemas[4].fooseq",
+ );
+ eapk_create( $schema,
+ with_search_path => [3,1,0,'public'],
+ nextval => "$eapk_schemas[3].fooseq",
+ qualify_table => 4,
+ );
+
+ eapk_poke( $schema, 0 );
+ eapk_poke( $schema, 2 );
+ eapk_poke( $schema, 4 );
+ eapk_poke( $schema, 1 );
+ eapk_poke( $schema, 0 );
+ eapk_poke( $schema, 1 );
+ eapk_poke( $schema, 4 );
+ eapk_poke( $schema, 3 );
+ eapk_poke( $schema, 1 );
+ eapk_poke( $schema, 2 );
+ eapk_poke( $schema, 0 );
+
+ # set our search path back
+ eapk_set_search_path( $schema, @$search_path_save );
+}
+
+# do a DBIC create on the apk table in the given schema number (which is an
+# index of @eapk_schemas)
+
+my %seqs; #< sanity-check hash of schema.table.col => currval of its sequence
+
+sub eapk_poke {
+ my ($s, $schema_num) = @_;
+
+ my $schema_name = defined $schema_num
+ ? $eapk_schemas[$schema_num]
+ : '';
+
+ my $schema_name_actual = $schema_name || eapk_get_search_path($s)->[0];
+
+ $s->source('ExtAPK')->name($schema_name ? $schema_name.'.apk' : 'apk');
+ #< clear sequence name cache
+ $s->source('ExtAPK')->column_info($_)->{sequence} = undef
+ for @eapk_id_columns;
+
+ no warnings 'uninitialized';
+ lives_ok {
+ my $new;
+ for my $inc (1,2,3) {
+ $new = $schema->resultset('ExtAPK')->create({});
+ my $proper_seqval = ++$seqs{"$schema_name_actual.apk.id2"};
+ is( $new->id2, $proper_seqval, "$schema_name_actual.apk.id2 correct inc $inc" )
+ or eapk_seq_diag($s,$schema_name);
+ $new->discard_changes;
+ for my $id (grep $_ ne 'id2', @eapk_id_columns) {
+ my $proper_seqval = ++$seqs{"$schema_name_actual.apk.$id"};
+ is( $new->$id, $proper_seqval, "$schema_name_actual.apk.$id correct inc $inc" )
+ or eapk_seq_diag($s,$schema_name);
+ }
+ }
+ } "create in schema '$schema_name' lives"
+ or eapk_seq_diag($s,$schema_name);
+}
+
+# print diagnostic info on which sequences were found in the ExtAPK
+# class
+sub eapk_seq_diag {
+ my $s = shift;
+ my $schema = shift || eapk_get_search_path($s)->[0];
+
+ diag "$schema.apk sequences: ",
+ join(', ',
+ map "$_:".($s->source('ExtAPK')->column_info($_)->{sequence} || '<none>'),
+ @eapk_id_columns
+ );
+}
+
+# get the postgres search path as an arrayref
+sub eapk_get_search_path {
+ my ( $s ) = @_;
+ # cache the search path as ['schema','schema',...] in the storage
+ # obj
+
+ return $s->storage->dbh_do(sub {
+ my (undef, $dbh) = @_;
+ my @search_path;
+ my ($sp_string) = $dbh->selectrow_array('SHOW search_path');
+ while ( $sp_string =~ s/("[^"]+"|[^,]+),?// ) {
+ unless( defined $1 and length $1 ) {
+ die "search path sanity check failed: '$1'";
+ }
+ push @search_path, $1;
+ }
+ \@search_path
+ });
+}
+sub eapk_set_search_path {
+ my ($s, at sp) = @_;
+ my $sp = join ',', at sp;
+ $s->storage->dbh_do( sub { $_[1]->do("SET search_path = $sp") } );
+}
+
+# create the apk table in the given schema, can set whether the table name is qualified, what the nextval is for the second ID
+sub eapk_create {
+ my ($schema, %a) = @_;
+
+ $schema->storage->dbh_do(sub {
+ my (undef,$dbh) = @_;
+
+ my $searchpath_save;
+ if ( $a{with_search_path} ) {
+ ($searchpath_save) = $dbh->selectrow_array('SHOW search_path');
+
+ my $search_path = join ',',map {/\D/ ? $_ : $eapk_schemas[$_]} @{$a{with_search_path}};
+
+ $dbh->do("SET search_path = $search_path");
+ }
+
+ my $table_name = $a{qualify_table}
+ ? ($eapk_schemas[$a{qualify_table}] || die). ".apk"
+ : 'apk';
+ local $_[1]->{Warn} = 0;
+
+ my $id_def = $a{nextval}
+ ? "integer primary key not null default nextval('$a{nextval}'::regclass)"
+ : 'serial primary key';
+ $dbh->do(<<EOS);
+CREATE TABLE $table_name (
+ id1 serial
+ , id2 $id_def
+ , id3 serial
+ , id4 serial
+)
+EOS
+
+ if( $searchpath_save ) {
+ $dbh->do("SET search_path = $searchpath_save");
+ }
+ });
+}
+
+sub eapk_drop_all {
+ my ( $schema, $warn_exceptions ) = @_;
+
+ $schema->storage->dbh_do(sub {
+ my (undef,$dbh) = @_;
+
+ local $dbh->{Warn} = 0;
+
+ # drop the test schemas
+ for (@eapk_schemas ) {
+ eval{ $dbh->do("DROP SCHEMA $_ CASCADE") };
+ diag $@ if $@ && $warn_exceptions;
+ }
+
+
+ });
+}
Modified: DBIx-Class/0.08/branches/ado_mssql/t/746mssql.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/746mssql.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/746mssql.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -12,8 +12,6 @@
plan skip_all => 'Set $ENV{DBICTEST_MSSQL_ODBC_DSN}, _USER and _PASS to run this test'
unless ($dsn && $user);
-plan tests => 39;
-
DBICTest::Schema->load_classes('ArtistGUID');
my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
@@ -198,6 +196,8 @@
});
lives_ok ( sub {
+ # start a new connection, make sure rebless works
+ my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
$schema->populate ('Owners', [
[qw/id name /],
[qw/1 wiggle/],
@@ -218,7 +218,22 @@
]);
}, 'populate with PKs supplied ok' );
+lives_ok (sub {
+ # start a new connection, make sure rebless works
+ # test an insert with a supplied identity, followed by one without
+ my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
+ for (1..2) {
+ my $id = $_ * 20 ;
+ $schema->resultset ('Owners')->create ({ id => $id, name => "troglodoogle $id" });
+ $schema->resultset ('Owners')->create ({ name => "troglodoogle " . ($id + 1) });
+ }
+}, 'create with/without PKs ok' );
+
+is ($schema->resultset ('Owners')->count, 19, 'owner rows really in db' );
+
lives_ok ( sub {
+ # start a new connection, make sure rebless works
+ my $schema = DBICTest::Schema->connect($dsn, $user, $pass);
$schema->populate ('BooksInLibrary', [
[qw/source owner title /],
[qw/Library 1 secrets0/],
@@ -325,9 +340,10 @@
],
);
}
-
}
+done_testing;
+
# clean up our mess
END {
if (my $dbh = eval { $schema->storage->_dbh }) {
Modified: DBIx-Class/0.08/branches/ado_mssql/t/746sybase.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/746sybase.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/746sybase.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,5 +1,5 @@
use strict;
-use warnings;
+use warnings;
use Test::More;
use Test::Exception;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/74mssql.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/74mssql.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/74mssql.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -18,7 +18,7 @@
plan skip_all => 'Set $ENV{DBICTEST_MSSQL_DSN}, _USER and _PASS to run this test'
unless ($dsn);
-my $TESTS = 13;
+my $TESTS = 15;
plan tests => $TESTS * 2;
@@ -133,6 +133,27 @@
is $rs->find($row->id)->amount,
undef, 'updated money value to NULL round-trip';
+
+ $rs->create({ amount => 300 }) for (1..3);
+
+ # test multiple active statements
+ lives_ok {
+ my $artist_rs = $schema->resultset('Artist');
+ while (my $row = $rs->next) {
+ my $artist = $artist_rs->next;
+ }
+ $rs->reset;
+ } 'multiple active statements';
+
+ # test multiple active statements in a transaction
+ TODO: {
+ local $TODO = 'needs similar FreeTDS fixes to the ones in Sybase.pm';
+ lives_ok {
+ $schema->txn_do(sub {
+ $rs->create({ amount => 400 });
+ });
+ } 'simple transaction';
+ }
}
# clean up our mess
Modified: DBIx-Class/0.08/branches/ado_mssql/t/76joins.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/76joins.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/76joins.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -4,7 +4,6 @@
use Test::More;
use lib qw(t/lib);
use DBICTest;
-use Data::Dumper;
use DBIC::SqlMakerTest;
my $schema = DBICTest->init_schema();
Modified: DBIx-Class/0.08/branches/ado_mssql/t/80unique.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/80unique.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/80unique.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,14 +1,14 @@
use strict;
-use warnings;
+use warnings;
use Test::More;
use lib qw(t/lib);
use DBICTest;
+use DBIC::SqlMakerTest;
+use DBIC::DebugObj;
my $schema = DBICTest->init_schema();
-plan tests => 49;
-
# Check the defined unique constraints
is_deeply(
[ sort $schema->source('CD')->unique_constraint_names ],
@@ -209,4 +209,27 @@
);
ok($cd2->in_storage, 'Updating year using update_or_new was successful');
is($cd2->id, $cd1->id, 'Got the same CD using update_or_new');
-}
\ No newline at end of file
+}
+
+# make sure the ident condition is assembled sanely
+{
+ my $artist = $schema->resultset('Artist')->next;
+
+ my ($sql, @bind);
+ $schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind)),
+ $schema->storage->debug(1);
+
+ $artist->discard_changes;
+
+ is_same_sql_bind (
+ $sql,
+ \@bind,
+ 'SELECT me.artistid, me.name, me.rank, me.charfield FROM artist me WHERE me.artistid = ?',
+ [qw/'1'/],
+ );
+
+ $schema->storage->debug(0);
+ $schema->storage->debugobj(undef);
+}
+
+done_testing;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/81transactions.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/81transactions.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/81transactions.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,23 +1,22 @@
use strict;
-use warnings;
+use warnings;
use Test::More;
+use Test::Warn;
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
my $schema = DBICTest->init_schema();
-plan tests => 64;
-
my $code = sub {
my ($artist, @cd_titles) = @_;
-
+
$artist->create_related('cds', {
title => $_,
year => 2006,
}) foreach (@cd_titles);
-
+
return $artist->cds->all;
};
@@ -258,13 +257,13 @@
name => 'Death Cab for Cutie',
made_up_column => 1,
});
-
+
$guard->commit;
} qr/No such column made_up_column .*? at .*?81transactions.t line \d+/s, "Error propogated okay";
ok(!$artist_rs->find({name => 'Death Cab for Cutie'}), "Artist not created");
- my $inner_exception;
+ my $inner_exception; # set in inner() below
eval {
outer($schema, 1);
};
@@ -273,14 +272,11 @@
ok(!$artist_rs->find({name => 'Death Cab for Cutie'}), "Artist not created");
lives_ok (sub {
- my $w;
- local $SIG{__WARN__} = sub { $w = shift };
-
- # The 0 arg says don't die, just let the scope guard go out of scope
- # forcing a txn_rollback to happen
- outer($schema, 0);
-
- like ($w, qr/A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or an error/, 'Out of scope warning detected');
+ warnings_exist ( sub {
+ # The 0 arg says don't die, just let the scope guard go out of scope
+ # forcing a txn_rollback to happen
+ outer($schema, 0);
+ }, qr/A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or error. Rolling back./, 'Out of scope warning detected');
ok(!$artist_rs->find({name => 'Death Cab for Cutie'}), "Artist not created");
}, 'rollback successful withot exception');
@@ -319,3 +315,63 @@
$inner_guard->commit;
}
}
+
+# make sure the guard does not eat exceptions
+{
+ my $schema = DBICTest->init_schema();
+ throws_ok (sub {
+ my $guard = $schema->txn_scope_guard;
+ $schema->resultset ('Artist')->create ({ name => 'bohhoo'});
+
+ $schema->storage->disconnect; # this should freak out the guard rollback
+
+ die 'Deliberate exception';
+ }, qr/Deliberate exception.+Rollback failed/s);
+}
+
+# make sure it warns *big* on failed rollbacks
+{
+ my $schema = DBICTest->init_schema();
+
+ # something is really confusing Test::Warn here, no time to debug
+=begin
+ warnings_exist (
+ sub {
+ my $guard = $schema->txn_scope_guard;
+ $schema->resultset ('Artist')->create ({ name => 'bohhoo'});
+
+ $schema->storage->disconnect; # this should freak out the guard rollback
+ },
+ [
+ qr/A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or error. Rolling back./,
+ qr/\*+ ROLLBACK FAILED\!\!\! \*+/,
+ ],
+ 'proper warnings generated on out-of-scope+rollback failure'
+ );
+=cut
+
+ my @want = (
+ qr/A DBIx::Class::Storage::TxnScopeGuard went out of scope without explicit commit or error. Rolling back./,
+ qr/\*+ ROLLBACK FAILED\!\!\! \*+/,
+ );
+
+ my @w;
+ local $SIG{__WARN__} = sub {
+ if (grep {$_[0] =~ $_} (@want)) {
+ push @w, $_[0];
+ }
+ else {
+ warn $_[0];
+ }
+ };
+ {
+ my $guard = $schema->txn_scope_guard;
+ $schema->resultset ('Artist')->create ({ name => 'bohhoo'});
+
+ $schema->storage->disconnect; # this should freak out the guard rollback
+ }
+
+ is (@w, 2, 'Both expected warnings found');
+}
+
+done_testing;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/83cache.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/83cache.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/83cache.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -74,8 +74,6 @@
}
);
-use Data::Dumper; $Data::Dumper::Deparse = 1;
-
# start test for prefetch SELECT count
$queries = 0;
$schema->storage->debug(1);
Modified: DBIx-Class/0.08/branches/ado_mssql/t/86sqlt.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/86sqlt.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/86sqlt.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -5,8 +5,12 @@
use lib qw(t/lib);
use DBICTest;
-eval "use SQL::Translator";
-plan skip_all => 'SQL::Translator required' if $@;
+BEGIN {
+ require DBIx::Class::Storage::DBI;
+ plan skip_all =>
+ 'Test needs SQL::Translator ' . DBIx::Class::Storage::DBI->_sqlt_minimum_version
+ if not DBIx::Class::Storage::DBI->_sqlt_version_ok;
+}
my $schema = DBICTest->init_schema (no_deploy => 1);
Modified: DBIx-Class/0.08/branches/ado_mssql/t/88result_set_column.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/88result_set_column.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/88result_set_column.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,15 +1,14 @@
use strict;
-use warnings;
+use warnings;
use Test::More;
+use Test::Warn;
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
my $schema = DBICTest->init_schema();
-plan tests => 20;
-
my $rs = $schema->resultset("CD")->search({}, { order_by => 'cdid' });
my $rs_title = $rs->get_column('title');
@@ -33,6 +32,10 @@
is($rs_year->first, 1999, "first okay");
+warnings_exist (sub {
+ is($rs_year->single, 1999, "single okay");
+}, qr/Query returned more than one row/, 'single warned');
+
# test +select/+as for single column
my $psrs = $schema->resultset('CD')->search({},
{
@@ -94,3 +97,5 @@
[ $rs->get_column ('cdid')->all ],
'prefetch properly collapses amount of rows from get_column',
);
+
+done_testing;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/89dbicadmin.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/89dbicadmin.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/89dbicadmin.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -16,7 +16,7 @@
plan skip_all => 'Install Text::CSV_XS or Text::CSV_PP to run this test' if ($@);
}
-my @json_backends = qw/XS JSON DWIW Syck/;
+my @json_backends = qw/XS JSON DWIW/;
my $tests_per_run = 5;
plan tests => $tests_per_run * @json_backends;
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/91debug.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/91debug.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/91debug.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,73 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use DBIC::DebugObj;
-use DBIC::SqlMakerTest;
-
-my $schema = DBICTest->init_schema();
-
-plan tests => 7;
-
-ok ( $schema->storage->debug(1), 'debug' );
-ok ( defined(
- $schema->storage->debugfh(
- IO::File->new('t/var/sql.log', 'w')
- )
- ),
- 'debugfh'
- );
-
-$schema->storage->debugfh->autoflush(1);
-my $rs = $schema->resultset('CD')->search({});
-$rs->count();
-
-my $log = new IO::File('t/var/sql.log', 'r') or die($!);
-my $line = <$log>;
-$log->close();
-ok($line =~ /^SELECT COUNT/, 'Log success');
-
-$schema->storage->debugfh(undef);
-$ENV{'DBIC_TRACE'} = '=t/var/foo.log';
-$rs = $schema->resultset('CD')->search({});
-$rs->count();
-$log = new IO::File('t/var/foo.log', 'r') or die($!);
-$line = <$log>;
-$log->close();
-ok($line =~ /^SELECT COUNT/, 'Log success');
-$schema->storage->debugobj->debugfh(undef);
-delete($ENV{'DBIC_TRACE'});
-open(STDERRCOPY, '>&STDERR');
-stat(STDERRCOPY); # nop to get warnings quiet
-close(STDERR);
-eval {
- $rs = $schema->resultset('CD')->search({});
- $rs->count();
-};
-ok($@, 'Died on closed FH');
-open(STDERR, '>&STDERRCOPY');
-
-# test trace output correctness for bind params
-{
- my ($sql, @bind);
- $schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind));
-
- my @cds = $schema->resultset('CD')->search( { artist => 1, cdid => { -between => [ 1, 3 ] }, } );
- is_same_sql_bind(
- $sql, \@bind,
- "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) ): '1', '1', '3'",
- [qw/'1' '1' '3'/],
- 'got correct SQL with all bind parameters (debugcb)'
- );
-
- @cds = $schema->resultset('CD')->search( { artist => 1, cdid => { -between => [ 1, 3 ] }, } );
- is_same_sql_bind(
- $sql, \@bind,
- "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) )", ["'1'", "'1'", "'3'"],
- 'got correct SQL with all bind parameters (debugobj)'
- );
-}
-
-1;
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/92storage.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/92storage.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/92storage.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,172 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use Data::Dumper;
-
-{
- package DBICTest::ExplodingStorage::Sth;
- use strict;
- use warnings;
-
- sub execute { die "Kablammo!" }
-
- sub bind_param {}
-
- package DBICTest::ExplodingStorage;
- use strict;
- use warnings;
- use base 'DBIx::Class::Storage::DBI::SQLite';
-
- my $count = 0;
- sub sth {
- my ($self, $sql) = @_;
- return bless {}, "DBICTest::ExplodingStorage::Sth" unless $count++;
- return $self->next::method($sql);
- }
-
- sub connected {
- return 0 if $count == 1;
- return shift->next::method(@_);
- }
-}
-
-plan tests => 17;
-
-my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
-
-is( ref($schema->storage), 'DBIx::Class::Storage::DBI::SQLite',
- 'Storage reblessed correctly into DBIx::Class::Storage::DBI::SQLite' );
-
-my $storage = $schema->storage;
-$storage->ensure_connected;
-
-eval {
- $schema->storage->throw_exception('test_exception_42');
-};
-like($@, qr/\btest_exception_42\b/, 'basic exception');
-
-eval {
- $schema->resultset('CD')->search_literal('broken +%$#$1')->all;
-};
-like($@, qr/prepare_cached failed/, 'exception via DBI->HandleError, etc');
-
-bless $storage, "DBICTest::ExplodingStorage";
-$schema->storage($storage);
-
-eval {
- $schema->resultset('Artist')->create({ name => "Exploding Sheep" });
-};
-
-is($@, "", "Exploding \$sth->execute was caught");
-
-is(1, $schema->resultset('Artist')->search({name => "Exploding Sheep" })->count,
- "And the STH was retired");
-
-
-# testing various invocations of connect_info ([ ... ])
-
-my $coderef = sub { 42 };
-my $invocations = {
- 'connect_info ([ $d, $u, $p, \%attr, \%extra_attr])' => {
- args => [
- 'foo',
- 'bar',
- undef,
- {
- on_connect_do => [qw/a b c/],
- PrintError => 0,
- },
- {
- AutoCommit => 1,
- on_disconnect_do => [qw/d e f/],
- },
- {
- unsafe => 1,
- auto_savepoint => 1,
- },
- ],
- dbi_connect_info => [
- 'foo',
- 'bar',
- undef,
- {
- %{$storage->_default_dbi_connect_attributes || {} },
- PrintError => 0,
- AutoCommit => 1,
- },
- ],
- },
-
- 'connect_info ([ \%code, \%extra_attr ])' => {
- args => [
- $coderef,
- {
- on_connect_do => [qw/a b c/],
- PrintError => 0,
- AutoCommit => 1,
- on_disconnect_do => [qw/d e f/],
- },
- {
- unsafe => 1,
- auto_savepoint => 1,
- },
- ],
- dbi_connect_info => [
- $coderef,
- ],
- },
-
- 'connect_info ([ \%attr ])' => {
- args => [
- {
- on_connect_do => [qw/a b c/],
- PrintError => 1,
- AutoCommit => 0,
- on_disconnect_do => [qw/d e f/],
- user => 'bar',
- dsn => 'foo',
- },
- {
- unsafe => 1,
- auto_savepoint => 1,
- },
- ],
- dbi_connect_info => [
- 'foo',
- 'bar',
- undef,
- {
- %{$storage->_default_dbi_connect_attributes || {} },
- PrintError => 1,
- AutoCommit => 0,
- },
- ],
- },
-};
-
-for my $type (keys %$invocations) {
-
- # we can not use a cloner portably because of the coderef
- # so compare dumps instead
- local $Data::Dumper::Sortkeys = 1;
- my $arg_dump = Dumper ($invocations->{$type}{args});
-
- $storage->connect_info ($invocations->{$type}{args});
-
- is ($arg_dump, Dumper ($invocations->{$type}{args}), "$type didn't modify passed arguments");
-
-
- is_deeply ($storage->_dbi_connect_info, $invocations->{$type}{dbi_connect_info}, "$type produced correct _dbi_connect_info");
- ok ( (not $storage->auto_savepoint and not $storage->unsafe), "$type correctly ignored extra hashref");
-
- is_deeply (
- [$storage->on_connect_do, $storage->on_disconnect_do ],
- [ [qw/a b c/], [qw/d e f/] ],
- "$type correctly parsed DBIC specific on_[dis]connect_do",
- );
-}
-
-1;
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/92storage_on_connect_call.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/92storage_on_connect_call.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/92storage_on_connect_call.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,68 +0,0 @@
-use strict;
-use warnings;
-no warnings qw/once redefine/;
-
-use lib qw(t/lib);
-use DBICTest;
-
-use Test::More tests => 9;
-
-my $schema = DBICTest->init_schema(
- no_connect => 1,
- no_deploy => 1,
-);
-
-local *DBIx::Class::Storage::DBI::connect_call_foo = sub {
- isa_ok $_[0], 'DBIx::Class::Storage::DBI',
- 'got storage in connect_call method';
- is $_[1], 'bar', 'got param in connect_call method';
-};
-
-local *DBIx::Class::Storage::DBI::disconnect_call_foo = sub {
- isa_ok $_[0], 'DBIx::Class::Storage::DBI',
- 'got storage in disconnect_call method';
-};
-
-ok $schema->connection(
- DBICTest->_database,
- {
- on_connect_call => [
- [ do_sql => 'create table test1 (id integer)' ],
- [ do_sql => [ 'insert into test1 values (?)', {}, 1 ] ],
- [ do_sql => sub { ['insert into test1 values (2)'] } ],
- [ sub { $_[0]->dbh->do($_[1]) }, 'insert into test1 values (3)' ],
- # this invokes $storage->connect_call_foo('bar') (above)
- [ foo => 'bar' ],
- ],
- on_connect_do => 'insert into test1 values (4)',
- on_disconnect_call => 'foo',
- },
-), 'connection()';
-
-is_deeply (
- $schema->storage->dbh->selectall_arrayref('select * from test1'),
- [ [ 1 ], [ 2 ], [ 3 ], [ 4 ] ],
- 'on_connect_call/do actions worked'
-);
-
-local *DBIx::Class::Storage::DBI::connect_call_foo = sub {
- isa_ok $_[0], 'DBIx::Class::Storage::DBI',
- 'got storage in connect_call method';
-};
-
-local *DBIx::Class::Storage::DBI::connect_call_bar = sub {
- isa_ok $_[0], 'DBIx::Class::Storage::DBI',
- 'got storage in connect_call method';
-};
-
-$schema->storage->disconnect;
-
-ok $schema->connection(
- DBICTest->_database,
- {
- # method list form
- on_connect_call => [ 'foo', sub { ok 1, "coderef in list form" }, 'bar' ],
- },
-), 'connection()';
-
-$schema->storage->ensure_connected;
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/92storage_on_connect_do.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/92storage_on_connect_do.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/92storage_on_connect_do.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,88 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More tests => 12;
-
-use lib qw(t/lib);
-use base 'DBICTest';
-
-
-my $schema = DBICTest->init_schema(
- no_connect => 1,
- no_deploy => 1,
-);
-
-ok $schema->connection(
- DBICTest->_database,
- {
- on_connect_do => 'CREATE TABLE TEST_empty (id INTEGER)',
- },
-), 'connection()';
-
-is_deeply (
- $schema->storage->dbh->selectall_arrayref('SELECT * FROM TEST_empty'),
- [],
- 'string version on_connect_do() worked'
-);
-
-$schema->storage->disconnect;
-
-ok $schema->connection(
- DBICTest->_database,
- {
- on_connect_do => [
- 'CREATE TABLE TEST_empty (id INTEGER)',
- [ 'INSERT INTO TEST_empty VALUES (?)', {}, 2 ],
- \&insert_from_subref,
- ],
- on_disconnect_do =>
- [\&check_exists, 'DROP TABLE TEST_empty', \&check_dropped],
- },
-), 'connection()';
-
-is_deeply (
- $schema->storage->dbh->selectall_arrayref('SELECT * FROM TEST_empty'),
- [ [ 2 ], [ 3 ], [ 7 ] ],
- 'on_connect_do() worked'
-);
-eval { $schema->storage->dbh->do('SELECT 1 FROM TEST_nonexistent'); };
-ok $@, 'Searching for nonexistent table dies';
-
-$schema->storage->disconnect();
-
-my($connected, $disconnected, @cb_args);
-ok $schema->connection(
- DBICTest->_database,
- {
- on_connect_do => sub { $connected = 1; @cb_args = @_; },
- on_disconnect_do => sub { $disconnected = 1 },
- },
-), 'second connection()';
-$schema->storage->dbh->do('SELECT 1');
-ok $connected, 'on_connect_do() called after connect()';
-ok ! $disconnected, 'on_disconnect_do() not called after connect()';
-$schema->storage->disconnect();
-ok $disconnected, 'on_disconnect_do() called after disconnect()';
-
-isa_ok($cb_args[0], 'DBIx::Class::Storage', 'first arg to on_connect_do hook');
-
-sub check_exists {
- my $storage = shift;
- ok $storage->dbh->do('SELECT 1 FROM TEST_empty'), 'Table still exists';
- return;
-}
-
-sub check_dropped {
- my $storage = shift;
- eval { $storage->dbh->do('SELECT 1 FROM TEST_empty'); };
- ok $@, 'Reading from dropped table fails';
- return;
-}
-
-sub insert_from_subref {
- my $storage = shift;
- return [
- [ 'INSERT INTO TEST_empty VALUES (?)', {}, 3 ],
- 'INSERT INTO TEST_empty VALUES (7)',
- ];
-}
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/92storage_ping_count.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/92storage_ping_count.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/92storage_ping_count.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,61 +0,0 @@
-use strict;
-use warnings;
-
-use Test::More;
-use lib qw(t/lib);
-use DBICTest;
-use Data::Dumper;
-use DBIC::SqlMakerTest;
-
-my $ping_count = 0;
-
-{
- local $SIG{__WARN__} = sub {};
- require DBIx::Class::Storage::DBI;
-
- my $ping = \&DBIx::Class::Storage::DBI::_ping;
-
- *DBIx::Class::Storage::DBI::_ping = sub {
- $ping_count++;
- goto &$ping;
- };
-}
-
-
-# measure pings around deploy() separately
-my $schema = DBICTest->init_schema( sqlite_use_file => 1, no_populate => 1 );
-
-is ($ping_count, 0, 'no _ping() calls during deploy');
-$ping_count = 0;
-
-
-
-DBICTest->populate_schema ($schema);
-
-# perform some operations and make sure they don't ping
-
-$schema->resultset('CD')->create({
- cdid => 6, artist => 3, title => 'mtfnpy', year => 2009
-});
-
-$schema->resultset('CD')->create({
- cdid => 7, artist => 3, title => 'mtfnpy2', year => 2009
-});
-
-$schema->storage->_dbh->disconnect;
-
-$schema->resultset('CD')->create({
- cdid => 8, artist => 3, title => 'mtfnpy3', year => 2009
-});
-
-$schema->storage->_dbh->disconnect;
-
-$schema->txn_do(sub {
- $schema->resultset('CD')->create({
- cdid => 9, artist => 3, title => 'mtfnpy4', year => 2009
- });
-});
-
-is $ping_count, 0, 'no _ping() calls';
-
-done_testing;
Added: DBIx-Class/0.08/branches/ado_mssql/t/93autocast.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/93autocast.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/93autocast.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,82 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+{ # Fake storage driver for sqlite with autocast
+ package DBICTest::SQLite::AutoCast;
+ use base qw/
+ DBIx::Class::Storage::DBI::AutoCast
+ DBIx::Class::Storage::DBI::SQLite
+ /;
+ use mro 'c3';
+
+ my $type_map = {
+ datetime => 'DateTime',
+ integer => 'INT',
+ int => undef, # no conversion
+ };
+
+ sub _native_data_type {
+ return $type_map->{$_[1]};
+ }
+}
+
+my $schema = DBICTest->init_schema (storage_type => 'DBICTest::SQLite::AutoCast');
+
+# 'me.id' will be cast unlike the unqualified 'id'
+my $rs = $schema->resultset ('CD')->search ({
+ cdid => { '>', 5 },
+ 'tracks.last_updated_at' => { '!=', undef },
+ 'tracks.last_updated_on' => { '<', 2009 },
+ 'tracks.position' => 4,
+ 'tracks.single_track' => \[ '= ?', [ single_track => [1, 2, 3 ] ] ],
+}, { join => 'tracks' });
+
+my $bind = [
+ [ cdid => 5 ],
+ [ 'tracks.last_updated_on' => 2009 ],
+ [ 'tracks.position' => 4 ],
+ [ 'single_track' => [ 1, 2, 3] ],
+];
+
+is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ FROM cd me
+ LEFT JOIN track tracks ON tracks.cd = me.cdid
+ WHERE
+ cdid > ?
+ AND tracks.last_updated_at IS NOT NULL
+ AND tracks.last_updated_on < ?
+ AND tracks.position = ?
+ AND tracks.single_track = ?
+ )',
+ $bind,
+ 'expected sql with casting off',
+);
+
+$schema->storage->auto_cast (1);
+
+is_same_sql_bind (
+ $rs->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ FROM cd me
+ LEFT JOIN track tracks ON tracks.cd = me.cdid
+ WHERE
+ cdid > CAST(? AS INT)
+ AND tracks.last_updated_at IS NOT NULL
+ AND tracks.last_updated_on < CAST (? AS yyy)
+ AND tracks.position = ?
+ AND tracks.single_track = CAST(? AS INT)
+ )',
+ $bind,
+ 'expected sql with casting on',
+);
+
+done_testing;
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/93storage_replication.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/93storage_replication.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/93storage_replication.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,853 +0,0 @@
-use strict;
-use warnings;
-use lib qw(t/lib);
-use Test::More;
-use Test::Exception;
-use DBICTest;
-use List::Util 'first';
-use Scalar::Util 'reftype';
-use File::Spec;
-use IO::Handle;
-
-BEGIN {
- eval "use DBIx::Class::Storage::DBI::Replicated; use Test::Moose";
- plan skip_all => "Deps not installed: $@" if $@;
-}
-
-use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
-use_ok 'DBIx::Class::Storage::DBI::Replicated::Balancer';
-use_ok 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-use_ok 'DBIx::Class::Storage::DBI::Replicated';
-
-use Moose();
-use MooseX::Types();
-diag "Using Moose version $Moose::VERSION and MooseX::Types version $MooseX::Types::VERSION";
-
-=head1 HOW TO USE
-
- This is a test of the replicated storage system. This will work in one of
- two ways, either it was try to fake replication with a couple of SQLite DBs
- and creative use of copy, or if you define a couple of %ENV vars correctly
- will try to test those. If you do that, it will assume the setup is properly
- replicating. Your results may vary, but I have demonstrated this to work with
- mysql native replication.
-
-=cut
-
-
-## ----------------------------------------------------------------------------
-## Build a class to hold all our required testing data and methods.
-## ----------------------------------------------------------------------------
-
-TESTSCHEMACLASSES: {
-
- ## --------------------------------------------------------------------- ##
- ## Create an object to contain your replicated stuff.
- ## --------------------------------------------------------------------- ##
-
- package DBIx::Class::DBI::Replicated::TestReplication;
-
- use DBICTest;
- use base qw/Class::Accessor::Fast/;
-
- __PACKAGE__->mk_accessors( qw/schema/ );
-
- ## Initialize the object
-
- sub new {
- my ($class, $schema_method) = (shift, shift);
- my $self = $class->SUPER::new(@_);
-
- $self->schema( $self->init_schema($schema_method) );
- return $self;
- }
-
- ## Get the Schema and set the replication storage type
-
- sub init_schema {
- # current SQLT SQLite producer does not handle DROP TABLE IF EXISTS, trap warnings here
- local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/ };
-
- my ($class, $schema_method) = @_;
-
- my $method = "get_schema_$schema_method";
- my $schema = $class->$method;
-
- return $schema;
- }
-
- sub get_schema_by_storage_type {
- DBICTest->init_schema(
- sqlite_use_file => 1,
- storage_type=>{
- '::DBI::Replicated' => {
- balancer_type=>'::Random',
- balancer_args=>{
- auto_validate_every=>100,
- master_read_weight => 1
- },
- }
- },
- deploy_args=>{
- add_drop_table => 1,
- },
- );
- }
-
- sub get_schema_by_connect_info {
- DBICTest->init_schema(
- sqlite_use_file => 1,
- storage_type=> '::DBI::Replicated',
- balancer_type=>'::Random',
- balancer_args=> {
- auto_validate_every=>100,
- master_read_weight => 1
- },
- deploy_args=>{
- add_drop_table => 1,
- },
- );
- }
-
- sub generate_replicant_connect_info {}
- sub replicate {}
- sub cleanup {}
-
- ## --------------------------------------------------------------------- ##
- ## Add a connect_info option to test option merging.
- ## --------------------------------------------------------------------- ##
- {
- package DBIx::Class::Storage::DBI::Replicated;
-
- use Moose;
-
- __PACKAGE__->meta->make_mutable;
-
- around connect_info => sub {
- my ($next, $self, $info) = @_;
- $info->[3]{master_option} = 1;
- $self->$next($info);
- };
-
- __PACKAGE__->meta->make_immutable;
-
- no Moose;
- }
-
- ## --------------------------------------------------------------------- ##
- ## Subclass for when you are using SQLite for testing, this provides a fake
- ## replication support.
- ## --------------------------------------------------------------------- ##
-
- package DBIx::Class::DBI::Replicated::TestReplication::SQLite;
-
- use DBICTest;
- use File::Copy;
- use base 'DBIx::Class::DBI::Replicated::TestReplication';
-
- __PACKAGE__->mk_accessors(qw/master_path slave_paths/);
-
- ## Set the master path from DBICTest
-
- sub new {
- my $class = shift @_;
- my $self = $class->SUPER::new(@_);
-
- $self->master_path( DBICTest->_sqlite_dbfilename );
- $self->slave_paths([
- File::Spec->catfile(qw/t var DBIxClass_slave1.db/),
- File::Spec->catfile(qw/t var DBIxClass_slave2.db/),
- ]);
-
- return $self;
- }
-
- ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
- ## $storage->connect_info to be used for connecting replicants.
-
- sub generate_replicant_connect_info {
- my $self = shift @_;
- my @dsn = map {
- "dbi:SQLite:${_}";
- } @{$self->slave_paths};
-
- my @connect_infos = map { [$_,'','',{AutoCommit=>1}] } @dsn;
-
- ## Make sure nothing is left over from a failed test
- $self->cleanup;
-
- ## try a hashref too
- my $c = $connect_infos[0];
- $connect_infos[0] = {
- dsn => $c->[0],
- user => $c->[1],
- password => $c->[2],
- %{ $c->[3] }
- };
-
- @connect_infos
- }
-
- ## Do a 'good enough' replication by copying the master dbfile over each of
- ## the slave dbfiles. If the master is SQLite we do this, otherwise we
- ## just do a one second pause to let the slaves catch up.
-
- sub replicate {
- my $self = shift @_;
- foreach my $slave (@{$self->slave_paths}) {
- copy($self->master_path, $slave);
- }
- }
-
- ## Cleanup after ourselves. Unlink all gthe slave paths.
-
- sub cleanup {
- my $self = shift @_;
- foreach my $slave (@{$self->slave_paths}) {
- if(-e $slave) {
- unlink $slave;
- }
- }
- }
-
- ## --------------------------------------------------------------------- ##
- ## Subclass for when you are setting the databases via custom export vars
- ## This is for when you have a replicating database setup that you are
- ## going to test against. You'll need to define the correct $ENV and have
- ## two slave databases to test against, as well as a replication system
- ## that will replicate in less than 1 second.
- ## --------------------------------------------------------------------- ##
-
- package DBIx::Class::DBI::Replicated::TestReplication::Custom;
- use base 'DBIx::Class::DBI::Replicated::TestReplication';
-
- ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
- ## $storage->connect_info to be used for connecting replicants.
-
- sub generate_replicant_connect_info {
- return (
- [$ENV{"DBICTEST_SLAVE0_DSN"}, $ENV{"DBICTEST_SLAVE0_DBUSER"}, $ENV{"DBICTEST_SLAVE0_DBPASS"}, {AutoCommit => 1}],
- [$ENV{"DBICTEST_SLAVE1_DSN"}, $ENV{"DBICTEST_SLAVE1_DBUSER"}, $ENV{"DBICTEST_SLAVE1_DBPASS"}, {AutoCommit => 1}],
- );
- }
-
- ## pause a bit to let the replication catch up
-
- sub replicate {
- sleep 1;
- }
-}
-
-## ----------------------------------------------------------------------------
-## Create an object and run some tests
-## ----------------------------------------------------------------------------
-
-## Thi first bunch of tests are basic, just make sure all the bits are behaving
-
-my $replicated_class = DBICTest->has_custom_dsn ?
- 'DBIx::Class::DBI::Replicated::TestReplication::Custom' :
- 'DBIx::Class::DBI::Replicated::TestReplication::SQLite';
-
-my $replicated;
-
-for my $method (qw/by_connect_info by_storage_type/) {
- undef $replicated;
- ok $replicated = $replicated_class->new($method)
- => "Created a replication object $method";
-
- isa_ok $replicated->schema
- => 'DBIx::Class::Schema';
-
- isa_ok $replicated->schema->storage
- => 'DBIx::Class::Storage::DBI::Replicated';
-
- isa_ok $replicated->schema->storage->balancer
- => 'DBIx::Class::Storage::DBI::Replicated::Balancer::Random'
- => 'configured balancer_type';
-}
-
-ok $replicated->schema->storage->meta
- => 'has a meta object';
-
-isa_ok $replicated->schema->storage->master
- => 'DBIx::Class::Storage::DBI';
-
-isa_ok $replicated->schema->storage->pool
- => 'DBIx::Class::Storage::DBI::Replicated::Pool';
-
-does_ok $replicated->schema->storage->balancer
- => 'DBIx::Class::Storage::DBI::Replicated::Balancer';
-
-ok my @replicant_connects = $replicated->generate_replicant_connect_info
- => 'got replication connect information';
-
-ok my @replicated_storages = $replicated->schema->storage->connect_replicants(@replicant_connects)
- => 'Created some storages suitable for replicants';
-
-our %debug;
-$replicated->schema->storage->debug(1);
-$replicated->schema->storage->debugcb(sub {
- my ($op, $info) = @_;
- ##warn "\n$op, $info\n";
- %debug = (
- op => $op,
- info => $info,
- dsn => ($info=~m/\[(.+)\]/)[0],
- storage_type => $info=~m/REPLICANT/ ? 'REPLICANT' : 'MASTER',
- );
-});
-
-ok my @all_storages = $replicated->schema->storage->all_storages
- => '->all_storages';
-
-is scalar @all_storages,
- 3
- => 'correct number of ->all_storages';
-
-is ((grep $_->isa('DBIx::Class::Storage::DBI'), @all_storages),
- 3
- => '->all_storages are correct type');
-
-my @all_storage_opts =
- grep { (reftype($_)||'') eq 'HASH' }
- map @{ $_->_connect_info }, @all_storages;
-
-is ((grep $_->{master_option}, @all_storage_opts),
- 3
- => 'connect_info was merged from master to replicants');
-
-my @replicant_names = keys %{ $replicated->schema->storage->replicants };
-
-ok @replicant_names, "found replicant names @replicant_names";
-
-## Silence warning about not supporting the is_replicating method if using the
-## sqlite dbs.
-$replicated->schema->storage->debugobj->silence(1)
- if first { m{^t/} } @replicant_names;
-
-isa_ok $replicated->schema->storage->balancer->current_replicant
- => 'DBIx::Class::Storage::DBI';
-
-$replicated->schema->storage->debugobj->silence(0);
-
-ok $replicated->schema->storage->pool->has_replicants
- => 'does have replicants';
-
-is $replicated->schema->storage->pool->num_replicants => 2
- => 'has two replicants';
-
-does_ok $replicated_storages[0]
- => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-
-does_ok $replicated_storages[1]
- => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-
-does_ok $replicated->schema->storage->replicants->{$replicant_names[0]}
- => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-
-does_ok $replicated->schema->storage->replicants->{$replicant_names[1]}
- => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
-
-## Add some info to the database
-
-$replicated
- ->schema
- ->populate('Artist', [
- [ qw/artistid name/ ],
- [ 4, "Ozric Tentacles"],
- ]);
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- like $debug{info}, qr/INSERT/, 'Last was an insert';
-
-## Make sure all the slaves have the table definitions
-
-$replicated->replicate;
-$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
-$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
-
-## Silence warning about not supporting the is_replicating method if using the
-## sqlite dbs.
-$replicated->schema->storage->debugobj->silence(1)
- if first { m{^t/} } @replicant_names;
-
-$replicated->schema->storage->pool->validate_replicants;
-
-$replicated->schema->storage->debugobj->silence(0);
-
-## Make sure we can read the data.
-
-ok my $artist1 = $replicated->schema->resultset('Artist')->find(4)
- => 'Created Result';
-
-## We removed testing here since master read weight is on, so we can't tell in
-## advance what storage to expect. We turn master read weight off a bit lower
-## is $debug{storage_type}, 'REPLICANT'
-## => "got last query from a replicant: $debug{dsn}, $debug{info}";
-
-isa_ok $artist1
- => 'DBICTest::Artist';
-
-is $artist1->name, 'Ozric Tentacles'
- => 'Found expected name for first result';
-
-## Check that master_read_weight is honored
-{
- no warnings qw/once redefine/;
-
- local
- *DBIx::Class::Storage::DBI::Replicated::Balancer::Random::_random_number =
- sub { 999 };
-
- $replicated->schema->storage->balancer->increment_storage;
-
- is $replicated->schema->storage->balancer->current_replicant,
- $replicated->schema->storage->master
- => 'master_read_weight is honored';
-
- ## turn it off for the duration of the test
- $replicated->schema->storage->balancer->master_read_weight(0);
- $replicated->schema->storage->balancer->increment_storage;
-}
-
-## Add some new rows that only the master will have This is because
-## we overload any type of write operation so that is must hit the master
-## database.
-
-$replicated
- ->schema
- ->populate('Artist', [
- [ qw/artistid name/ ],
- [ 5, "Doom's Children"],
- [ 6, "Dead On Arrival"],
- [ 7, "Watergate"],
- ]);
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- like $debug{info}, qr/INSERT/, 'Last was an insert';
-
-## Make sure all the slaves have the table definitions
-$replicated->replicate;
-
-## Should find some data now
-
-ok my $artist2 = $replicated->schema->resultset('Artist')->find(5)
- => 'Sync succeed';
-
-is $debug{storage_type}, 'REPLICANT'
- => "got last query from a replicant: $debug{dsn}";
-
-isa_ok $artist2
- => 'DBICTest::Artist';
-
-is $artist2->name, "Doom's Children"
- => 'Found expected name for first result';
-
-## What happens when we disconnect all the replicants?
-
-is $replicated->schema->storage->pool->connected_replicants => 2
- => "both replicants are connected";
-
-$replicated->schema->storage->replicants->{$replicant_names[0]}->disconnect;
-$replicated->schema->storage->replicants->{$replicant_names[1]}->disconnect;
-
-is $replicated->schema->storage->pool->connected_replicants => 0
- => "both replicants are now disconnected";
-
-## All these should pass, since the database should automatically reconnect
-
-ok my $artist3 = $replicated->schema->resultset('Artist')->find(6)
- => 'Still finding stuff.';
-
-is $debug{storage_type}, 'REPLICANT'
- => "got last query from a replicant: $debug{dsn}";
-
-isa_ok $artist3
- => 'DBICTest::Artist';
-
-is $artist3->name, "Dead On Arrival"
- => 'Found expected name for first result';
-
-is $replicated->schema->storage->pool->connected_replicants => 1
- => "At Least One replicant reconnected to handle the job";
-
-## What happens when we try to select something that doesn't exist?
-
-ok ! $replicated->schema->resultset('Artist')->find(666)
- => 'Correctly failed to find something.';
-
-is $debug{storage_type}, 'REPLICANT'
- => "got last query from a replicant: $debug{dsn}";
-
-## test the reliable option
-
-TESTRELIABLE: {
-
- $replicated->schema->storage->set_reliable_storage;
-
- ok $replicated->schema->resultset('Artist')->find(2)
- => 'Read from master 1';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- ok $replicated->schema->resultset('Artist')->find(5)
- => 'Read from master 2';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- $replicated->schema->storage->set_balanced_storage;
-
- ok $replicated->schema->resultset('Artist')->find(3)
- => 'Read from replicant';
-
- is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
-}
-
-## Make sure when reliable goes out of scope, we are using replicants again
-
-ok $replicated->schema->resultset('Artist')->find(1)
- => 'back to replicant 1.';
-
- is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
-
-ok $replicated->schema->resultset('Artist')->find(2)
- => 'back to replicant 2.';
-
- is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
-
-## set all the replicants to inactive, and make sure the balancer falls back to
-## the master.
-
-$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
-$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
-
-{
- ## catch the fallback to master warning
- open my $debugfh, '>', \my $fallback_warning;
- my $oldfh = $replicated->schema->storage->debugfh;
- $replicated->schema->storage->debugfh($debugfh);
-
- ok $replicated->schema->resultset('Artist')->find(2)
- => 'Fallback to master';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- like $fallback_warning, qr/falling back to master/
- => 'emits falling back to master warning';
-
- $replicated->schema->storage->debugfh($oldfh);
-}
-
-$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
-$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
-
-## Silence warning about not supporting the is_replicating method if using the
-## sqlite dbs.
-$replicated->schema->storage->debugobj->silence(1)
- if first { m{^t/} } @replicant_names;
-
-$replicated->schema->storage->pool->validate_replicants;
-
-$replicated->schema->storage->debugobj->silence(0);
-
-ok $replicated->schema->resultset('Artist')->find(2)
- => 'Returned to replicates';
-
-is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
-
-## Getting slave status tests
-
-SKIP: {
- ## We skip this tests unless you have a custom replicants, since the default
- ## sqlite based replication tests don't support these functions.
-
- skip 'Cannot Test Replicant Status on Non Replicating Database', 10
- unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
-
- $replicated->replicate; ## Give the slaves a chance to catchup.
-
- ok $replicated->schema->storage->replicants->{$replicant_names[0]}->is_replicating
- => 'Replicants are replicating';
-
- is $replicated->schema->storage->replicants->{$replicant_names[0]}->lag_behind_master, 0
- => 'Replicant is zero seconds behind master';
-
- ## Test the validate replicants
-
- $replicated->schema->storage->pool->validate_replicants;
-
- is $replicated->schema->storage->pool->active_replicants, 2
- => 'Still have 2 replicants after validation';
-
- ## Force the replicants to fail the validate test by required their lag to
- ## be negative (ie ahead of the master!)
-
- $replicated->schema->storage->pool->maximum_lag(-10);
- $replicated->schema->storage->pool->validate_replicants;
-
- is $replicated->schema->storage->pool->active_replicants, 0
- => 'No way a replicant be be ahead of the master';
-
- ## Let's be fair to the replicants again. Let them lag up to 5
-
- $replicated->schema->storage->pool->maximum_lag(5);
- $replicated->schema->storage->pool->validate_replicants;
-
- is $replicated->schema->storage->pool->active_replicants, 2
- => 'Both replicants in good standing again';
-
- ## Check auto validate
-
- is $replicated->schema->storage->balancer->auto_validate_every, 100
- => "Got the expected value for auto validate";
-
- ## This will make sure we auto validatge everytime
- $replicated->schema->storage->balancer->auto_validate_every(0);
-
- ## set all the replicants to inactive, and make sure the balancer falls back to
- ## the master.
-
- $replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
- $replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
-
- ## Ok, now when we go to run a query, autovalidate SHOULD reconnect
-
- is $replicated->schema->storage->pool->active_replicants => 0
- => "both replicants turned off";
-
- ok $replicated->schema->resultset('Artist')->find(5)
- => 'replicant reactivated';
-
- is $debug{storage_type}, 'REPLICANT',
- "got last query from a replicant: $debug{dsn}";
-
- is $replicated->schema->storage->pool->active_replicants => 2
- => "both replicants reactivated";
-}
-
-## Test the reliably callback
-
-ok my $reliably = sub {
-
- ok $replicated->schema->resultset('Artist')->find(5)
- => 'replicant reactivated';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
-} => 'created coderef properly';
-
-$replicated->schema->storage->execute_reliably($reliably);
-
-## Try something with an error
-
-ok my $unreliably = sub {
-
- ok $replicated->schema->resultset('ArtistXX')->find(5)
- => 'replicant reactivated';
-
-} => 'created coderef properly';
-
-throws_ok {$replicated->schema->storage->execute_reliably($unreliably)}
- qr/Can't find source for ArtistXX/
- => 'Bad coderef throws proper error';
-
-## Make sure replication came back
-
-ok $replicated->schema->resultset('Artist')->find(3)
- => 'replicant reactivated';
-
-is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
-
-## make sure transactions are set to execute_reliably
-
-ok my $transaction = sub {
-
- my $id = shift @_;
-
- $replicated
- ->schema
- ->populate('Artist', [
- [ qw/artistid name/ ],
- [ $id, "Children of the Grave"],
- ]);
-
- ok my $result = $replicated->schema->resultset('Artist')->find($id)
- => "Found expected artist for $id";
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'Found expected artist again for 1';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- return ($result, $more);
-
-} => 'Created a coderef properly';
-
-## Test the transaction with multi return
-{
- ok my @return = $replicated->schema->txn_do($transaction, 666)
- => 'did transaction';
-
- is $return[0]->id, 666
- => 'first returned value is correct';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
- is $return[1]->id, 1
- => 'second returned value is correct';
-
- is $debug{storage_type}, 'MASTER',
- "got last query from a master: $debug{dsn}";
-
-}
-
-## Test that asking for single return works
-{
- ok my @return = $replicated->schema->txn_do($transaction, 777)
- => 'did transaction';
-
- is $return[0]->id, 777
- => 'first returned value is correct';
-
- is $return[1]->id, 1
- => 'second returned value is correct';
-}
-
-## Test transaction returning a single value
-
-{
- ok my $result = $replicated->schema->txn_do(sub {
- ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'found inside a transaction';
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
- return $more;
- }) => 'successfully processed transaction';
-
- is $result->id, 1
- => 'Got expected single result from transaction';
-}
-
-## Make sure replication came back
-
-ok $replicated->schema->resultset('Artist')->find(1)
- => 'replicant reactivated';
-
-is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
-
-## Test Discard changes
-
-{
- ok my $artist = $replicated->schema->resultset('Artist')->find(2)
- => 'got an artist to test discard changes';
-
- is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
-
- ok $artist->get_from_storage({force_pool=>'master'})
- => 'properly discard changes';
-
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
-
- ok $artist->discard_changes({force_pool=>'master'})
- => 'properly called discard_changes against master (manual attrs)';
-
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
-
- ok $artist->discard_changes()
- => 'properly called discard_changes against master (default attrs)';
-
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
-
- ok $artist->discard_changes({force_pool=>$replicant_names[0]})
- => 'properly able to override the default attributes';
-
- is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}"
-}
-
-## Test some edge cases, like trying to do a transaction inside a transaction, etc
-
-{
- ok my $result = $replicated->schema->txn_do(sub {
- return $replicated->schema->txn_do(sub {
- ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'found inside a transaction inside a transaction';
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
- return $more;
- });
- }) => 'successfully processed transaction';
-
- is $result->id, 1
- => 'Got expected single result from transaction';
-}
-
-{
- ok my $result = $replicated->schema->txn_do(sub {
- return $replicated->schema->storage->execute_reliably(sub {
- return $replicated->schema->txn_do(sub {
- return $replicated->schema->storage->execute_reliably(sub {
- ok my $more = $replicated->schema->resultset('Artist')->find(1)
- => 'found inside crazy deep transactions and execute_reliably';
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
- return $more;
- });
- });
- });
- }) => 'successfully processed transaction';
-
- is $result->id, 1
- => 'Got expected single result from transaction';
-}
-
-## Test the force_pool resultset attribute.
-
-{
- ok my $artist_rs = $replicated->schema->resultset('Artist')
- => 'got artist resultset';
-
- ## Turn on Forced Pool Storage
- ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>'master'})
- => 'Created a resultset using force_pool storage';
-
- ok my $artist = $reliable_artist_rs->find(2)
- => 'got an artist result via force_pool storage';
-
- is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
-}
-
-## Test the force_pool resultset attribute part two.
-
-{
- ok my $artist_rs = $replicated->schema->resultset('Artist')
- => 'got artist resultset';
-
- ## Turn on Forced Pool Storage
- ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>$replicant_names[0]})
- => 'Created a resultset using force_pool storage';
-
- ok my $artist = $reliable_artist_rs->find(2)
- => 'got an artist result via force_pool storage';
-
- is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
-}
-## Delete the old database files
-$replicated->cleanup;
-
-done_testing;
-
-# vim: sw=4 sts=4 :
Modified: DBIx-Class/0.08/branches/ado_mssql/t/94versioning.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/94versioning.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/94versioning.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,10 +1,10 @@
#!/usr/bin/perl
+
use strict;
use warnings;
use Test::More;
use File::Spec;
use File::Copy;
-use Time::HiRes qw/time sleep/;
#warn "$dsn $user $pass";
my ($dsn, $user, $pass);
@@ -15,11 +15,14 @@
plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test'
unless ($dsn);
+ eval { require Time::HiRes }
+ || plan skip_all => 'Test needs Time::HiRes';
+ Time::HiRes->import(qw/time sleep/);
- eval "use DBD::mysql; use SQL::Translator 0.09003;";
- plan $@
- ? ( skip_all => 'needs DBD::mysql and SQL::Translator 0.09003 for testing' )
- : ( tests => 22 );
+ require DBIx::Class::Storage::DBI;
+ plan skip_all =>
+ 'Test needs SQL::Translator ' . DBIx::Class::Storage::DBI->_sqlt_minimum_version
+ if not DBIx::Class::Storage::DBI->_sqlt_version_ok;
}
my $version_table_name = 'dbix_class_schema_versions';
@@ -182,3 +185,5 @@
unless ($ENV{DBICTEST_KEEP_VERSIONING_DDL}) {
unlink $_ for (values %$fn);
}
+
+done_testing;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/95sql_maker.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/95sql_maker.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/95sql_maker.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -7,11 +7,9 @@
use lib qw(t/lib);
use DBIC::SqlMakerTest;
-plan tests => 4;
-
use_ok('DBICTest');
-my $schema = DBICTest->init_schema();
+my $schema = DBICTest->init_schema(no_deploy => 1);
my $sql_maker = $schema->storage->sql_maker;
@@ -49,9 +47,33 @@
);
}
+# make sure the cookbook caveat of { $op, \'...' } no longer applies
+{
+ my ($sql, @bind) = $sql_maker->where({
+ last_attempt => \ '< now() - interval "12 hours"',
+ next_attempt => { '<', \ 'now() - interval "12 hours"' },
+ created => [
+ { '<=', \ '1969' },
+ \ '> 1984',
+ ],
+ });
+ is_same_sql_bind(
+ $sql,
+ \@bind,
+ 'WHERE
+ (created <= 1969 OR created > 1984 )
+ AND last_attempt < now() - interval "12 hours"
+ AND next_attempt < now() - interval "12 hours"
+ ',
+ [],
+ );
+}
+
# Make sure the carp/croak override in SQLA works (via SQLAHacks)
my $file = __FILE__;
$file = "\Q$file\E";
throws_ok (sub {
$schema->resultset ('Artist')->search ({}, { order_by => { -asc => 'stuff', -desc => 'staff' } } )->as_query;
}, qr/$file/, 'Exception correctly croak()ed');
+
+done_testing;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/99dbic_sqlt_parser.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/99dbic_sqlt_parser.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/99dbic_sqlt_parser.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -5,12 +5,11 @@
use lib qw(t/lib);
use DBICTest;
-
BEGIN {
- eval "use SQL::Translator 0.09003;";
- if ($@) {
- plan skip_all => 'needs SQL::Translator 0.09003 for testing';
- }
+ require DBIx::Class::Storage::DBI;
+ plan skip_all =>
+ 'Test needs SQL::Translator ' . DBIx::Class::Storage::DBI->_sqlt_minimum_version
+ if not DBIx::Class::Storage::DBI->_sqlt_version_ok;
}
my $schema = DBICTest->init_schema();
@@ -23,8 +22,6 @@
$schema->sources
;
-plan tests => ( @sources * 3);
-
{
my $sqlt_schema = create_schema({ schema => $schema, args => { parser_args => { } } });
@@ -65,6 +62,8 @@
}
}
+done_testing;
+
sub create_schema {
my $args = shift;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/cdbi/13-constraint.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/cdbi/13-constraint.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/cdbi/13-constraint.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -95,13 +95,11 @@
}
eval { Film->constrain_column(codirector => Untaint => 'date') };
is $@, '', 'Can constrain with untaint';
+
my $freeaa =
eval { Film->create({ title => "The Freaa", codirector => 'today' }) };
- TODO: {
- local $TODO = "no idea what this is supposed to do";
- is $@, '', "Can create codirector";
- is $freeaa && $freeaa->codirector, '2001-03-03', "Set the codirector";
- }
+ is $@, '', "Can create codirector";
+ is $freeaa && $freeaa->codirector, '2001-03-03', "Set the codirector";
}
__DATA__
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/cdbi/testlib/Binary.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/cdbi/testlib/Binary.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/cdbi/testlib/Binary.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,16 +0,0 @@
-package # hide from PAUSE
- Binary;
-
-use strict;
-use base 'PgBase';
-
-__PACKAGE__->table(cdbibintest => 'cdbibintest');
-__PACKAGE__->sequence('binseq');
-__PACKAGE__->columns(All => qw(id bin));
-
-# __PACKAGE__->data_type(bin => DBI::SQL_BINARY);
-
-sub schema { "id INTEGER, bin BYTEA" }
-
-1;
-
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/cdbi/testlib/PgBase.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/cdbi/testlib/PgBase.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/cdbi/testlib/PgBase.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,23 +0,0 @@
-package # hide from PAUSE
- PgBase;
-
-use strict;
-use base 'DBIx::Class::CDBICompat';
-
-my $db = $ENV{DBD_PG_DBNAME} || 'template1';
-my $user = $ENV{DBD_PG_USER} || 'postgres';
-my $pass = $ENV{DBD_PG_PASSWD} || '';
-
-__PACKAGE__->connection("dbi:Pg:dbname=$db", $user, $pass,
- { AutoCommit => 1 });
-
-sub CONSTRUCT {
- my $class = shift;
- my ($table, $sequence) = ($class->table, $class->sequence || "");
- my $schema = $class->schema;
- $class->db_Main->do("CREATE TEMPORARY SEQUENCE $sequence") if $sequence;
- $class->db_Main->do("CREATE TEMPORARY TABLE $table ( $schema )");
-}
-
-1;
-
Modified: DBIx-Class/0.08/branches/ado_mssql/t/count/grouped_pager.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/count/grouped_pager.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/count/grouped_pager.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -11,8 +11,6 @@
my $schema = DBICTest->init_schema();
-use Data::Dumper;
-
# add 2 extra artists
$schema->populate ('Artist', [
[qw/name/],
Modified: DBIx-Class/0.08/branches/ado_mssql/t/count/in_subquery.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/count/in_subquery.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/count/in_subquery.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -3,8 +3,6 @@
use strict;
use warnings;
-use Data::Dumper;
-
use Test::More;
plan ( tests => 1 );
Deleted: DBIx-Class/0.08/branches/ado_mssql/t/dbh_do.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/dbh_do.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/dbh_do.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,33 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use warnings;
-
-use Test::More tests => 8;
-use lib qw(t/lib);
-use DBICTest;
-
-
-my $schema = DBICTest->init_schema();
-my $storage = $schema->storage;
-
-my $test_func = sub {
- is $_[0], $storage;
- is $_[1], $storage->dbh;
- is $_[2], "foo";
- is $_[3], "bar";
-};
-
-$storage->dbh_do(
- $test_func,
- "foo", "bar"
-);
-
-my $storage_class = ref $storage;
-{
- no strict 'refs';
- *{$storage_class .'::__test_method'} = $test_func;
-}
-$storage->dbh_do("__test_method", "foo", "bar");
-
-
\ No newline at end of file
Copied: DBIx-Class/0.08/branches/ado_mssql/t/inflate/datetime_determine_parser.t (from rev 7355, DBIx-Class/0.08/branches/ado_mssql/t/36datetime.t)
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/inflate/datetime_determine_parser.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/inflate/datetime_determine_parser.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,28 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+eval { require DateTime::Format::SQLite };
+plan $@ ? ( skip_all => 'Requires DateTime::Format::SQLite' )
+ : ( tests => 3 );
+
+my $schema = DBICTest->init_schema(
+ no_deploy => 1, # Deploying would cause an early rebless
+);
+
+is(
+ ref $schema->storage, 'DBIx::Class::Storage::DBI',
+ 'Starting with generic storage'
+);
+
+# Calling date_time_parser should cause the storage to be reblessed,
+# so that we can pick up datetime_parser_type from subclasses
+
+my $parser = $schema->storage->datetime_parser();
+
+is($parser, 'DateTime::Format::SQLite', 'Got expected storage-set datetime_parser');
+isa_ok($schema->storage, 'DBIx::Class::Storage::DBI::SQLite', 'storage');
+
Modified: DBIx-Class/0.08/branches/ado_mssql/t/inflate/serialize.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/inflate/serialize.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/inflate/serialize.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -7,8 +7,6 @@
my $schema = DBICTest->init_schema();
-use Data::Dumper;
-
my @serializers = (
{ module => 'YAML.pm',
inflater => sub { YAML::Load (shift) },
Modified: DBIx-Class/0.08/branches/ado_mssql/t/lib/DBICTest/AuthorCheck.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/lib/DBICTest/AuthorCheck.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/lib/DBICTest/AuthorCheck.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -54,21 +54,17 @@
We have a number of reasons to believe that this is a development
checkout and that you, the user, did not run `perl Makefile.PL`
before using this code. You absolutely _must_ perform this step,
-as not doing so often results in a lot of wasted time for other
-contributors trying to assit you with "it broke!" problems.
+and ensure you have all required dependencies present. Not doing
+so often results in a lot of wasted time for other contributors
+trying to assit you with spurious "its broken!" problems.
If you are seeing this message unexpectedly (i.e. you are in fact
-attempting a regular installation be it through CPAN or manually,
-set the variable DBICTEST_NO_MAKEFILE_VERIFICATION to a true value
-so you can continue. Also _make_absolutely_sure_ to report this to
-either the mailing list or to the irc channel as described in
+attempting a regular installation be it through CPAN or manually),
+please report the situation to either the mailing list or to the
+irc channel as described in
http://search.cpan.org/dist/DBIx-Class/lib/DBIx/Class.pm#GETTING_HELP/SUPPORT
-Failure to do this will make us believe that all these checks are
-indeed foolproof and we will remove the ability to override this
-entirely.
-
The DBIC team
@@ -79,6 +75,19 @@
}
}
+# Mimic $Module::Install::AUTHOR
+sub is_author {
+
+ my $root = _find_co_root()
+ or return undef;
+
+ return (
+ ( not -d $root->subdir ('inc') )
+ or
+ ( -e $root->subdir ('inc')->file ($^O eq 'VMS' ? '_author' : '.author') )
+ );
+}
+
# Try to determine the root of a checkout/untar if possible
# or return undef
sub _find_co_root {
Modified: DBIx-Class/0.08/branches/ado_mssql/t/lib/DBICTest/Schema/Artist.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/lib/DBICTest/Schema/Artist.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/lib/DBICTest/Schema/Artist.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -30,6 +30,7 @@
},
);
__PACKAGE__->set_primary_key('artistid');
+__PACKAGE__->add_unique_constraint(artist => ['artistid']); # do not remove, part of a test
__PACKAGE__->mk_classdata('field_name_for', {
artistid => 'primary key',
@@ -68,4 +69,11 @@
}
}
+sub store_column {
+ my ($self, $name, $value) = @_;
+ $value = 'X '.$value if ($name eq 'name' && $value && $value =~ /(X )?store_column test/);
+ $self->next::method($name, $value);
+}
+
+
1;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/lib/DBICTest/Schema/Track.pm
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/lib/DBICTest/Schema/Track.pm 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/lib/DBICTest/Schema/Track.pm 2009-10-14 13:45:34 UTC (rev 7786)
@@ -14,7 +14,7 @@
data_type => 'integer',
},
'position' => {
- data_type => 'integer',
+ data_type => 'int',
accessor => 'pos',
},
'title' => {
Modified: DBIx-Class/0.08/branches/ado_mssql/t/lib/sqlite.sql
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/lib/sqlite.sql 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/lib/sqlite.sql 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,6 +1,6 @@
--
-- Created by SQL::Translator::Producer::SQLite
--- Created on Thu Aug 20 07:47:13 2009
+-- Created on Mon Sep 21 00:11:34 2009
--
@@ -283,7 +283,7 @@
CREATE TABLE track (
trackid INTEGER PRIMARY KEY NOT NULL,
cd integer NOT NULL,
- position integer NOT NULL,
+ position int NOT NULL,
title varchar(100) NOT NULL,
last_updated_on datetime,
last_updated_at datetime,
Modified: DBIx-Class/0.08/branches/ado_mssql/t/prefetch/attrs_untouched.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/prefetch/attrs_untouched.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/prefetch/attrs_untouched.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -4,7 +4,9 @@
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
+
use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
my $schema = DBICTest->init_schema();
Modified: DBIx-Class/0.08/branches/ado_mssql/t/prefetch/grouped.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/prefetch/grouped.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/prefetch/grouped.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -271,4 +271,62 @@
);
}
+{
+ my $cd_rs = $schema->resultset('CD')->search({}, {
+ distinct => 1,
+ join => [qw/ tracks /],
+ prefetch => [qw/ artist /],
+ });
+ is($cd_rs->count, 5, 'complex prefetch + non-prefetching has_many join count correct');
+ is($cd_rs->all, 5, 'complex prefetch + non-prefetching has_many join number of objects correct');
+
+ # make sure join tracks was thrown out
+ is_same_sql_bind (
+ $cd_rs->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield
+ FROM (
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ FROM cd me
+ JOIN artist artist ON artist.artistid = me.artist
+ GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ ) me
+ JOIN artist artist ON artist.artistid = me.artist
+ )',
+ [],
+ );
+
+
+
+ # try the same as above, but add a condition so the tracks join can not be thrown away
+ my $cd_rs2 = $cd_rs->search ({ 'tracks.title' => { '!=' => 'ugabuganoexist' } });
+ is($cd_rs2->count, 5, 'complex prefetch + non-prefetching restricted has_many join count correct');
+ is($cd_rs2->all, 5, 'complex prefetch + non-prefetching restricted has_many join number of objects correct');
+
+ # the outer group_by seems like a necessary evil, if someone can figure out how to take it away
+ # without breaking compat - be my guest
+ is_same_sql_bind (
+ $cd_rs2->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield
+ FROM (
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ FROM cd me
+ LEFT JOIN track tracks ON tracks.cd = me.cdid
+ JOIN artist artist ON artist.artistid = me.artist
+ WHERE ( tracks.title != ? )
+ GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
+ ) me
+ LEFT JOIN track tracks ON tracks.cd = me.cdid
+ JOIN artist artist ON artist.artistid = me.artist
+ WHERE ( tracks.title != ? )
+ GROUP BY me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield
+ )',
+ [ map { [ 'tracks.title' => 'ugabuganoexist' ] } (1 .. 2) ],
+ );
+}
+
done_testing;
Added: DBIx-Class/0.08/branches/ado_mssql/t/prefetch/join_type.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/prefetch/join_type.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/prefetch/join_type.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,47 @@
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBIC::SqlMakerTest;
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+
+# a regular belongs_to prefetch
+my $cds = $schema->resultset('CD')->search ({}, { prefetch => 'artist' } );
+
+my $nulls = {
+ hashref => {},
+ arrayref => [],
+ undef => undef,
+};
+
+# make sure null-prefetches do not screw with the final sql:
+for my $type (keys %$nulls) {
+# is_same_sql_bind (
+# $cds->search({}, { prefetch => { artist => $nulls->{$type} } })->as_query,
+# $cds->as_query,
+# "same sql with null $type prefetch"
+# );
+}
+
+# make sure left join is carried only starting from the first has_many
+is_same_sql_bind (
+ $cds->search({}, { prefetch => { artist => { cds => 'artist' } } })->as_query,
+ '(
+ SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track,
+ artist.artistid, artist.name, artist.rank, artist.charfield,
+ cds.cdid, cds.artist, cds.title, cds.year, cds.genreid, cds.single_track,
+ artist_2.artistid, artist_2.name, artist_2.rank, artist_2.charfield
+ FROM cd me
+ JOIN artist artist ON artist.artistid = me.artist
+ LEFT JOIN cd cds ON cds.artist = artist.artistid
+ LEFT JOIN artist artist_2 ON artist_2.artistid = cds.artist
+ ORDER BY cds.artist, cds.year
+ )',
+ [],
+);
+
+done_testing;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/prefetch/multiple_hasmany.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/prefetch/multiple_hasmany.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/prefetch/multiple_hasmany.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -48,18 +48,13 @@
$schema->storage->debug ($sdebug);
is($pr_tracks_count, $tracks_count, 'equal count of prefetched relations over several same level has_many\'s (1 -> M + M)');
+ is ($pr_tracks_rs->all, $tracks_rs->all, 'equal amount of objects returned with and without prefetch over several same level has_many\'s (1 -> M + M)');
- for ($pr_tracks_rs, $tracks_rs) {
- $_->result_class ('DBIx::Class::ResultClass::HashRefInflator');
- }
-
- is_deeply ([$pr_tracks_rs->all], [$tracks_rs->all], 'same structure returned with and without prefetch over several same level has_many\'s (1 -> M + M)');
-
#( M -> 1 -> M + M )
my $note_rs = $schema->resultset('LinerNotes')->search ({ notes => 'Buy Whiskey!' });
my $pr_note_rs = $note_rs->search ({}, {
prefetch => {
- cd => [qw/tags tracks/]
+ cd => [qw/tracks tags/]
},
});
@@ -86,12 +81,7 @@
$schema->storage->debug ($sdebug);
is($pr_tags_count, $tags_count, 'equal count of prefetched relations over several same level has_many\'s (M -> 1 -> M + M)');
-
- for ($pr_tags_rs, $tags_rs) {
- $_->result_class ('DBIx::Class::ResultClass::HashRefInflator');
- }
-
- is_deeply ([$pr_tags_rs->all], [$tags_rs->all], 'same structure returned with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)');
+ is($pr_tags_rs->all, $tags_rs->all, 'equal amount of objects with and without prefetch over several same level has_many\'s (M -> 1 -> M + M)');
}
# remove this closure once the TODO above is working
Modified: DBIx-Class/0.08/branches/ado_mssql/t/prefetch/standard.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/prefetch/standard.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/prefetch/standard.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -5,7 +5,6 @@
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
-use Data::Dumper;
use IO::File;
my $schema = DBICTest->init_schema();
@@ -20,8 +19,6 @@
my $search = { 'artist.name' => 'Caterwauler McCrae' };
my $attr = { prefetch => [ qw/artist liner_notes/ ],
order_by => 'me.cdid' };
-my $search_str = Dumper($search);
-my $attr_str = Dumper($attr);
my $rs = $schema->resultset("CD")->search($search, $attr);
my @cd = $rs->all;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/prefetch/via_search_related.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/prefetch/via_search_related.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/prefetch/via_search_related.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -57,34 +57,46 @@
# test where conditions at the root of the related chain
- my $artist_rs = $schema->resultset("Artist")->search({artistid => 11});
+ my $artist_rs = $schema->resultset("Artist")->search({artistid => 2});
+ my $artist = $artist_rs->next;
+ $artist->create_related ('cds', $_) for (
+ {
+ year => 1999, title => 'vague cd', genre => { name => 'vague genre' }
+ },
+ {
+ year => 1999, title => 'vague cd2', genre => { name => 'vague genre' }
+ },
+ );
-
$rs = $artist_rs->search_related('cds')->search_related('genre',
- { 'genre.name' => 'foo' },
+ { 'genre.name' => 'vague genre' },
{ prefetch => 'cds' },
);
- is($rs->all, 0, 'prefetch without distinct (objects)');
- is($rs->count, 0, 'prefetch without distinct (count)');
+ is($rs->all, 1, 'base without distinct (objects)');
+ is($rs->count, 1, 'base without distinct (count)');
+ # artist -> 2 cds -> 2 genres -> 2 cds for each genre = 4
+ is($rs->search_related('cds')->all, 4, 'prefetch without distinct (objects)');
+ is($rs->search_related('cds')->count, 4, 'prefetch without distinct (count)');
-
$rs = $artist_rs->search(undef, {distinct => 1})
->search_related('cds')->search_related('genre',
- { 'genre.name' => 'foo' },
+ { 'genre.name' => 'vague genre' },
);
- is($rs->all, 0, 'distinct without prefetch (objects)');
- is($rs->count, 0, 'distinct without prefetch (count)');
+ is($rs->all, 1, 'distinct without prefetch (objects)');
+ is($rs->count, 1, 'distinct without prefetch (count)');
-
$rs = $artist_rs->search({}, {distinct => 1})
->search_related('cds')->search_related('genre',
- { 'genre.name' => 'foo' },
+ { 'genre.name' => 'vague genre' },
{ prefetch => 'cds' },
);
- is($rs->all, 0, 'distinct with prefetch (objects)');
- is($rs->count, 0, 'distinct with prefetch (count)');
+ is($rs->all, 1, 'distinct with prefetch (objects)');
+ is($rs->count, 1, 'distinct with prefetch (count)');
+ # artist -> 2 cds -> 2 genres -> 2 cds for each genre + distinct = 2
+ is($rs->search_related('cds')->all, 2, 'prefetched distinct with prefetch (objects)');
+ is($rs->search_related('cds')->count, 2, 'prefetched distinct with prefetch (count)');
Modified: DBIx-Class/0.08/branches/ado_mssql/t/relationship/after_update.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/relationship/after_update.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/relationship/after_update.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,7 +1,5 @@
-#!/usr/bin/perl -w
-
use strict;
-use warnings;
+use warnings;
use Test::More;
use lib qw(t/lib);
Modified: DBIx-Class/0.08/branches/ado_mssql/t/relationship/core.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/relationship/core.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/relationship/core.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -5,12 +5,11 @@
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
+use DBIC::SqlMakerTest;
my $schema = DBICTest->init_schema();
my $sdebug = $schema->storage->debug;
-plan tests => 79;
-
# has_a test
my $cd = $schema->resultset("CD")->find(4);
my ($artist) = ($INC{'DBICTest/HelperRels'}
@@ -260,8 +259,22 @@
is($def_artist_cd->search_related('artist')->count, 0, 'closed search on null FK');
# test undirected many-to-many relationship (e.g. "related artists")
-my $undir_maps = $schema->resultset("Artist")->find(1)->artist_undirected_maps;
+my $undir_maps = $schema->resultset("Artist")
+ ->search ({artistid => 1})
+ ->search_related ('artist_undirected_maps');
is($undir_maps->count, 1, 'found 1 undirected map for artist 1');
+is_same_sql_bind (
+ $undir_maps->as_query,
+ '(
+ SELECT artist_undirected_maps.id1, artist_undirected_maps.id2
+ FROM artist me
+ LEFT JOIN artist_undirected_map artist_undirected_maps
+ ON artist_undirected_maps.id1 = me.artistid OR artist_undirected_maps.id2 = me.artistid
+ WHERE ( artistid = ? )
+ )',
+ [[artistid => 1]],
+ 'expected join sql produced',
+);
$undir_maps = $schema->resultset("Artist")->find(2)->artist_undirected_maps;
is($undir_maps->count, 1, 'found 1 undirected map for artist 2');
@@ -310,3 +323,5 @@
$cds = $schema->resultset("CD")->search({ 'me.cdid' => 5 }, { join => { single_track => { cd => {} } } });
is($cds->count, 1, "subjoins under left joins force_left (hashref)");
+
+done_testing;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/relationship/doesnt_exist.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/relationship/doesnt_exist.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/relationship/doesnt_exist.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,7 +1,5 @@
-#!/usr/bin/perl -w
-
use strict;
-use warnings;
+use warnings;
use Test::More;
use lib qw(t/lib);
Modified: DBIx-Class/0.08/branches/ado_mssql/t/relationship/update_or_create_multi.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/relationship/update_or_create_multi.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/relationship/update_or_create_multi.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -76,8 +76,9 @@
$schema->storage->debugcb(undef);
$schema->storage->debug ($sdebug);
+my ($search_sql) = $sql[0] =~ /^(SELECT .+?)\:/;
is_same_sql (
- $sql[0],
+ $search_sql,
'SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track
FROM cd me
WHERE ( me.artist = ? AND me.title = ? AND me.genreid = ? )
Modified: DBIx-Class/0.08/branches/ado_mssql/t/relationship/update_or_create_single.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/relationship/update_or_create_single.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/relationship/update_or_create_single.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -78,7 +78,7 @@
# expect a year update on the only related row
-# (non-qunique column only)
+# (non-unique column only)
$genre->update_or_create_related ('model_cd', {
year => 2011,
});
@@ -95,5 +95,3 @@
},
'CD year column updated correctly without a disambiguator',
);
-
-
Modified: DBIx-Class/0.08/branches/ado_mssql/t/resultset/as_query.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/resultset/as_query.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/resultset/as_query.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -3,8 +3,6 @@
use strict;
use warnings FATAL => 'all';
-use Data::Dumper;
-
use Test::More;
plan ( tests => 5 );
Added: DBIx-Class/0.08/branches/ado_mssql/t/resultset/is_paged.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/resultset/is_paged.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/resultset/is_paged.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,19 @@
+use strict;
+use warnings;
+
+use lib qw(t/lib);
+use Test::More;
+use Test::Exception;
+use DBICTest;
+
+my $schema = DBICTest->init_schema();
+
+my $tkfks = $schema->resultset('Artist');
+
+ok !$tkfks->is_paged, 'vanilla resultset is not paginated';
+
+my $paginated = $tkfks->search(undef, { page => 5 });
+ok $paginated->is_paged, 'resultset is paginated now';
+
+done_testing;
+
Property changes on: DBIx-Class/0.08/branches/ado_mssql/t/resultset/is_paged.t
___________________________________________________________________
Name: svn:mergeinfo
+
Name: svn:eol-style
+ native
Modified: DBIx-Class/0.08/branches/ado_mssql/t/search/preserve_original_rs.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/search/preserve_original_rs.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/search/preserve_original_rs.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -8,7 +8,10 @@
use DBIC::SqlMakerTest;
use DBIC::DebugObj;
use DBICTest;
+
+# use Data::Dumper comparisons to avoid messing with coderefs
use Data::Dumper;
+$Data::Dumper::Sortkeys = 1;
my $schema = DBICTest->init_schema();
Modified: DBIx-Class/0.08/branches/ado_mssql/t/search/subquery.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/search/subquery.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/search/subquery.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -3,11 +3,8 @@
use strict;
use warnings;
-use Data::Dumper;
-
use Test::More;
-
use lib qw(t/lib);
use DBICTest;
use DBIC::SqlMakerTest;
@@ -19,6 +16,17 @@
my @tests = (
{
rs => $cdrs,
+ search => \[ "title = ? AND year LIKE ?", 'buahaha', '20%' ],
+ attrs => { rows => 5 },
+ sqlbind => \[
+ "( SELECT me.cdid,me.artist,me.title,me.year,me.genreid,me.single_track FROM cd me WHERE (title = ? AND year LIKE ?) LIMIT 5)",
+ 'buahaha',
+ '20%',
+ ],
+ },
+
+ {
+ rs => $cdrs,
search => {
artist_id => { 'in' => $art_rs->search({}, { rows => 1 })->get_column( 'id' )->as_query },
},
Copied: DBIx-Class/0.08/branches/ado_mssql/t/storage/base.t (from rev 7355, DBIx-Class/0.08/branches/ado_mssql/t/92storage.t)
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/storage/base.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/storage/base.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,189 @@
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Warn;
+use lib qw(t/lib);
+use DBICTest;
+use Data::Dumper;
+
+{
+ package DBICTest::ExplodingStorage::Sth;
+ use strict;
+ use warnings;
+
+ sub execute { die "Kablammo!" }
+
+ sub bind_param {}
+
+ package DBICTest::ExplodingStorage;
+ use strict;
+ use warnings;
+ use base 'DBIx::Class::Storage::DBI::SQLite';
+
+ my $count = 0;
+ sub sth {
+ my ($self, $sql) = @_;
+ return bless {}, "DBICTest::ExplodingStorage::Sth" unless $count++;
+ return $self->next::method($sql);
+ }
+
+ sub connected {
+ return 0 if $count == 1;
+ return shift->next::method(@_);
+ }
+}
+
+my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
+
+is( ref($schema->storage), 'DBIx::Class::Storage::DBI::SQLite',
+ 'Storage reblessed correctly into DBIx::Class::Storage::DBI::SQLite' );
+
+my $storage = $schema->storage;
+$storage->ensure_connected;
+
+eval {
+ $schema->storage->throw_exception('test_exception_42');
+};
+like($@, qr/\btest_exception_42\b/, 'basic exception');
+
+eval {
+ $schema->resultset('CD')->search_literal('broken +%$#$1')->all;
+};
+like($@, qr/prepare_cached failed/, 'exception via DBI->HandleError, etc');
+
+bless $storage, "DBICTest::ExplodingStorage";
+$schema->storage($storage);
+
+eval {
+ $schema->resultset('Artist')->create({ name => "Exploding Sheep" });
+};
+
+is($@, "", "Exploding \$sth->execute was caught");
+
+is(1, $schema->resultset('Artist')->search({name => "Exploding Sheep" })->count,
+ "And the STH was retired");
+
+
+# testing various invocations of connect_info ([ ... ])
+
+my $coderef = sub { 42 };
+my $invocations = {
+ 'connect_info ([ $d, $u, $p, \%attr, \%extra_attr])' => {
+ args => [
+ 'foo',
+ 'bar',
+ undef,
+ {
+ on_connect_do => [qw/a b c/],
+ PrintError => 0,
+ },
+ {
+ AutoCommit => 1,
+ on_disconnect_do => [qw/d e f/],
+ },
+ {
+ unsafe => 1,
+ auto_savepoint => 1,
+ },
+ ],
+ dbi_connect_info => [
+ 'foo',
+ 'bar',
+ undef,
+ {
+ %{$storage->_default_dbi_connect_attributes || {} },
+ PrintError => 0,
+ AutoCommit => 1,
+ },
+ ],
+ },
+
+ 'connect_info ([ \%code, \%extra_attr ])' => {
+ args => [
+ $coderef,
+ {
+ on_connect_do => [qw/a b c/],
+ PrintError => 0,
+ AutoCommit => 1,
+ on_disconnect_do => [qw/d e f/],
+ },
+ {
+ unsafe => 1,
+ auto_savepoint => 1,
+ },
+ ],
+ dbi_connect_info => [
+ $coderef,
+ ],
+ },
+
+ 'connect_info ([ \%attr ])' => {
+ args => [
+ {
+ on_connect_do => [qw/a b c/],
+ PrintError => 1,
+ AutoCommit => 0,
+ on_disconnect_do => [qw/d e f/],
+ user => 'bar',
+ dsn => 'foo',
+ },
+ {
+ unsafe => 1,
+ auto_savepoint => 1,
+ },
+ ],
+ dbi_connect_info => [
+ 'foo',
+ 'bar',
+ undef,
+ {
+ %{$storage->_default_dbi_connect_attributes || {} },
+ PrintError => 1,
+ AutoCommit => 0,
+ },
+ ],
+ },
+ 'connect_info ([ \%attr_with_coderef ])' => {
+ args => [ {
+ dbh_maker => $coderef,
+ dsn => 'blah',
+ user => 'bleh',
+ on_connect_do => [qw/a b c/],
+ on_disconnect_do => [qw/d e f/],
+ } ],
+ dbi_connect_info => [
+ $coderef
+ ],
+ warn => qr/Attribute\(s\) 'dsn', 'user' in connect_info were ignored/,
+ },
+};
+
+for my $type (keys %$invocations) {
+
+ # we can not use a cloner portably because of the coderef
+ # so compare dumps instead
+ local $Data::Dumper::Sortkeys = 1;
+ my $arg_dump = Dumper ($invocations->{$type}{args});
+
+ warnings_exist (
+ sub { $storage->connect_info ($invocations->{$type}{args}) },
+ $invocations->{$type}{warn} || (),
+ 'Warned about ignored attributes',
+ );
+
+ is ($arg_dump, Dumper ($invocations->{$type}{args}), "$type didn't modify passed arguments");
+
+ is_deeply ($storage->_dbi_connect_info, $invocations->{$type}{dbi_connect_info}, "$type produced correct _dbi_connect_info");
+ ok ( (not $storage->auto_savepoint and not $storage->unsafe), "$type correctly ignored extra hashref");
+
+ is_deeply (
+ [$storage->on_connect_do, $storage->on_disconnect_do ],
+ [ [qw/a b c/], [qw/d e f/] ],
+ "$type correctly parsed DBIC specific on_[dis]connect_do",
+ );
+}
+
+done_testing;
+
+1;
Copied: DBIx-Class/0.08/branches/ado_mssql/t/storage/dbh_do.t (from rev 7355, DBIx-Class/0.08/branches/ado_mssql/t/dbh_do.t)
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/storage/dbh_do.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/storage/dbh_do.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,33 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+
+use Test::More tests => 8;
+use lib qw(t/lib);
+use DBICTest;
+
+
+my $schema = DBICTest->init_schema();
+my $storage = $schema->storage;
+
+my $test_func = sub {
+ is $_[0], $storage;
+ is $_[1], $storage->dbh;
+ is $_[2], "foo";
+ is $_[3], "bar";
+};
+
+$storage->dbh_do(
+ $test_func,
+ "foo", "bar"
+);
+
+my $storage_class = ref $storage;
+{
+ no strict 'refs';
+ *{$storage_class .'::__test_method'} = $test_func;
+}
+$storage->dbh_do("__test_method", "foo", "bar");
+
+
\ No newline at end of file
Copied: DBIx-Class/0.08/branches/ado_mssql/t/storage/dbi_coderef.t (from rev 7355, DBIx-Class/0.08/branches/ado_mssql/t/32connect_code_ref.t)
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/storage/dbi_coderef.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/storage/dbi_coderef.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,24 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 1;
+
+# Set up the "usual" sqlite for DBICTest
+my $normal_schema = DBICTest->init_schema( sqlite_use_file => 1 );
+
+# Steal the dsn, which should be like 'dbi:SQLite:t/var/DBIxClass.db'
+my $normal_dsn = $normal_schema->storage->_dbi_connect_info->[0];
+
+# Make sure we have no active connection
+$normal_schema->storage->disconnect;
+
+# Make a new clone with a new connection, using a code reference
+my $code_ref_schema = $normal_schema->connect(sub { DBI->connect($normal_dsn); });
+
+# Stolen from 60core.t - this just verifies things seem to work at all
+my @art = $code_ref_schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
+cmp_ok(@art, '==', 3, "Three artists returned");
Copied: DBIx-Class/0.08/branches/ado_mssql/t/storage/debug.t (from rev 7355, DBIx-Class/0.08/branches/ado_mssql/t/91debug.t)
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/storage/debug.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/storage/debug.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,73 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::DebugObj;
+use DBIC::SqlMakerTest;
+
+my $schema = DBICTest->init_schema();
+
+plan tests => 7;
+
+ok ( $schema->storage->debug(1), 'debug' );
+ok ( defined(
+ $schema->storage->debugfh(
+ IO::File->new('t/var/sql.log', 'w')
+ )
+ ),
+ 'debugfh'
+ );
+
+$schema->storage->debugfh->autoflush(1);
+my $rs = $schema->resultset('CD')->search({});
+$rs->count();
+
+my $log = new IO::File('t/var/sql.log', 'r') or die($!);
+my $line = <$log>;
+$log->close();
+ok($line =~ /^SELECT COUNT/, 'Log success');
+
+$schema->storage->debugfh(undef);
+$ENV{'DBIC_TRACE'} = '=t/var/foo.log';
+$rs = $schema->resultset('CD')->search({});
+$rs->count();
+$log = new IO::File('t/var/foo.log', 'r') or die($!);
+$line = <$log>;
+$log->close();
+ok($line =~ /^SELECT COUNT/, 'Log success');
+$schema->storage->debugobj->debugfh(undef);
+delete($ENV{'DBIC_TRACE'});
+open(STDERRCOPY, '>&STDERR');
+stat(STDERRCOPY); # nop to get warnings quiet
+close(STDERR);
+eval {
+ $rs = $schema->resultset('CD')->search({});
+ $rs->count();
+};
+ok($@, 'Died on closed FH');
+open(STDERR, '>&STDERRCOPY');
+
+# test trace output correctness for bind params
+{
+ my ($sql, @bind);
+ $schema->storage->debugobj(DBIC::DebugObj->new(\$sql, \@bind));
+
+ my @cds = $schema->resultset('CD')->search( { artist => 1, cdid => { -between => [ 1, 3 ] }, } );
+ is_same_sql_bind(
+ $sql, \@bind,
+ "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) ): '1', '1', '3'",
+ [qw/'1' '1' '3'/],
+ 'got correct SQL with all bind parameters (debugcb)'
+ );
+
+ @cds = $schema->resultset('CD')->search( { artist => 1, cdid => { -between => [ 1, 3 ] }, } );
+ is_same_sql_bind(
+ $sql, \@bind,
+ "SELECT me.cdid, me.artist, me.title, me.year, me.genreid, me.single_track FROM cd me WHERE ( artist = ? AND (cdid BETWEEN ? AND ?) )", ["'1'", "'1'", "'3'"],
+ 'got correct SQL with all bind parameters (debugobj)'
+ );
+}
+
+1;
Copied: DBIx-Class/0.08/branches/ado_mssql/t/storage/disable_sth_caching.t (from rev 7355, DBIx-Class/0.08/branches/ado_mssql/t/35disable_sth_caching.t)
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/storage/disable_sth_caching.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/storage/disable_sth_caching.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,19 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 2;
+
+# Set up the "usual" sqlite for DBICTest
+my $schema = DBICTest->init_schema;
+
+my $sth_one = $schema->storage->sth('SELECT 42');
+my $sth_two = $schema->storage->sth('SELECT 42');
+$schema->storage->disable_sth_caching(1);
+my $sth_three = $schema->storage->sth('SELECT 42');
+
+ok($sth_one == $sth_two, "statement caching works");
+ok($sth_two != $sth_three, "disabling statement caching works");
Copied: DBIx-Class/0.08/branches/ado_mssql/t/storage/error.t (from rev 7355, DBIx-Class/0.08/branches/ado_mssql/t/18inserterror.t)
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/storage/error.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/storage/error.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,29 @@
+use Class::C3;
+use strict;
+use Test::More;
+use warnings;
+
+BEGIN {
+ eval "use DBD::SQLite";
+ plan $@
+ ? ( skip_all => 'needs DBD::SQLite for testing' )
+ : ( tests => 4 );
+}
+
+use lib qw(t/lib);
+
+use_ok( 'DBICTest' );
+use_ok( 'DBICTest::Schema' );
+my $schema = DBICTest->init_schema;
+
+{
+ my $warnings;
+ local $SIG{__WARN__} = sub { $warnings .= $_[0] };
+ eval {
+ $schema->resultset('CD')
+ ->create({ title => 'vacation in antarctica' })
+ };
+ like $@, qr/NULL/; # as opposed to some other error
+ unlike( $warnings, qr/uninitialized value/, "No warning from Storage" );
+}
+
Added: DBIx-Class/0.08/branches/ado_mssql/t/storage/exception.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/storage/exception.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/storage/exception.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,43 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+
+use Test::More;
+use Test::Exception;
+use lib qw(t/lib);
+use DBICTest;
+use DBICTest::Schema;
+
+# make sure nothing eats the exceptions (an unchecked eval in Storage::DESTROY used to be a problem)
+
+{
+ package Dying::Storage;
+
+ use warnings;
+ use strict;
+
+ use base 'DBIx::Class::Storage::DBI';
+
+ sub _populate_dbh {
+ my $self = shift;
+ my $death = $self->_dbi_connect_info->[3]{die};
+
+ die "storage test died: $death" if $death eq 'before_populate';
+ my $ret = $self->next::method (@_);
+ die "storage test died: $death" if $death eq 'after_populate';
+
+ return $ret;
+ }
+}
+
+for (qw/before_populate after_populate/) {
+ dies_ok (sub {
+ my $schema = DBICTest::Schema->clone;
+ $schema->storage_type ('Dying::Storage');
+ $schema->connection (DBICTest->_database, { die => $_ });
+ $schema->storage->ensure_connected;
+ }, "$_ exception found");
+}
+
+done_testing;
Copied: DBIx-Class/0.08/branches/ado_mssql/t/storage/on_connect_call.t (from rev 7355, DBIx-Class/0.08/branches/ado_mssql/t/92storage_on_connect_call.t)
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/storage/on_connect_call.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/storage/on_connect_call.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,97 @@
+use strict;
+use warnings;
+no warnings qw/once redefine/;
+
+use lib qw(t/lib);
+use DBI;
+use DBICTest;
+use DBICTest::Schema;
+use DBIx::Class::Storage::DBI;
+
+# !!! do not replace this with done_testing - tests reside in the callbacks
+# !!! number of calls is important
+use Test::More tests => 15;
+# !!!
+
+my $schema = DBICTest::Schema->clone;
+
+{
+ *DBIx::Class::Storage::DBI::connect_call_foo = sub {
+ isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+ 'got storage in connect_call method';
+ is $_[1], 'bar', 'got param in connect_call method';
+ };
+
+ *DBIx::Class::Storage::DBI::disconnect_call_foo = sub {
+ isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+ 'got storage in disconnect_call method';
+ };
+
+ ok $schema->connection(
+ DBICTest->_database,
+ {
+ on_connect_call => [
+ [ do_sql => 'create table test1 (id integer)' ],
+ [ do_sql => [ 'insert into test1 values (?)', {}, 1 ] ],
+ [ do_sql => sub { ['insert into test1 values (2)'] } ],
+ [ sub { $_[0]->dbh->do($_[1]) }, 'insert into test1 values (3)' ],
+ # this invokes $storage->connect_call_foo('bar') (above)
+ [ foo => 'bar' ],
+ ],
+ on_connect_do => 'insert into test1 values (4)',
+ on_disconnect_call => 'foo',
+ },
+ ), 'connection()';
+
+ ok (! $schema->storage->connected, 'start disconnected');
+
+ is_deeply (
+ $schema->storage->dbh->selectall_arrayref('select * from test1'),
+ [ [ 1 ], [ 2 ], [ 3 ], [ 4 ] ],
+ 'on_connect_call/do actions worked'
+ );
+
+ $schema->storage->disconnect;
+}
+
+{
+ *DBIx::Class::Storage::DBI::connect_call_foo = sub {
+ isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+ 'got storage in connect_call method';
+ };
+
+ *DBIx::Class::Storage::DBI::connect_call_bar = sub {
+ isa_ok $_[0], 'DBIx::Class::Storage::DBI',
+ 'got storage in connect_call method';
+ };
+
+
+ ok $schema->connection(
+ DBICTest->_database,
+ {
+ # method list form
+ on_connect_call => [ 'foo', sub { ok 1, "coderef in list form" }, 'bar' ],
+ },
+ ), 'connection()';
+
+ ok (! $schema->storage->connected, 'start disconnected');
+ $schema->storage->ensure_connected;
+ $schema->storage->disconnect; # this should not fire any tests
+}
+
+{
+ ok $schema->connection(
+ sub { DBI->connect(DBICTest->_database) },
+ {
+ # method list form
+ on_connect_call => [ sub { ok 1, "on_connect_call after DT parser" }, ],
+ on_disconnect_call => [ sub { ok 1, "on_disconnect_call after DT parser" }, ],
+ },
+ ), 'connection()';
+
+ ok (! $schema->storage->connected, 'start disconnected');
+ my $parser = $schema->storage->datetime_parser;
+
+ $schema->storage->ensure_connected;
+ $schema->storage->disconnect;
+}
Copied: DBIx-Class/0.08/branches/ado_mssql/t/storage/on_connect_do.t (from rev 7355, DBIx-Class/0.08/branches/ado_mssql/t/92storage_on_connect_do.t)
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/storage/on_connect_do.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/storage/on_connect_do.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,89 @@
+use strict;
+use warnings;
+
+use Test::More tests => 12;
+
+use lib qw(t/lib);
+use base 'DBICTest';
+require DBI;
+
+
+my $schema = DBICTest->init_schema(
+ no_connect => 1,
+ no_deploy => 1,
+);
+
+ok $schema->connection(
+ DBICTest->_database,
+ {
+ on_connect_do => 'CREATE TABLE TEST_empty (id INTEGER)',
+ },
+), 'connection()';
+
+is_deeply (
+ $schema->storage->dbh->selectall_arrayref('SELECT * FROM TEST_empty'),
+ [],
+ 'string version on_connect_do() worked'
+);
+
+$schema->storage->disconnect;
+
+ok $schema->connection(
+ sub { DBI->connect(DBICTest->_database) },
+ {
+ on_connect_do => [
+ 'CREATE TABLE TEST_empty (id INTEGER)',
+ [ 'INSERT INTO TEST_empty VALUES (?)', {}, 2 ],
+ \&insert_from_subref,
+ ],
+ on_disconnect_do =>
+ [\&check_exists, 'DROP TABLE TEST_empty', \&check_dropped],
+ },
+), 'connection()';
+
+is_deeply (
+ $schema->storage->dbh->selectall_arrayref('SELECT * FROM TEST_empty'),
+ [ [ 2 ], [ 3 ], [ 7 ] ],
+ 'on_connect_do() worked'
+);
+eval { $schema->storage->dbh->do('SELECT 1 FROM TEST_nonexistent'); };
+ok $@, 'Searching for nonexistent table dies';
+
+$schema->storage->disconnect();
+
+my($connected, $disconnected, @cb_args);
+ok $schema->connection(
+ DBICTest->_database,
+ {
+ on_connect_do => sub { $connected = 1; @cb_args = @_; },
+ on_disconnect_do => sub { $disconnected = 1 },
+ },
+), 'second connection()';
+$schema->storage->dbh->do('SELECT 1');
+ok $connected, 'on_connect_do() called after connect()';
+ok ! $disconnected, 'on_disconnect_do() not called after connect()';
+$schema->storage->disconnect();
+ok $disconnected, 'on_disconnect_do() called after disconnect()';
+
+isa_ok($cb_args[0], 'DBIx::Class::Storage', 'first arg to on_connect_do hook');
+
+sub check_exists {
+ my $storage = shift;
+ ok $storage->dbh->do('SELECT 1 FROM TEST_empty'), 'Table still exists';
+ return;
+}
+
+sub check_dropped {
+ my $storage = shift;
+ eval { $storage->dbh->do('SELECT 1 FROM TEST_empty'); };
+ ok $@, 'Reading from dropped table fails';
+ return;
+}
+
+sub insert_from_subref {
+ my $storage = shift;
+ return [
+ [ 'INSERT INTO TEST_empty VALUES (?)', {}, 3 ],
+ 'INSERT INTO TEST_empty VALUES (7)',
+ ];
+}
Copied: DBIx-Class/0.08/branches/ado_mssql/t/storage/ping_count.t (from rev 7355, DBIx-Class/0.08/branches/ado_mssql/t/92storage_ping_count.t)
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/storage/ping_count.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/storage/ping_count.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,60 @@
+use strict;
+use warnings;
+
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+use DBIC::SqlMakerTest;
+
+my $ping_count = 0;
+
+{
+ local $SIG{__WARN__} = sub {};
+ require DBIx::Class::Storage::DBI;
+
+ my $ping = \&DBIx::Class::Storage::DBI::_ping;
+
+ *DBIx::Class::Storage::DBI::_ping = sub {
+ $ping_count++;
+ goto &$ping;
+ };
+}
+
+
+# measure pings around deploy() separately
+my $schema = DBICTest->init_schema( sqlite_use_file => 1, no_populate => 1 );
+
+is ($ping_count, 0, 'no _ping() calls during deploy');
+$ping_count = 0;
+
+
+
+DBICTest->populate_schema ($schema);
+
+# perform some operations and make sure they don't ping
+
+$schema->resultset('CD')->create({
+ cdid => 6, artist => 3, title => 'mtfnpy', year => 2009
+});
+
+$schema->resultset('CD')->create({
+ cdid => 7, artist => 3, title => 'mtfnpy2', year => 2009
+});
+
+$schema->storage->_dbh->disconnect;
+
+$schema->resultset('CD')->create({
+ cdid => 8, artist => 3, title => 'mtfnpy3', year => 2009
+});
+
+$schema->storage->_dbh->disconnect;
+
+$schema->txn_do(sub {
+ $schema->resultset('CD')->create({
+ cdid => 9, artist => 3, title => 'mtfnpy4', year => 2009
+ });
+});
+
+is $ping_count, 0, 'no _ping() calls';
+
+done_testing;
Copied: DBIx-Class/0.08/branches/ado_mssql/t/storage/reconnect.t (from rev 7355, DBIx-Class/0.08/branches/ado_mssql/t/33storage_reconnect.t)
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/storage/reconnect.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/storage/reconnect.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,73 @@
+use strict;
+use warnings;
+
+use FindBin;
+use File::Copy;
+use Test::More;
+use lib qw(t/lib);
+use DBICTest;
+
+plan tests => 6;
+
+my $db_orig = "$FindBin::Bin/../var/DBIxClass.db";
+my $db_tmp = "$db_orig.tmp";
+
+# Set up the "usual" sqlite for DBICTest
+my $schema = DBICTest->init_schema( sqlite_use_file => 1 );
+
+# Make sure we're connected by doing something
+my @art = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
+cmp_ok(@art, '==', 3, "Three artists returned");
+
+# Disconnect the dbh, and be sneaky about it
+# Also test if DBD::SQLite finally knows how to ->disconnect properly
+{
+ my $w;
+ local $SIG{__WARN__} = sub { $w = shift };
+ $schema->storage->_dbh->disconnect;
+ ok ($w !~ /active statement handles/, 'SQLite can disconnect properly');
+}
+
+# Try the operation again - What should happen here is:
+# 1. S::DBI blindly attempts the SELECT, which throws an exception
+# 2. It catches the exception, checks ->{Active}/->ping, sees the disconnected state...
+# 3. Reconnects, and retries the operation
+# 4. Success!
+my @art_two = $schema->resultset("Artist")->search({ }, { order_by => 'name DESC'});
+cmp_ok(@art_two, '==', 3, "Three artists returned");
+
+### Now, disconnect the dbh, and move the db file;
+# create a new one and chmod 000 to prevent SQLite from connecting.
+$schema->storage->_dbh->disconnect;
+move( $db_orig, $db_tmp );
+open DBFILE, '>', $db_orig;
+print DBFILE 'THIS IS NOT A REAL DATABASE';
+close DBFILE;
+chmod 0000, $db_orig;
+
+### Try the operation again... it should fail, since there's no db
+{
+ # Catch the DBI connection error
+ local $SIG{__WARN__} = sub {};
+ eval {
+ my @art_three = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
+ };
+ ok( $@, 'The operation failed' );
+}
+
+### Now, move the db file back to the correct name
+unlink($db_orig);
+move( $db_tmp, $db_orig );
+
+SKIP: {
+ skip "Cannot reconnect if original connection didn't fail", 2
+ if ( $@ =~ /encrypted or is not a database/ );
+
+ ### Try the operation again... this time, it should succeed
+ my @art_four;
+ eval {
+ @art_four = $schema->resultset("Artist")->search( {}, { order_by => 'name DESC' } );
+ };
+ ok( !$@, 'The operation succeeded' );
+ cmp_ok( @art_four, '==', 3, "Three artists returned" );
+}
Copied: DBIx-Class/0.08/branches/ado_mssql/t/storage/replication.t (from rev 7355, DBIx-Class/0.08/branches/ado_mssql/t/93storage_replication.t)
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/storage/replication.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/storage/replication.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,853 @@
+use strict;
+use warnings;
+use lib qw(t/lib);
+use Test::More;
+use Test::Exception;
+use DBICTest;
+use List::Util 'first';
+use Scalar::Util 'reftype';
+use File::Spec;
+use IO::Handle;
+
+BEGIN {
+ eval "use DBIx::Class::Storage::DBI::Replicated; use Test::Moose";
+ plan skip_all => "Deps not installed: $@" if $@;
+}
+
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+use_ok 'DBIx::Class::Storage::DBI::Replicated';
+
+use Moose();
+use MooseX::Types();
+diag "Using Moose version $Moose::VERSION and MooseX::Types version $MooseX::Types::VERSION";
+
+=head1 HOW TO USE
+
+ This is a test of the replicated storage system. This will work in one of
+  two ways, either it will try to fake replication with a couple of SQLite DBs
+ and creative use of copy, or if you define a couple of %ENV vars correctly
+ will try to test those. If you do that, it will assume the setup is properly
+ replicating. Your results may vary, but I have demonstrated this to work with
+ mysql native replication.
+
+=cut
+
+
+## ----------------------------------------------------------------------------
+## Build a class to hold all our required testing data and methods.
+## ----------------------------------------------------------------------------
+
+TESTSCHEMACLASSES: {
+
+ ## --------------------------------------------------------------------- ##
+ ## Create an object to contain your replicated stuff.
+ ## --------------------------------------------------------------------- ##
+
+ package DBIx::Class::DBI::Replicated::TestReplication;
+
+ use DBICTest;
+ use base qw/Class::Accessor::Fast/;
+
+ __PACKAGE__->mk_accessors( qw/schema/ );
+
+ ## Initialize the object
+
+ sub new {
+ my ($class, $schema_method) = (shift, shift);
+ my $self = $class->SUPER::new(@_);
+
+ $self->schema( $self->init_schema($schema_method) );
+ return $self;
+ }
+
+ ## Get the Schema and set the replication storage type
+
+ sub init_schema {
+ # current SQLT SQLite producer does not handle DROP TABLE IF EXISTS, trap warnings here
+ local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/ };
+
+ my ($class, $schema_method) = @_;
+
+ my $method = "get_schema_$schema_method";
+ my $schema = $class->$method;
+
+ return $schema;
+ }
+
+ sub get_schema_by_storage_type {
+ DBICTest->init_schema(
+ sqlite_use_file => 1,
+ storage_type=>{
+ '::DBI::Replicated' => {
+ balancer_type=>'::Random',
+ balancer_args=>{
+ auto_validate_every=>100,
+ master_read_weight => 1
+ },
+ }
+ },
+ deploy_args=>{
+ add_drop_table => 1,
+ },
+ );
+ }
+
+ sub get_schema_by_connect_info {
+ DBICTest->init_schema(
+ sqlite_use_file => 1,
+ storage_type=> '::DBI::Replicated',
+ balancer_type=>'::Random',
+ balancer_args=> {
+ auto_validate_every=>100,
+ master_read_weight => 1
+ },
+ deploy_args=>{
+ add_drop_table => 1,
+ },
+ );
+ }
+
+ sub generate_replicant_connect_info {}
+ sub replicate {}
+ sub cleanup {}
+
+ ## --------------------------------------------------------------------- ##
+ ## Add a connect_info option to test option merging.
+ ## --------------------------------------------------------------------- ##
+ {
+ package DBIx::Class::Storage::DBI::Replicated;
+
+ use Moose;
+
+ __PACKAGE__->meta->make_mutable;
+
+ around connect_info => sub {
+ my ($next, $self, $info) = @_;
+ $info->[3]{master_option} = 1;
+ $self->$next($info);
+ };
+
+ __PACKAGE__->meta->make_immutable;
+
+ no Moose;
+ }
+
+ ## --------------------------------------------------------------------- ##
+ ## Subclass for when you are using SQLite for testing, this provides a fake
+ ## replication support.
+ ## --------------------------------------------------------------------- ##
+
+ package DBIx::Class::DBI::Replicated::TestReplication::SQLite;
+
+ use DBICTest;
+ use File::Copy;
+ use base 'DBIx::Class::DBI::Replicated::TestReplication';
+
+ __PACKAGE__->mk_accessors(qw/master_path slave_paths/);
+
+ ## Set the master path from DBICTest
+
+ sub new {
+ my $class = shift @_;
+ my $self = $class->SUPER::new(@_);
+
+ $self->master_path( DBICTest->_sqlite_dbfilename );
+ $self->slave_paths([
+ File::Spec->catfile(qw/t var DBIxClass_slave1.db/),
+ File::Spec->catfile(qw/t var DBIxClass_slave2.db/),
+ ]);
+
+ return $self;
+ }
+
+ ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
+ ## $storage->connect_info to be used for connecting replicants.
+
+ sub generate_replicant_connect_info {
+ my $self = shift @_;
+ my @dsn = map {
+ "dbi:SQLite:${_}";
+ } @{$self->slave_paths};
+
+ my @connect_infos = map { [$_,'','',{AutoCommit=>1}] } @dsn;
+
+ ## Make sure nothing is left over from a failed test
+ $self->cleanup;
+
+ ## try a hashref too
+ my $c = $connect_infos[0];
+ $connect_infos[0] = {
+ dsn => $c->[0],
+ user => $c->[1],
+ password => $c->[2],
+ %{ $c->[3] }
+ };
+
+ @connect_infos
+ }
+
+ ## Do a 'good enough' replication by copying the master dbfile over each of
+ ## the slave dbfiles. If the master is SQLite we do this, otherwise we
+ ## just do a one second pause to let the slaves catch up.
+
+ sub replicate {
+ my $self = shift @_;
+ foreach my $slave (@{$self->slave_paths}) {
+ copy($self->master_path, $slave);
+ }
+ }
+
+  ## Cleanup after ourselves.  Unlink all the slave paths.
+
+ sub cleanup {
+ my $self = shift @_;
+ foreach my $slave (@{$self->slave_paths}) {
+ if(-e $slave) {
+ unlink $slave;
+ }
+ }
+ }
+
+ ## --------------------------------------------------------------------- ##
+ ## Subclass for when you are setting the databases via custom export vars
+ ## This is for when you have a replicating database setup that you are
+ ## going to test against. You'll need to define the correct $ENV and have
+ ## two slave databases to test against, as well as a replication system
+ ## that will replicate in less than 1 second.
+ ## --------------------------------------------------------------------- ##
+
+ package DBIx::Class::DBI::Replicated::TestReplication::Custom;
+ use base 'DBIx::Class::DBI::Replicated::TestReplication';
+
+ ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
+ ## $storage->connect_info to be used for connecting replicants.
+
+ sub generate_replicant_connect_info {
+ return (
+ [$ENV{"DBICTEST_SLAVE0_DSN"}, $ENV{"DBICTEST_SLAVE0_DBUSER"}, $ENV{"DBICTEST_SLAVE0_DBPASS"}, {AutoCommit => 1}],
+ [$ENV{"DBICTEST_SLAVE1_DSN"}, $ENV{"DBICTEST_SLAVE1_DBUSER"}, $ENV{"DBICTEST_SLAVE1_DBPASS"}, {AutoCommit => 1}],
+ );
+ }
+
+ ## pause a bit to let the replication catch up
+
+ sub replicate {
+ sleep 1;
+ }
+}
+
+## ----------------------------------------------------------------------------
+## Create an object and run some tests
+## ----------------------------------------------------------------------------
+
+## The first bunch of tests are basic, just make sure all the bits are behaving
+
+my $replicated_class = DBICTest->has_custom_dsn ?
+ 'DBIx::Class::DBI::Replicated::TestReplication::Custom' :
+ 'DBIx::Class::DBI::Replicated::TestReplication::SQLite';
+
+my $replicated;
+
+for my $method (qw/by_connect_info by_storage_type/) {
+ undef $replicated;
+ ok $replicated = $replicated_class->new($method)
+ => "Created a replication object $method";
+
+ isa_ok $replicated->schema
+ => 'DBIx::Class::Schema';
+
+ isa_ok $replicated->schema->storage
+ => 'DBIx::Class::Storage::DBI::Replicated';
+
+ isa_ok $replicated->schema->storage->balancer
+ => 'DBIx::Class::Storage::DBI::Replicated::Balancer::Random'
+ => 'configured balancer_type';
+}
+
+ok $replicated->schema->storage->meta
+ => 'has a meta object';
+
+isa_ok $replicated->schema->storage->master
+ => 'DBIx::Class::Storage::DBI';
+
+isa_ok $replicated->schema->storage->pool
+ => 'DBIx::Class::Storage::DBI::Replicated::Pool';
+
+does_ok $replicated->schema->storage->balancer
+ => 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+
+ok my @replicant_connects = $replicated->generate_replicant_connect_info
+ => 'got replication connect information';
+
+ok my @replicated_storages = $replicated->schema->storage->connect_replicants(@replicant_connects)
+ => 'Created some storages suitable for replicants';
+
+our %debug;
+$replicated->schema->storage->debug(1);
+$replicated->schema->storage->debugcb(sub {
+ my ($op, $info) = @_;
+ ##warn "\n$op, $info\n";
+ %debug = (
+ op => $op,
+ info => $info,
+ dsn => ($info=~m/\[(.+)\]/)[0],
+ storage_type => $info=~m/REPLICANT/ ? 'REPLICANT' : 'MASTER',
+ );
+});
+
+ok my @all_storages = $replicated->schema->storage->all_storages
+ => '->all_storages';
+
+is scalar @all_storages,
+ 3
+ => 'correct number of ->all_storages';
+
+is ((grep $_->isa('DBIx::Class::Storage::DBI'), @all_storages),
+ 3
+ => '->all_storages are correct type');
+
+my @all_storage_opts =
+ grep { (reftype($_)||'') eq 'HASH' }
+ map @{ $_->_connect_info }, @all_storages;
+
+is ((grep $_->{master_option}, @all_storage_opts),
+ 3
+ => 'connect_info was merged from master to replicants');
+
+my @replicant_names = keys %{ $replicated->schema->storage->replicants };
+
+ok @replicant_names, "found replicant names @replicant_names";
+
+## Silence warning about not supporting the is_replicating method if using the
+## sqlite dbs.
+$replicated->schema->storage->debugobj->silence(1)
+ if first { m{^t/} } @replicant_names;
+
+isa_ok $replicated->schema->storage->balancer->current_replicant
+ => 'DBIx::Class::Storage::DBI';
+
+$replicated->schema->storage->debugobj->silence(0);
+
+ok $replicated->schema->storage->pool->has_replicants
+ => 'does have replicants';
+
+is $replicated->schema->storage->pool->num_replicants => 2
+ => 'has two replicants';
+
+does_ok $replicated_storages[0]
+ => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated_storages[1]
+ => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated->schema->storage->replicants->{$replicant_names[0]}
+ => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated->schema->storage->replicants->{$replicant_names[1]}
+ => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+## Add some info to the database
+
+$replicated
+ ->schema
+ ->populate('Artist', [
+ [ qw/artistid name/ ],
+ [ 4, "Ozric Tentacles"],
+ ]);
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ like $debug{info}, qr/INSERT/, 'Last was an insert';
+
+## Make sure all the slaves have the table definitions
+
+$replicated->replicate;
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
+
+## Silence warning about not supporting the is_replicating method if using the
+## sqlite dbs.
+$replicated->schema->storage->debugobj->silence(1)
+ if first { m{^t/} } @replicant_names;
+
+$replicated->schema->storage->pool->validate_replicants;
+
+$replicated->schema->storage->debugobj->silence(0);
+
+## Make sure we can read the data.
+
+ok my $artist1 = $replicated->schema->resultset('Artist')->find(4)
+ => 'Created Result';
+
+## We removed testing here since master read weight is on, so we can't tell in
+## advance what storage to expect. We turn master read weight off a bit lower
+## is $debug{storage_type}, 'REPLICANT'
+## => "got last query from a replicant: $debug{dsn}, $debug{info}";
+
+isa_ok $artist1
+ => 'DBICTest::Artist';
+
+is $artist1->name, 'Ozric Tentacles'
+ => 'Found expected name for first result';
+
+## Check that master_read_weight is honored
+{
+ no warnings qw/once redefine/;
+
+ local
+ *DBIx::Class::Storage::DBI::Replicated::Balancer::Random::_random_number =
+ sub { 999 };
+
+ $replicated->schema->storage->balancer->increment_storage;
+
+ is $replicated->schema->storage->balancer->current_replicant,
+ $replicated->schema->storage->master
+ => 'master_read_weight is honored';
+
+ ## turn it off for the duration of the test
+ $replicated->schema->storage->balancer->master_read_weight(0);
+ $replicated->schema->storage->balancer->increment_storage;
+}
+
+## Add some new rows that only the master will have. This is because
+## we overload any type of write operation so that it must hit the master
+## database.
+
+$replicated
+ ->schema
+ ->populate('Artist', [
+ [ qw/artistid name/ ],
+ [ 5, "Doom's Children"],
+ [ 6, "Dead On Arrival"],
+ [ 7, "Watergate"],
+ ]);
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ like $debug{info}, qr/INSERT/, 'Last was an insert';
+
+## Make sure all the slaves have the table definitions
+$replicated->replicate;
+
+## Should find some data now
+
+ok my $artist2 = $replicated->schema->resultset('Artist')->find(5)
+ => 'Sync succeed';
+
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
+isa_ok $artist2
+ => 'DBICTest::Artist';
+
+is $artist2->name, "Doom's Children"
+ => 'Found expected name for first result';
+
+## What happens when we disconnect all the replicants?
+
+is $replicated->schema->storage->pool->connected_replicants => 2
+ => "both replicants are connected";
+
+$replicated->schema->storage->replicants->{$replicant_names[0]}->disconnect;
+$replicated->schema->storage->replicants->{$replicant_names[1]}->disconnect;
+
+is $replicated->schema->storage->pool->connected_replicants => 0
+ => "both replicants are now disconnected";
+
+## All these should pass, since the database should automatically reconnect
+
+ok my $artist3 = $replicated->schema->resultset('Artist')->find(6)
+ => 'Still finding stuff.';
+
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
+isa_ok $artist3
+ => 'DBICTest::Artist';
+
+is $artist3->name, "Dead On Arrival"
+ => 'Found expected name for first result';
+
+is $replicated->schema->storage->pool->connected_replicants => 1
+ => "At Least One replicant reconnected to handle the job";
+
+## What happens when we try to select something that doesn't exist?
+
+ok ! $replicated->schema->resultset('Artist')->find(666)
+ => 'Correctly failed to find something.';
+
+is $debug{storage_type}, 'REPLICANT'
+ => "got last query from a replicant: $debug{dsn}";
+
+## test the reliable option
+
+TESTRELIABLE: {
+
+ $replicated->schema->storage->set_reliable_storage;
+
+ ok $replicated->schema->resultset('Artist')->find(2)
+ => 'Read from master 1';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ ok $replicated->schema->resultset('Artist')->find(5)
+ => 'Read from master 2';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ $replicated->schema->storage->set_balanced_storage;
+
+ ok $replicated->schema->resultset('Artist')->find(3)
+ => 'Read from replicant';
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+}
+
+## Make sure when reliable goes out of scope, we are using replicants again
+
+ok $replicated->schema->resultset('Artist')->find(1)
+ => 'back to replicant 1.';
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
+ok $replicated->schema->resultset('Artist')->find(2)
+ => 'back to replicant 2.';
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
+## set all the replicants to inactive, and make sure the balancer falls back to
+## the master.
+
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
+
+{
+ ## catch the fallback to master warning
+ open my $debugfh, '>', \my $fallback_warning;
+ my $oldfh = $replicated->schema->storage->debugfh;
+ $replicated->schema->storage->debugfh($debugfh);
+
+ ok $replicated->schema->resultset('Artist')->find(2)
+ => 'Fallback to master';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ like $fallback_warning, qr/falling back to master/
+ => 'emits falling back to master warning';
+
+ $replicated->schema->storage->debugfh($oldfh);
+}
+
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
+
+## Silence warning about not supporting the is_replicating method if using the
+## sqlite dbs.
+$replicated->schema->storage->debugobj->silence(1)
+ if first { m{^t/} } @replicant_names;
+
+$replicated->schema->storage->pool->validate_replicants;
+
+$replicated->schema->storage->debugobj->silence(0);
+
+ok $replicated->schema->resultset('Artist')->find(2)
+ => 'Returned to replicates';
+
+is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
+## Getting slave status tests
+
+SKIP: {
+ ## We skip these tests unless you have custom replicants, since the default
+ ## sqlite based replication tests don't support these functions.
+
+ skip 'Cannot Test Replicant Status on Non Replicating Database', 10
+ unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
+
+ $replicated->replicate; ## Give the slaves a chance to catchup.
+
+ ok $replicated->schema->storage->replicants->{$replicant_names[0]}->is_replicating
+ => 'Replicants are replicating';
+
+ is $replicated->schema->storage->replicants->{$replicant_names[0]}->lag_behind_master, 0
+ => 'Replicant is zero seconds behind master';
+
+ ## Test the validate replicants
+
+ $replicated->schema->storage->pool->validate_replicants;
+
+ is $replicated->schema->storage->pool->active_replicants, 2
+ => 'Still have 2 replicants after validation';
+
+ ## Force the replicants to fail the validate test by requiring their lag to
+ ## be negative (ie ahead of the master!)
+
+ $replicated->schema->storage->pool->maximum_lag(-10);
+ $replicated->schema->storage->pool->validate_replicants;
+
+ is $replicated->schema->storage->pool->active_replicants, 0
+ => 'No way a replicant can be ahead of the master';
+
+ ## Let's be fair to the replicants again. Let them lag up to 5
+
+ $replicated->schema->storage->pool->maximum_lag(5);
+ $replicated->schema->storage->pool->validate_replicants;
+
+ is $replicated->schema->storage->pool->active_replicants, 2
+ => 'Both replicants in good standing again';
+
+ ## Check auto validate
+
+ is $replicated->schema->storage->balancer->auto_validate_every, 100
+ => "Got the expected value for auto validate";
+
+ ## This will make sure we auto validate every time
+ $replicated->schema->storage->balancer->auto_validate_every(0);
+
+ ## set all the replicants to inactive, and make sure the balancer falls back to
+ ## the master.
+
+ $replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
+ $replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
+
+ ## Ok, now when we go to run a query, autovalidate SHOULD reconnect
+
+ is $replicated->schema->storage->pool->active_replicants => 0
+ => "both replicants turned off";
+
+ ok $replicated->schema->resultset('Artist')->find(5)
+ => 'replicant reactivated';
+
+ is $debug{storage_type}, 'REPLICANT',
+ "got last query from a replicant: $debug{dsn}";
+
+ is $replicated->schema->storage->pool->active_replicants => 2
+ => "both replicants reactivated";
+}
+
+## Test the reliably callback
+
+ok my $reliably = sub {
+
+ ok $replicated->schema->resultset('Artist')->find(5)
+ => 'replicant reactivated';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+} => 'created coderef properly';
+
+$replicated->schema->storage->execute_reliably($reliably);
+
+## Try something with an error
+
+ok my $unreliably = sub {
+
+ ok $replicated->schema->resultset('ArtistXX')->find(5)
+ => 'replicant reactivated';
+
+} => 'created coderef properly';
+
+throws_ok {$replicated->schema->storage->execute_reliably($unreliably)}
+ qr/Can't find source for ArtistXX/
+ => 'Bad coderef throws proper error';
+
+## Make sure replication came back
+
+ok $replicated->schema->resultset('Artist')->find(3)
+ => 'replicant reactivated';
+
+is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+
+## make sure transactions are set to execute_reliably
+
+ok my $transaction = sub {
+
+ my $id = shift @_;
+
+ $replicated
+ ->schema
+ ->populate('Artist', [
+ [ qw/artistid name/ ],
+ [ $id, "Children of the Grave"],
+ ]);
+
+ ok my $result = $replicated->schema->resultset('Artist')->find($id)
+ => "Found expected artist for $id";
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ ok my $more = $replicated->schema->resultset('Artist')->find(1)
+ => 'Found expected artist again for 1';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ return ($result, $more);
+
+} => 'Created a coderef properly';
+
+## Test the transaction with multi return
+{
+ ok my @return = $replicated->schema->txn_do($transaction, 666)
+ => 'did transaction';
+
+ is $return[0]->id, 666
+ => 'first returned value is correct';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+ is $return[1]->id, 1
+ => 'second returned value is correct';
+
+ is $debug{storage_type}, 'MASTER',
+ "got last query from a master: $debug{dsn}";
+
+}
+
+## Test that asking for single return works
+{
+ ok my @return = $replicated->schema->txn_do($transaction, 777)
+ => 'did transaction';
+
+ is $return[0]->id, 777
+ => 'first returned value is correct';
+
+ is $return[1]->id, 1
+ => 'second returned value is correct';
+}
+
+## Test transaction returning a single value
+
+{
+ ok my $result = $replicated->schema->txn_do(sub {
+ ok my $more = $replicated->schema->resultset('Artist')->find(1)
+ => 'found inside a transaction';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+ return $more;
+ }) => 'successfully processed transaction';
+
+ is $result->id, 1
+ => 'Got expected single result from transaction';
+}
+
+## Make sure replication came back
+
+ok $replicated->schema->resultset('Artist')->find(1)
+ => 'replicant reactivated';
+
+is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+
+## Test Discard changes
+
+{
+ ok my $artist = $replicated->schema->resultset('Artist')->find(2)
+ => 'got an artist to test discard changes';
+
+ is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+
+ ok $artist->get_from_storage({force_pool=>'master'})
+ => 'properly discard changes';
+
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+
+ ok $artist->discard_changes({force_pool=>'master'})
+ => 'properly called discard_changes against master (manual attrs)';
+
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+
+ ok $artist->discard_changes()
+ => 'properly called discard_changes against master (default attrs)';
+
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+
+ ok $artist->discard_changes({force_pool=>$replicant_names[0]})
+ => 'properly able to override the default attributes';
+
+ is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}"
+}
+
+## Test some edge cases, like trying to do a transaction inside a transaction, etc
+
+{
+ ok my $result = $replicated->schema->txn_do(sub {
+ return $replicated->schema->txn_do(sub {
+ ok my $more = $replicated->schema->resultset('Artist')->find(1)
+ => 'found inside a transaction inside a transaction';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+ return $more;
+ });
+ }) => 'successfully processed transaction';
+
+ is $result->id, 1
+ => 'Got expected single result from transaction';
+}
+
+{
+ ok my $result = $replicated->schema->txn_do(sub {
+ return $replicated->schema->storage->execute_reliably(sub {
+ return $replicated->schema->txn_do(sub {
+ return $replicated->schema->storage->execute_reliably(sub {
+ ok my $more = $replicated->schema->resultset('Artist')->find(1)
+ => 'found inside crazy deep transactions and execute_reliably';
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+ return $more;
+ });
+ });
+ });
+ }) => 'successfully processed transaction';
+
+ is $result->id, 1
+ => 'Got expected single result from transaction';
+}
+
+## Test the force_pool resultset attribute.
+
+{
+ ok my $artist_rs = $replicated->schema->resultset('Artist')
+ => 'got artist resultset';
+
+ ## Turn on Forced Pool Storage
+ ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>'master'})
+ => 'Created a resultset using force_pool storage';
+
+ ok my $artist = $reliable_artist_rs->find(2)
+ => 'got an artist result via force_pool storage';
+
+ is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}";
+}
+
+## Test the force_pool resultset attribute part two.
+
+{
+ ok my $artist_rs = $replicated->schema->resultset('Artist')
+ => 'got artist resultset';
+
+ ## Turn on Forced Pool Storage
+ ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>$replicant_names[0]})
+ => 'Created a resultset using force_pool storage';
+
+ ok my $artist = $reliable_artist_rs->find(2)
+ => 'got an artist result via force_pool storage';
+
+ is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}";
+}
+## Delete the old database files
+$replicated->cleanup;
+
+done_testing;
+
+# vim: sw=4 sts=4 :
Copied: DBIx-Class/0.08/branches/ado_mssql/t/storage/stats.t (from rev 7355, DBIx-Class/0.08/branches/ado_mssql/t/31stats.t)
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/storage/stats.t (rev 0)
+++ DBIx-Class/0.08/branches/ado_mssql/t/storage/stats.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -0,0 +1,104 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use Test::More;
+
+plan tests => 12;
+
+use lib qw(t/lib);
+
+use_ok('DBICTest');
+my $schema = DBICTest->init_schema();
+
+my $cbworks = 0;
+
+$schema->storage->debugcb(sub { $cbworks = 1; });
+$schema->storage->debug(0);
+my $rs = $schema->resultset('CD')->search({});
+$rs->count();
+ok(!$cbworks, 'Callback not called with debug disabled');
+
+$schema->storage->debug(1);
+
+$rs->count();
+ok($cbworks, 'Debug callback worked.');
+
+my $prof = new DBIx::Test::Profiler();
+$schema->storage->debugobj($prof);
+
+# Test non-transaction calls.
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+ok(!$prof->{'txn_begin'}, 'txn_begin not called');
+ok(!$prof->{'txn_commit'}, 'txn_commit not called');
+
+$prof->reset();
+
+# Test transaction calls
+$schema->txn_begin();
+ok($prof->{'txn_begin'}, 'txn_begin called');
+
+$rs = $schema->resultset('CD')->search({});
+$rs->count();
+ok($prof->{'query_start'}, 'query_start called');
+ok($prof->{'query_end'}, 'query_end called');
+
+$schema->txn_commit();
+ok($prof->{'txn_commit'}, 'txn_commit called');
+
+$prof->reset();
+
+# Test a rollback
+$schema->txn_begin();
+$rs = $schema->resultset('CD')->search({});
+$rs->count();
+$schema->txn_rollback();
+ok($prof->{'txn_rollback'}, 'txn_rollback called');
+
+$schema->storage->debug(0);
+
+package DBIx::Test::Profiler;
+use strict;
+
+sub new {
+ my $self = bless({});
+}
+
+sub query_start {
+ my $self = shift();
+ $self->{'query_start'} = 1;
+}
+
+sub query_end {
+ my $self = shift();
+ $self->{'query_end'} = 1;
+}
+
+sub txn_begin {
+ my $self = shift();
+ $self->{'txn_begin'} = 1;
+}
+
+sub txn_rollback {
+ my $self = shift();
+ $self->{'txn_rollback'} = 1;
+}
+
+sub txn_commit {
+ my $self = shift();
+ $self->{'txn_commit'} = 1;
+}
+
+sub reset {
+ my $self = shift();
+
+ $self->{'query_start'} = 0;
+ $self->{'query_end'} = 0;
+ $self->{'txn_begin'} = 0;
+ $self->{'txn_rollback'} = 0;
+ $self->{'txn_end'} = 0;
+}
+
+1;
Modified: DBIx-Class/0.08/branches/ado_mssql/t/zzzzzzz_perl_perf_bug.t
===================================================================
--- DBIx-Class/0.08/branches/ado_mssql/t/zzzzzzz_perl_perf_bug.t 2009-10-14 01:46:49 UTC (rev 7785)
+++ DBIx-Class/0.08/branches/ado_mssql/t/zzzzzzz_perl_perf_bug.t 2009-10-14 13:45:34 UTC (rev 7786)
@@ -1,6 +1,7 @@
use strict;
use warnings;
use Test::More;
+use Benchmark;
use lib qw(t/lib);
use DBICTest; # do not remove even though it is not used
@@ -25,9 +26,6 @@
plan skip_all => 'Skipping as AUTOMATED_TESTING is set'
if ( $ENV{AUTOMATED_TESTING} );
-eval "use Benchmark ':all'";
-plan skip_all => 'needs Benchmark for testing' if $@;
-
plan tests => 3;
ok( 1, 'Dummy - prevents next test timing out' );
More information about the Bast-commits
mailing list