[Bast-commits] r7708 - in DBIx-Class/0.08/branches: . sybase_bulkinsert_support sybase_bulkinsert_support/lib/DBIx/Class/Storage sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI/ODBC sybase_bulkinsert_support/t

ribasushi at dev.catalyst.perl.org
Mon Sep 21 00:06:22 GMT 2009


Author: ribasushi
Date: 2009-09-21 00:06:21 +0000 (Mon, 21 Sep 2009)
New Revision: 7708

Removed:
   DBIx-Class/0.08/branches/sybase_insert_bulk/
Modified:
   DBIx-Class/0.08/branches/sybase_bulkinsert_support/Changes
   DBIx-Class/0.08/branches/sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI.pm
   DBIx-Class/0.08/branches/sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
   DBIx-Class/0.08/branches/sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI/Sybase.pm
   DBIx-Class/0.08/branches/sybase_bulkinsert_support/t/746sybase.t
Log:
All Sybase bulk-insert code by Caelum

Modified: DBIx-Class/0.08/branches/sybase_bulkinsert_support/Changes
===================================================================
--- DBIx-Class/0.08/branches/sybase_bulkinsert_support/Changes	2009-09-20 23:20:00 UTC (rev 7707)
+++ DBIx-Class/0.08/branches/sybase_bulkinsert_support/Changes	2009-09-21 00:06:21 UTC (rev 7708)
@@ -1,5 +1,7 @@
 Revision history for DBIx::Class
 
+        - Sybase bulk API support for populate()
+
         - Complete Sybase RDBMS support including:
           - Support for TEXT/IMAGE columns
           - Support for the 'money' datatype

Modified: DBIx-Class/0.08/branches/sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm	2009-09-20 23:20:00 UTC (rev 7707)
+++ DBIx-Class/0.08/branches/sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI/ODBC/Microsoft_SQL_Server.pm	2009-09-21 00:06:21 UTC (rev 7708)
@@ -61,7 +61,7 @@
   my $self = shift;
 
   if (ref($self->_dbi_connect_info->[0]) eq 'CODE') {
-    $self->throw_exception ('cannot set DBI attributes on a CODE ref connect_info');
+    $self->throw_exception ('Cannot set DBI attributes on a CODE ref connect_info');
   }
 
   my $dbi_attrs = $self->_dbi_connect_info->[-1];
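
This check matters for the Sybase bulk support added below, which must append ';bulkLogin=1' to the DSN - something only possible when connect_info carries a plain DSN string. A minimal sketch of the two connect_info forms (schema class, DSN and credentials are hypothetical):

  # DSN-string form: the storage can inspect and rewrite the DSN,
  # so a separate bulk connection can be set up
  My::Schema->connect('dbi:Sybase:server=SYBASE', $user, $pass);

  # coderef form: opaque to the storage; bulk API support is disabled
  # and populate() falls back to regular array inserts with a warning
  My::Schema->connect(
    sub { DBI->connect('dbi:Sybase:server=SYBASE', $user, $pass) }
  );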

Modified: DBIx-Class/0.08/branches/sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI/Sybase.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI/Sybase.pm	2009-09-20 23:20:00 UTC (rev 7707)
+++ DBIx-Class/0.08/branches/sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI/Sybase.pm	2009-09-21 00:06:21 UTC (rev 7708)
@@ -13,11 +13,13 @@
 use Sub::Name ();
 
 __PACKAGE__->mk_group_accessors('simple' =>
-    qw/_identity _blob_log_on_update _writer_storage _is_writer_storage
+    qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
+       _bulk_storage _is_bulk_storage _began_bulk_work
+       _bulk_disabled_due_to_coderef_connect_info_warned
        _identity_method/
 );
 
-my @also_proxy_to_writer_storage = qw/
+my @also_proxy_to_extra_storages = qw/
   connect_call_set_auto_cast auto_cast connect_call_blob_setup
   connect_call_datetime_setup
 
@@ -105,7 +107,7 @@
         bless $self, $no_bind_vars;
         $self->_rebless;
       } elsif (not $self->_typeless_placeholders_supported) {
-# this is highly unlikely, but we check just in case
+        # this is highly unlikely, but we check just in case
         $self->auto_cast(1);
       }
     }
@@ -121,30 +123,63 @@
 
 # create storage for insert/(update blob) transactions,
 # unless this is that storage
-  return if $self->_is_writer_storage;
+  return if $self->_is_extra_storage;
 
   my $writer_storage = (ref $self)->new;
 
-  $writer_storage->_is_writer_storage(1);
+  $writer_storage->_is_extra_storage(1);
   $writer_storage->connect_info($self->connect_info);
   $writer_storage->auto_cast($self->auto_cast);
 
   $self->_writer_storage($writer_storage);
+
+# create a bulk storage unless connect_info is a coderef
+  return
+    if (Scalar::Util::reftype($self->_dbi_connect_info->[0])||'') eq 'CODE';
+
+  my $bulk_storage = (ref $self)->new;
+
+  $bulk_storage->_is_extra_storage(1);
+  $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
+  $bulk_storage->connect_info($self->connect_info);
+
+# this is why coderef connect_info is unsupported above - the DSN string must be modifiable
+  $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
+
+  $self->_bulk_storage($bulk_storage);
 }
 
-for my $method (@also_proxy_to_writer_storage) {
+for my $method (@also_proxy_to_extra_storages) {
   no strict 'refs';
   no warnings 'redefine';
 
   my $replaced = __PACKAGE__->can($method);
 
-  *{$method} = Sub::Name::subname __PACKAGE__."::$method" => sub {
+  *{$method} = Sub::Name::subname $method => sub {
     my $self = shift;
     $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
+    $self->_bulk_storage->$replaced(@_)   if $self->_bulk_storage;
     return $self->$replaced(@_);
   };
 }
 
+sub disconnect {
+  my $self = shift;
+
+# Even though we call $sth->finish for uses of the bulk API, there's still an
+# "active statement" warning on disconnect, which we throw away here.
+# This is due to the bug described in insert_bulk.
+# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
+  local $SIG{__WARN__} = sub {
+    warn $_[0] unless $_[0] =~ /active statement/i;
+  } if $self->_is_bulk_storage;
+
+# so that next transaction gets a dbh
+  $self->_began_bulk_work(0) if $self->_is_bulk_storage;
+
+  $self->next::method;
+}
+
 # Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
 # DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
 # we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
@@ -153,6 +188,12 @@
   my $self = shift;
 
   $self->next::method(@_);
+  
+  if ($self->_is_bulk_storage) {
+# this should be cleared on every reconnect
+    $self->_began_bulk_work(0);
+    return;
+  }
 
   if (not $self->using_freetds) {
     $self->_dbh->{syb_chained_txn} = 1;
@@ -428,7 +469,7 @@
   return $wantarray ? @res : $res[0];
 }
 
-### the insert_bulk stuff stolen from DBI/MSSQL.pm
+### the insert_bulk code is partially stolen from DBI/MSSQL.pm
 
 sub _set_identity_insert {
   my ($self, $table, $op) = @_;
@@ -475,31 +516,225 @@
 # for tests
 sub _can_insert_bulk { 1 }
 
-# XXX this should use the DBD::Sybase bulk API, where possible
 sub insert_bulk {
   my $self = shift;
   my ($source, $cols, $data) = @_;
 
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
+
   my $is_identity_insert = (List::Util::first
-      { $source->column_info ($_)->{is_auto_increment} }
-      (@{$cols})
-  )
-     ? 1
-     : 0;
+    { $source->column_info ($_)->{is_auto_increment} }
+    @{$cols}
+  ) ? 1 : 0;
 
-  if ($is_identity_insert) {
-     $self->_set_identity_insert ($source->name);
+  my @source_columns = $source->columns;
+
+  my $use_bulk_api =
+    $self->_bulk_storage &&
+    $self->_get_dbh->{syb_has_blk};
+
+  if ((not $use_bulk_api) &&
+      (Scalar::Util::reftype($self->_dbi_connect_info->[0])||'') eq 'CODE' &&
+      (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
+    carp <<'EOF';
+Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
+array inserts.
+EOF
+    $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
   }
 
-  $self->next::method(@_);
+  if (not $use_bulk_api) {
+    my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
 
-  if ($is_identity_insert) {
-     $self->_unset_identity_insert ($source->name);
+    my $dumb_last_insert_id =
+         $identity_col
+      && (not $is_identity_insert)
+      && ($self->_identity_method||'') ne '@@IDENTITY';
+
+    ($self, my ($guard)) = do {
+      if ($self->{transaction_depth} == 0 &&
+          ($blob_cols || $dumb_last_insert_id)) {
+        ($self->_writer_storage, $self->_writer_storage->txn_scope_guard);
+      }
+      else {
+        ($self, undef);
+      }
+    };
+
+    $self->_set_identity_insert ($source->name)   if $is_identity_insert;
+    $self->next::method(@_);
+    $self->_unset_identity_insert ($source->name) if $is_identity_insert;
+
+    if ($blob_cols) {
+      if ($is_identity_insert) {
+        $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
+      }
+      else {
+        my @cols_with_identities = (@$cols, $identity_col);
+
+        ## calculate identities
+        # XXX This assumes identities always increase by 1, which may or may not
+        # be true.
+        my ($last_identity) =
+          $self->_dbh->selectrow_array (
+            $self->_fetch_identity_sql($source, $identity_col)
+          );
+        my @identities = (($last_identity - @$data + 1) .. $last_identity);
+
+        my @data_with_identities = map [@$_, shift @identities], @$data;
+
+        $self->_insert_blobs_array (
+          $source, $blob_cols, \@cols_with_identities, \@data_with_identities
+        );
+      }
+    }
+
+    $guard->commit if $guard;
+    return;
   }
+
+# otherwise, use the bulk API
+
+# rearrange @$data so that columns are in database order
+  my %orig_idx;
+  @orig_idx{@$cols} = 0..$#$cols;
+
+  my %new_idx;
+  @new_idx{@source_columns} = 0..$#source_columns;
+
+  my @new_data;
+  for my $datum (@$data) {
+    my $new_datum = [];
+    for my $col (@source_columns) {
+# identity data will be 'undef' if not $is_identity_insert
+# columns with defaults will also be 'undef'
+      $new_datum->[ $new_idx{$col} ] =
+        exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
+    }
+    push @new_data, $new_datum;
+  }
+
+# bcp identity index is 1-based
+  my $identity_idx = ($identity_col && exists $new_idx{$identity_col}) ?
+    $new_idx{$identity_col} + 1 : 0;
+
+## Set a client-side conversion error handler, straight from DBD::Sybase docs.
+# This ignores any data conversion errors detected by the client side libs, as
+# they are usually harmless.
+  my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
+    Sub::Name::subname insert_bulk => sub {
+      my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
+
+      return 1 if $errno == 36;
+
+      carp
+        "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
+        ($errmsg ? "\n$errmsg" : '') .
+        ($osmsg  ? "\n$osmsg"  : '')  .
+        ($blkmsg ? "\n$blkmsg" : '');
+
+      return 0;
+  });
+
+  eval {
+    my $bulk = $self->_bulk_storage;
+
+    my $guard = $bulk->txn_scope_guard;
+
+## XXX get this to work instead of our own $sth
+## will require SQLA or *Hacks changes for ordered columns
+#    $bulk->next::method($source, \@source_columns, \@new_data, {
+#      syb_bcp_attribs => {
+#        identity_flag   => $is_identity_insert,
+#        identity_column => $identity_idx,
+#      }
+#    });
+    my $sql = 'INSERT INTO ' .
+      $bulk->sql_maker->_quote($source->name) . ' (' .
+# colname list is ignored for BCP, but does no harm
+      (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
+      ' VALUES ('.  (join ', ', ('?') x @source_columns) . ')';
+
+## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
+## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
+## better yet the version above. Should be fixed in DBD::Sybase.
+    my $sth = $bulk->_get_dbh->prepare($sql,
+#      'insert', # op
+      {
+        syb_bcp_attribs => {
+          identity_flag   => $is_identity_insert,
+          identity_column => $identity_idx,
+        }
+      }
+    );
+
+    my $bind_attributes = $self->source_bind_attributes($source);
+
+    foreach my $slice_idx (0..$#source_columns) {
+      my $col = $source_columns[$slice_idx];
+
+      my $attributes = ($bind_attributes && defined $bind_attributes->{$col})
+        ? $bind_attributes->{$col} : undef;
+
+      my @slice = map $_->[$slice_idx], @new_data;
+
+      $sth->bind_param_array(($slice_idx + 1), \@slice, $attributes);
+    }
+
+    $bulk->_query_start($sql);
+
+# this is stolen from DBI::insert_bulk
+    my $tuple_status = [];
+    my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) };
+
+    if (my $err = $@ || $sth->errstr) {
+      my $i = 0;
+      ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
+
+      $self->throw_exception("Unexpected populate error: $err")
+        if ($i > $#$tuple_status);
+
+      $self->throw_exception(sprintf "%s for populate slice:\n%s",
+        ($tuple_status->[$i][1] || $err),
+        $self->_pretty_print ({
+          map { $source_columns[$_] => $new_data[$i][$_] } (0 .. $#source_columns)
+        }),
+      );
+    }
+
+    $guard->commit;
+    $sth->finish;
+
+    $bulk->_query_end($sql);
+  };
+  my $exception = $@;
+  if ($exception =~ /-Y option/) {
+    carp <<"EOF";
+
+Sybase bulk API operation failed due to character set incompatibility, reverting
+to regular array inserts:
+
+*** Try unsetting the LANG environment variable.
+
+$exception
+EOF
+    $self->_bulk_storage(undef);
+    DBD::Sybase::set_cslib_cb($orig_cslib_cb);
+    unshift @_, $self;
+    goto \&insert_bulk;
+  }
+  elsif ($exception) {
+    DBD::Sybase::set_cslib_cb($orig_cslib_cb);
+# rollback makes the bulkLogin connection unusable
+    $self->_bulk_storage->disconnect;
+    $self->throw_exception($exception);
+  }
+
+  DBD::Sybase::set_cslib_cb($orig_cslib_cb);
 }
 
-### end of stolen insert_bulk section
-
 # Make sure blobs are not bound as placeholders, and return any non-empty ones
 # as a hash.
 sub _remove_blob_cols {
@@ -523,6 +758,33 @@
   return keys %blob_cols ? \%blob_cols : undef;
 }
 
+# same for insert_bulk
+sub _remove_blob_cols_array {
+  my ($self, $source, $cols, $data) = @_;
+
+  my @blob_cols;
+
+  for my $i (0..$#$cols) {
+    my $col = $cols->[$i];
+
+    if ($self->_is_lob_type($source->column_info($col)->{data_type})) {
+      for my $j (0..$#$data) {
+        my $blob_val = delete $data->[$j][$i];
+        if (not defined $blob_val) {
+          $data->[$j][$i] = \'NULL';
+        }
+        else {
+          $data->[$j][$i] = \"''";
+          $blob_cols[$j][$i] = $blob_val
+            unless $blob_val eq '';
+        }
+      }
+    }
+  }
+
+  return @blob_cols ? \@blob_cols : undef;
+}
+
 sub _update_blobs {
   my ($self, $source, $blob_cols, $where) = @_;
 
@@ -620,6 +882,26 @@
   }
 }
 
+sub _insert_blobs_array {
+  my ($self, $source, $blob_cols, $cols, $data) = @_;
+
+  for my $i (0..$#$data) {
+    my $datum = $data->[$i];
+
+    my %row;
+    @row{ @$cols } = @$datum;
+
+    my %blob_vals;
+    for my $j (0..$#$cols) {
+      if (exists $blob_cols->[$i][$j]) {
+        $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
+      }
+    }
+
+    $self->_insert_blobs ($source, \%blob_vals, \%row);
+  }
+}
+
 =head2 connect_call_datetime_setup
 
 Used as:
@@ -671,10 +953,18 @@
 
 sub _dbh_begin_work {
   my $self = shift;
+
+# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
+# TRAN once. However, we need to make sure there's a $dbh.
+  return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
+
   $self->next::method(@_);
+
   if ($self->using_freetds) {
     $self->_get_dbh->do('BEGIN TRAN');
   }
+
+  $self->_began_bulk_work(1) if $self->_is_bulk_storage;
 }
 
 sub _dbh_commit {
@@ -846,6 +1136,27 @@
 See L</connect_call_blob_setup> for a L<DBIx::Class::Storage::DBI/connect_info>
 setting you need to work with C<IMAGE> columns.
 
+=head1 BULK API
+
+The experimental L<DBD::Sybase> Bulk API support is used for
+L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
+on a separate connection.
+
+To use this feature effectively, use a large number of rows for each
+L<populate|DBIx::Class::ResultSet/populate> call, e.g.:
+
+  while (my $rows = $data_source->get_100_rows()) {
+    $rs->populate($rows);
+  }
+
+B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
+calls in your C<Result> classes B<must> list columns in database order for this
+to work. Also, you may have to unset the C<LANG> environment variable before
+loading your app, if it doesn't match the character set of your database.
+
+When inserting IMAGE columns using this method, you'll need to use
+L</connect_call_blob_setup> as well.
+
 =head1 AUTHOR
 
 See L<DBIx::Class/CONTRIBUTORS>.
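
To illustrate the add_columns ordering requirement documented above, here is a minimal Result class sketch modeled on the Artist table used in t/746sybase.t (the exact column definitions are assumptions); the point is that the add_columns list mirrors the physical column order of the table:

  package My::Schema::Result::Artist;
  use base 'DBIx::Class';

  __PACKAGE__->load_components('Core');
  __PACKAGE__->table('artist');

  # listed in database (physical) order - required for the bulk API,
  # since the generated INSERT binds columns positionally for BCP
  __PACKAGE__->add_columns(
    artistid  => { data_type => 'integer', is_auto_increment => 1 },
    name      => { data_type => 'varchar', size => 100, is_nullable => 1 },
    charfield => { data_type => 'char', size => 10, is_nullable => 1 },
  );
  __PACKAGE__->set_primary_key('artistid');

A populate() call on such a source in void context then takes the bulk path shown in insert_bulk above.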

Modified: DBIx-Class/0.08/branches/sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI.pm
===================================================================
--- DBIx-Class/0.08/branches/sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI.pm	2009-09-20 23:20:00 UTC (rev 7707)
+++ DBIx-Class/0.08/branches/sybase_bulkinsert_support/lib/DBIx/Class/Storage/DBI.pm	2009-09-21 00:06:21 UTC (rev 7708)
@@ -1335,7 +1335,7 @@
 ## scalar refs, or at least, all the same type as the first set, the statement is
 ## only prepped once.
 sub insert_bulk {
-  my ($self, $source, $cols, $data) = @_;
+  my ($self, $source, $cols, $data, $sth_attr) = @_;
 
 # redispatch to insert_bulk method of storage we reblessed into, if necessary
   if (not $self->_driver_determined) {
@@ -1344,18 +1344,52 @@
   }
 
   my %colvalues;
-  my $table = $source->from;
   @colvalues{@$cols} = (0..$#$cols);
 
+  # bind literal sql if it's the same in all slices
+  for my $i (0..$#$cols) {
+    my $first_val = $data->[0][$i];
+    next unless (Scalar::Util::reftype($first_val)||'') eq 'SCALAR';
+
+    $colvalues{ $cols->[$i] } = $first_val
+      if (grep {
+        (Scalar::Util::reftype($_)||'') eq 'SCALAR' &&
+        $$_ eq $$first_val
+      } map $data->[$_][$i], (1..$#$data)) == (@$data - 1);
+  }
+
   my ($sql, $bind) = $self->_prep_for_execute (
     'insert', undef, $source, [\%colvalues]
   );
-  my @bind = @$bind
-    or croak 'Cannot insert_bulk without support for placeholders';
+  my @bind = @$bind;
 
+  my $empty_bind = ( (not @bind) &&
+    (grep { (Scalar::Util::reftype($_)||'') eq 'SCALAR' } values %colvalues)
+    == @$cols ) ? 1 : 0;
+
+  if ((not @bind) && (not $empty_bind)) {
+    croak 'Cannot insert_bulk without support for placeholders';
+  }
+
   $self->_query_start( $sql, @bind );
-  my $sth = $self->sth($sql);
+  my $sth = $self->sth($sql, 'insert', $sth_attr);
 
+  if ($empty_bind) {
+    # bind_param_array doesn't work if there are no binds
+    eval {
+      local $self->_get_dbh->{RaiseError} = 1;
+      local $self->_get_dbh->{PrintError} = 0;
+      foreach (0..$#$data) {
+        $sth->execute;
+        $sth->fetchall_arrayref;
+      }
+    };
+    my $exception = $@;
+    $sth->finish;
+    $self->throw_exception($exception) if $exception;
+    return;
+  }
+
 #  @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
 
   ## This must be an arrayref, else nothing works!
@@ -1382,23 +1416,23 @@
     $sth->bind_param_array( $placeholder_index, [@data], $attributes );
     $placeholder_index++;
   }
+
   my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) };
   $sth->finish;
-  if (my $err = $@) {
+  if (my $err = $@ || $sth->errstr) {
     my $i = 0;
     ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
 
-    $self->throw_exception($sth->errstr || "Unexpected populate error: $err")
+    $self->throw_exception("Unexpected populate error: $err")
       if ($i > $#$tuple_status);
 
     $self->throw_exception(sprintf "%s for populate slice:\n%s",
-      $tuple_status->[$i][1],
+      ($tuple_status->[$i][1] || $err),
       $self->_pretty_print ({
         map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols)
       }),
     );
   }
-  $self->throw_exception($sth->errstr) if !$rv;
 
   $self->_query_end( $sql, @bind );
   return (wantarray ? ($rv, $sth, @bind) : $rv);
@@ -2063,12 +2097,15 @@
 =cut
 
 sub _dbh_sth {
-  my ($self, $dbh, $sql) = @_;
+  my ($self, $dbh, $sql, $op, $sth_attr) = @_;
+# $op is ignored right now
 
+  $sth_attr ||= {};
+
   # 3 is the if_active parameter which avoids active sth re-use
   my $sth = $self->disable_sth_caching
-    ? $dbh->prepare($sql)
-    : $dbh->prepare_cached($sql, {}, 3);
+    ? $dbh->prepare($sql, $sth_attr)
+    : $dbh->prepare_cached($sql, $sth_attr, 3);
 
   # XXX You would think RaiseError would make this impossible,
   #  but apparently that's not true :(
@@ -2078,8 +2115,8 @@
 }
 
 sub sth {
-  my ($self, $sql) = @_;
-  $self->dbh_do('_dbh_sth', $sql);  # retry over disconnects
+  my ($self, $sql, $op, $sth_attr) = @_;
+  $self->dbh_do('_dbh_sth', $sql, $op, $sth_attr);  # retry over disconnects
 }
 
 sub _dbh_columns_info_for {
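
As a usage sketch of the literal-SQL binding added above ($rs and the column names are hypothetical): a scalar-ref value is inlined into the prepared statement only when every slice carries an identical one:

  # 'created_on' is the same literal in all slices, so it is inlined
  # once instead of being bound per-row via a placeholder
  $rs->populate([
    { name => 'foo', created_on => \'getdate()' },
    { name => 'bar', created_on => \'getdate()' },
  ]);

If every column in every slice is such a literal, @bind ends up empty and the statement is simply executed once per row, per the $empty_bind branch.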

Modified: DBIx-Class/0.08/branches/sybase_bulkinsert_support/t/746sybase.t
===================================================================
--- DBIx-Class/0.08/branches/sybase_bulkinsert_support/t/746sybase.t	2009-09-20 23:20:00 UTC (rev 7707)
+++ DBIx-Class/0.08/branches/sybase_bulkinsert_support/t/746sybase.t	2009-09-21 00:06:21 UTC (rev 7708)
@@ -12,7 +12,7 @@
 
 my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};
 
-my $TESTS = 51 + 2;
+my $TESTS = 58 + 2;
 
 if (not ($dsn && $user)) {
   plan skip_all =>
@@ -208,8 +208,7 @@
     name => { -like => 'bulk artist %' }
   });
 
-# test insert_bulk using populate, this should always pass whether or not it
-# does anything Sybase specific or not. Just here to aid debugging.
+# test insert_bulk using populate.
   SKIP: {
     skip 'insert_bulk not supported', 4
       unless $schema->storage->_can_insert_bulk;
@@ -245,6 +244,53 @@
     $bulk_rs->delete;
   }
 
+# make sure insert_bulk works a second time on the same connection
+  SKIP: {
+    skip 'insert_bulk not supported', 3
+      unless $schema->storage->_can_insert_bulk;
+
+    lives_ok {
+      $schema->resultset('Artist')->populate([
+        {
+          name => 'bulk artist 1',
+          charfield => 'bar',
+        },
+        {
+          name => 'bulk artist 2',
+          charfield => 'bar',
+        },
+        {
+          name => 'bulk artist 3',
+          charfield => 'bar',
+        },
+      ]);
+    } 'insert_bulk via populate called a second time';
+
+    is $bulk_rs->count, 3,
+      'correct number inserted via insert_bulk';
+
+    is ((grep $_->charfield eq 'bar', $bulk_rs->all), 3,
+      'column set correctly via insert_bulk');
+
+    $bulk_rs->delete;
+  }
+
+# test invalid insert_bulk (missing required column)
+#
+# There should be a rollback, reconnect and the next valid insert_bulk should
+# succeed.
+  throws_ok {
+    $schema->resultset('Artist')->populate([
+      {
+        charfield => 'foo',
+      }
+    ]);
+  } qr/no value or default|does not allow null|placeholders/i,
+# The second pattern is the error from fallback to regular array insert on
+# incompatible charset.
+# The third is for ::NoBindVars with no syb_has_blk.
+  'insert_bulk with missing required column throws error';
+
 # now test insert_bulk with IDENTITY_INSERT
   SKIP: {
     skip 'insert_bulk not supported', 3
@@ -290,7 +336,7 @@
 
 # mostly stolen from the blob stuff Nniuq wrote for t/73oracle.t
   SKIP: {
-    skip 'TEXT/IMAGE support does not work with FreeTDS', 15
+    skip 'TEXT/IMAGE support does not work with FreeTDS', 18
       if $schema->storage->using_freetds;
 
     my $dbh = $schema->storage->_dbh;
@@ -302,7 +348,7 @@
         CREATE TABLE bindtype_test 
         (
           id    INT   IDENTITY PRIMARY KEY,
-          bytea INT   NULL,
+          bytea IMAGE NULL,
           blob  IMAGE NULL,
           clob  TEXT  NULL
         )
@@ -327,35 +373,31 @@
       foreach my $size (qw(small large)) {
         no warnings 'uninitialized';
 
-        my $created = eval { $rs->create( { $type => $binstr{$size} } ) };
-        ok(!$@, "inserted $size $type without dying");
-        diag $@ if $@;
+        my $created;
+        lives_ok {
+          $created = $rs->create( { $type => $binstr{$size} } )
+        } "inserted $size $type without dying";
 
         $last_id = $created->id if $created;
 
-        my $got = eval {
-          $rs->find($last_id)->$type
-        };
-        diag $@ if $@;
-        ok($got eq $binstr{$size}, "verified inserted $size $type");
+        lives_and {
+          ok($rs->find($last_id)->$type eq $binstr{$size})
+        } "verified inserted $size $type";
       }
     }
 
+    $rs->delete;
+
     # blob insert with explicit PK
     # also a good opportunity to test IDENTITY_INSERT
+    lives_ok {
+      $rs->create( { id => 1, blob => $binstr{large} } )
+    } 'inserted large blob without dying with manual PK';
 
-    $rs->delete;
+    lives_and {
+      ok($rs->find(1)->blob eq $binstr{large})
+    } 'verified inserted large blob with manual PK';
 
-    my $created = eval { $rs->create( { id => 1, blob => $binstr{large} } ) };
-    ok(!$@, "inserted large blob without dying with manual PK");
-    diag $@ if $@;
-
-    my $got = eval {
-      $rs->find(1)->blob
-    };
-    diag $@ if $@;
-    ok($got eq $binstr{large}, "verified inserted large blob with manual PK");
-
     # try a blob update
     my $new_str = $binstr{large} . 'mtfnpy';
 
@@ -365,15 +407,14 @@
       $schema = get_schema();
     }
 
-    eval { $rs->search({ id => 1 })->update({ blob => $new_str }) };
-    ok !$@, 'updated blob successfully';
-    diag $@ if $@;
-    $got = eval {
-      $rs->find(1)->blob
-    };
-    diag $@ if $@;
-    ok($got eq $new_str, "verified updated blob");
+    lives_ok {
+      $rs->search({ id => 1 })->update({ blob => $new_str })
+    } 'updated blob successfully';
 
+    lives_and {
+      ok($rs->find(1)->blob eq $new_str)
+    } 'verified updated blob';
+
     # try a blob update with IDENTITY_UPDATE
     lives_and {
       $new_str = $binstr{large} . 'hlagh';
@@ -383,12 +424,39 @@
 
     ## try multi-row blob update
     # first insert some blobs
-    $rs->delete;
-    $rs->create({ blob => $binstr{large} }) for (1..3);
     $new_str = $binstr{large} . 'foo';
-    $rs->update({ blob => $new_str });
-    is((grep $_->blob eq $new_str, $rs->all), 3, 'multi-row blob update');
+    lives_and {
+      $rs->delete;
+      $rs->create({ blob => $binstr{large} }) for (1..2);
+      $rs->update({ blob => $new_str });
+      is((grep $_->blob eq $new_str, $rs->all), 2);
+    } 'multi-row blob update';
 
+    $rs->delete;
+
+    # now try insert_bulk with blobs
+    $new_str = $binstr{large} . 'bar';
+    lives_ok {
+      $rs->populate([
+        {
+          bytea => 1,
+          blob => $binstr{large},
+          clob => $new_str,
+        },
+        {
+          bytea => 1,
+          blob => $binstr{large},
+          clob => $new_str,
+        },
+      ]);
+    } 'insert_bulk with blobs does not die';
+
+    is((grep $_->blob eq $binstr{large}, $rs->all), 2,
+      'IMAGE column set correctly via insert_bulk');
+
+    is((grep $_->clob eq $new_str, $rs->all), 2,
+      'TEXT column set correctly via insert_bulk');
+
     # make sure impossible blob update throws
     throws_ok {
       $rs->update({ clob => 'foo' });



