author    jbj <devnull@localhost>  2001-07-23 20:09:04 +0000
committer jbj <devnull@localhost>  2001-07-23 20:09:04 +0000
commit    d91a331d0c88bef042117c4a20b597aede61cb77 (patch)
tree      30b9292cd1a86e827ddf209558985e1a49cc5f26 /db/test
parent    2aec992c9d5db8cdf706fb3dcd8cdfa642ca84d3 (diff)
Initial revision
CVS patchset: 4976
CVS date: 2001/07/23 20:09:04
Diffstat (limited to 'db/test')
-rw-r--r--  db/test/bigfile001.tcl    84
-rw-r--r--  db/test/bigfile002.tcl    45
-rw-r--r--  db/test/env009.tcl        57
-rw-r--r--  db/test/rpc003.tcl       167
-rw-r--r--  db/test/si005.tcl        179
-rw-r--r--  db/test/test095.tcl      228
-rw-r--r--  db/test/test096.tcl      186
-rw-r--r--  db/test/txnscript.tcl     67
8 files changed, 1013 insertions, 0 deletions
diff --git a/db/test/bigfile001.tcl b/db/test/bigfile001.tcl
new file mode 100644
index 000000000..e5dcbd7f2
--- /dev/null
+++ b/db/test/bigfile001.tcl
@@ -0,0 +1,84 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: bigfile001.tcl,v 11.3 2001/05/22 16:20:40 krinsky Exp
+#
+# Big file test.
+# Create a database greater than 4 GB in size. Close, verify. Grow
+# the database somewhat. Close, reverify. Lather, rinse, repeat.
+# Since it will not work on all systems, this test is not run by default.
+proc bigfile001 { method \
+ { itemsize 4096 } { nitems 1048576 } { growby 5000 } { growtms 2 } args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Bigfile: $method ($args) $nitems * $itemsize bytes of data"
+
+ env_cleanup $testdir
+
+ # Create the database. Use 64K pages; we want a good fill
+ # factor, and page size doesn't matter much. Use a 50MB
+ # cache; that should be manageable, and will help
+ # performance.
+ set dbname $testdir/big.db
+
+ set db [eval {berkdb_open -create} {-pagesize 65536 \
+ -cachesize {0 50000000 0}} $omethod $args $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts -nonewline "\tBigfile.a: Creating database...0%..."
+ flush stdout
+
+ set data [string repeat z $itemsize]
+
+ set more_than_ten_already 0
+ for { set i 0 } { $i < $nitems } { incr i } {
+ set key key[format %08u $i]
+
+ error_check_good db_put($i) [$db put $key $data] 0
+
+ if { $i % 5000 == 0 } {
+ set pct [expr 100 * $i / $nitems]
+ puts -nonewline "\b\b\b\b\b"
+ if { $pct >= 10 } {
+ if { $more_than_ten_already } {
+ puts -nonewline "\b"
+ } else {
+ set more_than_ten_already 1
+ }
+ }
+
+ puts -nonewline "$pct%..."
+ flush stdout
+ }
+ }
+ puts "\b\b\b\b\b\b100%..."
+ error_check_good db_close [$db close] 0
+
+ puts "\tBigfile.b: Verifying database..."
+ error_check_good verify \
+ [verify_dir $testdir "\t\t" 0 0 1 50000000] 0
+
+ puts "\tBigfile.c: Grow database $growtms times by $growby items"
+
+ for { set j 0 } { $j < $growtms } { incr j } {
+ set db [eval {berkdb_open} {-cachesize {0 50000000 0}} $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+ puts -nonewline "\t\tBigfile.c.1: Adding $growby items..."
+ flush stdout
+ for { set i 0 } { $i < $growby } { incr i } {
+ set key key[format %08u $i].$j
+ error_check_good db_put($j.$i) [$db put $key $data] 0
+ }
+ error_check_good db_close [$db close] 0
+ puts "done."
+
+ puts "\t\tBigfile.c.2: Verifying database..."
+ error_check_good verify($j) \
+ [verify_dir $testdir "\t\t\t" 0 0 1 50000000] 0
+ }
+}
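
A quick way to exercise this code path without the full 4 GB footprint is to call the proc with small arguments. This is a hypothetical smoke-test invocation, not part of the commit, and assumes the suite's usual tclsh entry point:

    # Illustrative only: 1024 items * 4096 bytes is ~4 MB, grown twice by 100.
    source ./include.tcl
    source $test_path/test.tcl
    bigfile001 btree 4096 1024 100 2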
diff --git a/db/test/bigfile002.tcl b/db/test/bigfile002.tcl
new file mode 100644
index 000000000..4ea869c05
--- /dev/null
+++ b/db/test/bigfile002.tcl
@@ -0,0 +1,45 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: bigfile002.tcl,v 11.2 2001/07/02 01:08:45 bostic Exp
+#
+# Big file test #2.
+# This one should be faster and not require so much disk space, although it
+# doesn't test as extensively.
+# Create an mpool file with 1K pages. Dirty page 6000000. Sync.
+proc bigfile002 { args } {
+ source ./include.tcl
+
+ puts -nonewline \
+ "Bigfile002: Creating large, sparse file through mpool..."
+ flush stdout
+
+ env_cleanup $testdir
+
+ # Create env.
+ set env [berkdb env -create -home $testdir]
+ error_check_good valid_env [is_valid_env $env] TRUE
+
+ # Create the file.
+ set name big002.file
+ set file [$env mpool -create -pagesize 1024 $name]
+
+ # Dirty page 6000000
+ set pg [$file get -create 6000000]
+ error_check_good pg_init [$pg init A] 0
+ error_check_good pg_set [$pg is_setto A] 1
+
+ # Put page back.
+ error_check_good pg_put [$pg put -dirty] 0
+
+ # Fsync.
+ error_check_good fsync [$file fsync] 0
+
+ puts "succeeded."
+
+ # Close.
+ error_check_good fclose [$file close] 0
+ error_check_good env_close [$env close] 0
+}
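
The heart of bigfile002 is the mpool pin/fill/unpin cycle. A minimal sketch of that cycle using the same Tcl API the test calls (the page number and file name here are illustrative):

    set env [berkdb env -create -home $testdir]
    set mpf [$env mpool -create -pagesize 1024 sparse.file]
    set pg  [$mpf get -create 42]   ;# pin page 42, creating it if absent
    $pg init B                      ;# fill the page with 'B' bytes
    $pg put -dirty                  ;# unpin the page and mark it dirty
    $mpf fsync                      ;# force the sparse file to disk
    $mpf close
    $env close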
diff --git a/db/test/env009.tcl b/db/test/env009.tcl
new file mode 100644
index 000000000..4e73f21e5
--- /dev/null
+++ b/db/test/env009.tcl
@@ -0,0 +1,57 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: env009.tcl,v 11.1 2001/05/23 16:47:32 sue Exp
+#
+# Env Test 9
+# Test calls to all the various stat functions.
+# We have several sprinkled throughout the test suite, but
+# this will ensure that we run all of them at least once.
+proc env009 { } {
+ source ./include.tcl
+
+ puts "Env009: Various stat function test."
+
+ env_cleanup $testdir
+ puts "\tEnv009.a: Setting up env and a database."
+
+ set e [berkdb env -create -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set dbbt [berkdb_open -create -btree $testdir/env009bt.db]
+ error_check_good dbopen [is_valid_db $dbbt] TRUE
+ set dbh [berkdb_open -create -hash $testdir/env009h.db]
+ error_check_good dbopen [is_valid_db $dbh] TRUE
+ set dbq [berkdb_open -create -queue $testdir/env009q.db]
+ error_check_good dbopen [is_valid_db $dbq] TRUE
+
+ set rlist {
+ { "lock_stat" "Max locks" "Env009.b"}
+ { "log_stat" "Magic" "Env009.c"}
+ { "mpool_stat" "Number of caches" "Env009.d"}
+ { "txn_stat" "Max Txns" "Env009.e"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set str [lindex $pair 1]
+ set msg [lindex $pair 2]
+ puts "\t$msg: $cmd"
+ set ret [$e $cmd]
+ error_check_good $cmd [is_substr $ret $str] 1
+ }
+ puts "\tEnv009.f: btree stats"
+ set ret [$dbbt stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ puts "\tEnv009.g: hash stats"
+ set ret [$dbh stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ puts "\tEnv009.f: queue stats"
+ set ret [$dbq stat]
+ error_check_good $cmd [is_substr $ret "Magic"] 1
+ error_check_good dbclose [$dbbt close] 0
+ error_check_good dbclose [$dbh close] 0
+ error_check_good dbclose [$dbq close] 0
+ error_check_good envclose [$e close] 0
+}
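
The checks above work because each Tcl stat method returns a list of {name value} pairs, and is_substr simply searches the stringified list for a field name. A hedged sketch of the idiom in isolation:

    set e   [berkdb env -create -home $testdir -txn]
    set ret [$e txn_stat]           ;# e.g. {{Max Txns} 20} {...} ...
    error_check_good txn_stat [is_substr $ret "Max Txns"] 1
    error_check_good envclose [$e close] 0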
diff --git a/db/test/rpc003.tcl b/db/test/rpc003.tcl
new file mode 100644
index 000000000..f468daa10
--- /dev/null
+++ b/db/test/rpc003.tcl
@@ -0,0 +1,167 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: rpc003.tcl,v 11.4 2001/07/02 01:08:46 bostic Exp
+#
+# Test RPC and secondary indices.
+proc rpc003 { } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ #
+ # First set up the files. Secondary indices only work read-only
+ # over RPC, so we create the databases without RPC first, then
+ # do the checking over RPC.
+ #
+ puts "Rpc003: Secondary indices over RPC"
+ if { [string compare $rpc_server "localhost"] != 0 } {
+ puts "Cannot run to non-local RPC server. Skipping."
+ return
+ }
+ cleanup $testdir NULL
+ puts "\tRpc003.a: Creating local secondary index databases"
+
+ # Primary method/args.
+ set pmethod btree
+ set pomethod [convert_method $pmethod]
+ set pargs ""
+ set methods {dbtree dbtree}
+ set argses [convert_argses $methods ""]
+ set omethods [convert_methods $methods]
+
+ set nentries 500
+
+ puts "\tRpc003.b: ($pmethod/$methods) $nentries equal key/data pairs"
+ set pname "primary003.db"
+ set snamebase "secondary003"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # We have set up our databases, so now start the server and
+ # read them over RPC.
+ #
+ set dpid [exec $util_path/berkeley_db_svc -h $rpc_testdir &]
+ puts "\tRpc003.c: Started server, pid $dpid"
+ tclsleep 2
+
+ set home [file tail $rpc_testdir]
+ set env [eval {berkdb env -create -mode 0644 -home $home \
+ -server $rpc_server}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ #
+ # Attempt to send in a NULL callback to associate. It will fail
+ # if the primary and secondary are not both read-only.
+ #
+ set msg "\tRpc003.d"
+ puts "$msg: Using r/w primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.e"
+ puts "$msg: Using r/w primary and read-only secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -env $env -rdonly \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.f"
+ puts "$msg: Using read-only primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod -rdonly $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ # Open and associate the secondaries
+ puts "\tRpc003.g: Checking secondaries, both read-only"
+ set pdb [eval {berkdb_open_noerr -env} $env \
+ -rdonly $pomethod $pargs $pname]
+ error_check_good primary_open2 [is_valid_db $pdb] TRUE
+
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -env} $env -rdonly \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open2($i) [is_valid_db $sdb] TRUE
+ error_check_good db_associate2($i) \
+ [eval {$pdb associate} "" $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Rpc003.g"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+
+ exec $KILL $dpid
+ return
+}
+
+
+proc rpc003_assoc_err { popen sopen msg } {
+ set pdb [eval $popen]
+ error_check_good assoc_err_popen [is_valid_db $pdb] TRUE
+
+ puts "$msg.0: NULL callback"
+ set sdb [eval $sopen]
+ error_check_good assoc_err_sopen [is_valid_db $sdb] TRUE
+ set stat [catch {eval {$pdb associate} "" $sdb} ret]
+ error_check_good db_associate:rdonly $stat 1
+ error_check_good db_associate:inval [is_substr $ret invalid] 1
+
+ puts "$msg.1: non-NULL callback"
+ set stat [catch {eval $pdb associate [callback_n 0] $sdb} ret]
+ error_check_good db_associate:callback $stat 1
+ error_check_good db_associate:rpc \
+ [is_substr $ret "not supported in RPC"] 1
+ error_check_good assoc_sclose [$sdb close] 0
+ error_check_good assoc_pclose [$pdb close] 0
+}
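
rpc003_assoc_err leans on Tcl's standard catch idiom for expected failures; in outline (a sketch only, with $pdb and $sdb assumed already open):

    set stat [catch {$pdb associate "" $sdb} ret]   ;# 1 on error
    if { $stat == 1 && [is_substr $ret invalid] } {
        puts "associate failed as expected: $ret"
    }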
diff --git a/db/test/si005.tcl b/db/test/si005.tcl
new file mode 100644
index 000000000..e6c241fe9
--- /dev/null
+++ b/db/test/si005.tcl
@@ -0,0 +1,179 @@
+
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: si005.tcl,v 11.2 2001/07/02 01:08:46 bostic Exp
+#
+# Sindex005: Secondary index and join test.
+proc sindex005 { methods {nitems 1000} {tnum 5} args } {
+ source ./include.tcl
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Sindex005 does a join within a simulated database schema
+ # in which the primary index maps a record ID to a ZIP code and
+ # name in the form "XXXXXname", and there are two secondaries:
+ # one mapping ZIP to ID, the other mapping name to ID.
+ # The primary may be of any database type; the two secondaries
+ # must be either btree or hash.
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method for the two secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < 2 } { incr i } {
+ lappend methods $pmethod
+ }
+ } elseif { [llength $methods] != 2 } {
+ puts "FAIL: Sindex00$tnum requires exactly two secondaries."
+ return
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) Secondary index join test."
+ env_cleanup $testdir
+
+ set pname "sindex00$tnum-primary.db"
+ set zipname "sindex00$tnum-zip.db"
+ set namename "sindex00$tnum-name.db"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the databases.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ set zipdb [eval {berkdb_open -create -dup -env} $env \
+ [lindex $omethods 0] [lindex $argses 0] $zipname]
+ error_check_good zip_open [is_valid_db $zipdb] TRUE
+ error_check_good zip_associate [$pdb associate s5_getzip $zipdb] 0
+
+ set namedb [eval {berkdb_open -create -dup -env} $env \
+ [lindex $omethods 1] [lindex $argses 1] $namename]
+ error_check_good name_open [is_valid_db $namedb] TRUE
+ error_check_good name_associate [$pdb associate s5_getname $namedb] 0
+
+ puts "\tSindex00$tnum.a: Populate database with $nitems \"names\""
+ s5_populate $pdb $nitems
+ puts "\tSindex00$tnum.b: Perform a join on each \"name\" and \"ZIP\""
+ s5_jointest $pdb $zipdb $namedb
+
+ error_check_good name_close [$namedb close] 0
+ error_check_good zip_close [$zipdb close] 0
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
+
+proc s5_jointest { pdb zipdb namedb } {
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set item [lindex [lindex $dbt 0] 1]
+ set retlist [s5_dojoin $item $pdb $zipdb $namedb]
+ }
+}
+
+proc s5_dojoin { item pdb zipdb namedb } {
+ set name [s5_getname "" $item]
+ set zip [s5_getzip "" $item]
+
+ set zipc [$zipdb cursor]
+ error_check_good zipc($item) [is_valid_cursor $zipc $zipdb] TRUE
+
+ set namec [$namedb cursor]
+ error_check_good namec($item) [is_valid_cursor $namec $namedb] TRUE
+
+ set pc [$pdb cursor]
+ error_check_good pc($item) [is_valid_cursor $pc $pdb] TRUE
+
+ set ret [$zipc get -set $zip]
+ set zd [lindex [lindex $ret 0] 1]
+ error_check_good zipset($zip) [s5_getzip "" $zd] $zip
+
+ set ret [$namec get -set $name]
+ set nd [lindex [lindex $ret 0] 1]
+ error_check_good nameset($name) [s5_getname "" $nd] $name
+
+ set joinc [$pdb join $zipc $namec]
+
+ set anyreturned 0
+ for { set dbt [$joinc get] } { [llength $dbt] > 0 } \
+ { set dbt [$joinc get] } {
+ set ritem [lindex [lindex $dbt 0] 1]
+ error_check_good returned_item($item) $ritem $item
+ incr anyreturned
+ }
+ error_check_bad anyreturned($item) $anyreturned 0
+
+ error_check_good joinc_close($item) [$joinc close] 0
+ error_check_good pc_close($item) [$pc close] 0
+ error_check_good namec_close($item) [$namec close] 0
+ error_check_good zipc_close($item) [$zipc close] 0
+}
+
+proc s5_populate { db nitems } {
+ global dict
+
+ set did [open $dict]
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ gets $did word
+ if { [string length $word] < 3 } {
+ gets $did word
+ if { [string length $word] < 3 } {
+ puts "FAIL:\
+ unexpected pair of words < 3 chars long"
+ }
+ }
+ set datalist [s5_name2zips $word]
+ foreach data $datalist {
+ error_check_good db_put($data) [$db put $i $data$word] 0
+ }
+ }
+ close $did
+}
+
+proc s5_getzip { key data } { return [string range $data 0 4] }
+proc s5_getname { key data } { return [string range $data 5 end] }
+
+# The dirty secret of this test is that the ZIP code is a function of the
+# name, so we can generate a database and then verify join results easily
+# without having to consult actual data.
+#
+# Any word passed into this function will generate from 1 to 26 ZIP
+# entries, out of the set {00000, 01000 ... 99000}. The number of entries
+# is just the position in the alphabet of the word's first letter; the
+# entries are then hashed to the set {00, 01 ... 99} N different ways.
+proc s5_name2zips { name } {
+ global alphabet
+
+ set n [expr [string first [string index $name 0] $alphabet] + 1]
+ error_check_bad starts_with_abc($name) $n 0
+
+ set ret {}
+ for { set i 0 } { $i < $n } { incr i } {
+ set b 0
+ for { set j 1 } { $j < [string length $name] } \
+ { incr j } {
+ set b [s5_nhash $name $i $j $b]
+ }
+ lappend ret [format %05u [expr $b % 100]000]
+ }
+ return $ret
+}
+proc s5_nhash { name i j b } {
+ global alphabet
+
+ set c [string first [string index $name $j] $alphabet]
+ return [expr (($b * 991) + ($i * 997) + $c) % 10000000]
+}
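
A worked example of the scheme, assuming the procs above are loaded: "cat" begins with the third letter of the alphabet, so it maps to exactly three entries, each five digits ending in 000:

    set zips [s5_name2zips cat]
    puts [llength $zips]            ;# prints 3
    foreach z $zips { puts $z }     ;# for "cat": 19000, 43000, 67000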
diff --git a/db/test/test095.tcl b/db/test/test095.tcl
new file mode 100644
index 000000000..ec5e4f01d
--- /dev/null
+++ b/db/test/test095.tcl
@@ -0,0 +1,228 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: test095.tcl,v 11.6 2001/05/21 17:09:10 krinsky Exp
+#
+# DB Test 95 {access method}
+# Bulk get test.
+#
+proc test095 { method {nsets 1000} {noverflows 25} {tnum 95} args } {
+ source ./include.tcl
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set basename $testdir/test0$tnum
+ set env NULL
+ # No env was supplied, so we create our own handle below;
+ # give it a big cache, since this isn't an mpool test.
+ set carg { -cachesize {0 25000000 0} }
+ } else {
+ set basename test0$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set carg {}
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) Bulk get test"
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ # We run the meat of the test twice: once with unsorted dups,
+ # once with sorted dups.
+ for { set dflag "-dup"; set sort "unsorted"; set diter 0 } \
+ { $diter < 2 } \
+ { set dflag "-dup -dupsort"; set sort "sorted"; incr diter } {
+ set testfile $basename-$sort.db
+ set did [open $dict]
+
+ # Open and populate the database with $nsets sets of dups.
+ # Each set contains as many dups as its number
+ puts "\tTest0$tnum.a:\
+ Creating database with $nsets sets of $sort dups."
+ set dargs "$dflag $carg $args"
+ set db [eval {berkdb_open -create} $omethod $dargs $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ t95_populate $db $did $nsets 0
+
+ # Run basic get tests.
+ t95_gettest $db $tnum b [expr 8192] 1
+ t95_gettest $db $tnum c [expr 10 * 8192] 0
+
+ # Run cursor get tests.
+ t95_cgettest $db $tnum d [expr 100] 1
+ t95_cgettest $db $tnum e [expr 10 * 8192] 0
+
+ set m [expr 4000 * $noverflows]
+ puts "\tTest0$tnum.f: Growing\
+ database with $noverflows overflow sets (max item size $m)"
+ t95_populate $db $did $noverflows 4000
+
+ # Run overflow get tests.
+ t95_gettest $db $tnum g [expr 10 * 8192] 1
+ t95_gettest $db $tnum h [expr $m * 2] 1
+ t95_gettest $db $tnum i [expr $m * $noverflows * 2] 0
+
+ # Run cursor get tests.
+ t95_cgettest $db $tnum j [expr 10 * 8192] 1
+ t95_cgettest $db $tnum k [expr $m * 2] 0
+
+ error_check_good db_close [$db close] 0
+ close $did
+ }
+
+}
+
+proc t95_gettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 0
+}
+proc t95_cgettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 1
+}
+
+proc t95_gettest_body { db tnum letter bufsize expectfail usecursor } {
+ global errorCode
+
+ if { $usecursor == 0 } {
+ set action "db get -multi"
+ } else {
+ set action "dbc get -multi -set/-next"
+ }
+ puts "\tTest0$tnum.$letter: $action with bufsize $bufsize"
+
+ set allpassed TRUE
+ set saved_err ""
+
+ # If we're doing cursor gets, open a cursor to get with.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ # Traverse DB with cursor; do get/c_get(DB_MULTIPLE) on each item.
+ set dbc [$db cursor]
+ error_check_good is_valid_dbc [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -nextnodup] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+
+ if { $usecursor == 0 } {
+ set ret [catch {eval $db get -multi $bufsize $key} res]
+ } else {
+ set res {}
+ for { set ret [catch {eval $getcurs get -multi $bufsize\
+ -set $key} tres] } \
+ { $ret == 0 && [llength $tres] != 0 } \
+ { set ret [catch {eval $getcurs get -multi $bufsize\
+ -nextdup} tres]} {
+ eval lappend res $tres
+ }
+ }
+
+ # If we expect a failure, be more tolerant if the above fails;
+ # just make sure it's an ENOMEM, mark it, and move along.
+ if { $expectfail != 0 && $ret != 0 } {
+ error_check_good multi_failure_errcode \
+ [is_substr $errorCode ENOMEM] 1
+ set allpassed FALSE
+ continue
+ }
+ error_check_good get_multi($key) $ret 0
+ t95_verify $res FALSE
+ }
+
+ set ret [catch {eval $db get -multi $bufsize} res]
+
+ if { $expectfail == 1 } {
+ error_check_good allpassed $allpassed FALSE
+ puts "\t\tTest0$tnum.$letter:\
+ returned at least one ENOMEM (as expected)"
+ } else {
+ error_check_good allpassed $allpassed TRUE
+ puts "\t\tTest0$tnum.$letter: succeeded (as expected)"
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+}
+
+# Verify that a passed-in list of key/data pairs all match the predicted
+# structure (e.g. {{thing1 thing1.0}}, {{key2 key2.0} {key2 key2.1}}).
+proc t95_verify { res multiple_keys } {
+ global alphabet
+
+ set i 0
+
+ set orig_key [lindex [lindex $res 0] 0]
+ set nkeys [string trim $orig_key $alphabet]
+ set base_key [string trim $orig_key 0123456789]
+ set datum_count 0
+
+ while { 1 } {
+ set key [lindex [lindex $res $i] 0]
+ set datum [lindex [lindex $res $i] 1]
+
+ if { $datum_count >= $nkeys } {
+ if { [llength $key] != 0 } {
+ # If there are keys beyond $nkeys, we'd
+ # better have multiple_keys set.
+ error_check_bad "keys beyond number $i allowed"\
+ $multiple_keys FALSE
+
+ # If multiple_keys is set, accept the new key.
+ set orig_key $key
+ set nkeys [eval string trim \
+ $orig_key {$alphabet}]
+ set base_key [eval string trim \
+ $orig_key 0123456789]
+ set datum_count 0
+ } else {
+ # datum_count has hit nkeys. We're done.
+ return
+ }
+ }
+
+ error_check_good returned_key($i) $key $orig_key
+ error_check_good returned_datum($i) \
+ $datum $base_key.[format %4u $datum_count]
+ incr datum_count
+ incr i
+ }
+}
+
+# Add nsets dup sets: set i holds i duplicates of key word$i with data
+# word.[format %4u j], where "word" carries (i * pad_bytes) bytes of padding.
+proc t95_populate { db did nsets pad_bytes } {
+ for { set i 1 } { $i <= $nsets } { incr i } {
+ # basekey is a padded dictionary word
+ gets $did basekey
+
+ append basekey [repeat "a" [expr $pad_bytes * $i]]
+
+ # key is basekey with the number of dups stuck on.
+ set key $basekey$i
+
+ for { set j 0 } { $j < $i } { incr j } {
+ set data $basekey.[format %4u $j]
+ error_check_good db_put($key,$data) \
+ [$db put $key $data] 0
+ }
+ }
+
+ # This will make debugging easier, and since the database is
+ # read-only from here out, it's cheap.
+ error_check_good db_sync [$db sync] 0
+}
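
For orientation, the -multi flag exercised above takes a byte count for the return buffer; DB fills it with as many key/data pairs as fit, and an undersized buffer surfaces as ENOMEM. A hedged sketch with a handle already open:

    set res [$db get -multi 65536 $key]   ;# all dups for $key, buffer permitting
    foreach pair $res {
        puts "[lindex $pair 0] -> [lindex $pair 1]"
    }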
diff --git a/db/test/test096.tcl b/db/test/test096.tcl
new file mode 100644
index 000000000..b94416eeb
--- /dev/null
+++ b/db/test/test096.tcl
@@ -0,0 +1,186 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: test096.tcl,v 11.8 2001/07/09 14:49:16 dda Exp
+#
+# Access Method Test 96
+# Test of db->truncate method.
+proc test096 { method {pagesize 512} {nentries 1000} {ndups 19} args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test096: $method db truncate method test"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test096 skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test096: Skipping for specific pagesizes"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ set testfile test096.db
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ #
+ # Make sure the env we were given supports txns.
+ #
+ set stat [catch {$env txn} txn]
+ if { $stat != 0 } {
+ puts "Environment w/o txns specified; skipping."
+ return
+ }
+ error_check_good txnabort [$txn abort] 0
+ set closeenv 0
+ } else {
+ env_cleanup $testdir
+
+ #
+ # We need an env for exclusive-use testing.
+ set env [berkdb env -create -home $testdir -txn]
+ error_check_good env_create [is_valid_env $env] TRUE
+ set closeenv 1
+ }
+
+ set t1 $testdir/t1
+
+ puts "\tTest096.a: Create $nentries entries"
+ set db [eval {berkdb_open_noerr -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags ""
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set datastr [reverse $str]
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good $key:dbget [llength $ret] 1
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest096.b: Truncate database"
+ error_check_good dbclose [$db close] 0
+ set dbtr [eval {berkdb_open_noerr -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+
+ set ret [$dbtr truncate]
+ error_check_good dbtrunc $ret $nentries
+ error_check_good db_close [$dbtr close] 0
+
+ set db [berkdb_open -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbverify [verify_dir $testdir "\tTest096.c: "] 0
+
+ #
+ # Remove database, and create a new one with dups.
+ #
+ puts "\tTest096.d: Create $nentries entries with $ndups duplicates"
+ set ret [berkdb dbremove -env $env $testfile]
+ set db [eval {berkdb_open_noerr -pagesize $pagesize -dup -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags ""
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_bad $key:dbget_dups [llength $ret] 0
+ error_check_good $key:dbget_dups1 [llength $ret] $ndups
+
+ incr count
+ }
+ close $did
+ set dlist ""
+ for { set i 1 } {$i <= $ndups} {incr i} {
+ lappend dlist $i
+ }
+ dup_check $db $txn $t1 $dlist
+ puts "\tTest096.e: Verify off page duplicates status"
+ set stat [$db stat]
+ error_check_bad stat:offpage [is_substr $stat \
+ "{{Duplicate pages} 0}"] 1
+
+ set recs [expr $ndups * $count]
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.f: Truncate database in a txn then abort"
+ set txn [$env txn]
+
+ set dbtr [eval {berkdb_open_noerr -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+
+ set ret [$dbtr truncate -txn $txn]
+ error_check_good dbtrunc $ret $recs
+
+ error_check_good txnabort [$txn abort] 0
+ error_check_good db_close [$dbtr close] 0
+
+ set db [berkdb_open -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] $recs
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.g: Truncate database in a txn then commit"
+ set txn [$env txn]
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+
+ set dbtr [eval {berkdb_open_noerr -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+
+ set ret [$dbtr truncate -txn $txn]
+ error_check_good dbtrunc $ret $recs
+
+ error_check_good db_close [$dbtr close] 0
+ error_check_good txncommit [$txn commit] 0
+
+ set db [berkdb_open -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] 0
+ error_check_good dbclose [$db close] 0
+
+ error_check_good dbverify [verify_dir $testdir "\tTest096.h: "] 0
+
+ if { $closeenv == 1 } {
+ error_check_good envclose [$env close] 0
+ }
+}
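
The truncate semantics the test pins down can be summarized in a few lines (a sketch; handles assumed open and the env transactional):

    set t [$env txn]
    set n [$db truncate -txn $t]   ;# n is the number of records discarded
    $t abort                       ;# abort rolls the whole truncate back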
diff --git a/db/test/txnscript.tcl b/db/test/txnscript.tcl
new file mode 100644
index 000000000..8739b1da0
--- /dev/null
+++ b/db/test/txnscript.tcl
@@ -0,0 +1,67 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: txnscript.tcl,v 11.1 2001/05/31 18:12:45 sue Exp
+#
+# Txn003 script - outstanding child prepare script
+# Usage: txnscript envcmd dbfile gidfile key data
+# envcmd: command to open env
+# dbfile: name of database file
+# gidf: name of global id file
+# key: key to use
+# data: new data to use
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "txnscript envcmd dbfile gidfile key data"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set key [ lindex $argv 3 ]
+set data [ lindex $argv 4 ]
+
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 1
+set db [berkdb_open -env $dbenv $dbfile]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+puts "\tTxnscript.a: begin parent and child txn"
+set parent [$dbenv txn]
+error_check_good parent [is_valid_txn $parent $dbenv] TRUE
+set child [$dbenv txn -parent $parent]
+error_check_good child [is_valid_txn $child $dbenv] TRUE
+
+puts "\tTxnscript.b: Modify data"
+error_check_good db_put [$db put -txn $child $key $data] 0
+
+set gfd [open $gidfile w+]
+set gid [make_gid txnscript:$parent]
+puts $gfd $gid
+puts "\tTxnscript.c: Prepare parent only"
+error_check_good txn_prepare:$parent [$parent prepare $gid] 0
+close $gfd
+
+puts "\tTxnscript.d: Check child handle"
+set stat [catch {$child abort} ret]
+error_check_good child_handle $stat 1
+error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
+puts "\tTxnscript completed successfully"
+flush stdout
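
A hypothetical invocation matching the usage line above; in the suite, txn003 is the caller, and the file names here are illustrative:

    exec $tclsh_path $test_path/txnscript.tcl \
        {berkdb env -create -txn -home $testdir} \
        txn003.db gid.file key1 data1 &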